Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/acpi_apd.c | 10
-rw-r--r--  drivers/acpi/acpi_lpss.c | 10
-rw-r--r--  drivers/acpi/acpi_pad.c | 5
-rw-r--r--  drivers/acpi/acpi_platform.c | 5
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 11
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 50
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 2
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 3
-rw-r--r--  drivers/acpi/acpica/nsload.c | 2
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 10
-rw-r--r--  drivers/acpi/apei/ghes.c | 2
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c | 4
-rw-r--r--  drivers/acpi/ec.c | 2
-rw-r--r--  drivers/acpi/fan.c | 12
-rw-r--r--  drivers/acpi/nfit/core.c | 210
-rw-r--r--  drivers/acpi/nfit/mce.c | 24
-rw-r--r--  drivers/acpi/nfit/nfit.h | 17
-rw-r--r--  drivers/acpi/osl.c | 13
-rw-r--r--  drivers/acpi/pci_link.c | 38
-rw-r--r--  drivers/acpi/processor_idle.c | 5
-rw-r--r--  drivers/acpi/property.c | 117
-rw-r--r--  drivers/acpi/scan.c | 2
-rw-r--r--  drivers/acpi/thermal.c | 3
-rw-r--r--  drivers/android/binder.c | 35
-rw-r--r--  drivers/ata/ahci.c | 156
-rw-r--r--  drivers/ata/ahci.h | 24
-rw-r--r--  drivers/ata/ahci_qoriq.c | 20
-rw-r--r--  drivers/ata/ahci_st.c | 4
-rw-r--r--  drivers/ata/libahci.c | 9
-rw-r--r--  drivers/ata/libata-scsi.c | 288
-rw-r--r--  drivers/ata/pata_at91.c | 4
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 3
-rw-r--r--  drivers/ata/sata_mv.c | 6
-rw-r--r--  drivers/auxdisplay/Kconfig | 9
-rw-r--r--  drivers/auxdisplay/Makefile | 1
-rw-r--r--  drivers/auxdisplay/img-ascii-lcd.c | 443
-rw-r--r--  drivers/base/Kconfig | 6
-rw-r--r--  drivers/base/dd.c | 5
-rw-r--r--  drivers/base/memory.c | 5
-rw-r--r--  drivers/base/power/main.c | 8
-rw-r--r--  drivers/block/DAC960.c | 4
-rw-r--r--  drivers/block/aoe/aoecmd.c | 41
-rw-r--r--  drivers/block/drbd/drbd_main.c | 2
-rw-r--r--  drivers/block/loop.c | 9
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 3
-rw-r--r--  drivers/block/nbd.c | 412
-rw-r--r--  drivers/block/null_blk.c | 129
-rw-r--r--  drivers/block/rbd.c | 1459
-rw-r--r--  drivers/block/rbd_types.h | 11
-rw-r--r--  drivers/block/virtio_blk.c | 11
-rw-r--r--  drivers/block/xen-blkfront.c | 1
-rw-r--r--  drivers/bluetooth/btusb.c | 9
-rw-r--r--  drivers/bluetooth/btwilink.c | 2
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 8
-rw-r--r--  drivers/bus/Kconfig | 15
-rw-r--r--  drivers/bus/Makefile | 1
-rw-r--r--  drivers/bus/qcom-ebi2.c | 408
-rw-r--r--  drivers/bus/tegra-aconnect.c | 22
-rw-r--r--  drivers/char/agp/intel-gtt.c | 2
-rw-r--r--  drivers/char/hw_random/Kconfig | 13
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/amd-rng.c | 140
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 5
-rw-r--r--  drivers/char/hw_random/cavium-rng-vf.c | 99
-rw-r--r--  drivers/char/hw_random/cavium-rng.c | 94
-rw-r--r--  drivers/char/hw_random/core.c | 43
-rw-r--r--  drivers/char/hw_random/geode-rng.c | 58
-rw-r--r--  drivers/char/hw_random/meson-rng.c | 3
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 4
-rw-r--r--  drivers/char/hw_random/omap3-rom-rng.c | 10
-rw-r--r--  drivers/char/hw_random/pasemi-rng.c | 39
-rw-r--r--  drivers/char/hw_random/pic32-rng.c | 1
-rw-r--r--  drivers/char/hw_random/st-rng.c | 4
-rw-r--r--  drivers/char/hw_random/tx4939-rng.c | 11
-rw-r--r--  drivers/char/ipmi/Kconfig | 8
-rw-r--r--  drivers/char/ipmi/Makefile | 1
-rw-r--r--  drivers/char/ipmi/bt-bmc.c | 505
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 7
-rw-r--r--  drivers/char/ppdev.c | 3
-rw-r--r--  drivers/char/random.c | 40
-rw-r--r--  drivers/char/sonypi.c | 2
-rw-r--r--  drivers/char/tb0219.c | 1
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 3
-rw-r--r--  drivers/char/virtio_console.c | 25
-rw-r--r--  drivers/clk/at91/clk-programmable.c | 2
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 11
-rw-r--r--  drivers/clk/berlin/bg2.c | 2
-rw-r--r--  drivers/clk/berlin/bg2q.c | 2
-rw-r--r--  drivers/clk/clk-efm32gg.c | 2
-rw-r--r--  drivers/clk/clk-max77686.c | 1
-rw-r--r--  drivers/clk/clk-qoriq.c | 13
-rw-r--r--  drivers/clk/clk-xgene.c | 10
-rw-r--r--  drivers/clk/hisilicon/clk-hi6220.c | 4
-rw-r--r--  drivers/clk/imx/clk-imx1.c | 46
-rw-r--r--  drivers/clk/imx/clk-pllv3.c | 8
-rw-r--r--  drivers/clk/mediatek/Kconfig | 2
-rw-r--r--  drivers/clk/meson/gxbb.h | 18
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 2
-rw-r--r--  drivers/clk/mmp/clk-of-pxa168.c | 2
-rw-r--r--  drivers/clk/mmp/clk-of-pxa910.c | 4
-rw-r--r--  drivers/clk/mvebu/armada-37xx-periph.c | 11
-rw-r--r--  drivers/clk/mvebu/orion.c | 70
-rw-r--r--  drivers/clk/rockchip/clk-ddr.c | 5
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c | 1
-rw-r--r--  drivers/clk/samsung/clk-exynos-clkout.c | 22
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 12
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 2
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-core.c | 20
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-mio.c | 2
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-mux.c | 2
-rw-r--r--  drivers/clk/uniphier/clk-uniphier.h | 2
-rw-r--r--  drivers/clocksource/Kconfig | 12
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/exynos_mct.c | 4
-rw-r--r--  drivers/clocksource/jcore-pit.c | 249
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 16
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 19
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 81
-rw-r--r--  drivers/cpuidle/Kconfig.mips | 2
-rw-r--r--  drivers/cpuidle/cpuidle-cps.c | 2
-rw-r--r--  drivers/cpuidle/driver.c | 5
-rw-r--r--  drivers/crypto/Kconfig | 3
-rw-r--r--  drivers/crypto/caam/caamalg.c | 170
-rw-r--r--  drivers/crypto/caam/caamhash.c | 581
-rw-r--r--  drivers/crypto/caam/ctrl.c | 3
-rw-r--r--  drivers/crypto/caam/desc.h | 6
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 17
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/jr.c | 26
-rw-r--r--  drivers/crypto/caam/regs.h | 8
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 2
-rw-r--r--  drivers/crypto/ccp/Makefile | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 18
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 182
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 1017
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 113
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 312
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 11
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 576
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c | 23
-rw-r--r--  drivers/crypto/hifn_795x.c | 12
-rw-r--r--  drivers/crypto/img-hash.c | 108
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 9
-rw-r--r--  drivers/crypto/marvell/cesa.c | 1
-rw-r--r--  drivers/crypto/marvell/hash.c | 44
-rw-r--r--  drivers/crypto/marvell/tdma.c | 1
-rw-r--r--  drivers/crypto/mv_cesa.c | 7
-rw-r--r--  drivers/crypto/mxc-scc.c | 4
-rw-r--r--  drivers/crypto/omap-aes.c | 141
-rw-r--r--  drivers/crypto/omap-des.c | 35
-rw-r--r--  drivers/crypto/omap-sham.c | 568
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_admin.c | 20
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c | 8
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c | 6
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 6
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-core.c | 68
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 165
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss.h | 2
-rw-r--r--  drivers/crypto/vmx/Kconfig | 1
-rw-r--r--  drivers/crypto/vmx/ghash.c | 31
-rw-r--r--  drivers/dax/Kconfig | 7
-rw-r--r--  drivers/dax/dax.c | 577
-rw-r--r--  drivers/dax/dax.h | 5
-rw-r--r--  drivers/dax/pmem.c | 9
-rw-r--r--  drivers/devfreq/devfreq.c | 8
-rw-r--r--  drivers/devfreq/event/Kconfig | 1
-rw-r--r--  drivers/devfreq/event/exynos-nocp.c | 3
-rw-r--r--  drivers/dma-buf/dma-buf.c | 23
-rw-r--r--  drivers/dma-buf/fence-array.c | 7
-rw-r--r--  drivers/dma-buf/reservation.c | 2
-rw-r--r--  drivers/dma-buf/sync_debug.c | 12
-rw-r--r--  drivers/dma-buf/sync_file.c | 204
-rw-r--r--  drivers/dma/Kconfig | 1
-rw-r--r--  drivers/dma/cppi41.c | 31
-rw-r--r--  drivers/dma/edma.c | 1
-rw-r--r--  drivers/dma/sun6i-dma.c | 2
-rw-r--r--  drivers/extcon/extcon-qcom-spmi-misc.c | 2
-rw-r--r--  drivers/firewire/net.c | 59
-rw-r--r--  drivers/firewire/nosy.c | 13
-rw-r--r--  drivers/firmware/Kconfig | 1
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 5
-rw-r--r--  drivers/firmware/meson/Kconfig | 9
-rw-r--r--  drivers/firmware/meson/Makefile | 1
-rw-r--r--  drivers/firmware/meson/meson_sm.c | 248
-rw-r--r--  drivers/firmware/qcom_scm.c | 19
-rw-r--r--  drivers/gpio/Kconfig | 6
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/gpio-ath79.c | 1
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 2
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 92
-rw-r--r--  drivers/gpio/gpio-mxs.c | 8
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 36
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 2
-rw-r--r--  drivers/gpio/gpio-tc3589x.c | 2
-rw-r--r--  drivers/gpio/gpio-ts4800.c | 1
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 7
-rw-r--r--  drivers/gpio/gpiolib-of.c | 14
-rw-r--r--  drivers/gpio/gpiolib.c | 106
-rw-r--r--  drivers/gpu/drm/Kconfig | 146
-rw-r--r--  drivers/gpu/drm/Makefile | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ObjectID.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 189
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 185
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 114
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 341
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 512
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 143
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 182
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 73
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 239
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 484
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 120
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 268
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h) | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 503
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 431
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 131
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_smc.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 139
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 131
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 3170
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.h (renamed from drivers/gpu/drm/amd/amdgpu/tonga_smum.h) | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 189
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 802
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.h (renamed from drivers/gpu/drm/amd/amdgpu/iceland_smum.h) | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | 186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c | 863
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 3362
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 257
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 1006
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 1071
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 200
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 677
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/r600_dpm.h | 127
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 242
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 1965
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.h (renamed from drivers/gpu/drm/amd/amdgpu/fiji_smum.h) | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 915
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 8041
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.h | 1015
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 299
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.h | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_smc.c | 273
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sislands_smc.h | 423
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 862
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 150
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 240
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 447
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 537
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 8
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 13
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h | 941
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h | 105
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/si/sid.h | 2461
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/atombios.h | 2
-rwxr-xr-x[-rw-r--r--]  drivers/gpu/drm/amd/include/cgs_common.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 135
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/psm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 251
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c | 121
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h | 105
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 5599
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h | 350
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c | 613
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h | 81
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h | 62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 36
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 316
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h | 62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 5290
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c | 716
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h | 62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c) | 284
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h) | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c) | 160
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h) | 25
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h | 55
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4393
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h) | 244
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c) | 985
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h) | 56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c) | 264
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h | 58
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c | 350
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h | 107
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 6276
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h | 397
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c | 590
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h | 61
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | 22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 175
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/power_state.h | 22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_debug.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu71.h | 510
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h | 631
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu7_common.h | 58
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h | 412
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 77
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 46
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 2374
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h | 51
-rwxr-xr-x[-rw-r--r--]  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 612
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | 2576
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h) | 26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 250
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h | 71
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | 2287
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h | 42
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 708
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h | 59
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 589
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 87
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 110
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 3206
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h) | 44
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 672
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h | 46
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 12
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 6
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 23
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_hdmi.c | 159
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 4
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 7
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.h | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 20
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 18
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 9
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 6
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 26
-rw-r--r--  drivers/gpu/drm/bochs/bochs.h | 1
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 10
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 12
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7533.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 436
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.h | 48
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 522
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h | 34
-rw-r--r--  drivers/gpu/drm/bridge/dumb-vga-dac.c | 223
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi.c | 18
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 8
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 6
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 16
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 128
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 2
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 207
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 34
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 36
-rw-r--r--  drivers/gpu/drm/drm_color_mgmt.c | 296
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 1123
-rw-r--r--  drivers/gpu/drm/drm_context.c | 24
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 4727
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 56
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper_internal.h | 65
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 169
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 6
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c | 19
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 201
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 6
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 110
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 271
-rw-r--r--  drivers/gpu/drm/drm_encoder.c | 233
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 145
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 7
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 62
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 857
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 8
-rw-r--r--  drivers/gpu/drm/drm_global.c | 24
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 2
-rw-r--r--  drivers/gpu/drm/drm_info.c | 8
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 3
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 18
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 61
-rw-r--r--  drivers/gpu/drm/drm_kms_helper_common.c | 3
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 4
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 63
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 142
-rw-r--r--  drivers/gpu/drm/drm_mode_object.c | 438
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 30
-rw-r--r--  drivers/gpu/drm/drm_modeset_helper.c | 153
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 12
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 906
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 207
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 4
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 133
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 8
-rw-r--r--  drivers/gpu/drm/drm_property.c | 912
-rw-r--r--  drivers/gpu/drm/drm_rect.c | 30
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 6
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 113
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 8
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 83
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 105
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 9
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_dump.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 7
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 233
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 15
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu.h | 10
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 261
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h | 25
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 150
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 9
-rw-r--r--  drivers/gpu/drm/etnaviv/state_hi.xml.h | 9
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 58
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 66
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 54
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 242
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 76
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 112
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 68
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 17
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 33
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 25
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 39
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_tcon.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/accel_2d.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/opregion.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_modes.c | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 21
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i2c/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 297
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 381
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 1531
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 402
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1244
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 3453
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 255
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 70
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 196
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 915
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 488
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 881
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 244
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 132
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 947
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h | 689
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 78
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 63
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 112
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 80
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 871
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 382
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 217
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.c | 101
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c | 84
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 296
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 173
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 362
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 65
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 239
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 152
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 142
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 48
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 432
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 140
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2057
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 759
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 139
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 469
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 188
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 321
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 128
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.h | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 157
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 211
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 134
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 1596
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 214
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 235
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 72
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 906
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 1578
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 326
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 222
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 334
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 6
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 160
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 3
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 23
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 54
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 16
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 49
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 151
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 18
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 23
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 120
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.h | 15
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 28
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c | 42
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 6
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 14
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 21
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 5
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 23
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 46
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 25
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 22
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 72
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_perf.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss-of.c | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 14
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 14
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-jdi-lt070me05000.c | 532
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 44
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 56
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 19
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 136
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_auxch.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/sislands_smc.h | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 15
-rw-r--r--  drivers/gpu/drm/rockchip/Makefile | 2
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 74
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 11
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 7
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 79
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_psr.c | 275
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_psr.h | 28
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 507
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 7
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 243
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.h | 193
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 2
-rw-r--r--  drivers/gpu/drm/savage/savage_state.c | 12
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sti/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/sti/Makefile | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 51
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.h | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_crtc.c | 26
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.c | 8
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c | 32
-rw-r--r--  drivers/gpu/drm/sti/sti_dvo.c | 3
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 35
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 39
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 31
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c | 336
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c | 25
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.c | 19
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 41
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.c | 4
-rw-r--r--  drivers/gpu/drm/sti/sti_vtac.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c | 3
-rw-r--r--  drivers/gpu/drm/sun4i/Makefile | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 74
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.h | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_dotclock.c | 8
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 23
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_framebuffer.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_layer.c | 56
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.c | 67
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 98
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h | 6
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tv.c | 46
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_drc.c | 118
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 26
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 7
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/Makefile | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 464
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 276
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.h | 18
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_external.c | 22
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_external.h | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.c | 14
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_plane.c | 129
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_regs.h | 14
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c | 8
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 7
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 20
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 31
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 16
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 96
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dpi.c | 11
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 7
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 30
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 17
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 206
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_regs.h | 19
-rw-r--r--  drivers/gpu/drm/vc4/vc4_render_cl.c | 21
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate.c | 17
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drm_bus.c | 17
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 37
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 145
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 56
-rw-r--r--  drivers/gpu/ipu-v3/Makefile | 3
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 169
-rw-r--r--  drivers/gpu/ipu-v3/ipu-cpmem.c | 13
-rw-r--r--  drivers/gpu/ipu-v3/ipu-csi.c | 26
-rw-r--r--  drivers/gpu/ipu-v3/ipu-dmfc.c | 18
-rw-r--r--  drivers/gpu/ipu-v3/ipu-ic.c | 42
-rw-r--r--  drivers/gpu/ipu-v3/ipu-image-convert.c | 1709
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prv.h | 39
-rw-r--r--  drivers/gpu/ipu-v3/ipu-vdi.c | 243
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 110
-rw-r--r--  drivers/hid/Kconfig | 19
-rw-r--r--  drivers/hid/Makefile | 2
-rw-r--r--  drivers/hid/hid-asus.c | 299
-rw-r--r--  drivers/hid/hid-core.c | 14
-rw-r--r--  drivers/hid/hid-cp2112.c | 318
-rw-r--r--  drivers/hid/hid-dr.c | 83
-rw-r--r--  drivers/hid/hid-ids.h | 19
-rw-r--r--  drivers/hid/hid-input.c | 96
-rw-r--r--  drivers/hid/hid-led.c | 23
-rw-r--r--  drivers/hid/hid-lg.c | 14
-rw-r--r--  drivers/hid/hid-magicmouse.c | 12
-rw-r--r--  drivers/hid/hid-mf.c | 166
-rw-r--r--  drivers/hid/hid-microsoft.c | 6
-rw-r--r--  drivers/hid/hid-multitouch.c | 88
-rw-r--r--  drivers/hid/hid-rmi.c | 10
-rw-r--r--  drivers/hid/hid-sensor-custom.c | 6
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 22
-rw-r--r--  drivers/hid/hid-sony.c | 445
-rw-r--r--  drivers/hid/hid-udraw-ps3.c | 474
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/ipc.c | 169
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 6
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/utils.h | 64
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c | 9
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/hbm.c | 6
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 6
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 7
-rw-r--r--  drivers/hid/wacom.h | 2
-rw-r--r--  drivers/hid/wacom_sys.c | 92
-rw-r--r--  drivers/hid/wacom_wac.c | 601
-rw-r--r--  drivers/hid/wacom_wac.h | 76
-rw-r--r--  drivers/hv/hv_util.c | 10
-rw-r--r--  drivers/hv/vmbus_drv.c | 2
-rw-r--r--  drivers/hwmon/adm9240.c | 6
-rw-r--r--  drivers/hwmon/hwmon.c | 6
-rw-r--r--  drivers/hwmon/max31790.c | 4
-rw-r--r--  drivers/i2c/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/Kconfig | 25
-rw-r--r--  drivers/i2c/busses/Makefile | 3
-rw-r--r--  drivers/i2c/busses/i2c-amd756.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-at91.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-axxia.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-bcm-iproc.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-bcm-kona.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-bfin-twi.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-brcmstb.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-cpm.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-cros-ec-tunnel.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 213
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 12
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 43
-rw-r--r--  drivers/i2c/busses/i2c-digicolor.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-diolan-u2c.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-dln2.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-efm32.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-exynos5.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-hix5hd2.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 21
-rw-r--r--  drivers/i2c/busses/i2c-ibm_iic.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-img-scb.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 46
-rw-r--r--  drivers/i2c/busses/i2c-isch.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-lpc2k.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-meson.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-mpc.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-mt65xx.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-nforce2.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-nomadik.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-ocores.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.c (renamed from drivers/i2c/busses/i2c-octeon.c) | 1080
-rw-r--r--  drivers/i2c/busses/i2c-octeon-core.h | 216
-rw-r--r--  drivers/i2c/busses/i2c-octeon-platdrv.c | 286
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-pmcmsp.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pnx.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-puv3.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-riic.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sh7760.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-sh_mobile.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-sirf.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-st.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-stu300.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 279
-rw-r--r--  drivers/i2c/busses/i2c-thunderx-pcidrv.c | 259
-rw-r--r--  drivers/i2c/busses/i2c-uniphier-f.c | 106
-rw-r--r--  drivers/i2c/busses/i2c-uniphier.c | 78
-rw-r--r--  drivers/i2c/busses/i2c-wmt.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-xgene-slimpro.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-xiic.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-xlp9xx.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-xlr.c | 5
-rw-r--r--  drivers/i2c/i2c-core.c | 295
-rw-r--r--  drivers/i2c/i2c-mux.c | 73
-rw-r--r--  drivers/i2c/muxes/Kconfig | 1
-rw-r--r--  drivers/i2c/muxes/i2c-arb-gpio-challenge.c | 2
-rw-r--r--  drivers/i2c/muxes/i2c-demux-pinctrl.c | 22
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca9541.c | 11
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca954x.c | 50
-rw-r--r--  drivers/idle/intel_idle.c | 4
-rw-r--r--  drivers/iio/accel/st_accel_core.c | 12
-rw-r--r--  drivers/iio/adc/Kconfig | 2
-rw-r--r--  drivers/iio/chemical/atlas-ph-sensor.c | 7
-rw-r--r--  drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 56
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_core.c | 8
-rw-r--r--  drivers/iio/orientation/hid-sensor-rotation.c | 1
-rw-r--r--  drivers/iio/temperature/maxim_thermocouple.c | 18
-rw-r--r--  drivers/infiniband/Kconfig | 3
-rw-r--r--  drivers/infiniband/core/addr.c | 13
-rw-r--r--  drivers/infiniband/core/cm.c | 126
-rw-r--r--  drivers/infiniband/core/cma.c | 77
-rw-r--r--  drivers/infiniband/core/iwcm.c | 2
-rw-r--r--  drivers/infiniband/core/mad.c | 4
-rw-r--r--  drivers/infiniband/core/multicast.c | 2
-rw-r--r--  drivers/infiniband/core/sa_query.c | 2
-rw-r--r--  drivers/infiniband/core/sysfs.c | 2
-rw-r--r--  drivers/infiniband/core/ucma.c | 3
-rw-r--r--  drivers/infiniband/core/umem.c | 8
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 7
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 121
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 7
-rw-r--r--  drivers/infiniband/core/verbs.c | 77
-rw-r--r--  drivers/infiniband/hw/Makefile | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 3
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 14
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 73
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 12
-rw-r--r--  drivers/infiniband/hw/hfi1/affinity.c | 72
-rw-r--r--  drivers/infiniband/hw/hfi1/affinity.h | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 27
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.h | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/driver.c | 37
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 19
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 89
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c | 104
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 13
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 19
-rw-r--r--  drivers/infiniband/hw/hfi1/sysfs.c | 25
-rw-r--r--  drivers/infiniband/hw/hfi1/trace_rx.h | 60
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 3
-rw-r--r--  drivers/infiniband/hw/hns/Kconfig | 10
-rw-r--r--  drivers/infiniband/hw/hns/Makefile | 8
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c | 128
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_alloc.c | 257
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.c | 368
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.h | 80
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_common.h | 325
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 453
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 728
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_eq.c | 758
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_eq.h | 134
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c | 404
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.h | 135
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 2921
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 990
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 1168
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 617
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_pd.c | 138
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 838
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_user.h (renamed from drivers/infiniband/hw/cxgb3/iwch_user.h) | 51
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 7
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/ah.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 7
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 73
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 147
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 7
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 25
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/user.h | 107
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 404
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 52
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 171
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/user.h | 281
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_catas.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c | 3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 7
-rw-r--r--  drivers/infiniband/hw/nes/nes.h | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 4
-rw-r--r--  drivers/infiniband/hw/nes/nes_user.h | 114
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_abi.h | 149
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 3
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/qedr/Kconfig | 8
-rw-r--r--  drivers/infiniband/hw/qedr/Makefile | 3
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 914
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 495
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c | 622
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.h (renamed from drivers/infiniband/hw/cxgb4/user.h) | 73
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi.h | 56
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 748
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 3547
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.h | 101
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 3
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 5
-rw-r--r--  drivers/infiniband/sw/rdmavt/cq.c | 10
-rw-r--r--  drivers/infiniband/sw/rdmavt/dma.c | 20
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c | 36
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_av.c | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c | 6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_dma.c | 17
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mmap.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 59
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.h | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 57
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_queue.c | 9
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_queue.h | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c | 40
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 27
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_sysfs.c | 12
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 50
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 28
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 57
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 6
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 22
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 56
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 3
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 2
-rw-r--r--  drivers/input/mouse/alps.c | 87
-rw-r--r--  drivers/input/mouse/alps.h | 2
-rw-r--r--  drivers/input/mouse/elantech.c | 27
-rw-r--r--  drivers/input/mouse/focaltech.c | 6
-rw-r--r--  drivers/input/rmi4/Kconfig | 11
-rw-r--r--  drivers/input/rmi4/Makefile | 1
-rw-r--r--  drivers/input/rmi4/rmi_bus.c | 4
-rw-r--r--  drivers/input/rmi4/rmi_driver.c | 1
-rw-r--r--  drivers/input/rmi4/rmi_driver.h | 1
-rw-r--r--  drivers/input/rmi4/rmi_f01.c | 1
-rw-r--r--  drivers/input/rmi4/rmi_f11.c | 1
-rw-r--r--  drivers/input/rmi4/rmi_f54.c | 757
-rw-r--r--  drivers/input/rmi4/rmi_i2c.c | 38
-rw-r--r--  drivers/input/rmi4/rmi_spi.c | 22
-rw-r--r--  drivers/input/serio/i8042-io.h | 2
-rw-r--r--  drivers/input/serio/i8042-ip22io.h | 2
-rw-r--r--  drivers/input/serio/i8042-ppcio.h | 2
-rw-r--r--  drivers/input/serio/i8042-sparcio.h | 2
-rw-r--r--  drivers/input/serio/i8042-unicore32io.h | 2
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 103
-rw-r--r--  drivers/input/serio/i8042.c | 55
-rw-r--r--  drivers/input/touchscreen/Kconfig | 9
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c | 521
-rw-r--r--  drivers/input/touchscreen/melfas_mip4.c | 38
-rw-r--r--  drivers/input/touchscreen/sur40.c | 142
-rw-r--r--  drivers/iommu/Kconfig | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 15
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 3
-rw-r--r--  drivers/iommu/amd_iommu_proto.h | 6
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 570
-rw-r--r--  drivers/iommu/arm-smmu.c | 1007
-rw-r--r--  drivers/iommu/dma-iommu.c | 161
-rw-r--r--  drivers/iommu/exynos-iommu.c | 4
-rw-r--r--  drivers/iommu/intel-iommu.c | 117
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 4
-rw-r--r--  drivers/iommu/iommu.c | 58
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 2
-rw-r--r--  drivers/iommu/of_iommu.c | 52
-rw-r--r--  drivers/ipack/ipack.c | 2
-rw-r--r--  drivers/irqchip/Kconfig | 4
-rw-r--r--  drivers/irqchip/Makefile | 2
-rw-r--r--  drivers/irqchip/irq-bcm6345-l1.c | 1
-rw-r--r--  drivers/irqchip/irq-bcm7038-l1.c | 1
-rw-r--r--  drivers/irqchip/irq-bcm7120-l2.c | 1
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c | 1
-rw-r--r--  drivers/irqchip/irq-eznps.c | 6
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c | 3
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 13
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r--  drivers/irqchip/irq-gic.c | 2
-rw-r--r--  drivers/irqchip/irq-i8259.c | 30
-rw-r--r--  drivers/irqchip/irq-jcore-aic.c | 20
-rw-r--r--  drivers/irqchip/irq-metag-ext.c | 1
-rw-r--r--  drivers/irqchip/irq-vic.c | 1
-rw-r--r--  drivers/lightnvm/Kconfig | 2
-rw-r--r--  drivers/lightnvm/Makefile | 2
-rw-r--r--  drivers/lightnvm/core.c | 55
-rw-r--r--  drivers/lightnvm/lightnvm.h | 35
-rw-r--r--  drivers/lightnvm/sysfs.c | 198
-rw-r--r--  drivers/macintosh/macio_asic.c | 4
-rw-r--r--  drivers/macintosh/rack-meter.c | 2
-rw-r--r--  drivers/macintosh/smu.c | 18
-rw-r--r--  drivers/macintosh/via-cuda.c | 2
-rw-r--r--  drivers/macintosh/via-pmu.c | 6
-rw-r--r--  drivers/mailbox/pcc.c | 13
-rw-r--r--  drivers/md/bcache/btree.c | 6
-rw-r--r--  drivers/md/bcache/debug.c | 6
-rw-r--r--  drivers/md/bcache/movinggc.c | 5
-rw-r--r--  drivers/md/bcache/request.c | 9
-rw-r--r--  drivers/md/bcache/writeback.c | 5
-rw-r--r--  drivers/md/dm-bufio.c | 31
-rw-r--r--  drivers/md/dm-cache-metadata.c | 183
-rw-r--r--  drivers/md/dm-cache-policy-cleaner.c | 2
-rw-r--r--  drivers/md/dm-cache-policy-internal.h | 6
-rw-r--r--  drivers/md/dm-cache-policy-smq.c | 45
-rw-r--r--  drivers/md/dm-cache-policy.h | 10
-rw-r--r--  drivers/md/dm-crypt.c | 26
-rw-r--r--  drivers/md/dm-log-writes.c | 6
-rw-r--r--  drivers/md/dm-mpath.c | 59
-rw-r--r--  drivers/md/dm-raid.c | 15
-rw-r--r--  drivers/md/dm-raid1.c | 22
-rw-r--r--  drivers/md/dm-rq.c | 120
-rw-r--r--  drivers/md/dm-rq.h | 2
-rw-r--r--  drivers/md/dm-table.c | 24
-rw-r--r--  drivers/md/dm.c | 45
-rw-r--r--  drivers/md/md.c | 10
-rw-r--r--  drivers/md/persistent-data/dm-array.c | 228
-rw-r--r--  drivers/md/persistent-data/dm-array.h | 52
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 162
-rw-r--r--  drivers/md/persistent-data/dm-btree.h | 35
-rw-r--r--  drivers/md/raid1.c | 34
-rw-r--r--  drivers/md/raid10.c | 7
-rw-r--r--  drivers/md/raid5-cache.c | 6
-rw-r--r--  drivers/media/Kconfig | 7
-rw-r--r--  drivers/media/Makefile | 2
-rw-r--r--  drivers/media/common/v4l2-tpg/v4l2-tpg-core.c | 14
-rw-r--r--  drivers/media/dvb-core/demux.h | 44
-rw-r--r--  drivers/media/dvb-core/dvb_frontend.c | 28
-rw-r--r--  drivers/media/dvb-core/dvb_math.h | 2
-rw-r--r--  drivers/media/dvb-core/dvb_ringbuffer.h | 226
-rw-r--r--  drivers/media/dvb-frontends/Kconfig | 7
-rw-r--r--  drivers/media/dvb-frontends/Makefile | 1
-rw-r--r--  drivers/media/dvb-frontends/af9013.h | 1
-rw-r--r--  drivers/media/dvb-frontends/af9033.h | 2
-rw-r--r--  drivers/media/dvb-frontends/ascot2e.c | 2
-rw-r--r--  drivers/media/dvb-frontends/ascot2e.h | 1
-rw-r--r--  drivers/media/dvb-frontends/atbm8830.h | 1
-rw-r--r--  drivers/media/dvb-frontends/au8522.h | 1
-rw-r--r--  drivers/media/dvb-frontends/cx22702.h | 1
-rw-r--r--  drivers/media/dvb-frontends/cx24113.h | 2
-rw-r--r--  drivers/media/dvb-frontends/cx24116.h | 1
-rw-r--r--  drivers/media/dvb-frontends/cx24117.h | 1
-rw-r--r--  drivers/media/dvb-frontends/cx24120.h | 1
-rw-r--r--  drivers/media/dvb-frontends/cx24123.h | 1
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r.h | 27
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_c.c | 302
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_core.c | 597
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_priv.h | 42
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_t.c | 300
-rw-r--r--  drivers/media/dvb-frontends/cxd2820r_t2.c | 278
-rw-r--r--  drivers/media/dvb-frontends/cxd2841er.c | 108
-rw-r--r--  drivers/media/dvb-frontends/cxd2841er.h | 1
-rw-r--r--  drivers/media/dvb-frontends/dib3000mc.h | 2
-rw-r--r--  drivers/media/dvb-frontends/dib7000m.h | 2
-rw-r--r--  drivers/media/dvb-frontends/dib7000p.h | 2
-rw-r--r--  drivers/media/dvb-frontends/drxd.h | 1
-rw-r--r--  drivers/media/dvb-frontends/drxk.h | 1
-rw-r--r--  drivers/media/dvb-frontends/drxk_hard.c | 2
-rw-r--r--  drivers/media/dvb-frontends/ds3000.h | 1
-rw-r--r--  drivers/media/dvb-frontends/dvb-pll.c | 2
-rw-r--r--  drivers/media/dvb-frontends/dvb_dummy_fe.h | 1
-rw-r--r--  drivers/media/dvb-frontends/ec100.h | 1
-rw-r--r--  drivers/media/dvb-frontends/gp8psk-fe.c (renamed from drivers/media/usb/dvb-usb/gp8psk-fe.c) | 156
-rw-r--r--  drivers/media/dvb-frontends/gp8psk-fe.h | 82
-rw-r--r--  drivers/media/dvb-frontends/hd29l2.h | 1
-rw-r--r--  drivers/media/dvb-frontends/helene.c | 12
-rw-r--r--  drivers/media/dvb-frontends/helene.h | 1
-rw-r--r--  drivers/media/dvb-frontends/horus3a.c | 2
-rw-r--r--  drivers/media/dvb-frontends/horus3a.h | 1
-rw-r--r--  drivers/media/dvb-frontends/ix2505v.c | 2
-rw-r--r--  drivers/media/dvb-frontends/ix2505v.h | 1
-rw-r--r--  drivers/media/dvb-frontends/lg2160.h | 1
-rw-r--r--  drivers/media/dvb-frontends/lgdt3305.h | 1
-rw-r--r--  drivers/media/dvb-frontends/lgdt3306a.c | 18
-rw-r--r--  drivers/media/dvb-frontends/lgs8gl5.h | 1
-rw-r--r--  drivers/media/dvb-frontends/lgs8gxx.h | 1
-rw-r--r--  drivers/media/dvb-frontends/lnbh24.h | 2
-rw-r--r--  drivers/media/dvb-frontends/lnbh25.h | 1
-rw-r--r--  drivers/media/dvb-frontends/lnbp21.h | 2
-rw-r--r--  drivers/media/dvb-frontends/lnbp22.h | 2
-rw-r--r--  drivers/media/dvb-frontends/m88rs2000.h | 1
-rw-r--r--  drivers/media/dvb-frontends/mb86a20s.c | 104
-rw-r--r--  drivers/media/dvb-frontends/mb86a20s.h | 1
-rw-r--r--drivers/media/dvb-frontends/s5h1409.h1
-rw-r--r--drivers/media/dvb-frontends/s5h1411.h1
-rw-r--r--drivers/media/dvb-frontends/s5h1432.h1
-rw-r--r--drivers/media/dvb-frontends/s921.h1
-rw-r--r--drivers/media/dvb-frontends/si2165.c228
-rw-r--r--drivers/media/dvb-frontends/si2165.h27
-rw-r--r--drivers/media/dvb-frontends/si2165_priv.h17
-rw-r--r--drivers/media/dvb-frontends/si21xx.h1
-rw-r--r--drivers/media/dvb-frontends/sp2.h1
-rw-r--r--drivers/media/dvb-frontends/stb6000.c2
-rw-r--r--drivers/media/dvb-frontends/stb6000.h1
-rw-r--r--drivers/media/dvb-frontends/stb6100.c2
-rw-r--r--drivers/media/dvb-frontends/stv0288.h1
-rw-r--r--drivers/media/dvb-frontends/stv0367.h1
-rw-r--r--drivers/media/dvb-frontends/stv0900.h1
-rw-r--r--drivers/media/dvb-frontends/stv6110.c2
-rw-r--r--drivers/media/dvb-frontends/stv6110.h1
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c2
-rw-r--r--drivers/media/dvb-frontends/tda10048.h1
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c2
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.h2
-rw-r--r--drivers/media/dvb-frontends/tda665x.c2
-rw-r--r--drivers/media/dvb-frontends/tda8261.c2
-rw-r--r--drivers/media/dvb-frontends/tda826x.c2
-rw-r--r--drivers/media/dvb-frontends/ts2020.c2
-rw-r--r--drivers/media/dvb-frontends/ts2020.h1
-rw-r--r--drivers/media/dvb-frontends/tua6100.c2
-rw-r--r--drivers/media/dvb-frontends/zl10036.c2
-rw-r--r--drivers/media/dvb-frontends/zl10036.h1
-rw-r--r--drivers/media/dvb-frontends/zl10039.c2
-rw-r--r--drivers/media/dvb-frontends/zl10039.h2
-rw-r--r--drivers/media/i2c/Kconfig18
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/ad5820.c372
-rw-r--r--drivers/media/i2c/ad9389b.c23
-rw-r--r--drivers/media/i2c/adv7180.c122
-rw-r--r--drivers/media/i2c/adv7183.c1
-rw-r--r--drivers/media/i2c/adv7393.c1
-rw-r--r--drivers/media/i2c/adv7511.c1
-rw-r--r--drivers/media/i2c/ak881x.c28
-rw-r--r--drivers/media/i2c/cs3308.c1
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c90
-rw-r--r--drivers/media/i2c/mt9m111.c (renamed from drivers/media/i2c/soc_camera/mt9m111.c)116
-rw-r--r--drivers/media/i2c/ov9650.c7
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/i2c/s5k4ecgx.c1
-rw-r--r--drivers/media/i2c/s5k6a3.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c180
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.c16
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c22
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h5
-rw-r--r--drivers/media/i2c/soc_camera/Kconfig7
-rw-r--r--drivers/media/i2c/soc_camera/Makefile1
-rw-r--r--drivers/media/i2c/soc_camera/imx074.c42
-rw-r--r--drivers/media/i2c/soc_camera/mt9m001.c70
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c54
-rw-r--r--drivers/media/i2c/soc_camera/mt9t112.c60
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c68
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c41
-rw-r--r--drivers/media/i2c/soc_camera/ov5642.c53
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c74
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c44
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c41
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c41
-rw-r--r--drivers/media/i2c/soc_camera/rj54n1cb0c.c52
-rw-r--r--drivers/media/i2c/soc_camera/tw9910.c47
-rw-r--r--drivers/media/i2c/ths8200.c1
-rw-r--r--drivers/media/i2c/tlv320aic23b.c1
-rw-r--r--drivers/media/i2c/tvp514x.c3
-rw-r--r--drivers/media/i2c/tvp5150.c89
-rw-r--r--drivers/media/i2c/tvp7002.c1
-rw-r--r--drivers/media/i2c/vs6624.c1
-rw-r--r--drivers/media/media-device.c224
-rw-r--r--drivers/media/media-entity.c13
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile1
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c59
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h2
-rw-r--r--drivers/media/pci/cobalt/cobalt-alsa-pcm.c4
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c47
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c7
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c3
-rw-r--r--drivers/media/pci/cx23885/altera-ci.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c29
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c135
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885.h5
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c14
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821.h1
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c8
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c18
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c6
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c5
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c5
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c5
-rw-r--r--drivers/media/pci/ngene/ngene-cards.c14
-rw-r--r--drivers/media/pci/pt3/pt3.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c41
-rw-r--r--drivers/media/pci/saa7164/saa7164-i2c.c2
-rw-r--r--drivers/media/pci/smipcie/smipcie-main.c8
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c2
-rw-r--r--drivers/media/pci/tw5864/Kconfig12
-rw-r--r--drivers/media/pci/tw5864/Makefile3
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c359
-rw-r--r--drivers/media/pci/tw5864/tw5864-h264.c259
-rw-r--r--drivers/media/pci/tw5864/tw5864-reg.h2133
-rw-r--r--drivers/media/pci/tw5864/tw5864-util.c37
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c1510
-rw-r--r--drivers/media/pci/tw5864/tw5864.h205
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c2
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c182
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c113
-rw-r--r--drivers/media/platform/Kconfig28
-rw-r--r--drivers/media/platform/Makefile5
-rw-r--r--drivers/media/platform/atmel/Kconfig9
-rw-r--r--drivers/media/platform/atmel/Makefile1
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h165
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c1520
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c65
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c52
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c9
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c3
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c27
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c29
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.h3
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c17
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c6
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c1
-rw-r--r--drivers/media/platform/m2m-deinterlace.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c71
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c6
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c6
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.c4
-rw-r--r--drivers/media/platform/mx2_emmaprp.c2
-rw-r--r--drivers/media/platform/omap/omap_vout.c55
-rw-r--r--drivers/media/platform/omap3isp/isp.c6
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c88
-rw-r--r--drivers/media/platform/pxa_camera.c (renamed from drivers/media/platform/soc_camera/pxa_camera.c)1909
-rw-r--r--drivers/media/platform/rcar-fcp.c9
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig2
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c263
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c59
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c299
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h27
-rw-r--r--drivers/media/platform/rcar_jpu.c2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c2
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c43
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c86
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c11
-rw-r--r--drivers/media/platform/s5p-tv/Kconfig88
-rw-r--r--drivers/media/platform/s5p-tv/Makefile19
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c1059
-rw-r--r--drivers/media/platform/s5p-tv/hdmiphy_drv.c324
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h364
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c527
-rw-r--r--drivers/media/platform/s5p-tv/mixer_grp_layer.c270
-rw-r--r--drivers/media/platform/s5p-tv/mixer_reg.c551
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c1130
-rw-r--r--drivers/media/platform/s5p-tv/mixer_vp_layer.c242
-rw-r--r--drivers/media/platform/s5p-tv/regs-hdmi.h146
-rw-r--r--drivers/media/platform/s5p-tv/regs-mixer.h122
-rw-r--r--drivers/media/platform/s5p-tv/regs-sdo.h63
-rw-r--r--drivers/media/platform/s5p-tv/regs-vp.h88
-rw-r--r--drivers/media/platform/s5p-tv/sdo_drv.c497
-rw-r--r--drivers/media/platform/s5p-tv/sii9234_drv.c407
-rw-r--r--drivers/media/platform/sh_vou.c17
-rw-r--r--drivers/media/platform/soc_camera/Kconfig25
-rw-r--r--drivers/media/platform/soc_camera/Makefile3
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c2
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c1970
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c269
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_csi2.c400
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c130
-rw-r--r--drivers/media/platform/soc_camera/soc_camera_platform.c45
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c97
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.h6
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/platform/sti/hva/Makefile2
-rw-r--r--drivers/media/platform/sti/hva/hva-h264.c1050
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c538
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.h42
-rw-r--r--drivers/media/platform/sti/hva/hva-mem.c59
-rw-r--r--drivers/media/platform/sti/hva/hva-mem.h34
-rw-r--r--drivers/media/platform/sti/hva/hva-v4l2.c1415
-rw-r--r--drivers/media/platform/sti/hva/hva.h315
-rw-r--r--drivers/media/platform/ti-vpe/cal.c2
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2
-rw-r--r--drivers/media/platform/vim2m.c2
-rw-r--r--drivers/media/platform/vivid/vivid-core.c63
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c3
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c36
-rw-r--r--drivers/media/platform/vsp1/vsp1_clu.c62
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c126
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h1
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c26
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c45
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c22
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h25
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c20
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c20
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c42
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c13
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h11
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c109
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c83
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h13
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c50
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c71
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c192
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c132
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c1
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c2
-rw-r--r--drivers/media/rc/igorplugusb.c3
-rw-r--r--drivers/media/rc/img-ir/img-ir-nec.c6
-rw-r--r--drivers/media/rc/ir-nec-decoder.c8
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c4
-rw-r--r--drivers/media/rc/meson-ir.c29
-rw-r--r--drivers/media/rc/nuvoton-cir.c40
-rw-r--r--drivers/media/rc/rc-ir-raw.c9
-rw-r--r--drivers/media/rc/rc-main.c13
-rw-r--r--drivers/media/rc/redrat3.c67
-rw-r--r--drivers/media/rc/streamzap.c2
-rw-r--r--drivers/media/spi/Kconfig14
-rw-r--r--drivers/media/spi/Makefile1
-rw-r--r--drivers/media/spi/gs1662.c478
-rw-r--r--drivers/media/tuners/fc0011.h1
-rw-r--r--drivers/media/tuners/fc0012.h1
-rw-r--r--drivers/media/tuners/fc0013.h1
-rw-r--r--drivers/media/tuners/max2165.h2
-rw-r--r--drivers/media/tuners/mc44s803.h2
-rw-r--r--drivers/media/tuners/mt2063.c2
-rw-r--r--drivers/media/tuners/mt20xx.c4
-rw-r--r--drivers/media/tuners/mxl5005s.h2
-rw-r--r--drivers/media/tuners/mxl5007t.c2
-rw-r--r--drivers/media/tuners/r820t.h1
-rw-r--r--drivers/media/tuners/si2157.h1
-rw-r--r--drivers/media/tuners/tda18212.h1
-rw-r--r--drivers/media/tuners/tda18218.h1
-rw-r--r--drivers/media/tuners/tda18271-fe.c11
-rw-r--r--drivers/media/tuners/tda18271-priv.h2
-rw-r--r--drivers/media/tuners/tda827x.c4
-rw-r--r--drivers/media/tuners/tea5761.c2
-rw-r--r--drivers/media/tuners/tea5767.c11
-rw-r--r--drivers/media/tuners/tuner-simple.c2
-rw-r--r--drivers/media/tuners/xc5000.h1
-rw-r--r--drivers/media/usb/airspy/airspy.c2
-rw-r--r--drivers/media/usb/au0828/au0828-input.c3
-rw-r--r--drivers/media/usb/au0828/au0828-video.c2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c105
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.h4
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c34
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c67
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c82
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c9
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c13
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c9
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig21
-rw-r--r--drivers/media/usb/dvb-usb/Makefile13
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c304
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c77
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-fe.c100
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c27
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h5
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c40
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c25
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c272
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc-common.c168
-rw-r--r--drivers/media/usb/dvb-usb/dibusb.h3
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c26
-rw-r--r--drivers/media/usb/dvb-usb/digitv.h5
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u-fe.c128
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c109
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c10
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h9
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c2
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c132
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.h63
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c25
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c136
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c16
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/usb/go7007/go7007-i2c.c2
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c2
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c2
-rw-r--r--drivers/media/usb/gspca/finepix.c8
-rw-r--r--drivers/media/usb/gspca/jl2005bcd.c8
-rw-r--r--drivers/media/usb/gspca/sonixj.c13
-rw-r--r--drivers/media/usb/gspca/vicam.c8
-rw-r--r--drivers/media/usb/hackrf/hackrf.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c1
-rw-r--r--drivers/media/usb/msi2500/msi2500.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c23
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c81
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/s2255/s2255drv.c17
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c2
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c16
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c2
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c30
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c2
-rw-r--r--drivers/media/v4l2-core/Kconfig2
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c14
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c44
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c128
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c278
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c9
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c19
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c6
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c142
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c13
-rw-r--r--drivers/memory/atmel-ebi.c10
-rw-r--r--drivers/memory/atmel-sdramc.c4
-rw-r--r--drivers/memory/omap-gpmc.c20
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c6
-rw-r--r--drivers/mfd/intel-lpss-pci.c31
-rw-r--r--drivers/mfd/intel-lpss.c3
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c6
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/misc/cxl/api.c11
-rw-r--r--drivers/misc/cxl/context.c3
-rw-r--r--drivers/misc/cxl/cxl.h30
-rw-r--r--drivers/misc/cxl/file.c15
-rw-r--r--drivers/misc/cxl/guest.c3
-rw-r--r--drivers/misc/cxl/main.c42
-rw-r--r--drivers/misc/cxl/native.c31
-rw-r--r--drivers/misc/cxl/of.c8
-rw-r--r--drivers/misc/cxl/pci.c9
-rw-r--r--drivers/misc/cxl/sysfs.c27
-rw-r--r--drivers/misc/eeprom/at24.c15
-rw-r--r--drivers/misc/genwqe/card_utils.c12
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c2
-rw-r--r--drivers/misc/mei/hw-txe.c6
-rw-r--r--drivers/misc/mic/scif/scif_rma.c3
-rw-r--r--drivers/misc/sgi-gru/grufault.c2
-rw-r--r--drivers/misc/sgi-gru/grumain.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c8
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/mmc/card/block.c3
-rw-r--r--drivers/mmc/card/mmc_test.c8
-rw-r--r--drivers/mmc/card/queue.h2
-rw-r--r--drivers/mmc/core/mmc.c15
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c5
-rw-r--r--drivers/mmc/host/dw_mmc.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c4
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c7
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c23
-rw-r--r--drivers/mmc/host/sdhci-msm.c1
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c26
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c54
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c2
-rw-r--r--drivers/mmc/host/sdhci.c78
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/mtdcore.c105
-rw-r--r--drivers/mtd/mtdpart.c20
-rw-r--r--drivers/mtd/nand/Kconfig12
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c3
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c15
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.h13
-rw-r--r--drivers/mtd/nand/brcmnand/iproc_nand.c18
-rw-r--r--drivers/mtd/nand/docg4.c3
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c3
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c3
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c6
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c3
-rw-r--r--drivers/mtd/nand/jz4780_nand.c3
-rw-r--r--drivers/mtd/nand/mtk_ecc.c19
-rw-r--r--drivers/mtd/nand/mxc_nand.c137
-rw-r--r--drivers/mtd/nand/nand_base.c308
-rw-r--r--drivers/mtd/nand/nand_bbt.c161
-rw-r--r--drivers/mtd/nand/nand_timings.c470
-rw-r--r--drivers/mtd/nand/ndfc.c3
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c3
-rw-r--r--drivers/mtd/nand/qcom_nandc.c3
-rw-r--r--drivers/mtd/nand/s3c2410.c7
-rw-r--r--drivers/mtd/nand/sh_flctl.c8
-rw-r--r--drivers/mtd/nand/sunxi_nand.c108
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c3
-rw-r--r--drivers/mtd/ubi/attach.c304
-rw-r--r--drivers/mtd/ubi/block.c1
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/cdev.c6
-rw-r--r--drivers/mtd/ubi/eba.c650
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c6
-rw-r--r--drivers/mtd/ubi/fastmap.c213
-rw-r--r--drivers/mtd/ubi/io.c39
-rw-r--r--drivers/mtd/ubi/kapi.c16
-rw-r--r--drivers/mtd/ubi/ubi.h132
-rw-r--r--drivers/mtd/ubi/vmt.c40
-rw-r--r--drivers/mtd/ubi/vtbl.c17
-rw-r--r--drivers/mtd/ubi/wl.c60
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/can/sja1000/plx_pci.c18
-rw-r--r--drivers/net/dsa/b53/b53_common.c16
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c16
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c12
-rw-r--r--drivers/net/ethernet/arc/emac_main.c7
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c65
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h64
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c37
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c105
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c153
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c118
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h24
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c41
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c32
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c15
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c6
-rw-r--r--drivers/net/ethernet/freescale/fman/Makefile10
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c78
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_mac.h4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c6
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.h3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c55
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_sp.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c83
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c52
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c58
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c53
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c107
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c55
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c12
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c25
-rw-r--r--drivers/net/ethernet/marvell/sky2.c13
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c43
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c155
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c10
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig12
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c32
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c218
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c124
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c17
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c23
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c324
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c102
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h72
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c1
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c5
-rw-r--r--drivers/net/ethernet/sun/sunbmac.h2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c11
-rw-r--r--drivers/net/ethernet/sun/sunqe.h4
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c17
-rw-r--r--drivers/net/ethernet/ti/cpsw.c95
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c10
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c1
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c15
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
-rw-r--r--drivers/net/geneve.c47
-rw-r--r--drivers/net/hyperv/netvsc_drv.c96
-rw-r--r--drivers/net/macsec.c26
-rw-r--r--drivers/net/macvlan.c31
-rw-r--r--drivers/net/phy/Kconfig8
-rw-r--r--drivers/net/phy/at803x.c65
-rw-r--r--drivers/net/phy/dp83848.c3
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/mscc.c128
-rw-r--r--drivers/net/phy/phy.c22
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/vitesse.c34
-rw-r--r--drivers/net/usb/asix_common.c8
-rw-r--r--drivers/net/usb/ax88179_178a.c17
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c30
-rw-r--r--drivers/net/usb/r8152.c21
-rw-r--r--drivers/net/virtio_net.c35
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c17
-rw-r--r--drivers/net/vrf.c2
-rw-r--r--drivers/net/vxlan.c84
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c8
-rw-r--r--drivers/net/wan/slic_ds26522.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c75
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c8
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/net/xen-netback/Makefile2
-rw-r--r--drivers/net/xen-netback/common.h29
-rw-r--r--drivers/net/xen-netback/hash.c68
-rw-r--r--drivers/net/xen-netback/interface.c26
-rw-r--r--drivers/net/xen-netback/netback.c754
-rw-r--r--drivers/net/xen-netback/rx.c631
-rw-r--r--drivers/net/xen-netback/xenbus.c58
-rw-r--r--drivers/net/xen-netfront.c4
-rw-r--r--drivers/nfc/mei_phy.c2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c15
-rw-r--r--drivers/ntb/ntb_transport.c2
-rw-r--r--drivers/ntb/test/ntb_perf.c8
-rw-r--r--drivers/ntb/test/ntb_pingpong.c2
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/core.c73
-rw-r--r--drivers/nvdimm/dimm.c11
-rw-r--r--drivers/nvdimm/dimm_devs.c226
-rw-r--r--drivers/nvdimm/label.c192
-rw-r--r--drivers/nvdimm/namespace_devs.c794
-rw-r--r--drivers/nvdimm/nd-core.h24
-rw-r--r--drivers/nvdimm/nd.h29
-rw-r--r--drivers/nvdimm/pmem.c36
-rw-r--r--drivers/nvdimm/region_devs.c75
-rw-r--r--drivers/nvme/host/core.c170
-rw-r--r--drivers/nvme/host/fabrics.c25
-rw-r--r--drivers/nvme/host/fabrics.h11
-rw-r--r--drivers/nvme/host/lightnvm.c33
-rw-r--r--drivers/nvme/host/nvme.h31
-rw-r--r--drivers/nvme/host/pci.c207
-rw-r--r--drivers/nvme/host/rdma.c69
-rw-r--r--drivers/nvme/host/scsi.c84
-rw-r--r--drivers/nvme/target/admin-cmd.c96
-rw-r--r--drivers/nvme/target/core.c12
-rw-r--r--drivers/nvme/target/discovery.c4
-rw-r--r--drivers/nvme/target/io-cmd.c3
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvme/target/rdma.c20
-rw-r--r--drivers/nvmem/Kconfig10
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/meson-efuse.c93
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/irq.c78
-rw-r--r--drivers/of/of_mdio.c6
-rw-r--r--drivers/of/of_pci.c102
-rw-r--r--drivers/of/platform.c1
-rw-r--r--drivers/oprofile/oprofilefs.c2
-rw-r--r--drivers/pci/host/pci-aardvark.c39
-rw-r--r--drivers/pci/host/pci-dra7xx.c103
-rw-r--r--drivers/pci/host/pci-exynos.c220
-rw-r--r--drivers/pci/host/pci-imx6.c250
-rw-r--r--drivers/pci/host/pci-keystone-dw.c123
-rw-r--r--drivers/pci/host/pci-keystone.c28
-rw-r--r--drivers/pci/host/pci-keystone.h9
-rw-r--r--drivers/pci/host/pci-layerscape.c67
-rw-r--r--drivers/pci/host/pci-mvebu.c21
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c46
-rw-r--r--drivers/pci/host/pci-tegra.c237
-rw-r--r--drivers/pci/host/pci-xgene.c151
-rw-r--r--drivers/pci/host/pcie-altera.c77
-rw-r--r--drivers/pci/host/pcie-armada8k.c78
-rw-r--r--drivers/pci/host/pcie-artpec6.c115
-rw-r--r--drivers/pci/host/pcie-designware-plat.c27
-rw-r--r--drivers/pci/host/pcie-designware.c109
-rw-r--r--drivers/pci/host/pcie-designware.h7
-rw-r--r--drivers/pci/host/pcie-hisi.c86
-rw-r--r--drivers/pci/host/pcie-iproc-bcma.c14
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c27
-rw-r--r--drivers/pci/host/pcie-iproc.c52
-rw-r--r--drivers/pci/host/pcie-qcom.c32
-rw-r--r--drivers/pci/host/pcie-rcar.c98
-rw-r--r--drivers/pci/host/pcie-rockchip.c66
-rw-r--r--drivers/pci/host/pcie-spear13xx.c108
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c109
-rw-r--r--drivers/pci/host/pcie-xilinx.c62
-rw-r--r--drivers/pci/hotplug/pnv_php.c283
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci-mid.c6
-rw-r--r--drivers/pci/quirks.c11
-rw-r--r--drivers/pci/setup-res.c8
-rw-r--r--drivers/pcmcia/sa1100_assabet.c9
-rw-r--r--drivers/pcmcia/sa1100_cerf.c9
-rw-r--r--drivers/pcmcia/soc_common.c238
-rw-r--r--drivers/pcmcia/soc_common.h29
-rw-r--r--drivers/perf/Kconfig7
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/xgene_pmu.c1398
-rw-r--r--drivers/phy/phy-da8xx-usb.c5
-rw-r--r--drivers/phy/phy-rockchip-pcie.c13
-rw-r--r--drivers/phy/phy-sun4i-usb.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c102
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c12
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c3
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c17
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c25
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c23
-rw-r--r--drivers/pinctrl/pinctrl-st.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c8
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/asus-laptop.c77
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c58
-rw-r--r--drivers/platform/x86/asus-wmi.c3
-rw-r--r--drivers/platform/x86/asus-wmi.h5
-rw-r--r--drivers/platform/x86/dell-smo8800.c1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c14
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel-vbtn.c2
-rw-r--r--drivers/platform/x86/intel_pmc_core.c2
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c110
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/toshiba-wmi.c26
-rw-r--r--drivers/platform/x86/toshiba_acpi.c257
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c4
-rw-r--r--drivers/platform/x86/toshiba_haps.c14
-rw-r--r--drivers/pps/Kconfig2
-rw-r--r--drivers/ps3/ps3-vuart.c4
-rw-r--r--drivers/ptp/ptp_chardev.c1
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c2
-rw-r--r--drivers/pwm/pwm-berlin.c84
-rw-r--r--drivers/pwm/pwm-cros-ec.c4
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c12
-rw-r--r--drivers/pwm/pwm-meson.c529
-rw-r--r--drivers/pwm/pwm-mtk-disp.c87
-rw-r--r--drivers/pwm/pwm-samsung.c15
-rw-r--r--drivers/pwm/pwm-sti.c483
-rw-r--r--drivers/pwm/pwm-sun4i.c9
-rw-r--r--drivers/pwm/pwm-tipwmss.c19
-rw-r--r--drivers/pwm/pwm-twl.c16
-rw-r--r--drivers/pwm/sysfs.c18
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c3
-rw-r--r--drivers/rapidio/rio_cm.c15
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/max8973-regulator.c3
-rw-r--r--drivers/reset/Kconfig65
-rw-r--r--drivers/reset/Makefile20
-rw-r--r--drivers/reset/core.c12
-rw-r--r--drivers/reset/hisilicon/Kconfig3
-rw-r--r--drivers/reset/reset-ath79.c1
-rw-r--r--drivers/reset/reset-socfpga.c19
-rw-r--r--drivers/reset/reset-stm32.c108
-rw-r--r--drivers/reset/reset-uniphier.c440
-rw-r--r--drivers/rtc/Kconfig38
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-ac100.c5
-rw-r--r--drivers/rtc/rtc-asm9260.c21
-rw-r--r--drivers/rtc/rtc-at32ap700x.c2
-rw-r--r--drivers/rtc/rtc-bq32k.c16
-rw-r--r--drivers/rtc/rtc-cmos.c104
-rw-r--r--drivers/rtc/rtc-coh901331.c2
-rw-r--r--drivers/rtc/rtc-davinci.c2
-rw-r--r--drivers/rtc/rtc-digicolor.c2
-rw-r--r--drivers/rtc/rtc-ds1302.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c54
-rw-r--r--drivers/rtc/rtc-ds1347.c96
-rw-r--r--drivers/rtc/rtc-gemini.c2
-rw-r--r--drivers/rtc/rtc-isl12057.c643
-rw-r--r--drivers/rtc/rtc-jz4740.c2
-rw-r--r--drivers/rtc/rtc-mcp795.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/rtc/rtc-nuc900.c2
-rw-r--r--drivers/rtc/rtc-omap.c208
-rw-r--r--drivers/rtc/rtc-palmas.c2
-rw-r--r--drivers/rtc/rtc-pcf2123.c5
-rw-r--r--drivers/rtc/rtc-pcf50633.c2
-rw-r--r--drivers/rtc/rtc-pic32.c1
-rw-r--r--drivers/rtc/rtc-rv8803.c50
-rw-r--r--drivers/rtc/rtc-rx6110.c3
-rw-r--r--drivers/rtc/rtc-rx8025.c2
-rw-r--r--drivers/rtc/rtc-spear.c2
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-sysfs.c4
-rw-r--r--drivers/rtc/rtc-tegra.c2
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c4
-rw-r--r--drivers/s390/cio/chp.c6
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c2
-rw-r--r--drivers/scsi/NCR5380.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c9
-rw-r--r--drivers/scsi/be2iscsi/be_main.c49
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c5
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c3
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c5
-rw-r--r--drivers/scsi/g_NCR5380.c699
-rw-r--r--drivers/scsi/g_NCR5380.h8
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c13
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c19
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c21
-rw-r--r--drivers/scsi/scsi_debug.c1
-rw-r--r--drivers/scsi/scsi_dh.c6
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/ufs/Kconfig2
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h2
-rw-r--r--drivers/scsi/ufs/ufshcd.c10
-rw-r--r--drivers/scsi/vmw_pvscsi.c5
-rw-r--r--drivers/scsi/vmw_pvscsi.h2
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/fsl/Makefile1
-rw-r--r--drivers/soc/fsl/qbman/Kconfig67
-rw-r--r--drivers/soc/fsl/qbman/Makefile12
-rw-r--r--drivers/soc/fsl/qbman/bman.c797
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c263
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c219
-rw-r--r--drivers/soc/fsl/qbman/bman_priv.h80
-rw-r--r--drivers/soc/fsl/qbman/bman_test.c53
-rw-r--r--drivers/soc/fsl/qbman/bman_test.h35
-rw-r--r--drivers/soc/fsl/qbman/bman_test_api.c151
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.h103
-rw-r--r--drivers/soc/fsl/qbman/qman.c2881
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c808
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c355
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h371
-rw-r--r--drivers/soc/fsl/qbman/qman_test.c62
-rw-r--r--drivers/soc/fsl/qbman/qman_test.h36
-rw-r--r--drivers/soc/fsl/qbman/qman_test_api.c252
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c617
-rw-r--r--drivers/soc/fsl/qe/gpio.c3
-rw-r--r--drivers/soc/fsl/qe/qe.c10
-rw-r--r--drivers/soc/fsl/qe/qe_common.c8
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c4
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c2
-rw-r--r--drivers/soc/qcom/smd.c265
-rw-r--r--drivers/soc/qcom/smem.c3
-rw-r--r--drivers/soc/rockchip/pm_domains.c100
-rw-r--r--drivers/soc/tegra/pmc.c28
-rw-r--r--drivers/spi/spi-fsl-dspi.c7
-rw-r--r--drivers/spi/spi-fsl-espi.c2
-rw-r--r--drivers/spi/spi.c23
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--drivers/staging/android/ion/ion_of.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c3
-rw-r--r--drivers/staging/greybus/arche-platform.c2
-rw-r--r--drivers/staging/greybus/es2.c3
-rw-r--r--drivers/staging/greybus/gpio.c6
-rw-r--r--drivers/staging/greybus/module.c2
-rw-r--r--drivers/staging/greybus/uart.c2
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c17
-rw-r--r--drivers/staging/lustre/lnet/Kconfig1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_acl.h6
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c96
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h15
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c34
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h14
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c45
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c5
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c44
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2
-rw-r--r--drivers/staging/media/cec/Kconfig3
-rw-r--r--drivers/staging/media/cec/cec-adap.c4
-rw-r--r--drivers/staging/media/cec/cec-core.c3
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c10
-rw-r--r--drivers/staging/media/omap4iss/iss.c8
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c99
-rw-r--r--drivers/staging/media/pulse8-cec/pulse8-cec.c402
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.c19
-rw-r--r--drivers/staging/media/st-cec/Kconfig8
-rw-r--r--drivers/staging/media/st-cec/Makefile1
-rw-r--r--drivers/staging/media/st-cec/stih-cec.c380
-rw-r--r--drivers/staging/media/tw686x-kh/Kconfig17
-rw-r--r--drivers/staging/media/tw686x-kh/Makefile3
-rw-r--r--drivers/staging/media/tw686x-kh/TODO6
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh-core.c140
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh-regs.h103
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh-video.c813
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh.h117
-rw-r--r--drivers/staging/nvec/nvec_ps2.c8
-rw-r--r--drivers/staging/sm750fb/ddk750_reg.h8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c3
-rw-r--r--drivers/staging/wilc1000/host_interface.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c4
-rw-r--r--drivers/target/target_core_transport.c39
-rw-r--r--drivers/target/target_core_user.c50
-rw-r--r--drivers/target/target_core_xcopy.c34
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c4
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c42
-rw-r--r--drivers/thermal/Kconfig37
-rw-r--r--drivers/thermal/Makefile4
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/db8500_thermal.c2
-rw-r--r--drivers/thermal/devfreq_cooling.c2
-rw-r--r--drivers/thermal/gov_bang_bang.c2
-rw-r--r--drivers/thermal/hisi_thermal.c3
-rw-r--r--drivers/thermal/imx_thermal.c4
-rw-r--r--drivers/thermal/int340x_thermal/int3402_thermal.c3
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c9
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.c60
-rw-r--r--drivers/thermal/int340x_thermal/int340x_thermal_zone.h6
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c3
-rw-r--r--drivers/thermal/intel_bxt_pmic_thermal.c300
-rw-r--r--drivers/thermal/intel_pch_thermal.c60
-rw-r--r--drivers/thermal/intel_powerclamp.c9
-rw-r--r--drivers/thermal/intel_soc_dts_iosf.c3
-rw-r--r--drivers/thermal/max77620_thermal.c166
-rw-r--r--drivers/thermal/mtk_thermal.c226
-rw-r--r--drivers/thermal/of-thermal.c46
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c2
-rw-r--r--drivers/thermal/qcom/Kconfig11
-rw-r--r--drivers/thermal/qcom/Makefile2
-rw-r--r--drivers/thermal/qcom/tsens-8916.c113
-rw-r--r--drivers/thermal/qcom/tsens-8960.c292
-rw-r--r--drivers/thermal/qcom/tsens-8974.c244
-rw-r--r--drivers/thermal/qcom/tsens-8996.c84
-rw-r--r--drivers/thermal/qcom/tsens-common.c141
-rw-r--r--drivers/thermal/qcom/tsens.c200
-rw-r--r--drivers/thermal/qcom/tsens.h94
-rw-r--r--drivers/thermal/qoriq_thermal.c328
-rw-r--r--drivers/thermal/rcar_thermal.c26
-rw-r--r--drivers/thermal/rockchip_thermal.c107
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c3
-rw-r--r--drivers/thermal/tango_thermal.c19
-rw-r--r--drivers/thermal/tegra/soctherm.c842
-rw-r--r--drivers/thermal/tegra/soctherm.h10
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c18
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c18
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c18
-rw-r--r--drivers/thermal/thermal_core.c106
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c29
-rw-r--r--drivers/thermal/user_space.c15
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c3
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c3
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c4
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/atmel_serial.c26
-rw-r--r--drivers/tty/serial/fsl_lpuart.c3
-rw-r--r--drivers/tty/serial/pch_uart.c1
-rw-r--r--drivers/tty/serial/sc16is7xx.c30
-rw-r--r--drivers/tty/serial/serial_core.c8
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/vt/vt.c13
-rw-r--r--drivers/usb/chipidea/host.c2
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/core/devio.c18
-rw-r--r--drivers/usb/dwc2/core.c11
-rw-r--r--drivers/usb/dwc2/core.h7
-rw-r--r--drivers/usb/dwc2/gadget.c53
-rw-r--r--drivers/usb/dwc2/platform.c34
-rw-r--r--drivers/usb/dwc3/core.c5
-rw-r--r--drivers/usb/dwc3/dwc3-st.c1
-rw-r--r--drivers/usb/dwc3/gadget.c26
-rw-r--r--drivers/usb/early/ehci-dbgp.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c115
-rw-r--r--drivers/usb/gadget/function/u_ether.c7
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c1
-rw-r--r--drivers/usb/host/ehci-hcd.c5
-rw-r--r--drivers/usb/host/ehci-platform.c2
-rw-r--r--drivers/usb/host/ehci-sead3.c185
-rw-r--r--drivers/usb/host/ohci-at91.c9
-rw-r--r--drivers/usb/host/ohci-hcd.c2
-rw-r--r--drivers/usb/host/pci-quirks.c9
-rw-r--r--drivers/usb/host/xhci-hub.c41
-rw-r--r--drivers/usb/host/xhci-pci.c10
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/da8xx.c3
-rw-r--r--drivers/usb/musb/musb_core.c5
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/omap2430.c7
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c8
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h5
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/usb/wusbcore/crypto.c61
-rw-r--r--drivers/uwb/lc-rc.c16
-rw-r--r--drivers/uwb/pal.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c33
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--drivers/video/fbdev/Kconfig9
-rw-r--r--drivers/video/fbdev/Makefile3
-rw-r--r--drivers/video/fbdev/amba-clcd-nomadik.c259
-rw-r--r--drivers/video/fbdev/amba-clcd-nomadik.h24
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c395
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.h17
-rw-r--r--drivers/video/fbdev/amba-clcd.c190
-rw-r--r--drivers/video/fbdev/arcfb.c4
-rw-r--r--drivers/video/fbdev/asiliantfb.c4
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c6
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c2
-rw-r--r--drivers/video/fbdev/aty/radeon_monitor.c2
-rw-r--r--drivers/video/fbdev/au1200fb.c1
-rw-r--r--drivers/video/fbdev/bfin_adv7393fb.c5
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c42
-rw-r--r--drivers/video/fbdev/efifb.c6
-rw-r--r--drivers/video/fbdev/exynos/Kconfig32
-rw-r--r--drivers/video/fbdev/exynos/Makefile9
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi.c574
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c880
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h46
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c618
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h112
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h149
-rw-r--r--drivers/video/fbdev/exynos/s6e8ax0.c887
-rw-r--r--drivers/video/fbdev/hecubafb.c4
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/video/fbdev/i740fb.c2
-rw-r--r--drivers/video/fbdev/i810/i810_main.c2
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c5
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_g450.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xx-i2c.c9
-rw-r--r--drivers/video/fbdev/mx3fb.c2
-rw-r--r--drivers/video/fbdev/mxsfb.c9
-rw-r--r--drivers/video/fbdev/offb.c15
-rw-r--r--drivers/video/fbdev/omap/lcd_mipid.c9
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c14
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c12
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c8
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c8
-rw-r--r--drivers/video/fbdev/pm2fb.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/pxafb.c3
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c4
-rw-r--r--drivers/video/fbdev/s3c2410fb.c2
-rw-r--r--drivers/video/fbdev/s3c2410fb.h2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c2
-rw-r--r--drivers/video/fbdev/simplefb.c13
-rw-r--r--drivers/video/fbdev/sm712fb.c2
-rw-r--r--drivers/video/fbdev/smscufx.c2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c9
-rw-r--r--drivers/video/fbdev/tdfxfb.c4
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--drivers/video/fbdev/vfb.c129
-rw-r--r--drivers/video/fbdev/vga16fb.c2
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/virtio/config.c12
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_pci_legacy.c16
-rw-r--r--drivers/virtio/virtio_ring.c16
-rw-r--r--drivers/vme/vme.c4
-rw-r--r--drivers/watchdog/Kconfig49
-rw-r--r--drivers/watchdog/Makefile8
-rw-r--r--drivers/watchdog/asm9260_wdt.c1
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/bcm7038_wdt.c2
-rw-r--r--drivers/watchdog/cadence_wdt.c20
-rw-r--r--drivers/watchdog/dw_wdt.c11
-rw-r--r--drivers/watchdog/hpwdt.c8
-rw-r--r--drivers/watchdog/iTCO_wdt.c2
-rw-r--r--drivers/watchdog/imx2_wdt.c60
-rw-r--r--drivers/watchdog/kempld_wdt.c2
-rw-r--r--drivers/watchdog/mt7621_wdt.c1
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c25
-rw-r--r--drivers/watchdog/pretimeout_noop.c47
-rw-r--r--drivers/watchdog/pretimeout_panic.c47
-rw-r--r--drivers/watchdog/rn5t618_wdt.c2
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/softdog.c24
-rw-r--r--drivers/watchdog/st_lpc_wdt.c33
-rw-r--r--drivers/watchdog/tegra_wdt.c2
-rw-r--r--drivers/watchdog/txx9wdt.c6
-rw-r--r--drivers/watchdog/w83627hf_wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.c2
-rw-r--r--drivers/watchdog/watchdog_dev.c101
-rw-r--r--drivers/watchdog/watchdog_pretimeout.c220
-rw-r--r--drivers/watchdog/watchdog_pretimeout.h60
-rw-r--r--drivers/watchdog/wdat_wdt.c4
-rw-r--r--drivers/watchdog/ziirave_wdt.c409
-rw-r--r--drivers/xen/manage.c45
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c4
2420 files changed, 164605 insertions, 87687 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index f0afdfb3c7df..194d20bee7dc 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -21,7 +21,7 @@ obj-y += video/
obj-y += idle/
# IPMI must come before ACPI in order to provide IPMI opregion support
-obj-$(CONFIG_IPMI_HANDLER) += char/ipmi/
+obj-y += char/ipmi/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_SFI) += sfi/
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d58fbf7f04e6..7dd70927991e 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -122,7 +122,7 @@ static int acpi_apd_create_device(struct acpi_device *adev,
int ret;
if (!dev_desc) {
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
@@ -139,14 +139,8 @@ static int acpi_apd_create_device(struct acpi_device *adev,
goto err_out;
}
- if (dev_desc->properties) {
- ret = device_add_properties(&adev->dev, dev_desc->properties);
- if (ret)
- goto err_out;
- }
-
adev->driver_data = pdata;
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev))
return 1;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 552010288135..373657f7e35a 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -395,7 +395,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
dev_desc = (const struct lpss_device_desc *)id->driver_data;
if (!dev_desc) {
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, NULL);
return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -451,14 +451,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
goto err_out;
}
- if (dev_desc->properties) {
- ret = device_add_properties(&adev->dev, dev_desc->properties);
- if (ret)
- goto err_out;
- }
-
adev->driver_data = pdata;
- pdev = acpi_create_platform_device(adev);
+ pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) {
return 1;
}
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 8ea8211b2d58..eb76a4c10dbf 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <asm/mwait.h>
+#include <xen/xen.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
@@ -477,6 +478,10 @@ static struct acpi_driver acpi_pad_driver = {
static int __init acpi_pad_init(void)
{
+ /* Xen ACPI PAD is used when running as Xen Dom0. */
+ if (xen_initial_domain())
+ return -ENODEV;
+
power_saving_mwait_init();
if (power_saving_mwait_eax == 0)
return -EINVAL;
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b200ae1f3c6f..b4c1a6a51da4 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -50,6 +50,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
/**
* acpi_create_platform_device - Create platform device for ACPI device node
* @adev: ACPI device node to create a platform device for.
+ * @properties: Optional collection of built-in properties.
*
* Check if the given @adev can be represented as a platform device and, if
* that's the case, create and register a platform device, populate its common
@@ -57,7 +58,8 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
*
* Name of the platform device will be the same as @adev's.
*/
-struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
+ struct property_entry *properties)
{
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
@@ -106,6 +108,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
pdevinfo.res = resources;
pdevinfo.num_res = count;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
+ pdevinfo.properties = properties;
if (acpi_dma_supported(adev))
pdevinfo.dma_mask = DMA_BIT_MASK(32);
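For context, the new second argument lets a caller hand a property_entry table straight to acpi_create_platform_device() instead of calling device_add_properties() separately, which is exactly the migration the acpi_apd/acpi_lpss hunks above perform. A minimal sketch of a migrated caller (the foo_* names and property strings are hypothetical):

#include <linux/acpi.h>
#include <linux/property.h>

/* Hypothetical built-in properties for an ACPI-enumerated device. */
static struct property_entry foo_properties[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 48000000),
	PROPERTY_ENTRY_BOOL("foo,enable-quirk"),
	{ }	/* the array must be null-terminated */
};

static int foo_create_device(struct acpi_device *adev)
{
	struct platform_device *pdev;

	/* The platform device core now registers the properties. */
	pdev = acpi_create_platform_device(adev, foo_properties);
	return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
}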
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index f1e6dcc7a827..54d48b90de2c 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -46,6 +46,7 @@
#include "acdispat.h"
#include "acnamesp.h"
#include "actables.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsinit")
@@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index,
/* Walk entire namespace from the supplied root */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
/*
* We don't use acpi_walk_namespace since we do not want to acquire
* the namespace reader lock.
*/
status =
acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
- ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object,
- NULL, &info, NULL);
+ 0, acpi_ds_init_one_object, NULL, &info,
+ NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 32e9ddc0cf2b..2b3210f42a46 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
"Method auto-serialization parse [%4.4s] %p\n",
acpi_ut_get_node_name(node), node));
- acpi_ex_enter_interpreter();
-
/* Create/Init a root op for the method parse tree */
op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
if (!op) {
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
acpi_ps_set_name(op, node->name.integer);
@@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
if (!walk_state) {
acpi_ps_free_op(op);
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
status = acpi_ds_init_aml_walk(walk_state, op, node,
@@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
status = acpi_ps_parse_aml(walk_state);
acpi_ps_delete_parse_tree(op);
-unlock:
- acpi_ex_exit_interpreter();
return_ACPI_STATUS(status);
}
@@ -731,26 +725,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_ds_method_data_delete_all(walk_state);
/*
- * If method is serialized, release the mutex and restore the
- * current sync level for this thread
- */
- if (method_desc->method.mutex) {
-
- /* Acquisition Depth handles recursive calls */
-
- method_desc->method.mutex->mutex.acquisition_depth--;
- if (!method_desc->method.mutex->mutex.acquisition_depth) {
- walk_state->thread->current_sync_level =
- method_desc->method.mutex->mutex.
- original_sync_level;
-
- acpi_os_release_mutex(method_desc->method.
- mutex->mutex.os_mutex);
- method_desc->method.mutex->mutex.thread_id = 0;
- }
- }
-
- /*
* Delete any namespace objects created anywhere within the
* namespace by the execution of this method. Unless:
* 1) This method is a module-level executable code method, in which
@@ -786,6 +760,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
~ACPI_METHOD_MODIFIED_NAMESPACE;
}
}
+
+ /*
+ * If method is serialized, release the mutex and restore the
+ * current sync level for this thread
+ */
+ if (method_desc->method.mutex) {
+
+ /* Acquisition Depth handles recursive calls */
+
+ method_desc->method.mutex->mutex.acquisition_depth--;
+ if (!method_desc->method.mutex->mutex.acquisition_depth) {
+ walk_state->thread->current_sync_level =
+ method_desc->method.mutex->mutex.
+ original_sync_level;
+
+ acpi_os_release_mutex(method_desc->method.
+ mutex->mutex.os_mutex);
+ method_desc->method.mutex->mutex.thread_id = 0;
+ }
+ }
}
/* Decrement the thread count on the method */
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 028b22a3154e..e36218206bb0 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
}
}
- acpi_ex_exit_interpreter();
status =
acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE);
- acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
/*
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 3843f1fc5dbb..75ddd160a716 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -45,6 +45,7 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini")
@@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
}
}
+ acpi_ex_exit_interpreter();
status =
acpi_ev_execute_reg_method(region_obj,
ACPI_REG_CONNECT);
+ acpi_ex_enter_interpreter();
if (acpi_ns_locked) {
status =
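Taken together, the dsinit/dsmethod/dswload2/evrgnini/nsload hunks move interpreter-lock management outward: the lock is taken once by the caller (see the nsload.c hunk below), and any call that may re-enter the interpreter, such as the _REG evaluation above, is bracketed by an explicit exit/enter pair instead. The idiom, reduced to its shape:

	/* caller already holds the interpreter via acpi_ex_enter_interpreter() */
	acpi_ex_exit_interpreter();	/* drop it across the re-entrant call */
	status = acpi_ev_execute_reg_method(region_obj, ACPI_REG_CONNECT);
	acpi_ex_enter_interpreter();	/* re-acquire before continuing */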
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 334d3c5ba617..d1f20143bb11 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -137,7 +137,9 @@ unlock:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Begin Table Object Initialization\n"));
+ acpi_ex_enter_interpreter();
status = acpi_ds_initialize_objects(table_index, node);
+ acpi_ex_exit_interpreter();
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Completed Table Object Initialization\n"));
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 046c4d0394ee..5fb838e592dc 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -480,19 +480,17 @@ static void acpi_tb_convert_fadt(void)
u32 i;
/*
- * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which
+ * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
* should be zero are indeed zero. This will work around BIOSs that
* inadvertently place values in these fields.
*
* The ACPI 1.0 reserved fields that will be zeroed are the bytes located
* at offset 45, 55, 95, and the word located at offset 109, 110.
*
- * Note: The FADT revision value is unreliable because of BIOS errors.
- * The table length is instead used as the final word on the version.
- *
- * Note: FADT revision 3 is the ACPI 2.0 version of the FADT.
+ * Note: The FADT revision value is unreliable. Only the length can be
+ * trusted.
*/
- if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) {
+ if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
acpi_gbl_FADT.preferred_profile = 0;
acpi_gbl_FADT.pstate_control = 0;
acpi_gbl_FADT.cst_control = 0;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f0a029e68d3e..0d099a24f776 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
ghes_do_proc(ghes, ghes->estatus);
out:
ghes_clear_estatus(ghes);
- return 0;
+ return rc;
}
static void ghes_add_timer(struct ghes *ghes)
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 33505c651f62..86364097e236 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -34,11 +34,11 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
if (IS_ENABLED(CONFIG_INT340X_THERMAL))
- acpi_create_platform_device(adev);
+ acpi_create_platform_device(adev, NULL);
/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
id->driver_data == INT3401_DEVICE)
- acpi_create_platform_device(adev);
+ acpi_create_platform_device(adev, NULL);
return 1;
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 680531062160..48e19d013170 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -526,6 +526,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
acpi_ec_clear(ec);
}
+#ifdef CONFIG_PM_SLEEP
static bool acpi_ec_query_flushed(struct acpi_ec *ec)
{
bool flushed;
@@ -557,6 +558,7 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_unlock_irqrestore(&ec->lock, flags);
__acpi_ec_flush_event(ec);
}
+#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 384cfc3083e1..6cf4988206f2 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -129,8 +129,18 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
control = obj->package.elements[1].integer.value;
for (i = 0; i < fan->fps_count; i++) {
- if (control == fan->fps[i].control)
+ /*
+ * When Fine Grain Control is set, return the state
+ * with the largest fan->fps[i].control value that does
+ * not exceed the current control value. fan->fps[] is
+ * an array sorted by increasing speed.
+ */
+ if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) {
+ i = (i > 0) ? i - 1 : 0;
break;
+ } else if (control == fan->fps[i].control) {
+ break;
+ }
}
if (i == fan->fps_count) {
dev_dbg(&device->dev, "Invalid control value returned\n");
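In other words, with fine-grained control the table is treated as a sorted list and the lookup picks the highest state whose control value does not exceed the value reported by _FST. A standalone sketch of that search (the table is simplified to bare control values):

/* Sketch: index of the largest entry in a sorted table whose
 * control value does not exceed 'control'; -1 if none match. */
static int fps_index_for_control(const u64 *fps_control, int count,
				 u64 control)
{
	int i;

	for (i = 0; i < count; i++) {
		if (control < fps_control[i])
			return i > 0 ? i - 1 : 0;
		if (control == fps_control[i])
			return i;
	}
	return -1;	/* beyond the table: invalid control value */
}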
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index e1d5ea6d5e40..71a7d07c28c9 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -886,6 +886,58 @@ static ssize_t revision_show(struct device *dev,
}
static DEVICE_ATTR_RO(revision);
+static ssize_t hw_error_scrub_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
+}
+
+/*
+ * The 'hw_error_scrub' attribute can have the following values written to it:
+ * '0': Switch to the default mode where an exception will only insert
+ * the address of the memory error into the poison and badblocks lists.
+ * '1': Enable a full scrub to happen if an exception for a memory error is
+ * received.
+ */
+static ssize_t hw_error_scrub_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct nvdimm_bus_descriptor *nd_desc;
+ ssize_t rc;
+ long val;
+
+ rc = kstrtol(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ device_lock(dev);
+ nd_desc = dev_get_drvdata(dev);
+ if (nd_desc) {
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ switch (val) {
+ case HW_ERROR_SCRUB_ON:
+ acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
+ break;
+ case HW_ERROR_SCRUB_OFF:
+ acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ }
+ device_unlock(dev);
+ if (rc)
+ return rc;
+ return size;
+}
+static DEVICE_ATTR_RW(hw_error_scrub);
+
/*
* This shows the number of full Address Range Scrubs that have been
* completed since driver load time. Userspace can wait on this using
@@ -958,6 +1010,7 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
static struct attribute *acpi_nfit_attributes[] = {
&dev_attr_revision.attr,
&dev_attr_scrub.attr,
+ &dev_attr_hw_error_scrub.attr,
NULL,
};
@@ -1256,6 +1309,44 @@ static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
return NULL;
}
+void __acpi_nvdimm_notify(struct device *dev, u32 event)
+{
+ struct nfit_mem *nfit_mem;
+ struct acpi_nfit_desc *acpi_desc;
+
+ dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
+ event);
+
+ if (event != NFIT_NOTIFY_DIMM_HEALTH) {
+ dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
+ event);
+ return;
+ }
+
+ acpi_desc = dev_get_drvdata(dev->parent);
+ if (!acpi_desc)
+ return;
+
+ /*
+ * If we successfully retrieved acpi_desc, then we know nfit_mem data
+ * is still valid.
+ */
+ nfit_mem = dev_get_drvdata(dev);
+ if (nfit_mem && nfit_mem->flags_attr)
+ sysfs_notify_dirent(nfit_mem->flags_attr);
+}
+EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
+
+static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_device *adev = data;
+ struct device *dev = &adev->dev;
+
+ device_lock(dev->parent);
+ __acpi_nvdimm_notify(dev, event);
+ device_unlock(dev->parent);
+}
+
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, u32 device_handle)
{
@@ -1280,6 +1371,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
return force_enable_dimms ? 0 : -ENODEV;
}
+ if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
+ ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
+ dev_err(dev, "%s: notification registration failed\n",
+ dev_name(&adev_dimm->dev));
+ return -ENXIO;
+ }
+
/*
* Until standardization materializes we need to consider 4
* different command sets. Note that checking for function0 (bit0)
@@ -1318,18 +1416,41 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
return 0;
}
+static void shutdown_dimm_notify(void *data)
+{
+ struct acpi_nfit_desc *acpi_desc = data;
+ struct nfit_mem *nfit_mem;
+
+ mutex_lock(&acpi_desc->init_mutex);
+ /*
+ * Clear out the nfit_mem->flags_attr and shut down dimm event
+ * notifications.
+ */
+ list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+ struct acpi_device *adev_dimm = nfit_mem->adev;
+
+ if (nfit_mem->flags_attr) {
+ sysfs_put(nfit_mem->flags_attr);
+ nfit_mem->flags_attr = NULL;
+ }
+ if (adev_dimm)
+ acpi_remove_notify_handler(adev_dimm->handle,
+ ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+ }
+ mutex_unlock(&acpi_desc->init_mutex);
+}
+
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_mem *nfit_mem;
- int dimm_count = 0;
+ int dimm_count = 0, rc;
+ struct nvdimm *nvdimm;
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
struct acpi_nfit_flush_address *flush;
unsigned long flags = 0, cmd_mask;
- struct nvdimm *nvdimm;
u32 device_handle;
u16 mem_flags;
- int rc;
device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
@@ -1382,7 +1503,30 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
}
- return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
+ rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
+ if (rc)
+ return rc;
+
+ /*
+ * Now that dimms are successfully registered, and async registration
+ * is flushed, attempt to enable event notification.
+ */
+ list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+ struct kernfs_node *nfit_kernfs;
+
+ nvdimm = nfit_mem->nvdimm;
+ nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
+ if (nfit_kernfs)
+ nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
+ "flags");
+ sysfs_put(nfit_kernfs);
+ if (!nfit_mem->flags_attr)
+ dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
+ nvdimm_name(nvdimm));
+ }
+
+ return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
+ acpi_desc);
}
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
@@ -1491,9 +1635,9 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
if (!info)
return -ENOMEM;
for (i = 0; i < nr; i++) {
- struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
+ struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
struct nfit_set_info_map *map = &info->mapping[i];
- struct nvdimm *nvdimm = nd_mapping->nvdimm;
+ struct nvdimm *nvdimm = mapping->nvdimm;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
spa->range_index, i);
@@ -1917,7 +2061,7 @@ static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
- struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
+ struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
struct acpi_nfit_memory_map *memdev,
struct nfit_spa *nfit_spa)
{
@@ -1934,12 +2078,12 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
return -ENODEV;
}
- nd_mapping->nvdimm = nvdimm;
+ mapping->nvdimm = nvdimm;
switch (nfit_spa_type(spa)) {
case NFIT_SPA_PM:
case NFIT_SPA_VOLATILE:
- nd_mapping->start = memdev->address;
- nd_mapping->size = memdev->region_size;
+ mapping->start = memdev->address;
+ mapping->size = memdev->region_size;
break;
case NFIT_SPA_DCR:
nfit_mem = nvdimm_provider_data(nvdimm);
@@ -1947,13 +2091,13 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
spa->range_index, nvdimm_name(nvdimm));
} else {
- nd_mapping->size = nfit_mem->bdw->capacity;
- nd_mapping->start = nfit_mem->bdw->start_address;
+ mapping->size = nfit_mem->bdw->capacity;
+ mapping->start = nfit_mem->bdw->start_address;
ndr_desc->num_lanes = nfit_mem->bdw->windows;
blk_valid = 1;
}
- ndr_desc->nd_mapping = nd_mapping;
+ ndr_desc->mapping = mapping;
ndr_desc->num_mappings = blk_valid;
ndbr_desc = to_blk_region_desc(ndr_desc);
ndbr_desc->enable = acpi_nfit_blk_region_enable;
@@ -1979,7 +2123,7 @@ static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
struct nfit_spa *nfit_spa)
{
- static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
+ static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
struct acpi_nfit_system_address *spa = nfit_spa->spa;
struct nd_blk_region_desc ndbr_desc;
struct nd_region_desc *ndr_desc;
@@ -1998,7 +2142,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
}
memset(&res, 0, sizeof(res));
- memset(&nd_mappings, 0, sizeof(nd_mappings));
+ memset(&mappings, 0, sizeof(mappings));
memset(&ndbr_desc, 0, sizeof(ndbr_desc));
res.start = spa->address;
res.end = res.start + spa->length - 1;
@@ -2014,7 +2158,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
- struct nd_mapping *nd_mapping;
+ struct nd_mapping_desc *mapping;
if (memdev->range_index != spa->range_index)
continue;
@@ -2023,14 +2167,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
spa->range_index, ND_MAX_MAPPINGS);
return -ENXIO;
}
- nd_mapping = &nd_mappings[count++];
- rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
+ mapping = &mappings[count++];
+ rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
memdev, nfit_spa);
if (rc)
goto out;
}
- ndr_desc->nd_mapping = nd_mappings;
+ ndr_desc->mapping = mappings;
ndr_desc->num_mappings = count;
rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
if (rc)
@@ -2678,29 +2822,30 @@ static int acpi_nfit_remove(struct acpi_device *adev)
return 0;
}
-static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
+void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
- struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
+ struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
- struct device *dev = &adev->dev;
union acpi_object *obj;
acpi_status status;
int ret;
dev_dbg(dev, "%s: event: %d\n", __func__, event);
- device_lock(dev);
+ if (event != NFIT_NOTIFY_UPDATE)
+ return;
+
if (!dev->driver) {
/* dev->driver may be null if we're being removed */
dev_dbg(dev, "%s: no driver found for dev\n", __func__);
- goto out_unlock;
+ return;
}
if (!acpi_desc) {
acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
if (!acpi_desc)
- goto out_unlock;
- acpi_nfit_desc_init(acpi_desc, &adev->dev);
+ return;
+ acpi_nfit_desc_init(acpi_desc, dev);
} else {
/*
* Finish previous registration before considering new
@@ -2710,10 +2855,10 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
}
/* Evaluate _FIT */
- status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
+ status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
if (ACPI_FAILURE(status)) {
dev_err(dev, "failed to evaluate _FIT\n");
- goto out_unlock;
+ return;
}
obj = buf.pointer;
@@ -2725,9 +2870,14 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
} else
dev_err(dev, "Invalid _FIT\n");
kfree(buf.pointer);
+}
+EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
- out_unlock:
- device_unlock(dev);
+static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
+{
+ device_lock(&adev->dev);
+ __acpi_nfit_notify(&adev->dev, adev->handle, event);
+ device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
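The DIMM notification setup above leans on devm_add_action_or_reset(): shutdown_dimm_notify() is guaranteed to run when the driver detaches, and also runs immediately if registering the action itself fails. A minimal sketch of the pattern (the foo_* names are hypothetical):

#include <linux/device.h>

struct foo_ctx { bool enabled; };

static int foo_enable_events(struct foo_ctx *ctx)
{
	ctx->enabled = true;	/* stands in for the real setup */
	return 0;
}

static void foo_teardown(void *data)
{
	struct foo_ctx *ctx = data;

	ctx->enabled = false;	/* stands in for the real teardown */
}

static int foo_setup(struct device *dev, struct foo_ctx *ctx)
{
	int rc = foo_enable_events(ctx);

	if (rc)
		return rc;
	/* On failure this calls foo_teardown(ctx) before returning,
	 * so there is no separate error-unwind path to maintain. */
	return devm_add_action_or_reset(dev, foo_teardown, ctx);
}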
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index 161f91539ae6..e5ce81c38eed 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -14,6 +14,7 @@
*/
#include <linux/notifier.h>
#include <linux/acpi.h>
+#include <linux/nd.h>
#include <asm/mce.h>
#include "nfit.h"
@@ -62,12 +63,25 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
}
mutex_unlock(&acpi_desc->init_mutex);
- /*
- * We can ignore an -EBUSY here because if an ARS is already
- * in progress, just let that be the last authoritative one
- */
- if (found_match)
+ if (!found_match)
+ continue;
+
+ /* If this fails due to an -ENOMEM, there is little we can do */
+ nvdimm_bus_add_poison(acpi_desc->nvdimm_bus,
+ ALIGN(mce->addr, L1_CACHE_BYTES),
+ L1_CACHE_BYTES);
+ nvdimm_region_notify(nfit_spa->nd_region,
+ NVDIMM_REVALIDATE_POISON);
+
+ if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) {
+ /*
+ * We can ignore an -EBUSY here because if an ARS is
+ * already in progress, just let that be the last
+ * authoritative one
+ */
acpi_nfit_ars_rescan(acpi_desc);
+ }
+ break;
}
mutex_unlock(&acpi_desc_lock);
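nfit_handle_mce() is an ordinary notifier on the x86 machine-check decode chain, registered elsewhere in this file with mce_register_decode_chain(). A sketch of that boilerplate (handler body reduced to a placeholder):

#include <linux/notifier.h>
#include <asm/mce.h>

static int example_handle_mce(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct mce *mce = data;

	/* Inspect mce->addr / mce->status here; keep it short. */
	(void)mce;
	return NOTIFY_DONE;
}

static struct notifier_block example_mce_dec = {
	.notifier_call = example_handle_mce,
};

static void example_register(void)
{
	mce_register_decode_chain(&example_mce_dec);
}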
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index e894ded24d99..14296f5267c8 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -78,6 +78,14 @@ enum {
NFIT_ARS_TIMEOUT = 90,
};
+enum nfit_root_notifiers {
+ NFIT_NOTIFY_UPDATE = 0x80,
+};
+
+enum nfit_dimm_notifiers {
+ NFIT_NOTIFY_DIMM_HEALTH = 0x81,
+};
+
struct nfit_spa {
struct list_head list;
struct nd_region *nd_region;
@@ -124,6 +132,7 @@ struct nfit_mem {
struct acpi_nfit_system_address *spa_bdw;
struct acpi_nfit_interleave *idt_dcr;
struct acpi_nfit_interleave *idt_bdw;
+ struct kernfs_node *flags_attr;
struct nfit_flush *nfit_flush;
struct list_head list;
struct acpi_device *adev;
@@ -152,6 +161,7 @@ struct acpi_nfit_desc {
struct list_head list;
struct kernfs_node *scrub_count_state;
unsigned int scrub_count;
+ unsigned int scrub_mode;
unsigned int cancel:1;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
@@ -159,6 +169,11 @@ struct acpi_nfit_desc {
void *iobuf, u64 len, int rw);
};
+enum scrub_mode {
+ HW_ERROR_SCRUB_OFF,
+ HW_ERROR_SCRUB_ON,
+};
+
enum nd_blk_mmio_selector {
BDW,
DCR,
@@ -223,5 +238,7 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
const u8 *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
+void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event);
+void __acpi_nvdimm_notify(struct device *dev, u32 event);
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
#endif /* __NFIT_H__ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 4305ee9db4b2..416953a42510 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -162,11 +162,18 @@ void acpi_os_vprintf(const char *fmt, va_list args)
if (acpi_in_debugger) {
kdb_printf("%s", buffer);
} else {
- printk(KERN_CONT "%s", buffer);
+ if (printk_get_level(buffer))
+ printk("%s", buffer);
+ else
+ printk(KERN_CONT "%s", buffer);
}
#else
- if (acpi_debugger_write_log(buffer) < 0)
- printk(KERN_CONT "%s", buffer);
+ if (acpi_debugger_write_log(buffer) < 0) {
+ if (printk_get_level(buffer))
+ printk("%s", buffer);
+ else
+ printk(KERN_CONT "%s", buffer);
+ }
#endif
}
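printk_get_level() returns the level character (for example '3' for KERN_ERR) when the buffer begins with a KERN_SOH prefix, and 0 otherwise; the change above uses it so that level-prefixed messages start a fresh line instead of being glued onto the previous one by KERN_CONT. The decision in isolation:

#include <linux/printk.h>

/* Sketch: emit 'buffer' on its own line when it carries an
 * explicit log level, otherwise continue the current line. */
static void example_print(const char *buffer)
{
	if (printk_get_level(buffer))
		printk("%s", buffer);	/* embedded prefix sets the level */
	else
		printk(KERN_CONT "%s", buffer);
}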
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index c983bf733ad3..bc3d914dfc3e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -87,6 +87,7 @@ struct acpi_pci_link {
static LIST_HEAD(acpi_link_list);
static DEFINE_MUTEX(acpi_link_lock);
+static int sci_irq = -1, sci_penalty;
/* --------------------------------------------------------------------------
PCI Link Device Management
@@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq)
{
int penalty = 0;
- /*
- * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
- * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
- * use for PCI IRQs.
- */
- if (irq == acpi_gbl_FADT.sci_interrupt) {
- u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
-
- if (type != IRQ_TYPE_LEVEL_LOW)
- penalty += PIRQ_PENALTY_ISA_ALWAYS;
- else
- penalty += PIRQ_PENALTY_PCI_USING;
- }
+ if (irq == sci_irq)
+ penalty += sci_penalty;
if (irq < ACPI_MAX_ISA_IRQS)
return penalty + acpi_isa_irq_penalty[irq];
- penalty += acpi_irq_pci_sharing_penalty(irq);
- return penalty;
+ return penalty + acpi_irq_pci_sharing_penalty(irq);
}
int __init acpi_irq_penalty_init(void)
@@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device));
return -ENODEV;
} else {
+ if (link->irq.active < ACPI_MAX_ISA_IRQS)
+ acpi_isa_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_USING;
+
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
continue;
if (used)
- new_penalty = acpi_irq_get_penalty(irq) +
+ new_penalty = acpi_isa_irq_penalty[irq] +
PIRQ_PENALTY_ISA_USED;
else
new_penalty = 0;
@@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
void acpi_penalize_isa_irq(int irq, int active)
{
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
- acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+ acpi_isa_irq_penalty[irq] +=
(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
@@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq)
acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
}
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+ sci_irq = irq;
+
+ if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
+ polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
+ sci_penalty = PIRQ_PENALTY_PCI_USING;
+ else
+ sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
+}
+
/*
* Over-ride default table to reserve additional IRQs for use by ISA
* e.g. acpi_irq_isa=5
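After this rework the SCI attributes are pushed in once, via the new acpi_penalize_sci_irq(), and acpi_irq_get_penalty() becomes a pure accumulation over cached state. A simplified model of the computation (the ex_* names and constants are illustrative, not the real PIRQ_PENALTY_* values):

#include <linux/types.h>

enum { EX_PENALTY_PCI_USING = 1 << 4, EX_PENALTY_ISA_ALWAYS = 1 << 16 };

static int ex_sci_irq = -1, ex_sci_penalty;

static void ex_penalize_sci(int irq, bool level_low)
{
	ex_sci_irq = irq;
	ex_sci_penalty = level_low ? EX_PENALTY_PCI_USING
				   : EX_PENALTY_ISA_ALWAYS;
}

static int ex_irq_penalty(int irq, const int *isa_penalty, int nr_isa)
{
	int penalty = 0;

	if (irq == ex_sci_irq)
		penalty += ex_sci_penalty;
	if (irq < nr_isa)
		return penalty + isa_penalty[irq];
	return penalty;	/* the real code adds a PCI sharing penalty here */
}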
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index cea52528aa18..2237d3f24f0e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -31,6 +31,7 @@
#include <linux/sched.h> /* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
+#include <linux/cpu.h>
#include <acpi/processor.h>
/*
@@ -115,7 +116,7 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
* Callers should disable interrupts before the call and enable
* interrupts after return.
*/
-static void acpi_safe_halt(void)
+static void __cpuidle acpi_safe_halt(void)
{
if (!tif_need_resched()) {
safe_halt();
@@ -645,7 +646,7 @@ static int acpi_idle_bm_check(void)
*
* Caller disables interrupt before call and enables interrupt after return.
*/
-static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index f2fd3fee588a..03f5ec11ab31 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -468,10 +468,11 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
}
/**
- * acpi_data_get_property_reference - returns handle to the referenced object
- * @data: ACPI device data object containing the property
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
* @propname: Name of the property
* @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
* @args: Location to store the returned reference with optional arguments
*
* Find property with @propname, verify that it is a package containing at least
@@ -482,17 +483,40 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
* If there's more than one reference in the property value package, @index is
* used to select the one to return.
*
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ * "cs-gpios",
+ * Package () {
+ * ^GPIO, 19, 0, 0,
+ * ^GPIO, 20, 0, 0,
+ * 0,
+ * ^GPIO, 21, 0, 0,
+ * }
+ * }
+ *
+ * Calling this function with index %2 returns %-ENOENT and with index %3
+ * returns the last entry. If the property does not contain any more values,
+ * %-ENODATA is returned. The NULL entry must be a single integer and
+ * should preferably contain the value %0.
+ *
* Return: %0 on success, negative error code on failure.
*/
-static int acpi_data_get_property_reference(struct acpi_device_data *data,
- const char *propname, size_t index,
- struct acpi_reference_args *args)
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *propname, size_t index, size_t num_args,
+ struct acpi_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
+ struct acpi_device_data *data;
struct acpi_device *device;
int ret, idx = 0;
+ data = acpi_device_data_of_node(fwnode);
+ if (!data)
+ return -EINVAL;
+
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
return ret;
@@ -532,59 +556,54 @@ static int acpi_data_get_property_reference(struct acpi_device_data *data,
while (element < end) {
u32 nargs, i;
- if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
- return -EPROTO;
-
- ret = acpi_bus_get_device(element->reference.handle, &device);
- if (ret)
- return -ENODEV;
-
- element++;
- nargs = 0;
-
- /* assume following integer elements are all args */
- for (i = 0; element + i < end; i++) {
- int type = element[i].type;
+ if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ ret = acpi_bus_get_device(element->reference.handle,
+ &device);
+ if (ret)
+ return -ENODEV;
+
+ nargs = 0;
+ element++;
+
+ /* assume following integer elements are all args */
+ for (i = 0; element + i < end && i < num_args; i++) {
+ int type = element[i].type;
+
+ if (type == ACPI_TYPE_INTEGER)
+ nargs++;
+ else if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
+ else
+ return -EPROTO;
+ }
- if (type == ACPI_TYPE_INTEGER)
- nargs++;
- else if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
- else
+ if (nargs > MAX_ACPI_REFERENCE_ARGS)
return -EPROTO;
- }
- if (idx++ == index) {
- args->adev = device;
- args->nargs = nargs;
- for (i = 0; i < nargs; i++)
- args->args[i] = element[i].integer.value;
+ if (idx == index) {
+ args->adev = device;
+ args->nargs = nargs;
+ for (i = 0; i < nargs; i++)
+ args->args[i] = element[i].integer.value;
- return 0;
+ return 0;
+ }
+
+ element += nargs;
+ } else if (element->type == ACPI_TYPE_INTEGER) {
+ if (idx == index)
+ return -ENOENT;
+ element++;
+ } else {
+ return -EPROTO;
}
- element += nargs;
+ idx++;
}
- return -EPROTO;
-}
-
-/**
- * acpi_node_get_property_reference - get a handle to the referenced object.
- * @fwnode: Firmware node to get the property from.
- * @propname: Name of the property.
- * @index: Index of the reference to return.
- * @args: Location to store the returned reference with optional arguments.
- */
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, size_t index,
- struct acpi_reference_args *args)
-{
- struct acpi_device_data *data = acpi_device_data_of_node(fwnode);
-
- return data ? acpi_data_get_property_reference(data, name, index, args) : -EINVAL;
+ return -ENODATA;
}
-EXPORT_SYMBOL_GPL(acpi_node_get_property_reference);
+EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(struct acpi_device_data *data,
const char *propname,
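A consumer reads such a reference through the acpi_node_get_property_reference() wrapper, which (assuming the inline wrapper in the header supplies MAX_ACPI_REFERENCE_ARGS for @num_args) now tolerates the integer holes described above. A sketch walking the "cs-gpios" package from the kernel-doc example (device handling elided):

#include <linux/acpi.h>

static void example_dump_cs_gpios(struct fwnode_handle *fwnode)
{
	struct acpi_reference_args args;
	int i, ret;

	/* Index 2 in the example package is the integer "hole" and
	 * yields -ENOENT; past the last entry we get -ENODATA. */
	for (i = 0; ; i++) {
		ret = acpi_node_get_property_reference(fwnode, "cs-gpios",
						       i, &args);
		if (ret == -ENOENT)
			continue;	/* hole: no CS line at this index */
		if (ret)
			break;		/* -ENODATA: end of the package */

		pr_info("cs %d: %zu args, first=%llu\n",
			i, args.nargs, args.nargs ? args.args[0] : 0);
	}
}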
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 035ac646d8db..3d1856f1f4d0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1734,7 +1734,7 @@ static void acpi_default_enumeration(struct acpi_device *device)
&is_spi_i2c_slave);
acpi_dev_free_resource_list(&resource_list);
if (!is_spi_i2c_slave) {
- acpi_create_platform_device(device);
+ acpi_create_platform_device(device, NULL);
acpi_device_set_enumerated(device);
} else {
blocking_notifier_call_chain(&acpi_reconfig_chain,
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index f4ebe39539af..35e8fbca10ad 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -520,7 +520,8 @@ static void acpi_thermal_check(void *data)
if (!tz->tz_enabled)
return;
- thermal_zone_device_update(tz->thermal_zone);
+ thermal_zone_device_update(tz->thermal_zone,
+ THERMAL_EVENT_UNSPECIFIED);
}
/* sys I/F for generic thermal sysfs support */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 562af94bec35..3c71b982bf2a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1002,7 +1002,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- uint32_t desc)
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1010,12 +1010,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc)
+ if (desc < ref->desc) {
n = n->rb_left;
- else if (desc > ref->desc)
+ } else if (desc > ref->desc) {
n = n->rb_right;
- else
+ } else if (need_strong_ref && !ref->strong) {
+ binder_user_error("tried to use weak ref as strong ref\n");
+ return NULL;
+ } else {
return ref;
+ }
}
return NULL;
}
@@ -1285,7 +1289,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
@@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle);
+ ref = binder_get_ref(proc, tr->target.handle, true);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
@@ -1577,7 +1584,9 @@ static void binder_transaction(struct binder_proc *proc,
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
fp->handle = ref->desc;
+ fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
@@ -1589,7 +1598,10 @@ static void binder_transaction(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1624,7 +1636,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
+ fp->binder = 0;
fp->handle = new_ref->desc;
+ fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
@@ -1678,6 +1692,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
+ fp->binder = 0;
fp->handle = target_fd;
} break;
@@ -1800,7 +1815,9 @@ static int binder_thread_write(struct binder_proc *proc,
ref->desc);
}
} else
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target,
+ cmd == BC_ACQUIRE ||
+ cmd == BC_RELEASE);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
@@ -1996,7 +2013,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 90eabaf81215..9669fc7c19df 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1400,142 +1400,59 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
}
#endif
-/*
- * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
- * to single msi.
- */
-static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports,
- struct ahci_host_priv *hpriv, unsigned long flags)
+static int ahci_get_irq_vector(struct ata_host *host, int port)
{
- int nvec, i, rc;
-
- /* Do not init MSI-X if MSI is disabled for the device */
- if (hpriv->flags & AHCI_HFLAG_NO_MSI)
- return -ENODEV;
-
- nvec = pci_msix_vec_count(pdev);
- if (nvec < 0)
- return nvec;
-
- /*
- * Proper MSI-X implementations will have a vector per-port.
- * Barring that, we prefer single-MSI over single-MSIX. If this
- * check fails (not enough MSI-X vectors for all ports) we will
- * be called again with the flag clear iff ahci_init_msi()
- * fails.
- */
- if (flags & AHCI_HFLAG_MULTI_MSIX) {
- if (nvec < n_ports)
- return -ENODEV;
- nvec = n_ports;
- } else if (nvec) {
- nvec = 1;
- } else {
- /*
- * Emit dev_err() since this was the non-legacy irq
- * method of last resort.
- */
- rc = -ENODEV;
- goto fail;
- }
-
- for (i = 0; i < nvec; i++)
- hpriv->msix[i].entry = i;
- rc = pci_enable_msix_exact(pdev, hpriv->msix, nvec);
- if (rc < 0)
- goto fail;
-
- if (nvec > 1)
- hpriv->flags |= AHCI_HFLAG_MULTI_MSIX;
- hpriv->irq = hpriv->msix[0].vector; /* for single msi-x */
-
- return nvec;
-fail:
- dev_err(&pdev->dev,
- "failed to enable MSI-X with error %d, # of vectors: %d\n",
- rc, nvec);
-
- return rc;
+ return pci_irq_vector(to_pci_dev(host->dev), port);
}
static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
- int rc, nvec;
+ int nvec;
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
return -ENODEV;
- nvec = pci_msi_vec_count(pdev);
- if (nvec < 0)
- return nvec;
-
/*
* If number of MSIs is less than number of ports then Sharing Last
* Message mode could be enforced. In this case assume that advantage
* of multiple MSIs is negated and use single MSI mode instead.
*/
- if (nvec < n_ports)
- goto single_msi;
+ if (n_ports > 1) {
+ nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nvec > 0) {
+ if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
+ hpriv->get_irq_vector = ahci_get_irq_vector;
+ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+ return nvec;
+ }
- rc = pci_enable_msi_exact(pdev, nvec);
- if (rc == -ENOSPC)
- goto single_msi;
- if (rc < 0)
- return rc;
+ /*
+ * Fallback to single MSI mode if the controller
+ * enforced MRSM mode.
+ */
+ printk(KERN_INFO
+ "ahci: MRSM is on, fallback to single MSI\n");
+ pci_free_irq_vectors(pdev);
+ }
- /* fallback to single MSI mode if the controller enforced MRSM mode */
- if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
- pci_disable_msi(pdev);
- printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
- goto single_msi;
+ /*
+ * -ENOSPC indicates we don't have enough vectors. Don't bother
+ * trying a single vector for any other error:
+ */
+ if (nvec < 0 && nvec != -ENOSPC)
+ return nvec;
}
- if (nvec > 1)
- hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
-
- goto out;
-
-single_msi:
- nvec = 1;
-
- rc = pci_enable_msi(pdev);
- if (rc < 0)
- return rc;
-out:
- hpriv->irq = pdev->irq;
-
- return nvec;
-}
-
-static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
- struct ahci_host_priv *hpriv)
-{
- int nvec;
-
/*
- * Try to enable per-port MSI-X. If the host is not capable
- * fall back to single MSI before finally attempting single
- * MSI-X.
+ * If the host is not capable of supporting per-port vectors, fall
+ * back to single MSI before finally attempting single MSI-X.
*/
- nvec = ahci_init_msix(pdev, n_ports, hpriv, AHCI_HFLAG_MULTI_MSIX);
- if (nvec >= 0)
- return nvec;
-
- nvec = ahci_init_msi(pdev, n_ports, hpriv);
- if (nvec >= 0)
- return nvec;
-
- /* try single-msix */
- nvec = ahci_init_msix(pdev, n_ports, hpriv, 0);
- if (nvec >= 0)
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec == 1)
return nvec;
-
- /* legacy intx interrupts */
- pci_intx(pdev, 1);
- hpriv->irq = pdev->irq;
-
- return 0;
+ return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1698,11 +1615,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!host)
return -ENOMEM;
host->private_data = hpriv;
- hpriv->msix = devm_kzalloc(&pdev->dev,
- sizeof(struct msix_entry) * n_ports, GFP_KERNEL);
- if (!hpriv->msix)
- return -ENOMEM;
- ahci_init_interrupts(pdev, n_ports, hpriv);
+
+ if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
+ /* legacy intx interrupts */
+ pci_intx(pdev, 1);
+ }
+ hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
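The conversion above is an instance of the then-new pci_alloc_irq_vectors() API, which folds the separate MSI-X, MSI and legacy setup paths into one call with a uniform pci_irq_vector() lookup afterwards. A generic sketch of the pattern (the example_* names are hypothetical):

#include <linux/pci.h>
#include <linux/interrupt.h>

static int example_setup_irqs(struct pci_dev *pdev, unsigned int nr_queues,
			      irq_handler_t handler, void *data)
{
	int nvec, i, rc;

	/* Ask for one vector per queue, accepting fewer; the flags
	 * let the core fall back from MSI-X to MSI to legacy INTx. */
	nvec = pci_alloc_irq_vectors(pdev, 1, nr_queues,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI |
				     PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "example", data);
		if (rc)
			goto err;
	}
	return nvec;
err:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), data);
	pci_free_irq_vectors(pdev);
	return rc;
}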
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 70b06bcfb7e3..0cc08f892fea 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -242,12 +242,10 @@ enum {
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
#ifdef CONFIG_PCI_MSI
- AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
- AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
+ AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */
#else
/* compile out MSI infrastructure */
AHCI_HFLAG_MULTI_MSI = 0,
- AHCI_HFLAG_MULTI_MSIX = 0,
#endif
AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
@@ -351,7 +349,6 @@ struct ahci_host_priv {
* the PHY position in this array.
*/
struct phy **phys;
- struct msix_entry *msix; /* Optional MSI-X support */
unsigned nports; /* Number of ports */
void *plat_data; /* Other platform data */
unsigned int irq; /* interrupt line */
@@ -362,22 +359,11 @@ struct ahci_host_priv {
*/
void (*start_engine)(struct ata_port *ap);
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
-};
-#ifdef CONFIG_PCI_MSI
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
- if (hpriv->flags & AHCI_HFLAG_MULTI_MSIX)
- return hpriv->msix[port].vector;
- else
- return hpriv->irq + port;
-}
-#else
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
- return hpriv->irq;
-}
-#endif
+ /* only required for per-port MSI(-X) support */
+ int (*get_irq_vector)(struct ata_host *host,
+ int port);
+};
extern int ahci_ignore_sss;
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 7bdee9bd8786..1eba8dff875e 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -30,24 +30,23 @@
#define PORT_PHY3 0xB0
#define PORT_PHY4 0xB4
#define PORT_PHY5 0xB8
+#define PORT_AXICC 0xBC
#define PORT_TRANS 0xC8
/* port register default value */
#define AHCI_PORT_PHY_1_CFG 0xa003fffe
#define AHCI_PORT_TRANS_CFG 0x08000029
+#define AHCI_PORT_AXICC_CFG 0x3fffffff
/* for ls1021a */
#define LS1021A_PORT_PHY2 0x28183414
#define LS1021A_PORT_PHY3 0x0e080e06
#define LS1021A_PORT_PHY4 0x064a080b
#define LS1021A_PORT_PHY5 0x2aa86470
+#define LS1021A_AXICC_ADDR 0xC0
#define SATA_ECC_DISABLE 0x00020000
-/* for ls1043a */
-#define LS1043A_PORT_PHY2 0x28184d1f
-#define LS1043A_PORT_PHY3 0x0e081509
-
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,
@@ -137,7 +136,7 @@ static struct ata_port_operations ahci_qoriq_ops = {
.hardreset = ahci_qoriq_hardreset,
};
-static struct ata_port_info ahci_qoriq_port_info = {
+static const struct ata_port_info ahci_qoriq_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -162,18 +161,19 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR);
break;
case AHCI_LS1043A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
- writel(LS1043A_PORT_PHY2, reg_base + PORT_PHY2);
- writel(LS1043A_PORT_PHY3, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS2080A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
}
@@ -221,12 +221,6 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
if (rc)
goto disable_resources;
- /* Workaround for ls2080a */
- if (qoriq_priv->type == AHCI_LS2080A) {
- hpriv->flags |= AHCI_HFLAG_NO_NCQ;
- ahci_qoriq_port_info.flags &= ~ATA_FLAG_NCQ;
- }
-
rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info,
&ahci_qoriq_sht);
if (rc)
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 8ff428fe8e0f..bc345f249555 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -147,6 +147,7 @@ static struct scsi_host_template ahci_platform_sht = {
static int st_ahci_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct st_ahci_drv_data *drv_data;
struct ahci_host_priv *hpriv;
int err;
@@ -170,6 +171,9 @@ static int st_ahci_probe(struct platform_device *pdev)
st_ahci_configure_oob(hpriv->mmio);
+ of_property_read_u32(dev->of_node,
+ "ports-implemented", &hpriv->force_port_map);
+
err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
&ahci_platform_sht);
if (err) {
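of_property_read_u32() leaves its output untouched when the property is absent, which is why the driver can treat "ports-implemented" as optional and keep force_port_map at its zero default. The idiom in isolation:

#include <linux/of.h>

/* Sketch: read an optional DT property, keeping a default when
 * the node does not define it (a failed read writes nothing). */
static u32 example_ports_implemented(const struct device_node *np)
{
	u32 map = 0;	/* default: let the controller report ports */

	of_property_read_u32(np, "ports-implemented", &map);
	return map;
}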
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index dcf2c724fd06..0d028ead99e8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2520,7 +2520,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
*/
for (i = 0; i < host->n_ports; i++) {
struct ahci_port_priv *pp = host->ports[i]->private_data;
- int irq = ahci_irq_vector(hpriv, i);
+ int irq = hpriv->get_irq_vector(host, i);
/* Do not receive interrupts sent by dummy ports */
if (!pp) {
@@ -2556,10 +2556,15 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
int irq = hpriv->irq;
int rc;
- if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+ if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
if (hpriv->irq_handler)
dev_warn(host->dev,
"both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
+ if (!hpriv->get_irq_vector) {
+ dev_err(host->dev,
+ "AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
+ return -EIO;
+ }
rc = ahci_host_activate_multi_irqs(host, sht);
} else {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e207b33e4ce9..9cceb4a875a5 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1159,8 +1159,6 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
- sdev->no_report_opcodes = 1;
- sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
@@ -3282,18 +3280,125 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
return 1;
}
+/**
+ * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
+ * @cmd: SCSI command being translated
+ * @trmax: Maximum number of entries that will fit in sector_size bytes.
+ * @sector: Starting sector
+ * @count: Total Range of request in logical sectors
+ *
+ * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
+ * descriptor.
+ *
+ * Up to 64 entries of the format:
+ * 63:48 Range Length
+ * 47:0 LBA
+ *
+ * Range Length of 0 is ignored.
+ * LBAs should be in sorted order and must not overlap.
+ *
+ * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
+ u64 sector, u32 count)
+{
+ struct scsi_device *sdp = cmd->device;
+ size_t len = sdp->sector_size;
+ size_t r;
+ __le64 *buf;
+ u32 i = 0;
+ unsigned long flags;
+
+ WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+ if (len > ATA_SCSI_RBUF_SIZE)
+ len = ATA_SCSI_RBUF_SIZE;
+
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+ buf = ((void *)ata_scsi_rbuf);
+ memset(buf, 0, len);
+ while (i < trmax) {
+ u64 entry = sector |
+ ((u64)(count > 0xffff ? 0xffff : count) << 48);
+ buf[i++] = __cpu_to_le64(entry);
+ if (count <= 0xffff)
+ break;
+ count -= 0xffff;
+ sector += 0xffff;
+ }
+ r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+ return r;
+}
+
+/**
+ * ata_format_sct_write_same() - SATL Write Same to ATA SCT Write Same
+ * @cmd: SCSI command being translated
+ * @lba: Starting sector
+ * @num: Number of sectors to be zeroed.
+ *
+ * Rewrite the WRITE SAME payload to be an SCT Write Same formatted
+ * descriptor.
+ * NOTE: Writes a pattern (0's) in the foreground.
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num)
+{
+ struct scsi_device *sdp = cmd->device;
+ size_t len = sdp->sector_size;
+ size_t r;
+ u16 *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+ buf = ((void *)ata_scsi_rbuf);
+
+ put_unaligned_le16(0x0002, &buf[0]); /* SCT_ACT_WRITE_SAME */
+ put_unaligned_le16(0x0101, &buf[1]); /* WRITE PTRN FG */
+ put_unaligned_le64(lba, &buf[2]);
+ put_unaligned_le64(num, &buf[6]);
+ put_unaligned_le32(0u, &buf[10]); /* pattern */
+
+ WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+ if (len > ATA_SCSI_RBUF_SIZE)
+ len = ATA_SCSI_RBUF_SIZE;
+
+ r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+ return r;
+}
+
+/**
+ * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
+ * @qc: Command to be translated
+ *
+ * Translate a SCSI WRITE SAME command to either a DSM TRIM command or
+ * an SCT Write Same command, depending on whether the WRITE SAME UNMAP
+ * flag is set: when set, translate to DSM TRIM; when clear, translate
+ * to SCT Write Same.
+ */
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
+ struct scsi_device *sdp = scmd->device;
+ size_t len = sdp->sector_size;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
+ const u32 trmax = len >> 3;
u32 size;
- void *buf;
u16 fp;
u8 bp = 0xff;
+ u8 unmap = cdb[1] & 0x8;
/* we may not issue DMA commands if no DMA mode is set */
if (unlikely(!dev->dma_mode))
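For reference, each DSM TRIM range descriptor built by ata_format_dsm_trim_descr() above packs a 48-bit starting LBA and a 16-bit range length into one little-endian 64-bit word. A minimal sketch of that packing (the helper name is illustrative, not from libata):

    /* Pack one TRIM range descriptor: LBA in bits 47:0, length in 63:48. */
    static inline __le64 trim_range(u64 lba, u16 nsectors)
    {
        return __cpu_to_le64((lba & ((1ULL << 48) - 1)) |
                             ((u64)nsectors << 48));
    }

So a request of 0x18000 sectors starting at LBA 0x1000 becomes two entries: (0x1000, 0xffff) followed by (0x10fff, 0x8001), exactly as the loop above emits them.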
@@ -3305,11 +3410,26 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
}
scsi_16_lba_len(cdb, &block, &n_block);
- /* for now we only support WRITE SAME with the unmap bit set */
- if (unlikely(!(cdb[1] & 0x8))) {
- fp = 1;
- bp = 3;
- goto invalid_fld;
+ if (unmap) {
+ /* If trim is not enabled the cmd is invalid. */
+ if ((dev->horkage & ATA_HORKAGE_NOTRIM) ||
+ !ata_id_has_trim(dev->id)) {
+ fp = 1;
+ bp = 3;
+ goto invalid_fld;
+ }
+ /* If the request is too large the cmd is invalid */
+ if (n_block > 0xffff * trmax) {
+ fp = 2;
+ goto invalid_fld;
+ }
+ } else {
+ /* If write same is not available the cmd is invalid */
+ if (!ata_id_sct_write_same(dev->id)) {
+ fp = 1;
+ bp = 3;
+ goto invalid_fld;
+ }
}
/*
@@ -3319,32 +3439,54 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
if (!scsi_sg_count(scmd))
goto invalid_param_len;
- buf = page_address(sg_page(scsi_sglist(scmd)));
-
- if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) {
- size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block);
- } else {
- fp = 2;
- goto invalid_fld;
- }
+ /*
+ * The transfer size must match the sector size in bytes.
+ * For DATA SET MANAGEMENT TRIM in ACS-2, nsect (aka count)
+ * is defined as the number of 512-byte blocks to be transferred.
+ */
+ if (unmap) {
+ size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
+ if (size != len)
+ goto invalid_param_len;
- if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
- /* Newer devices support queued TRIM commands */
- tf->protocol = ATA_PROT_NCQ;
- tf->command = ATA_CMD_FPDMA_SEND;
- tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
- tf->nsect = qc->tag << 3;
- tf->hob_feature = (size / 512) >> 8;
- tf->feature = size / 512;
+ if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
+ /* Newer devices support queued TRIM commands */
+ tf->protocol = ATA_PROT_NCQ;
+ tf->command = ATA_CMD_FPDMA_SEND;
+ tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
+ tf->nsect = qc->tag << 3;
+ tf->hob_feature = (size / 512) >> 8;
+ tf->feature = size / 512;
- tf->auxiliary = 1;
+ tf->auxiliary = 1;
+ } else {
+ tf->protocol = ATA_PROT_DMA;
+ tf->hob_feature = 0;
+ tf->feature = ATA_DSM_TRIM;
+ tf->hob_nsect = (size / 512) >> 8;
+ tf->nsect = size / 512;
+ tf->command = ATA_CMD_DSM;
+ }
} else {
- tf->protocol = ATA_PROT_DMA;
+ size = ata_format_sct_write_same(scmd, block, n_block);
+ if (size != len)
+ goto invalid_param_len;
+
tf->hob_feature = 0;
- tf->feature = ATA_DSM_TRIM;
- tf->hob_nsect = (size / 512) >> 8;
- tf->nsect = size / 512;
- tf->command = ATA_CMD_DSM;
+ tf->feature = 0;
+ tf->hob_nsect = 0;
+ tf->nsect = 1;
+ tf->lbah = 0;
+ tf->lbam = 0;
+ tf->lbal = ATA_CMD_STANDBYNOW1;
+ tf->hob_lbah = 0;
+ tf->hob_lbam = 0;
+ tf->hob_lbal = 0;
+ tf->device = ATA_CMD_STANDBYNOW1;
+ tf->protocol = ATA_PROT_DMA;
+ tf->command = ATA_CMD_WRITE_LOG_DMA_EXT;
+ if (unlikely(dev->flags & ATA_DFLAG_PIO))
+ tf->command = ATA_CMD_WRITE_LOG_EXT;
}
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
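A worked check of the limits enforced above, assuming 512-byte logical sectors: each descriptor is 8 bytes, so trmax entries fit in one payload sector and each entry covers at most 0xffff sectors.

    /* With 512-byte sectors (a worked example, not new code):
     *   trmax   = 512 >> 3            = 64 entries per payload sector
     *   maximum = 64 * 0xffff sectors = 4194240 sectors (just under 2 GiB)
     */

Anything larger trips the n_block > 0xffff * trmax test and the command is rejected as an invalid field.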
@@ -3368,6 +3510,76 @@ invalid_opcode:
}
/**
+ * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
+ * @args: device MAINTENANCE_IN data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Yields a subset of the supported operation codes, enough to satisfy
+ * scsi_report_opcode().
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+{
+ struct ata_device *dev = args->dev;
+ u8 *cdb = args->cmd->cmnd;
+ u8 supported = 0;
+ unsigned int err = 0;
+
+ if (cdb[2] != 1) {
+ ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
+ err = 2;
+ goto out;
+ }
+ switch (cdb[3]) {
+ case INQUIRY:
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case READ_CAPACITY:
+ case SERVICE_ACTION_IN_16:
+ case REPORT_LUNS:
+ case REQUEST_SENSE:
+ case SYNCHRONIZE_CACHE:
+ case REZERO_UNIT:
+ case SEEK_6:
+ case SEEK_10:
+ case TEST_UNIT_READY:
+ case SEND_DIAGNOSTIC:
+ case MAINTENANCE_IN:
+ case READ_6:
+ case READ_10:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_16:
+ case ATA_12:
+ case ATA_16:
+ case VERIFY:
+ case VERIFY_16:
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ case START_STOP:
+ supported = 3;
+ break;
+ case WRITE_SAME_16:
+ if (!ata_id_sct_write_same(dev->id))
+ break;
+ /* fallthrough: if SCT ... only enable for ZBC */
+ case ZBC_IN:
+ case ZBC_OUT:
+ if (ata_id_zoned_cap(dev->id) ||
+ dev->class == ATA_DEV_ZAC)
+ supported = 3;
+ break;
+ default:
+ break;
+ }
+out:
+ rbuf[1] = supported; /* supported */
+ return err;
+}
+
+/**
* ata_scsi_report_zones_complete - convert ATA output
* @qc: command structure returning the data
*
@@ -3610,7 +3822,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
- char mpage[CACHE_MPAGE_LEN];
+ u8 mpage[CACHE_MPAGE_LEN];
u8 wce;
int i;
@@ -3666,7 +3878,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
- char mpage[CONTROL_MPAGE_LEN];
+ u8 mpage[CONTROL_MPAGE_LEN];
u8 d_sense;
int i;
@@ -3701,8 +3913,6 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
dev->flags |= ATA_DFLAG_D_SENSE;
else
dev->flags &= ~ATA_DFLAG_D_SENSE;
- qc->scsicmd->result = SAM_STAT_GOOD;
- qc->scsicmd->scsi_done(qc->scsicmd);
return 0;
}
@@ -3829,6 +4039,8 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
fp += hdr_len + bd_len;
goto invalid_param;
+ } else {
+ goto skip; /* No ATA command to send */
}
break;
default: /* invalid page code */
@@ -4147,6 +4359,13 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_invalid_field(dev, cmd, 1);
break;
+ case MAINTENANCE_IN:
+ if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+ else
+ ata_scsi_invalid_field(dev, cmd, 1);
+ break;
+
/* all other commands */
default:
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
@@ -4179,7 +4398,6 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
shost->max_lun = 1;
shost->max_channel = 1;
shost->max_cmd_len = 16;
- shost->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer()
* callback and it needs to see every deferred qc.
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 9f27b14009f9..1611e0e8d767 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -347,10 +347,8 @@ static int at91sam9_smc_fields_init(struct device *dev)
field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
fields.mode = devm_regmap_field_alloc(dev, smc, field);
- if (IS_ERR(fields.mode))
- return PTR_ERR(fields.mode);
- return 0;
+ return PTR_ERR_OR_ZERO(fields.mode);
}
static int pata_at91_probe(struct platform_device *pdev)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 27245957eee3..475a00669427 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -152,8 +152,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
div = 8;
T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
- if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
- BUG();
+ BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));
t1 = timing.setup;
if (t1)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 745489a1c86a..efc48bf89d51 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1727,15 +1727,13 @@ static int mv_port_start(struct ata_port *ap)
return -ENOMEM;
ap->private_data = pp;
- pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+ pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
if (!pp->crqb)
return -ENOMEM;
- memset(pp->crqb, 0, MV_CRQB_Q_SZ);
- pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+ pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
if (!pp->crpb)
goto out_port_free_dma_mem;
- memset(pp->crpb, 0, MV_CRPB_Q_SZ);
/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index c07e725ea93d..10e1b9eee10e 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -119,4 +119,13 @@ config CFAG12864B_RATE
If you compile this as a module, you can still override this
value using the module parameters.
+config IMG_ASCII_LCD
+ tristate "Imagination Technologies ASCII LCD Display"
+ default y if MIPS_MALTA || MIPS_SEAD3
+ select SYSCON
+ help
+ Enable this to support the simple ASCII LCD displays found on
+ development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
+ from Imagination Technologies.
+
endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 8a8936a468b9..3127175c89df 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
+obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
new file mode 100644
index 000000000000..bf43b5d2aafc
--- /dev/null
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+struct img_ascii_lcd_ctx;
+
+/**
+ * struct img_ascii_lcd_config - Configuration information about an LCD model
+ * @num_chars: the number of characters the LCD can display
+ * @external_regmap: true if registers are in a system controller, else false
+ * @update: function called to update the LCD
+ */
+struct img_ascii_lcd_config {
+ unsigned int num_chars;
+ bool external_regmap;
+ void (*update)(struct img_ascii_lcd_ctx *ctx);
+};
+
+/**
+ * struct img_ascii_lcd_ctx - Private data structure
+ * @pdev: the ASCII LCD platform device
+ * @base: the base address of the LCD registers
+ * @regmap: the regmap through which LCD registers are accessed
+ * @offset: the offset within regmap to the start of the LCD registers
+ * @cfg: pointer to the LCD model configuration
+ * @message: the full message to display or scroll on the LCD
+ * @message_len: the length of the @message string
+ * @scroll_pos: index of the first character of @message currently displayed
+ * @scroll_rate: scroll interval in jiffies
+ * @timer: timer used to implement scrolling
+ * @curr: the string currently displayed on the LCD
+ */
+struct img_ascii_lcd_ctx {
+ struct platform_device *pdev;
+ union {
+ void __iomem *base;
+ struct regmap *regmap;
+ };
+ u32 offset;
+ const struct img_ascii_lcd_config *cfg;
+ char *message;
+ unsigned int message_len;
+ unsigned int scroll_pos;
+ unsigned int scroll_rate;
+ struct timer_list timer;
+ char curr[] __aligned(8);
+};
+
+/*
+ * MIPS Boston development board
+ */
+
+static void boston_update(struct img_ascii_lcd_ctx *ctx)
+{
+ ulong val;
+
+#if BITS_PER_LONG == 64
+ val = *((u64 *)&ctx->curr[0]);
+ __raw_writeq(val, ctx->base);
+#elif BITS_PER_LONG == 32
+ val = *((u32 *)&ctx->curr[0]);
+ __raw_writel(val, ctx->base);
+ val = *((u32 *)&ctx->curr[4]);
+ __raw_writel(val, ctx->base + 4);
+#else
+# error Not 32 or 64 bit
+#endif
+}
+
+static struct img_ascii_lcd_config boston_config = {
+ .num_chars = 8,
+ .update = boston_update,
+};
+
+/*
+ * MIPS Malta development board
+ */
+
+static void malta_update(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ctx->cfg->num_chars; i++) {
+ err = regmap_write(ctx->regmap,
+ ctx->offset + (i * 8), ctx->curr[i]);
+ if (err)
+ break;
+ }
+
+ if (unlikely(err))
+ pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config malta_config = {
+ .num_chars = 8,
+ .external_regmap = true,
+ .update = malta_update,
+};
+
+/*
+ * MIPS SEAD3 development board
+ */
+
+enum {
+ SEAD3_REG_LCD_CTRL = 0x00,
+#define SEAD3_REG_LCD_CTRL_SETDRAM BIT(7)
+ SEAD3_REG_LCD_DATA = 0x08,
+ SEAD3_REG_CPLD_STATUS = 0x10,
+#define SEAD3_REG_CPLD_STATUS_BUSY BIT(0)
+ SEAD3_REG_CPLD_DATA = 0x18,
+#define SEAD3_REG_CPLD_DATA_BUSY BIT(7)
+};
+
+static int sead3_wait_sm_idle(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int status;
+ int err;
+
+ do {
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_CPLD_STATUS,
+ &status);
+ if (err)
+ return err;
+ } while (status & SEAD3_REG_CPLD_STATUS_BUSY);
+
+ return 0;
+}
+
+static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int cpld_data;
+ int err;
+
+ err = sead3_wait_sm_idle(ctx);
+ if (err)
+ return err;
+
+ do {
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_CTRL,
+ &cpld_data);
+ if (err)
+ return err;
+
+ err = sead3_wait_sm_idle(ctx);
+ if (err)
+ return err;
+
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_CPLD_DATA,
+ &cpld_data);
+ if (err)
+ return err;
+ } while (cpld_data & SEAD3_REG_CPLD_DATA_BUSY);
+
+ return 0;
+}
+
+static void sead3_update(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ctx->cfg->num_chars; i++) {
+ err = sead3_wait_lcd_idle(ctx);
+ if (err)
+ break;
+
+ err = regmap_write(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_CTRL,
+ SEAD3_REG_LCD_CTRL_SETDRAM | i);
+ if (err)
+ break;
+
+ err = sead3_wait_lcd_idle(ctx);
+ if (err)
+ break;
+
+ err = regmap_write(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_DATA,
+ ctx->curr[i]);
+ if (err)
+ break;
+ }
+
+ if (unlikely(err))
+ pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config sead3_config = {
+ .num_chars = 16,
+ .external_regmap = true,
+ .update = sead3_update,
+};
+
+static const struct of_device_id img_ascii_lcd_matches[] = {
+ { .compatible = "img,boston-lcd", .data = &boston_config },
+ { .compatible = "mti,malta-lcd", .data = &malta_config },
+ { .compatible = "mti,sead3-lcd", .data = &sead3_config },
+ { /* sentinel */ },
+};
+
+/**
+ * img_ascii_lcd_scroll() - scroll the display by a character
+ * @arg: really a pointer to the private data structure
+ *
+ * Scroll the current message along the LCD by one character, rearming the
+ * timer if required.
+ */
+static void img_ascii_lcd_scroll(unsigned long arg)
+{
+ struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg;
+ unsigned int i, ch = ctx->scroll_pos;
+ unsigned int num_chars = ctx->cfg->num_chars;
+
+ /* update the current message string */
+ for (i = 0; i < num_chars;) {
+ /* copy as many characters from the string as possible */
+ for (; i < num_chars && ch < ctx->message_len; i++, ch++)
+ ctx->curr[i] = ctx->message[ch];
+
+ /* wrap around to the start of the string */
+ ch = 0;
+ }
+
+ /* update the LCD */
+ ctx->cfg->update(ctx);
+
+ /* move on to the next character */
+ ctx->scroll_pos++;
+ ctx->scroll_pos %= ctx->message_len;
+
+ /* rearm the timer */
+ if (ctx->message_len > ctx->cfg->num_chars)
+ mod_timer(&ctx->timer, jiffies + ctx->scroll_rate);
+}
+
+/**
+ * img_ascii_lcd_display() - set the message to be displayed
+ * @ctx: pointer to the private data structure
+ * @msg: the message to display
+ * @count: length of msg, or -1
+ *
+ * Display a new message @msg on the LCD. @msg can be longer than the number of
+ * characters the LCD can display, in which case it will begin scrolling across
+ * the LCD display.
+ *
+ * Return: 0 on success, else -ERRNO
+ */
+static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx,
+ const char *msg, ssize_t count)
+{
+ char *new_msg;
+
+ /* stop the scroll timer */
+ del_timer_sync(&ctx->timer);
+
+ if (count == -1)
+ count = strlen(msg);
+
+ /* if the string ends with a newline, trim it */
+ if (count && msg[count - 1] == '\n')
+ count--;
+
+ /* an empty message would make the scroll modulus zero; reject it */
+ if (!count)
+ return -EINVAL;
+
+ new_msg = devm_kmalloc(&ctx->pdev->dev, count + 1, GFP_KERNEL);
+ if (!new_msg)
+ return -ENOMEM;
+
+ memcpy(new_msg, msg, count);
+ new_msg[count] = 0;
+
+ if (ctx->message)
+ devm_kfree(&ctx->pdev->dev, ctx->message);
+
+ ctx->message = new_msg;
+ ctx->message_len = count;
+ ctx->scroll_pos = 0;
+
+ /* update the LCD */
+ img_ascii_lcd_scroll((unsigned long)ctx);
+
+ return 0;
+}
+
+/**
+ * message_show() - read message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer to read the message into
+ *
+ * Read the current message being displayed or scrolled across the LCD display
+ * into @buf, for reads from sysfs.
+ *
+ * Return: the number of characters written to @buf
+ */
+static ssize_t message_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", ctx->message);
+}
+
+/**
+ * message_store() - write a new message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer containing the new message
+ * @count: the size of the message in @buf
+ *
+ * Write a new message to display or scroll across the LCD display from sysfs.
+ *
+ * Return: the size of the message on success, else -ERRNO
+ */
+static ssize_t message_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+ int err;
+
+ err = img_ascii_lcd_display(ctx, buf, count);
+ return err ?: count;
+}
+
+static DEVICE_ATTR_RW(message);
+
+/**
+ * img_ascii_lcd_probe() - probe an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Probe an LCD display device, ensuring that we have the required resources
+ * to access the LCD, and set up private data as well as sysfs files.
+ *
+ * Return: 0 on success, else -ERRNO
+ */
+static int img_ascii_lcd_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct img_ascii_lcd_config *cfg;
+ struct img_ascii_lcd_ctx *ctx;
+ struct resource *res;
+ int err;
+
+ match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ cfg = match->data;
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx) + cfg->num_chars,
+ GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (cfg->external_regmap) {
+ ctx->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
+
+ if (of_property_read_u32(pdev->dev.of_node, "offset",
+ &ctx->offset))
+ return -EINVAL;
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->base))
+ return PTR_ERR(ctx->base);
+ }
+
+ ctx->pdev = pdev;
+ ctx->cfg = cfg;
+ ctx->message = NULL;
+ ctx->scroll_pos = 0;
+ ctx->scroll_rate = HZ / 2;
+
+ /* initialise a timer for scrolling the message */
+ init_timer(&ctx->timer);
+ ctx->timer.function = img_ascii_lcd_scroll;
+ ctx->timer.data = (unsigned long)ctx;
+
+ platform_set_drvdata(pdev, ctx);
+
+ /* display a default message */
+ err = img_ascii_lcd_display(ctx, "Linux " UTS_RELEASE " ", -1);
+ if (err)
+ goto out_del_timer;
+
+ err = device_create_file(&pdev->dev, &dev_attr_message);
+ if (err)
+ goto out_del_timer;
+
+ return 0;
+out_del_timer:
+ del_timer_sync(&ctx->timer);
+ return err;
+}
+
+/**
+ * img_ascii_lcd_remove() - remove an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Remove an LCD display device, freeing private resources & ensuring that the
+ * driver stops using the LCD display registers.
+ *
+ * Return: 0
+ */
+static int img_ascii_lcd_remove(struct platform_device *pdev)
+{
+ struct img_ascii_lcd_ctx *ctx = platform_get_drvdata(pdev);
+
+ device_remove_file(&pdev->dev, &dev_attr_message);
+ del_timer_sync(&ctx->timer);
+ return 0;
+}
+
+static struct platform_driver img_ascii_lcd_driver = {
+ .driver = {
+ .name = "img-ascii-lcd",
+ .of_match_table = img_ascii_lcd_matches,
+ },
+ .probe = img_ascii_lcd_probe,
+ .remove = img_ascii_lcd_remove,
+};
+module_platform_driver(img_ascii_lcd_driver);
+
+MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display");
+MODULE_LICENSE("GPL");
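A hypothetical userspace counterpart to the "message" attribute registered above; the sysfs path is an assumption and varies with how the platform device is instantiated on a given board:

    #include <fcntl.h>
    #include <unistd.h>

    /* Hypothetical path; the real one depends on the DT node name. */
    #define LCD_MSG "/sys/devices/platform/lcd/message"

    static int lcd_write(const char *msg, size_t len)
    {
        int fd = open(LCD_MSG, O_WRONLY);
        ssize_t n;

        if (fd < 0)
            return -1;
        n = write(fd, msg, len);
        close(fd);
        return n == (ssize_t)len ? 0 : -1;
    }

Messages longer than cfg->num_chars start scrolling at scroll_rate (HZ / 2, i.e. one step every half second).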
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fdf44cac08e6..d02e7c0f5bfd 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -213,14 +213,16 @@ config DEBUG_DEVRES
If you are unsure about this, Say N here.
config DEBUG_TEST_DRIVER_REMOVE
- bool "Test driver remove calls during probe"
+ bool "Test driver remove calls during probe (UNSTABLE)"
depends on DEBUG_KERNEL
help
Say Y here if you want the Driver core to test driver remove functions
by calling probe, remove, probe. This tests the remove path without
having to unbind the driver or unload the driver module.
- If you are unsure about this, say N here.
+ This option is expected to find errors and may render your system
+ unusable. You should say N here unless you are explicitly looking to
+ test this functionality.
config SYS_HYPERVISOR
bool
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d22a7260f42b..d76cd97a98b6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -324,7 +324,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = -EPROBE_DEFER;
int local_trigger_count = atomic_read(&deferred_trigger_count);
- bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE);
+ bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
+ !drv->suppress_bind_attrs;
if (defer_all_probes) {
/*
@@ -383,7 +384,7 @@ re_probe:
if (test_remove) {
test_remove = false;
- if (dev->bus && dev->bus->remove)
+ if (dev->bus->remove)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dc75de9059cd..62c63c0c5c22 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -361,8 +361,11 @@ store_mem_state(struct device *dev,
err:
unlock_device_hotplug();
- if (ret)
+ if (ret < 0)
return ret;
+ if (ret)
+ return -EINVAL;
+
return count;
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e44944f4be77..2932a5bd892f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1027,6 +1027,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ dpm_wait_for_children(dev, async);
+
if (async_error)
goto Complete;
@@ -1038,8 +1040,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -1174,6 +1174,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
__pm_runtime_disable(dev, false);
+ dpm_wait_for_children(dev, async);
+
if (async_error)
goto Complete;
@@ -1185,8 +1187,6 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- dpm_wait_for_children(dev, async);
-
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 811e11c82f32..0809cda93cc0 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_PD_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)) {
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
@@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_P_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)){
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index ab19adb07a12..3c606c09fd5a 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -853,45 +853,6 @@ rqbiocnt(struct request *r)
return n;
}
-/* This can be removed if we are certain that no users of the block
- * layer will ever use zero-count pages in bios. Otherwise we have to
- * protect against the put_page sometimes done by the network layer.
- *
- * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
- * discussion.
- *
- * We cannot use get_page in the workaround, because it insists on a
- * positive page count as a precondition. So we use _refcount directly.
- */
-static void
-bio_pageinc(struct bio *bio)
-{
- struct bio_vec bv;
- struct page *page;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- /* Non-zero page count for non-head members of
- * compound pages is no longer allowed by the kernel.
- */
- page = compound_head(bv.bv_page);
- page_ref_inc(page);
- }
-}
-
-static void
-bio_pagedec(struct bio *bio)
-{
- struct page *page;
- struct bio_vec bv;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- page = compound_head(bv.bv_page);
- page_ref_dec(page);
- }
-}
-
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
@@ -899,7 +860,6 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
buf->rq = rq;
buf->bio = bio;
buf->iter = bio->bi_iter;
- bio_pageinc(bio);
}
static struct buf *
@@ -1127,7 +1087,6 @@ aoe_end_buf(struct aoedev *d, struct buf *buf)
if (buf == d->ip.buf)
d->ip.buf = NULL;
rq = buf->rq;
- bio_pagedec(buf->bio);
mempool_free(buf, d->bufpool);
n = (unsigned long) rq->special;
rq->special = (void *) --n;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 100be556e613..83482721bc01 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1871,7 +1871,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
drbd_update_congested(connection);
}
do {
- rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
if (rv == -EAGAIN) {
if (we_should_drop_the_connection(connection, sock))
break;
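The fix matters because drbd_send() retries from the unsent tail after a short send, adjusting iov_base/iov_len as it goes; passing the original total size would over-report what remains. A minimal sketch of the pattern (illustrative only, without drbd's congestion and signal handling):

    struct kvec iov = { .iov_base = buf, .iov_len = size };
    struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
    int rv;

    do {
        rv = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
        if (rv <= 0)
            break;
        iov.iov_base += rv;  /* resend only the unsent tail */
        iov.iov_len -= rv;
    } while (iov.iov_len);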
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c9f2107f7095..fa1b7a90ba11 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -840,13 +840,13 @@ static void loop_config_discard(struct loop_device *lo)
static void loop_unprepare_queue(struct loop_device *lo)
{
- flush_kthread_worker(&lo->worker);
+ kthread_flush_worker(&lo->worker);
kthread_stop(lo->worker_task);
}
static int loop_prepare_queue(struct loop_device *lo)
{
- init_kthread_worker(&lo->worker);
+ kthread_init_worker(&lo->worker);
lo->worker_task = kthread_run(kthread_worker_fn,
&lo->worker, "loop%d", lo->lo_number);
if (IS_ERR(lo->worker_task))
@@ -1658,7 +1658,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
break;
}
- queue_kthread_work(&lo->worker, &cmd->work);
+ kthread_queue_work(&lo->worker, &cmd->work);
return BLK_MQ_RQ_QUEUE_OK;
}
@@ -1696,14 +1696,13 @@ static int loop_init_request(void *data, struct request *rq,
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->rq = rq;
- init_kthread_work(&cmd->work, loop_queue_work);
+ kthread_init_work(&cmd->work, loop_queue_work);
return 0;
}
static struct blk_mq_ops loop_mq_ops = {
.queue_rq = loop_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_request = loop_init_request,
};
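The rename above is mechanical (init_kthread_worker() becomes kthread_init_worker(), and so on); the lifecycle loop uses is unchanged. A minimal sketch of that lifecycle with the new names (placeholder names, not from this driver):

    static struct kthread_worker worker;
    static struct kthread_work work;

    static void work_fn(struct kthread_work *w)
    {
        /* runs in the worker task's context */
    }

    static int example_start(void)
    {
        struct task_struct *task;

        kthread_init_worker(&worker);
        task = kthread_run(kthread_worker_fn, &worker, "example");
        if (IS_ERR(task))
            return PTR_ERR(task);

        kthread_init_work(&work, work_fn);
        kthread_queue_work(&worker, &work);
        kthread_flush_worker(&worker);  /* wait for work_fn to finish */
        kthread_stop(task);
        return 0;
    }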
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 2aca98e8e427..3cfd879267b2 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3686,7 +3686,7 @@ static int mtip_block_open(struct block_device *dev, fmode_t mode)
return -ENODEV;
}
-void mtip_block_release(struct gendisk *disk, fmode_t mode)
+static void mtip_block_release(struct gendisk *disk, fmode_t mode)
{
}
@@ -3895,7 +3895,6 @@ exit_handler:
static struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
.complete = mtip_softirq_done_fn,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a9e398019f38..7a1048755914 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -34,33 +34,29 @@
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
+#include <linux/blk-mq.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/nbd.h>
+#define NBD_TIMEDOUT 0
+#define NBD_DISCONNECT_REQUESTED 1
+
struct nbd_device {
u32 flags;
+ unsigned long runtime_flags;
struct socket * sock; /* If == NULL, device is not ready, yet */
int magic;
- spinlock_t queue_lock;
- struct list_head queue_head; /* Requests waiting result */
- struct request *active_req;
- wait_queue_head_t active_wq;
- struct list_head waiting_queue; /* Requests to be sent */
- wait_queue_head_t waiting_wq;
+ struct blk_mq_tag_set tag_set;
struct mutex tx_lock;
struct gendisk *disk;
int blksize;
loff_t bytesize;
- int xmit_timeout;
- bool timedout;
- bool disconnect; /* a disconnect has been requested by user */
- struct timer_list timeout_timer;
/* protects initialization and shutdown of the socket */
spinlock_t sock_lock;
struct task_struct *task_recv;
@@ -71,6 +67,11 @@ struct nbd_device {
#endif
};
+struct nbd_cmd {
+ struct nbd_device *nbd;
+ struct list_head list;
+};
+
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif
@@ -83,18 +84,6 @@ static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
-/*
- * Use just one lock (or at most 1 per NIC). Two arguments for this:
- * 1. Each NIC is essentially a synchronization point for all servers
- * accessed through that NIC so there's no need to have more locks
- * than NICs anyway.
- * 2. More locks lead to more "Dirty cache line bouncing" which will slow
- * down each lock to the point where they're actually slower than just
- * a single lock.
- * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
- */
-static DEFINE_SPINLOCK(nbd_lock);
-
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
return disk_to_dev(nbd->disk);
@@ -153,18 +142,16 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
return 0;
}
-static void nbd_end_request(struct nbd_device *nbd, struct request *req)
+static void nbd_end_request(struct nbd_cmd *cmd)
{
+ struct nbd_device *nbd = cmd->nbd;
+ struct request *req = blk_mq_rq_from_pdu(cmd);
int error = req->errors ? -EIO : 0;
- struct request_queue *q = req->q;
- unsigned long flags;
- dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
+ dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
error ? "failed" : "done");
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_end_request_all(req, error);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_mq_complete_request(req, error);
}
/*
@@ -172,40 +159,49 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req)
*/
static void sock_shutdown(struct nbd_device *nbd)
{
- spin_lock_irq(&nbd->sock_lock);
+ struct socket *sock;
+
+ spin_lock(&nbd->sock_lock);
if (!nbd->sock) {
- spin_unlock_irq(&nbd->sock_lock);
+ spin_unlock(&nbd->sock_lock);
return;
}
+ sock = nbd->sock;
dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
- kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
- sockfd_put(nbd->sock);
nbd->sock = NULL;
- spin_unlock_irq(&nbd->sock_lock);
+ spin_unlock(&nbd->sock_lock);
- del_timer(&nbd->timeout_timer);
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sockfd_put(sock);
}
-static void nbd_xmit_timeout(unsigned long arg)
+static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
+ bool reserved)
{
- struct nbd_device *nbd = (struct nbd_device *)arg;
- unsigned long flags;
-
- if (list_empty(&nbd->queue_head))
- return;
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_device *nbd = cmd->nbd;
+ struct socket *sock = NULL;
- spin_lock_irqsave(&nbd->sock_lock, flags);
+ spin_lock(&nbd->sock_lock);
- nbd->timedout = true;
+ set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
- if (nbd->sock)
- kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
+ if (nbd->sock) {
+ sock = nbd->sock;
+ get_file(sock->file);
+ }
- spin_unlock_irqrestore(&nbd->sock_lock, flags);
+ spin_unlock(&nbd->sock_lock);
+ if (sock) {
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sockfd_put(sock);
+ }
+ req->errors++;
dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
+ return BLK_EH_HANDLED;
}
/*
@@ -255,9 +251,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
tsk_restore_flags(current, pflags, PF_MEMALLOC);
- if (!send && nbd->xmit_timeout)
- mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
-
return result;
}
@@ -273,8 +266,9 @@ static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
}
/* always call with the tx_lock held */
-static int nbd_send_req(struct nbd_device *nbd, struct request *req)
+static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
int result, flags;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
@@ -298,10 +292,10 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
- memcpy(request.handle, &req, sizeof(req));
+ memcpy(request.handle, &req->tag, sizeof(req->tag));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
- req, nbdcmd_to_ascii(type),
+ cmd, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, 1, &request, sizeof(request),
(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
@@ -323,7 +317,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
if (!rq_iter_last(bvec, iter))
flags = MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
- req, bvec.bv_len);
+ cmd, bvec.bv_len);
result = sock_send_bvec(nbd, &bvec, flags);
if (result <= 0) {
dev_err(disk_to_dev(nbd->disk),
@@ -336,29 +330,6 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
return 0;
}
-static struct request *nbd_find_request(struct nbd_device *nbd,
- struct request *xreq)
-{
- struct request *req, *tmp;
- int err;
-
- err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
- if (unlikely(err))
- return ERR_PTR(err);
-
- spin_lock(&nbd->queue_lock);
- list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
- if (req != xreq)
- continue;
- list_del_init(&req->queuelist);
- spin_unlock(&nbd->queue_lock);
- return req;
- }
- spin_unlock(&nbd->queue_lock);
-
- return ERR_PTR(-ENOENT);
-}
-
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
int result;
@@ -370,11 +341,14 @@ static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
}
/* NULL returned = something went wrong, inform userspace */
-static struct request *nbd_read_stat(struct nbd_device *nbd)
+static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
{
int result;
struct nbd_reply reply;
- struct request *req;
+ struct nbd_cmd *cmd;
+ struct request *req = NULL;
+ u16 hwq;
+ int tag;
reply.magic = 0;
result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
@@ -390,25 +364,27 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
return ERR_PTR(-EPROTO);
}
- req = nbd_find_request(nbd, *(struct request **)reply.handle);
- if (IS_ERR(req)) {
- result = PTR_ERR(req);
- if (result != -ENOENT)
- return ERR_PTR(result);
+ memcpy(&tag, reply.handle, sizeof(int));
- dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
- reply.handle);
- return ERR_PTR(-EBADR);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ if (hwq < nbd->tag_set.nr_hw_queues)
+ req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
+ blk_mq_unique_tag_to_tag(tag));
+ if (!req || !blk_mq_request_started(req)) {
+ dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
+ tag, req);
+ return ERR_PTR(-ENOENT);
}
+ cmd = blk_mq_rq_to_pdu(req);
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
req->errors++;
- return req;
+ return cmd;
}
- dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
+ dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
if (rq_data_dir(req) != WRITE) {
struct req_iterator iter;
struct bio_vec bvec;
@@ -419,13 +395,13 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
req->errors++;
- return req;
+ return cmd;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
- req, bvec.bv_len);
+ cmd, bvec.bv_len);
}
}
- return req;
+ return cmd;
}
static ssize_t pid_show(struct device *dev,
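The handle round-trip above works because a blk-mq unique tag packs the hardware queue index into the upper 16 bits and the per-queue tag into the lower 16. A sketch of the decode path; note that with nr_hw_queues = 1, as this driver configures below, req->tag and the unique tag coincide, which is why the send side can copy req->tag directly:

    u32 unique = blk_mq_unique_tag(req);            /* send side */
    u16 hwq    = blk_mq_unique_tag_to_hwq(unique);  /* unique >> 16 */
    u16 tag    = blk_mq_unique_tag_to_tag(unique);  /* unique & 0xffff */

    req = blk_mq_tag_to_rq(set->tags[hwq], tag);    /* receive side */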
@@ -444,7 +420,7 @@ static struct device_attribute pid_attr = {
static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
{
- struct request *req;
+ struct nbd_cmd *cmd;
int ret;
BUG_ON(nbd->magic != NBD_MAGIC);
@@ -460,13 +436,13 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
nbd_size_update(nbd, bdev);
while (1) {
- req = nbd_read_stat(nbd);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
+ cmd = nbd_read_stat(nbd);
+ if (IS_ERR(cmd)) {
+ ret = PTR_ERR(cmd);
break;
}
- nbd_end_request(nbd, req);
+ nbd_end_request(cmd);
}
nbd_size_clear(nbd, bdev);
@@ -475,44 +451,37 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
return ret;
}
-static void nbd_clear_que(struct nbd_device *nbd)
+static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
- struct request *req;
+ struct nbd_cmd *cmd;
+
+ if (!blk_mq_request_started(req))
+ return;
+ cmd = blk_mq_rq_to_pdu(req);
+ req->errors++;
+ nbd_end_request(cmd);
+}
+static void nbd_clear_que(struct nbd_device *nbd)
+{
BUG_ON(nbd->magic != NBD_MAGIC);
/*
* Because we have set nbd->sock to NULL under the tx_lock, all
- * modifications to the list must have completed by now. For
- * the same reason, the active_req must be NULL.
- *
- * As a consequence, we don't need to take the spin lock while
- * purging the list here.
+ * modifications to the list must have completed by now.
*/
BUG_ON(nbd->sock);
- BUG_ON(nbd->active_req);
- while (!list_empty(&nbd->queue_head)) {
- req = list_entry(nbd->queue_head.next, struct request,
- queuelist);
- list_del_init(&req->queuelist);
- req->errors++;
- nbd_end_request(nbd, req);
- }
-
- while (!list_empty(&nbd->waiting_queue)) {
- req = list_entry(nbd->waiting_queue.next, struct request,
- queuelist);
- list_del_init(&req->queuelist);
- req->errors++;
- nbd_end_request(nbd, req);
- }
+ blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
-static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
+static void nbd_handle_cmd(struct nbd_cmd *cmd)
{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+ struct nbd_device *nbd = cmd->nbd;
+
if (req->cmd_type != REQ_TYPE_FS)
goto error_out;
@@ -526,6 +495,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
req->errors = 0;
mutex_lock(&nbd->tx_lock);
+ nbd->task_send = current;
if (unlikely(!nbd->sock)) {
mutex_unlock(&nbd->tx_lock);
dev_err(disk_to_dev(nbd->disk),
@@ -533,106 +503,30 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
goto error_out;
}
- nbd->active_req = req;
-
- if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
- mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
-
- if (nbd_send_req(nbd, req) != 0) {
+ if (nbd_send_cmd(nbd, cmd) != 0) {
dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
req->errors++;
- nbd_end_request(nbd, req);
- } else {
- spin_lock(&nbd->queue_lock);
- list_add_tail(&req->queuelist, &nbd->queue_head);
- spin_unlock(&nbd->queue_lock);
+ nbd_end_request(cmd);
}
- nbd->active_req = NULL;
+ nbd->task_send = NULL;
mutex_unlock(&nbd->tx_lock);
- wake_up_all(&nbd->active_wq);
return;
error_out:
req->errors++;
- nbd_end_request(nbd, req);
+ nbd_end_request(cmd);
}
-static int nbd_thread_send(void *data)
+static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct nbd_device *nbd = data;
- struct request *req;
-
- nbd->task_send = current;
-
- set_user_nice(current, MIN_NICE);
- while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
- /* wait for something to do */
- wait_event_interruptible(nbd->waiting_wq,
- kthread_should_stop() ||
- !list_empty(&nbd->waiting_queue));
-
- /* extract request */
- if (list_empty(&nbd->waiting_queue))
- continue;
-
- spin_lock_irq(&nbd->queue_lock);
- req = list_entry(nbd->waiting_queue.next, struct request,
- queuelist);
- list_del_init(&req->queuelist);
- spin_unlock_irq(&nbd->queue_lock);
-
- /* handle request */
- nbd_handle_req(nbd, req);
- }
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
- nbd->task_send = NULL;
-
- return 0;
-}
-
-/*
- * We always wait for result of write, for now. It would be nice to make it optional
- * in future
- * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
- * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
- */
-
-static void nbd_request_handler(struct request_queue *q)
- __releases(q->queue_lock) __acquires(q->queue_lock)
-{
- struct request *req;
-
- while ((req = blk_fetch_request(q)) != NULL) {
- struct nbd_device *nbd;
-
- spin_unlock_irq(q->queue_lock);
-
- nbd = req->rq_disk->private_data;
-
- BUG_ON(nbd->magic != NBD_MAGIC);
-
- dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
- req, req->cmd_type);
-
- if (unlikely(!nbd->sock)) {
- dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Attempted send on closed socket\n");
- req->errors++;
- nbd_end_request(nbd, req);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- spin_lock_irq(&nbd->queue_lock);
- list_add_tail(&req->queuelist, &nbd->waiting_queue);
- spin_unlock_irq(&nbd->queue_lock);
-
- wake_up(&nbd->waiting_wq);
-
- spin_lock_irq(q->queue_lock);
- }
+ blk_mq_start_request(bd->rq);
+ nbd_handle_cmd(cmd);
+ return BLK_MQ_RQ_QUEUE_OK;
}
static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
@@ -657,15 +551,13 @@ out:
/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
- nbd->disconnect = false;
- nbd->timedout = false;
+ nbd->runtime_flags = 0;
nbd->blksize = 1024;
nbd->bytesize = 0;
set_capacity(nbd->disk, 0);
nbd->flags = 0;
- nbd->xmit_timeout = 0;
+ nbd->tag_set.timeout = 0;
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
- del_timer_sync(&nbd->timeout_timer);
}
static void nbd_bdev_reset(struct block_device *bdev)
@@ -700,33 +592,37 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
{
switch (cmd) {
case NBD_DISCONNECT: {
- struct request sreq;
+ struct request *sreq;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
if (!nbd->sock)
return -EINVAL;
+ sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
+ if (IS_ERR(sreq))
+ return -ENOMEM;
+
mutex_unlock(&nbd->tx_lock);
fsync_bdev(bdev);
mutex_lock(&nbd->tx_lock);
- blk_rq_init(NULL, &sreq);
- sreq.cmd_type = REQ_TYPE_DRV_PRIV;
+ sreq->cmd_type = REQ_TYPE_DRV_PRIV;
/* Check again after getting mutex back. */
- if (!nbd->sock)
+ if (!nbd->sock) {
+ blk_mq_free_request(sreq);
return -EINVAL;
+ }
- nbd->disconnect = true;
+ set_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags);
- nbd_send_req(nbd, &sreq);
+ nbd_send_cmd(nbd, blk_mq_rq_to_pdu(sreq));
+ blk_mq_free_request(sreq);
return 0;
}
case NBD_CLEAR_SOCK:
sock_shutdown(nbd);
nbd_clear_que(nbd);
- BUG_ON(!list_empty(&nbd->queue_head));
- BUG_ON(!list_empty(&nbd->waiting_queue));
kill_bdev(bdev);
return 0;
@@ -758,13 +654,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return nbd_size_set(nbd, bdev, nbd->blksize, arg);
case NBD_SET_TIMEOUT:
- nbd->xmit_timeout = arg * HZ;
- if (arg)
- mod_timer(&nbd->timeout_timer,
- jiffies + nbd->xmit_timeout);
- else
- del_timer_sync(&nbd->timeout_timer);
-
+ nbd->tag_set.timeout = arg * HZ;
return 0;
case NBD_SET_FLAGS:
@@ -772,7 +662,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return 0;
case NBD_DO_IT: {
- struct task_struct *thread;
int error;
if (nbd->task_recv)
@@ -786,18 +675,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_parse_flags(nbd, bdev);
- thread = kthread_run(nbd_thread_send, nbd, "%s",
- nbd_name(nbd));
- if (IS_ERR(thread)) {
- mutex_lock(&nbd->tx_lock);
- nbd->task_recv = NULL;
- return PTR_ERR(thread);
- }
-
nbd_dev_dbg_init(nbd);
error = nbd_thread_recv(nbd, bdev);
nbd_dev_dbg_close(nbd);
- kthread_stop(thread);
mutex_lock(&nbd->tx_lock);
nbd->task_recv = NULL;
@@ -807,9 +687,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
kill_bdev(bdev);
nbd_bdev_reset(bdev);
- if (nbd->disconnect) /* user requested, ignore socket errors */
+ /* user requested, ignore socket errors */
+ if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
error = 0;
- if (nbd->timedout)
+ if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
error = -ETIMEDOUT;
nbd_reset(nbd);
@@ -825,10 +706,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return 0;
case NBD_PRINT_DEBUG:
- dev_info(disk_to_dev(nbd->disk),
- "next = %p, prev = %p, head = %p\n",
- nbd->queue_head.next, nbd->queue_head.prev,
- &nbd->queue_head);
+ /*
+ * For compatibility only, we no longer keep a list of
+ * outstanding requests.
+ */
return 0;
}
return -ENOTTY;
@@ -935,7 +816,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
- debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
+ debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
@@ -987,6 +868,23 @@ static void nbd_dbg_close(void)
#endif
+static int nbd_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->nbd = data;
+ INIT_LIST_HEAD(&cmd->list);
+ return 0;
+}
+
+static struct blk_mq_ops nbd_mq_ops = {
+ .queue_rq = nbd_queue_rq,
+ .init_request = nbd_init_request,
+ .timeout = nbd_xmit_timeout,
+};
+
/*
* And here should be modules and kernel interface
* (Just smiley confuses emacs :-)
@@ -1035,16 +933,34 @@ static int __init nbd_init(void)
if (!disk)
goto out;
nbd_dev[i].disk = disk;
+
+ nbd_dev[i].tag_set.ops = &nbd_mq_ops;
+ nbd_dev[i].tag_set.nr_hw_queues = 1;
+ nbd_dev[i].tag_set.queue_depth = 128;
+ nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
+ nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
+ nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
+ nbd_dev[i].tag_set.driver_data = &nbd_dev[i];
+
+ err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
+ if (err) {
+ put_disk(disk);
+ goto out;
+ }
+
/*
* The new linux 2.5 block layer implementation requires
* every gendisk to have its very own request_queue struct.
* These structs are big so we dynamically allocate them.
*/
- disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
+ disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
if (!disk->queue) {
+ blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
goto out;
}
+
/*
* Tell the block layer that we are not a rotational device
*/
@@ -1069,16 +985,8 @@ static int __init nbd_init(void)
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
nbd_dev[i].magic = NBD_MAGIC;
- INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
- spin_lock_init(&nbd_dev[i].queue_lock);
spin_lock_init(&nbd_dev[i].sock_lock);
- INIT_LIST_HEAD(&nbd_dev[i].queue_head);
mutex_init(&nbd_dev[i].tx_lock);
- init_timer(&nbd_dev[i].timeout_timer);
- nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
- nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
- init_waitqueue_head(&nbd_dev[i].active_wq);
- init_waitqueue_head(&nbd_dev[i].waiting_wq);
disk->major = NBD_MAJOR;
disk->first_minor = i << part_shift;
disk->fops = &nbd_fops;
@@ -1091,6 +999,7 @@ static int __init nbd_init(void)
return 0;
out:
while (i--) {
+ blk_mq_free_tag_set(&nbd_dev[i].tag_set);
blk_cleanup_queue(nbd_dev[i].disk->queue);
put_disk(nbd_dev[i].disk);
}
@@ -1110,6 +1019,7 @@ static void __exit nbd_cleanup(void)
if (disk) {
del_gendisk(disk);
blk_cleanup_queue(disk->queue);
+ blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
}
}
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 75a7f88d6717..ba6f4a2e73db 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -34,6 +34,7 @@ struct nullb {
unsigned int index;
struct request_queue *q;
struct gendisk *disk;
+ struct nvm_dev *ndev;
struct blk_mq_tag_set tag_set;
struct hrtimer timer;
unsigned int queue_depth;
@@ -393,7 +394,6 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
.complete = null_softirq_done_fn,
};
@@ -414,23 +414,6 @@ static void cleanup_queues(struct nullb *nullb)
kfree(nullb->queues);
}
-static void null_del_dev(struct nullb *nullb)
-{
- list_del_init(&nullb->list);
-
- if (use_lightnvm)
- nvm_unregister(nullb->disk_name);
- else
- del_gendisk(nullb->disk);
- blk_cleanup_queue(nullb->q);
- if (queue_mode == NULL_Q_MQ)
- blk_mq_free_tag_set(&nullb->tag_set);
- if (!use_lightnvm)
- put_disk(nullb->disk);
- cleanup_queues(nullb);
- kfree(nullb);
-}
-
#ifdef CONFIG_NVM
static void null_lnvm_end_io(struct request *rq, int error)
@@ -564,10 +547,58 @@ static struct nvm_dev_ops null_lnvm_dev_ops = {
/* Simulate nvme protocol restriction */
.max_phys_sect = 64,
};
+
+static int null_nvm_register(struct nullb *nullb)
+{
+ struct nvm_dev *dev;
+ int rv;
+
+ dev = nvm_alloc_dev(0);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->q = nullb->q;
+ memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
+ dev->ops = &null_lnvm_dev_ops;
+
+ rv = nvm_register(dev);
+ if (rv) {
+ kfree(dev);
+ return rv;
+ }
+ nullb->ndev = dev;
+ return 0;
+}
+
+static void null_nvm_unregister(struct nullb *nullb)
+{
+ nvm_unregister(nullb->ndev);
+}
#else
-static struct nvm_dev_ops null_lnvm_dev_ops;
+static int null_nvm_register(struct nullb *nullb)
+{
+ return -EINVAL;
+}
+static void null_nvm_unregister(struct nullb *nullb) {}
#endif /* CONFIG_NVM */
+static void null_del_dev(struct nullb *nullb)
+{
+ list_del_init(&nullb->list);
+
+ if (use_lightnvm)
+ null_nvm_unregister(nullb);
+ else
+ del_gendisk(nullb->disk);
+ blk_cleanup_queue(nullb->q);
+ if (queue_mode == NULL_Q_MQ)
+ blk_mq_free_tag_set(&nullb->tag_set);
+ if (!use_lightnvm)
+ put_disk(nullb->disk);
+ cleanup_queues(nullb);
+ kfree(nullb);
+}
+
static int null_open(struct block_device *bdev, fmode_t mode)
{
return 0;
@@ -640,11 +671,32 @@ static int init_driver_queues(struct nullb *nullb)
return 0;
}
-static int null_add_dev(void)
+static int null_gendisk_register(struct nullb *nullb)
{
struct gendisk *disk;
- struct nullb *nullb;
sector_t size;
+
+ disk = nullb->disk = alloc_disk_node(1, home_node);
+ if (!disk)
+ return -ENOMEM;
+ size = gb * 1024 * 1024 * 1024ULL;
+ set_capacity(disk, size >> 9);
+
+ disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
+ disk->major = null_major;
+ disk->first_minor = nullb->index;
+ disk->fops = &null_fops;
+ disk->private_data = nullb;
+ disk->queue = nullb->q;
+ strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
+ add_disk(disk);
+ return 0;
+}
+
+static int null_add_dev(void)
+{
+ struct nullb *nullb;
int rv;
nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
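As a reminder of the unit convention in null_gendisk_register() above: set_capacity() always counts 512-byte sectors, regardless of the logical block size, hence the size >> 9. Worked example:

    /* gb = 4:
     *   size      = 4 * 1024 * 1024 * 1024 = 4294967296 bytes
     *   size >> 9 = 8388608 sectors handed to set_capacity()
     */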
@@ -716,42 +768,19 @@ static int null_add_dev(void)
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (use_lightnvm) {
- rv = nvm_register(nullb->q, nullb->disk_name,
- &null_lnvm_dev_ops);
- if (rv)
- goto out_cleanup_blk_queue;
- goto done;
- }
-
- disk = nullb->disk = alloc_disk_node(1, home_node);
- if (!disk) {
- rv = -ENOMEM;
- goto out_cleanup_lightnvm;
- }
- size = gb * 1024 * 1024 * 1024ULL;
- set_capacity(disk, size >> 9);
-
- disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
- disk->major = null_major;
- disk->first_minor = nullb->index;
- disk->fops = &null_fops;
- disk->private_data = nullb;
- disk->queue = nullb->q;
- strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ if (use_lightnvm)
+ rv = null_nvm_register(nullb);
+ else
+ rv = null_gendisk_register(nullb);
- add_disk(disk);
+ if (rv)
+ goto out_cleanup_blk_queue;
-done:
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
return 0;
-
-out_cleanup_lightnvm:
- if (use_lightnvm)
- nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6c6519f6492a..7b274ff4632c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -31,6 +31,7 @@
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
+#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>
@@ -114,12 +115,17 @@ static int atomic_dec_return_safe(atomic_t *v)
#define RBD_OBJ_PREFIX_LEN_MAX 64
+#define RBD_NOTIFY_TIMEOUT 5 /* seconds */
+#define RBD_RETRY_DELAY msecs_to_jiffies(1000)
+
/* Feature bits */
#define RBD_FEATURE_LAYERING (1<<0)
#define RBD_FEATURE_STRIPINGV2 (1<<1)
-#define RBD_FEATURES_ALL \
- (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
+#define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2)
+#define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
+ RBD_FEATURE_STRIPINGV2 | \
+ RBD_FEATURE_EXCLUSIVE_LOCK)
/* Features supported by this (client software) implementation. */
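Elsewhere in this file the client refuses images whose incompatible feature bits fall outside this mask; a sketch of that check, simplified from the probe path:

    u64 unsup = features & ~RBD_FEATURES_ALL;

    if (unsup) {
        rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
                 unsup);
        return -ENXIO;
    }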
@@ -128,11 +134,8 @@ static int atomic_dec_return_safe(atomic_t *v)
/*
* An RBD device name will be "rbd#", where the "rbd" comes from
* RBD_DRV_NAME above, and # is a unique integer identifier.
- * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
- * enough to hold all possible device names.
*/
#define DEV_NAME_LEN 32
-#define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
/*
* block device image metadata (in-memory version)
@@ -322,6 +325,24 @@ struct rbd_img_request {
#define for_each_obj_request_safe(ireq, oreq, n) \
list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
+enum rbd_watch_state {
+ RBD_WATCH_STATE_UNREGISTERED,
+ RBD_WATCH_STATE_REGISTERED,
+ RBD_WATCH_STATE_ERROR,
+};
+
+enum rbd_lock_state {
+ RBD_LOCK_STATE_UNLOCKED,
+ RBD_LOCK_STATE_LOCKED,
+ RBD_LOCK_STATE_RELEASING,
+};
+
+/* WatchNotify::ClientId */
+struct rbd_client_id {
+ u64 gid;
+ u64 handle;
+};
+
struct rbd_mapping {
u64 size;
u64 features;
@@ -349,13 +370,29 @@ struct rbd_device {
unsigned long flags; /* possibly lock protected */
struct rbd_spec *spec;
struct rbd_options *opts;
+ char *config_info; /* add{,_single_major} string */
struct ceph_object_id header_oid;
struct ceph_object_locator header_oloc;
- struct ceph_file_layout layout;
+ struct ceph_file_layout layout; /* used for all rbd requests */
+ struct mutex watch_mutex;
+ enum rbd_watch_state watch_state;
struct ceph_osd_linger_request *watch_handle;
+ u64 watch_cookie;
+ struct delayed_work watch_dwork;
+
+ struct rw_semaphore lock_rwsem;
+ enum rbd_lock_state lock_state;
+ struct rbd_client_id owner_cid;
+ struct work_struct acquired_lock_work;
+ struct work_struct released_lock_work;
+ struct delayed_work lock_dwork;
+ struct work_struct unlock_work;
+ wait_queue_head_t lock_waitq;
+
+ struct workqueue_struct *task_wq;
struct rbd_spec *parent_spec;
u64 parent_overlap;
@@ -378,15 +415,15 @@ struct rbd_device {
};
/*
- * Flag bits for rbd_dev->flags. If atomicity is required,
- * rbd_dev->lock is used to protect access.
- *
- * Currently, only the "removing" flag (which is coupled with the
- * "open_count" field) requires atomic access.
+ * Flag bits for rbd_dev->flags:
+ * - REMOVING (which is coupled with rbd_dev->open_count) is protected
+ * by rbd_dev->lock
+ * - BLACKLISTED is protected by rbd_dev->lock_rwsem
*/
enum rbd_dev_flags {
RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
+ RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
@@ -439,6 +476,29 @@ static int minor_to_rbd_dev_id(int minor)
return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
+static bool rbd_is_lock_supported(struct rbd_device *rbd_dev)
+{
+ return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
+ rbd_dev->spec->snap_id == CEPH_NOSNAP &&
+ !rbd_dev->mapping.read_only;
+}
+
+static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
+{
+ return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
+ rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
+}
+
+static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
+{
+ bool is_lock_owner;
+
+ down_read(&rbd_dev->lock_rwsem);
+ is_lock_owner = __rbd_is_lock_owner(rbd_dev);
+ up_read(&rbd_dev->lock_rwsem);
+ return is_lock_owner;
+}
+
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
@@ -735,6 +795,7 @@ enum {
/* string args above */
Opt_read_only,
Opt_read_write,
+ Opt_lock_on_read,
Opt_err
};
@@ -746,16 +807,19 @@ static match_table_t rbd_opts_tokens = {
{Opt_read_only, "ro"}, /* Alternate spelling */
{Opt_read_write, "read_write"},
{Opt_read_write, "rw"}, /* Alternate spelling */
+ {Opt_lock_on_read, "lock_on_read"},
{Opt_err, NULL}
};
struct rbd_options {
int queue_depth;
bool read_only;
+ bool lock_on_read;
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT false
+#define RBD_LOCK_ON_READ_DEFAULT false
static int parse_rbd_opts_token(char *c, void *private)
{
@@ -791,6 +855,9 @@ static int parse_rbd_opts_token(char *c, void *private)
case Opt_read_write:
rbd_opts->read_only = false;
break;
+ case Opt_lock_on_read:
+ rbd_opts->lock_on_read = true;
+ break;
default:
/* libceph prints "bad option" msg */
return -EINVAL;
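
With the new token, reads can be made to take the exclusive lock too by appending lock_on_read to the options field of the sysfs add string. A hypothetical user-space sketch (monitor address, credentials, pool and image name are placeholders; the spec layout follows rbd_add_parse_args() further down):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* "<mon_addr> <options> <pool> <image>" - placeholders throughout */
		const char *spec = "192.168.0.1:6789 name=admin,lock_on_read rbd foo";
		int fd = open("/sys/bus/rbd/add", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, spec, strlen(spec)) != (ssize_t)strlen(spec))
			return 1;
		return close(fd);
	}
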
@@ -919,7 +986,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
char *snap_names = NULL;
u64 *snap_sizes = NULL;
u32 snap_count;
- size_t size;
int ret = -ENOMEM;
u32 i;
@@ -957,9 +1023,9 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
goto out_err;
/* ...as well as the array of their sizes. */
-
- size = snap_count * sizeof (*header->snap_sizes);
- snap_sizes = kmalloc(size, GFP_KERNEL);
+ snap_sizes = kmalloc_array(snap_count,
+ sizeof(*header->snap_sizes),
+ GFP_KERNEL);
if (!snap_sizes)
goto out_err;
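
The kmalloc_array() conversion is not just cosmetic: it fails the allocation instead of letting snap_count * sizeof(*header->snap_sizes) wrap. Semantically it is roughly this (a sketch, not the kernel's exact implementation):

	static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;		/* n * size would overflow */
		return kmalloc(n * size, flags);
	}
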
@@ -1551,11 +1617,18 @@ static bool obj_request_type_valid(enum obj_request_type type)
}
}
-static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
- struct rbd_obj_request *obj_request)
+static void rbd_img_obj_callback(struct rbd_obj_request *obj_request);
+
+static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
- dout("%s %p\n", __func__, obj_request);
- return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
+ struct ceph_osd_request *osd_req = obj_request->osd_req;
+
+ dout("%s %p osd_req %p\n", __func__, obj_request, osd_req);
+ if (obj_request_img_data_test(obj_request)) {
+ WARN_ON(obj_request->callback != rbd_img_obj_callback);
+ rbd_img_request_get(obj_request->img_request);
+ }
+ ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}
static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
@@ -1745,6 +1818,22 @@ static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
complete_all(&obj_request->completion);
}
+static void rbd_obj_request_error(struct rbd_obj_request *obj_request, int err)
+{
+ obj_request->result = err;
+ obj_request->xferred = 0;
+ /*
+ * kludge - mirror rbd_obj_request_submit() to match a put in
+ * rbd_img_obj_callback()
+ */
+ if (obj_request_img_data_test(obj_request)) {
+ WARN_ON(obj_request->callback != rbd_img_obj_callback);
+ rbd_img_request_get(obj_request->img_request);
+ }
+ obj_request_done_set(obj_request);
+ rbd_obj_request_complete(obj_request);
+}
+
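
The get here deliberately mirrors the one in rbd_obj_request_submit() so every completion path balances the single put in rbd_img_obj_callback():

	/*
	 * Image-data request reference pairing:
	 *
	 *	rbd_obj_request_submit() -> rbd_img_request_get()
	 *	rbd_obj_request_error()  -> rbd_img_request_get()  (error path)
	 *	rbd_img_obj_callback()   -> rbd_img_request_put()  (one per get)
	 */
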
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = NULL;
@@ -1877,11 +1966,10 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
- struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_osd_request *osd_req = obj_request->osd_req;
- if (img_request)
- osd_req->r_snapid = img_request->snap_id;
+ rbd_assert(obj_request_img_data_test(obj_request));
+ osd_req->r_snapid = obj_request->img_request->snap_id;
}
static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
@@ -2074,7 +2162,9 @@ static void rbd_obj_request_destroy(struct kref *kref)
bio_chain_put(obj_request->bio_list);
break;
case OBJ_REQUEST_PAGES:
- if (obj_request->pages)
+ /* img_data requests don't own their page array */
+ if (obj_request->pages &&
+ !obj_request_img_data_test(obj_request))
ceph_release_page_vector(obj_request->pages,
obj_request->page_count);
break;
@@ -2295,13 +2385,6 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
xferred = obj_request->length;
}
- /* Image object requests don't own their page array */
-
- if (obj_request->type == OBJ_REQUEST_PAGES) {
- obj_request->pages = NULL;
- obj_request->page_count = 0;
- }
-
if (img_request_child_test(img_request)) {
rbd_assert(img_request->obj_request != NULL);
more = obj_request->which < img_request->obj_request_count - 1;
@@ -2520,8 +2603,6 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
- rbd_img_request_get(img_request);
-
img_offset += length;
resid -= length;
}
@@ -2579,7 +2660,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
struct rbd_obj_request *orig_request;
struct ceph_osd_request *osd_req;
- struct ceph_osd_client *osdc;
struct rbd_device *rbd_dev;
struct page **pages;
enum obj_operation_type op_type;
@@ -2603,7 +2683,7 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
rbd_assert(obj_request_type_valid(orig_request->type));
img_result = img_request->result;
parent_length = img_request->length;
- rbd_assert(parent_length == img_request->xferred);
+ rbd_assert(img_result || parent_length == img_request->xferred);
rbd_img_request_put(img_request);
rbd_assert(orig_request->img_request);
@@ -2616,13 +2696,9 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
* and re-submit the original write request.
*/
if (!rbd_dev->parent_overlap) {
- struct ceph_osd_client *osdc;
-
ceph_release_page_vector(pages, page_count);
- osdc = &rbd_dev->rbd_client->client->osdc;
- img_result = rbd_obj_request_submit(osdc, orig_request);
- if (!img_result)
- return;
+ rbd_obj_request_submit(orig_request);
+ return;
}
if (img_result)
@@ -2656,17 +2732,12 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
/* All set, send it off. */
- osdc = &rbd_dev->rbd_client->client->osdc;
- img_result = rbd_obj_request_submit(osdc, orig_request);
- if (!img_result)
- return;
-out_err:
- /* Record the error code and complete the request */
+ rbd_obj_request_submit(orig_request);
+ return;
- orig_request->result = img_result;
- orig_request->xferred = 0;
- obj_request_done_set(orig_request);
- rbd_obj_request_complete(orig_request);
+out_err:
+ ceph_release_page_vector(pages, page_count);
+ rbd_obj_request_error(orig_request, img_result);
}
/*
@@ -2680,26 +2751,19 @@ out_err:
* When the read completes, this page array will be transferred to
* the original object request for the copyup operation.
*
- * If an error occurs, record it as the result of the original
- * object request and mark it done so it gets completed.
+ * If an error occurs, it is recorded as the result of the original
+ * object request in rbd_img_obj_exists_callback().
*/
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
- struct rbd_img_request *img_request = NULL;
+ struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
struct rbd_img_request *parent_request = NULL;
- struct rbd_device *rbd_dev;
u64 img_offset;
u64 length;
struct page **pages = NULL;
u32 page_count;
int result;
- rbd_assert(obj_request_img_data_test(obj_request));
- rbd_assert(obj_request_type_valid(obj_request->type));
-
- img_request = obj_request->img_request;
- rbd_assert(img_request != NULL);
- rbd_dev = img_request->rbd_dev;
rbd_assert(rbd_dev->parent != NULL);
/*
@@ -2740,10 +2804,11 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
if (result)
goto out_err;
+
parent_request->copyup_pages = pages;
parent_request->copyup_page_count = page_count;
-
parent_request->callback = rbd_img_obj_parent_read_full_callback;
+
result = rbd_img_request_submit(parent_request);
if (!result)
return 0;
@@ -2757,10 +2822,6 @@ out_err:
ceph_release_page_vector(pages, page_count);
if (parent_request)
rbd_img_request_put(parent_request);
- obj_request->result = result;
- obj_request->xferred = 0;
- obj_request_done_set(obj_request);
-
return result;
}
@@ -2793,17 +2854,13 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
/*
* If the overlap has become 0 (most likely because the
- * image has been flattened) we need to free the pages
- * and re-submit the original write request.
+ * image has been flattened) we need to re-submit the
+ * original request.
*/
rbd_dev = orig_request->img_request->rbd_dev;
if (!rbd_dev->parent_overlap) {
- struct ceph_osd_client *osdc;
-
- osdc = &rbd_dev->rbd_client->client->osdc;
- result = rbd_obj_request_submit(osdc, orig_request);
- if (!result)
- return;
+ rbd_obj_request_submit(orig_request);
+ return;
}
/*
@@ -2816,31 +2873,45 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
obj_request_existence_set(orig_request, true);
} else if (result == -ENOENT) {
obj_request_existence_set(orig_request, false);
- } else if (result) {
- orig_request->result = result;
- goto out;
+ } else {
+ goto fail_orig_request;
}
/*
* Resubmit the original request now that we have recorded
* whether the target object exists.
*/
- orig_request->result = rbd_img_obj_request_submit(orig_request);
-out:
- if (orig_request->result)
- rbd_obj_request_complete(orig_request);
+ result = rbd_img_obj_request_submit(orig_request);
+ if (result)
+ goto fail_orig_request;
+
+ return;
+
+fail_orig_request:
+ rbd_obj_request_error(orig_request, result);
}
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
+ struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
struct rbd_obj_request *stat_request;
- struct rbd_device *rbd_dev;
- struct ceph_osd_client *osdc;
- struct page **pages = NULL;
+ struct page **pages;
u32 page_count;
size_t size;
int ret;
+ stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
+ OBJ_REQUEST_PAGES);
+ if (!stat_request)
+ return -ENOMEM;
+
+ stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
+ stat_request);
+ if (!stat_request->osd_req) {
+ ret = -ENOMEM;
+ goto fail_stat_request;
+ }
+
/*
* The response data for a STAT call consists of:
* le64 length;
@@ -2852,52 +2923,33 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto fail_stat_request;
+ }
- ret = -ENOMEM;
- stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
- OBJ_REQUEST_PAGES);
- if (!stat_request)
- goto out;
+ osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
+ osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
+ false, false);
rbd_obj_request_get(obj_request);
stat_request->obj_request = obj_request;
stat_request->pages = pages;
stat_request->page_count = page_count;
-
- rbd_assert(obj_request->img_request);
- rbd_dev = obj_request->img_request->rbd_dev;
- stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
- stat_request);
- if (!stat_request->osd_req)
- goto out;
stat_request->callback = rbd_img_obj_exists_callback;
- osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
- osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
- false, false);
- rbd_osd_req_format_read(stat_request);
-
- osdc = &rbd_dev->rbd_client->client->osdc;
- ret = rbd_obj_request_submit(osdc, stat_request);
-out:
- if (ret)
- rbd_obj_request_put(obj_request);
+ rbd_obj_request_submit(stat_request);
+ return 0;
+fail_stat_request:
+ rbd_obj_request_put(stat_request);
return ret;
}
static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
{
- struct rbd_img_request *img_request;
- struct rbd_device *rbd_dev;
-
- rbd_assert(obj_request_img_data_test(obj_request));
-
- img_request = obj_request->img_request;
- rbd_assert(img_request);
- rbd_dev = img_request->rbd_dev;
+ struct rbd_img_request *img_request = obj_request->img_request;
+ struct rbd_device *rbd_dev = img_request->rbd_dev;
/* Reads */
if (!img_request_write_test(img_request) &&
@@ -2936,14 +2988,13 @@ static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
- if (img_obj_request_simple(obj_request)) {
- struct rbd_device *rbd_dev;
- struct ceph_osd_client *osdc;
-
- rbd_dev = obj_request->img_request->rbd_dev;
- osdc = &rbd_dev->rbd_client->client->osdc;
+ rbd_assert(obj_request_img_data_test(obj_request));
+ rbd_assert(obj_request_type_valid(obj_request->type));
+ rbd_assert(obj_request->img_request);
- return rbd_obj_request_submit(osdc, obj_request);
+ if (img_obj_request_simple(obj_request)) {
+ rbd_obj_request_submit(obj_request);
+ return 0;
}
/*
@@ -3006,12 +3057,8 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
rbd_assert(obj_request->img_request);
rbd_dev = obj_request->img_request->rbd_dev;
if (!rbd_dev->parent_overlap) {
- struct ceph_osd_client *osdc;
-
- osdc = &rbd_dev->rbd_client->client->osdc;
- img_result = rbd_obj_request_submit(osdc, obj_request);
- if (!img_result)
- return;
+ rbd_obj_request_submit(obj_request);
+ return;
}
obj_request->result = img_result;
@@ -3084,65 +3131,724 @@ out_err:
obj_request_done_set(obj_request);
}
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
-static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);
+static const struct rbd_client_id rbd_empty_cid;
-static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
- u64 notifier_id, void *data, size_t data_len)
+static bool rbd_cid_equal(const struct rbd_client_id *lhs,
+ const struct rbd_client_id *rhs)
+{
+ return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
+}
+
+static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
+{
+ struct rbd_client_id cid;
+
+ mutex_lock(&rbd_dev->watch_mutex);
+ cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
+ cid.handle = rbd_dev->watch_cookie;
+ mutex_unlock(&rbd_dev->watch_mutex);
+ return cid;
+}
+
+/*
+ * lock_rwsem must be held for write
+ */
+static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
+ const struct rbd_client_id *cid)
+{
+ dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
+ rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
+ cid->gid, cid->handle);
+ rbd_dev->owner_cid = *cid; /* struct */
+}
+
+static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
+{
+ mutex_lock(&rbd_dev->watch_mutex);
+ sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
+ mutex_unlock(&rbd_dev->watch_mutex);
+}
+
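
The cookie built here is RBD_LOCK_COOKIE_PREFIX ("auto", defined in rbd_types.h below) followed by the watch cookie, and find_watcher() parses it back with the matching sscanf(). A worst-case u64 shows why the char cookie[32] buffers in rbd_lock()/rbd_unlock() are large enough:

	char cookie[32];
	u64 watch_cookie = 18446744073709551615ULL;	/* U64_MAX */
	u64 parsed;

	sprintf(cookie, "%s %llu", RBD_LOCK_COOKIE_PREFIX, watch_cookie);
	/* "auto 18446744073709551615" - 25 chars + NUL = 26 bytes */
	sscanf(cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &parsed);
	/* parsed == watch_cookie */
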
+/*
+ * lock_rwsem must be held for write
+ */
+static int rbd_lock(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+ char cookie[32];
+ int ret;
+
+ WARN_ON(__rbd_is_lock_owner(rbd_dev));
+
+ format_lock_cookie(rbd_dev, cookie);
+ ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+ RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
+ RBD_LOCK_TAG, "", 0);
+ if (ret)
+ return ret;
+
+ rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
+ rbd_set_owner_cid(rbd_dev, &cid);
+ queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+ return 0;
+}
+
+/*
+ * lock_rwsem must be held for write
+ */
+static int rbd_unlock(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ char cookie[32];
+ int ret;
+
+ WARN_ON(!__rbd_is_lock_owner(rbd_dev));
+
+ rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
+
+ format_lock_cookie(rbd_dev, cookie);
+ ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+ RBD_LOCK_NAME, cookie);
+ if (ret && ret != -ENOENT) {
+ rbd_warn(rbd_dev, "cls_unlock failed: %d", ret);
+ return ret;
+ }
+
+ rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
+ queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
+ return 0;
+}
+
+static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
+ enum rbd_notify_op notify_op,
+ struct page ***preply_pages,
+ size_t *preply_len)
{
- struct rbd_device *rbd_dev = arg;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+ int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
+ char buf[buf_size];
+ void *p = buf;
+
+ dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
+
+ /* encode *LockPayload NotifyMessage (op + ClientId) */
+ ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
+ ceph_encode_32(&p, notify_op);
+ ceph_encode_64(&p, cid.gid);
+ ceph_encode_64(&p, cid.handle);
+
+ return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, buf, buf_size,
+ RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
+}
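
For reference, ceph_start_encoding() emits a one-byte version, a one-byte compat version and a le32 payload length (the CEPH_ENCODING_START_BLK_LEN bytes), so the buffer encoded above lays out as:

	/*
	 * NotifyMessage, buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN:
	 *
	 *	u8   struct_v      = 2
	 *	u8   struct_compat = 1
	 *	le32 struct_len    = 20		(notify_op + gid + handle)
	 *	le32 notify_op
	 *	le64 gid
	 *	le64 handle
	 */
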
+
+static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
+ enum rbd_notify_op notify_op)
+{
+ struct page **reply_pages;
+ size_t reply_len;
+
+ __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
+ ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
+}
+
+static void rbd_notify_acquired_lock(struct work_struct *work)
+{
+ struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
+ acquired_lock_work);
+
+ rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
+}
+
+static void rbd_notify_released_lock(struct work_struct *work)
+{
+ struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
+ released_lock_work);
+
+ rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
+}
+
+static int rbd_request_lock(struct rbd_device *rbd_dev)
+{
+ struct page **reply_pages;
+ size_t reply_len;
+ bool lock_owner_responded = false;
+ int ret;
+
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+ ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
+ &reply_pages, &reply_len);
+ if (ret && ret != -ETIMEDOUT) {
+ rbd_warn(rbd_dev, "failed to request lock: %d", ret);
+ goto out;
+ }
+
+ if (reply_len > 0 && reply_len <= PAGE_SIZE) {
+ void *p = page_address(reply_pages[0]);
+ void *const end = p + reply_len;
+ u32 n;
+
+ ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
+ while (n--) {
+ u8 struct_v;
+ u32 len;
+
+ ceph_decode_need(&p, end, 8 + 8, e_inval);
+ p += 8 + 8; /* skip gid and cookie */
+
+ ceph_decode_32_safe(&p, end, len, e_inval);
+ if (!len)
+ continue;
+
+ if (lock_owner_responded) {
+ rbd_warn(rbd_dev,
+ "duplicate lock owners detected");
+ ret = -EIO;
+ goto out;
+ }
+
+ lock_owner_responded = true;
+ ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
+ &struct_v, &len);
+ if (ret) {
+ rbd_warn(rbd_dev,
+ "failed to decode ResponseMessage: %d",
+ ret);
+ goto e_inval;
+ }
+
+ ret = ceph_decode_32(&p);
+ }
+ }
+
+ if (!lock_owner_responded) {
+ rbd_warn(rbd_dev, "no lock owners detected");
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
+ return ret;
+
+e_inval:
+ ret = -EINVAL;
+ goto out;
+}
+
+static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
+{
+ dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
+
+ cancel_delayed_work(&rbd_dev->lock_dwork);
+ if (wake_all)
+ wake_up_all(&rbd_dev->lock_waitq);
+ else
+ wake_up(&rbd_dev->lock_waitq);
+}
+
+static int get_lock_owner_info(struct rbd_device *rbd_dev,
+ struct ceph_locker **lockers, u32 *num_lockers)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ u8 lock_type;
+ char *lock_tag;
+ int ret;
+
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+ ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, RBD_LOCK_NAME,
+ &lock_type, &lock_tag, lockers, num_lockers);
+ if (ret)
+ return ret;
+
+ if (*num_lockers == 0) {
+ dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
+ goto out;
+ }
+
+ if (strcmp(lock_tag, RBD_LOCK_TAG)) {
+ rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
+ lock_tag);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (lock_type == CEPH_CLS_LOCK_SHARED) {
+ rbd_warn(rbd_dev, "shared lock type detected");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
+ strlen(RBD_LOCK_COOKIE_PREFIX))) {
+ rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
+ (*lockers)[0].id.cookie);
+ ret = -EBUSY;
+ goto out;
+ }
+
+out:
+ kfree(lock_tag);
+ return ret;
+}
+
+static int find_watcher(struct rbd_device *rbd_dev,
+ const struct ceph_locker *locker)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ struct ceph_watch_item *watchers;
+ u32 num_watchers;
+ u64 cookie;
+ int i;
+ int ret;
+
+ ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, &watchers,
+ &num_watchers);
+ if (ret)
+ return ret;
+
+ sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
+ for (i = 0; i < num_watchers; i++) {
+ if (!memcmp(&watchers[i].addr, &locker->info.addr,
+ sizeof(locker->info.addr)) &&
+ watchers[i].cookie == cookie) {
+ struct rbd_client_id cid = {
+ .gid = le64_to_cpu(watchers[i].name.num),
+ .handle = cookie,
+ };
+
+ dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
+ rbd_dev, cid.gid, cid.handle);
+ rbd_set_owner_cid(rbd_dev, &cid);
+ ret = 1;
+ goto out;
+ }
+ }
+
+ dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
+ ret = 0;
+out:
+ kfree(watchers);
+ return ret;
+}
+
+/*
+ * lock_rwsem must be held for write
+ */
+static int rbd_try_lock(struct rbd_device *rbd_dev)
+{
+ struct ceph_client *client = rbd_dev->rbd_client->client;
+ struct ceph_locker *lockers;
+ u32 num_lockers;
+ int ret;
+
+ for (;;) {
+ ret = rbd_lock(rbd_dev);
+ if (ret != -EBUSY)
+ return ret;
+
+ /* determine if the current lock holder is still alive */
+ ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
+ if (ret)
+ return ret;
+
+ if (num_lockers == 0)
+ goto again;
+
+ ret = find_watcher(rbd_dev, lockers);
+ if (ret) {
+ if (ret > 0)
+ ret = 0; /* have to request lock */
+ goto out;
+ }
+
+ rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
+ ENTITY_NAME(lockers[0].id.name));
+
+ ret = ceph_monc_blacklist_add(&client->monc,
+ &lockers[0].info.addr);
+ if (ret) {
+ rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
+ ENTITY_NAME(lockers[0].id.name), ret);
+ goto out;
+ }
+
+ ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, RBD_LOCK_NAME,
+ lockers[0].id.cookie,
+ &lockers[0].id.name);
+ if (ret && ret != -ENOENT)
+ goto out;
+
+again:
+ ceph_free_lockers(lockers, num_lockers);
+ }
+
+out:
+ ceph_free_lockers(lockers, num_lockers);
+ return ret;
+}
+
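
The retry loop above reads roughly as follows:

	/*
	 *	rbd_lock() ------------- anything but -EBUSY -> done
	 *	get_lock_owner_info() -- no lockers left -> retry rbd_lock()
	 *	find_watcher() --------- holder's watch alive -> return 0 and
	 *				 let rbd_request_lock() ask politely
	 *	otherwise: blacklist the holder, break its lock, retry
	 */
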
+/*
+ * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
+ */
+static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
+ int *pret)
+{
+ enum rbd_lock_state lock_state;
+
+ down_read(&rbd_dev->lock_rwsem);
+ dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
+ rbd_dev->lock_state);
+ if (__rbd_is_lock_owner(rbd_dev)) {
+ lock_state = rbd_dev->lock_state;
+ up_read(&rbd_dev->lock_rwsem);
+ return lock_state;
+ }
+
+ up_read(&rbd_dev->lock_rwsem);
+ down_write(&rbd_dev->lock_rwsem);
+ dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
+ rbd_dev->lock_state);
+ if (!__rbd_is_lock_owner(rbd_dev)) {
+ *pret = rbd_try_lock(rbd_dev);
+ if (*pret)
+ rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
+ }
+
+ lock_state = rbd_dev->lock_state;
+ up_write(&rbd_dev->lock_rwsem);
+ return lock_state;
+}
+
+static void rbd_acquire_lock(struct work_struct *work)
+{
+ struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
+ struct rbd_device, lock_dwork);
+ enum rbd_lock_state lock_state;
int ret;
- dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
- cookie, notify_id);
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+again:
+ lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
+ if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
+ if (lock_state == RBD_LOCK_STATE_LOCKED)
+ wake_requests(rbd_dev, true);
+ dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
+ rbd_dev, lock_state, ret);
+ return;
+ }
+
+ ret = rbd_request_lock(rbd_dev);
+ if (ret == -ETIMEDOUT) {
+ goto again; /* treat this as a dead client */
+ } else if (ret < 0) {
+ rbd_warn(rbd_dev, "error requesting lock: %d", ret);
+ mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
+ RBD_RETRY_DELAY);
+ } else {
+ /*
+ * lock owner acked, but resend if we don't see them
+ * release the lock
+ */
+ dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
+ rbd_dev);
+ mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
+ msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
+ }
+}
+
+/*
+ * lock_rwsem must be held for write
+ */
+static bool rbd_release_lock(struct rbd_device *rbd_dev)
+{
+ dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
+ rbd_dev->lock_state);
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+ return false;
+ rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
+ downgrade_write(&rbd_dev->lock_rwsem);
/*
- * Until adequate refresh error handling is in place, there is
- * not much we can do here, except warn.
+ * Ensure that all in-flight IO is flushed.
*
- * See http://tracker.ceph.com/issues/5040
+ * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
+ * may be shared with other devices.
*/
- ret = rbd_dev_refresh(rbd_dev);
- if (ret)
- rbd_warn(rbd_dev, "refresh failed: %d", ret);
+ ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
+ up_read(&rbd_dev->lock_rwsem);
+
+ down_write(&rbd_dev->lock_rwsem);
+ dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
+ rbd_dev->lock_state);
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
+ return false;
+
+ if (!rbd_unlock(rbd_dev))
+ /*
+ * Give others a chance to grab the lock - we would re-acquire
+ * almost immediately if we got new IO during ceph_osdc_sync()
+ * otherwise. We need to ack our own notifications, so this
+ * lock_dwork will be requeued from rbd_wait_state_locked()
+ * after wake_requests() in rbd_handle_released_lock().
+ */
+ cancel_delayed_work(&rbd_dev->lock_dwork);
+
+ return true;
+}
+
+static void rbd_release_lock_work(struct work_struct *work)
+{
+ struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
+ unlock_work);
+
+ down_write(&rbd_dev->lock_rwsem);
+ rbd_release_lock(rbd_dev);
+ up_write(&rbd_dev->lock_rwsem);
+}
+
+static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
+ void **p)
+{
+ struct rbd_client_id cid = { 0 };
+
+ if (struct_v >= 2) {
+ cid.gid = ceph_decode_64(p);
+ cid.handle = ceph_decode_64(p);
+ }
+
+ dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
+ cid.handle);
+ if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
+ down_write(&rbd_dev->lock_rwsem);
+ if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
+ /*
+ * we already know that the remote client is
+ * the owner
+ */
+ up_write(&rbd_dev->lock_rwsem);
+ return;
+ }
+
+ rbd_set_owner_cid(rbd_dev, &cid);
+ downgrade_write(&rbd_dev->lock_rwsem);
+ } else {
+ down_read(&rbd_dev->lock_rwsem);
+ }
+
+ if (!__rbd_is_lock_owner(rbd_dev))
+ wake_requests(rbd_dev, false);
+ up_read(&rbd_dev->lock_rwsem);
+}
+
+static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
+ void **p)
+{
+ struct rbd_client_id cid = { 0 };
+
+ if (struct_v >= 2) {
+ cid.gid = ceph_decode_64(p);
+ cid.handle = ceph_decode_64(p);
+ }
+
+ dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
+ cid.handle);
+ if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
+ down_write(&rbd_dev->lock_rwsem);
+ if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
+ dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
+ __func__, rbd_dev, cid.gid, cid.handle,
+ rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
+ up_write(&rbd_dev->lock_rwsem);
+ return;
+ }
+
+ rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
+ downgrade_write(&rbd_dev->lock_rwsem);
+ } else {
+ down_read(&rbd_dev->lock_rwsem);
+ }
+
+ if (!__rbd_is_lock_owner(rbd_dev))
+ wake_requests(rbd_dev, false);
+ up_read(&rbd_dev->lock_rwsem);
+}
+
+static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
+ void **p)
+{
+ struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
+ struct rbd_client_id cid = { 0 };
+ bool need_to_send;
+
+ if (struct_v >= 2) {
+ cid.gid = ceph_decode_64(p);
+ cid.handle = ceph_decode_64(p);
+ }
+
+ dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
+ cid.handle);
+ if (rbd_cid_equal(&cid, &my_cid))
+ return false;
+
+ down_read(&rbd_dev->lock_rwsem);
+ need_to_send = __rbd_is_lock_owner(rbd_dev);
+ if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
+ if (!rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) {
+ dout("%s rbd_dev %p queueing unlock_work\n", __func__,
+ rbd_dev);
+ queue_work(rbd_dev->task_wq, &rbd_dev->unlock_work);
+ }
+ }
+ up_read(&rbd_dev->lock_rwsem);
+ return need_to_send;
+}
+
+static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
+ u64 notify_id, u64 cookie, s32 *result)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
+ char buf[buf_size];
+ int ret;
+
+ if (result) {
+ void *p = buf;
+
+ /* encode ResponseMessage */
+ ceph_start_encoding(&p, 1, 1,
+ buf_size - CEPH_ENCODING_START_BLK_LEN);
+ ceph_encode_32(&p, *result);
+ } else {
+ buf_size = 0;
+ }
ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, notify_id, cookie,
- NULL, 0);
+ buf, buf_size);
if (ret)
- rbd_warn(rbd_dev, "notify_ack ret %d", ret);
+ rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
}
-static void rbd_watch_errcb(void *arg, u64 cookie, int err)
+static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
+ u64 cookie)
+{
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+ __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
+}
+
+static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
+ u64 notify_id, u64 cookie, s32 result)
+{
+ dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
+ __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
+}
+
+static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
+ u64 notifier_id, void *data, size_t data_len)
{
struct rbd_device *rbd_dev = arg;
+ void *p = data;
+ void *const end = p + data_len;
+ u8 struct_v;
+ u32 len;
+ u32 notify_op;
int ret;
- rbd_warn(rbd_dev, "encountered watch error: %d", err);
+ dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
+ __func__, rbd_dev, cookie, notify_id, data_len);
+ if (data_len) {
+ ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
+ &struct_v, &len);
+ if (ret) {
+ rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
+ ret);
+ return;
+ }
- __rbd_dev_header_unwatch_sync(rbd_dev);
+ notify_op = ceph_decode_32(&p);
+ } else {
+ /* legacy notification for header updates */
+ notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
+ len = 0;
+ }
- ret = rbd_dev_header_watch_sync(rbd_dev);
- if (ret) {
- rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
- return;
+ dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
+ switch (notify_op) {
+ case RBD_NOTIFY_OP_ACQUIRED_LOCK:
+ rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
+ rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+ break;
+ case RBD_NOTIFY_OP_RELEASED_LOCK:
+ rbd_handle_released_lock(rbd_dev, struct_v, &p);
+ rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+ break;
+ case RBD_NOTIFY_OP_REQUEST_LOCK:
+ if (rbd_handle_request_lock(rbd_dev, struct_v, &p))
+ /*
+ * send ResponseMessage(0) back so the client
+ * can detect a missing owner
+ */
+ rbd_acknowledge_notify_result(rbd_dev, notify_id,
+ cookie, 0);
+ else
+ rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+ break;
+ case RBD_NOTIFY_OP_HEADER_UPDATE:
+ ret = rbd_dev_refresh(rbd_dev);
+ if (ret)
+ rbd_warn(rbd_dev, "refresh failed: %d", ret);
+
+ rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+ break;
+ default:
+ if (rbd_is_lock_owner(rbd_dev))
+ rbd_acknowledge_notify_result(rbd_dev, notify_id,
+ cookie, -EOPNOTSUPP);
+ else
+ rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
+ break;
}
+}
- ret = rbd_dev_refresh(rbd_dev);
- if (ret)
- rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret);
+static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
+
+static void rbd_watch_errcb(void *arg, u64 cookie, int err)
+{
+ struct rbd_device *rbd_dev = arg;
+
+ rbd_warn(rbd_dev, "encountered watch error: %d", err);
+
+ down_write(&rbd_dev->lock_rwsem);
+ rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
+ up_write(&rbd_dev->lock_rwsem);
+
+ mutex_lock(&rbd_dev->watch_mutex);
+ if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
+ __rbd_unregister_watch(rbd_dev);
+ rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
+
+ queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
+ }
+ mutex_unlock(&rbd_dev->watch_mutex);
}
/*
- * Initiate a watch request, synchronously.
+ * watch_mutex must be locked
*/
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
+static int __rbd_register_watch(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_osd_linger_request *handle;
rbd_assert(!rbd_dev->watch_handle);
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, rbd_watch_cb,
@@ -3154,13 +3860,16 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
return 0;
}
-static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+/*
+ * watch_mutex must be locked
+ */
+static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
int ret;
- if (!rbd_dev->watch_handle)
- return;
+ rbd_assert(rbd_dev->watch_handle);
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
if (ret)
@@ -3169,17 +3878,106 @@ static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
rbd_dev->watch_handle = NULL;
}
-/*
- * Tear down a watch request, synchronously.
- */
-static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+static int rbd_register_watch(struct rbd_device *rbd_dev)
{
- __rbd_dev_header_unwatch_sync(rbd_dev);
+ int ret;
+
+ mutex_lock(&rbd_dev->watch_mutex);
+ rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
+ ret = __rbd_register_watch(rbd_dev);
+ if (ret)
+ goto out;
+
+ rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
+ rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
+
+out:
+ mutex_unlock(&rbd_dev->watch_mutex);
+ return ret;
+}
+
+static void cancel_tasks_sync(struct rbd_device *rbd_dev)
+{
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+ cancel_delayed_work_sync(&rbd_dev->watch_dwork);
+ cancel_work_sync(&rbd_dev->acquired_lock_work);
+ cancel_work_sync(&rbd_dev->released_lock_work);
+ cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+ cancel_work_sync(&rbd_dev->unlock_work);
+}
+
+static void rbd_unregister_watch(struct rbd_device *rbd_dev)
+{
+ WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
+ cancel_tasks_sync(rbd_dev);
+
+ mutex_lock(&rbd_dev->watch_mutex);
+ if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
+ __rbd_unregister_watch(rbd_dev);
+ rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
+ mutex_unlock(&rbd_dev->watch_mutex);
- dout("%s flushing notifies\n", __func__);
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
+static void rbd_reregister_watch(struct work_struct *work)
+{
+ struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
+ struct rbd_device, watch_dwork);
+ bool was_lock_owner = false;
+ bool need_to_wake = false;
+ int ret;
+
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+ down_write(&rbd_dev->lock_rwsem);
+ if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
+ was_lock_owner = rbd_release_lock(rbd_dev);
+
+ mutex_lock(&rbd_dev->watch_mutex);
+ if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
+ }
+
+ ret = __rbd_register_watch(rbd_dev);
+ if (ret) {
+ rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
+ if (ret == -EBLACKLISTED || ret == -ENOENT) {
+ set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
+ need_to_wake = true;
+ } else {
+ queue_delayed_work(rbd_dev->task_wq,
+ &rbd_dev->watch_dwork,
+ RBD_RETRY_DELAY);
+ }
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
+ }
+
+ need_to_wake = true;
+ rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
+ rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
+ mutex_unlock(&rbd_dev->watch_mutex);
+
+ ret = rbd_dev_refresh(rbd_dev);
+ if (ret)
+ rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret);
+
+ if (was_lock_owner) {
+ ret = rbd_try_lock(rbd_dev);
+ if (ret)
+ rbd_warn(rbd_dev, "reregisteration lock failed: %d",
+ ret);
+ }
+
+out:
+ up_write(&rbd_dev->lock_rwsem);
+ if (need_to_wake)
+ wake_requests(rbd_dev, true);
+}
+
/*
* Synchronous osd object method call. Returns the number of bytes
* returned in the outbound buffer, or a negative error code.
@@ -3193,7 +3991,6 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
void *inbound,
size_t inbound_size)
{
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
struct page **pages;
u32 page_count;
@@ -3242,11 +4039,8 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
obj_request->pages, inbound_size,
0, false, false);
- rbd_osd_req_format_read(obj_request);
- ret = rbd_obj_request_submit(osdc, obj_request);
- if (ret)
- goto out;
+ rbd_obj_request_submit(obj_request);
ret = rbd_obj_request_wait(obj_request);
if (ret)
goto out;
@@ -3267,6 +4061,31 @@ out:
return ret;
}
+/*
+ * lock_rwsem must be held for read
+ */
+static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
+{
+ DEFINE_WAIT(wait);
+
+ do {
+ /*
+ * Note the use of mod_delayed_work() in rbd_acquire_lock()
+ * and cancel_delayed_work() in wake_requests().
+ */
+ dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
+ queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
+ prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ up_read(&rbd_dev->lock_rwsem);
+ schedule();
+ down_read(&rbd_dev->lock_rwsem);
+ } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+
+ finish_wait(&rbd_dev->lock_waitq, &wait);
+}
+
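
The waiter and the lock worker hand off as sketched below; the cancel_delayed_work() in wake_requests() and the mod_delayed_work() in rbd_acquire_lock() keep the requeueing race-free:

	/*
	 *	waiter				rbd_acquire_lock worker
	 *	------				-----------------------
	 *	queue_delayed_work(lock_dwork)
	 *	up_read(lock_rwsem), sleep	rbd_try_acquire_lock()
	 *					wake_requests() when LOCKED
	 *					or blacklisted
	 *	down_read(), recheck state
	 */
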
static void rbd_queue_workfn(struct work_struct *work)
{
struct request *rq = blk_mq_rq_from_pdu(work);
@@ -3277,6 +4096,7 @@ static void rbd_queue_workfn(struct work_struct *work)
u64 length = blk_rq_bytes(rq);
enum obj_operation_type op_type;
u64 mapping_size;
+ bool must_be_locked;
int result;
if (rq->cmd_type != REQ_TYPE_FS) {
@@ -3338,6 +4158,10 @@ static void rbd_queue_workfn(struct work_struct *work)
if (op_type != OBJ_OP_READ) {
snapc = rbd_dev->header.snapc;
ceph_get_snap_context(snapc);
+ must_be_locked = rbd_is_lock_supported(rbd_dev);
+ } else {
+ must_be_locked = rbd_dev->opts->lock_on_read &&
+ rbd_is_lock_supported(rbd_dev);
}
up_read(&rbd_dev->header_rwsem);
@@ -3348,11 +4172,25 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
+ if (must_be_locked) {
+ down_read(&rbd_dev->lock_rwsem);
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
+ rbd_wait_state_locked(rbd_dev);
+
+ WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ result = -EBLACKLISTED;
+ goto err_unlock;
+ }
+ }
+
img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
snapc);
if (!img_request) {
result = -ENOMEM;
- goto err_rq;
+ goto err_unlock;
}
img_request->rq = rq;
snapc = NULL; /* img_request consumes a ref */
@@ -3370,10 +4208,15 @@ static void rbd_queue_workfn(struct work_struct *work)
if (result)
goto err_img_request;
+ if (must_be_locked)
+ up_read(&rbd_dev->lock_rwsem);
return;
err_img_request:
rbd_img_request_put(img_request);
+err_unlock:
+ if (must_be_locked)
+ up_read(&rbd_dev->lock_rwsem);
err_rq:
if (result)
rbd_warn(rbd_dev, "%s %llx at %llx result %d",
@@ -3415,7 +4258,6 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
u64 offset, u64 length, void *buf)
{
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
struct page **pages = NULL;
u32 page_count;
@@ -3448,11 +4290,8 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
obj_request->length,
obj_request->offset & ~PAGE_MASK,
false, false);
- rbd_osd_req_format_read(obj_request);
- ret = rbd_obj_request_submit(osdc, obj_request);
- if (ret)
- goto out;
+ rbd_obj_request_submit(obj_request);
ret = rbd_obj_request_wait(obj_request);
if (ret)
goto out;
@@ -3621,7 +4460,6 @@ static int rbd_init_request(void *data, struct request *rq,
static struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
- .map_queue = blk_mq_map_queue,
.init_request = rbd_init_request,
};
@@ -3752,13 +4590,40 @@ static ssize_t rbd_minor_show(struct device *dev,
return sprintf(buf, "%d\n", rbd_dev->minor);
}
+static ssize_t rbd_client_addr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ struct ceph_entity_addr *client_addr =
+ ceph_client_addr(rbd_dev->rbd_client->client);
+
+ return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
+ le32_to_cpu(client_addr->nonce));
+}
+
static ssize_t rbd_client_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
return sprintf(buf, "client%lld\n",
- ceph_client_id(rbd_dev->rbd_client->client));
+ ceph_client_gid(rbd_dev->rbd_client->client));
+}
+
+static ssize_t rbd_cluster_fsid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+ return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
+}
+
+static ssize_t rbd_config_info_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+ return sprintf(buf, "%s\n", rbd_dev->config_info);
}
static ssize_t rbd_pool_show(struct device *dev,
@@ -3810,6 +4675,14 @@ static ssize_t rbd_snap_show(struct device *dev,
return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}
+static ssize_t rbd_snap_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+ return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
+}
+
/*
* For a v2 image, shows the chain of parent images, separated by empty
* lines. For v1 images or if there is no parent, shows "(no parent
@@ -3862,13 +4735,17 @@ static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
+static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
+static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
+static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
+static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
@@ -3876,12 +4753,16 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_features.attr,
&dev_attr_major.attr,
&dev_attr_minor.attr,
+ &dev_attr_client_addr.attr,
&dev_attr_client_id.attr,
+ &dev_attr_cluster_fsid.attr,
+ &dev_attr_config_info.attr,
&dev_attr_pool.attr,
&dev_attr_pool_id.attr,
&dev_attr_name.attr,
&dev_attr_image_id.attr,
&dev_attr_current_snap.attr,
+ &dev_attr_snap_id.attr,
&dev_attr_parent.attr,
&dev_attr_refresh.attr,
NULL
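
Once mapped, the new attributes sit next to the existing ones under /sys/bus/rbd/devices/<id>/ (config_info is S_IRUSR, so root-only). A hypothetical reader for device 0:

	#include <stdio.h>

	int main(void)
	{
		const char *attrs[] = { "client_addr", "cluster_fsid", "snap_id" };
		char path[64], line[128];
		unsigned int i;

		for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
			FILE *f;

			snprintf(path, sizeof(path),
				 "/sys/bus/rbd/devices/0/%s", attrs[i]);
			f = fopen(path, "r");
			if (f && fgets(line, sizeof(line), f))
				printf("%s: %s", attrs[i], line);
			if (f)
				fclose(f);
		}
		return 0;
	}
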
@@ -3944,18 +4825,32 @@ static void rbd_spec_free(struct kref *kref)
kfree(spec);
}
-static void rbd_dev_release(struct device *dev)
+static void rbd_dev_free(struct rbd_device *rbd_dev)
{
- struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- bool need_put = !!rbd_dev->opts;
+ WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
+ WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
ceph_oid_destroy(&rbd_dev->header_oid);
ceph_oloc_destroy(&rbd_dev->header_oloc);
+ kfree(rbd_dev->config_info);
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
kfree(rbd_dev->opts);
kfree(rbd_dev);
+}
+
+static void rbd_dev_release(struct device *dev)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ bool need_put = !!rbd_dev->opts;
+
+ if (need_put) {
+ destroy_workqueue(rbd_dev->task_wq);
+ ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+ }
+
+ rbd_dev_free(rbd_dev);
/*
* This is racy, but way better than putting module outside of
@@ -3966,25 +4861,34 @@ static void rbd_dev_release(struct device *dev)
module_put(THIS_MODULE);
}
-static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
- struct rbd_spec *spec,
- struct rbd_options *opts)
+static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
+ struct rbd_spec *spec)
{
struct rbd_device *rbd_dev;
- rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
+ rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
if (!rbd_dev)
return NULL;
spin_lock_init(&rbd_dev->lock);
- rbd_dev->flags = 0;
- atomic_set(&rbd_dev->parent_ref, 0);
INIT_LIST_HEAD(&rbd_dev->node);
init_rwsem(&rbd_dev->header_rwsem);
ceph_oid_init(&rbd_dev->header_oid);
ceph_oloc_init(&rbd_dev->header_oloc);
+ mutex_init(&rbd_dev->watch_mutex);
+ rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
+ INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
+
+ init_rwsem(&rbd_dev->lock_rwsem);
+ rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
+ INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
+ INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
+ INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
+ INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
+ init_waitqueue_head(&rbd_dev->lock_waitq);
+
rbd_dev->dev.bus = &rbd_bus_type;
rbd_dev->dev.type = &rbd_device_type;
rbd_dev->dev.parent = &rbd_root_dev;
@@ -3992,9 +4896,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->rbd_client = rbdc;
rbd_dev->spec = spec;
- rbd_dev->opts = opts;
-
- /* Initialize the layout used for all rbd requests */
rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
rbd_dev->layout.stripe_count = 1;
@@ -4002,15 +4903,48 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
rbd_dev->layout.pool_id = spec->pool_id;
RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
- /*
- * If this is a mapping rbd_dev (as opposed to a parent one),
- * pin our module. We have a ref from do_rbd_add(), so use
- * __module_get().
- */
- if (rbd_dev->opts)
- __module_get(THIS_MODULE);
+ return rbd_dev;
+}
+
+/*
+ * Create a mapping rbd_dev.
+ */
+static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+ struct rbd_spec *spec,
+ struct rbd_options *opts)
+{
+ struct rbd_device *rbd_dev;
+
+ rbd_dev = __rbd_dev_create(rbdc, spec);
+ if (!rbd_dev)
+ return NULL;
+
+ rbd_dev->opts = opts;
+
+ /* get an id and fill in device name */
+ rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
+ minor_to_rbd_dev_id(1 << MINORBITS),
+ GFP_KERNEL);
+ if (rbd_dev->dev_id < 0)
+ goto fail_rbd_dev;
+ sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
+ rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
+ rbd_dev->name);
+ if (!rbd_dev->task_wq)
+ goto fail_dev_id;
+
+ /* we have a ref from do_rbd_add() */
+ __module_get(THIS_MODULE);
+
+ dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
return rbd_dev;
+
+fail_dev_id:
+ ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+fail_rbd_dev:
+ rbd_dev_free(rbd_dev);
+ return NULL;
}
static void rbd_dev_destroy(struct rbd_device *rbd_dev)
@@ -4646,46 +5580,6 @@ static int rbd_dev_header_info(struct rbd_device *rbd_dev)
}
/*
- * Get a unique rbd identifier for the given new rbd_dev, and add
- * the rbd_dev to the global list.
- */
-static int rbd_dev_id_get(struct rbd_device *rbd_dev)
-{
- int new_dev_id;
-
- new_dev_id = ida_simple_get(&rbd_dev_id_ida,
- 0, minor_to_rbd_dev_id(1 << MINORBITS),
- GFP_KERNEL);
- if (new_dev_id < 0)
- return new_dev_id;
-
- rbd_dev->dev_id = new_dev_id;
-
- spin_lock(&rbd_dev_list_lock);
- list_add_tail(&rbd_dev->node, &rbd_dev_list);
- spin_unlock(&rbd_dev_list_lock);
-
- dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
-
- return 0;
-}
-
-/*
- * Remove an rbd_dev from the global list, and record that its
- * identifier is no longer in use.
- */
-static void rbd_dev_id_put(struct rbd_device *rbd_dev)
-{
- spin_lock(&rbd_dev_list_lock);
- list_del_init(&rbd_dev->node);
- spin_unlock(&rbd_dev_list_lock);
-
- ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
-
- dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
-}
-
-/*
* Skips over white space at *buf, and updates *buf to point to the
* first found non-space character (if any). Returns the length of
* the token (string of non-white space characters) found. Note
@@ -4860,6 +5754,7 @@ static int rbd_add_parse_args(const char *buf,
rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
+ rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
copts = ceph_parse_options(options, mon_addrs,
mon_addrs + mon_addrs_size - 1,
@@ -5077,8 +5972,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
goto out_err;
}
- parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
- NULL);
+ parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
if (!parent) {
ret = -ENOMEM;
goto out_err;
@@ -5113,22 +6007,12 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
- /* Get an id and fill in device name. */
-
- ret = rbd_dev_id_get(rbd_dev);
- if (ret)
- goto err_out_unlock;
-
- BUILD_BUG_ON(DEV_NAME_LEN
- < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
- sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
-
/* Record our major and minor device numbers. */
if (!single_major) {
ret = register_blkdev(0, rbd_dev->name);
if (ret < 0)
- goto err_out_id;
+ goto err_out_unlock;
rbd_dev->major = ret;
rbd_dev->minor = 0;
@@ -5160,9 +6044,14 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
up_write(&rbd_dev->header_rwsem);
+ spin_lock(&rbd_dev_list_lock);
+ list_add_tail(&rbd_dev->node, &rbd_dev_list);
+ spin_unlock(&rbd_dev_list_lock);
+
add_disk(rbd_dev->disk);
- pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
- (unsigned long long) rbd_dev->mapping.size);
+ pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
+ (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
+ rbd_dev->header.features);
return ret;
@@ -5173,8 +6062,6 @@ err_out_disk:
err_out_blkdev:
if (!single_major)
unregister_blkdev(rbd_dev->major, rbd_dev->name);
-err_out_id:
- rbd_dev_id_put(rbd_dev);
err_out_unlock:
up_write(&rbd_dev->header_rwsem);
return ret;
@@ -5235,7 +6122,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
goto err_out_format;
if (!depth) {
- ret = rbd_dev_header_watch_sync(rbd_dev);
+ ret = rbd_register_watch(rbd_dev);
if (ret) {
if (ret == -ENOENT)
pr_info("image %s/%s does not exist\n",
@@ -5294,7 +6181,7 @@ err_out_probe:
rbd_dev_unprobe(rbd_dev);
err_out_watch:
if (!depth)
- rbd_dev_header_unwatch_sync(rbd_dev);
+ rbd_unregister_watch(rbd_dev);
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
@@ -5346,10 +6233,18 @@ static ssize_t do_rbd_add(struct bus_type *bus,
spec = NULL; /* rbd_dev now owns this */
rbd_opts = NULL; /* rbd_dev now owns this */
+ rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
+ if (!rbd_dev->config_info) {
+ rc = -ENOMEM;
+ goto err_out_rbd_dev;
+ }
+
down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
- if (rc < 0)
+ if (rc < 0) {
+ up_write(&rbd_dev->header_rwsem);
goto err_out_rbd_dev;
+ }
/* If we are mapping a snapshot it must be marked read-only */
@@ -5361,11 +6256,11 @@ static ssize_t do_rbd_add(struct bus_type *bus,
rc = rbd_dev_device_setup(rbd_dev);
if (rc) {
/*
- * rbd_dev_header_unwatch_sync() can't be moved into
+ * rbd_unregister_watch() can't be moved into
* rbd_dev_image_release() without refactoring, see
* commit 1f3ef78861ac.
*/
- rbd_dev_header_unwatch_sync(rbd_dev);
+ rbd_unregister_watch(rbd_dev);
rbd_dev_image_release(rbd_dev);
goto out;
}
@@ -5376,7 +6271,6 @@ out:
return rc;
err_out_rbd_dev:
- up_write(&rbd_dev->header_rwsem);
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
@@ -5406,12 +6300,16 @@ static ssize_t rbd_add_single_major(struct bus_type *bus,
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
rbd_free_disk(rbd_dev);
+
+ spin_lock(&rbd_dev_list_lock);
+ list_del_init(&rbd_dev->node);
+ spin_unlock(&rbd_dev_list_lock);
+
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
device_del(&rbd_dev->dev);
rbd_dev_mapping_clear(rbd_dev);
if (!single_major)
unregister_blkdev(rbd_dev->major, rbd_dev->name);
- rbd_dev_id_put(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
@@ -5447,18 +6345,26 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
struct rbd_device *rbd_dev = NULL;
struct list_head *tmp;
int dev_id;
- unsigned long ul;
+ char opt_buf[6];
bool already = false;
+ bool force = false;
int ret;
- ret = kstrtoul(buf, 10, &ul);
- if (ret)
- return ret;
-
- /* convert to int; abort if we lost anything in the conversion */
- dev_id = (int)ul;
- if (dev_id != ul)
+ dev_id = -1;
+ opt_buf[0] = '\0';
+ sscanf(buf, "%d %5s", &dev_id, opt_buf);
+ if (dev_id < 0) {
+ pr_err("dev_id out of range\n");
return -EINVAL;
+ }
+ if (opt_buf[0] != '\0') {
+ if (!strcmp(opt_buf, "force")) {
+ force = true;
+ } else {
+ pr_err("bad remove option at '%s'\n", opt_buf);
+ return -EINVAL;
+ }
+ }
ret = -ENOENT;
spin_lock(&rbd_dev_list_lock);
@@ -5471,7 +6377,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
}
if (!ret) {
spin_lock_irq(&rbd_dev->lock);
- if (rbd_dev->open_count)
+ if (rbd_dev->open_count && !force)
ret = -EBUSY;
else
already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
@@ -5482,7 +6388,20 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
if (ret < 0 || already)
return ret;
- rbd_dev_header_unwatch_sync(rbd_dev);
+ if (force) {
+ /*
+ * Prevent new IO from being queued and wait for existing
+ * IO to complete/fail.
+ */
+ blk_mq_freeze_queue(rbd_dev->disk->queue);
+ blk_set_queue_dying(rbd_dev->disk->queue);
+ }
+
+ down_write(&rbd_dev->lock_rwsem);
+ if (__rbd_is_lock_owner(rbd_dev))
+ rbd_unlock(rbd_dev);
+ up_write(&rbd_dev->lock_rwsem);
+ rbd_unregister_watch(rbd_dev);
/*
* Don't free anything from rbd_dev->disk until after all
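
The parsing above means the sysfs remove attribute now takes a device id optionally followed by the literal token "force", which skips the open_count check and fails in-flight IO instead. A minimal userspace sketch, assuming the default (non-single-major) /sys/bus/rbd/remove attribute and a hypothetical device id of 0:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* "<dev_id> force" force-unmaps; "<dev_id>" alone returns
             * EBUSY while the device is still open */
            const char *cmd = "0 force";
            int fd = open("/sys/bus/rbd/remove", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, cmd, strlen(cmd)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }
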
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index 49d77cbcf8bd..94f367db27b0 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -28,6 +28,17 @@
#define RBD_DATA_PREFIX "rbd_data."
#define RBD_ID_PREFIX "rbd_id."
+#define RBD_LOCK_NAME "rbd_lock"
+#define RBD_LOCK_TAG "internal"
+#define RBD_LOCK_COOKIE_PREFIX "auto"
+
+enum rbd_notify_op {
+ RBD_NOTIFY_OP_ACQUIRED_LOCK = 0,
+ RBD_NOTIFY_OP_RELEASED_LOCK = 1,
+ RBD_NOTIFY_OP_REQUEST_LOCK = 2,
+ RBD_NOTIFY_OP_HEADER_UPDATE = 3,
+};
+
/*
* For format version 1, rbd image 'foo' consists of objects
* foo.rbd - image metadata
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 93b1aaa5ba3b..5545a679abd8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev)
static int init_vq(struct virtio_blk *vblk)
{
- int err = 0;
+ int err;
int i;
vq_callback_t **callbacks;
const char **names;
@@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
- vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
+ vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
- names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
- callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
- vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
+ names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
+ callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
+ vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
if (!names || !callbacks || !vqs) {
err = -ENOMEM;
goto out;
@@ -542,7 +542,6 @@ static int virtblk_init_request(void *data, struct request *rq,
static struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
- .map_queue = blk_mq_map_queue,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
};
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 88ef6d4729b4..9908597c5209 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -909,7 +909,6 @@ out_busy:
static struct blk_mq_ops blkfront_mq_ops = {
.queue_rq = blkif_queue_rq,
- .map_queue = blk_mq_map_queue,
};
static void blkif_set_queue_limits(struct blkfront_info *info)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 6bd63b84abd0..2f633df9f4e6 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -314,6 +314,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Marvell Bluetooth devices */
{ USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
+ { USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL },
/* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
@@ -1042,6 +1043,10 @@ static int btusb_open(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ return err;
+
/* Patching USB firmware files prior to starting any URBs of HCI path
* It is more safe to use USB bulk channel for downloading USB patch
*/
@@ -1051,10 +1056,6 @@ static int btusb_open(struct hci_dev *hdev)
return err;
}
- err = usb_autopm_get_interface(data->intf);
- if (err < 0)
- return err;
-
data->intf->needs_remote_wakeup = 1;
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index ef51c9c864c5..b6bb58c41df5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev)
BT_DBG("HCI device registered (hdev %p)", hdev);
dev_set_drvdata(&pdev->dev, hst);
- return err;
+ return 0;
}
static int bt_ti_remove(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5ccb90ef0146..8f6c23c20c52 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
},
.driver_data = &acpi_active_low,
},
+ { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
+ .ident = "Lenovo ThinkPad 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = &acpi_active_low,
+ },
{ }
};
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 3b205e212337..78751057164a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -108,6 +108,15 @@ config OMAP_OCP2SCP
OCP2SCP and in OMAP5, both USB PHY and SATA PHY are connected via
OCP2SCP.
+config QCOM_EBI2
+ bool "Qualcomm External Bus Interface 2 (EBI2)"
+ depends on HAS_IOMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Say y here to enable support for the Qualcomm External Bus
+ Interface 2, which can be used to connect things like NAND Flash,
+ SRAM, ethernet adapters, FPGAs and LCD displays.
+
config SIMPLE_PM_BUS
bool "Simple Power-Managed Bus Driver"
depends on OF && PM
@@ -132,12 +141,8 @@ config SUNXI_RSB
with various RSB based devices, such as AXP223, AXP8XX PMICs,
and AC100/AC200 ICs.
-# TODO: This uses pm_clk_*() symbols that aren't exported in v4.7 and hence
-# the driver will fail to build as a module. However there are patches to
-# address that queued for v4.8, so this can be turned into a tristate symbol
-# after v4.8-rc1.
config TEGRA_ACONNECT
- bool "Tegra ACONNECT Bus Driver"
+ tristate "Tegra ACONNECT Bus Driver"
depends on ARCH_TEGRA_210_SOC
depends on OF && PM
select PM_CLK
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ac84cc4348e3..c6cfa6b2606e 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
+obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
new file mode 100644
index 000000000000..a6444244c411
--- /dev/null
+++ b/drivers/bus/qcom-ebi2.c
@@ -0,0 +1,408 @@
+/*
+ * Qualcomm External Bus Interface 2 (EBI2) driver
+ * an older version of the Qualcomm Parallel Interface Controller (QPIC)
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+/*
+ * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
+ */
+#define EBI2_CS0_ENABLE_MASK (BIT(0)|BIT(1))
+#define EBI2_CS1_ENABLE_MASK (BIT(2)|BIT(3))
+#define EBI2_CS2_ENABLE_MASK BIT(4)
+#define EBI2_CS3_ENABLE_MASK BIT(5)
+#define EBI2_CS4_ENABLE_MASK (BIT(6)|BIT(7))
+#define EBI2_CS5_ENABLE_MASK (BIT(8)|BIT(9))
+#define EBI2_CSN_MASK GENMASK(9, 0)
+
+#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
+
+/*
+ * SLOW CSn CFG
+ *
+ * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
+ * memory continues to drive the data bus after OE is de-asserted.
+ * Inserted when reading one CS and switching to another CS or read
+ * followed by write on the same CS. Valid values 0 thru 15.
+ * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
+ * every write minimum 1. The data out is driven from the time WE is
+ * asserted until CS is asserted. With a hold of 1, the CS stays
+ * active for 1 extra cycle etc. Valid values 0 thru 15.
+ * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
+ * write to a page or burst memory
+ * Bits 15-8: RD_DELTA initial latency for read cycles inserted for the first
+ * read to a page or burst memory
+ * Bits 7-4: WR_WAIT number of wait cycles for every write access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ * Bits 3-0: RD_WAIT number of wait cycles for every read access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ */
+#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
+#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
+#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
+#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
+#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
+#define EBI2_XMEM_CS5_SLOW_CFG 0x001C
+
+#define EBI2_XMEM_RECOVERY_SHIFT 28
+#define EBI2_XMEM_WR_HOLD_SHIFT 24
+#define EBI2_XMEM_WR_DELTA_SHIFT 16
+#define EBI2_XMEM_RD_DELTA_SHIFT 8
+#define EBI2_XMEM_WR_WAIT_SHIFT 4
+#define EBI2_XMEM_RD_WAIT_SHIFT 0
+
+/*
+ * FAST CSn CFG
+ * Bits 31-28: ?
+ * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
+ * transfer. For a single read trandfer this will be the time
+ * from CS assertion to OE assertion.
+ * Bits 23-18: ?
+ * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
+ * assertion, with respect to the cycle where ADV is asserted.
+ * 2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
+ * Bit 5: ADDR_HOLD_ENA, the address is held for an extra cycle to meet
+ * hold time requirements with ADV assertion.
+ *
+ * The manual mentions "write precharge cycles" and "precharge cycles".
+ * We have not been able to figure out which bit fields these correspond to
+ * in the hardware, or what valid values exist. The current hypothesis is that
+ * this is something just used on the FAST chip selects. There is also a "byte
+ * device enable" flag somewhere for 8bit memories.
+ */
+#define EBI2_XMEM_CS0_FAST_CFG 0x0028
+#define EBI2_XMEM_CS1_FAST_CFG 0x002C
+#define EBI2_XMEM_CS2_FAST_CFG 0x0030
+#define EBI2_XMEM_CS3_FAST_CFG 0x0034
+#define EBI2_XMEM_CS4_FAST_CFG 0x0038
+#define EBI2_XMEM_CS5_FAST_CFG 0x003C
+
+#define EBI2_XMEM_RD_HOLD_SHIFT 24
+#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT 16
+#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT 5
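
Each chip select is thus configured by two packed words, one per register layout described above. A standalone sketch of the packing arithmetic, using invented cycle counts purely as an illustration (none of these values come from a real board):

    #include <stdint.h>
    #include <stdio.h>

    /* Shifts mirror the EBI2_XMEM_*_SHIFT defines above */
    #define RECOVERY_SHIFT 28
    #define WR_HOLD_SHIFT  24
    #define WR_WAIT_SHIFT   4
    #define RD_WAIT_SHIFT   0
    #define RD_HOLD_SHIFT  24
    #define ADV_OE_SHIFT   16

    int main(void)
    {
            uint32_t slowcfg = 0, fastcfg = 0;

            slowcfg |= 2u << RECOVERY_SHIFT; /* 3 recovery cycles (0 = 1) */
            slowcfg |= 1u << WR_HOLD_SHIFT;  /* 1 extra write hold cycle */
            slowcfg |= 4u << WR_WAIT_SHIFT;  /* 5 write wait states (0 = 1) */
            slowcfg |= 4u << RD_WAIT_SHIFT;  /* 5 read wait states (0 = 1) */

            fastcfg |= 3u << RD_HOLD_SHIFT;  /* first read segment length */
            fastcfg |= 2u << ADV_OE_SHIFT;   /* 2 cycles between ADV and OE */

            /* Prints SLOW CFG 0x21000044, FAST CFG 0x03020000 */
            printf("SLOW CFG 0x%08x, FAST CFG 0x%08x\n", slowcfg, fastcfg);
            return 0;
    }
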
+
+/**
+ * struct cs_data - struct with info on a chipselect setting
+ * @enable_mask: mask to enable the chipselect in the EBI2 config
+ * @slow_cfg: offset to XMEMC slow CS config
+ * @fast_cfg: offset to XMEMC fast CS config
+ */
+struct cs_data {
+ u32 enable_mask;
+ u16 slow_cfg;
+ u16 fast_cfg;
+};
+
+static const struct cs_data cs_info[] = {
+ {
+ /* CS0 */
+ .enable_mask = EBI2_CS0_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
+ },
+ {
+ /* CS1 */
+ .enable_mask = EBI2_CS1_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
+ },
+ {
+ /* CS2 */
+ .enable_mask = EBI2_CS2_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
+ },
+ {
+ /* CS3 */
+ .enable_mask = EBI2_CS3_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
+ },
+ {
+ /* CS4 */
+ .enable_mask = EBI2_CS4_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
+ },
+ {
+ /* CS5 */
+ .enable_mask = EBI2_CS5_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
+ },
+};
+
+/**
+ * struct ebi2_xmem_prop - describes an XMEM config property
+ * @prop: the device tree binding name
+ * @max: maximum value for the property
+ * @slowreg: true if this property is in the SLOW CS config register
+ * else it is assumed to be in the FAST config register
+ * @shift: the bit field start in the SLOW or FAST register for this
+ * property
+ */
+struct ebi2_xmem_prop {
+ const char *prop;
+ u32 max;
+ bool slowreg;
+ u16 shift;
+};
+
+static const struct ebi2_xmem_prop xmem_props[] = {
+ {
+ .prop = "qcom,xmem-recovery-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-hold-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_HOLD_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-address-hold-enable",
+ .max = 1, /* boolean prop */
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-adv-to-oe-recovery-cycles",
+ .max = 3,
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-hold-cycles",
+ .max = 15,
+ .slowreg = false,
+ .shift = EBI2_XMEM_RD_HOLD_SHIFT,
+ },
+};
+
+static void qcom_ebi2_setup_chipselect(struct device_node *np,
+ struct device *dev,
+ void __iomem *ebi2_base,
+ void __iomem *ebi2_xmem,
+ u32 csindex)
+{
+ const struct cs_data *csd;
+ u32 slowcfg, fastcfg;
+ u32 val;
+ int ret;
+ int i;
+
+ csd = &cs_info[csindex];
+ val = readl(ebi2_base);
+ val |= csd->enable_mask;
+ writel(val, ebi2_base);
+ dev_dbg(dev, "enabled CS%u\n", csindex);
+
+ /* Next set up the XMEMC */
+ slowcfg = 0;
+ fastcfg = 0;
+
+ for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
+ const struct ebi2_xmem_prop *xp = &xmem_props[i];
+
+ /* All are regular u32 values */
+ ret = of_property_read_u32(np, xp->prop, &val);
+ if (ret) {
+ dev_dbg(dev, "could not read %s for CS%d\n",
+ xp->prop, csindex);
+ continue;
+ }
+
+ /* First check boolean props */
+ if (xp->max == 1 && val) {
+ if (xp->slowreg)
+ slowcfg |= BIT(xp->shift);
+ else
+ fastcfg |= BIT(xp->shift);
+ dev_dbg(dev, "set %s flag\n", xp->prop);
+ continue;
+ }
+
+ /* We're dealing with a u32 */
+ if (val > xp->max) {
+ dev_err(dev,
+ "too high value for %s: %u, capped at %u\n",
+ xp->prop, val, xp->max);
+ val = xp->max;
+ }
+ if (xp->slowreg)
+ slowcfg |= (val << xp->shift);
+ else
+ fastcfg |= (val << xp->shift);
+ dev_dbg(dev, "set %s to %u\n", xp->prop, val);
+ }
+
+ dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
+ csindex, slowcfg, fastcfg);
+
+ if (slowcfg)
+ writel(slowcfg, ebi2_xmem + csd->slow_cfg);
+ if (fastcfg)
+ writel(fastcfg, ebi2_xmem + csd->fast_cfg);
+}
+
+static int qcom_ebi2_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *ebi2_base;
+ void __iomem *ebi2_xmem;
+ struct clk *ebi2xclk;
+ struct clk *ebi2clk;
+ bool have_children = false;
+ u32 val;
+ int ret;
+
+ ebi2xclk = devm_clk_get(dev, "ebi2x");
+ if (IS_ERR(ebi2xclk))
+ return PTR_ERR(ebi2xclk);
+
+ ret = clk_prepare_enable(ebi2xclk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
+ return ret;
+ }
+
+ ebi2clk = devm_clk_get(dev, "ebi2");
+ if (IS_ERR(ebi2clk)) {
+ ret = PTR_ERR(ebi2clk);
+ goto err_disable_2x_clk;
+ }
+
+ ret = clk_prepare_enable(ebi2clk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2 clk\n");
+ goto err_disable_2x_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ebi2_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_base)) {
+ ret = PTR_ERR(ebi2_base);
+ goto err_disable_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ebi2_xmem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_xmem)) {
+ ret = PTR_ERR(ebi2_xmem);
+ goto err_disable_clk;
+ }
+
+ /* Allegedly this turns the power save mode off */
+ writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
+
+ /* Disable all chipselects */
+ val = readl(ebi2_base);
+ val &= ~EBI2_CSN_MASK;
+ writel(val, ebi2_base);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ u32 csindex;
+
+ /* Figure out the chipselect */
+ ret = of_property_read_u32(child, "reg", &csindex);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+
+ if (csindex > 5) {
+ dev_err(dev,
+ "invalid chipselect %u, we only support 0-5\n",
+ csindex);
+ continue;
+ }
+
+ qcom_ebi2_setup_chipselect(child,
+ dev,
+ ebi2_base,
+ ebi2_xmem,
+ csindex);
+
+ /* We have at least one child */
+ have_children = true;
+ }
+
+ if (have_children)
+ return of_platform_default_populate(np, NULL, dev);
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(ebi2clk);
+err_disable_2x_clk:
+ clk_disable_unprepare(ebi2xclk);
+
+ return ret;
+}
+
+static const struct of_device_id qcom_ebi2_of_match[] = {
+ { .compatible = "qcom,msm8660-ebi2", },
+ { .compatible = "qcom,apq8060-ebi2", },
+ { }
+};
+
+static struct platform_driver qcom_ebi2_driver = {
+ .probe = qcom_ebi2_probe,
+ .driver = {
+ .name = "qcom-ebi2",
+ .of_match_table = qcom_ebi2_of_match,
+ },
+};
+module_platform_driver(qcom_ebi2_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm EBI2 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index 7e4104b74fa8..084ae286fa23 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -15,24 +15,6 @@
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
-static int tegra_aconnect_add_clock(struct device *dev, char *name)
-{
- struct clk *clk;
- int ret;
-
- clk = clk_get(dev, name);
- if (IS_ERR(clk)) {
- dev_err(dev, "%s clock not found\n", name);
- return PTR_ERR(clk);
- }
-
- ret = pm_clk_add_clk(dev, clk);
- if (ret)
- clk_put(clk);
-
- return ret;
-}
-
static int tegra_aconnect_probe(struct platform_device *pdev)
{
int ret;
@@ -44,11 +26,11 @@ static int tegra_aconnect_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = tegra_aconnect_add_clock(&pdev->dev, "ape");
+ ret = of_pm_clk_add_clk(&pdev->dev, "ape");
if (ret)
goto clk_destroy;
- ret = tegra_aconnect_add_clock(&pdev->dev, "apb2ape");
+ ret = of_pm_clk_add_clk(&pdev->dev, "apb2ape");
if (ret)
goto clk_destroy;
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 44311296ec02..0f7d28a98b9a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -845,6 +845,8 @@ void intel_gtt_insert_page(dma_addr_t addr,
unsigned int flags)
{
intel_private.driver->write_entry(addr, pg, flags);
+ if (intel_private.driver->chipset_flush)
+ intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_insert_page);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8c0770bf8881..200dab5136a7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -410,6 +410,19 @@ config HW_RANDOM_MESON
If unsure, say Y.
+config HW_RANDOM_CAVIUM
+ tristate "Cavium ThunderX Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Cavium SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cavium_rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 04bb0b03356f..5f52b1e4e7be 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
+obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 48f6a83cdd61..4a99ac756f08 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -24,16 +24,18 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/hw_random.h>
-#include <linux/delay.h>
-#include <asm/io.h>
+#define DRV_NAME "AMD768-HWRNG"
-#define PFX KBUILD_MODNAME ": "
-
+#define RNGDATA 0x00
+#define RNGDONE 0x04
+#define PMBASE_OFFSET 0xF0
+#define PMBASE_SIZE 8
/*
* Data for PCI driver interface
@@ -50,72 +52,84 @@ static const struct pci_device_id pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
-static struct pci_dev *amd_pdev;
-
+struct amd768_priv {
+ void __iomem *iobase;
+ struct pci_dev *pcidev;
+};
-static int amd_rng_data_present(struct hwrng *rng, int wait)
+static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
- u32 pmbase = (u32)rng->priv;
- int data, i;
-
- for (i = 0; i < 20; i++) {
- data = !!(inl(pmbase + 0xF4) & 1);
- if (data || !wait)
- break;
- udelay(10);
+ u32 *data = buf;
+ struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
+ size_t read = 0;
+ /* We will wait at maximum one time per read */
+ int timeout = max / 4 + 1;
+
+ /*
+ * RNG data is available when RNGDONE is set to 1
+ * New random numbers are generated approximately 128 microseconds
+ * after RNGDATA is read
+ */
+ while (read < max) {
+ if (ioread32(priv->iobase + RNGDONE) == 0) {
+ if (wait) {
+ /* Delay given by datasheet */
+ usleep_range(128, 196);
+ if (timeout-- == 0)
+ return read;
+ } else {
+ return 0;
+ }
+ } else {
+ *data = ioread32(priv->iobase + RNGDATA);
+ data++;
+ read += 4;
+ }
}
- return data;
-}
-static int amd_rng_data_read(struct hwrng *rng, u32 *data)
-{
- u32 pmbase = (u32)rng->priv;
-
- *data = inl(pmbase + 0xF0);
-
- return 4;
+ return read;
}
static int amd_rng_init(struct hwrng *rng)
{
+ struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
u8 rnen;
- pci_read_config_byte(amd_pdev, 0x40, &rnen);
- rnen |= (1 << 7); /* RNG on */
- pci_write_config_byte(amd_pdev, 0x40, rnen);
+ pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+ rnen |= BIT(7); /* RNG on */
+ pci_write_config_byte(priv->pcidev, 0x40, rnen);
- pci_read_config_byte(amd_pdev, 0x41, &rnen);
- rnen |= (1 << 7); /* PMIO enable */
- pci_write_config_byte(amd_pdev, 0x41, rnen);
+ pci_read_config_byte(priv->pcidev, 0x41, &rnen);
+ rnen |= BIT(7); /* PMIO enable */
+ pci_write_config_byte(priv->pcidev, 0x41, rnen);
return 0;
}
static void amd_rng_cleanup(struct hwrng *rng)
{
+ struct amd768_priv *priv = (struct amd768_priv *)rng->priv;
u8 rnen;
- pci_read_config_byte(amd_pdev, 0x40, &rnen);
- rnen &= ~(1 << 7); /* RNG off */
- pci_write_config_byte(amd_pdev, 0x40, rnen);
+ pci_read_config_byte(priv->pcidev, 0x40, &rnen);
+ rnen &= ~BIT(7); /* RNG off */
+ pci_write_config_byte(priv->pcidev, 0x40, rnen);
}
-
static struct hwrng amd_rng = {
.name = "amd",
.init = amd_rng_init,
.cleanup = amd_rng_cleanup,
- .data_present = amd_rng_data_present,
- .data_read = amd_rng_data_read,
+ .read = amd_rng_read,
};
-
static int __init mod_init(void)
{
int err = -ENODEV;
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
u32 pmbase;
+ struct amd768_priv *priv;
for_each_pci_dev(pdev) {
ent = pci_match_id(pci_tbl, pdev);
@@ -123,42 +137,44 @@ static int __init mod_init(void)
goto found;
}
/* Device not found. */
- goto out;
+ return -ENODEV;
found:
err = pci_read_config_dword(pdev, 0x58, &pmbase);
if (err)
- goto out;
- err = -EIO;
+ return err;
+
pmbase &= 0x0000FF00;
if (pmbase == 0)
- goto out;
- if (!request_region(pmbase + 0xF0, 8, "AMD HWRNG")) {
- dev_err(&pdev->dev, "AMD HWRNG region 0x%x already in use!\n",
+ return -EIO;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
+ PMBASE_SIZE, DRV_NAME)) {
+ dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
pmbase + 0xF0);
- err = -EBUSY;
- goto out;
+ return -EBUSY;
}
- amd_rng.priv = (unsigned long)pmbase;
- amd_pdev = pdev;
-
- pr_info("AMD768 RNG detected\n");
- err = hwrng_register(&amd_rng);
- if (err) {
- pr_err(PFX "RNG registering failed (%d)\n",
- err);
- release_region(pmbase + 0xF0, 8);
- goto out;
+
+ priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
+ PMBASE_SIZE);
+ if (!priv->iobase) {
+ pr_err(DRV_NAME "Cannot map ioport\n");
+ return -ENOMEM;
}
-out:
- return err;
+
+ amd_rng.priv = (unsigned long)priv;
+ priv->pcidev = pdev;
+
+ pr_info(DRV_NAME " detected\n");
+ return devm_hwrng_register(&pdev->dev, &amd_rng);
}
static void __exit mod_exit(void)
{
- u32 pmbase = (unsigned long)amd_rng.priv;
- release_region(pmbase + 0xF0, 8);
- hwrng_unregister(&amd_rng);
}
module_init(mod_init);
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index af2149273fe0..574211a49549 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -92,9 +92,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
bcm2835_rng_ops.priv = (unsigned long)rng_base;
rng_id = of_match_node(bcm2835_rng_of_match, np);
- if (!rng_id)
+ if (!rng_id) {
+ iounmap(rng_base);
return -EINVAL;
-
+ }
/* Check for rng init function, execute it */
rng_setup = rng_id->data;
if (rng_setup)
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
new file mode 100644
index 000000000000..066ae0e78d63
--- /dev/null
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -0,0 +1,99 @@
+/*
+ * Hardware Random Number Generator support for Cavium, Inc.
+ * Thunder processor family.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cavium, Inc.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+struct cavium_rng {
+ struct hwrng ops;
+ void __iomem *result;
+};
+
+/* Read data from the RNG unit */
+static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
+{
+ struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
+ unsigned int size = max;
+
+ while (size >= 8) {
+ *((u64 *)dat) = readq(p->result);
+ size -= 8;
+ dat += 8;
+ }
+ while (size > 0) {
+ *((u8 *)dat) = readb(p->result);
+ size--;
+ dat++;
+ }
+ return max;
+}
+
+/* Map Cavium RNG to an HWRNG object */
+static int cavium_rng_probe_vf(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct cavium_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ /* Map the RNG result */
+ rng->result = pcim_iomap(pdev, 0, 0);
+ if (!rng->result) {
+ dev_err(&pdev->dev, "Error iomap failed retrieving result.\n");
+ return -ENOMEM;
+ }
+
+ rng->ops.name = "cavium rng";
+ rng->ops.read = cavium_rng_read;
+ rng->ops.quality = 1000;
+
+ pci_set_drvdata(pdev, rng);
+
+ ret = hwrng_register(&rng->ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Remove the VF */
+static void cavium_rng_remove_vf(struct pci_dev *pdev)
+{
+ struct cavium_rng *rng;
+
+ rng = pci_get_drvdata(pdev);
+ hwrng_unregister(&rng->ops);
+}
+
+static const struct pci_device_id cavium_rng_vf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
+ {0,},
+};
+MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);
+
+static struct pci_driver cavium_rng_vf_driver = {
+ .name = "cavium_rng_vf",
+ .id_table = cavium_rng_vf_id_table,
+ .probe = cavium_rng_probe_vf,
+ .remove = cavium_rng_remove_vf,
+};
+module_pci_driver(cavium_rng_vf_driver);
+
+MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
new file mode 100644
index 000000000000..a944e0a47f42
--- /dev/null
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -0,0 +1,94 @@
+/*
+ * Hardware Random Number Generator support for Cavium Inc.
+ * Thunder processor family.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cavium, Inc.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#define THUNDERX_RNM_ENT_EN 0x1
+#define THUNDERX_RNM_RNG_EN 0x2
+
+struct cavium_rng_pf {
+ void __iomem *control_status;
+};
+
+/* Enable the RNG hardware and activate the VF */
+static int cavium_rng_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct cavium_rng_pf *rng;
+ int iov_err;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ /* Map the RNG control */
+ rng->control_status = pcim_iomap(pdev, 0, 0);
+ if (!rng->control_status) {
+ dev_err(&pdev->dev,
+ "Error iomap failed retrieving control_status.\n");
+ return -ENOMEM;
+ }
+
+ /* Enable the RNG hardware and entropy source */
+ writeq(THUNDERX_RNM_RNG_EN | THUNDERX_RNM_ENT_EN,
+ rng->control_status);
+
+ pci_set_drvdata(pdev, rng);
+
+ /* Enable the Cavium RNG as a VF */
+ iov_err = pci_enable_sriov(pdev, 1);
+ if (iov_err != 0) {
+ /* Disable the RNG hardware and entropy source */
+ writeq(0, rng->control_status);
+ dev_err(&pdev->dev,
+ "Error initializing RNG virtual function,(%i).\n",
+ iov_err);
+ return iov_err;
+ }
+
+ return 0;
+}
+
+/* Disable VF and RNG Hardware */
+static void cavium_rng_remove(struct pci_dev *pdev)
+{
+ struct cavium_rng_pf *rng;
+
+ rng = pci_get_drvdata(pdev);
+
+ /* Remove the VF */
+ pci_disable_sriov(pdev);
+
+ /* Disable the RNG hardware and entropy source */
+ writeq(0, rng->control_status);
+}
+
+static const struct pci_device_id cavium_rng_pf_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa018), 0, 0, 0}, /* Thunder RNM */
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, cavium_rng_pf_id_table);
+
+static struct pci_driver cavium_rng_pf_driver = {
+ .name = "cavium_rng_pf",
+ .id_table = cavium_rng_pf_id_table,
+ .probe = cavium_rng_probe,
+ .remove = cavium_rng_remove,
+};
+
+module_pci_driver(cavium_rng_pf_driver);
+MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 9203f2d130c0..d2d2c89de5b4 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -84,14 +84,14 @@ static size_t rng_buffer_size(void)
static void add_early_randomness(struct hwrng *rng)
{
- unsigned char bytes[16];
int bytes_read;
+ size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ bytes_read = rng_get_data(rng, rng_buffer, size, 1);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
- add_device_randomness(bytes, bytes_read);
+ add_device_randomness(rng_buffer, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
@@ -449,22 +449,6 @@ int hwrng_register(struct hwrng *rng)
goto out;
mutex_lock(&rng_mutex);
-
- /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
- err = -ENOMEM;
- if (!rng_buffer) {
- rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
- if (!rng_buffer)
- goto out_unlock;
- }
- if (!rng_fillbuf) {
- rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
- if (!rng_fillbuf) {
- kfree(rng_buffer);
- goto out_unlock;
- }
- }
-
/* Must not register two RNGs with the same name. */
err = -EEXIST;
list_for_each_entry(tmp, &rng_list, list) {
@@ -573,7 +557,26 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
static int __init hwrng_modinit(void)
{
- return register_miscdev();
+ int ret = -ENOMEM;
+
+ /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
+ rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
+ if (!rng_buffer)
+ return -ENOMEM;
+
+ rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
+ if (!rng_fillbuf) {
+ kfree(rng_buffer);
+ return -ENOMEM;
+ }
+
+ ret = register_miscdev();
+ if (ret) {
+ kfree(rng_fillbuf);
+ kfree(rng_buffer);
+ }
+
+ return ret;
}
static void __exit hwrng_modexit(void)
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 0d0579fe465e..e7a245942029 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -24,15 +24,12 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/hw_random.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-
-#define PFX KBUILD_MODNAME ": "
#define GEODE_RNG_DATA_REG 0x50
#define GEODE_RNG_STATUS_REG 0x54
@@ -85,7 +82,6 @@ static struct hwrng geode_rng = {
static int __init mod_init(void)
{
- int err = -ENODEV;
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
void __iomem *mem;
@@ -93,43 +89,27 @@ static int __init mod_init(void)
for_each_pci_dev(pdev) {
ent = pci_match_id(pci_tbl, pdev);
- if (ent)
- goto found;
- }
- /* Device not found. */
- goto out;
-
-found:
- rng_base = pci_resource_start(pdev, 0);
- if (rng_base == 0)
- goto out;
- err = -ENOMEM;
- mem = ioremap(rng_base, 0x58);
- if (!mem)
- goto out;
- geode_rng.priv = (unsigned long)mem;
-
- pr_info("AMD Geode RNG detected\n");
- err = hwrng_register(&geode_rng);
- if (err) {
- pr_err(PFX "RNG registering failed (%d)\n",
- err);
- goto err_unmap;
+ if (ent) {
+ rng_base = pci_resource_start(pdev, 0);
+ if (rng_base == 0)
+ return -ENODEV;
+
+ mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
+ if (!mem)
+ return -ENOMEM;
+ geode_rng.priv = (unsigned long)mem;
+
+ pr_info("AMD Geode RNG detected\n");
+ return devm_hwrng_register(&pdev->dev, &geode_rng);
+ }
}
-out:
- return err;
-err_unmap:
- iounmap(mem);
- goto out;
+ /* Device not found. */
+ return -ENODEV;
}
static void __exit mod_exit(void)
{
- void __iomem *mem = (void __iomem *)geode_rng.priv;
-
- hwrng_unregister(&geode_rng);
- iounmap(mem);
}
module_init(mod_init);
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 0cfd81bcaeac..58bef39f7286 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -76,9 +76,6 @@ static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
struct meson_rng_data *data =
container_of(rng, struct meson_rng_data, rng);
- if (max < sizeof(u32))
- return 0;
-
*(u32 *)buf = readl_relaxed(data->base + RNG_DATA);
return sizeof(u32);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 01d4be2c354b..f5c26a5f6875 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -385,7 +385,7 @@ static int omap_rng_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret) {
+ if (ret < 0) {
dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
pm_runtime_put_noidle(&pdev->dev);
goto err_ioremap;
@@ -443,7 +443,7 @@ static int __maybe_unused omap_rng_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
dev_err(dev, "Failed to runtime_get device: %d\n", ret);
pm_runtime_put_noidle(dev);
return ret;
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 8da14f1a1f56..37a58d78aab3 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -71,12 +71,7 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count)
return 0;
}
-static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
-{
- return 1;
-}
-
-static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
+static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
{
int r;
@@ -88,8 +83,7 @@ static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
static struct hwrng omap3_rom_rng_ops = {
.name = "omap3-rom",
- .data_present = omap3_rom_rng_data_present,
- .data_read = omap3_rom_rng_data_read,
+ .read = omap3_rom_rng_read,
};
static int omap3_rom_rng_probe(struct platform_device *pdev)
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 699b7259f5d7..545df485bcc4 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -26,7 +26,7 @@
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define SDCRNG_CTL_REG 0x00
#define SDCRNG_CTL_FVLD_M 0x0000f000
@@ -95,42 +95,20 @@ static struct hwrng pasemi_rng = {
.data_read = pasemi_rng_data_read,
};
-static int rng_probe(struct platform_device *ofdev)
+static int rng_probe(struct platform_device *pdev)
{
void __iomem *rng_regs;
- struct device_node *rng_np = ofdev->dev.of_node;
- struct resource res;
- int err = 0;
+ struct resource *res;
- err = of_address_to_resource(rng_np, 0, &res);
- if (err)
- return -ENODEV;
-
- rng_regs = ioremap(res.start, 0x100);
-
- if (!rng_regs)
- return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng_regs))
+ return PTR_ERR(rng_regs);
pasemi_rng.priv = (unsigned long)rng_regs;
pr_info("Registering PA Semi RNG\n");
-
- err = hwrng_register(&pasemi_rng);
-
- if (err)
- iounmap(rng_regs);
-
- return err;
-}
-
-static int rng_remove(struct platform_device *dev)
-{
- void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
-
- hwrng_unregister(&pasemi_rng);
- iounmap(rng_regs);
-
- return 0;
+ return devm_hwrng_register(&pdev->dev, &pasemi_rng);
}
static const struct of_device_id rng_match[] = {
@@ -146,7 +124,6 @@ static struct platform_driver rng_driver = {
.of_match_table = rng_match,
},
.probe = rng_probe,
- .remove = rng_remove,
};
module_platform_driver(rng_driver);
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 108897bea2d0..11dc9b7c09ce 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -143,7 +143,6 @@ static struct platform_driver pic32_rng_driver = {
.remove = pic32_rng_remove,
.driver = {
.name = "pic32-rng",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_rng_of_match),
},
};
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 1d35363d23c5..938ec10e733d 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -54,9 +54,6 @@ static int st_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
u32 status;
int i;
- if (max < sizeof(u16))
- return -EINVAL;
-
/* Wait until FIFO is full - max 4us */
for (i = 0; i < ST_RNG_FILL_FIFO_TIMEOUT; i++) {
status = readl_relaxed(ddata->base + ST_RNG_STATUS_REG);
@@ -111,6 +108,7 @@ static int st_rng_probe(struct platform_device *pdev)
ret = hwrng_register(&ddata->ops);
if (ret) {
dev_err(&pdev->dev, "Failed to register HW RNG\n");
+ clk_disable_unprepare(clk);
return ret;
}
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
index a7b694913416..1093583b579c 100644
--- a/drivers/char/hw_random/tx4939-rng.c
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -144,22 +144,13 @@ static int __init tx4939_rng_probe(struct platform_device *dev)
}
platform_set_drvdata(dev, rngdev);
- return hwrng_register(&rngdev->rng);
-}
-
-static int __exit tx4939_rng_remove(struct platform_device *dev)
-{
- struct tx4939_rng *rngdev = platform_get_drvdata(dev);
-
- hwrng_unregister(&rngdev->rng);
- return 0;
+ return devm_hwrng_register(&dev->dev, &rngdev->rng);
}
static struct platform_driver tx4939_rng_driver = {
.driver = {
.name = "tx4939-rng",
},
- .remove = tx4939_rng_remove,
};
module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 5a9350b1069a..7f816655cbbf 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -76,3 +76,11 @@ config IPMI_POWEROFF
the IPMI management controller is capable of this.
endif # IPMI_HANDLER
+
+config ASPEED_BT_IPMI_BMC
+ depends on ARCH_ASPEED
+ tristate "BT IPMI bmc driver"
+ help
+ Provides a driver for the BT (Block Transfer) IPMI interface
+ found on Aspeed SOCs (AST2400 and AST2500). The driver
+ implements the BMC side of the BT interface.
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index f3ffde1f5f1f..0d98cd91def1 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 000000000000..fc9e8891eae3
--- /dev/null
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is a BMC device used to communicate to the host
+ */
+#define DEVICE_NAME "ipmi-bt-host"
+
+#define BT_IO_BASE 0xe4
+#define BT_IRQ 10
+
+#define BT_CR0 0x0
+#define BT_CR0_IO_BASE 16
+#define BT_CR0_IRQ 12
+#define BT_CR0_EN_CLR_SLV_RDP 0x8
+#define BT_CR0_EN_CLR_SLV_WRP 0x4
+#define BT_CR0_ENABLE_IBT 0x1
+#define BT_CR1 0x4
+#define BT_CR1_IRQ_H2B 0x01
+#define BT_CR1_IRQ_HBUSY 0x40
+#define BT_CR2 0x8
+#define BT_CR2_IRQ_H2B 0x01
+#define BT_CR2_IRQ_HBUSY 0x40
+#define BT_CR3 0xc
+#define BT_CTRL 0x10
+#define BT_CTRL_B_BUSY 0x80
+#define BT_CTRL_H_BUSY 0x40
+#define BT_CTRL_OEM0 0x20
+#define BT_CTRL_SMS_ATN 0x10
+#define BT_CTRL_B2H_ATN 0x08
+#define BT_CTRL_H2B_ATN 0x04
+#define BT_CTRL_CLR_RD_PTR 0x02
+#define BT_CTRL_CLR_WR_PTR 0x01
+#define BT_BMC2HOST 0x14
+#define BT_INTMASK 0x18
+#define BT_INTMASK_B2H_IRQEN 0x01
+#define BT_INTMASK_B2H_IRQ 0x02
+#define BT_INTMASK_BMC_HWRST 0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+ struct device dev;
+ struct miscdevice miscdev;
+ void __iomem *base;
+ int irq;
+ wait_queue_head_t queue;
+ struct timer_list poll_timer;
+ struct mutex mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+ return ioread8(bt_bmc->base + reg);
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+ iowrite8(data, bt_bmc->base + reg);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+ return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ buf[i] = bt_read(bt_bmc);
+ return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+ bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ bt_write(bt_bmc, buf[i]);
+ return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+ return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ if (atomic_inc_return(&open_count) == 1) {
+ clr_b_busy(bt_bmc);
+ return 0;
+ }
+
+ atomic_dec(&open_count);
+ return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture the
+ * whole message before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N
+ * Length NetFn/LUN Seq Cmd Data
+ *
+ */
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 len;
+ int len_byte = 1;
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nread;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ if (wait_event_interruptible(bt_bmc->queue,
+ bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ set_b_busy(bt_bmc);
+ clr_h2b_atn(bt_bmc);
+ clr_rd_ptr(bt_bmc);
+
+ /*
+ * The BT frames start with the message length, which does not
+ * include the length byte.
+ */
+ kbuffer[0] = bt_read(bt_bmc);
+ len = kbuffer[0];
+
+ /* We pass the length back to userspace as well */
+ if (len + 1 > count)
+ len = count - 1;
+
+ while (len) {
+ nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+ bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+ if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+ ret = -EFAULT;
+ break;
+ }
+ len -= nread;
+ buf += nread + len_byte;
+ ret += nread + len_byte;
+ len_byte = 0;
+ }
+
+ clr_b_busy(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+/*
+ * BT Message response format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N
+ * Length NetFn/LUN Seq Cmd Code Data
+ */
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nwritten;
+
+ /*
+ * Require at least a minimal response: length, NetFn/LUN,
+ * seq, cmd and completion code add up to five bytes.
+ */
+ if (count < 5)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ /*
+ * There's no interrupt for clearing bmc busy so we have to
+ * poll
+ */
+ if (wait_event_interruptible(bt_bmc->queue,
+ !(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ clr_wr_ptr(bt_bmc);
+
+ while (count) {
+ nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+ if (copy_from_user(&kbuffer, buf, nwritten)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ bt_writen(bt_bmc, kbuffer, nwritten);
+
+ count -= nwritten;
+ buf += nwritten;
+ ret += nwritten;
+ }
+
+ set_b2h_atn(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
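
Read and write together give the full request/response cycle as seen from the BMC. A hedged userspace sketch of a consumer (the /dev/ipmi-bt-host name comes from DEVICE_NAME above; the NetFn handling and completion code are illustrative, not a full IPMI implementation):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint8_t req[256], rsp[5];
            ssize_t n;
            int fd = open("/dev/ipmi-bt-host", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Blocks until the host raises H2B_ATN; req[0] is the length */
            n = read(fd, req, sizeof(req));
            if (n < 4) {
                    close(fd);
                    return 1;
            }

            rsp[0] = 4;              /* length, not counting itself */
            rsp[1] = req[1] + 0x04;  /* response NetFn = request NetFn + 1 */
            rsp[2] = req[2];         /* echo the sequence number */
            rsp[3] = req[3];         /* echo the command */
            rsp[4] = 0x00;           /* completion code: success */
            write(fd, rsp, sizeof(rsp)); /* 5 bytes, the driver's minimum */

            close(fd);
            return 0;
    }
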
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ switch (cmd) {
+ case BT_BMC_IOCTL_SMS_ATN:
+ set_sms_atn(bt_bmc);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ atomic_dec(&open_count);
+ set_b_busy(bt_bmc);
+ return 0;
+}
+
+static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ unsigned int mask = 0;
+ u8 ctrl;
+
+ poll_wait(file, &bt_bmc->queue, wait);
+
+ ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+ if (ctrl & BT_CTRL_H2B_ATN)
+ mask |= POLLIN;
+
+ if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+ mask |= POLLOUT;
+
+ return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+ .owner = THIS_MODULE,
+ .open = bt_bmc_open,
+ .read = bt_bmc_read,
+ .write = bt_bmc_write,
+ .release = bt_bmc_release,
+ .poll = bt_bmc_poll,
+ .unlocked_ioctl = bt_bmc_ioctl,
+};
+
+static void poll_timer(unsigned long data)
+{
+ struct bt_bmc *bt_bmc = (void *)data;
+
+ bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+ wake_up(&bt_bmc->queue);
+ add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+ struct bt_bmc *bt_bmc = arg;
+ u32 reg;
+
+ reg = ioread32(bt_bmc->base + BT_CR2);
+ reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+ if (!reg)
+ return IRQ_NONE;
+
+ /* ack pending IRQs */
+ iowrite32(reg, bt_bmc->base + BT_CR2);
+
+ wake_up(&bt_bmc->queue);
+ return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ u32 reg;
+ int rc;
+
+ bt_bmc->irq = platform_get_irq(pdev, 0);
+ if (!bt_bmc->irq)
+ return -ENODEV;
+
+ rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+ DEVICE_NAME, bt_bmc);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+ bt_bmc->irq = 0;
+ return rc;
+ }
+
+ /*
+ * Configure IRQs on the bmc clearing the H2B and HBUSY bits;
+ * H2B will be asserted when the bmc has data for us; HBUSY
+ * will be cleared (along with B2H) when we can write the next
+ * message to the BT buffer
+ */
+ reg = ioread32(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ iowrite32(reg, bt_bmc->base + BT_CR1);
+
+ return 0;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc;
+ struct device *dev;
+ struct resource *res;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ dev = &pdev->dev;
+ dev_info(dev, "Found bt bmc device\n");
+
+ bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+ if (!bt_bmc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, bt_bmc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bt_bmc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
+
+ mutex_init(&bt_bmc->mutex);
+ init_waitqueue_head(&bt_bmc->queue);
+
+ bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+ bt_bmc->miscdev.name = DEVICE_NAME;
+ bt_bmc->miscdev.fops = &bt_bmc_fops;
+ bt_bmc->miscdev.parent = dev;
+ rc = misc_register(&bt_bmc->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register misc device\n");
+ return rc;
+ }
+
+ bt_bmc_config_irq(bt_bmc, pdev);
+
+ if (bt_bmc->irq) {
+ dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+ } else {
+ dev_info(dev, "No IRQ; using timer\n");
+ setup_timer(&bt_bmc->poll_timer, poll_timer,
+ (unsigned long)bt_bmc);
+ bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+ add_timer(&bt_bmc->poll_timer);
+ }
+
+ iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) |
+ (BT_IRQ << BT_CR0_IRQ) |
+ BT_CR0_EN_CLR_SLV_RDP |
+ BT_CR0_EN_CLR_SLV_WRP |
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
+
+ clr_b_busy(bt_bmc);
+
+ return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&bt_bmc->miscdev);
+ if (!bt_bmc->irq)
+ del_timer_sync(&bt_bmc->poll_timer);
+ return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+ { .compatible = "aspeed,ast2400-ibt-bmc" },
+ { },
+};
+
+static struct platform_driver bt_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = bt_bmc_match,
+ },
+ .probe = bt_bmc_probe,
+ .remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index d8619998cfb5..fcdd886819f5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2891,11 +2891,11 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
intf->curr_channel = IPMI_MAX_CHANNELS;
}
+ rv = ipmi_bmc_register(intf, i);
+
if (rv == 0)
rv = add_proc_entries(intf, i);
- rv = ipmi_bmc_register(intf, i);
-
out:
if (rv) {
if (intf->proc_dir)
@@ -2982,8 +2982,6 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
int intf_num = intf->intf_num;
ipmi_user_t user;
- ipmi_bmc_unregister(intf);
-
mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
@@ -3007,6 +3005,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
mutex_unlock(&ipmi_interfaces_mutex);
remove_proc_entries(intf);
+ ipmi_bmc_unregister(intf);
/*
* Call all the watcher interfaces to tell them that
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d23368874710..6af1ce04b3da 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -748,10 +748,7 @@ static int pp_release(struct inode *inode, struct file *file)
}
if (pp->pdev) {
- const char *name = pp->pdev->name;
-
parport_unregister_device(pp->pdev);
- kfree(name);
pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 3efb3bf0ab83..d6876d506220 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -479,8 +479,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
@@ -2100,23 +2100,37 @@ unsigned long get_random_long(void)
}
EXPORT_SYMBOL(get_random_long);
-/*
- * randomize_range() returns a start address such that
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
+ * @range: The size of the area, starting at @start, within which the
+ * random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
*
- * [...... <range> .....]
- * start end
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned. We now align it regardless.
*
- * a <range> with size "len" starting at the return value is inside in the
- * area defined by [start, end], but is otherwise randomized.
+ * Return: A page aligned address within [start, start + range). On error,
+ * @start is returned.
*/
unsigned long
-randomize_range(unsigned long start, unsigned long end, unsigned long len)
+randomize_page(unsigned long start, unsigned long range)
{
- unsigned long range = end - len - start;
+ if (!PAGE_ALIGNED(start)) {
+ range -= PAGE_ALIGN(start) - start;
+ start = PAGE_ALIGN(start);
+ }
- if (end <= start + len)
- return 0;
- return PAGE_ALIGN(get_random_int() % range + start);
+ if (start > ULONG_MAX - range)
+ range = ULONG_MAX - start;
+
+ range >>= PAGE_SHIFT;
+
+ if (range == 0)
+ return start;
+
+ return start + (get_random_long() % range << PAGE_SHIFT);
}
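
The new arithmetic is easiest to sanity-check with concrete numbers. A userspace model of the same math (PAGE_SHIFT of 12 assumed, rand() standing in for get_random_long(); this illustrates the calculation only, it is not kernel code):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long model_randomize_page(unsigned long start,
                                              unsigned long range)
    {
            if (start & (PAGE_SIZE - 1)) {
                    range -= PAGE_ALIGN(start) - start;
                    start = PAGE_ALIGN(start);
            }

            if (start > ULONG_MAX - range)
                    range = ULONG_MAX - start;

            range >>= PAGE_SHIFT;
            if (range == 0)
                    return start;

            return start + ((unsigned long)rand() % range << PAGE_SHIFT);
    }

    int main(void)
    {
            /* 0x10800 aligns up to 0x11000, eating 0x800 of the range;
             * two whole pages remain, so the result is always 0x11000
             * or 0x12000 */
            printf("0x%lx\n", model_randomize_page(0x10800, 0x2800));
            return 0;
    }
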
/* Interface for in-kernel drivers of true hardware RNGs.
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index e496daefe9e0..719c5b4eed39 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -934,7 +934,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
if (ret > 0) {
struct inode *inode = file_inode(file);
- inode->i_atime = current_fs_time(inode->i_sb);
+ inode->i_atime = current_time(inode);
}
return ret;
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 480a777db577..7c19d9b22785 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/reboot.h>
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 8de61876f633..3a9149cf0110 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -813,9 +813,6 @@ int tpm_do_selftest(struct tpm_chip *chip)
continue;
}
- if (rc < TPM_HEADER_SIZE)
- return -EFAULT;
-
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5da47e26a012..5649234b7316 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -38,7 +38,6 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
-#include <linux/kconfig.h>
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
@@ -889,7 +888,7 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
return 0;
/* Try lock this page */
- if (buf->ops->steal(pipe, buf) == 0) {
+ if (pipe_buf_steal(pipe, buf) == 0) {
/* Get reference and unlock page for moving */
get_page(buf->page);
unlock_page(buf->page);
@@ -1540,19 +1539,29 @@ static void remove_port_data(struct port *port)
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
+ spin_unlock_irq(&port->inbuf_lock);
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->inbuf_lock);
+ do {
+ spin_lock_irq(&port->inbuf_lock);
+ buf = virtqueue_detach_unused_buf(port->in_vq);
+ spin_unlock_irq(&port->inbuf_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
/* Free pending buffers from the out-queue. */
- while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->outvq_lock);
+ do {
+ spin_lock_irq(&port->outvq_lock);
+ buf = virtqueue_detach_unused_buf(port->out_vq);
+ spin_unlock_irq(&port->outvq_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
}
/*
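The reworked loops above detach one buffer at a time and drop the spinlock before each free_buf() call, presumably because freeing may sleep when DMA buffers are involved. A generic user-space sketch of the detach-under-lock, free-outside-lock pattern, using a pthread mutex and a toy queue (detach_one(), free_item() and the queue are stand-ins, not virtio APIs):

    #include <pthread.h>
    #include <stdlib.h>

    /* Toy stand-ins for the virtqueue and its buffers. */
    struct item { struct item *next; };
    static struct item *queue;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static struct item *detach_one(void)      /* must run under the lock */
    {
            struct item *it = queue;
            if (it)
                    queue = it->next;
            return it;
    }

    static void free_item(struct item *it)    /* may sleep; never call under lock */
    {
            free(it);
    }

    static void drain_queue(void)
    {
            struct item *it;

            /* Detach under the lock, free outside it, one element at a time. */
            do {
                    pthread_mutex_lock(&lock);
                    it = detach_one();
                    pthread_mutex_unlock(&lock);
                    if (it)
                            free_item(it);
            } while (it);
    }

    int main(void) { drain_queue(); return 0; }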
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 190122e64a3a..85a449cf61e3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -203,7 +203,7 @@ at91_clk_register_programmable(struct regmap *regmap,
ret = clk_hw_register(NULL, &prog->hw);
if (ret) {
kfree(prog);
- hw = &prog->hw;
+ hw = ERR_PTR(ret);
}
return hw;
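The fix matters because callers check the returned pointer with IS_ERR(); returning &prog->hw after kfree(prog) would hand back a dangling pointer that also fails the error check. A minimal sketch of the ERR_PTR/IS_ERR encoding, simplified from the kernel's err.h:

    #include <stdio.h>
    #include <errno.h>

    /* Simplified ERR_PTR/IS_ERR: errno values live in the top page of memory. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long PTR_ERR(const void *p)  { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *hw = ERR_PTR(-ENOMEM);   /* what the fixed error path returns */

            if (IS_ERR(hw))
                    printf("registration failed: %ld\n", PTR_ERR(hw));
            return 0;
    }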
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index b68bf573dcfb..8c7763fd9efc 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -502,8 +502,12 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ const struct bcm2835_pll_data *data = pll->data;
u32 ndiv, fdiv;
+ rate = clamp(rate, data->min_rate, data->max_rate);
+
bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
@@ -608,13 +612,6 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
u32 ana[4];
int i;
- if (rate < data->min_rate || rate > data->max_rate) {
- dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n",
- clk_hw_get_name(hw), rate,
- data->min_rate, data->max_rate);
- return -EINVAL;
- }
-
if (rate > data->max_fb_rate) {
use_fb_prediv = true;
rate /= 2;
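Moving validation from set_rate() to round_rate() follows the clk framework contract: round_rate() reports the rate the hardware would actually produce, so clamping there means set_rate() is only ever asked for in-range values. A sketch of the pattern with hypothetical limits standing in for data->min_rate/max_rate:

    #include <stdio.h>

    /* Hypothetical PLL limits, standing in for data->min_rate/max_rate. */
    #define PLL_MIN_RATE  600000000UL
    #define PLL_MAX_RATE 2400000000UL

    static unsigned long clamp_ul(unsigned long v, unsigned long lo,
                                  unsigned long hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* round_rate() clamps, so set_rate() never sees an out-of-spec request. */
    static unsigned long pll_round_rate(unsigned long rate)
    {
            return clamp_ul(rate, PLL_MIN_RATE, PLL_MAX_RATE);
    }

    int main(void)
    {
            printf("%lu\n", pll_round_rate(100000000UL));  /* -> 600000000 */
            printf("%lu\n", pll_round_rate(3000000000UL)); /* -> 2400000000 */
            return 0;
    }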
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index edf3b96b3b73..1d99292e2039 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
}
/* register clk-provider */
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 0718e831475f..3b784b593afd 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
}
/* register clk-provider */
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index 8802a2dd56ac..f674778fb3ac 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np)
hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index b637f5979023..eb953d3b0b69 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -216,6 +216,7 @@ static int max77686_clk_probe(struct platform_device *pdev)
return -EINVAL;
}
+ drv_data->num_clks = num_clks;
drv_data->max_clk_data = devm_kcalloc(dev, num_clks,
sizeof(*drv_data->max_clk_data),
GFP_KERNEL);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 20b105584f82..80ae2a51452d 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -700,6 +700,7 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
struct mux_hwclock *hwc,
const struct clk_ops *ops,
unsigned long min_rate,
+ unsigned long max_rate,
unsigned long pct80_rate,
const char *fmt, int idx)
{
@@ -728,6 +729,8 @@ static struct clk * __init create_mux_common(struct clockgen *cg,
continue;
if (rate < min_rate)
continue;
+ if (rate > max_rate)
+ continue;
parent_names[j] = div->name;
hwc->parent_to_clksel[j] = i;
@@ -759,7 +762,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
struct mux_hwclock *hwc;
const struct clockgen_pll_div *div;
unsigned long plat_rate, min_rate;
- u64 pct80_rate;
+ u64 max_rate, pct80_rate;
u32 clksel;
hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
@@ -787,8 +790,8 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
return NULL;
}
- pct80_rate = clk_get_rate(div->clk);
- pct80_rate *= 8;
+ max_rate = clk_get_rate(div->clk);
+ pct80_rate = max_rate * 8;
do_div(pct80_rate, 10);
plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
@@ -798,7 +801,7 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
else
min_rate = plat_rate / 2;
- return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+ return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
pct80_rate, "cg-cmux%d", idx);
}
@@ -813,7 +816,7 @@ static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
hwc->reg = cg->regs + 0x20 * idx + 0x10;
hwc->info = cg->info.hwaccel[idx];
- return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+ return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
"cg-hwaccel%d", idx);
}
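The cmux change derives both limits from the selected PLL divider: its full rate becomes max_rate and 80% of it becomes pct80_rate, computed in 64 bits because rate * 8 can overflow 32 bits. A standalone sketch of that arithmetic, with do_div() replaced by plain 64-bit division:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t max_rate = 1600000000ULL;  /* e.g. PLL divider clock rate */
            uint64_t pct80_rate = max_rate * 8; /* would overflow a u32 */

            pct80_rate /= 10;                   /* kernel uses do_div() here */

            printf("max=%llu pct80=%llu\n",
                   (unsigned long long)max_rate,
                   (unsigned long long)pct80_rate); /* pct80 = 1280000000 */
            return 0;
    }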
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 5daddf5ecc4b..bc37030e38ba 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -463,22 +463,20 @@ static int xgene_clk_enable(struct clk_hw *hw)
struct xgene_clk *pclk = to_xgene_clk(hw);
unsigned long flags = 0;
u32 data;
- phys_addr_t reg;
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
if (pclk->param.csr_reg != NULL) {
pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
- reg = __pa(pclk->param.csr_reg);
/* First enable the clock */
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
data |= pclk->param.reg_clk_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_clk_offset);
- pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
- clk_hw_get_name(hw), &reg,
+ pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
+ clk_hw_get_name(hw),
pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
data);
@@ -488,8 +486,8 @@ static int xgene_clk_enable(struct clk_hw *hw)
data &= ~pclk->param.reg_csr_mask;
xgene_clk_write(data, pclk->param.csr_reg +
pclk->param.reg_csr_offset);
- pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
- clk_hw_get_name(hw), &reg,
+ pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
+ clk_hw_get_name(hw),
pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
data);
}
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index fe364e63f8de..c0e8e1f196aa 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -195,7 +195,7 @@ static void __init hi6220_clk_sys_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_sys,
ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
/* clocks in media controller */
@@ -252,7 +252,7 @@ static void __init hi6220_clk_media_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_media,
ARRAY_SIZE(hi6220_div_clks_media), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
/* clocks in pmctrl */
diff --git a/drivers/clk/imx/clk-imx1.c b/drivers/clk/imx/clk-imx1.c
index 99cf802fa51f..eaa462ad09e8 100644
--- a/drivers/clk/imx/clk-imx1.c
+++ b/drivers/clk/imx/clk-imx1.c
@@ -45,10 +45,13 @@ static void __iomem *ccm __initdata;
#define CCM_PCDR (ccm + 0x0020)
#define SCM_GCCR (ccm + 0x0810)
-static void __init _mx1_clocks_init(unsigned long fref)
+static void __init mx1_clocks_init_dt(struct device_node *np)
{
+ ccm = of_iomap(np, 0);
+ BUG_ON(!ccm);
+
clk[IMX1_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
- clk[IMX1_CLK_CLK32] = imx_obtain_fixed_clock("clk32", fref);
+ clk[IMX1_CLK_CLK32] = imx_obtain_fixed_clock("clk32", 32768);
clk[IMX1_CLK_CLK16M_EXT] = imx_clk_fixed("clk16m_ext", 16000000);
clk[IMX1_CLK_CLK16M] = imx_clk_gate("clk16m", "clk16m_ext", CCM_CSCR, 17);
clk[IMX1_CLK_CLK32_PREMULT] = imx_clk_fixed_factor("clk32_premult", "clk32", 512, 1);
@@ -74,45 +77,6 @@ static void __init _mx1_clocks_init(unsigned long fref)
clk[IMX1_CLK_USBD_GATE] = imx_clk_gate("usbd_gate", "clk48m", SCM_GCCR, 0);
imx_check_clocks(clk, ARRAY_SIZE(clk));
-}
-
-int __init mx1_clocks_init(unsigned long fref)
-{
- ccm = ioremap(MX1_CCM_BASE_ADDR, SZ_4K);
- BUG_ON(!ccm);
-
- _mx1_clocks_init(fref);
-
- clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx-gpt.0");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx-gpt.0");
- clk_register_clkdev(clk[IMX1_CLK_DMA_GATE], "ahb", "imx1-dma");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx1-dma");
- clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx1-uart.0");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx1-uart.0");
- clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx1-uart.1");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx1-uart.1");
- clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx1-uart.2");
- clk_register_clkdev(clk[IMX1_CLK_UART3_GATE], "ipg", "imx1-uart.2");
- clk_register_clkdev(clk[IMX1_CLK_HCLK], NULL, "imx1-i2c.0");
- clk_register_clkdev(clk[IMX1_CLK_PER2], "per", "imx1-cspi.0");
- clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ipg", "imx1-cspi.0");
- clk_register_clkdev(clk[IMX1_CLK_PER2], "per", "imx1-cspi.1");
- clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ipg", "imx1-cspi.1");
- clk_register_clkdev(clk[IMX1_CLK_PER2], "per", "imx1-fb.0");
- clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ipg", "imx1-fb.0");
- clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ahb", "imx1-fb.0");
-
- mxc_timer_init(MX1_TIM1_BASE_ADDR, MX1_TIM1_INT, GPT_TYPE_IMX1);
-
- return 0;
-}
-
-static void __init mx1_clocks_init_dt(struct device_node *np)
-{
- ccm = of_iomap(np, 0);
- BUG_ON(!ccm);
-
- _mx1_clocks_init(32768);
clk_data.clks = clk;
clk_data.clk_num = ARRAY_SIZE(clk);
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 19f9b622981a..7a6acc3e4a92 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -223,7 +223,7 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
temp64 *= mfn;
do_div(temp64, mfd);
- return (parent_rate * div) + (u32)temp64;
+ return parent_rate * div + (unsigned long)temp64;
}
static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -247,7 +247,11 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
do_div(temp64, parent_rate);
mfn = temp64;
- return parent_rate * div + parent_rate * mfn / mfd;
+ temp64 = (u64)parent_rate;
+ temp64 *= mfn;
+ do_div(temp64, mfd);
+
+ return parent_rate * div + (unsigned long)temp64;
}
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 380c372d528e..f042bd2a6a99 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -8,6 +8,7 @@ config COMMON_CLK_MEDIATEK
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
@@ -15,6 +16,7 @@ config COMMON_CLK_MT8135
config COMMON_CLK_MT8173
bool "Clock driver for Mediatek MT8173"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index ae461b16af75..0252939ba58f 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -183,14 +183,14 @@
/* CLKID_CLK81 */
#define CLKID_MPLL0 13
#define CLKID_MPLL1 14
-#define CLKID_MPLL2 15
+/* CLKID_MPLL2 */
#define CLKID_DDR 16
#define CLKID_DOS 17
#define CLKID_ISA 18
#define CLKID_PL301 19
#define CLKID_PERIPHS 20
#define CLKID_SPICC 21
-#define CLKID_I2C 22
+/* CLKID_I2C */
#define CLKID_SAR_ADC 23
#define CLKID_SMART_CARD 24
#define CLKID_RNG0 25
@@ -202,7 +202,7 @@
#define CLKID_ABUF 31
#define CLKID_HIU_IFACE 32
#define CLKID_ASSIST_MISC 33
-#define CLKID_SPI 34
+/* CLKID_SPI */
#define CLKID_I2S_SPDIF 35
#define CLKID_ETH 36
#define CLKID_DEMUX 37
@@ -218,12 +218,12 @@
#define CLKID_AIU 47
#define CLKID_UART1 48
#define CLKID_G2D 49
-#define CLKID_USB0 50
-#define CLKID_USB1 51
+/* CLKID_USB0 */
+/* CLKID_USB1 */
#define CLKID_RESET 52
#define CLKID_NAND 53
#define CLKID_DOS_PARSER 54
-#define CLKID_USB 55
+/* CLKID_USB */
#define CLKID_VDIN1 56
#define CLKID_AHB_ARB0 57
#define CLKID_EFUSE 58
@@ -232,8 +232,8 @@
#define CLKID_AHB_CTRL_BUS 61
#define CLKID_HDMI_INTR_SYNC 62
#define CLKID_HDMI_PCLK 63
-#define CLKID_USB1_DDR_BRIDGE 64
-#define CLKID_USB0_DDR_BRIDGE 65
+/* CLKID_USB1_DDR_BRIDGE */
+/* CLKID_USB0_DDR_BRIDGE */
#define CLKID_MMC_PCLK 66
#define CLKID_DVIN 67
#define CLKID_UART2 68
@@ -261,7 +261,7 @@
#define CLKID_AO_AHB_SRAM 90
#define CLKID_AO_AHB_BUS 91
#define CLKID_AO_IFACE 92
-#define CLKID_AO_I2C 93
+/* CLKID_AO_I2C */
/* CLKID_SD_EMMC_A */
/* CLKID_SD_EMMC_B */
/* CLKID_SD_EMMC_C */
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 3a51fff1b0e7..9adaf48aea23 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -313,7 +313,7 @@ static void __init mmp2_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index 87f2317b2a00..f110c02e83cb 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -262,7 +262,7 @@ static void __init pxa168_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index e22a67f76d93..64d1ef49caeb 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -282,7 +282,7 @@ static void __init pxa910_clk_init(struct device_node *np)
}
pxa_unit->apmu_base = of_iomap(np, 1);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
@@ -294,7 +294,7 @@ static void __init pxa910_clk_init(struct device_node *np)
}
pxa_unit->apbcp_base = of_iomap(np, 3);
- if (!pxa_unit->mpmu_base) {
+ if (!pxa_unit->apbcp_base) {
pr_err("failed to map apbcp registers\n");
return;
}
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 45905fc0d75b..cecb0fdfaef6 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -305,7 +305,7 @@ static const struct of_device_id armada_3700_periph_clock_of_match[] = {
};
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
void __iomem *reg, spinlock_t *lock,
- struct device *dev, struct clk_hw *hw)
+ struct device *dev, struct clk_hw **hw)
{
const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
*rate_ops = NULL;
@@ -329,6 +329,7 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
gate->lock = lock;
gate_ops = gate_hw->init->ops;
gate->reg = reg + (u64)gate->reg;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
}
if (data->rate_hw) {
@@ -353,13 +354,13 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
}
}
- hw = clk_hw_register_composite(dev, data->name, data->parent_names,
+ *hw = clk_hw_register_composite(dev, data->name, data->parent_names,
data->num_parents, mux_hw,
mux_ops, rate_hw, rate_ops,
gate_hw, gate_ops, CLK_IGNORE_UNUSED);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(*hw))
+ return PTR_ERR(*hw);
return 0;
}
@@ -400,7 +401,7 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
spin_lock_init(&driver_data->lock);
for (i = 0; i < num_periph; i++) {
- struct clk_hw *hw = driver_data->hw_data->hws[i];
+ struct clk_hw **hw = &driver_data->hw_data->hws[i];
if (armada_3700_add_composite_clk(&data[i], reg,
&driver_data->lock, dev, hw))
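The armada-37xx fix is the classic out-parameter bug: assigning to a struct clk_hw *hw parameter only updates the callee's local copy, so the caller's hws[i] slot stayed unset. Passing struct clk_hw ** lets the callee write through to the caller's array. A minimal sketch:

    #include <stdio.h>
    #include <stdlib.h>

    struct clk_hw { int id; };

    /* Broken: 'hw' is a local copy; the caller never sees the assignment. */
    static int add_clk_broken(struct clk_hw *hw)
    {
            hw = malloc(sizeof(*hw));     /* lost on return */
            return hw ? 0 : -1;
    }

    /* Fixed: write through the double pointer into the caller's slot. */
    static int add_clk_fixed(struct clk_hw **hw)
    {
            *hw = malloc(sizeof(**hw));
            return *hw ? 0 : -1;
    }

    int main(void)
    {
            struct clk_hw *slot = NULL;

            add_clk_broken(slot);
            printf("broken: %p\n", (void *)slot);  /* still NULL */

            add_clk_fixed(&slot);
            printf("fixed:  %p\n", (void *)slot);  /* now populated */
            free(slot);
            return 0;
    }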
diff --git a/drivers/clk/mvebu/orion.c b/drivers/clk/mvebu/orion.c
index fd129566c1ce..a6e5bee23385 100644
--- a/drivers/clk/mvebu/orion.c
+++ b/drivers/clk/mvebu/orion.c
@@ -21,6 +21,76 @@ static const struct coreclk_ratio orion_coreclk_ratios[] __initconst = {
};
/*
+ * Orion 5181
+ */
+
+#define SAR_MV88F5181_TCLK_FREQ 8
+#define SAR_MV88F5181_TCLK_FREQ_MASK 0x3
+
+static u32 __init mv88f5181_get_tclk_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F5181_TCLK_FREQ) &
+ SAR_MV88F5181_TCLK_FREQ_MASK;
+ if (opt == 0)
+ return 133333333;
+ else if (opt == 1)
+ return 150000000;
+ else if (opt == 2)
+ return 166666667;
+ else
+ return 0;
+}
+
+#define SAR_MV88F5181_CPU_FREQ 4
+#define SAR_MV88F5181_CPU_FREQ_MASK 0xf
+
+static u32 __init mv88f5181_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) &
+ SAR_MV88F5181_CPU_FREQ_MASK;
+ if (opt == 0)
+ return 333333333;
+ else if (opt == 1 || opt == 2)
+ return 400000000;
+ else if (opt == 3)
+ return 500000000;
+ else
+ return 0;
+}
+
+static void __init mv88f5181_get_clk_ratio(void __iomem *sar, int id,
+ int *mult, int *div)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) &
+ SAR_MV88F5181_CPU_FREQ_MASK;
+ if (opt == 0 || opt == 1) {
+ *mult = 1;
+ *div = 2;
+ } else if (opt == 2 || opt == 3) {
+ *mult = 1;
+ *div = 3;
+ } else {
+ *mult = 0;
+ *div = 1;
+ }
+}
+
+static const struct coreclk_soc_desc mv88f5181_coreclks = {
+ .get_tclk_freq = mv88f5181_get_tclk_freq,
+ .get_cpu_freq = mv88f5181_get_cpu_freq,
+ .get_clk_ratio = mv88f5181_get_clk_ratio,
+ .ratios = orion_coreclk_ratios,
+ .num_ratios = ARRAY_SIZE(orion_coreclk_ratios),
+};
+
+static void __init mv88f5181_clk_init(struct device_node *np)
+{
+ return mvebu_coreclk_setup(np, &mv88f5181_coreclks);
+}
+
+CLK_OF_DECLARE(mv88f5181_clk, "marvell,mv88f5181-core-clock", mv88f5181_clk_init);
+
+/*
* Orion 5182
*/
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 8feba93672c5..e8075359366b 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -144,11 +144,8 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
ddrclk->ddr_flag = ddr_flag;
clk = clk_register(NULL, &ddrclk->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: could not register ddrclk %s\n", __func__, name);
+ if (IS_ERR(clk))
kfree(ddrclk);
- return NULL;
- }
return clk;
}
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 51d152f735cc..17e68a724945 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -106,6 +106,7 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match);
static void exynos_audss_clk_teardown(void)
{
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 96fab6cfb202..6c6afb87b4ce 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -132,28 +132,34 @@ free_clkout:
pr_err("%s: failed to register clkout clock\n", __func__);
}
+/*
+ * We use the CLK_OF_DECLARE_DRIVER initialization method to avoid setting
+ * the OF_POPULATED flag on the PMU device tree node, so that the Exynos
+ * PMU platform device can later be properly probed by the PMU driver.
+ */
+
static void __init exynos4_clkout_init(struct device_node *node)
{
exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
}
-CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
+CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE(exynos3250_clkout, "samsung,exynos3250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
exynos4_clkout_init);
static void __init exynos5_clkout_init(struct device_node *node)
{
exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
}
-CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
exynos5_clkout_init);
-CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu",
+CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
exynos5_clkout_init);
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 79596463e0d9..4a82a49cff5e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = {
static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
0x050, 0, 3, axi_div_table, 0);
+#define SUN6I_A31_AHB1_REG 0x054
+
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi", "pll-periph" };
@@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
val &= BIT(16);
writel(val, reg + SUN6I_A31_PLL_MIPI_REG);
+ /* Force AHB1 to PLL6 / 3 */
+ val = readl(reg + SUN6I_A31_AHB1_REG);
+ /* set PLL6 pre-div = 3 */
+ val &= ~GENMASK(7, 6);
+ val |= 0x2 << 6;
+ /* select PLL6 / pre-div */
+ val &= ~GENMASK(13, 12);
+ val |= 0x3 << 12;
+ writel(val, reg + SUN6I_A31_AHB1_REG);
+
sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
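The AHB1 reparenting above is a standard read-modify-write of two register fields: clear each field with GENMASK(), then OR in the new value at the field's shift. A user-space sketch of the same update on a plain variable, using the layout from the hunk (pre-divider in bits 7:6, mux in bits 13:12):

    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

    int main(void)
    {
            uint32_t val = 0xffffffff;      /* pretend register contents */

            val &= ~GENMASK(7, 6);          /* clear the PLL6 pre-div field */
            val |= 0x2 << 6;                /* pre-div = 3 (encoded as 2) */

            val &= ~GENMASK(13, 12);        /* clear the AHB1 mux field */
            val |= 0x3 << 12;               /* select PLL6 / pre-div */

            printf("0x%08x\n", val);
            return 0;
    }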
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 838b22aa8b67..f2c9274b8bd5 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
else
calcp = 3;
- calcm = (req->parent_rate >> calcp) - 1;
+ calcm = (div >> calcp) - 1;
req->rate = (req->parent_rate >> calcp) / (calcm + 1);
req->m = calcm;
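The one-line sunxi fix is easiest to see numerically: the divisor m must come from the requested division ratio (div), not from the parent rate itself. A toy model comparing the two, assuming div = parent_rate / rate and calcp = 0 for simplicity (the real function picks calcp from the size of div):

    #include <stdio.h>

    int main(void)
    {
            unsigned long parent_rate = 24000000, rate = 2000000;
            unsigned long div = parent_rate / rate; /* desired ratio: 12 */
            unsigned long calcp = 0;                /* assume no pre-divider */

            unsigned long bad_m  = (parent_rate >> calcp) - 1; /* 23999999: nonsense */
            unsigned long good_m = (div >> calcp) - 1;         /* 11 */

            printf("m=%lu -> %lu Hz\n", good_m,
                   (parent_rate >> calcp) / (good_m + 1));     /* 2000000 Hz */
            printf("broken m would have been %lu\n", bad_m);
            return 0;
    }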
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 5ffb898d0839..26c53f7963a4 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -79,7 +79,7 @@ static int uniphier_clk_probe(struct platform_device *pdev)
hw_data->num = clk_num;
/* avoid returning NULL for unused idx */
- for (; clk_num >= 0; clk_num--)
+ while (--clk_num >= 0)
hw_data->hws[clk_num] = ERR_PTR(-EINVAL);
for (p = data; p->name; p++) {
@@ -111,6 +111,10 @@ static int uniphier_clk_remove(struct platform_device *pdev)
static const struct of_device_id uniphier_clk_match[] = {
/* System clock */
{
+ .compatible = "socionext,uniphier-sld3-clock",
+ .data = uniphier_sld3_sys_clk_data,
+ },
+ {
.compatible = "socionext,uniphier-ld4-clock",
.data = uniphier_ld4_sys_clk_data,
},
@@ -138,7 +142,7 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-ld20-clock",
.data = uniphier_ld20_sys_clk_data,
},
- /* Media I/O clock */
+ /* Media I/O clock, SD clock */
{
.compatible = "socionext,uniphier-sld3-mio-clock",
.data = uniphier_sld3_mio_clk_data,
@@ -156,20 +160,20 @@ static const struct of_device_id uniphier_clk_match[] = {
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-pro5-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pro5-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
- .compatible = "socionext,uniphier-pxs2-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pxs2-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
.compatible = "socionext,uniphier-ld11-mio-clock",
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-ld20-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-ld20-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
/* Peripheral clock */
{
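The uniphier loop change fixes an off-by-one: with clk_num still equal to the array size on entry, `for (; clk_num >= 0; clk_num--)` writes one element past the end before counting down, while `while (--clk_num >= 0)` decrements first and touches only valid indices. A compact demonstration:

    #include <stdio.h>

    #define NUM 4

    int main(void)
    {
            int hws[NUM];
            int clk_num = NUM;  /* as in the driver: still the element count */

            /* The broken form would start at hws[NUM], one past the end:
             *   for (; clk_num >= 0; clk_num--) hws[clk_num] = -1;
             */

            while (--clk_num >= 0)      /* first index touched is NUM - 1 */
                    hws[clk_num] = -1;

            for (clk_num = 0; clk_num < NUM; clk_num++)
                    printf("hws[%d]=%d\n", clk_num, hws[clk_num]);
            return 0;
    }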
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
index 6aa7ec768d0b..218d20f099ce 100644
--- a/drivers/clk/uniphier/clk-uniphier-mio.c
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -93,7 +93,7 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
{ /* sentinel */ }
};
-const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = {
+const struct uniphier_clk_data uniphier_pro5_sd_clk_data[] = {
UNIPHIER_MIO_CLK_SD_FIXED,
UNIPHIER_MIO_CLK_SD(0, 0),
UNIPHIER_MIO_CLK_SD(1, 1),
diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
index 15a2f2cbe0d9..2c243a894f3b 100644
--- a/drivers/clk/uniphier/clk-uniphier-mux.c
+++ b/drivers/clk/uniphier/clk-uniphier-mux.c
@@ -42,7 +42,7 @@ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
int ret;
- u32 val;
+ unsigned int val;
u8 i;
ret = regmap_read(mux->regmap, mux->reg, &val);
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 3ae184062388..0244dba1f4cf 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -115,7 +115,7 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[];
-extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[];
extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 8a753fd5b79d..e2c6e43cf8ca 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -361,7 +361,7 @@ config CLKSRC_METAG_GENERIC
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
- depends on ARM
+ depends on ARM || ARM64
help
Support for Multi Core Timer controller on Exynos SoCs.
@@ -417,6 +417,16 @@ config SYS_SUPPORTS_SH_TMU
config SYS_SUPPORTS_EM_STI
bool
+config CLKSRC_JCORE_PIT
+ bool "J-Core PIT timer driver" if COMPILE_TEST
+ depends on OF
+ depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+ This enables building the clocksource and clockevent driver for
+ the integrated PIT in the J-Core synthesizable, open source SoC.
+
config SH_TIMER_CMT
bool "Renesas CMT timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index fd9d6df0bbc0..cf87f407f1ad 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
+obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 41840d02c331..8f3488b80896 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -223,6 +223,7 @@ static u64 notrace exynos4_read_sched_clock(void)
return exynos4_read_count_32();
}
+#if defined(CONFIG_ARM)
static struct delay_timer exynos4_delay_timer;
static cycles_t exynos4_read_current_timer(void)
@@ -231,14 +232,17 @@ static cycles_t exynos4_read_current_timer(void)
"cycles_t needs to move to 32-bit for ARM64 usage");
return exynos4_read_count_32();
}
+#endif
static int __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start();
+#if defined(CONFIG_ARM)
exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
exynos4_delay_timer.freq = clk_rate;
register_current_timer_delay(&exynos4_delay_timer);
+#endif
if (clocksource_register_hz(&mct_frc, clk_rate))
panic("%s: can't register clocksource\n", mct_frc.name);
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
new file mode 100644
index 000000000000..54e1665aa03c
--- /dev/null
+++ b/drivers/clocksource/jcore-pit.c
@@ -0,0 +1,249 @@
+/*
+ * J-Core SoC PIT/clocksource driver
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define PIT_IRQ_SHIFT 12
+#define PIT_PRIO_SHIFT 20
+#define PIT_ENABLE_SHIFT 26
+#define PIT_PRIO_MASK 0xf
+
+#define REG_PITEN 0x00
+#define REG_THROT 0x10
+#define REG_COUNT 0x14
+#define REG_BUSPD 0x18
+#define REG_SECHI 0x20
+#define REG_SECLO 0x24
+#define REG_NSEC 0x28
+
+struct jcore_pit {
+ struct clock_event_device ced;
+ void __iomem *base;
+ unsigned long periodic_delta;
+ u32 enable_val;
+};
+
+static void __iomem *jcore_pit_base;
+static struct jcore_pit __percpu *jcore_pit_percpu;
+
+static notrace u64 jcore_sched_clock_read(void)
+{
+ u32 seclo, nsec, seclo0;
+ __iomem void *base = jcore_pit_base;
+
+ seclo = readl(base + REG_SECLO);
+ do {
+ seclo0 = seclo;
+ nsec = readl(base + REG_NSEC);
+ seclo = readl(base + REG_SECLO);
+ } while (seclo0 != seclo);
+
+ return (u64)seclo * NSEC_PER_SEC + nsec;
+}
+
+static cycle_t jcore_clocksource_read(struct clocksource *cs)
+{
+ return jcore_sched_clock_read();
+}
+
+static int jcore_pit_disable(struct jcore_pit *pit)
+{
+ writel(0, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set(unsigned long delta, struct jcore_pit *pit)
+{
+ jcore_pit_disable(pit);
+ writel(delta, pit->base + REG_THROT);
+ writel(pit->enable_val, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set_state_shutdown(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_oneshot(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_periodic(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(pit->periodic_delta, pit);
+}
+
+static int jcore_pit_set_next_event(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(delta, pit);
+}
+
+static int jcore_pit_local_init(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+ unsigned buspd, freq;
+
+ pr_info("Local J-Core PIT init on cpu %u\n", cpu);
+
+ buspd = readl(pit->base + REG_BUSPD);
+ freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd);
+ pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
+
+ clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+
+ return 0;
+}
+
+static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
+{
+ struct jcore_pit *pit = this_cpu_ptr(dev_id);
+
+ if (clockevent_state_oneshot(&pit->ced))
+ jcore_pit_disable(pit);
+
+ pit->ced.event_handler(&pit->ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init jcore_pit_init(struct device_node *node)
+{
+ int err;
+ unsigned pit_irq, cpu;
+ unsigned long hwirq;
+ u32 irqprio, enable_val;
+
+ jcore_pit_base = of_iomap(node, 0);
+ if (!jcore_pit_base) {
+ pr_err("Error: Cannot map base address for J-Core PIT\n");
+ return -ENXIO;
+ }
+
+ pit_irq = irq_of_parse_and_map(node, 0);
+ if (!pit_irq) {
+ pr_err("Error: J-Core PIT has no IRQ\n");
+ return -ENXIO;
+ }
+
+ pr_info("Initializing J-Core PIT at %p IRQ %d\n",
+ jcore_pit_base, pit_irq);
+
+ err = clocksource_mmio_init(jcore_pit_base, "jcore_pit_cs",
+ NSEC_PER_SEC, 400, 32,
+ jcore_clocksource_read);
+ if (err) {
+ pr_err("Error registering clocksource device: %d\n", err);
+ return err;
+ }
+
+ sched_clock_register(jcore_sched_clock_read, 32, NSEC_PER_SEC);
+
+ jcore_pit_percpu = alloc_percpu(struct jcore_pit);
+ if (!jcore_pit_percpu) {
+ pr_err("Failed to allocate memory for clock event device\n");
+ return -ENOMEM;
+ }
+
+ err = request_irq(pit_irq, jcore_timer_interrupt,
+ IRQF_TIMER | IRQF_PERCPU,
+ "jcore_pit", jcore_pit_percpu);
+ if (err) {
+ pr_err("pit irq request failed: %d\n", err);
+ free_percpu(jcore_pit_percpu);
+ return err;
+ }
+
+ /*
+ * The J-Core PIT is not hard-wired to a particular IRQ, but
+ * integrated with the interrupt controller such that the IRQ it
+ * generates is programmable, as follows:
+ *
+ * The bit layout of the PIT enable register is:
+ *
+ * .....e..ppppiiiiiiii............
+ *
+ * where the .'s indicate unrelated/unused bits, e is enable,
+ * p is priority, and i is hard irq number.
+ *
+ * For the PIT included in AIC1 (obsolete but still in use),
+ * any hard irq (trap number) can be programmed via the 8
+ * iiiiiiii bits, and a priority (0-15) is programmable
+ * separately in the pppp bits.
+ *
+ * For the PIT included in AIC2 (current), the programming
+ * interface is equivalent modulo interrupt mapping. This is
+ * why a different compatible tag was not used. However, only
+ * traps 64-127 (the ones actually intended to be used for
+ * interrupts, rather than syscalls/exceptions/etc.) can be
+ * programmed (the high 2 bits of i are ignored) and the
+ * priority pppp is <<2'd and or'd onto the irq number. This
+ * choice seems to have been made on the hardware engineering
+ * side under an assumption that preserving old AIC1 priority
+ * mappings was important. Future models will likely ignore
+ * the pppp field.
+ */
+ hwirq = irq_get_irq_data(pit_irq)->hwirq;
+ irqprio = (hwirq >> 2) & PIT_PRIO_MASK;
+ enable_val = (1U << PIT_ENABLE_SHIFT)
+ | (hwirq << PIT_IRQ_SHIFT)
+ | (irqprio << PIT_PRIO_SHIFT);
+
+ for_each_present_cpu(cpu) {
+ struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
+
+ pit->base = of_iomap(node, cpu);
+ if (!pit->base) {
+ pr_err("Unable to map PIT for cpu %u\n", cpu);
+ continue;
+ }
+
+ pit->ced.name = "jcore_pit";
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC
+ | CLOCK_EVT_FEAT_ONESHOT
+ | CLOCK_EVT_FEAT_PERCPU;
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.rating = 400;
+ pit->ced.irq = pit_irq;
+ pit->ced.set_state_shutdown = jcore_pit_set_state_shutdown;
+ pit->ced.set_state_periodic = jcore_pit_set_state_periodic;
+ pit->ced.set_state_oneshot = jcore_pit_set_state_oneshot;
+ pit->ced.set_next_event = jcore_pit_set_next_event;
+
+ pit->enable_val = enable_val;
+ }
+
+ cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
+ "AP_JCORE_TIMER_STARTING",
+ jcore_pit_local_init, NULL);
+
+ return 0;
+}
+
+CLOCKSOURCE_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
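jcore_sched_clock_read() samples a split time value from two 32-bit registers without locking, re-reading SECLO until it is stable across the NSEC read. The same retry idiom works for any split counter; below is a user-space model with volatile variables standing in for the registers (a real concurrent reader would additionally rely on the hardware's update-ordering guarantees):

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins for the REG_SECLO / REG_NSEC registers. */
    static volatile uint32_t reg_seclo = 100;
    static volatile uint32_t reg_nsec  = 500000000;

    #define NSEC_PER_SEC 1000000000ULL

    static uint64_t read_time(void)
    {
            uint32_t seclo, nsec, seclo0;

            seclo = reg_seclo;
            do {
                    seclo0 = seclo;     /* remember the seconds we saw */
                    nsec   = reg_nsec;  /* sample nanoseconds */
                    seclo  = reg_seclo; /* re-check: did seconds roll over? */
            } while (seclo0 != seclo);  /* retry if NSEC straddled a rollover */

            return (uint64_t)seclo * NSEC_PER_SEC + nsec;
    }

    int main(void)
    {
            printf("%llu ns\n", (unsigned long long)read_time());
            return 0;
    }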
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index c184eb84101e..4f87f3e76d83 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -152,6 +152,13 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+{
+ struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+
+ return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+}
+
static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -210,8 +217,13 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(1));
- ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name,
- rate, 340, 32, clocksource_mmio_readl_down);
+ cs->clksrc.name = node->name;
+ cs->clksrc.rating = 340;
+ cs->clksrc.read = sun5i_clksrc_read;
+ cs->clksrc.mask = CLOCKSOURCE_MASK(32);
+ cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
pr_err("Couldn't register clock source.\n");
goto err_remove_notifier;
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 1b2f28f69a81..4852d9efe74e 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -80,11 +80,17 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
{
struct cppc_cpudata *cpu;
struct cpufreq_freqs freqs;
+ u32 desired_perf;
int ret = 0;
cpu = all_cpu_data[policy->cpu];
- cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz;
+ desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
+ /* Return if it is exactly the same perf */
+ if (desired_perf == cpu->perf_ctrls.desired_perf)
+ return ret;
+
+ cpu->perf_ctrls.desired_perf = desired_perf;
freqs.old = policy->cur;
freqs.new = target_freq;
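Besides short-circuiting duplicate requests, the CPPC change rescales the target against the CPU's advertised highest_perf rather than policy->max. A sketch of the conversion with hypothetical platform numbers, doing the multiply in 64 bits first as the driver does:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical platform values. */
            uint64_t cppc_dmi_max_khz = 2800000; /* max freq from DMI, in kHz */
            uint64_t highest_perf     = 280;     /* CPPC highest_perf cap */
            uint64_t target_freq      = 1400000; /* requested freq, in kHz */

            /* 64-bit multiply first, as in the driver, to avoid overflow. */
            uint32_t desired_perf =
                    target_freq * highest_perf / cppc_dmi_max_khz;

            printf("desired_perf = %u\n", desired_perf); /* 140: half of cap */
            return 0;
    }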
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 18da4f8051d3..13475890d792 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -17,6 +17,7 @@
struct cs_policy_dbs_info {
struct policy_dbs_info policy_dbs;
unsigned int down_skip;
+ unsigned int requested_freq;
};
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ unsigned int requested_freq = dbs_info->requested_freq;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int load = dbs_update(policy);
@@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
if (cs_tuners->freq_step == 0)
goto out;
+ /*
+ * If requested_freq is out of range, it is likely that the limits
+ * changed in the meantime, so fall back to current frequency in that
+ * case.
+ */
+ if (requested_freq > policy->max || requested_freq < policy->min)
+ requested_freq = policy->cur;
+
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
- unsigned int requested_freq = policy->cur;
-
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
@@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
goto out;
requested_freq += get_freq_target(cs_tuners, policy);
+ if (requested_freq > policy->max)
+ requested_freq = policy->max;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+ dbs_info->requested_freq = requested_freq;
goto out;
}
@@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
- unsigned int freq_target, requested_freq = policy->cur;
+ unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
@@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
requested_freq = policy->min;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+ dbs_info->requested_freq = requested_freq;
}
out:
@@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_policy *policy)
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->down_skip = 0;
+ dbs_info->requested_freq = policy->cur;
}
static struct dbs_governor cs_governor = {
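Tracking requested_freq in the governor's own state, instead of restarting from policy->cur each sample, is what lets the conservative governor actually accumulate freq_step increments; the new range check handles policy limits changing between samples. A toy model of one evaluation step, with made-up limits and thresholds:

    #include <stdio.h>

    /* Toy policy limits and tuners (hypothetical values, in kHz). */
    #define POL_MIN   800000u
    #define POL_MAX  2400000u
    #define FREQ_STEP 200000u
    #define UP_THRESHOLD   80u
    #define DOWN_THRESHOLD 20u

    static unsigned int requested_freq = 1600000; /* governor-tracked state */

    static void cs_step(unsigned int load, unsigned int cur)
    {
            /* Limits may have changed since last sample: fall back to cur. */
            if (requested_freq > POL_MAX || requested_freq < POL_MIN)
                    requested_freq = cur;

            if (load > UP_THRESHOLD) {
                    requested_freq += FREQ_STEP;
                    if (requested_freq > POL_MAX)
                            requested_freq = POL_MAX;
            } else if (load < DOWN_THRESHOLD) {
                    requested_freq = requested_freq > POL_MIN + FREQ_STEP ?
                                     requested_freq - FREQ_STEP : POL_MIN;
            }
            printf("load=%u -> request %u kHz\n", load, requested_freq);
    }

    int main(void)
    {
            cs_step(90, 1600000);   /* busy: step up to 1800000 */
            cs_step(10, 1800000);   /* idle: step back down to 1600000 */
            return 0;
    }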
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 806f2039571e..4737520ec823 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct _pid {
/**
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
+ * @policy: CPUFreq policy value
* @update_util: CPUFreq utility callback information
* @update_util_set: CPUFreq utility callback is set
* @iowait_boost: iowait-related boost fraction
@@ -201,6 +202,7 @@ struct _pid {
struct cpudata {
int cpu;
+ unsigned int policy;
struct update_util_data update_util;
bool update_util_set;
@@ -225,7 +227,7 @@ struct cpudata {
static struct cpudata **all_cpu_data;
/**
- * struct pid_adjust_policy - Stores static PID configuration data
+ * struct pstate_adjust_policy - Stores static PID configuration data
* @sample_rate_ms: PID calculation sample rate in ms
* @sample_rate_ns: Sample rate calculation in ns
* @deadband: PID deadband
@@ -562,12 +564,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
int min, hw_min, max, hw_max, cpu, range, adj_range;
u64 value, cap;
- rdmsrl(MSR_HWP_CAPABILITIES, cap);
- hw_min = HWP_LOWEST_PERF(cap);
- hw_max = HWP_HIGHEST_PERF(cap);
- range = hw_max - hw_min;
-
for_each_cpu(cpu, cpumask) {
+ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+ hw_min = HWP_LOWEST_PERF(cap);
+ hw_max = HWP_HIGHEST_PERF(cap);
+ range = hw_max - hw_min;
+
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
adj_range = limits->min_perf_pct * range / 100;
min = hw_min + adj_range;
@@ -1142,10 +1144,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
- int pstate = cpu->pstate.min_pstate;
-
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
cpu->pstate.current_pstate = pstate;
/*
@@ -1157,6 +1157,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
pstate_funcs.get_val(cpu, pstate));
}
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+ int min_pstate, max_pstate;
+
+ update_turbo_state();
+ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+ intel_pstate_set_pstate(cpu, max_pstate);
+}
+
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -1232,6 +1246,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
int32_t busy_frac, boost;
+ int target, avg_pstate;
busy_frac = div_fp(sample->mperf, sample->tsc);
@@ -1242,7 +1257,26 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
busy_frac = boost;
sample->busy_scaled = busy_frac * 100;
- return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
+
+ target = limits->no_turbo || limits->turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+ target += target >> 2;
+ target = mul_fp(target, busy_frac);
+ if (target < cpu->pstate.min_pstate)
+ target = cpu->pstate.min_pstate;
+
+ /*
+ * If the average P-state during the previous cycle was higher than the
+ * current target, add 50% of the difference to the target to reduce
+ * possible performance oscillations and offset possible performance
+ * loss related to moving the workload from one CPU to another within
+ * a package/module.
+ */
+ avg_pstate = get_avg_pstate(cpu);
+ if (avg_pstate > target)
+ target += (avg_pstate - target) >> 1;
+
+ return target;
}
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
@@ -1251,10 +1285,11 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
u64 duration_ns;
/*
- * perf_scaled is the average performance during the last sampling
- * period scaled by the ratio of the maximum P-state to the P-state
- * requested last time (in percent). That measures the system's
- * response to the previous P-state selection.
+ * perf_scaled is the ratio of the average P-state during the last
+ * sampling period to the P-state requested last time (in percent).
+ *
+ * That measures the system's response to the previous P-state
+ * selection.
*/
max_pstate = cpu->pstate.max_pstate_physical;
current_pstate = cpu->pstate.current_pstate;
@@ -1304,7 +1339,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
from = cpu->pstate.current_pstate;
- target_pstate = pstate_funcs.get_target_pstate(cpu);
+ target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
+ cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
intel_pstate_update_pstate(cpu, target_pstate);
@@ -1470,7 +1506,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
- cpu = all_cpu_data[0];
+ cpu = all_cpu_data[policy->cpu];
+ cpu->policy = policy->policy;
+
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
@@ -1478,7 +1516,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
policy->max = policy->cpuinfo.max_freq;
}
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
if (policy->max >= policy->cpuinfo.max_freq) {
pr_debug("set performance\n");
@@ -1514,6 +1552,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ /*
+ * NOHZ_FULL CPUs need this as the governor callback may not
+ * be invoked on them.
+ */
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ intel_pstate_max_within_limits(cpu);
+ }
+
intel_pstate_set_update_util_hook(policy->cpu);
intel_pstate_hwp_set_policy(policy);
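The new load-based selector sizes the target as 1.25x the available maximum P-state scaled by the busy fraction, then pulls halfway up toward the previous cycle's average P-state when that was higher. A model of the arithmetic with hypothetical numbers; the real code uses mul_fp/div_fp fixed-point math rather than doubles:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical P-state numbers (multiples of 100 MHz). */
            int turbo_pstate = 35, min_pstate = 8;
            double busy_frac = 0.60;  /* mperf/tsc from the last sample */
            int avg_pstate   = 30;    /* average P-state over the last cycle */

            int target = turbo_pstate;
            target += target >> 2;              /* +25% headroom: 43 */
            target = (int)(target * busy_frac); /* scale by load: 25 */
            if (target < min_pstate)
                    target = min_pstate;

            /* Pull halfway toward last cycle's average if it was higher. */
            if (avg_pstate > target)
                    target += (avg_pstate - target) >> 1; /* 25 + 2 = 27 */

            printf("target pstate = %d\n", target);
            return 0;
    }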
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips
index 4102be01d06a..512ee37b374b 100644
--- a/drivers/cpuidle/Kconfig.mips
+++ b/drivers/cpuidle/Kconfig.mips
@@ -5,7 +5,7 @@ config MIPS_CPS_CPUIDLE
bool "CPU Idle driver for MIPS CPS platforms"
depends on CPU_IDLE && MIPS_CPS
depends on SYS_SUPPORTS_MIPS_CPS
- select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT || CPU_MIPSR6
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select MIPS_CPS_PM
default y
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
index 1adb6980b707..926ba9871c62 100644
--- a/drivers/cpuidle/cpuidle-cps.c
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -163,7 +163,7 @@ static int __init cps_cpuidle_init(void)
core = cpu_data[cpu].core;
device = &per_cpu(cpuidle_dev, cpu);
device->cpu = cpu;
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
#endif
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 389ade4572be..ab264d393233 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,6 +14,7 @@
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/tick.h>
+#include <linux/cpu.h>
#include "cpuidle.h"
@@ -178,8 +179,8 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
}
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static int __cpuidle poll_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
local_irq_enable();
if (!current_set_polling_and_test()) {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9b035b7d7f4f..4d2b81f2b223 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -318,6 +318,9 @@ config CRYPTO_DEV_OMAP_AES
select CRYPTO_AES
select CRYPTO_BLKCIPHER
select CRYPTO_ENGINE
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_CTR
help
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b3044219772c..954a64c7757b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -111,6 +111,42 @@
#else
#define debug(format, arg...)
#endif
+
+#ifdef DEBUG
+#include <linux/highmem.h>
+
+static void dbg_dump_sg(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ struct scatterlist *sg, size_t tlen, bool ascii,
+ bool may_sleep)
+{
+ struct scatterlist *it;
+ void *it_page;
+ size_t len;
+ void *buf;
+
+ for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
+ /*
+ * make sure the scatterlist's page
+ * has a valid virtual memory mapping
+ */
+ it_page = kmap_atomic(sg_page(it));
+ if (unlikely(!it_page)) {
+ printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
+ return;
+ }
+
+ buf = it_page + it->offset;
+ len = min_t(size_t, tlen, it->length);
+ print_hex_dump(level, prefix_str, prefix_type, rowsize,
+ groupsize, buf, len, ascii);
+ tlen -= len;
+
+ kunmap_atomic(it_page);
+ }
+}
+#endif
+
static struct list_head alg_list;
struct caam_alg_entry {
@@ -227,8 +263,9 @@ static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
if (is_rfc3686) {
nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
enckeylen);
- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc,
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
@@ -500,11 +537,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
@@ -578,11 +614,10 @@ skip_enc:
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Choose operation */
if (ctr_mode)
@@ -683,11 +718,10 @@ copy_iv:
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Class 1 operation */
append_operation(desc, ctx->class1_alg_type |
@@ -1478,7 +1512,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
int ret = 0;
u32 *key_jump_cmd;
u32 *desc;
- u32 *nonce;
+ u8 *nonce;
u32 geniv;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
@@ -1531,9 +1565,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- nonce = (u32 *)(key + keylen);
- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ nonce = (u8 *)key + keylen;
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
@@ -1549,11 +1584,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Load operation */
append_operation(desc, ctx->class1_alg_type |
@@ -1590,9 +1624,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- nonce = (u32 *)(key + keylen);
- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ nonce = (u8 *)key + keylen;
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
@@ -1608,11 +1643,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
/* Choose operation */
if (ctr_mode)
@@ -1653,9 +1687,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
- nonce = (u32 *)(key + keylen);
- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+ nonce = (u8 *)key + keylen;
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_OUTFIFO |
MOVE_DEST_CLASS1CTX |
@@ -1685,11 +1720,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
- append_load_imm_u32(desc, (u32)1, LDST_IMM |
- LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
+ append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+ LDST_OFFSET_SHIFT));
if (ctx1_iv_off)
append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
@@ -1995,9 +2029,9 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif
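sg_virt(req->src) only reaches the first segment and assumes a lowmem mapping, so the old dump could print the wrong memory for multi-segment or highmem destinations; dbg_dump_sg() walks the scatterlist instead (and dumps req->dst, which is what "dst" should have shown all along). Its definition lives elsewhere in this file; a minimal sketch of such a walker, single-page segments assumed for brevity:

	static void sketch_dump_sg(const char *prefix, struct scatterlist *sg,
				   size_t nbytes, bool may_sleep)
	{
		while (sg && nbytes) {
			size_t len = min_t(size_t, nbytes, sg->length);
			void *buf = may_sleep ? kmap(sg_page(sg))
					      : kmap_atomic(sg_page(sg));

			print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_ADDRESS,
				       16, 4, buf + sg->offset, len, 1);
			if (may_sleep)
				kunmap(sg_page(sg));
			else
				kunmap_atomic(buf);
			nbytes -= len;
			sg = sg_next(sg);
		}
	}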
ablkcipher_unmap(jrdev, edesc, req);
@@ -2027,9 +2061,9 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif
ablkcipher_unmap(jrdev, edesc, req);
@@ -2184,12 +2218,15 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
+ bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents ? 100 : req->nbytes, 1);
+ printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
+ dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif
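Whether the dump may sleep is inferred from the request itself: a submitter that set MAY_BACKLOG or MAY_SLEEP was in process context. The open-coded test above is equivalent to:

	static inline bool req_may_sleep(struct crypto_async_request *base)
	{
		return (base->flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
				       CRYPTO_TFM_REQ_MAY_SLEEP)) != 0;
	}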
len = desc_len(sh_desc);
@@ -2241,12 +2278,14 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
int len, sec4_sg_index = 0;
#ifdef DEBUG
+ bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- edesc->src_nents ? 100 : req->nbytes, 1);
+ dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif
len = desc_len(sh_desc);
@@ -2516,18 +2555,20 @@ static int aead_decrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
+#ifdef DEBUG
+ bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
+ dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ req->assoclen + req->cryptlen, 1, may_sleep);
+#endif
+
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
&all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
- req->assoclen + req->cryptlen, 1);
-#endif
-
/* Create and submit job descriptor*/
init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
@@ -4542,6 +4583,15 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /*
+ * Check support for AES modes not available
+ * on LP devices.
+ */
+ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+ if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_XTS)
+ continue;
+
t_alg = caam_alg_alloc(alg);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 36365b3efdfd..660dc206969f 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -99,17 +99,17 @@ static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
- struct device *jrdev;
- u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
- u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
- u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
- u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
- u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
- dma_addr_t sh_desc_update_dma;
+ u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+ dma_addr_t sh_desc_update_dma ____cacheline_aligned;
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
dma_addr_t sh_desc_finup_dma;
+ struct device *jrdev;
u32 alg_type;
u32 alg_op;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
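Each shared descriptor is DMA-mapped individually, so on non-coherent platforms cache maintenance on one descriptor must not spill into a neighbouring field; aligning every descriptor (and the start of the DMA-handle group) to a cache line prevents that, and jrdev moves out from in front of them. A toy model of the invariant (modern kernels, L1_CACHE_BYTES from <linux/cache.h>):

	struct demo_ctx {
		u32 desc_a[16] ____cacheline_aligned;
		u32 desc_b[16] ____cacheline_aligned; /* never shares a line */
	};
	static_assert(offsetof(struct demo_ctx, desc_b) % L1_CACHE_BYTES == 0);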
@@ -187,15 +187,6 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
return buf_dma;
}
-/* Map req->src and put it in link table */
-static inline void src_map_to_sec4_sg(struct device *jrdev,
- struct scatterlist *src, int src_nents,
- struct sec4_sg_entry *sec4_sg)
-{
- dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
- sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
-}
-
/*
* Only put buffer in link table if it contains data, which is possible,
* since a buffer has previously been used, and needs to be unmapped,
@@ -449,7 +440,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *desc;
struct split_key_result result;
dma_addr_t src_dma, dst_dma;
- int ret = 0;
+ int ret;
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
@@ -526,7 +517,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
- int ret = 0;
+ int ret;
u8 *hashed_key = NULL;
#ifdef DEBUG
@@ -534,14 +525,15 @@ static int ahash_setkey(struct crypto_ahash *ahash,
#endif
if (keylen > blocksize) {
- hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
- GFP_DMA);
+ hashed_key = kmalloc_array(digestsize,
+ sizeof(*hashed_key),
+ GFP_KERNEL | GFP_DMA);
if (!hashed_key)
return -ENOMEM;
ret = hash_digest_key(ctx, key, &keylen, hashed_key,
digestsize);
if (ret)
- goto badkey;
+ goto bad_free_key;
key = hashed_key;
}
@@ -559,14 +551,14 @@ static int ahash_setkey(struct crypto_ahash *ahash,
ret = gen_split_hash_key(ctx, key, keylen);
if (ret)
- goto badkey;
+ goto bad_free_key;
ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
ret = -ENOMEM;
- goto map_err;
+ goto error_free_key;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
@@ -579,11 +571,10 @@ static int ahash_setkey(struct crypto_ahash *ahash,
dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
DMA_TO_DEVICE);
}
-
-map_err:
+ error_free_key:
kfree(hashed_key);
return ret;
-badkey:
+ bad_free_key:
kfree(hashed_key);
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
@@ -595,16 +586,16 @@ badkey:
* @sec4_sg_dma: physical mapped address of h/w link table
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
- * @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
+ * @sec4_sg: h/w link table
*/
struct ahash_edesc {
dma_addr_t dst_dma;
dma_addr_t sec4_sg_dma;
int src_nents;
int sec4_sg_bytes;
- struct sec4_sg_entry *sec4_sg;
- u32 hw_desc[0];
+ u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
+ struct sec4_sg_entry sec4_sg[0];
};
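With hw_desc a fixed-size aligned array and sec4_sg a trailing flexible member, one allocation now carries the job descriptor and its link table, and the old "(void *)edesc + sizeof(*edesc) + DESC_JOB_IO_LEN" arithmetic disappears. Sizing reduces to:

	/* n = number of link-table entries riding behind the descriptor */
	edesc = kzalloc(sizeof(*edesc) + n * sizeof(struct sec4_sg_entry),
			GFP_DMA | flags);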
static inline void ahash_unmap(struct device *dev,
@@ -774,6 +765,65 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
req->base.complete(&req->base, err);
}
+/*
+ * Allocate an enhanced descriptor, which contains the hardware descriptor
+ * and space for hardware scatter table containing sg_num entries.
+ */
+static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
+ int sg_num, u32 *sh_desc,
+ dma_addr_t sh_desc_dma,
+ gfp_t flags)
+{
+ struct ahash_edesc *edesc;
+ unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
+
+ edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+ return NULL;
+ }
+
+ init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
+ HDR_SHARE_DEFER | HDR_REVERSE);
+
+ return edesc;
+}
+
+static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int nents,
+ unsigned int first_sg,
+ unsigned int first_bytes, size_t to_hash)
+{
+ dma_addr_t src_dma;
+ u32 options;
+
+ if (nents > 1 || first_sg) {
+ struct sec4_sg_entry *sg = edesc->sec4_sg;
+ unsigned int sgsize = sizeof(*sg) * (first_sg + nents);
+
+ sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+
+ src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->jrdev, src_dma)) {
+ dev_err(ctx->jrdev, "unable to map S/G table\n");
+ return -ENOMEM;
+ }
+
+ edesc->sec4_sg_bytes = sgsize;
+ edesc->sec4_sg_dma = src_dma;
+ options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ options = 0;
+ }
+
+ append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
+ options);
+
+ return 0;
+}
+
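The two helpers replace the pattern previously open-coded in every ahash entry point. A condensed sketch of how they compose on the digest path (error handling trimmed):

	static int demo_submit(struct caam_hash_ctx *ctx,
			       struct ahash_request *req,
			       int mapped_nents, gfp_t flags)
	{
		struct ahash_edesc *edesc;

		edesc = ahash_edesc_alloc(ctx,
					  mapped_nents > 1 ? mapped_nents : 0,
					  ctx->sh_desc_digest,
					  ctx->sh_desc_digest_dma, flags);
		if (!edesc)
			return -ENOMEM;

		/* builds the S/G table if needed, then appends SEQ IN PTR */
		return ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
					   0, 0, req->nbytes);
	}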
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
@@ -789,12 +839,10 @@ static int ahash_update_ctx(struct ahash_request *req)
int *next_buflen = state->current_buf ? &state->buflen_0 :
&state->buflen_1, last_buflen;
int in_len = *buflen + req->nbytes, to_hash;
- u32 *sh_desc = ctx->sh_desc_update, *desc;
- dma_addr_t ptr = ctx->sh_desc_update_dma;
- int src_nents, sec4_sg_bytes, sec4_sg_src_index;
+ u32 *desc;
+ int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
struct ahash_edesc *edesc;
int ret = 0;
- int sh_len;
last_buflen = *next_buflen;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
@@ -807,40 +855,51 @@ static int ahash_update_ctx(struct ahash_request *req)
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
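dma_map_sg() may coalesce segments behind an IOMMU, so hardware tables are now sized from its return value rather than from sg_nents_for_len(), and 0 means the mapping failed outright. The asymmetry to keep in mind, which the error paths below honour:

	mapped_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
	/* ... build the link table from mapped_nents ... */
	/* unmap with the ORIGINAL count, not the coalesced one: */
	dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);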
sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
- sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_update,
+ ctx->sh_desc_update_dma, flags);
if (!edesc) {
- dev_err(jrdev,
- "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
- return ret;
+ goto unmap_ctx;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
edesc->sec4_sg + 1,
buf, state->buf_dma,
*buflen, last_buflen);
- if (src_nents) {
- src_map_to_sec4_sg(jrdev, req->src, src_nents,
- edesc->sec4_sg + sec4_sg_src_index);
+ if (mapped_nents) {
+ sg_to_sec4_sg_last(req->src, mapped_nents,
+ edesc->sec4_sg + sec4_sg_src_index,
+ 0);
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
@@ -852,17 +911,15 @@ static int ahash_update_ctx(struct ahash_request *req)
state->current_buf = !state->current_buf;
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
- HDR_REVERSE);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
@@ -877,13 +934,10 @@ static int ahash_update_ctx(struct ahash_request *req)
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
- DMA_BIDIRECTIONAL);
- kfree(edesc);
- }
+ if (ret)
+ goto unmap_ctx;
+
+ ret = -EINPROGRESS;
} else if (*next_buflen) {
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
@@ -899,6 +953,10 @@ static int ahash_update_ctx(struct ahash_request *req)
#endif
return ret;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ kfree(edesc);
+ return ret;
}
static int ahash_final_ctx(struct ahash_request *req)
@@ -913,38 +971,31 @@ static int ahash_final_ctx(struct ahash_request *req)
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
int last_buflen = state->current_buf ? state->buflen_0 :
state->buflen_1;
- u32 *sh_desc = ctx->sh_desc_fin, *desc;
- dma_addr_t ptr = ctx->sh_desc_fin_dma;
+ u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- int ret = 0;
- int sh_len;
+ int ret;
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
- GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
+ ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
+ flags);
+ if (!edesc)
return -ENOMEM;
- }
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
edesc->src_nents = 0;
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
- return ret;
+ goto unmap_ctx;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
@@ -956,7 +1007,8 @@ static int ahash_final_ctx(struct ahash_request *req)
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
@@ -966,7 +1018,8 @@ static int ahash_final_ctx(struct ahash_request *req)
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
#ifdef DEBUG
@@ -975,13 +1028,13 @@ static int ahash_final_ctx(struct ahash_request *req)
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
+ if (ret)
+ goto unmap_ctx;
+ return -EINPROGRESS;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
return ret;
}
@@ -997,68 +1050,66 @@ static int ahash_finup_ctx(struct ahash_request *req)
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
int last_buflen = state->current_buf ? state->buflen_0 :
state->buflen_1;
- u32 *sh_desc = ctx->sh_desc_finup, *desc;
- dma_addr_t ptr = ctx->sh_desc_finup_dma;
- int sec4_sg_bytes, sec4_sg_src_index;
- int src_nents;
+ u32 *desc;
+ int sec4_sg_src_index;
+ int src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- int ret = 0;
- int sh_len;
+ int ret;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
- sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
- sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
edesc->src_nents = src_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
- return ret;
+ goto unmap_ctx;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
- sec4_sg_src_index);
-
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
- }
-
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
- buflen + req->nbytes, LDST_SGF);
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
+ sec4_sg_src_index, ctx->ctx_len + buflen,
+ req->nbytes);
+ if (ret)
+ goto unmap_ctx;
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
#ifdef DEBUG
@@ -1067,13 +1118,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
- kfree(edesc);
- }
+ if (ret)
+ goto unmap_ctx;
+ return -EINPROGRESS;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
return ret;
}
@@ -1084,60 +1135,56 @@ static int ahash_digest(struct ahash_request *req)
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
- u32 *sh_desc = ctx->sh_desc_digest, *desc;
- dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
- int src_nents, sec4_sg_bytes;
- dma_addr_t src_dma;
+ int src_nents, mapped_nents;
struct ahash_edesc *edesc;
- int ret = 0;
- u32 options;
- int sh_len;
+ int ret;
- src_nents = sg_count(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
- dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
- sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to map source for DMA\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
- GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
+ ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->src_nents = src_nents;
- sh_len = desc_len(sh_desc);
- desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+ edesc->src_nents = src_nents;
- if (src_nents) {
- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
- }
- src_dma = edesc->sec4_sg_dma;
- options = LDST_SGF;
- } else {
- src_dma = sg_dma_address(req->src);
- options = 0;
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+ req->nbytes);
+ if (ret) {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ return ret;
}
- append_seq_in_ptr(desc, src_dma, req->nbytes, options);
+
+ desc = edesc->hw_desc;
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
return -ENOMEM;
}
@@ -1168,29 +1215,23 @@ static int ahash_final_no_ctx(struct ahash_request *req)
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
- u32 *sh_desc = ctx->sh_desc_digest, *desc;
- dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- int ret = 0;
- int sh_len;
+ int ret;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
+ ctx->sh_desc_digest_dma, flags);
+ if (!edesc)
return -ENOMEM;
- }
- edesc->sec4_sg_bytes = 0;
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, state->buf_dma)) {
dev_err(jrdev, "unable to map src\n");
- return -ENOMEM;
+ goto unmap;
}
append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
@@ -1199,7 +1240,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ goto unmap;
}
edesc->src_nents = 0;
@@ -1217,6 +1258,11 @@ static int ahash_final_no_ctx(struct ahash_request *req)
}
return ret;
+ unmap:
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ return -ENOMEM;
+
}
/* submit ahash update if it is the first job descriptor after update */
@@ -1234,48 +1280,58 @@ static int ahash_update_no_ctx(struct ahash_request *req)
int *next_buflen = state->current_buf ? &state->buflen_0 :
&state->buflen_1;
int in_len = *buflen + req->nbytes, to_hash;
- int sec4_sg_bytes, src_nents;
+ int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
- u32 *desc, *sh_desc = ctx->sh_desc_update_first;
- dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+ u32 *desc;
int ret = 0;
- int sh_len;
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
to_hash = in_len - *next_buflen;
if (to_hash) {
src_nents = sg_nents_for_len(req->src,
- req->nbytes - (*next_buflen));
+ req->nbytes - *next_buflen);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
- sec4_sg_bytes = (1 + src_nents) *
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ sec4_sg_bytes = (1 + mapped_nents) *
sizeof(struct sec4_sg_entry);
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
+ ctx->sh_desc_update_first,
+ ctx->sh_desc_update_first_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev,
- "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
edesc->dst_dma = 0;
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
buf, *buflen);
- src_map_to_sec4_sg(jrdev, req->src, src_nents,
- edesc->sec4_sg + 1);
+ sg_to_sec4_sg_last(req->src, mapped_nents,
+ edesc->sec4_sg + 1, 0);
+
if (*next_buflen) {
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
@@ -1284,24 +1340,22 @@ static int ahash_update_no_ctx(struct ahash_request *req)
state->current_buf = !state->current_buf;
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
- HDR_REVERSE);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unmap_ctx;
}
append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
if (ret)
- return ret;
+ goto unmap_ctx;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1310,16 +1364,13 @@ static int ahash_update_no_ctx(struct ahash_request *req)
#endif
ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
- if (!ret) {
- ret = -EINPROGRESS;
- state->update = ahash_update_ctx;
- state->finup = ahash_finup_ctx;
- state->final = ahash_final_ctx;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
- DMA_TO_DEVICE);
- kfree(edesc);
- }
+ if (ret)
+ goto unmap_ctx;
+
+ ret = -EINPROGRESS;
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
} else if (*next_buflen) {
scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
req->nbytes, 0);
@@ -1335,6 +1386,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
#endif
return ret;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ kfree(edesc);
+ return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
@@ -1350,61 +1405,63 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
int last_buflen = state->current_buf ? state->buflen_0 :
state->buflen_1;
- u32 *sh_desc = ctx->sh_desc_digest, *desc;
- dma_addr_t ptr = ctx->sh_desc_digest_dma;
- int sec4_sg_bytes, sec4_sg_src_index, src_nents;
+ u32 *desc;
+ int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
- int sh_len;
- int ret = 0;
+ int ret;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
sec4_sg_src_index = 2;
- sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
+ ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
state->buf_dma, buflen,
last_buflen);
- src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
-
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
+ req->nbytes);
+ if (ret) {
dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
+ goto unmap;
}
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
- req->nbytes, LDST_SGF);
-
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
digestsize);
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
dev_err(jrdev, "unable to map dst\n");
- return -ENOMEM;
+ goto unmap;
}
#ifdef DEBUG
@@ -1421,6 +1478,11 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
return ret;
+ unmap:
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ return -ENOMEM;
+
}
/* submit first update job descriptor after init */
@@ -1436,78 +1498,65 @@ static int ahash_update_first(struct ahash_request *req)
int *next_buflen = state->current_buf ?
&state->buflen_1 : &state->buflen_0;
int to_hash;
- u32 *sh_desc = ctx->sh_desc_update_first, *desc;
- dma_addr_t ptr = ctx->sh_desc_update_first_dma;
- int sec4_sg_bytes, src_nents;
- dma_addr_t src_dma;
- u32 options;
+ u32 *desc;
+ int src_nents, mapped_nents;
struct ahash_edesc *edesc;
int ret = 0;
- int sh_len;
*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1);
to_hash = req->nbytes - *next_buflen;
if (to_hash) {
- src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - *next_buflen);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
return src_nents;
}
- dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
- sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(jrdev, "unable to map source for DMA\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
/*
* allocate space for base edesc and hw desc commands,
* link tables
*/
- edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
- sec4_sg_bytes, GFP_DMA | flags);
+ edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
+ mapped_nents : 0,
+ ctx->sh_desc_update_first,
+ ctx->sh_desc_update_first_dma,
+ flags);
if (!edesc) {
- dev_err(jrdev,
- "could not allocate extended descriptor\n");
+ dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
}
edesc->src_nents = src_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
- DESC_JOB_IO_LEN;
edesc->dst_dma = 0;
- if (src_nents) {
- sg_to_sec4_sg_last(req->src, src_nents,
- edesc->sec4_sg, 0);
- edesc->sec4_sg_dma = dma_map_single(jrdev,
- edesc->sec4_sg,
- sec4_sg_bytes,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- return -ENOMEM;
- }
- src_dma = edesc->sec4_sg_dma;
- options = LDST_SGF;
- } else {
- src_dma = sg_dma_address(req->src);
- options = 0;
- }
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+ to_hash);
+ if (ret)
+ goto unmap_ctx;
if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src, to_hash,
*next_buflen, 0);
- sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
- init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
- HDR_REVERSE);
-
- append_seq_in_ptr(desc, src_dma, to_hash, options);
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
if (ret)
- return ret;
+ goto unmap_ctx;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1515,18 +1564,14 @@ static int ahash_update_first(struct ahash_request *req)
desc_bytes(desc), 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
- req);
- if (!ret) {
- ret = -EINPROGRESS;
- state->update = ahash_update_ctx;
- state->finup = ahash_finup_ctx;
- state->final = ahash_final_ctx;
- } else {
- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
- DMA_TO_DEVICE);
- kfree(edesc);
- }
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = -EINPROGRESS;
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
} else if (*next_buflen) {
state->update = ahash_update_no_ctx;
state->finup = ahash_finup_no_ctx;
@@ -1541,6 +1586,10 @@ static int ahash_update_first(struct ahash_request *req)
#endif
return ret;
+ unmap_ctx:
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ kfree(edesc);
+ return ret;
}
static int ahash_finup_first(struct ahash_request *req)
@@ -1799,7 +1848,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
- int ret = 0;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1819,10 +1867,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
-
- ret = ahash_set_sh_desc(ahash);
-
- return ret;
+ return ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 0ec112ee5204..72ff19658985 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -14,6 +14,7 @@
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
+#include "ctrl.h"
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
@@ -826,6 +827,8 @@ static int caam_probe(struct platform_device *pdev)
caam_remove:
caam_remove(pdev);
+ return ret;
+
iounmap_ctrl:
iounmap(ctrl);
disable_caam_emi_slow:
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 26427c11ad87..513b6646bb36 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -23,13 +23,7 @@
#define SEC4_SG_OFFSET_MASK 0x00001fff
struct sec4_sg_entry {
-#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \
- defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
- u32 rsvd1;
- dma_addr_t ptr;
-#else
u64 ptr;
-#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
u32 len;
u32 bpid_offset;
};
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index d3869b95e7b1..a8cd8a78ec1f 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -325,6 +325,23 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
APPEND_CMD_RAW_IMM(load, LOAD, u32);
/*
+ * ee - endianness
+ * size - size of immediate type in bytes
+ */
+#define APPEND_CMD_RAW_IMM2(cmd, op, ee, size) \
+static inline void append_##cmd##_imm_##ee##size(u32 *desc, \
+ u##size immediate, \
+ u32 options) \
+{ \
+ __##ee##size data = cpu_to_##ee##size(immediate); \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(data)); \
+ append_data(desc, &data, sizeof(data)); \
+}
+
+APPEND_CMD_RAW_IMM2(load, LOAD, be, 32);
+
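For reference, APPEND_CMD_RAW_IMM2(load, LOAD, be, 32) expands (PRINT_POS elided) to:

	static inline void append_load_imm_be32(u32 *desc, u32 immediate,
						u32 options)
	{
		__be32 data = cpu_to_be32(immediate);

		append_cmd(desc, CMD_LOAD | IMMEDIATE | options |
			   sizeof(data));
		append_data(desc, &data, sizeof(data));
	}

which is what the caamalg.c hunks above call to load the big-endian counter.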
+/*
* Append math command. Only the last part of destination and source need to
* be specified
*/
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index e2bcacc1a921..5d4c05074a5c 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,7 +41,6 @@ struct caam_drv_private_jr {
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
- struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index a81f551ac222..757c27f9953d 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,8 +73,6 @@ static int caam_jr_shutdown(struct device *dev)
ret = caam_reset_hw_jr(dev);
- tasklet_kill(&jrp->irqtask);
-
/* Release interrupt */
free_irq(jrp->irq, dev);
@@ -130,7 +128,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/*
* Check the output ring for ready responses, kick
- * tasklet if jobs done.
+ * the threaded irq if jobs are done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
@@ -152,18 +150,13 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
- preempt_disable();
- tasklet_schedule(&jrp->irqtask);
- preempt_enable();
-
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
-/* Deferred service handler, run as interrupt-fired tasklet */
-static void caam_jr_dequeue(unsigned long devarg)
+static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = (struct device *)devarg;
+ struct device *dev = st_dev;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -237,6 +230,8 @@ static void caam_jr_dequeue(unsigned long devarg)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+
+ return IRQ_HANDLED;
}
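This is the standard threaded-interrupt split: the hard handler stays minimal, ACKs the source and returns IRQ_WAKE_THREAD, and the ring is drained here in a sleepable kernel thread, so the preempt_disable() dance around tasklet_schedule() goes away. The generic shape (device_pending() is a hypothetical check):

	static irqreturn_t hardirq(int irq, void *dev)
	{
		if (!device_pending(dev))
			return IRQ_NONE;	/* shared line, not ours */
		/* quiesce/ACK, then defer the heavy lifting */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t threadfn(int irq, void *dev)
	{
		/* process context: may sleep, take mutexes, etc. */
		return IRQ_HANDLED;
	}

	/* request_threaded_irq(irq, hardirq, threadfn, IRQF_SHARED,
	 *		        name, dev); */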
/**
@@ -394,11 +389,10 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
- tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-
/* Connect job ring interrupt handler. */
- error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
- dev_name(dev), dev);
+ error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
+ caam_jr_threadirq, IRQF_SHARED,
+ dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
@@ -460,7 +454,6 @@ out_free_inpring:
out_free_irq:
free_irq(jrp->irq, dev);
out_kill_deq:
- tasklet_kill(&jrp->irqtask);
return error;
}
@@ -513,6 +506,7 @@ static int caam_jr_probe(struct platform_device *pdev)
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error) {
irq_dispose_mapping(jrpriv->irq);
+ iounmap(ctrl);
return error;
}
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index b3c5016f6458..84d2f838a063 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -196,6 +196,14 @@ static inline u64 rd_reg64(void __iomem *reg)
#define caam_dma_to_cpu(value) caam32_to_cpu(value)
#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+#define cpu_to_caam_dma64(value) \
+ (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
+ (u64)cpu_to_caam32(upper_32_bits(value)))
+#else
+#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
+#endif
+
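On i.MX the CAAM is a big-endian 32-bit DMA master on a little-endian SoC, so a 64-bit pointer is stored as two byte-swapped halves, low half first. Worked example (illustrative):

	/*
	 * dma = 0x0000000812345678
	 *   lower_32_bits -> 0x12345678, cpu_to_caam32 -> 0x78563412
	 *   upper_32_bits -> 0x00000008, cpu_to_caam32 -> 0x08000000
	 *   cpu_to_caam_dma64(dma) = 0x7856341208000000
	 */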
/*
* jr_outentry
* Represents each entry in a JobR output ring
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 19dc64fede0d..41cd5a356d05 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -15,7 +15,7 @@ struct sec4_sg_entry;
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
- sec4_sg_ptr->ptr = cpu_to_caam_dma(dma);
+ sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
sec4_sg_ptr->len = cpu_to_caam32(len);
sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
#ifdef DEBUG
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index ee4d2741b3ab..346ceb8f17bd 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
ccp-objs := ccp-dev.o \
ccp-ops.o \
ccp-dev-v3.o \
+ ccp-dev-v5.o \
ccp-platform.o \
ccp-dmaengine.o
ccp-$(CONFIG_PCI) += ccp-pci.o
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8f36af62fe95..84a652be4274 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -134,7 +135,22 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
rctx->cmd.engine = CCP_ENGINE_SHA;
rctx->cmd.u.sha.type = rctx->type;
rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
- rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
+
+ switch (rctx->type) {
+ case CCP_SHA_TYPE_1:
+ rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_224:
+ rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_256:
+ rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ /* Should never get here */
+ break;
+ }
+
rctx->cmd.u.sha.src = sg;
rctx->cmd.u.sha.src_len = rctx->hash_cnt;
rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index d7a710347967..8d2dbacc6161 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,6 +20,61 @@
#include "ccp-dev.h"
+static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
+{
+ int start;
+ struct ccp_device *ccp = cmd_q->ccp;
+
+ for (;;) {
+ mutex_lock(&ccp->sb_mutex);
+
+ start = (u32)bitmap_find_next_zero_area(ccp->sb,
+ ccp->sb_count,
+ ccp->sb_start,
+ count, 0);
+ if (start <= ccp->sb_count) {
+ bitmap_set(ccp->sb, start, count);
+
+ mutex_unlock(&ccp->sb_mutex);
+ break;
+ }
+
+ ccp->sb_avail = 0;
+
+ mutex_unlock(&ccp->sb_mutex);
+
+ /* Wait for KSB entries to become available */
+ if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
+ return 0;
+ }
+
+ return KSB_START + start;
+}
+
+static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
+ unsigned int count)
+{
+ struct ccp_device *ccp = cmd_q->ccp;
+
+ if (!start)
+ return;
+
+ mutex_lock(&ccp->sb_mutex);
+
+ bitmap_clear(ccp->sb, start - KSB_START, count);
+
+ ccp->sb_avail = 1;
+
+ mutex_unlock(&ccp->sb_mutex);
+
+ wake_up_interruptible_all(&ccp->sb_queue);
+}
+
+static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
+{
+ return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+}
+
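ccp_alloc_ksb() blocks until enough contiguous slots free up; ccp_free_ksb() flips sb_avail and wakes the waiters, and a return of 0 means the sleep was interrupted. Typical pairing (sketch):

	u32 ksb = ccp_alloc_ksb(cmd_q, 2);	/* e.g. key + context */
	if (!ksb)
		return -EINTR;			/* interrupted wait */
	/* ... run the operation using the two reserved KSB slots ... */
	ccp_free_ksb(cmd_q, ksb, 2);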
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
struct ccp_cmd_queue *cmd_q = op->cmd_q;
@@ -68,6 +124,9 @@ static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
/* On error delete all related jobs from the queue */
cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
| op->jobid;
+ if (cmd_q->cmd_error)
+ ccp_log_error(cmd_q->ccp,
+ cmd_q->cmd_error);
iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
@@ -99,10 +158,10 @@ static int ccp_perform_aes(struct ccp_op *op)
| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ | (op->sb_key << REQ1_KEY_KSB_SHIFT);
cr[1] = op->src.u.dma.length - 1;
cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->src.u.dma);
cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -129,10 +188,10 @@ static int ccp_perform_xts_aes(struct ccp_op *op)
cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ | (op->sb_key << REQ1_KEY_KSB_SHIFT);
cr[1] = op->src.u.dma.length - 1;
cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->src.u.dma);
cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -158,7 +217,7 @@ static int ccp_perform_sha(struct ccp_op *op)
| REQ1_INIT;
cr[1] = op->src.u.dma.length - 1;
cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->src.u.dma);
@@ -181,11 +240,11 @@ static int ccp_perform_rsa(struct ccp_op *op)
/* Fill out the register contents for REQ1 through REQ6 */
cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+ | (op->sb_key << REQ1_KEY_KSB_SHIFT)
| REQ1_EOM;
cr[1] = op->u.rsa.input_len - 1;
cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->src.u.dma);
cr[4] = ccp_addr_lo(&op->dst.u.dma);
@@ -215,10 +274,10 @@ static int ccp_perform_passthru(struct ccp_op *op)
| ccp_addr_hi(&op->src.u.dma);
if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
- cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+ cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
} else {
- cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
- cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+ cr[2] = op->src.u.sb * CCP_SB_BYTES;
+ cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
}
if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
@@ -226,8 +285,8 @@ static int ccp_perform_passthru(struct ccp_op *op)
cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
| ccp_addr_hi(&op->dst.u.dma);
} else {
- cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
- cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+ cr[4] = op->dst.u.sb * CCP_SB_BYTES;
+ cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
}
if (op->eom)
@@ -256,35 +315,6 @@ static int ccp_perform_ecc(struct ccp_op *op)
return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
-static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
- struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
- u32 trng_value;
- int len = min_t(int, sizeof(trng_value), max);
-
- /*
- * Locking is provided by the caller so we can update device
- * hwrng-related fields safely
- */
- trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
- if (!trng_value) {
- /* Zero is returned if not data is available or if a
- * bad-entropy error is present. Assume an error if
- * we exceed TRNG_RETRIES reads of zero.
- */
- if (ccp->hwrng_retries++ > TRNG_RETRIES)
- return -EIO;
-
- return 0;
- }
-
- /* Reset the counter and save the rng value */
- ccp->hwrng_retries = 0;
- memcpy(data, &trng_value, len);
-
- return len;
-}
-
static int ccp_init(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
@@ -321,9 +351,9 @@ static int ccp_init(struct ccp_device *ccp)
cmd_q->dma_pool = dma_pool;
/* Reserve 2 KSB regions for the queue */
- cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
- cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
- ccp->ksb_count -= 2;
+ cmd_q->sb_key = KSB_START + ccp->sb_start++;
+ cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
+ ccp->sb_count -= 2;
/* Preset some register values and masks that are queue
* number dependent
@@ -335,7 +365,7 @@ static int ccp_init(struct ccp_device *ccp)
cmd_q->int_ok = 1 << (i * 2);
cmd_q->int_err = 1 << ((i * 2) + 1);
- cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+ cmd_q->free_slots = ccp_get_free_slots(cmd_q);
init_waitqueue_head(&cmd_q->int_queue);
@@ -375,9 +405,10 @@ static int ccp_init(struct ccp_device *ccp)
}
/* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->ksb_queue);
+ init_waitqueue_head(&ccp->sb_queue);
init_waitqueue_head(&ccp->suspend_queue);
+ dev_dbg(dev, "Starting threads...\n");
/* Create a kthread for each queue */
for (i = 0; i < ccp->cmd_q_count; i++) {
struct task_struct *kthread;
@@ -397,29 +428,26 @@ static int ccp_init(struct ccp_device *ccp)
wake_up_process(kthread);
}
- /* Register the RNG */
- ccp->hwrng.name = ccp->rngname;
- ccp->hwrng.read = ccp_trng_read;
- ret = hwrng_register(&ccp->hwrng);
- if (ret) {
- dev_err(dev, "error registering hwrng (%d)\n", ret);
+ dev_dbg(dev, "Enabling interrupts...\n");
+ /* Enable interrupts */
+ iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+ dev_dbg(dev, "Registering device...\n");
+ ccp_add_device(ccp);
+
+ ret = ccp_register_rng(ccp);
+ if (ret)
goto e_kthread;
- }
/* Register the DMA engine support */
ret = ccp_dmaengine_register(ccp);
if (ret)
goto e_hwrng;
- ccp_add_device(ccp);
-
- /* Enable interrupts */
- iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
-
return 0;
e_hwrng:
- hwrng_unregister(&ccp->hwrng);
+ ccp_unregister_rng(ccp);
e_kthread:
for (i = 0; i < ccp->cmd_q_count; i++)
@@ -441,19 +469,14 @@ static void ccp_destroy(struct ccp_device *ccp)
struct ccp_cmd *cmd;
unsigned int qim, i;
- /* Remove this device from the list of available units first */
- ccp_del_device(ccp);
-
/* Unregister the DMA engine */
ccp_dmaengine_unregister(ccp);
/* Unregister the RNG */
- hwrng_unregister(&ccp->hwrng);
+ ccp_unregister_rng(ccp);
- /* Stop the queue kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++)
- if (ccp->cmd_q[i].kthread)
- kthread_stop(ccp->cmd_q[i].kthread);
+ /* Remove this device from the list of available units */
+ ccp_del_device(ccp);
/* Build queue interrupt mask (two interrupt masks per queue) */
qim = 0;
@@ -472,6 +495,11 @@ static void ccp_destroy(struct ccp_device *ccp)
}
iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+ /* Stop the queue kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
ccp->free_irq(ccp);
for (i = 0; i < ccp->cmd_q_count; i++)
@@ -527,18 +555,24 @@ static irqreturn_t ccp_irq_handler(int irq, void *data)
}
static const struct ccp_actions ccp3_actions = {
- .perform_aes = ccp_perform_aes,
- .perform_xts_aes = ccp_perform_xts_aes,
- .perform_sha = ccp_perform_sha,
- .perform_rsa = ccp_perform_rsa,
- .perform_passthru = ccp_perform_passthru,
- .perform_ecc = ccp_perform_ecc,
+ .aes = ccp_perform_aes,
+ .xts_aes = ccp_perform_xts_aes,
+ .sha = ccp_perform_sha,
+ .rsa = ccp_perform_rsa,
+ .passthru = ccp_perform_passthru,
+ .ecc = ccp_perform_ecc,
+ .sballoc = ccp_alloc_ksb,
+ .sbfree = ccp_free_ksb,
.init = ccp_init,
.destroy = ccp_destroy,
+ .get_free_slots = ccp_get_free_slots,
.irqhandler = ccp_irq_handler,
};
-struct ccp_vdata ccpv3 = {
+const struct ccp_vdata ccpv3 = {
.version = CCP_VERSION(3, 0),
+ .setup = NULL,
.perform = &ccp3_actions,
+ .bar = 2,
+ .offset = 0x20000,
};
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
new file mode 100644
index 000000000000..faf3cb3ddce2
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -0,0 +1,1017 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
+{
+ struct ccp_device *ccp;
+ int start;
+
+ /* First look at the map for the queue */
+ if (cmd_q->lsb >= 0) {
+ start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
+ LSB_SIZE,
+ 0, count, 0);
+ if (start < LSB_SIZE) {
+ bitmap_set(cmd_q->lsbmap, start, count);
+ return start + cmd_q->lsb * LSB_SIZE;
+ }
+ }
+
+ /* No joy; try to get an entry from the shared blocks */
+ ccp = cmd_q->ccp;
+ for (;;) {
+ mutex_lock(&ccp->sb_mutex);
+
+ start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
+ MAX_LSB_CNT * LSB_SIZE,
+ 0,
+ count, 0);
+ if (start <= MAX_LSB_CNT * LSB_SIZE) {
+ bitmap_set(ccp->lsbmap, start, count);
+
+ mutex_unlock(&ccp->sb_mutex);
+ return start * LSB_ITEM_SIZE;
+ }
+
+ ccp->sb_avail = 0;
+
+ mutex_unlock(&ccp->sb_mutex);
+
+ /* Wait for KSB entries to become available */
+ if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
+ return 0;
+ }
+}
+
+static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
+ unsigned int count)
+{
+ int lsbno = start / LSB_SIZE;
+
+ if (!start)
+ return;
+
+ if (cmd_q->lsb == lsbno) {
+ /* An entry from the private LSB */
+ bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ } else {
+ /* From the shared LSBs */
+ struct ccp_device *ccp = cmd_q->ccp;
+
+ mutex_lock(&ccp->sb_mutex);
+ bitmap_clear(ccp->lsbmap, start, count);
+ ccp->sb_avail = 1;
+ mutex_unlock(&ccp->sb_mutex);
+ wake_up_interruptible_all(&ccp->sb_queue);
+ }
+}
+
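The v5 analogue checks the queue's private LSB bitmap before falling back to the shared pool, sleeping exactly like the v3 KSB path. Callers pair it the same way (sketch; note the private branch returns a slot index while the shared branch scales by LSB_ITEM_SIZE, so treat the cookie as opaque):

	u32 slot = ccp_lsb_alloc(cmd_q, 2);	/* 0 == interrupted */
	if (!slot)
		return -EINTR;
	/* ... */
	ccp_lsb_free(cmd_q, slot, 2);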
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+ struct {
+ u16 size:7;
+ u16 encrypt:1;
+ u16 mode:5;
+ u16 type:2;
+ } aes;
+ struct {
+ u16 size:7;
+ u16 encrypt:1;
+ u16 rsvd:5;
+ u16 type:2;
+ } aes_xts;
+ struct {
+ u16 rsvd1:10;
+ u16 type:4;
+ u16 rsvd2:1;
+ } sha;
+ struct {
+ u16 mode:3;
+ u16 size:12;
+ } rsa;
+ struct {
+ u16 byteswap:2;
+ u16 bitwise:3;
+ u16 reflect:2;
+ u16 rsvd:8;
+ } pt;
+ struct {
+ u16 rsvd:13;
+ } zlib;
+ struct {
+ u16 size:10;
+ u16 type:2;
+ u16 mode:3;
+ } ecc;
+ u16 raw;
+};
+
+#define CCP_AES_SIZE(p) ((p)->aes.size)
+#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
+#define CCP_AES_MODE(p) ((p)->aes.mode)
+#define CCP_AES_TYPE(p) ((p)->aes.type)
+#define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
+#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
+#define CCP_SHA_TYPE(p) ((p)->sha.type)
+#define CCP_RSA_SIZE(p) ((p)->rsa.size)
+#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
+#define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
+#define CCP_ECC_MODE(p) ((p)->ecc.mode)
+#define CCP_ECC_AFFINE(p) ((p)->ecc.one)
+
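The union gives symbolic access to the function field of dword 0. Building an AES-128-CBC encrypt word, as ccp5_perform_aes() below does (enum values from <linux/ccp.h>):

	union ccp_function function;

	function.raw = 0;
	CCP_AES_ENCRYPT(&function) = CCP_AES_ACTION_ENCRYPT;
	CCP_AES_MODE(&function) = CCP_AES_MODE_CBC;
	CCP_AES_TYPE(&function) = CCP_AES_TYPE_128;
	/* stored into the descriptor via CCP5_CMD_FUNCTION(&desc) */

Note that CCP_ECC_AFFINE() dereferences an ecc.one bit the union never declares; nothing in this file uses that macro, which is the only reason it compiles.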
+/* Word 0 */
+#define CCP5_CMD_DW0(p) ((p)->dw0)
+#define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc)
+#define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc)
+#define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init)
+#define CCP5_CMD_EOM(p) (CCP5_CMD_DW0(p).eom)
+#define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function)
+#define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine)
+#define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP5_CMD_DW1(p) ((p)->length)
+#define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP5_CMD_DW2(p) ((p)->src_lo)
+#define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP5_CMD_DW3(p) ((p)->dw3)
+#define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
+#define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi)
+#define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id)
+#define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP5_CMD_DW4(p) ((p)->dw4)
+#define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo)
+#define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi)
+#define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p))
+#define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
+#define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
+#define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo)
+#define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP5_CMD_DW6(p) ((p)->key_lo)
+#define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p))
+#define CCP5_CMD_DW7(p) ((p)->dw7)
+#define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi)
+#define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem)
+
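+/* Queue DMA addresses are programmed as a 32-bit low half plus a 16-bit
+ * high half (48-bit addressing); see the tail/head registers and the
+ * top 16 bits of the queue control register in ccp5_init().
+ */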
+static inline u32 low_address(unsigned long addr)
+{
+	return (u64)addr & 0x00000000ffffffff;
+}
+
+static inline u32 high_address(unsigned long addr)
+{
+	return ((u64)addr >> 32) & 0x000000000000ffff;
+}
+
+static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
+{
+ unsigned int head_idx, n;
+ u32 head_lo, queue_start;
+
+ queue_start = low_address(cmd_q->qdma_tail);
+ head_lo = ioread32(cmd_q->reg_head_lo);
+ head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);
+
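+	/* Ring arithmetic: free = (head - tail - 1) mod size, with qidx
+	 * acting as the software tail; one slot is kept empty so a full
+	 * ring is distinguishable from an empty one.
+	 */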
+ n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;
+
+ return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
+}
+
+static int ccp5_do_cmd(struct ccp5_desc *desc,
+ struct ccp_cmd_queue *cmd_q)
+{
+ u32 *mP;
+ __le32 *dP;
+ u32 tail;
+ int i;
+ int ret = 0;
+
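+	/* Stop-on-completion is not honored as such; convert the request
+	 * into interrupt-on-completion and leave the queue running.
+	 */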
+ if (CCP5_CMD_SOC(desc)) {
+ CCP5_CMD_IOC(desc) = 1;
+ CCP5_CMD_SOC(desc) = 0;
+ }
+ mutex_lock(&cmd_q->q_mutex);
+
+ mP = (u32 *) &cmd_q->qbase[cmd_q->qidx];
+ dP = (__le32 *) desc;
+ for (i = 0; i < 8; i++)
+ mP[i] = cpu_to_le32(dP[i]); /* handle endianness */
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ /* The data used by this command must be flushed to memory */
+ wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+ iowrite32(tail, cmd_q->reg_tail_lo);
+
+ /* Turn the queue back on using our cached control register */
+ iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
+ mutex_unlock(&cmd_q->q_mutex);
+
+ if (CCP5_CMD_IOC(desc)) {
+ /* Wait for the job to complete */
+ ret = wait_event_interruptible(cmd_q->int_queue,
+ cmd_q->int_rcvd);
+ if (ret || cmd_q->cmd_error) {
+ if (cmd_q->cmd_error)
+ ccp_log_error(cmd_q->ccp,
+ cmd_q->cmd_error);
+ /* A version 5 device doesn't use Job IDs... */
+ if (!ret)
+ ret = -EIO;
+ }
+ cmd_q->int_rcvd = 0;
+ }
+
+	return ret;
+}
+
+static int ccp5_perform_aes(struct ccp_op *op)
+{
+ struct ccp5_desc desc;
+ union ccp_function function;
+ u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
+
+ /* Zero out all the fields of the command desc */
+ memset(&desc, 0, Q_DESC_SIZE);
+
+ CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;
+
+ CCP5_CMD_SOC(&desc) = op->soc;
+ CCP5_CMD_IOC(&desc) = 1;
+ CCP5_CMD_INIT(&desc) = op->init;
+ CCP5_CMD_EOM(&desc) = op->eom;
+ CCP5_CMD_PROT(&desc) = 0;
+
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = op->u.aes.action;
+ CCP_AES_MODE(&function) = op->u.aes.mode;
+ CCP_AES_TYPE(&function) = op->u.aes.type;
+ if (op->u.aes.mode == CCP_AES_MODE_CFB)
+ CCP_AES_SIZE(&function) = 0x7f;
+
+ CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+ CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+ CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+ CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+ CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+ CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+ CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+ CCP5_CMD_KEY_HI(&desc) = 0;
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+ CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+ return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_xts_aes(struct ccp_op *op)
+{
+ struct ccp5_desc desc;
+ union ccp_function function;
+ u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
+
+ /* Zero out all the fields of the command desc */
+ memset(&desc, 0, Q_DESC_SIZE);
+
+ CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;
+
+ CCP5_CMD_SOC(&desc) = op->soc;
+ CCP5_CMD_IOC(&desc) = 1;
+ CCP5_CMD_INIT(&desc) = op->init;
+ CCP5_CMD_EOM(&desc) = op->eom;
+ CCP5_CMD_PROT(&desc) = 0;
+
+ function.raw = 0;
+ CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
+ CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
+ CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+ CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+ CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+ CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+ CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+ CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+ CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
+ CCP5_CMD_KEY_HI(&desc) = 0;
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+ CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+ return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_sha(struct ccp_op *op)
+{
+ struct ccp5_desc desc;
+ union ccp_function function;
+
+ /* Zero out all the fields of the command desc */
+ memset(&desc, 0, Q_DESC_SIZE);
+
+ CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;
+
+ CCP5_CMD_SOC(&desc) = op->soc;
+ CCP5_CMD_IOC(&desc) = 1;
+ CCP5_CMD_INIT(&desc) = 1;
+ CCP5_CMD_EOM(&desc) = op->eom;
+ CCP5_CMD_PROT(&desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = op->u.sha.type;
+ CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+ CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+ CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+ CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+ CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;
+
+ if (op->eom) {
+ CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
+ CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
+ } else {
+ CCP5_CMD_SHA_LO(&desc) = 0;
+ CCP5_CMD_SHA_HI(&desc) = 0;
+ }
+
+ return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_rsa(struct ccp_op *op)
+{
+ struct ccp5_desc desc;
+ union ccp_function function;
+
+ /* Zero out all the fields of the command desc */
+ memset(&desc, 0, Q_DESC_SIZE);
+
+ CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;
+
+ CCP5_CMD_SOC(&desc) = op->soc;
+ CCP5_CMD_IOC(&desc) = 1;
+ CCP5_CMD_INIT(&desc) = 0;
+ CCP5_CMD_EOM(&desc) = 1;
+ CCP5_CMD_PROT(&desc) = 0;
+
+ function.raw = 0;
+ CCP_RSA_SIZE(&function) = op->u.rsa.mod_size;
+ CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+ CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
+
+ /* Source is from external memory */
+ CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+ CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+ CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ /* Destination is in external memory */
+ CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+ CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+ CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ /* Key (Exponent) is in external memory */
+ CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
+ CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_passthru(struct ccp_op *op)
+{
+ struct ccp5_desc desc;
+ union ccp_function function;
+ struct ccp_dma_info *saddr = &op->src.u.dma;
+ struct ccp_dma_info *daddr = &op->dst.u.dma;
+
+ memset(&desc, 0, Q_DESC_SIZE);
+
+ CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
+
+ CCP5_CMD_SOC(&desc) = 0;
+ CCP5_CMD_IOC(&desc) = 1;
+ CCP5_CMD_INIT(&desc) = 0;
+ CCP5_CMD_EOM(&desc) = op->eom;
+ CCP5_CMD_PROT(&desc) = 0;
+
+ function.raw = 0;
+ CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
+ CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
+ CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+	/* Program the length from whichever side resides in system memory */
+ if (op->src.type == CCP_MEMTYPE_SYSTEM)
+ CCP5_CMD_LEN(&desc) = saddr->length;
+ else
+ CCP5_CMD_LEN(&desc) = daddr->length;
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+ CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+ CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+ CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ CCP5_CMD_LSB_ID(&desc) = op->sb_key;
+ } else {
+ u32 key_addr = op->src.u.sb * CCP_SB_BYTES;
+
+ CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
+ CCP5_CMD_SRC_HI(&desc) = 0;
+ CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
+ }
+
+ if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+ CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+ CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+ CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+ } else {
+ u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;
+
+ CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
+ CCP5_CMD_DST_HI(&desc) = 0;
+ CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
+ }
+
+ return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp5_perform_ecc(struct ccp_op *op)
+{
+ struct ccp5_desc desc;
+ union ccp_function function;
+
+ /* Zero out all the fields of the command desc */
+ memset(&desc, 0, Q_DESC_SIZE);
+
+ CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;
+
+ CCP5_CMD_SOC(&desc) = 0;
+ CCP5_CMD_IOC(&desc) = 1;
+ CCP5_CMD_INIT(&desc) = 0;
+ CCP5_CMD_EOM(&desc) = 1;
+ CCP5_CMD_PROT(&desc) = 0;
+
+ function.raw = 0;
+ function.ecc.mode = op->u.ecc.function;
+ CCP5_CMD_FUNCTION(&desc) = function.raw;
+
+ CCP5_CMD_LEN(&desc) = op->src.u.dma.length;
+
+ CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
+ CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
+ CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
+ CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
+ CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+
+ return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
+static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
+{
+ int q_mask = 1 << cmd_q->id;
+ int queues = 0;
+ int j;
+
+ /* Build a bit mask to know which LSBs this queue has access to.
+ * Don't bother with segment 0 as it has special privileges.
+ */
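+	/* The status value packs one LSB_REGION_WIDTH-bit access field per
+	 * LSB region; bit cmd_q->id within a field grants this queue
+	 * access to that region.
+	 */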
+ for (j = 1; j < MAX_LSB_CNT; j++) {
+ if (status & q_mask)
+ bitmap_set(cmd_q->lsbmask, j, 1);
+ status >>= LSB_REGION_WIDTH;
+ }
+ queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
+ dev_info(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
+ cmd_q->id, queues);
+
+ return queues ? 0 : -EINVAL;
+}
+
+
+static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+ int lsb_cnt, int n_lsbs,
+ unsigned long *lsb_pub)
+{
+ DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
+ int bitno;
+ int qlsb_wgt;
+ int i;
+
+ /* For each queue:
+ * If the count of potential LSBs available to a queue matches the
+ * ordinal given to us in lsb_cnt:
+ * Copy the mask of possible LSBs for this queue into "qlsb";
+ * For each bit in qlsb, see if the corresponding bit in the
+ * aggregation mask is set; if so, we have a match.
+ * If we have a match, clear the bit in the aggregation to
+ * mark it as no longer available.
+ * If there is no match, clear the bit in qlsb and keep looking.
+ */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+ qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
+
+ if (qlsb_wgt == lsb_cnt) {
+ bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);
+
+ bitno = find_first_bit(qlsb, MAX_LSB_CNT);
+ while (bitno < MAX_LSB_CNT) {
+ if (test_bit(bitno, lsb_pub)) {
+ /* We found an available LSB
+ * that this queue can access
+ */
+ cmd_q->lsb = bitno;
+ bitmap_clear(lsb_pub, bitno, 1);
+ dev_info(ccp->dev,
+ "Queue %d gets LSB %d\n",
+ i, bitno);
+ break;
+ }
+ bitmap_clear(qlsb, bitno, 1);
+ bitno = find_first_bit(qlsb, MAX_LSB_CNT);
+ }
+ if (bitno >= MAX_LSB_CNT)
+ return -EINVAL;
+ n_lsbs--;
+ }
+ }
+ return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared resources.
+ */
+static int ccp_assign_lsbs(struct ccp_device *ccp)
+{
+ DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
+ DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
+ int n_lsbs = 0;
+ int bitno;
+ int i, lsb_cnt;
+ int rc = 0;
+
+ bitmap_zero(lsb_pub, MAX_LSB_CNT);
+
+ /* Create an aggregate bitmap to get a total count of available LSBs */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ bitmap_or(lsb_pub,
+ lsb_pub, ccp->cmd_q[i].lsbmask,
+ MAX_LSB_CNT);
+
+ n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);
+
+ if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBs to give every queue a private LSB.
+		 * Brute-force search, starting with the queues that are most
+		 * constrained in their LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N-squared algorithm with some optimization.
+		 */
+ */
+ for (lsb_cnt = 1;
+ n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+ lsb_cnt++) {
+ rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+ lsb_pub);
+ if (rc < 0)
+ return -EINVAL;
+ n_lsbs = rc;
+ }
+ }
+
+ rc = 0;
+ /* What's left of the LSBs, according to the public mask, now become
+ * shared. Any zero bits in the lsb_pub mask represent an LSB region
+ * that can't be used as a shared resource, so mark the LSB slots for
+ * them as "in use".
+ */
+ bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);
+
+ bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
+ while (bitno < MAX_LSB_CNT) {
+ bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+ bitmap_set(qlsb, bitno, 1);
+ bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
+ }
+
+ return rc;
+}
+
+static int ccp5_init(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct ccp_cmd_queue *cmd_q;
+ struct dma_pool *dma_pool;
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ unsigned int qmr, qim, i;
+ u64 status;
+ u32 status_lo, status_hi;
+ int ret;
+
+ /* Find available queues */
+ qim = 0;
+ qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+
+ if (!(qmr & (1 << i)))
+ continue;
+
+ /* Allocate a dma pool for this queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
+ ccp->name, i);
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ CCP_DMAPOOL_MAX_SIZE,
+ CCP_DMAPOOL_ALIGN, 0);
+		if (!dma_pool) {
+			dev_err(dev, "unable to allocate dma pool\n");
+			ret = -ENOMEM;
+			goto e_pool;
+		}
+
+ cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+ ccp->cmd_q_count++;
+
+ cmd_q->ccp = ccp;
+ cmd_q->id = i;
+ cmd_q->dma_pool = dma_pool;
+ mutex_init(&cmd_q->q_mutex);
+
+ /* Page alignment satisfies our needs for N <= 128 */
+ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+ cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
+ &cmd_q->qbase_dma,
+ GFP_KERNEL);
+ if (!cmd_q->qbase) {
+ dev_err(dev, "unable to allocate command queue\n");
+ ret = -ENOMEM;
+ goto e_pool;
+ }
+
+ cmd_q->qidx = 0;
+ /* Preset some register values and masks that are queue
+ * number dependent
+ */
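+		/* Each queue owns a bank of registers spaced
+		 * CMD5_Q_STATUS_INCR apart; queue i uses bank i + 1,
+		 * leaving the first bank for the global registers.
+		 */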
+ cmd_q->reg_control = ccp->io_regs +
+ CMD5_Q_STATUS_INCR * (i + 1);
+ cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
+ cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
+ cmd_q->reg_int_enable = cmd_q->reg_control +
+ CMD5_Q_INT_ENABLE_BASE;
+ cmd_q->reg_interrupt_status = cmd_q->reg_control +
+ CMD5_Q_INTERRUPT_STATUS_BASE;
+ cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
+ cmd_q->reg_int_status = cmd_q->reg_control +
+ CMD5_Q_INT_STATUS_BASE;
+ cmd_q->reg_dma_status = cmd_q->reg_control +
+ CMD5_Q_DMA_STATUS_BASE;
+ cmd_q->reg_dma_read_status = cmd_q->reg_control +
+ CMD5_Q_DMA_READ_STATUS_BASE;
+ cmd_q->reg_dma_write_status = cmd_q->reg_control +
+ CMD5_Q_DMA_WRITE_STATUS_BASE;
+
+ init_waitqueue_head(&cmd_q->int_queue);
+
+ dev_dbg(dev, "queue #%u available\n", i);
+ }
+ if (ccp->cmd_q_count == 0) {
+ dev_notice(dev, "no command queues available\n");
+ ret = -EIO;
+ goto e_pool;
+ }
+ dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+ /* Turn off the queues and disable interrupts until ready */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ cmd_q->qcontrol = 0; /* Start with nothing */
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+ /* Disable the interrupts */
+ iowrite32(0x00, cmd_q->reg_int_enable);
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+
+ /* Clear the interrupts */
+ iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+ }
+
+ dev_dbg(dev, "Requesting an IRQ...\n");
+ /* Request an irq */
+ ret = ccp->get_irq(ccp);
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_pool;
+ }
+
+ /* Initialize the queue used to suspend */
+ init_waitqueue_head(&ccp->suspend_queue);
+
+ dev_dbg(dev, "Loading LSB map...\n");
+ /* Copy the private LSB mask to the public registers */
+ status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
+ status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
+ iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
+ iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
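+	/* Only 30 bits of the LO mask register are used (see the
+	 * 0x3FFFFFFF write in ccp5other_config()), so the HI half is
+	 * spliced in at bit 30 below.
+	 */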
+	status = ((u64)status_hi << 30) | (u64)status_lo;
+
+ dev_dbg(dev, "Configuring virtual queues...\n");
+ /* Configure size of each virtual queue accessible to host */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ u32 dma_addr_lo;
+ u32 dma_addr_hi;
+
+ cmd_q = &ccp->cmd_q[i];
+
+ cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
+ cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ dma_addr_lo = low_address(cmd_q->qdma_tail);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
+ iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);
+
+ dma_addr_hi = high_address(cmd_q->qdma_tail);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
+
+ /* Find the LSB regions accessible to the queue */
+ ccp_find_lsb_regions(cmd_q, status);
+ cmd_q->lsb = -1; /* Unassigned value */
+ }
+
+ dev_dbg(dev, "Assigning LSBs...\n");
+ ret = ccp_assign_lsbs(ccp);
+ if (ret) {
+ dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
+ goto e_irq;
+ }
+
+ /* Optimization: pre-allocate LSB slots for each queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
+ ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
+ }
+
+ dev_dbg(dev, "Starting threads...\n");
+ /* Create a kthread for each queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct task_struct *kthread;
+
+ cmd_q = &ccp->cmd_q[i];
+
+ kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+ "%s-q%u", ccp->name, cmd_q->id);
+ if (IS_ERR(kthread)) {
+ dev_err(dev, "error creating queue thread (%ld)\n",
+ PTR_ERR(kthread));
+ ret = PTR_ERR(kthread);
+ goto e_kthread;
+ }
+
+ cmd_q->kthread = kthread;
+ wake_up_process(kthread);
+ }
+
+ dev_dbg(dev, "Enabling interrupts...\n");
+ /* Enable interrupts */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+ iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
+ }
+
+ dev_dbg(dev, "Registering device...\n");
+ /* Put this on the unit list to make it available */
+ ccp_add_device(ccp);
+
+ ret = ccp_register_rng(ccp);
+ if (ret)
+ goto e_kthread;
+
+ /* Register the DMA engine support */
+ ret = ccp_dmaengine_register(ccp);
+ if (ret)
+ goto e_hwrng;
+
+ return 0;
+
+e_hwrng:
+ ccp_unregister_rng(ccp);
+
+e_kthread:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+e_irq:
+ ccp->free_irq(ccp);
+
+e_pool:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ return ret;
+}
+
+static void ccp5_destroy(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct ccp_cmd_queue *cmd_q;
+ struct ccp_cmd *cmd;
+ unsigned int i;
+
+ /* Unregister the DMA engine */
+ ccp_dmaengine_unregister(ccp);
+
+ /* Unregister the RNG */
+ ccp_unregister_rng(ccp);
+
+ /* Remove this device from the list of available units first */
+ ccp_del_device(ccp);
+
+ /* Disable and clear interrupts */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ /* Turn off the run bit */
+ iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
+
+ /* Disable the interrupts */
+ iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+
+ /* Clear the interrupt status */
+ iowrite32(0x00, cmd_q->reg_int_enable);
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+
+ /* Stop the queue kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ ccp->free_irq(ccp);
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+ dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
+ cmd_q->qbase_dma);
+ }
+
+ /* Flush the cmd and backlog queue */
+ while (!list_empty(&ccp->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+ while (!list_empty(&ccp->backlog)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+}
+
+static irqreturn_t ccp5_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ u32 status;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
+
+ status = ioread32(cmd_q->reg_interrupt_status);
+
+ if (status) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_status);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+ /* On error, only save the first error value */
+ if ((status & INT_ERROR) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 1;
+
+ /* Acknowledge the interrupt and wake the kthread */
+ iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
+ wake_up_interruptible(&cmd_q->int_queue);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ccp5_config(struct ccp_device *ccp)
+{
+ /* Public side */
+ iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+}
+
+static void ccp5other_config(struct ccp_device *ccp)
+{
+ int i;
+ u32 rnd;
+
+ /* We own all of the queues on the NTB CCP */
+
+ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
+ iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
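+	/* Mix a dozen words of TRNG output into the AES mask registers */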
+ for (i = 0; i < 12; i++) {
+ rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
+ iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
+ }
+
+ iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
+ iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
+ iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);
+
+ iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
+ iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
+
+ iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);
+
+ ccp5_config(ccp);
+}
+
+/* Version 5 adds some function, but is essentially the same as v3 */
+static const struct ccp_actions ccp5_actions = {
+ .aes = ccp5_perform_aes,
+ .xts_aes = ccp5_perform_xts_aes,
+ .sha = ccp5_perform_sha,
+ .rsa = ccp5_perform_rsa,
+ .passthru = ccp5_perform_passthru,
+ .ecc = ccp5_perform_ecc,
+ .sballoc = ccp_lsb_alloc,
+ .sbfree = ccp_lsb_free,
+ .init = ccp5_init,
+ .destroy = ccp5_destroy,
+ .get_free_slots = ccp5_get_free_slots,
+ .irqhandler = ccp5_irq_handler,
+};
+
+const struct ccp_vdata ccpv5a = {
+ .version = CCP_VERSION(5, 0),
+ .setup = ccp5_config,
+ .perform = &ccp5_actions,
+ .bar = 2,
+ .offset = 0x0,
+};
+
+const struct ccp_vdata ccpv5b = {
+ .version = CCP_VERSION(5, 0),
+ .setup = ccp5other_config,
+ .perform = &ccp5_actions,
+ .bar = 2,
+ .offset = 0x0,
+};
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 87b9f2bfa623..cafa633aae10 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -39,6 +40,59 @@ struct ccp_tasklet_data {
struct ccp_cmd *cmd;
};
+/* Human-readable error strings */
+char *ccp_error_codes[] = {
+ "",
+ "ERR 01: ILLEGAL_ENGINE",
+ "ERR 02: ILLEGAL_KEY_ID",
+ "ERR 03: ILLEGAL_FUNCTION_TYPE",
+ "ERR 04: ILLEGAL_FUNCTION_MODE",
+ "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
+ "ERR 06: ILLEGAL_FUNCTION_SIZE",
+ "ERR 07: Zlib_MISSING_INIT_EOM",
+ "ERR 08: ILLEGAL_FUNCTION_RSVD",
+ "ERR 09: ILLEGAL_BUFFER_LENGTH",
+ "ERR 10: VLSB_FAULT",
+ "ERR 11: ILLEGAL_MEM_ADDR",
+ "ERR 12: ILLEGAL_MEM_SEL",
+ "ERR 13: ILLEGAL_CONTEXT_ID",
+ "ERR 14: ILLEGAL_KEY_ADDR",
+ "ERR 15: 0xF Reserved",
+ "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
+ "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
+ "ERR 18: CMD_TIMEOUT",
+ "ERR 19: IDMA0_AXI_SLVERR",
+ "ERR 20: IDMA0_AXI_DECERR",
+ "ERR 21: 0x15 Reserved",
+ "ERR 22: IDMA1_AXI_SLAVE_FAULT",
+	"ERR 23: IDMA1_AXI_DECERR",
+ "ERR 24: 0x18 Reserved",
+ "ERR 25: ZLIBVHB_AXI_SLVERR",
+ "ERR 26: ZLIBVHB_AXI_DECERR",
+	"ERR 27: 0x1B Reserved",
+	"ERR 28: ZLIB_UNEXPECTED_EOM",
+	"ERR 29: ZLIB_EXTRA_DATA",
+ "ERR 30: ZLIB_BTYPE",
+ "ERR 31: ZLIB_UNDEFINED_SYMBOL",
+ "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
+ "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
+	"ERR 34: ZLIB_VHB_ILLEGAL_FETCH",
+ "ERR 35: ZLIB_UNCOMPRESSED_LEN",
+ "ERR 36: ZLIB_LIMIT_REACHED",
+ "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
+ "ERR 38: ODMA0_AXI_SLVERR",
+ "ERR 39: ODMA0_AXI_DECERR",
+ "ERR 40: 0x28 Reserved",
+ "ERR 41: ODMA1_AXI_SLVERR",
+ "ERR 42: ODMA1_AXI_DECERR",
+ "ERR 43: LSB_PARITY_ERR",
+};
+
+void ccp_log_error(struct ccp_device *d, int e)
+{
+	/* Guard against error codes beyond the string table */
+	if (e < 0 || e >= ARRAY_SIZE(ccp_error_codes))
+		dev_err(d->dev, "CCP error: unknown (0x%x)\n", e);
+	else
+		dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+}
+
/* List of CCPs, CCP count, read-write access lock, and access functions
*
* Lock structure: get ccp_unit_lock for reading whenever we need to
@@ -58,7 +112,7 @@ static struct ccp_device *ccp_rr;
/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
-unsigned int ccp_increment_unit_ordinal(void)
+static unsigned int ccp_increment_unit_ordinal(void)
{
return atomic_inc_return(&ccp_unit_ordinal);
}
@@ -118,6 +172,29 @@ void ccp_del_device(struct ccp_device *ccp)
write_unlock_irqrestore(&ccp_unit_lock, flags);
}
+
+
+int ccp_register_rng(struct ccp_device *ccp)
+{
+ int ret = 0;
+
+ dev_dbg(ccp->dev, "Registering RNG...\n");
+ /* Register an RNG */
+ ccp->hwrng.name = ccp->rngname;
+ ccp->hwrng.read = ccp_trng_read;
+ ret = hwrng_register(&ccp->hwrng);
+ if (ret)
+ dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);
+
+ return ret;
+}
+
+void ccp_unregister_rng(struct ccp_device *ccp)
+{
+ if (ccp->hwrng.name)
+ hwrng_unregister(&ccp->hwrng);
+}
+
static struct ccp_device *ccp_get_device(void)
{
unsigned long flags;
@@ -397,9 +474,9 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
spin_lock_init(&ccp->cmd_lock);
mutex_init(&ccp->req_mutex);
- mutex_init(&ccp->ksb_mutex);
- ccp->ksb_count = KSB_COUNT;
- ccp->ksb_start = 0;
+ mutex_init(&ccp->sb_mutex);
+ ccp->sb_count = KSB_COUNT;
+ ccp->sb_start = 0;
ccp->ord = ccp_increment_unit_ordinal();
snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
@@ -408,6 +485,34 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
return ccp;
}
+int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+ u32 trng_value;
+ int len = min_t(int, sizeof(trng_value), max);
+
+ /* Locking is provided by the caller so we can update device
+ * hwrng-related fields safely
+ */
+ trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+ if (!trng_value) {
+		/* Zero is returned if no data is available or if a
+ * bad-entropy error is present. Assume an error if
+ * we exceed TRNG_RETRIES reads of zero.
+ */
+ if (ccp->hwrng_retries++ > TRNG_RETRIES)
+ return -EIO;
+
+ return 0;
+ }
+
+ /* Reset the counter and save the rng value */
+ ccp->hwrng_retries = 0;
+ memcpy(data, &trng_value, len);
+
+ return len;
+}
+
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index bd41ffceff82..da5f4a678083 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -60,7 +61,69 @@
#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f)
#define CMD_Q_DEPTH(__qs) (((__qs) >> 12) & 0x0000000f)
-/****** REQ0 Related Values ******/
+/* ------------------------ CCP Version 5 Specifics ------------------------ */
+#define CMD5_QUEUE_MASK_OFFSET 0x00
+#define CMD5_QUEUE_PRIO_OFFSET 0x04
+#define CMD5_REQID_CONFIG_OFFSET 0x08
+#define CMD5_CMD_TIMEOUT_OFFSET 0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET 0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
+
+#define CMD5_Q_CONTROL_BASE 0x0000
+#define CMD5_Q_TAIL_LO_BASE 0x0004
+#define CMD5_Q_HEAD_LO_BASE 0x0008
+#define CMD5_Q_INT_ENABLE_BASE 0x000C
+#define CMD5_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD5_Q_STATUS_BASE 0x0100
+#define CMD5_Q_INT_STATUS_BASE 0x0104
+#define CMD5_Q_DMA_STATUS_BASE 0x0108
+#define CMD5_Q_DMA_READ_STATUS_BASE 0x010C
+#define CMD5_Q_DMA_WRITE_STATUS_BASE 0x0110
+#define CMD5_Q_ABORT_BASE 0x0114
+#define CMD5_Q_AX_CACHE_BASE 0x0118
+
+#define CMD5_CONFIG_0_OFFSET 0x6000
+#define CMD5_TRNG_CTL_OFFSET 0x6008
+#define CMD5_AES_MASK_OFFSET 0x6010
+#define CMD5_CLK_GATE_CTL_OFFSET 0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD5_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD5_Q_RUN 0x1
+#define CMD5_Q_HALT 0x2
+#define CMD5_Q_MEM_LOCATION 0x4
+#define CMD5_Q_SIZE 0x1F
+#define CMD5_Q_SHIFT 3
+#define COMMANDS_PER_QUEUE 16
+#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+ CMD5_Q_SIZE)
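+/* For a power-of-two depth n, ffs(n) - 2 == log2(n) - 1, so the Q_SIZE
+ * control field is programmed with log2(COMMANDS_PER_QUEUE) - 1.
+ */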
+#define Q_PTR_MASK			((2 << (QUEUE_SIZE_VAL + 5)) - 1)
+#define Q_DESC_SIZE sizeof(struct ccp5_desc)
+#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION 0x1
+#define INT_ERROR 0x2
+#define INT_QUEUE_STOPPED 0x4
+#define ALL_INTERRUPTS (INT_COMPLETION| \
+ INT_ERROR| \
+ INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH 5
+#define MAX_LSB_CNT 8
+
+#define LSB_SIZE 16
+#define LSB_ITEM_SIZE 32
+#define PLSB_MAP_SIZE (LSB_SIZE)
+#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
+
+#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE)
+
+/* ------------------------ CCP Version 3 Specifics ------------------------ */
#define REQ0_WAIT_FOR_WRITE 0x00000004
#define REQ0_INT_ON_COMPLETE 0x00000002
#define REQ0_STOP_ON_COMPLETE 0x00000001
@@ -110,29 +173,30 @@
#define KSB_START 77
#define KSB_END 127
#define KSB_COUNT (KSB_END - KSB_START + 1)
-#define CCP_KSB_BITS 256
-#define CCP_KSB_BYTES 32
+#define CCP_SB_BITS 256
#define CCP_JOBID_MASK 0x0000003f
+/* ------------------------ General CCP Defines ------------------------ */
+
#define CCP_DMAPOOL_MAX_SIZE 64
#define CCP_DMAPOOL_ALIGN BIT(5)
#define CCP_REVERSE_BUF_SIZE 64
-#define CCP_AES_KEY_KSB_COUNT 1
-#define CCP_AES_CTX_KSB_COUNT 1
+#define CCP_AES_KEY_SB_COUNT 1
+#define CCP_AES_CTX_SB_COUNT 1
-#define CCP_XTS_AES_KEY_KSB_COUNT 1
-#define CCP_XTS_AES_CTX_KSB_COUNT 1
+#define CCP_XTS_AES_KEY_SB_COUNT 1
+#define CCP_XTS_AES_CTX_SB_COUNT 1
-#define CCP_SHA_KSB_COUNT 1
+#define CCP_SHA_SB_COUNT 1
#define CCP_RSA_MAX_WIDTH 4096
#define CCP_PASSTHRU_BLOCKSIZE 256
#define CCP_PASSTHRU_MASKSIZE 32
-#define CCP_PASSTHRU_KSB_COUNT 1
+#define CCP_PASSTHRU_SB_COUNT 1
#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */
#define CCP_ECC_MAX_OPERANDS 6
@@ -144,31 +208,12 @@
#define CCP_ECC_RESULT_OFFSET 60
#define CCP_ECC_RESULT_SUCCESS 0x0001
-struct ccp_op;
-
-/* Structure for computation functions that are device-specific */
-struct ccp_actions {
- int (*perform_aes)(struct ccp_op *);
- int (*perform_xts_aes)(struct ccp_op *);
- int (*perform_sha)(struct ccp_op *);
- int (*perform_rsa)(struct ccp_op *);
- int (*perform_passthru)(struct ccp_op *);
- int (*perform_ecc)(struct ccp_op *);
- int (*init)(struct ccp_device *);
- void (*destroy)(struct ccp_device *);
- irqreturn_t (*irqhandler)(int, void *);
-};
-
-/* Structure to hold CCP version-specific values */
-struct ccp_vdata {
- unsigned int version;
- const struct ccp_actions *perform;
-};
-
-extern struct ccp_vdata ccpv3;
+#define CCP_SB_BYTES 32
+struct ccp_op;
struct ccp_device;
struct ccp_cmd;
+struct ccp_fns;
struct ccp_dma_cmd {
struct list_head entry;
@@ -212,9 +257,29 @@ struct ccp_cmd_queue {
/* Queue dma pool */
struct dma_pool *dma_pool;
- /* Queue reserved KSB regions */
- u32 ksb_key;
- u32 ksb_ctx;
+	/* Queue base address (not necessarily aligned) */
+ struct ccp5_desc *qbase;
+
+ /* Aligned queue start address (per requirement) */
+ struct mutex q_mutex ____cacheline_aligned;
+ unsigned int qidx;
+
+ /* Version 5 has different requirements for queue memory */
+ unsigned int qsize;
+ dma_addr_t qbase_dma;
+ dma_addr_t qdma_tail;
+
+ /* Per-queue reserved storage block(s) */
+ u32 sb_key;
+ u32 sb_ctx;
+
+ /* Bitmap of LSBs that can be accessed by this queue */
+ DECLARE_BITMAP(lsbmask, MAX_LSB_CNT);
+	/* Private LSB that is assigned to this queue, or -1 if none */
+	int lsb;
+	/* Slot-allocation bitmap for the private LSB, unused otherwise */
+	DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
/* Queue processing thread */
struct task_struct *kthread;
@@ -229,8 +294,17 @@ struct ccp_cmd_queue {
u32 int_err;
/* Register addresses for queue */
+ void __iomem *reg_control;
+ void __iomem *reg_tail_lo;
+ void __iomem *reg_head_lo;
+ void __iomem *reg_int_enable;
+ void __iomem *reg_interrupt_status;
void __iomem *reg_status;
void __iomem *reg_int_status;
+ void __iomem *reg_dma_status;
+ void __iomem *reg_dma_read_status;
+ void __iomem *reg_dma_write_status;
+ u32 qcontrol; /* Cached control register */
/* Status values from job */
u32 int_status;
@@ -253,16 +327,14 @@ struct ccp_device {
struct device *dev;
- /*
- * Bus specific device information
+ /* Bus specific device information
*/
void *dev_specific;
int (*get_irq)(struct ccp_device *ccp);
void (*free_irq)(struct ccp_device *ccp);
unsigned int irq;
- /*
- * I/O area used for device communication. The register mapping
+ /* I/O area used for device communication. The register mapping
* starts at an offset into the mapped bar.
* The CMD_REQx registers and the Delete_Cmd_Queue_Job register
* need to be protected while a command queue thread is accessing
@@ -272,8 +344,7 @@ struct ccp_device {
void __iomem *io_map;
void __iomem *io_regs;
- /*
- * Master lists that all cmds are queued on. Because there can be
+ /* Master lists that all cmds are queued on. Because there can be
* more than one CCP command queue that can process a cmd a separate
 * backlog list is needed so that the backlog completion call
* completes before the cmd is available for execution.
@@ -283,47 +354,54 @@ struct ccp_device {
struct list_head cmd;
struct list_head backlog;
- /*
- * The command queues. These represent the queues available on the
+ /* The command queues. These represent the queues available on the
* CCP that are available for processing cmds
*/
struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
unsigned int cmd_q_count;
- /*
- * Support for the CCP True RNG
+ /* Support for the CCP True RNG
*/
struct hwrng hwrng;
unsigned int hwrng_retries;
- /*
- * Support for the CCP DMA capabilities
+ /* Support for the CCP DMA capabilities
*/
struct dma_device dma_dev;
struct ccp_dma_chan *ccp_dma_chan;
struct kmem_cache *dma_cmd_cache;
struct kmem_cache *dma_desc_cache;
- /*
- * A counter used to generate job-ids for cmds submitted to the CCP
+ /* A counter used to generate job-ids for cmds submitted to the CCP
*/
atomic_t current_id ____cacheline_aligned;
- /*
- * The CCP uses key storage blocks (KSB) to maintain context for certain
- * operations. To prevent multiple cmds from using the same KSB range
- * a command queue reserves a KSB range for the duration of the cmd.
- * Each queue, will however, reserve 2 KSB blocks for operations that
- * only require single KSB entries (eg. AES context/iv and key) in order
- * to avoid allocation contention. This will reserve at most 10 KSB
- * entries, leaving 40 KSB entries available for dynamic allocation.
+ /* The v3 CCP uses key storage blocks (SB) to maintain context for
+ * certain operations. To prevent multiple cmds from using the same
+ * SB range a command queue reserves an SB range for the duration of
+	 * the cmd. Each queue will, however, reserve 2 SB blocks for
+ * operations that only require single SB entries (eg. AES context/iv
+ * and key) in order to avoid allocation contention. This will reserve
+ * at most 10 SB entries, leaving 40 SB entries available for dynamic
+ * allocation.
+ *
+	 * memory ranges, each of which can be enabled for access by one
+ * memrory ranges, each of which can be enabled for access by one
+ * or more queues. Device initialization takes this into account,
+ * and attempts to assign one region for exclusive use by each
+ * available queue; the rest are then aggregated as "public" use.
+ * If there are fewer regions than queues, all regions are shared
+ * amongst all queues.
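+	 *
+	 * For example, five active queues on a part with eight regions can
+	 * each claim one private region, leaving three as shared space.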
*/
- struct mutex ksb_mutex ____cacheline_aligned;
- DECLARE_BITMAP(ksb, KSB_COUNT);
- wait_queue_head_t ksb_queue;
- unsigned int ksb_avail;
- unsigned int ksb_count;
- u32 ksb_start;
+ struct mutex sb_mutex ____cacheline_aligned;
+ DECLARE_BITMAP(sb, KSB_COUNT);
+ wait_queue_head_t sb_queue;
+ unsigned int sb_avail;
+ unsigned int sb_count;
+ u32 sb_start;
+
+ /* Bitmap of shared LSBs, if any */
+ DECLARE_BITMAP(lsbmap, SLSB_MAP_SIZE);
/* Suspend support */
unsigned int suspending;
@@ -335,10 +413,11 @@ struct ccp_device {
enum ccp_memtype {
CCP_MEMTYPE_SYSTEM = 0,
- CCP_MEMTYPE_KSB,
+ CCP_MEMTYPE_SB,
CCP_MEMTYPE_LOCAL,
CCP_MEMTYPE__LAST,
};
+#define	CCP_MEMTYPE_LSB	CCP_MEMTYPE_SB
struct ccp_dma_info {
dma_addr_t address;
@@ -379,7 +458,7 @@ struct ccp_mem {
enum ccp_memtype type;
union {
struct ccp_dma_info dma;
- u32 ksb;
+ u32 sb;
} u;
};
@@ -419,13 +498,14 @@ struct ccp_op {
u32 jobid;
u32 ioc;
u32 soc;
- u32 ksb_key;
- u32 ksb_ctx;
+ u32 sb_key;
+ u32 sb_ctx;
u32 init;
u32 eom;
struct ccp_mem src;
struct ccp_mem dst;
+ struct ccp_mem exp;
union {
struct ccp_aes_op aes;
@@ -435,6 +515,7 @@ struct ccp_op {
struct ccp_passthru_op passthru;
struct ccp_ecc_op ecc;
} u;
+ struct ccp_mem key;
};
static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -447,6 +528,70 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
return upper_32_bits(info->address + info->offset) & 0x0000ffff;
}
+/*
+ * Descriptor for version 5 CCP commands.
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+ __le32 soc:1;
+ __le32 ioc:1;
+ __le32 rsvd1:1;
+ __le32 init:1;
+ __le32 eom:1; /* AES/SHA only */
+ __le32 function:15;
+ __le32 engine:4;
+ __le32 prot:1;
+ __le32 rsvd2:7;
+};
+
+struct dword3 {
+ __le32 src_hi:16;
+ __le32 src_mem:2;
+ __le32 lsb_cxt_id:8;
+ __le32 rsvd1:5;
+ __le32 fixed:1;
+};
+
+union dword4 {
+ __le32 dst_lo; /* NON-SHA */
+ __le32 sha_len_lo; /* SHA */
+};
+
+union dword5 {
+ struct {
+ __le32 dst_hi:16;
+ __le32 dst_mem:2;
+ __le32 rsvd1:13;
+ __le32 fixed:1;
+ } fields;
+ __le32 sha_len_hi;
+};
+
+struct dword7 {
+ __le32 key_hi:16;
+ __le32 key_mem:2;
+ __le32 rsvd1:14;
+};
+
+struct ccp5_desc {
+ struct dword0 dw0;
+ __le32 length;
+ __le32 src_lo;
+ struct dword3 dw3;
+ union dword4 dw4;
+ union dword5 dw5;
+ __le32 key_lo;
+ struct dword7 dw7;
+};
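+
+/* Eight 32-bit words (32 bytes) per descriptor; Q_DESC_SIZE and the
+ * eight-word copy loop in ccp5_do_cmd() rely on this layout.
+ */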
+
int ccp_pci_init(void);
void ccp_pci_exit(void);
@@ -456,13 +601,48 @@ void ccp_platform_exit(void);
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);
+extern void ccp_log_error(struct ccp_device *, int);
+
struct ccp_device *ccp_alloc_struct(struct device *dev);
bool ccp_queues_suspended(struct ccp_device *ccp);
int ccp_cmd_queue_thread(void *data);
+int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait);
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
+int ccp_register_rng(struct ccp_device *ccp);
+void ccp_unregister_rng(struct ccp_device *ccp);
int ccp_dmaengine_register(struct ccp_device *ccp);
void ccp_dmaengine_unregister(struct ccp_device *ccp);
+/* Structure for computation functions that are device-specific */
+struct ccp_actions {
+ int (*aes)(struct ccp_op *);
+ int (*xts_aes)(struct ccp_op *);
+ int (*sha)(struct ccp_op *);
+ int (*rsa)(struct ccp_op *);
+ int (*passthru)(struct ccp_op *);
+ int (*ecc)(struct ccp_op *);
+ u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
+ void (*sbfree)(struct ccp_cmd_queue *, unsigned int,
+ unsigned int);
+ unsigned int (*get_free_slots)(struct ccp_cmd_queue *);
+ int (*init)(struct ccp_device *);
+ void (*destroy)(struct ccp_device *);
+ irqreturn_t (*irqhandler)(int, void *);
+};
+
+/* Structure to hold CCP version-specific values */
+struct ccp_vdata {
+ const unsigned int version;
+ void (*setup)(struct ccp_device *);
+ const struct ccp_actions *perform;
+ const unsigned int bar;
+ const unsigned int offset;
+};
+
+extern const struct ccp_vdata ccpv3;
+extern const struct ccp_vdata ccpv5a;
+extern const struct ccp_vdata ccpv5b;
+
#endif
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 94f77b0f9ae7..6553912804f7 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -299,12 +299,10 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
{
struct ccp_dma_desc *desc;
- desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
+ desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
if (!desc)
return NULL;
- memset(desc, 0, sizeof(*desc));
-
dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
desc->tx_desc.flags = flags;
desc->tx_desc.tx_submit = ccp_tx_submit;
@@ -650,8 +648,11 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
ccp->name);
- if (!dma_cmd_cache_name)
- return -ENOMEM;
+ if (!dma_desc_cache_name) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
sizeof(struct ccp_dma_desc),
sizeof(void *),
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index ffa2891035ac..50fae4442801 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,72 +21,28 @@
#include "ccp-dev.h"
/* SHA initial context values */
-static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
- cpu_to_be32(SHA1_H4), 0, 0, 0,
+ cpu_to_be32(SHA1_H4),
};
-static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};
-static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
-static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
-{
- int start;
-
- for (;;) {
- mutex_lock(&ccp->ksb_mutex);
-
- start = (u32)bitmap_find_next_zero_area(ccp->ksb,
- ccp->ksb_count,
- ccp->ksb_start,
- count, 0);
- if (start <= ccp->ksb_count) {
- bitmap_set(ccp->ksb, start, count);
-
- mutex_unlock(&ccp->ksb_mutex);
- break;
- }
-
- ccp->ksb_avail = 0;
-
- mutex_unlock(&ccp->ksb_mutex);
-
- /* Wait for KSB entries to become available */
- if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail))
- return 0;
- }
-
- return KSB_START + start;
-}
-
-static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
- unsigned int count)
-{
- if (!start)
- return;
-
- mutex_lock(&ccp->ksb_mutex);
-
- bitmap_clear(ccp->ksb, start - KSB_START, count);
-
- ccp->ksb_avail = 1;
-
- mutex_unlock(&ccp->ksb_mutex);
-
- wake_up_interruptible_all(&ccp->ksb_queue);
-}
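+/* Job ids are only meaningful to v3 hardware; a version 5 device does
+ * not use them (see ccp5_do_cmd()), so a constant 0 is used instead.
+ */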
+#define	CCP_NEW_JOBID(ccp)	(((ccp)->vdata->version == CCP_VERSION(3, 0)) ? \
+					 ccp_gen_jobid(ccp) : 0)
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
@@ -231,7 +188,7 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
unsigned int len, unsigned int se_len,
bool sign_extend)
{
- unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+ unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
u8 buffer[CCP_REVERSE_BUF_SIZE];
if (WARN_ON(se_len > sizeof(buffer)))
@@ -241,21 +198,21 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
dm_offset = 0;
nbytes = len;
while (nbytes) {
- ksb_len = min_t(unsigned int, nbytes, se_len);
- sg_offset -= ksb_len;
+ sb_len = min_t(unsigned int, nbytes, se_len);
+ sg_offset -= sb_len;
- scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0);
- for (i = 0; i < ksb_len; i++)
- wa->address[dm_offset + i] = buffer[ksb_len - i - 1];
+ scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
+ for (i = 0; i < sb_len; i++)
+ wa->address[dm_offset + i] = buffer[sb_len - i - 1];
- dm_offset += ksb_len;
- nbytes -= ksb_len;
+ dm_offset += sb_len;
+ nbytes -= sb_len;
- if ((ksb_len != se_len) && sign_extend) {
+ if ((sb_len != se_len) && sign_extend) {
/* Must sign-extend to nearest sign-extend length */
if (wa->address[dm_offset - 1] & 0x80)
memset(wa->address + dm_offset, 0xff,
- se_len - ksb_len);
+ se_len - sb_len);
}
}
@@ -266,22 +223,22 @@ static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
struct scatterlist *sg,
unsigned int len)
{
- unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
+ unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
u8 buffer[CCP_REVERSE_BUF_SIZE];
sg_offset = 0;
dm_offset = len;
nbytes = len;
while (nbytes) {
- ksb_len = min_t(unsigned int, nbytes, sizeof(buffer));
- dm_offset -= ksb_len;
+ sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
+ dm_offset -= sb_len;
- for (i = 0; i < ksb_len; i++)
- buffer[ksb_len - i - 1] = wa->address[dm_offset + i];
- scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1);
+ for (i = 0; i < sb_len; i++)
+ buffer[sb_len - i - 1] = wa->address[dm_offset + i];
+ scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);
- sg_offset += ksb_len;
- nbytes -= ksb_len;
+ sg_offset += sb_len;
+ nbytes -= sb_len;
}
}
@@ -449,9 +406,9 @@ static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
}
}
-static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
- struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
- u32 byte_swap, bool from)
+static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
+ struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
+ u32 byte_swap, bool from)
{
struct ccp_op op;
@@ -463,8 +420,8 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
if (from) {
op.soc = 1;
- op.src.type = CCP_MEMTYPE_KSB;
- op.src.u.ksb = ksb;
+ op.src.type = CCP_MEMTYPE_SB;
+ op.src.u.sb = sb;
op.dst.type = CCP_MEMTYPE_SYSTEM;
op.dst.u.dma.address = wa->dma.address;
op.dst.u.dma.length = wa->length;
@@ -472,27 +429,27 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
op.src.type = CCP_MEMTYPE_SYSTEM;
op.src.u.dma.address = wa->dma.address;
op.src.u.dma.length = wa->length;
- op.dst.type = CCP_MEMTYPE_KSB;
- op.dst.u.ksb = ksb;
+ op.dst.type = CCP_MEMTYPE_SB;
+ op.dst.u.sb = sb;
}
op.u.passthru.byte_swap = byte_swap;
- return cmd_q->ccp->vdata->perform->perform_passthru(&op);
+ return cmd_q->ccp->vdata->perform->passthru(&op);
}
-static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
- struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
- u32 byte_swap)
+static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
+ struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
+ u32 byte_swap)
{
- return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false);
+ return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}
-static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q,
- struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
- u32 byte_swap)
+static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
+ struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
+ u32 byte_swap)
{
- return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true);
+ return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
@@ -527,54 +484,54 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
return -EINVAL;
}
- BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
- BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
+ BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
ret = -EIO;
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
- op.ksb_key = cmd_q->ksb_key;
- op.ksb_ctx = cmd_q->ksb_ctx;
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+ op.sb_key = cmd_q->sb_key;
+ op.sb_ctx = cmd_q->sb_ctx;
op.init = 1;
op.u.aes.type = aes->type;
op.u.aes.mode = aes->mode;
op.u.aes.action = aes->action;
- /* All supported key sizes fit in a single (32-byte) KSB entry
+ /* All supported key sizes fit in a single (32-byte) SB entry
* and must be in little endian format. Use the 256-bit byte
* swap passthru option to convert from big endian to little
* endian.
*/
ret = ccp_init_dm_workarea(&key, cmd_q,
- CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+ CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
- dm_offset = CCP_KSB_BYTES - aes->key_len;
+ dm_offset = CCP_SB_BYTES - aes->key_len;
ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
- ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_key;
}
- /* The AES context fits in a single (32-byte) KSB entry and
+ /* The AES context fits in a single (32-byte) SB entry and
* must be in little endian format. Use the 256-bit byte swap
* passthru option to convert from big endian to little endian.
*/
ret = ccp_init_dm_workarea(&ctx, cmd_q,
- CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+ CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
- dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
- ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
@@ -592,9 +549,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
op.eom = 1;
/* Push the K1/K2 key to the CCP now */
- ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid,
- op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
+ op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_src;
@@ -602,15 +559,15 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
aes->cmac_key_len);
- ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_src;
}
}
- ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_src;
@@ -622,15 +579,15 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
/* Retrieve the AES context - convert from LE to BE using
* 32-byte (256-bit) byteswapping
*/
- ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_src;
}
/* ...but we only need AES_BLOCK_SIZE bytes */
- dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
e_src:
@@ -680,56 +637,56 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
return -EINVAL;
}
- BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
- BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
+ BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);
ret = -EIO;
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
- op.ksb_key = cmd_q->ksb_key;
- op.ksb_ctx = cmd_q->ksb_ctx;
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+ op.sb_key = cmd_q->sb_key;
+ op.sb_ctx = cmd_q->sb_ctx;
op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
op.u.aes.type = aes->type;
op.u.aes.mode = aes->mode;
op.u.aes.action = aes->action;
- /* All supported key sizes fit in a single (32-byte) KSB entry
+ /* All supported key sizes fit in a single (32-byte) SB entry
* and must be in little endian format. Use the 256-bit byte
* swap passthru option to convert from big endian to little
* endian.
*/
ret = ccp_init_dm_workarea(&key, cmd_q,
- CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+ CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
- dm_offset = CCP_KSB_BYTES - aes->key_len;
+ dm_offset = CCP_SB_BYTES - aes->key_len;
ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
- ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_key;
}
- /* The AES context fits in a single (32-byte) KSB entry and
+ /* The AES context fits in a single (32-byte) SB entry and
* must be in little endian format. Use the 256-bit byte swap
* passthru option to convert from big endian to little endian.
*/
ret = ccp_init_dm_workarea(&ctx, cmd_q,
- CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+ CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
if (aes->mode != CCP_AES_MODE_ECB) {
- /* Load the AES context - conver to LE */
- dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ /* Load the AES context - convert to LE */
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
- ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
@@ -772,7 +729,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.soc = 1;
}
- ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -785,15 +742,15 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Retrieve the AES context - convert from LE to BE using
* 32-byte (256-bit) byteswapping
*/
- ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
/* ...but we only need AES_BLOCK_SIZE bytes */
- dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
}
@@ -857,53 +814,53 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
if (!xts->key || !xts->iv || !xts->src || !xts->dst)
return -EINVAL;
- BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1);
- BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
+ BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);
ret = -EIO;
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
- op.ksb_key = cmd_q->ksb_key;
- op.ksb_ctx = cmd_q->ksb_ctx;
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+ op.sb_key = cmd_q->sb_key;
+ op.sb_ctx = cmd_q->sb_ctx;
op.init = 1;
op.u.xts.action = xts->action;
op.u.xts.unit_size = xts->unit_size;
- /* All supported key sizes fit in a single (32-byte) KSB entry
+ /* All supported key sizes fit in a single (32-byte) SB entry
* and must be in little endian format. Use the 256-bit byte
* swap passthru option to convert from big endian to little
* endian.
*/
ret = ccp_init_dm_workarea(&key, cmd_q,
- CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
+ CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
- dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128;
+ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
- ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_key;
}
- /* The AES context fits in a single (32-byte) KSB entry and
+ /* The AES context fits in a single (32-byte) SB entry and
* for XTS is already in little endian format so no byte swapping
* is needed.
*/
ret = ccp_init_dm_workarea(&ctx, cmd_q,
- CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
+ CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
- ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_NOOP);
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
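The two ccp_set_dm_area() calls above swap the halves of the XTS key pair within one 32-byte SB entry. A standalone sketch, assuming xts->key holds two 128-bit keys back to back; sb_pack_xts_key() is illustrative, not the driver's API:

	#include <string.h>

	#define CCP_SB_BYTES    32
	#define AES_KEYSIZE_128 16

	static void sb_pack_xts_key(unsigned char sb[CCP_SB_BYTES],
				    const unsigned char key[2 * AES_KEYSIZE_128])
	{
		unsigned int dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;

		/* First 128-bit key to the upper half of the entry... */
		memcpy(sb + dm_offset, key, AES_KEYSIZE_128);
		/* ...second 128-bit key to the lower half. */
		memcpy(sb, key + AES_KEYSIZE_128, AES_KEYSIZE_128);
	}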
@@ -937,7 +894,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
if (!src.sg_wa.bytes_left)
op.eom = 1;
- ret = cmd_q->ccp->vdata->perform->perform_xts_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -949,15 +906,15 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
/* Retrieve the AES context - convert from LE to BE using
* 32-byte (256-bit) byteswapping
*/
- ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
/* ...but we only need AES_BLOCK_SIZE bytes */
- dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);
e_dst:
@@ -982,163 +939,227 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct ccp_dm_workarea ctx;
struct ccp_data src;
struct ccp_op op;
+ unsigned int ioffset, ooffset;
+ unsigned int digest_size;
+ int sb_count;
+ const void *init;
+ u64 block_size;
+ int ctx_size;
int ret;
- if (sha->ctx_len != CCP_SHA_CTXSIZE)
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ if (sha->ctx_len < SHA1_DIGEST_SIZE)
+ return -EINVAL;
+ block_size = SHA1_BLOCK_SIZE;
+ break;
+ case CCP_SHA_TYPE_224:
+ if (sha->ctx_len < SHA224_DIGEST_SIZE)
+ return -EINVAL;
+ block_size = SHA224_BLOCK_SIZE;
+ break;
+ case CCP_SHA_TYPE_256:
+ if (sha->ctx_len < SHA256_DIGEST_SIZE)
+ return -EINVAL;
+ block_size = SHA256_BLOCK_SIZE;
+ break;
+ default:
return -EINVAL;
+ }
if (!sha->ctx)
return -EINVAL;
- if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1)))
+ if (!sha->final && (sha->src_len & (block_size - 1)))
return -EINVAL;
- if (!sha->src_len) {
- const u8 *sha_zero;
+ /* The version 3 device can't handle zero-length input */
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
- /* Not final, just return */
- if (!sha->final)
- return 0;
+ if (!sha->src_len) {
+ unsigned int digest_len;
+ const u8 *sha_zero;
- /* CCP can't do a zero length sha operation so the caller
- * must buffer the data.
- */
- if (sha->msg_bits)
- return -EINVAL;
+ /* Not final, just return */
+ if (!sha->final)
+ return 0;
- /* The CCP cannot perform zero-length sha operations so the
- * caller is required to buffer data for the final operation.
- * However, a sha operation for a message with a total length
- * of zero is valid so known values are required to supply
- * the result.
- */
- switch (sha->type) {
- case CCP_SHA_TYPE_1:
- sha_zero = sha1_zero_message_hash;
- break;
- case CCP_SHA_TYPE_224:
- sha_zero = sha224_zero_message_hash;
- break;
- case CCP_SHA_TYPE_256:
- sha_zero = sha256_zero_message_hash;
- break;
- default:
- return -EINVAL;
- }
+ /* CCP can't do a zero length sha operation so the
+ * caller must buffer the data.
+ */
+ if (sha->msg_bits)
+ return -EINVAL;
+
+ /* The CCP cannot perform zero-length sha operations
+ * so the caller is required to buffer data for the
+ * final operation. However, a sha operation for a
+ * message with a total length of zero is valid so
+ * known values are required to supply the result.
+ */
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ sha_zero = sha1_zero_message_hash;
+ digest_len = SHA1_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_224:
+ sha_zero = sha224_zero_message_hash;
+ digest_len = SHA224_DIGEST_SIZE;
+ break;
+ case CCP_SHA_TYPE_256:
+ sha_zero = sha256_zero_message_hash;
+ digest_len = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
- scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
- sha->ctx_len, 1);
+ scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
+ digest_len, 1);
- return 0;
+ return 0;
+ }
}
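The zero-length path above boils down to copying a precomputed empty-message digest. A hedged sketch using the kernel-exported sha*_zero_message_hash constants referenced in the hunk; the helper and its signature are illustrative, not the driver's API:

	#include <linux/ccp.h>
	#include <linux/errno.h>
	#include <linux/string.h>
	#include <crypto/sha.h>

	static int copy_zero_msg_hash(enum ccp_sha_type type, u8 *out)
	{
		switch (type) {
		case CCP_SHA_TYPE_1:
			memcpy(out, sha1_zero_message_hash, SHA1_DIGEST_SIZE);
			return 0;
		case CCP_SHA_TYPE_224:
			memcpy(out, sha224_zero_message_hash, SHA224_DIGEST_SIZE);
			return 0;
		case CCP_SHA_TYPE_256:
			memcpy(out, sha256_zero_message_hash, SHA256_DIGEST_SIZE);
			return 0;
		default:
			return -EINVAL;
		}
	}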
- if (!sha->src)
- return -EINVAL;
+ /* Set variables used throughout */
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ digest_size = SHA1_DIGEST_SIZE;
+ init = (void *) ccp_sha1_init;
+ ctx_size = SHA1_DIGEST_SIZE;
+ sb_count = 1;
+ if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
+ ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ else
+ ooffset = ioffset = 0;
+ break;
+ case CCP_SHA_TYPE_224:
+ digest_size = SHA224_DIGEST_SIZE;
+ init = (void *) ccp_sha224_init;
+ ctx_size = SHA256_DIGEST_SIZE;
+ sb_count = 1;
+ ioffset = 0;
+ if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
+ ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ else
+ ooffset = 0;
+ break;
+ case CCP_SHA_TYPE_256:
+ digest_size = SHA256_DIGEST_SIZE;
+ init = (void *) ccp_sha256_init;
+ ctx_size = SHA256_DIGEST_SIZE;
+ sb_count = 1;
+ ooffset = ioffset = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto e_data;
+ }
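For reference, the per-type values this switch establishes (derived from the hunk itself; offsets are byte positions within the 32-byte SB entry, and the second column of each pair applies to non-v3 parts):

	type     digest_size  ctx_size  ioffset (v3/v5)  ooffset (v3/v5)
	SHA-1        20          20         0 / 12           0 / 12
	SHA-224      28          32         0 / 0            0 / 4
	SHA-256      32          32         0 / 0            0 / 0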
- BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1);
+ /* For zero-length plaintext the src pointer is ignored;
+ * otherwise both parts must be valid
+ */
+ if (sha->src_len && !sha->src)
+ return -EINVAL;
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
- op.ksb_ctx = cmd_q->ksb_ctx;
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
op.u.sha.type = sha->type;
op.u.sha.msg_bits = sha->msg_bits;
- /* The SHA context fits in a single (32-byte) KSB entry and
- * must be in little endian format. Use the 256-bit byte swap
- * passthru option to convert from big endian to little endian.
- */
- ret = ccp_init_dm_workarea(&ctx, cmd_q,
- CCP_SHA_KSB_COUNT * CCP_KSB_BYTES,
+ ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
return ret;
-
if (sha->first) {
- const __be32 *init;
-
switch (sha->type) {
case CCP_SHA_TYPE_1:
- init = ccp_sha1_init;
- break;
case CCP_SHA_TYPE_224:
- init = ccp_sha224_init;
- break;
case CCP_SHA_TYPE_256:
- init = ccp_sha256_init;
+ memcpy(ctx.address + ioffset, init, ctx_size);
break;
default:
ret = -EINVAL;
goto e_ctx;
}
- memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
} else {
- ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+ /* Restore the context */
+ ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+ sb_count * CCP_SB_BYTES);
}
- ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
}
- /* Send data to the CCP SHA engine */
- ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
- CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE);
- if (ret)
- goto e_ctx;
+ if (sha->src) {
+ /* Send data to the CCP SHA engine; block_size is set above */
+ ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
+ block_size, DMA_TO_DEVICE);
+ if (ret)
+ goto e_ctx;
- while (src.sg_wa.bytes_left) {
- ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false);
- if (sha->final && !src.sg_wa.bytes_left)
- op.eom = 1;
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, NULL, &op, block_size, false);
+ if (sha->final && !src.sg_wa.bytes_left)
+ op.eom = 1;
+
+ ret = cmd_q->ccp->vdata->perform->sha(&op);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_data;
+ }
- ret = cmd_q->ccp->vdata->perform->perform_sha(&op);
+ ccp_process_data(&src, NULL, &op);
+ }
+ } else {
+ op.eom = 1;
+ ret = cmd_q->ccp->vdata->perform->sha(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_data;
}
-
- ccp_process_data(&src, NULL, &op);
}
/* Retrieve the SHA context - convert from LE to BE using
* 32-byte (256-bit) byteswapping
*/
- ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
- CCP_PASSTHRU_BYTESWAP_256BIT);
+ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_data;
}
- ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
-
- if (sha->final && sha->opad) {
- /* HMAC operation, recursively perform final SHA */
- struct ccp_cmd hmac_cmd;
- struct scatterlist sg;
- u64 block_size, digest_size;
- u8 *hmac_buf;
-
+ if (sha->final) {
+ /* Finishing up, so get the digest */
switch (sha->type) {
case CCP_SHA_TYPE_1:
- block_size = SHA1_BLOCK_SIZE;
- digest_size = SHA1_DIGEST_SIZE;
- break;
case CCP_SHA_TYPE_224:
- block_size = SHA224_BLOCK_SIZE;
- digest_size = SHA224_DIGEST_SIZE;
- break;
case CCP_SHA_TYPE_256:
- block_size = SHA256_BLOCK_SIZE;
- digest_size = SHA256_DIGEST_SIZE;
+ ccp_get_dm_area(&ctx, ooffset,
+ sha->ctx, 0,
+ digest_size);
break;
default:
ret = -EINVAL;
- goto e_data;
+ goto e_ctx;
}
+ } else {
+ /* Stash the context */
+ ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
+ sb_count * CCP_SB_BYTES);
+ }
+
+ if (sha->final && sha->opad) {
+ /* HMAC operation, recursively perform final SHA */
+ struct ccp_cmd hmac_cmd;
+ struct scatterlist sg;
+ u8 *hmac_buf;
if (sha->opad_len != block_size) {
ret = -EINVAL;
@@ -1153,7 +1174,18 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
sg_init_one(&sg, hmac_buf, block_size + digest_size);
scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
- memcpy(hmac_buf + block_size, ctx.address, digest_size);
+ switch (sha->type) {
+ case CCP_SHA_TYPE_1:
+ case CCP_SHA_TYPE_224:
+ case CCP_SHA_TYPE_256:
+ memcpy(hmac_buf + block_size,
+ ctx.address + ooffset,
+ digest_size);
+ break;
+ default:
+ ret = -EINVAL;
+ goto e_ctx;
+ }
memset(&hmac_cmd, 0, sizeof(hmac_cmd));
hmac_cmd.engine = CCP_ENGINE_SHA;
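What the recursive SHA command above computes is the standard HMAC outer hash, H(opad || inner_digest). A generic standalone sketch; the hash_fn callback and helper are illustrative, not driver types:

	#include <string.h>

	typedef void (*hash_fn)(const unsigned char *msg, size_t len,
				unsigned char *digest);

	static void hmac_outer(hash_fn hash,
			       const unsigned char *opad, size_t block_size,
			       const unsigned char *inner, size_t digest_size,
			       unsigned char *out)
	{
		unsigned char buf[128 + 64];	/* fits any SHA-1/224/256 block+digest */

		memcpy(buf, opad, block_size);			/* one block of opad */
		memcpy(buf + block_size, inner, digest_size);	/* then the inner digest */
		hash(buf, block_size + digest_size, out);	/* outer hash */
	}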
@@ -1176,7 +1208,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
}
e_data:
- ccp_free_data(&src, cmd_q);
+ if (sha->src)
+ ccp_free_data(&src, cmd_q);
e_ctx:
ccp_dm_free(&ctx);
@@ -1190,7 +1223,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct ccp_dm_workarea exp, src;
struct ccp_data dst;
struct ccp_op op;
- unsigned int ksb_count, i_len, o_len;
+ unsigned int sb_count, i_len, o_len;
int ret;
if (rsa->key_size > CCP_RSA_MAX_WIDTH)
@@ -1208,16 +1241,17 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
o_len = ((rsa->key_size + 255) / 256) * 32;
i_len = o_len * 2;
- ksb_count = o_len / CCP_KSB_BYTES;
+ sb_count = o_len / CCP_SB_BYTES;
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
op.jobid = ccp_gen_jobid(cmd_q->ccp);
- op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
- if (!op.ksb_key)
+ op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
+
+ if (!op.sb_key)
return -EIO;
- /* The RSA exponent may span multiple (32-byte) KSB entries and must
+ /* The RSA exponent may span multiple (32-byte) SB entries and must
* be in little endian format. Reverse copy each 32-byte chunk
* of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
* and each byte within that chunk and do not perform any byte swap
@@ -1225,14 +1259,14 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
*/
ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
if (ret)
- goto e_ksb;
+ goto e_sb;
ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
- CCP_KSB_BYTES, false);
+ CCP_SB_BYTES, false);
if (ret)
goto e_exp;
- ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
- CCP_PASSTHRU_BYTESWAP_NOOP);
+ ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_exp;
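Reversing the chunk order and the bytes within each chunk, as the comment above describes, is equivalent to reversing the whole big-endian buffer into little-endian. A standalone sketch; reverse_copy() is illustrative, not the driver's ccp_reverse_set_dm_area():

	static void reverse_copy(unsigned char *dst, const unsigned char *src,
				 unsigned int len)
	{
		unsigned int i;

		for (i = 0; i < len; i++)
			dst[i] = src[len - 1 - i];	/* full byte reversal */
	}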
@@ -1247,12 +1281,12 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_exp;
ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
- CCP_KSB_BYTES, false);
+ CCP_SB_BYTES, false);
if (ret)
goto e_src;
src.address += o_len; /* Adjust the address for the copy operation */
ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
- CCP_KSB_BYTES, false);
+ CCP_SB_BYTES, false);
if (ret)
goto e_src;
src.address -= o_len; /* Reset the address to original value */
@@ -1274,7 +1308,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.rsa.mod_size = rsa->key_size;
op.u.rsa.input_len = i_len;
- ret = cmd_q->ccp->vdata->perform->perform_rsa(&op);
+ ret = cmd_q->ccp->vdata->perform->rsa(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1291,8 +1325,8 @@ e_src:
e_exp:
ccp_dm_free(&exp);
-e_ksb:
- ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);
+e_sb:
+ cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
return ret;
}
@@ -1306,7 +1340,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
struct ccp_op op;
bool in_place = false;
unsigned int i;
- int ret;
+ int ret = 0;
if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
return -EINVAL;
@@ -1321,26 +1355,26 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
return -EINVAL;
}
- BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
/* Load the mask */
- op.ksb_key = cmd_q->ksb_key;
+ op.sb_key = cmd_q->sb_key;
ret = ccp_init_dm_workarea(&mask, cmd_q,
- CCP_PASSTHRU_KSB_COUNT *
- CCP_KSB_BYTES,
+ CCP_PASSTHRU_SB_COUNT *
+ CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
- ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
- CCP_PASSTHRU_BYTESWAP_NOOP);
+ ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_mask;
@@ -1399,7 +1433,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
op.dst.u.dma.offset = dst.sg_wa.sg_used;
op.dst.u.dma.length = op.src.u.dma.length;
- ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
+ ret = cmd_q->ccp->vdata->perform->passthru(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1448,7 +1482,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
return -EINVAL;
}
- BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+ BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
@@ -1456,13 +1490,13 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
/* Load the mask */
- op.ksb_key = cmd_q->ksb_key;
+ op.sb_key = cmd_q->sb_key;
mask.length = pt->mask_len;
mask.dma.address = pt->mask;
mask.dma.length = pt->mask_len;
- ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
+ ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -1484,7 +1518,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
op.dst.u.dma.offset = 0;
op.dst.u.dma.length = pt->src_len;
- ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
+ ret = cmd_q->ccp->vdata->perform->passthru(&op);
if (ret)
cmd->engine_error = cmd_q->cmd_error;
@@ -1514,7 +1548,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
/* Concatenate the modulus and the operands. Both the modulus and
* the operands must be in little endian format. Since the input
@@ -1575,7 +1609,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.ecc.function = cmd->u.ecc.function;
- ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
+ ret = cmd_q->ccp->vdata->perform->ecc(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1639,7 +1673,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
/* Concatenate the modulus and the operands. Both the modulus and
* the operands must be in little endian format. Since the input
@@ -1677,7 +1711,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
- /* Set the first point Z coordianate to 1 */
+ /* Set the first point Z coordinate to 1 */
*src.address = 0x01;
src.address += CCP_ECC_OPERAND_SIZE;
@@ -1696,7 +1730,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_src;
src.address += CCP_ECC_OPERAND_SIZE;
- /* Set the second point Z coordianate to 1 */
+ /* Set the second point Z coordinate to 1 */
*src.address = 0x01;
src.address += CCP_ECC_OPERAND_SIZE;
} else {
@@ -1739,7 +1773,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.ecc.function = cmd->u.ecc.function;
- ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
+ ret = cmd_q->ccp->vdata->perform->ecc(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1810,7 +1844,7 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
cmd->engine_error = 0;
cmd_q->cmd_error = 0;
cmd_q->int_rcvd = 0;
- cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+ cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);
switch (cmd->engine) {
case CCP_ENGINE_AES:
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 0bf262e36b6b..28a9996c1085 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -4,6 +4,7 @@
* Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,9 +26,6 @@
#include "ccp-dev.h"
-#define IO_BAR 2
-#define IO_OFFSET 0x20000
-
#define MSIX_VECTORS 2
struct ccp_msix {
@@ -143,10 +141,11 @@ static void ccp_free_irqs(struct ccp_device *ccp)
free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
dev);
pci_disable_msix(pdev);
- } else {
+ } else if (ccp->irq) {
free_irq(ccp->irq, dev);
pci_disable_msi(pdev);
}
+ ccp->irq = 0;
}
static int ccp_find_mmio_area(struct ccp_device *ccp)
@@ -156,10 +155,11 @@ static int ccp_find_mmio_area(struct ccp_device *ccp)
resource_size_t io_len;
unsigned long io_flags;
- io_flags = pci_resource_flags(pdev, IO_BAR);
- io_len = pci_resource_len(pdev, IO_BAR);
- if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
- return IO_BAR;
+ io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
+ io_len = pci_resource_len(pdev, ccp->vdata->bar);
+ if ((io_flags & IORESOURCE_MEM) &&
+ (io_len >= (ccp->vdata->offset + 0x800)))
+ return ccp->vdata->bar;
return -EIO;
}
@@ -216,7 +216,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(dev, "pci_iomap failed\n");
goto e_device;
}
- ccp->io_regs = ccp->io_map + IO_OFFSET;
+ ccp->io_regs = ccp->io_map + ccp->vdata->offset;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
@@ -230,6 +230,9 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_set_drvdata(dev, ccp);
+ if (ccp->vdata->setup)
+ ccp->vdata->setup(ccp);
+
ret = ccp->vdata->perform->init(ccp);
if (ret)
goto e_iomap;
@@ -322,6 +325,8 @@ static int ccp_pci_resume(struct pci_dev *pdev)
static const struct pci_device_id ccp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
+ { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
+ { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index eee2c7e6c299..e09d4055b19e 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -636,20 +636,12 @@ struct hifn_request_context {
static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
{
- u32 ret;
-
- ret = readl(dev->bar[0] + reg);
-
- return ret;
+ return readl(dev->bar[0] + reg);
}
static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
{
- u32 ret;
-
- ret = readl(dev->bar[1] + reg);
-
- return ret;
+ return readl(dev->bar[1] + reg);
}
static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 68e8aa90fe01..a2e77b87485b 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -71,6 +71,7 @@
#define DRIVER_FLAGS_MD5 BIT(21)
#define IMG_HASH_QUEUE_LENGTH 20
+#define IMG_HASH_DMA_BURST 4
#define IMG_HASH_DMA_THRESHOLD 64
#ifdef __LITTLE_ENDIAN
@@ -102,8 +103,10 @@ struct img_hash_request_ctx {
unsigned long op;
size_t bufcnt;
- u8 buffer[0] __aligned(sizeof(u32));
struct ahash_request fallback_req;
+
+ /* Zero length buffer must remain last member of struct */
+ u8 buffer[0] __aligned(sizeof(u32));
};
struct img_hash_ctx {
@@ -340,7 +343,7 @@ static int img_hash_dma_init(struct img_hash_dev *hdev)
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = hdev->bus_addr;
dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_conf.dst_maxburst = 16;
+ dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
dma_conf.device_fc = false;
err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
@@ -361,7 +364,7 @@ static void img_hash_dma_task(unsigned long d)
size_t nbytes, bleft, wsend, len, tbc;
struct scatterlist tsg;
- if (!ctx->sg)
+ if (!hdev->req || !ctx->sg)
return;
addr = sg_virt(ctx->sg);
@@ -587,6 +590,32 @@ static int img_hash_finup(struct ahash_request *req)
return crypto_ahash_finup(&rctx->fallback_req);
}
+static int img_hash_import(struct ahash_request *req, const void *in)
+{
+ struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ rctx->fallback_req.base.flags = req->base.flags
+ & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int img_hash_export(struct ahash_request *req, void *out)
+{
+ struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ rctx->fallback_req.base.flags = req->base.flags
+ & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
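A usage sketch for the export/import pair added above, using only the generic crypto API; the state buffer must be at least crypto_ahash_statesize() bytes, and the helper itself is illustrative:

	#include <crypto/hash.h>

	static int checkpoint_and_resume(struct ahash_request *req, void *state)
	{
		int ret;

		ret = crypto_ahash_export(req, state);	/* snapshot partial hash */
		if (ret)
			return ret;

		return crypto_ahash_import(req, state);	/* resume from snapshot */
	}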
static int img_hash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -643,10 +672,9 @@ static int img_hash_digest(struct ahash_request *req)
return err;
}
-static int img_hash_cra_init(struct crypto_tfm *tfm)
+static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- const char *alg_name = crypto_tfm_alg_name(tfm);
int err = -ENOMEM;
ctx->fallback = crypto_alloc_ahash(alg_name, 0,
@@ -658,6 +686,7 @@ static int img_hash_cra_init(struct crypto_tfm *tfm)
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct img_hash_request_ctx) +
+ crypto_ahash_reqsize(ctx->fallback) +
IMG_HASH_DMA_THRESHOLD);
return 0;
@@ -666,6 +695,26 @@ err:
return err;
}
+static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
+{
+ return img_hash_cra_init(tfm, "md5-generic");
+}
+
+static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
+{
+ return img_hash_cra_init(tfm, "sha1-generic");
+}
+
+static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
+{
+ return img_hash_cra_init(tfm, "sha224-generic");
+}
+
+static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
+{
+ return img_hash_cra_init(tfm, "sha256-generic");
+}
+
static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
@@ -711,9 +760,12 @@ static struct ahash_alg img_algs[] = {
.update = img_hash_update,
.final = img_hash_final,
.finup = img_hash_finup,
+ .export = img_hash_export,
+ .import = img_hash_import,
.digest = img_hash_digest,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "img-md5",
@@ -723,7 +775,7 @@ static struct ahash_alg img_algs[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct img_hash_ctx),
- .cra_init = img_hash_cra_init,
+ .cra_init = img_hash_cra_md5_init,
.cra_exit = img_hash_cra_exit,
.cra_module = THIS_MODULE,
}
@@ -734,9 +786,12 @@ static struct ahash_alg img_algs[] = {
.update = img_hash_update,
.final = img_hash_final,
.finup = img_hash_finup,
+ .export = img_hash_export,
+ .import = img_hash_import,
.digest = img_hash_digest,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "img-sha1",
@@ -746,7 +801,7 @@ static struct ahash_alg img_algs[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct img_hash_ctx),
- .cra_init = img_hash_cra_init,
+ .cra_init = img_hash_cra_sha1_init,
.cra_exit = img_hash_cra_exit,
.cra_module = THIS_MODULE,
}
@@ -757,9 +812,12 @@ static struct ahash_alg img_algs[] = {
.update = img_hash_update,
.final = img_hash_final,
.finup = img_hash_finup,
+ .export = img_hash_export,
+ .import = img_hash_import,
.digest = img_hash_digest,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "img-sha224",
@@ -769,7 +827,7 @@ static struct ahash_alg img_algs[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct img_hash_ctx),
- .cra_init = img_hash_cra_init,
+ .cra_init = img_hash_cra_sha224_init,
.cra_exit = img_hash_cra_exit,
.cra_module = THIS_MODULE,
}
@@ -780,9 +838,12 @@ static struct ahash_alg img_algs[] = {
.update = img_hash_update,
.final = img_hash_final,
.finup = img_hash_finup,
+ .export = img_hash_export,
+ .import = img_hash_import,
.digest = img_hash_digest,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "img-sha256",
@@ -792,7 +853,7 @@ static struct ahash_alg img_algs[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct img_hash_ctx),
- .cra_init = img_hash_cra_init,
+ .cra_init = img_hash_cra_sha256_init,
.cra_exit = img_hash_cra_exit,
.cra_module = THIS_MODULE,
}
@@ -971,7 +1032,7 @@ static int img_hash_probe(struct platform_device *pdev)
err = img_register_algs(hdev);
if (err)
goto err_algs;
- dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
+ dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
return 0;
@@ -1013,11 +1074,38 @@ static int img_hash_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int img_hash_suspend(struct device *dev)
+{
+ struct img_hash_dev *hdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hdev->hash_clk);
+ clk_disable_unprepare(hdev->sys_clk);
+
+ return 0;
+}
+
+static int img_hash_resume(struct device *dev)
+{
+ struct img_hash_dev *hdev = dev_get_drvdata(dev);
+
+ clk_prepare_enable(hdev->hash_clk);
+ clk_prepare_enable(hdev->sys_clk);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_hash_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
+};
+
static struct platform_driver img_hash_driver = {
.probe = img_hash_probe,
.remove = img_hash_remove,
.driver = {
.name = "img-hash-accelerator",
+ .pm = &img_hash_pm_ops,
.of_match_table = of_match_ptr(img_hash_match),
}
};
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 2296934455fc..7868765a70c5 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -447,9 +447,8 @@ static int init_ixp_crypto(struct device *dev)
if (!npe_running(npe_c)) {
ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
- if (ret) {
- return ret;
- }
+ if (ret)
+ goto npe_release;
if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
goto npe_error;
} else {
@@ -473,7 +472,8 @@ static int init_ixp_crypto(struct device *dev)
default:
printk(KERN_ERR "Firmware of %s lacks crypto support\n",
npe_name(npe_c));
- return -ENODEV;
+ ret = -ENODEV;
+ goto npe_release;
}
/* buffer_pool will also be used to sometimes store the hmac,
* so assure it is large enough
@@ -512,6 +512,7 @@ npe_error:
err:
dma_pool_destroy(ctx_pool);
dma_pool_destroy(buffer_pool);
+npe_release:
npe_release(npe_c);
return ret;
}
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index d64af8625d7e..37dadb2a4feb 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -166,6 +166,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
if (!req)
break;
+ ctx = crypto_tfm_ctx(req->tfm);
mv_cesa_complete_req(ctx, req, 0);
}
}
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 82e0f4e6eb1c..9f284682c091 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -374,7 +374,7 @@ static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
.complete = mv_cesa_ahash_complete,
};
-static int mv_cesa_ahash_init(struct ahash_request *req,
+static void mv_cesa_ahash_init(struct ahash_request *req,
struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
@@ -390,8 +390,6 @@ static int mv_cesa_ahash_init(struct ahash_request *req,
creq->op_tmpl = *tmpl;
creq->len = 0;
creq->algo_le = algo_le;
-
- return 0;
}
static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
@@ -405,15 +403,16 @@ static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
+static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ bool cached = false;
- if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
- *cached = true;
+ if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
+ cached = true;
if (!req->nbytes)
- return 0;
+ return cached;
sg_pcopy_to_buffer(req->src, creq->src_nents,
creq->cache + creq->cache_ptr,
@@ -422,7 +421,7 @@ static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
creq->cache_ptr += req->nbytes;
}
- return 0;
+ return cached;
}
static struct mv_cesa_op_ctx *
@@ -455,7 +454,6 @@ mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
- struct mv_cesa_ahash_dma_iter *dma_iter,
struct mv_cesa_ahash_req *creq,
gfp_t flags)
{
@@ -586,7 +584,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
* Add the cache (left-over data from a previous block) first.
* This will never overflow the SRAM size.
*/
- ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags);
+ ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
if (ret)
goto err_free_tdma;
@@ -668,7 +666,6 @@ err:
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- int ret;
creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (creq->src_nents < 0) {
@@ -676,17 +673,15 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
return creq->src_nents;
}
- ret = mv_cesa_ahash_cache_req(req, cached);
- if (ret)
- return ret;
+ *cached = mv_cesa_ahash_cache_req(req);
if (*cached)
return 0;
if (cesa_dev->caps->has_tdma)
- ret = mv_cesa_ahash_dma_req_init(req);
-
- return ret;
+ return mv_cesa_ahash_dma_req_init(req);
+ else
+ return 0;
}
static int mv_cesa_ahash_queue_req(struct ahash_request *req)
@@ -805,13 +800,14 @@ static int mv_cesa_md5_init(struct ahash_request *req)
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
+
+ mv_cesa_ahash_init(req, &tmpl, true);
+
creq->state[0] = MD5_H0;
creq->state[1] = MD5_H1;
creq->state[2] = MD5_H2;
creq->state[3] = MD5_H3;
- mv_cesa_ahash_init(req, &tmpl, true);
-
return 0;
}
@@ -873,14 +869,15 @@ static int mv_cesa_sha1_init(struct ahash_request *req)
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
+
+ mv_cesa_ahash_init(req, &tmpl, false);
+
creq->state[0] = SHA1_H0;
creq->state[1] = SHA1_H1;
creq->state[2] = SHA1_H2;
creq->state[3] = SHA1_H3;
creq->state[4] = SHA1_H4;
- mv_cesa_ahash_init(req, &tmpl, false);
-
return 0;
}
@@ -942,6 +939,9 @@ static int mv_cesa_sha256_init(struct ahash_request *req)
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
+
+ mv_cesa_ahash_init(req, &tmpl, false);
+
creq->state[0] = SHA256_H0;
creq->state[1] = SHA256_H1;
creq->state[2] = SHA256_H2;
@@ -951,8 +951,6 @@ static int mv_cesa_sha256_init(struct ahash_request *req)
creq->state[6] = SHA256_H6;
creq->state[7] = SHA256_H7;
- mv_cesa_ahash_init(req, &tmpl, false);
-
return 0;
}
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 86a065bcc187..9fd7a5fbaa1b 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -261,6 +261,7 @@ struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
tdma->op = op;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = cpu_to_le32(dma_handle);
+ tdma->dst = CESA_SA_CFG_SRAM_OFFSET;
tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;
return op;
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e6b658faef63..104e9ce9400a 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1091,11 +1091,8 @@ static int mv_probe(struct platform_device *pdev)
cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
- if (pdev->dev.of_node)
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- else
- irq = platform_get_irq(pdev, 0);
- if (irq < 0 || irq == NO_IRQ) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
ret = irq;
goto err;
}
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
index ff383ef83871..ee4be1b0d30b 100644
--- a/drivers/crypto/mxc-scc.c
+++ b/drivers/crypto/mxc-scc.c
@@ -668,7 +668,9 @@ static int mxc_scc_probe(struct platform_device *pdev)
return PTR_ERR(scc->clk);
}
- clk_prepare_enable(scc->clk);
+ ret = clk_prepare_enable(scc->clk);
+ if (ret)
+ return ret;
/* clear error status register */
writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 4ab53a604312..fe32dd95ae4f 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -35,7 +35,8 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
@@ -85,6 +86,8 @@
#define AES_REG_IRQ_DATA_OUT BIT(2)
#define DEFAULT_TIMEOUT (5*HZ)
+#define DEFAULT_AUTOSUSPEND_DELAY 1000
+
#define FLAGS_MODE_MASK 0x000f
#define FLAGS_ENCRYPT BIT(0)
#define FLAGS_CBC BIT(1)
@@ -103,6 +106,7 @@ struct omap_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
unsigned long flags;
+ struct crypto_skcipher *fallback;
};
struct omap_aes_reqctx {
@@ -238,11 +242,19 @@ static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
+ int err;
+
if (!(dd->flags & FLAGS_INIT)) {
dd->flags |= FLAGS_INIT;
dd->err = 0;
}
+ err = pm_runtime_get_sync(dd->dev);
+ if (err < 0) {
+ dev_err(dd->dev, "failed to get sync: %d\n", err);
+ return err;
+ }
+
return 0;
}
@@ -319,20 +331,12 @@ static void omap_aes_dma_stop(struct omap_aes_dev *dd)
static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
- struct omap_aes_dev *dd = NULL, *tmp;
+ struct omap_aes_dev *dd;
spin_lock_bh(&list_lock);
- if (!ctx->dd) {
- list_for_each_entry(tmp, &dev_list, list) {
- /* FIXME: take fist available aes core */
- dd = tmp;
- break;
- }
- ctx->dd = dd;
- } else {
- /* already found before */
- dd = ctx->dd;
- }
+ dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
+ list_move_tail(&dd->list, &dev_list);
+ ctx->dd = dd;
spin_unlock_bh(&list_lock);
return dd;
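Taking the list head and moving it to the tail rotates the device list, so successive tfms are spread round-robin across the available AES cores. A generic list_head sketch; pick_round_robin() is illustrative:

	#include <linux/list.h>

	static struct list_head *pick_round_robin(struct list_head *pool)
	{
		struct list_head *first = pool->next;

		if (first == pool)
			return NULL;		/* empty pool */

		list_move_tail(first, pool);	/* rotate so the next caller
						 * gets a different entry */
		return first;
	}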
@@ -519,7 +523,10 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- crypto_finalize_request(dd->engine, req, err);
+ crypto_finalize_cipher_request(dd->engine, req, err);
+
+ pm_runtime_mark_last_busy(dd->dev);
+ pm_runtime_put_autosuspend(dd->dev);
}
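The mark_last_busy/put_autosuspend pair above is the standard runtime-PM autosuspend idiom: the device powers down only once the autosuspend delay expires after the last request. A generic sketch; do_hw_work() is illustrative:

	#include <linux/pm_runtime.h>

	static int do_hw_work(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);	/* resume hardware if idle */

		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... touch registers ... */

		pm_runtime_mark_last_busy(dev);		/* restart the idle timer */
		pm_runtime_put_autosuspend(dev);	/* suspend after the delay */
		return 0;
	}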
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -592,7 +599,7 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
struct ablkcipher_request *req)
{
if (req)
- return crypto_transfer_request_to_engine(dd->engine, req);
+ return crypto_transfer_cipher_request_to_engine(dd->engine, req);
return 0;
}
@@ -602,7 +609,7 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
{
struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
- struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+ struct omap_aes_dev *dd = ctx->dd;
struct omap_aes_reqctx *rctx;
if (!dd)
@@ -648,7 +655,7 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
{
struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
- struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+ struct omap_aes_dev *dd = ctx->dd;
if (!dd)
return -ENODEV;
@@ -696,11 +703,29 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
crypto_ablkcipher_reqtfm(req));
struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
struct omap_aes_dev *dd;
+ int ret;
pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
!!(mode & FLAGS_ENCRYPT),
!!(mode & FLAGS_CBC));
+ if (req->nbytes < 200) {
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags, NULL,
+ NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+
+ if (mode & FLAGS_ENCRYPT)
+ ret = crypto_skcipher_encrypt(subreq);
+ else
+ ret = crypto_skcipher_decrypt(subreq);
+
+ skcipher_request_zero(subreq);
+ return ret;
+ }
dd = omap_aes_find_dev(ctx);
if (!dd)
return -ENODEV;
@@ -716,6 +741,7 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ int ret;
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
keylen != AES_KEYSIZE_256)
@@ -726,6 +752,14 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
+ crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (ret)
+		return ret;
+
return 0;
}
@@ -761,22 +795,16 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
- struct omap_aes_dev *dd = NULL;
- int err;
+ const char *name = crypto_tfm_alg_name(tfm);
+ const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+ struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_skcipher *blk;
- /* Find AES device, currently picks the first device */
- spin_lock_bh(&list_lock);
- list_for_each_entry(dd, &dev_list, list) {
- break;
- }
- spin_unlock_bh(&list_lock);
+ blk = crypto_alloc_skcipher(name, 0, flags);
+ if (IS_ERR(blk))
+ return PTR_ERR(blk);
- err = pm_runtime_get_sync(dd->dev);
- if (err < 0) {
- dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
- __func__, err);
- return err;
- }
+ ctx->fallback = blk;
tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
@@ -785,16 +813,12 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
- struct omap_aes_dev *dd = NULL;
+ struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- /* Find AES device, currently picks the first device */
- spin_lock_bh(&list_lock);
- list_for_each_entry(dd, &dev_list, list) {
- break;
- }
- spin_unlock_bh(&list_lock);
+ if (ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
- pm_runtime_put_sync(dd->dev);
+ ctx->fallback = NULL;
}
/* ********************** ALGS ************************************ */
@@ -806,7 +830,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_aes_ctx),
.cra_alignmask = 0,
@@ -828,7 +852,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_aes_ctx),
.cra_alignmask = 0,
@@ -854,7 +878,7 @@ static struct crypto_alg algs_ctr[] = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_aes_ctx),
.cra_alignmask = 0,
@@ -1140,6 +1164,9 @@ static int omap_aes_probe(struct platform_device *pdev)
}
dd->phys_base = res.start;
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+
pm_runtime_enable(dev);
err = pm_runtime_get_sync(dev);
if (err < 0) {
@@ -1186,6 +1213,19 @@ static int omap_aes_probe(struct platform_device *pdev)
list_add_tail(&dd->list, &dev_list);
spin_unlock(&list_lock);
+ /* Initialize crypto engine */
+ dd->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dd->engine) {
+ err = -ENOMEM;
+ goto err_engine;
+ }
+
+ dd->engine->prepare_cipher_request = omap_aes_prepare_req;
+ dd->engine->cipher_one_request = omap_aes_crypt_req;
+ err = crypto_engine_start(dd->engine);
+ if (err)
+ goto err_engine;
+
for (i = 0; i < dd->pdata->algs_info_size; i++) {
if (!dd->pdata->algs_info[i].registered) {
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
@@ -1203,26 +1243,17 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
- /* Initialize crypto engine */
- dd->engine = crypto_engine_alloc_init(dev, 1);
- if (!dd->engine)
- goto err_algs;
-
- dd->engine->prepare_request = omap_aes_prepare_req;
- dd->engine->crypt_one_request = omap_aes_crypt_req;
- err = crypto_engine_start(dd->engine);
- if (err)
- goto err_engine;
-
return 0;
-err_engine:
- crypto_engine_exit(dd->engine);
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]);
+err_engine:
+ if (dd->engine)
+ crypto_engine_exit(dd->engine);
+
omap_aes_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 5691434ffb2d..a6f65532fd16 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -39,6 +39,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <crypto/algapi.h>
+#include <crypto/engine.h>
#define DST_MAXBURST 2
@@ -506,7 +507,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
pr_debug("err: %d\n", err);
pm_runtime_put(dd->dev);
- crypto_finalize_request(dd->engine, req, err);
+ crypto_finalize_cipher_request(dd->engine, req, err);
}
static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
@@ -574,7 +575,7 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
struct ablkcipher_request *req)
{
if (req)
- return crypto_transfer_request_to_engine(dd->engine, req);
+ return crypto_transfer_cipher_request_to_engine(dd->engine, req);
return 0;
}
@@ -1078,6 +1079,19 @@ static int omap_des_probe(struct platform_device *pdev)
list_add_tail(&dd->list, &dev_list);
spin_unlock(&list_lock);
+ /* Initialize des crypto engine */
+ dd->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dd->engine) {
+ err = -ENOMEM;
+ goto err_engine;
+ }
+
+ dd->engine->prepare_cipher_request = omap_des_prepare_req;
+ dd->engine->cipher_one_request = omap_des_crypt_req;
+ err = crypto_engine_start(dd->engine);
+ if (err)
+ goto err_engine;
+
for (i = 0; i < dd->pdata->algs_info_size; i++) {
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
@@ -1093,27 +1107,18 @@ static int omap_des_probe(struct platform_device *pdev)
}
}
- /* Initialize des crypto engine */
- dd->engine = crypto_engine_alloc_init(dev, 1);
- if (!dd->engine)
- goto err_algs;
-
- dd->engine->prepare_request = omap_des_prepare_req;
- dd->engine->crypt_one_request = omap_des_crypt_req;
- err = crypto_engine_start(dd->engine);
- if (err)
- goto err_engine;
-
return 0;
-err_engine:
- crypto_engine_exit(dd->engine);
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]);
+err_engine:
+ if (dd->engine)
+ crypto_engine_exit(dd->engine);
+
omap_des_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 7fe4eef12fe2..d0b16e5e4ee5 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -112,9 +112,10 @@
#define FLAGS_DMA_READY 6
#define FLAGS_AUTO_XOR 7
#define FLAGS_BE32_SHA1 8
+#define FLAGS_SGS_COPIED 9
+#define FLAGS_SGS_ALLOCED 10
/* context flags */
#define FLAGS_FINUP 16
-#define FLAGS_SG 17
#define FLAGS_MODE_SHIFT 18
#define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
@@ -134,7 +135,8 @@
#define OMAP_ALIGN_MASK (sizeof(u32)-1)
#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32))))
-#define BUFLEN PAGE_SIZE
+#define BUFLEN SHA512_BLOCK_SIZE
+#define OMAP_SHA_DMA_THRESHOLD 256
struct omap_sham_dev;
@@ -147,12 +149,12 @@ struct omap_sham_reqctx {
size_t digcnt;
size_t bufcnt;
size_t buflen;
- dma_addr_t dma_addr;
/* walk state */
struct scatterlist *sg;
- struct scatterlist sgl;
- unsigned int offset; /* offset in current sg */
+ struct scatterlist sgl[2];
+ int offset; /* offset in current sg */
+ int sg_len;
unsigned int total; /* total request */
u8 buffer[0] OMAP_ALIGNED;
@@ -223,6 +225,7 @@ struct omap_sham_dev {
struct dma_chan *dma_lch;
struct tasklet_struct done_task;
u8 polling_mode;
+ u8 xmit_buf[BUFLEN];
unsigned long flags;
struct crypto_queue queue;
@@ -510,12 +513,14 @@ static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
SHA_REG_IRQSTATUS_INPUT_RDY);
}
-static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
- size_t length, int final)
+static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
+ int final)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
int count, len32, bs32, offset = 0;
- const u32 *buffer = (const u32 *)buf;
+ const u32 *buffer;
+ int mlen;
+ struct sg_mapping_iter mi;
dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
ctx->digcnt, length, final);
@@ -525,6 +530,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
/* should be non-zero before next lines to disable clocks later */
ctx->digcnt += length;
+ ctx->total -= length;
if (final)
set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
@@ -534,16 +540,35 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
len32 = DIV_ROUND_UP(length, sizeof(u32));
bs32 = get_block_size(ctx) / sizeof(u32);
+ sg_miter_start(&mi, ctx->sg, ctx->sg_len,
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+
+ mlen = 0;
+
while (len32) {
if (dd->pdata->poll_irq(dd))
return -ETIMEDOUT;
- for (count = 0; count < min(len32, bs32); count++, offset++)
+ for (count = 0; count < min(len32, bs32); count++, offset++) {
+ if (!mlen) {
+ sg_miter_next(&mi);
+ mlen = mi.length;
+ if (!mlen) {
+ pr_err("sg miter failure.\n");
+ return -EINVAL;
+ }
+ offset = 0;
+ buffer = mi.addr;
+ }
omap_sham_write(dd, SHA_REG_DIN(dd, count),
buffer[offset]);
+ mlen -= 4;
+ }
len32 -= min(len32, bs32);
}
+ sg_miter_stop(&mi);
+
return -EINPROGRESS;
}
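The sg_mapping_iter pattern used above avoids manual kmap bookkeeping when feeding word-sized FIFO writes from a scatterlist. A generic sketch assuming 4-byte-multiple segments; emit() stands in for the register write:

	#include <linux/scatterlist.h>

	static size_t feed_words(struct scatterlist *sg, int sg_len,
				 void (*emit)(u32 word))
	{
		struct sg_mapping_iter mi;
		size_t total = 0;

		sg_miter_start(&mi, sg, sg_len, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
		while (sg_miter_next(&mi)) {
			const u32 *p = mi.addr;
			size_t n = mi.length / sizeof(u32);

			while (n--) {
				emit(*p++);
				total += sizeof(u32);
			}
		}
		sg_miter_stop(&mi);

		return total;
	}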
@@ -555,22 +580,27 @@ static void omap_sham_dma_callback(void *param)
tasklet_schedule(&dd->done_task);
}
-static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
- size_t length, int final, int is_sg)
+static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
+ int final)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
struct dma_async_tx_descriptor *tx;
struct dma_slave_config cfg;
- int len32, ret, dma_min = get_block_size(ctx);
+ int ret;
dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
ctx->digcnt, length, final);
+ if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ return -EINVAL;
+ }
+
memset(&cfg, 0, sizeof(cfg));
cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(dd->dma_lch, &cfg);
if (ret) {
@@ -578,30 +608,12 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
return ret;
}
- len32 = DIV_ROUND_UP(length, dma_min) * dma_min;
-
- if (is_sg) {
- /*
- * The SG entry passed in may not have the 'length' member
- * set correctly so use a local SG entry (sgl) with the
- * proper value for 'length' instead. If this is not done,
- * the dmaengine may try to DMA the incorrect amount of data.
- */
- sg_init_table(&ctx->sgl, 1);
- sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
- ctx->sgl.offset = ctx->sg->offset;
- sg_dma_len(&ctx->sgl) = len32;
- sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
-
- tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- } else {
- tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- }
+ tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx) {
- dev_err(dd->dev, "prep_slave_sg/single() failed\n");
+ dev_err(dd->dev, "prep_slave_sg failed\n");
return -EINVAL;
}
@@ -611,6 +623,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
dd->pdata->write_ctrl(dd, length, final, 1);
ctx->digcnt += length;
+ ctx->total -= length;
if (final)
set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
@@ -625,189 +638,257 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
return -EINPROGRESS;
}
-static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
- const u8 *data, size_t length)
+static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
+ struct scatterlist *sg, int bs, int new_len)
{
- size_t count = min(length, ctx->buflen - ctx->bufcnt);
+ int n = sg_nents(sg);
+ struct scatterlist *tmp;
+ int offset = ctx->offset;
- count = min(count, ctx->total);
- if (count <= 0)
- return 0;
- memcpy(ctx->buffer + ctx->bufcnt, data, count);
- ctx->bufcnt += count;
+ if (ctx->bufcnt)
+ n++;
- return count;
-}
+ ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
+ if (!ctx->sg)
+ return -ENOMEM;
-static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
-{
- size_t count;
- const u8 *vaddr;
+ sg_init_table(ctx->sg, n);
- while (ctx->sg) {
- vaddr = kmap_atomic(sg_page(ctx->sg));
- vaddr += ctx->sg->offset;
+ tmp = ctx->sg;
- count = omap_sham_append_buffer(ctx,
- vaddr + ctx->offset,
- ctx->sg->length - ctx->offset);
+ ctx->sg_len = 0;
- kunmap_atomic((void *)vaddr);
+ if (ctx->bufcnt) {
+ sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
+ tmp = sg_next(tmp);
+ ctx->sg_len++;
+ }
- if (!count)
- break;
- ctx->offset += count;
- ctx->total -= count;
- if (ctx->offset == ctx->sg->length) {
- ctx->sg = sg_next(ctx->sg);
- if (ctx->sg)
- ctx->offset = 0;
- else
- ctx->total = 0;
+ while (sg && new_len) {
+ int len = sg->length - offset;
+
+ if (offset) {
+ offset -= sg->length;
+ if (offset < 0)
+ offset = 0;
+ }
+
+ if (new_len < len)
+ len = new_len;
+
+ if (len > 0) {
+ new_len -= len;
+ sg_set_page(tmp, sg_page(sg), len, sg->offset);
+ if (new_len <= 0)
+ sg_mark_end(tmp);
+ tmp = sg_next(tmp);
+ ctx->sg_len++;
}
+
+ sg = sg_next(sg);
}
+ set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
+
+ ctx->bufcnt = 0;
+
return 0;
}
-static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
- struct omap_sham_reqctx *ctx,
- size_t length, int final)
+static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
+ struct scatterlist *sg, int bs, int new_len)
{
- int ret;
+ int pages;
+ void *buf;
+ int len;
- ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
- dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
- return -EINVAL;
+ len = new_len + ctx->bufcnt;
+
+ pages = get_order(ctx->total);
+
+ buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+ if (!buf) {
+ pr_err("Couldn't allocate pages for unaligned cases.\n");
+ return -ENOMEM;
}
- ctx->flags &= ~BIT(FLAGS_SG);
+ if (ctx->bufcnt)
+ memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
- ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
- if (ret != -EINPROGRESS)
- dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
- DMA_TO_DEVICE);
+ scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
+ ctx->total - ctx->bufcnt, 0);
+ sg_init_table(ctx->sgl, 1);
+ sg_set_buf(ctx->sgl, buf, len);
+ ctx->sg = ctx->sgl;
+ set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
+ ctx->sg_len = 1;
+ ctx->bufcnt = 0;
+ ctx->offset = 0;
- return ret;
+ return 0;
}
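The copy path above linearizes an unaligned scatterlist into one contiguous buffer; get_order() rounds the length up to a power-of-two number of pages. A trimmed sketch; linearize_sg() is illustrative, not the driver's helper:

	#include <linux/gfp.h>
	#include <crypto/scatterwalk.h>

	static void *linearize_sg(struct scatterlist *sg, unsigned int len)
	{
		void *buf = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));

		if (buf)
			/* last arg 0: copy from the scatterlist into buf */
			scatterwalk_map_and_copy(buf, sg, 0, len, 0);

		return buf;
	}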
-static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
+static int omap_sham_align_sgs(struct scatterlist *sg,
+ int nbytes, int bs, bool final,
+ struct omap_sham_reqctx *rctx)
{
- struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- unsigned int final;
- size_t count;
+ int n = 0;
+ bool aligned = true;
+ bool list_ok = true;
+ struct scatterlist *sg_tmp = sg;
+ int new_len;
+ int offset = rctx->offset;
- omap_sham_append_sg(ctx);
+ if (!sg || !sg->length || !nbytes)
+ return 0;
+
+ new_len = nbytes;
- final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
+ if (offset)
+ list_ok = false;
- dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
- ctx->bufcnt, ctx->digcnt, final);
+ if (final)
+ new_len = DIV_ROUND_UP(new_len, bs) * bs;
+ else
+ new_len = new_len / bs * bs;
- if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
- count = ctx->bufcnt;
- ctx->bufcnt = 0;
- return omap_sham_xmit_dma_map(dd, ctx, count, final);
+ while (nbytes > 0 && sg_tmp) {
+ n++;
+
+ if (offset < sg_tmp->length) {
+ if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
+ aligned = false;
+ break;
+ }
+
+ if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
+ aligned = false;
+ break;
+ }
+ }
+
+ if (offset) {
+ offset -= sg_tmp->length;
+ if (offset < 0) {
+ nbytes += offset;
+ offset = 0;
+ }
+ } else {
+ nbytes -= sg_tmp->length;
+ }
+
+ sg_tmp = sg_next(sg_tmp);
+
+ if (nbytes < 0) {
+ list_ok = false;
+ break;
+ }
}
+ if (!aligned)
+ return omap_sham_copy_sgs(rctx, sg, bs, new_len);
+ else if (!list_ok)
+ return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
+
+ rctx->sg_len = n;
+ rctx->sg = sg;
+
return 0;
}
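
The rounding at the heart of omap_sham_align_sgs() is worth spelling out: a final request rounds the length up so the trailing partial block still reaches the engine, while a non-final request rounds down and leaves the tail to be buffered for the next update. A minimal sketch of that rule (the helper name is illustrative, not part of the driver):

#include <linux/kernel.h>

/* Illustrative only: round a transfer length to the engine block size. */
static inline int sham_round_len(int len, int bs, bool final)
{
	/* final: round up so the last partial block is transferred;
	 * otherwise: round down so only full blocks hit the engine.
	 */
	return final ? DIV_ROUND_UP(len, bs) * bs : rounddown(len, bs);
}
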
-/* Start address alignment */
-#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32)))
-/* SHA1 block size alignment */
-#define SG_SA(sg, bs) (IS_ALIGNED(sg->length, bs))
-
-static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
+static int omap_sham_prepare_request(struct ahash_request *req, bool update)
{
- struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- unsigned int length, final, tail;
- struct scatterlist *sg;
- int ret, bs;
+ struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
+ int bs;
+ int ret;
+ int nbytes;
+ bool final = rctx->flags & BIT(FLAGS_FINUP);
+ int xmit_len, hash_later;
- if (!ctx->total)
+ if (!req)
return 0;
- if (ctx->bufcnt || ctx->offset)
- return omap_sham_update_dma_slow(dd);
-
- /*
- * Don't use the sg interface when the transfer size is less
- * than the number of elements in a DMA frame. Otherwise,
- * the dmaengine infrastructure will calculate that it needs
- * to transfer 0 frames which ultimately fails.
- */
- if (ctx->total < get_block_size(ctx))
- return omap_sham_update_dma_slow(dd);
-
- dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
- ctx->digcnt, ctx->bufcnt, ctx->total);
+ bs = get_block_size(rctx);
- sg = ctx->sg;
- bs = get_block_size(ctx);
+ if (update)
+ nbytes = req->nbytes;
+ else
+ nbytes = 0;
- if (!SG_AA(sg))
- return omap_sham_update_dma_slow(dd);
+ rctx->total = nbytes + rctx->bufcnt;
- if (!sg_is_last(sg) && !SG_SA(sg, bs))
- /* size is not BLOCK_SIZE aligned */
- return omap_sham_update_dma_slow(dd);
+ if (!rctx->total)
+ return 0;
- length = min(ctx->total, sg->length);
+ if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
+ int len = bs - rctx->bufcnt % bs;
- if (sg_is_last(sg)) {
- if (!(ctx->flags & BIT(FLAGS_FINUP))) {
- /* not last sg must be BLOCK_SIZE aligned */
- tail = length & (bs - 1);
- /* without finup() we need one block to close hash */
- if (!tail)
- tail = bs;
- length -= tail;
- }
+ if (len > nbytes)
+ len = nbytes;
+ scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
+ 0, len, 0);
+ rctx->bufcnt += len;
+ nbytes -= len;
+ rctx->offset = len;
}
- if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
- dev_err(dd->dev, "dma_map_sg error\n");
- return -EINVAL;
- }
+ if (rctx->bufcnt)
+ memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
- ctx->flags |= BIT(FLAGS_SG);
+ ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
+ if (ret)
+ return ret;
- ctx->total -= length;
- ctx->offset = length; /* offset where to start slow */
+ xmit_len = rctx->total;
- final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
+ if (!IS_ALIGNED(xmit_len, bs)) {
+ if (final)
+ xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
+ else
+ xmit_len = xmit_len / bs * bs;
+ }
- ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
- if (ret != -EINPROGRESS)
- dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+ hash_later = rctx->total - xmit_len;
+ if (hash_later < 0)
+ hash_later = 0;
- return ret;
-}
+ if (rctx->bufcnt && nbytes) {
+ /* have data from a previous operation and the current one */
+ sg_init_table(rctx->sgl, 2);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
-static int omap_sham_update_cpu(struct omap_sham_dev *dd)
-{
- struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- int bufcnt, final;
+ sg_chain(rctx->sgl, 2, req->src);
- if (!ctx->total)
- return 0;
+ rctx->sg = rctx->sgl;
- omap_sham_append_sg(ctx);
+ rctx->sg_len++;
+ } else if (rctx->bufcnt) {
+ /* have buffered data only */
+ sg_init_table(rctx->sgl, 1);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);
- final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
+ rctx->sg = rctx->sgl;
- dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
- ctx->bufcnt, ctx->digcnt, final);
+ rctx->sg_len = 1;
+ }
- if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
- bufcnt = ctx->bufcnt;
- ctx->bufcnt = 0;
- return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
+ if (hash_later) {
+ if (req->nbytes) {
+ scatterwalk_map_and_copy(rctx->buffer, req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
+ } else {
+ memcpy(rctx->buffer, rctx->buffer + xmit_len,
+ hash_later);
+ }
+ rctx->bufcnt = hash_later;
+ } else {
+ rctx->bufcnt = 0;
}
+ if (!final)
+ rctx->total = xmit_len;
+
return 0;
}
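
A worked example of the xmit_len/hash_later split above may help (values assumed, block size 64): 10 bytes are buffered from a previous update and 100 new bytes arrive, not final.

#include <linux/printk.h>

/* Illustrative arithmetic only, mirroring the non-final path above. */
static void sham_split_example(void)
{
	int bs = 64, bufcnt = 10, nbytes = 100;
	int total = bufcnt + nbytes;		/* 110 */
	int xmit_len = total / bs * bs;		/* 64: full blocks to the HW */
	int hash_later = total - xmit_len;	/* 46: stays in rctx->buffer */

	pr_debug("xmit %d bytes, buffer %d bytes\n", xmit_len, hash_later);
}
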
@@ -815,18 +896,9 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
+ dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
- if (ctx->flags & BIT(FLAGS_SG)) {
- dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
- if (ctx->sg->length == ctx->offset) {
- ctx->sg = sg_next(ctx->sg);
- if (ctx->sg)
- ctx->offset = 0;
- }
- } else {
- dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
- DMA_TO_DEVICE);
- }
+ clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
return 0;
}
@@ -887,6 +959,8 @@ static int omap_sham_init(struct ahash_request *req)
ctx->bufcnt = 0;
ctx->digcnt = 0;
+ ctx->total = 0;
+ ctx->offset = 0;
ctx->buflen = BUFLEN;
if (tctx->flags & BIT(FLAGS_HMAC)) {
@@ -909,14 +983,19 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
struct ahash_request *req = dd->req;
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err;
+ bool final = ctx->flags & BIT(FLAGS_FINUP);
dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
+ if (ctx->total < get_block_size(ctx) ||
+ ctx->total < OMAP_SHA_DMA_THRESHOLD)
+ ctx->flags |= BIT(FLAGS_CPU);
+
if (ctx->flags & BIT(FLAGS_CPU))
- err = omap_sham_update_cpu(dd);
+ err = omap_sham_xmit_cpu(dd, ctx->total, final);
else
- err = omap_sham_update_dma_start(dd);
+ err = omap_sham_xmit_dma(dd, ctx->total, final);
 /* wait for dma completion before we can take more data */
dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -930,7 +1009,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err = 0, use_dma = 1;
- if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode)
+ if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
/*
* faster to handle last block with cpu or
* use cpu when dma is not present.
@@ -938,9 +1017,9 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
use_dma = 0;
if (use_dma)
- err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
+ err = omap_sham_xmit_dma(dd, ctx->total, 1);
else
- err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
+ err = omap_sham_xmit_cpu(dd, ctx->total, 1);
ctx->bufcnt = 0;
@@ -988,6 +1067,17 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
struct omap_sham_dev *dd = ctx->dd;
+ if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
+ free_pages((unsigned long)sg_virt(ctx->sg),
+ get_order(ctx->sg->length));
+
+ if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
+ kfree(ctx->sg);
+
+ ctx->sg = NULL;
+
+ dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+
if (!err) {
dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
@@ -1005,9 +1095,6 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
if (req->base.complete)
req->base.complete(&req->base, err);
-
- /* handle new request */
- tasklet_schedule(&dd->done_task);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
@@ -1018,6 +1105,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
unsigned long flags;
int err = 0, ret = 0;
+retry:
spin_lock_irqsave(&dd->lock, flags);
if (req)
ret = ahash_enqueue_request(&dd->queue, req);
@@ -1041,6 +1129,10 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
dd->req = req;
ctx = ahash_request_ctx(req);
+ err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
+ if (err)
+ goto err1;
+
dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
ctx->op, req->nbytes);
@@ -1061,11 +1153,19 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
err = omap_sham_final_req(dd);
}
err1:
- if (err != -EINPROGRESS)
+ dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+ if (err != -EINPROGRESS) {
/* done_task will not finish it, so do it here */
omap_sham_finish_req(req, err);
+ req = NULL;
- dev_dbg(dd->dev, "exit, err: %d\n", err);
+ /*
+ * Execute next request immediately if there is anything
+ * in the queue.
+ */
+ goto retry;
+ }
return ret;
}
@@ -1085,34 +1185,15 @@ static int omap_sham_update(struct ahash_request *req)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
struct omap_sham_dev *dd = ctx->dd;
- int bs = get_block_size(ctx);
if (!req->nbytes)
return 0;
- ctx->total = req->nbytes;
- ctx->sg = req->src;
- ctx->offset = 0;
-
- if (ctx->flags & BIT(FLAGS_FINUP)) {
- if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) {
- /*
- * OMAP HW accel works only with buffers >= 9
- * will switch to bypass in final()
- * final has the same request and data
- */
- omap_sham_append_sg(ctx);
- return 0;
- } else if ((ctx->bufcnt + ctx->total <= bs) ||
- dd->polling_mode) {
- /*
- * faster to use CPU for short transfers or
- * use cpu when dma is not present.
- */
- ctx->flags |= BIT(FLAGS_CPU);
- }
- } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
- omap_sham_append_sg(ctx);
+ if (ctx->total + req->nbytes < ctx->buflen) {
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+ 0, req->nbytes, 0);
+ ctx->bufcnt += req->nbytes;
+ ctx->total += req->nbytes;
return 0;
}
@@ -1137,9 +1218,20 @@ static int omap_sham_final_shash(struct ahash_request *req)
{
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ int offset = 0;
+
+ /*
+ * If we are running HMAC on hardware with limited support, skip
+ * the ipad at the beginning of the buffer when we are going to
+ * fall back to the software algorithm.
+ */
+ if (test_bit(FLAGS_HMAC, &ctx->flags) &&
+ !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
+ offset = get_block_size(ctx);
return omap_sham_shash_digest(tctx->fallback, req->base.flags,
- ctx->buffer, ctx->bufcnt, req->result);
+ ctx->buffer + offset,
+ ctx->bufcnt - offset, req->result);
}
static int omap_sham_final(struct ahash_request *req)
@@ -1154,10 +1246,11 @@ static int omap_sham_final(struct ahash_request *req)
/*
* OMAP HW accel works only with buffers >= 9.
* HMAC is always >= 9 because ipad == block size.
- * If buffersize is less than 240, we use fallback SW encoding,
- * as using DMA + HW in this case doesn't provide any benefit.
+ * If the buffer size is less than DMA_THRESHOLD, we use the
+ * SW fallback, as using DMA + HW in this case doesn't provide
+ * any benefit.
*/
- if ((ctx->digcnt + ctx->bufcnt) < 240)
+ if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD)
return omap_sham_final_shash(req);
else if (ctx->bufcnt)
return omap_sham_enqueue(req, OP_FINAL);
@@ -1323,6 +1416,25 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm)
}
}
+static int omap_sham_export(struct ahash_request *req, void *out)
+{
+ struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
+
+ return 0;
+}
+
+static int omap_sham_import(struct ahash_request *req, const void *in)
+{
+ struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
+ const struct omap_sham_reqctx *ctx_in = in;
+
+ memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
+
+ return 0;
+}
+
static struct ahash_alg algs_sha1_md5[] = {
{
.init = omap_sham_init,
@@ -1341,7 +1453,7 @@ static struct ahash_alg algs_sha1_md5[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1440,7 +1552,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1462,7 +1574,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1535,7 +1647,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1557,7 +1669,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = 0,
+ .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1624,12 +1736,8 @@ static void omap_sham_done_task(unsigned long data)
}
if (test_bit(FLAGS_CPU, &dd->flags)) {
- if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
- /* hash or semi-hash ready */
- err = omap_sham_update_cpu(dd);
- if (err != -EINPROGRESS)
- goto finish;
- }
+ if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
+ goto finish;
} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
omap_sham_update_dma_stop(dd);
@@ -1641,8 +1749,6 @@ static void omap_sham_done_task(unsigned long data)
if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
/* hash or semi-hash ready */
clear_bit(FLAGS_DMA_READY, &dd->flags);
- err = omap_sham_update_dma_start(dd);
- if (err != -EINPROGRESS)
goto finish;
}
}
@@ -1653,6 +1759,10 @@ finish:
dev_dbg(dd->dev, "update done: err: %d\n", err);
 /* finish current request */
omap_sham_finish_req(dd->req, err);
+
+ /* If we are not busy, process next req */
+ if (!test_bit(FLAGS_BUSY, &dd->flags))
+ omap_sham_handle_queue(dd, NULL);
}
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
@@ -1977,8 +2087,14 @@ static int omap_sham_probe(struct platform_device *pdev)
for (i = 0; i < dd->pdata->algs_info_size; i++) {
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
- err = crypto_register_ahash(
- &dd->pdata->algs_info[i].algs_list[j]);
+ struct ahash_alg *alg;
+
+ alg = &dd->pdata->algs_info[i].algs_list[j];
+ alg->export = omap_sham_export;
+ alg->import = omap_sham_import;
+ alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
+ BUFLEN;
+ err = crypto_register_ahash(alg);
if (err)
goto err_algs;
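
With export()/import() and statesize wired up above, ahash users can checkpoint a partial hash and resume it later, which is what algif_hash relies on. A caller-side sketch, assuming req is an existing ahash_request on one of these algorithms:

#include <crypto/hash.h>
#include <linux/slab.h>

static int sham_checkpoint(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int ret;

	/* statesize == sizeof(struct omap_sham_reqctx) + BUFLEN here */
	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = crypto_ahash_export(req, state);	/* calls omap_sham_export() */
	if (!ret)
		ret = crypto_ahash_import(req, state);
	kfree(state);
	return ret;
}
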
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index 2f2681d3458a..afc9a0a86747 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -55,7 +55,7 @@
#define ADF_C3XXX_MAX_ACCELERATORS 3
#define ADF_C3XXX_MAX_ACCELENGINES 6
#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
-#define ADF_C3XXX_ACCELERATORS_MASK 0x3
+#define ADF_C3XXX_ACCELERATORS_MASK 0x7
#define ADF_C3XXX_ACCELENGINES_MASK 0x3F
#define ADF_C3XXX_ETR_MAX_BANKS 16
#define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
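
The mask widening above is simple bit arithmetic: with ADF_C3XXX_MAX_ACCELERATORS == 3, a full per-accelerator mask is (1 << 3) - 1 = 0x7, so the old 0x3 left the third accelerator permanently masked off. As a sketch (the macro is illustrative only):

#define ACCEL_MASK(n)	((1u << (n)) - 1)	/* ACCEL_MASK(3) == 0x7 */
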
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index ce7c4626c983..3744b22f0c46 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -146,6 +146,7 @@ struct adf_admin_comms {
dma_addr_t phy_addr;
dma_addr_t const_tbl_addr;
void *virt_addr;
+ void *virt_tbl_addr;
void __iomem *mailbox_addr;
struct mutex lock; /* protects adf_admin_comms struct */
};
@@ -251,17 +252,19 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
return -ENOMEM;
}
- admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
- (void *) const_tab, 1024,
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
- admin->const_tbl_addr))) {
+ admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev),
+ PAGE_SIZE,
+ &admin->const_tbl_addr,
+ GFP_KERNEL);
+ if (!admin->virt_tbl_addr) {
+ dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
admin->virt_addr, admin->phy_addr);
kfree(admin);
return -ENOMEM;
}
+
+ memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
reg_val = (u64)admin->phy_addr;
ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
@@ -282,9 +285,10 @@ void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
if (admin->virt_addr)
dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
admin->virt_addr, admin->phy_addr);
+ if (admin->virt_tbl_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ admin->virt_tbl_addr, admin->const_tbl_addr);
- dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
- DMA_TO_DEVICE);
mutex_destroy(&admin->lock);
kfree(admin);
accel_dev->admin = NULL;
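
The conversion above trades dma_map_single() on a const table, which streams kernel rodata to the device, for a coherent allocation plus an explicit copy that is freed with a matching size on teardown. The general shape, with dev, tbl and len standing in for the driver's names:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Sketch only: len is assumed to be <= PAGE_SIZE. */
static void *map_const_table(struct device *dev, const void *tbl,
			     size_t len, dma_addr_t *handle)
{
	void *vaddr = dma_zalloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);

	if (!vaddr)
		return NULL;
	memcpy(vaddr, tbl, len);
	return vaddr;	/* free with dma_free_coherent(dev, PAGE_SIZE, ...) */
}
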
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 9b961b37a282..e2454d90d949 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -967,10 +967,6 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int ae;
- obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
- GFP_KERNEL);
- if (!obj_handle->uword_buf)
- return -ENOMEM;
obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
obj_handle->obj_hdr->file_buff;
@@ -982,6 +978,10 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
pr_err("QAT: UOF incompatible\n");
return -EINVAL;
}
+ obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+ GFP_KERNEL);
+ if (!obj_handle->uword_buf)
+ return -ENOMEM;
obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
if (!obj_handle->obj_hdr->file_buff ||
!qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
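
The qat_uclo hunk above is purely an ordering fix: the compatibility check can fail, and allocating uword_buf before it leaked the buffer on that error path. The guiding rule, sketched with a hypothetical qat_header_compatible() helper:

static int parse_checked(struct icp_qat_uclo_objhandle *obj_handle)
{
	/* qat_header_compatible() is made up for illustration. */
	if (!qat_header_compatible(obj_handle->obj_hdr))
		return -EINVAL;	/* nothing allocated yet, nothing to free */

	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
					GFP_KERNEL);
	if (!obj_handle->uword_buf)
		return -ENOMEM;
	return 0;
}
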
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index af508258d2ea..d0f80c6241f9 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -304,11 +304,9 @@ static int rk_crypto_probe(struct platform_device *pdev)
usleep_range(10, 20);
reset_control_deassert(crypto_info->rst);
- err = devm_add_action(dev, rk_crypto_action, crypto_info);
- if (err) {
- reset_control_assert(crypto_info->rst);
+ err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
+ if (err)
goto err_crypto;
- }
spin_lock_init(&crypto_info->lock);
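
devm_add_action_or_reset() subsumes the removed error branch: when registering the action fails, the helper invokes the action itself before returning, so callers no longer undo by hand. A generic sketch with made-up names:

struct my_ctx { int dummy; };		/* stand-in for driver state */

static void my_cleanup(void *data)
{
	/* undo whatever the setup path did */
}

static int my_setup(struct device *dev, struct my_ctx *ctx)
{
	/* On failure, my_cleanup(ctx) has already been called. */
	return devm_add_action_or_reset(dev, my_cleanup, ctx);
}
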
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 3830d7c4e138..90efd10d57a1 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -29,7 +29,8 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
u32 tx_cnt = 0;
u32 spaces;
u32 v;
- int i, err = 0;
+ int err = 0;
+ unsigned int i;
unsigned int ileft = areq->nbytes;
unsigned int oleft = areq->nbytes;
unsigned int todo;
@@ -139,7 +140,8 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
u32 tx_cnt = 0;
u32 v;
u32 spaces;
- int i, err = 0;
+ int err = 0;
+ unsigned int i;
unsigned int ileft = areq->nbytes;
unsigned int oleft = areq->nbytes;
unsigned int todo;
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 107cd2a41cae..3ac6c6c4ad18 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -172,45 +172,45 @@ static struct sun4i_ss_alg_template ss_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-sun4i-ss",
- .cra_priority = 300,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .cra_ctxsize = sizeof(struct sun4i_req_ctx),
- .cra_module = THIS_MODULE,
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = sun4i_ss_cipher_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = sun4i_ss_des3_setkey,
- .encrypt = sun4i_ss_cbc_des3_encrypt,
- .decrypt = sun4i_ss_cbc_des3_decrypt,
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun4i_ss_des3_setkey,
+ .encrypt = sun4i_ss_cbc_des3_encrypt,
+ .decrypt = sun4i_ss_cbc_des3_decrypt,
}
}
},
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-sun4i-ss",
- .cra_priority = 300,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .cra_ctxsize = sizeof(struct sun4i_req_ctx),
- .cra_module = THIS_MODULE,
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = sun4i_ss_cipher_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = sun4i_ss_des3_setkey,
- .encrypt = sun4i_ss_ecb_des3_encrypt,
- .decrypt = sun4i_ss_ecb_des3_decrypt,
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-sun4i-ss",
+ .cra_priority = 300,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_ctxsize = sizeof(struct sun4i_req_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = sun4i_ss_cipher_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = sun4i_ss_des3_setkey,
+ .encrypt = sun4i_ss_ecb_des3_encrypt,
+ .decrypt = sun4i_ss_ecb_des3_decrypt,
}
}
},
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index ff8031498809..0de2f62d51ff 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -20,6 +20,15 @@
int sun4i_hash_crainit(struct crypto_tfm *tfm)
{
+ struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct sun4i_ss_alg_template *algt;
+
+ memset(op, 0, sizeof(struct sun4i_tfm_ctx));
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+ op->ss = algt->ss;
+
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct sun4i_req_ctx));
return 0;
@@ -32,13 +41,10 @@ int sun4i_hash_init(struct ahash_request *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun4i_ss_alg_template *algt;
- struct sun4i_ss_ctx *ss;
memset(op, 0, sizeof(struct sun4i_req_ctx));
algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
- ss = algt->ss;
- op->ss = algt->ss;
op->mode = algt->mode;
return 0;
@@ -129,6 +135,9 @@ int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
return 0;
}
+#define SS_HASH_UPDATE 1
+#define SS_HASH_FINAL 2
+
/*
* sun4i_hash_update: update hash engine
*
@@ -156,7 +165,7 @@ int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
* write remaining data in op->buf
* final state op->len=56
*/
-int sun4i_hash_update(struct ahash_request *areq)
+static int sun4i_hash(struct ahash_request *areq)
{
u32 v, ivmode = 0;
unsigned int i = 0;
@@ -167,8 +176,9 @@ int sun4i_hash_update(struct ahash_request *areq)
*/
struct sun4i_req_ctx *op = ahash_request_ctx(areq);
- struct sun4i_ss_ctx *ss = op->ss;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+ struct sun4i_ss_ctx *ss = tfmctx->ss;
unsigned int in_i = 0; /* advancement in the current SG */
unsigned int end;
/*
@@ -180,22 +190,30 @@ int sun4i_hash_update(struct ahash_request *areq)
u32 spaces, rx_cnt = SS_RX_DEFAULT;
size_t copied = 0;
struct sg_mapping_iter mi;
+ unsigned int j = 0;
+ int zeros;
+ unsigned int index, padlen;
+ __be64 bits;
+ u32 bf[32];
+ u32 wb = 0;
+ unsigned int nwait, nbw = 0;
+ struct scatterlist *in_sg = areq->src;
dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
__func__, crypto_tfm_alg_name(areq->base.tfm),
op->byte_count, areq->nbytes, op->mode,
op->len, op->hash[0]);
- if (areq->nbytes == 0)
+ if (unlikely(areq->nbytes == 0) && (op->flags & SS_HASH_FINAL) == 0)
return 0;
/* protect against overflow */
- if (areq->nbytes > UINT_MAX - op->len) {
+ if (unlikely(areq->nbytes > UINT_MAX - op->len)) {
dev_err(ss->dev, "Cannot process too large request\n");
return -EINVAL;
}
- if (op->len + areq->nbytes < 64) {
+ if (op->len + areq->nbytes < 64 && (op->flags & SS_HASH_FINAL) == 0) {
/* linearize data to op->buf */
copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
op->buf + op->len, areq->nbytes, 0);
@@ -203,14 +221,6 @@ int sun4i_hash_update(struct ahash_request *areq)
return 0;
}
- end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
-
- if (end > areq->nbytes || areq->nbytes - end > 63) {
- dev_err(ss->dev, "ERROR: Bound error %u %u\n",
- end, areq->nbytes);
- return -EINVAL;
- }
-
spin_lock_bh(&ss->slock);
/*
@@ -225,6 +235,34 @@ int sun4i_hash_update(struct ahash_request *areq)
/* Enable the device */
writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+ if ((op->flags & SS_HASH_UPDATE) == 0)
+ goto hash_final;
+
+ /* start of data handling */
+ if ((op->flags & SS_HASH_FINAL) == 0) {
+ end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
+
+ if (end > areq->nbytes || areq->nbytes - end > 63) {
+ dev_err(ss->dev, "ERROR: Bound error %u %u\n",
+ end, areq->nbytes);
+ err = -EINVAL;
+ goto release_ss;
+ }
+ } else {
+ /* Since we have the final flag, we only need a multiple of 4 here */
+ end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+ }
+
+ /* TODO if SGlen % 4 and op->len == 0 then DMA */
+ i = 1;
+ while (in_sg && i == 1) {
+ if ((in_sg->length % 4) != 0)
+ i = 0;
+ in_sg = sg_next(in_sg);
+ }
+ if (i == 1 && op->len == 0)
+ dev_dbg(ss->dev, "We can DMA\n");
+
i = 0;
sg_miter_start(&mi, areq->src, sg_nents(areq->src),
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
@@ -285,7 +323,11 @@ int sun4i_hash_update(struct ahash_request *areq)
}
}
} while (i < end);
- /* final linear */
+
+ /*
+ * Now we have written to the device all that we can,
+ * store the remaining bytes in op->buf
+ */
if ((areq->nbytes - i) < 64) {
while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
/* how many bytes we can read from current SG */
@@ -304,13 +346,21 @@ int sun4i_hash_update(struct ahash_request *areq)
sg_miter_stop(&mi);
+ /*
+ * End of data processing.
+ * If we have the final flag, go to the finalize part;
+ * otherwise, store the partial hash.
+ */
+ if ((op->flags & SS_HASH_FINAL) > 0)
+ goto hash_final;
+
writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
i = 0;
do {
v = readl(ss->base + SS_CTL);
i++;
} while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
- if (i >= SS_TIMEOUT) {
+ if (unlikely(i >= SS_TIMEOUT)) {
dev_err_ratelimited(ss->dev,
"ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
i, SS_TIMEOUT, v, areq->nbytes);
@@ -318,56 +368,24 @@ int sun4i_hash_update(struct ahash_request *areq)
goto release_ss;
}
- /* get the partial hash only if something was written */
for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
-release_ss:
- writel(0, ss->base + SS_CTL);
- spin_unlock_bh(&ss->slock);
- return err;
-}
+ goto release_ss;
/*
- * sun4i_hash_final: finalize hashing operation
+ * hash_final: finalize hashing operation
*
* If we have some remaining bytes, we write them.
 * Then ask the SS to finalize the hashing operation
*
* I do not check RX FIFO size in this function since the size is 32
 * after each enabling and this function never writes more than 32 words.
+ * If we come from the update part, we cannot have more than
+ * 3 remaining bytes to write, and the SS is fast enough not to care.
*/
-int sun4i_hash_final(struct ahash_request *areq)
-{
- u32 v, ivmode = 0;
- unsigned int i;
- unsigned int j = 0;
- int zeros, err = 0;
- unsigned int index, padlen;
- __be64 bits;
- struct sun4i_req_ctx *op = ahash_request_ctx(areq);
- struct sun4i_ss_ctx *ss = op->ss;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- u32 bf[32];
- u32 wb = 0;
- unsigned int nwait, nbw = 0;
-
- dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x",
- __func__, op->byte_count, areq->nbytes, op->mode,
- op->len, op->hash[0]);
- spin_lock_bh(&ss->slock);
-
- /*
- * if we have already written something,
- * restore the partial hash state
- */
- if (op->byte_count > 0) {
- ivmode = SS_IV_ARBITRARY;
- for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
- writel(op->hash[i], ss->base + SS_IV0 + i * 4);
- }
- writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+hash_final:
/* write the remaining words of the wait buffer */
if (op->len > 0) {
@@ -428,7 +446,7 @@ int sun4i_hash_final(struct ahash_request *areq)
/*
* Wait for SS to finish the hash.
- * The timeout could happen only in case of bad overcloking
+ * The timeout could happen only in case of bad overclocking
* or driver bug.
*/
i = 0;
@@ -436,7 +454,7 @@ int sun4i_hash_final(struct ahash_request *areq)
v = readl(ss->base + SS_CTL);
i++;
} while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
- if (i >= SS_TIMEOUT) {
+ if (unlikely(i >= SS_TIMEOUT)) {
dev_err_ratelimited(ss->dev,
"ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
i, SS_TIMEOUT, v, areq->nbytes);
@@ -463,30 +481,41 @@ release_ss:
return err;
}
+int sun4i_hash_final(struct ahash_request *areq)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+
+ op->flags = SS_HASH_FINAL;
+ return sun4i_hash(areq);
+}
+
+int sun4i_hash_update(struct ahash_request *areq)
+{
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+
+ op->flags = SS_HASH_UPDATE;
+ return sun4i_hash(areq);
+}
+
/* sun4i_hash_finup: finalize hashing operation after an update */
int sun4i_hash_finup(struct ahash_request *areq)
{
- int err;
-
- err = sun4i_hash_update(areq);
- if (err != 0)
- return err;
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
- return sun4i_hash_final(areq);
+ op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
+ return sun4i_hash(areq);
}
/* combo of init/update/final functions */
int sun4i_hash_digest(struct ahash_request *areq)
{
int err;
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
err = sun4i_hash_init(areq);
if (err != 0)
return err;
- err = sun4i_hash_update(areq);
- if (err != 0)
- return err;
-
- return sun4i_hash_final(areq);
+ op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
+ return sun4i_hash(areq);
}
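
The sun4i-ss refactor above folds update(), final(), finup() and digest() into one worker driven by two per-request flags; the exported entry points merely set the flags. The shape of the pattern, with illustrative names:

#include <crypto/internal/hash.h>
#include <linux/bitops.h>

#define PHASE_UPDATE	BIT(0)
#define PHASE_FINAL	BIT(1)

/* One worker handles every combination of phases (body elided). */
static int hash_worker(struct ahash_request *areq, unsigned long phases)
{
	if (phases & PHASE_UPDATE) {
		/* feed areq->src into the engine */
	}
	if (phases & PHASE_FINAL) {
		/* pad the stream and read back the digest */
	}
	return 0;
}

static int hash_finup(struct ahash_request *areq)
{
	return hash_worker(areq, PHASE_UPDATE | PHASE_FINAL);
}
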
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index 8e9c05f6e4d4..f04c0f8cf026 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -163,7 +163,7 @@ struct sun4i_req_ctx {
u32 hash[5]; /* for storing SS_IVx register */
char buf[64];
unsigned int len;
- struct sun4i_ss_ctx *ss;
+ int flags;
};
int sun4i_hash_crainit(struct crypto_tfm *tfm);
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index a83ead109d5f..c3d524ea6998 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -1,6 +1,7 @@
config CRYPTO_DEV_VMX_ENCRYPT
tristate "Encryption acceleration support on P8 CPU"
depends on CRYPTO_DEV_VMX
+ select CRYPTO_GHASH
default m
help
Support for VMX cryptographic acceleration instructions on Power8 CPU.
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 6c999cb01b80..27a94a119009 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -26,16 +26,13 @@
#include <linux/hardirq.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/ghash.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <crypto/b128ops.h>
#define IN_INTERRUPT in_interrupt()
-#define GHASH_BLOCK_SIZE (16)
-#define GHASH_DIGEST_SIZE (16)
-#define GHASH_KEY_LEN (16)
-
void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
@@ -55,16 +52,11 @@ struct p8_ghash_desc_ctx {
static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
{
- const char *alg;
+ const char *alg = "ghash-generic";
struct crypto_shash *fallback;
struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!(alg = crypto_tfm_alg_name(tfm))) {
- printk(KERN_ERR "Failed to get algorithm name.\n");
- return -ENOENT;
- }
-
fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
@@ -78,10 +70,18 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
crypto_shash_set_flags(fallback,
crypto_shash_get_flags((struct crypto_shash
*) tfm));
- ctx->fallback = fallback;
- shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx)
- + crypto_shash_descsize(fallback);
+ /* Check if the descsize defined in the algorithm is still enough. */
+ if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
+ + crypto_shash_descsize(fallback)) {
+ printk(KERN_ERR
+ "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
+ alg,
+ shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
+ crypto_shash_descsize(fallback));
+ return -EINVAL;
+ }
+ ctx->fallback = fallback;
return 0;
}
@@ -113,7 +113,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
- if (keylen != GHASH_KEY_LEN)
+ if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
preempt_disable();
@@ -211,7 +211,8 @@ struct shash_alg p8_ghash_alg = {
.update = p8_ghash_update,
.final = p8_ghash_final,
.setkey = p8_ghash_setkey,
- .descsize = sizeof(struct p8_ghash_desc_ctx),
+ .descsize = sizeof(struct p8_ghash_desc_ctx)
+ + sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index cedab7572de3..3e2ab3b14eea 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -14,7 +14,7 @@ if DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
- depends on NVDIMM_DAX
+ depends on LIBNVDIMM && NVDIMM_DAX
default DEV_DAX
help
Support raw access to persistent memory. Note that this
@@ -23,4 +23,9 @@ config DEV_DAX_PMEM
Say Y if unsure
+config NR_DEV_DAX
+ int "Maximum number of Device-DAX instances"
+ default 32768
+ range 256 2147483647
+
endif
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 29f600f2c447..0e499bfca41c 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -13,15 +13,25 @@
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/mount.h>
#include <linux/pfn_t.h>
+#include <linux/hash.h>
+#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include "dax.h"
-static int dax_major;
+static dev_t dax_devt;
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
+static int nr_dax = CONFIG_NR_DEV_DAX;
+module_param(nr_dax, int, S_IRUGO);
+static struct vfsmount *dax_mnt;
+static struct kmem_cache *dax_cache __read_mostly;
+static struct super_block *dax_superblock __read_mostly;
+MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
/**
* struct dax_region - mapping infrastructure for dax devices
@@ -48,7 +58,7 @@ struct dax_region {
* struct dax_dev - subdivision of a dax region
* @region - parent region
* @dev - device backing the character device
- * @kref - enable this data to be tracked in filp->private_data
+ * @cdev - core chardev data
* @alive - !alive + rcu grace period == no new mappings can be established
* @id - child id in the region
* @num_resources - number of physical address extents in this device
@@ -56,41 +66,139 @@ struct dax_region {
*/
struct dax_dev {
struct dax_region *region;
- struct device *dev;
- struct kref kref;
+ struct inode *inode;
+ struct device dev;
+ struct cdev cdev;
bool alive;
int id;
int num_resources;
struct resource res[0];
};
-static void dax_region_free(struct kref *kref)
+static struct inode *dax_alloc_inode(struct super_block *sb)
{
- struct dax_region *dax_region;
+ return kmem_cache_alloc(dax_cache, GFP_KERNEL);
+}
- dax_region = container_of(kref, struct dax_region, kref);
- kfree(dax_region);
+static void dax_i_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+
+ kmem_cache_free(dax_cache, inode);
}
-void dax_region_put(struct dax_region *dax_region)
+static void dax_destroy_inode(struct inode *inode)
{
- kref_put(&dax_region->kref, dax_region_free);
+ call_rcu(&inode->i_rcu, dax_i_callback);
}
-EXPORT_SYMBOL_GPL(dax_region_put);
-static void dax_dev_free(struct kref *kref)
+static const struct super_operations dax_sops = {
+ .statfs = simple_statfs,
+ .alloc_inode = dax_alloc_inode,
+ .destroy_inode = dax_destroy_inode,
+ .drop_inode = generic_delete_inode,
+};
+
+static struct dentry *dax_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- struct dax_dev *dax_dev;
+ return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
+}
- dax_dev = container_of(kref, struct dax_dev, kref);
- dax_region_put(dax_dev->region);
- kfree(dax_dev);
+static struct file_system_type dax_type = {
+ .name = "dax",
+ .mount = dax_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static int dax_test(struct inode *inode, void *data)
+{
+ return inode->i_cdev == data;
+}
+
+static int dax_set(struct inode *inode, void *data)
+{
+ inode->i_cdev = data;
+ return 0;
+}
+
+static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
+{
+ struct inode *inode;
+
+ inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
+ dax_test, dax_set, cdev);
+
+ if (!inode)
+ return NULL;
+
+ if (inode->i_state & I_NEW) {
+ inode->i_mode = S_IFCHR;
+ inode->i_flags = S_DAX;
+ inode->i_rdev = devt;
+ mapping_set_gfp_mask(&inode->i_data, GFP_USER);
+ unlock_new_inode(inode);
+ }
+ return inode;
+}
+
+static void init_once(void *inode)
+{
+ inode_init_once(inode);
+}
+
+static int dax_inode_init(void)
+{
+ int rc;
+
+ dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
+ (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
+ SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ init_once);
+ if (!dax_cache)
+ return -ENOMEM;
+
+ rc = register_filesystem(&dax_type);
+ if (rc)
+ goto err_register_fs;
+
+ dax_mnt = kern_mount(&dax_type);
+ if (IS_ERR(dax_mnt)) {
+ rc = PTR_ERR(dax_mnt);
+ goto err_mount;
+ }
+ dax_superblock = dax_mnt->mnt_sb;
+
+ return 0;
+
+ err_mount:
+ unregister_filesystem(&dax_type);
+ err_register_fs:
+ kmem_cache_destroy(dax_cache);
+
+ return rc;
}
-static void dax_dev_put(struct dax_dev *dax_dev)
+static void dax_inode_exit(void)
+{
+ kern_unmount(dax_mnt);
+ unregister_filesystem(&dax_type);
+ kmem_cache_destroy(dax_cache);
+}
+
+static void dax_region_free(struct kref *kref)
+{
+ struct dax_region *dax_region;
+
+ dax_region = container_of(kref, struct dax_region, kref);
+ kfree(dax_region);
+}
+
+void dax_region_put(struct dax_region *dax_region)
{
- kref_put(&dax_dev->kref, dax_dev_free);
+ kref_put(&dax_region->kref, dax_region_free);
}
+EXPORT_SYMBOL_GPL(dax_region_put);
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, unsigned int align, void *addr,
@@ -98,8 +206,11 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
{
struct dax_region *dax_region;
- dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
+ if (!IS_ALIGNED(res->start, align)
+ || !IS_ALIGNED(resource_size(res), align))
+ return NULL;
+ dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
if (!dax_region)
return NULL;
@@ -116,10 +227,15 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
+static struct dax_dev *to_dax_dev(struct device *dev)
+{
+ return container_of(dev, struct dax_dev, dev);
+}
+
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dax_dev *dax_dev = dev_get_drvdata(dev);
+ struct dax_dev *dax_dev = to_dax_dev(dev);
unsigned long long size = 0;
int i;
@@ -144,180 +260,11 @@ static const struct attribute_group *dax_attribute_groups[] = {
NULL,
};
-static void unregister_dax_dev(void *_dev)
-{
- struct device *dev = _dev;
- struct dax_dev *dax_dev = dev_get_drvdata(dev);
- struct dax_region *dax_region = dax_dev->region;
-
- dev_dbg(dev, "%s\n", __func__);
-
- /*
- * Note, rcu is not protecting the liveness of dax_dev, rcu is
- * ensuring that any fault handlers that might have seen
- * dax_dev->alive == true, have completed. Any fault handlers
- * that start after synchronize_rcu() has started will abort
- * upon seeing dax_dev->alive == false.
- */
- dax_dev->alive = false;
- synchronize_rcu();
-
- get_device(dev);
- device_unregister(dev);
- ida_simple_remove(&dax_region->ida, dax_dev->id);
- ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
- put_device(dev);
- dax_dev_put(dax_dev);
-}
-
-int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
- int count)
-{
- struct device *parent = dax_region->dev;
- struct dax_dev *dax_dev;
- struct device *dev;
- int rc, minor;
- dev_t dev_t;
-
- dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
- if (!dax_dev)
- return -ENOMEM;
- memcpy(dax_dev->res, res, sizeof(*res) * count);
- dax_dev->num_resources = count;
- kref_init(&dax_dev->kref);
- dax_dev->alive = true;
- dax_dev->region = dax_region;
- kref_get(&dax_region->kref);
-
- dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
- if (dax_dev->id < 0) {
- rc = dax_dev->id;
- goto err_id;
- }
-
- minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
- if (minor < 0) {
- rc = minor;
- goto err_minor;
- }
-
- dev_t = MKDEV(dax_major, minor);
- dev = device_create_with_groups(dax_class, parent, dev_t, dax_dev,
- dax_attribute_groups, "dax%d.%d", dax_region->id,
- dax_dev->id);
- if (IS_ERR(dev)) {
- rc = PTR_ERR(dev);
- goto err_create;
- }
- dax_dev->dev = dev;
-
- rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
- if (rc)
- return rc;
-
- return 0;
-
- err_create:
- ida_simple_remove(&dax_minor_ida, minor);
- err_minor:
- ida_simple_remove(&dax_region->ida, dax_dev->id);
- err_id:
- dax_dev_put(dax_dev);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(devm_create_dax_dev);
-
-/* return an unmapped area aligned to the dax region specified alignment */
-static unsigned long dax_dev_get_unmapped_area(struct file *filp,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- unsigned long off, off_end, off_align, len_align, addr_align, align;
- struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
- struct dax_region *dax_region;
-
- if (!dax_dev || addr)
- goto out;
-
- dax_region = dax_dev->region;
- align = dax_region->align;
- off = pgoff << PAGE_SHIFT;
- off_end = off + len;
- off_align = round_up(off, align);
-
- if ((off_end <= off_align) || ((off_end - off_align) < align))
- goto out;
-
- len_align = len + align;
- if ((off + len_align) < off)
- goto out;
-
- addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
- pgoff, flags);
- if (!IS_ERR_VALUE(addr_align)) {
- addr_align += (off - addr_align) & (align - 1);
- return addr_align;
- }
- out:
- return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
-}
-
-static int __match_devt(struct device *dev, const void *data)
-{
- const dev_t *devt = data;
-
- return dev->devt == *devt;
-}
-
-static struct device *dax_dev_find(dev_t dev_t)
-{
- return class_find_device(dax_class, NULL, &dev_t, __match_devt);
-}
-
-static int dax_dev_open(struct inode *inode, struct file *filp)
-{
- struct dax_dev *dax_dev = NULL;
- struct device *dev;
-
- dev = dax_dev_find(inode->i_rdev);
- if (!dev)
- return -ENXIO;
-
- device_lock(dev);
- dax_dev = dev_get_drvdata(dev);
- if (dax_dev) {
- dev_dbg(dev, "%s\n", __func__);
- filp->private_data = dax_dev;
- kref_get(&dax_dev->kref);
- inode->i_flags = S_DAX;
- }
- device_unlock(dev);
-
- if (!dax_dev) {
- put_device(dev);
- return -ENXIO;
- }
- return 0;
-}
-
-static int dax_dev_release(struct inode *inode, struct file *filp)
-{
- struct dax_dev *dax_dev = filp->private_data;
- struct device *dev = dax_dev->dev;
-
- dev_dbg(dax_dev->dev, "%s\n", __func__);
- dax_dev_put(dax_dev);
- put_device(dev);
-
- return 0;
-}
-
static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
const char *func)
{
struct dax_region *dax_region = dax_dev->region;
- struct device *dev = dax_dev->dev;
+ struct device *dev = &dax_dev->dev;
unsigned long mask;
if (!dax_dev->alive)
@@ -382,7 +329,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
struct vm_fault *vmf)
{
unsigned long vaddr = (unsigned long) vmf->virtual_address;
- struct device *dev = dax_dev->dev;
+ struct device *dev = &dax_dev->dev;
struct dax_region *dax_region;
int rc = VM_FAULT_SIGBUS;
phys_addr_t phys;
@@ -422,7 +369,7 @@ static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct file *filp = vma->vm_file;
struct dax_dev *dax_dev = filp->private_data;
- dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+ dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
current->comm, (vmf->flags & FAULT_FLAG_WRITE)
? "write" : "read", vma->vm_start, vma->vm_end);
rcu_read_lock();
@@ -437,7 +384,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
unsigned int flags)
{
unsigned long pmd_addr = addr & PMD_MASK;
- struct device *dev = dax_dev->dev;
+ struct device *dev = &dax_dev->dev;
struct dax_region *dax_region;
phys_addr_t phys;
pgoff_t pgoff;
@@ -479,7 +426,7 @@ static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
struct file *filp = vma->vm_file;
struct dax_dev *dax_dev = filp->private_data;
- dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+ dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
current->comm, (flags & FAULT_FLAG_WRITE)
? "write" : "read", vma->vm_start, vma->vm_end);
@@ -490,81 +437,257 @@ static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
return rc;
}
-static void dax_dev_vm_open(struct vm_area_struct *vma)
-{
- struct file *filp = vma->vm_file;
- struct dax_dev *dax_dev = filp->private_data;
-
- dev_dbg(dax_dev->dev, "%s\n", __func__);
- kref_get(&dax_dev->kref);
-}
-
-static void dax_dev_vm_close(struct vm_area_struct *vma)
-{
- struct file *filp = vma->vm_file;
- struct dax_dev *dax_dev = filp->private_data;
-
- dev_dbg(dax_dev->dev, "%s\n", __func__);
- dax_dev_put(dax_dev);
-}
-
static const struct vm_operations_struct dax_dev_vm_ops = {
.fault = dax_dev_fault,
.pmd_fault = dax_dev_pmd_fault,
- .open = dax_dev_vm_open,
- .close = dax_dev_vm_close,
};
-static int dax_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct dax_dev *dax_dev = filp->private_data;
int rc;
- dev_dbg(dax_dev->dev, "%s\n", __func__);
+ dev_dbg(&dax_dev->dev, "%s\n", __func__);
rc = check_vma(dax_dev, vma, __func__);
if (rc)
return rc;
- kref_get(&dax_dev->kref);
vma->vm_ops = &dax_dev_vm_ops;
vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
return 0;
+}
+
+/* return an unmapped area aligned to the dax region specified alignment */
+static unsigned long dax_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long off, off_end, off_align, len_align, addr_align, align;
+ struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
+ struct dax_region *dax_region;
+
+ if (!dax_dev || addr)
+ goto out;
+
+ dax_region = dax_dev->region;
+ align = dax_region->align;
+ off = pgoff << PAGE_SHIFT;
+ off_end = off + len;
+ off_align = round_up(off, align);
+
+ if ((off_end <= off_align) || ((off_end - off_align) < align))
+ goto out;
+
+ len_align = len + align;
+ if ((off + len_align) < off)
+ goto out;
+ addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
+ pgoff, flags);
+ if (!IS_ERR_VALUE(addr_align)) {
+ addr_align += (off - addr_align) & (align - 1);
+ return addr_align;
+ }
+ out:
+ return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+
+static int dax_open(struct inode *inode, struct file *filp)
+{
+ struct dax_dev *dax_dev;
+
+ dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
+ dev_dbg(&dax_dev->dev, "%s\n", __func__);
+ inode->i_mapping = dax_dev->inode->i_mapping;
+ inode->i_mapping->host = dax_dev->inode;
+ filp->f_mapping = inode->i_mapping;
+ filp->private_data = dax_dev;
+ inode->i_flags = S_DAX;
+
+ return 0;
+}
+
+static int dax_release(struct inode *inode, struct file *filp)
+{
+ struct dax_dev *dax_dev = filp->private_data;
+
+ dev_dbg(&dax_dev->dev, "%s\n", __func__);
+ return 0;
}
static const struct file_operations dax_fops = {
.llseek = noop_llseek,
.owner = THIS_MODULE,
- .open = dax_dev_open,
- .release = dax_dev_release,
- .get_unmapped_area = dax_dev_get_unmapped_area,
- .mmap = dax_dev_mmap,
+ .open = dax_open,
+ .release = dax_release,
+ .get_unmapped_area = dax_get_unmapped_area,
+ .mmap = dax_mmap,
};
+static void dax_dev_release(struct device *dev)
+{
+ struct dax_dev *dax_dev = to_dax_dev(dev);
+ struct dax_region *dax_region = dax_dev->region;
+
+ ida_simple_remove(&dax_region->ida, dax_dev->id);
+ ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
+ dax_region_put(dax_region);
+ iput(dax_dev->inode);
+ kfree(dax_dev);
+}
+
+static void unregister_dax_dev(void *dev)
+{
+ struct dax_dev *dax_dev = to_dax_dev(dev);
+ struct cdev *cdev = &dax_dev->cdev;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /*
+ * Note, rcu is not protecting the liveness of dax_dev, rcu is
+ * ensuring that any fault handlers that might have seen
+ * dax_dev->alive == true, have completed. Any fault handlers
+ * that start after synchronize_rcu() has started will abort
+ * upon seeing dax_dev->alive == false.
+ */
+ dax_dev->alive = false;
+ synchronize_rcu();
+ unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
+ cdev_del(cdev);
+ device_unregister(dev);
+}
+
+struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
+ struct resource *res, int count)
+{
+ struct device *parent = dax_region->dev;
+ struct dax_dev *dax_dev;
+ int rc = 0, minor, i;
+ struct device *dev;
+ struct cdev *cdev;
+ dev_t dev_t;
+
+ dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
+ if (!dax_dev)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < count; i++) {
+ if (!IS_ALIGNED(res[i].start, dax_region->align)
+ || !IS_ALIGNED(resource_size(&res[i]),
+ dax_region->align)) {
+ rc = -EINVAL;
+ break;
+ }
+ dax_dev->res[i].start = res[i].start;
+ dax_dev->res[i].end = res[i].end;
+ }
+
+ if (i < count)
+ goto err_id;
+
+ dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+ if (dax_dev->id < 0) {
+ rc = dax_dev->id;
+ goto err_id;
+ }
+
+ minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
+ if (minor < 0) {
+ rc = minor;
+ goto err_minor;
+ }
+
+ dev_t = MKDEV(MAJOR(dax_devt), minor);
+ dev = &dax_dev->dev;
+ dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
+ if (!dax_dev->inode) {
+ rc = -ENOMEM;
+ goto err_inode;
+ }
+
+ /* device_initialize() so cdev can reference kobj parent */
+ device_initialize(dev);
+
+ cdev = &dax_dev->cdev;
+ cdev_init(cdev, &dax_fops);
+ cdev->owner = parent->driver->owner;
+ cdev->kobj.parent = &dev->kobj;
+ rc = cdev_add(&dax_dev->cdev, dev_t, 1);
+ if (rc)
+ goto err_cdev;
+
+ /* from here on we're committed to teardown via dax_dev_release() */
+ dax_dev->num_resources = count;
+ dax_dev->alive = true;
+ dax_dev->region = dax_region;
+ kref_get(&dax_region->kref);
+
+ dev->devt = dev_t;
+ dev->class = dax_class;
+ dev->parent = parent;
+ dev->groups = dax_attribute_groups;
+ dev->release = dax_dev_release;
+ dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
+ rc = device_add(dev);
+ if (rc) {
+ put_device(dev);
+ return ERR_PTR(rc);
+ }
+
+ rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return dax_dev;
+
+ err_cdev:
+ iput(dax_dev->inode);
+ err_inode:
+ ida_simple_remove(&dax_minor_ida, minor);
+ err_minor:
+ ida_simple_remove(&dax_region->ida, dax_dev->id);
+ err_id:
+ kfree(dax_dev);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_create_dax_dev);
+
static int __init dax_init(void)
{
int rc;
- rc = register_chrdev(0, "dax", &dax_fops);
- if (rc < 0)
+ rc = dax_inode_init();
+ if (rc)
return rc;
- dax_major = rc;
+
+ nr_dax = max(nr_dax, 256);
+ rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
+ if (rc)
+ goto err_chrdev;
dax_class = class_create(THIS_MODULE, "dax");
if (IS_ERR(dax_class)) {
- unregister_chrdev(dax_major, "dax");
- return PTR_ERR(dax_class);
+ rc = PTR_ERR(dax_class);
+ goto err_class;
}
return 0;
+
+ err_class:
+ unregister_chrdev_region(dax_devt, nr_dax);
+ err_chrdev:
+ dax_inode_exit();
+ return rc;
}
static void __exit dax_exit(void)
{
class_destroy(dax_class);
- unregister_chrdev(dax_major, "dax");
+ unregister_chrdev_region(dax_devt, nr_dax);
ida_destroy(&dax_minor_ida);
+ dax_inode_exit();
}
MODULE_AUTHOR("Intel Corporation");
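
The heart of the dax.c rework is the embedded-cdev idiom: both the struct device and the struct cdev live inside the driver object, the device is initialized first so the cdev can parent its kobject to it, and teardown funnels through the device release callback. A condensed sketch with made-up names:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/slab.h>

struct my_dax {
	struct device dev;
	struct cdev cdev;
};

static void my_release(struct device *dev)
{
	kfree(container_of(dev, struct my_dax, dev));
}

static int my_dax_add(struct my_dax *md, dev_t devt,
		      const struct file_operations *fops)
{
	int rc;

	device_initialize(&md->dev);	/* so the cdev can parent its kobj */
	md->dev.release = my_release;	/* required before device_add() */

	cdev_init(&md->cdev, fops);
	md->cdev.kobj.parent = &md->dev.kobj;
	rc = cdev_add(&md->cdev, devt, 1);
	if (rc) {
		put_device(&md->dev);	/* drops into my_release() */
		return rc;
	}

	md->dev.devt = devt;
	rc = device_add(&md->dev);
	if (rc) {
		cdev_del(&md->cdev);
		put_device(&md->dev);
	}
	return rc;
}
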
diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h
index d8b8f1f25054..ddd829ab58c0 100644
--- a/drivers/dax/dax.h
+++ b/drivers/dax/dax.h
@@ -13,12 +13,13 @@
#ifndef __DAX_H__
#define __DAX_H__
struct device;
+struct dax_dev;
struct resource;
struct dax_region;
void dax_region_put(struct dax_region *dax_region);
struct dax_region *alloc_dax_region(struct device *parent,
int region_id, struct resource *res, unsigned int align,
void *addr, unsigned long flags);
-int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
- int count);
+struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
+ struct resource *res, int count);
#endif /* __DAX_H__ */
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 1f01e98c83c7..4a15fa5df98b 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -24,7 +24,7 @@ struct dax_pmem {
struct completion cmp;
};
-struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
+static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
{
return container_of(ref, struct dax_pmem, ref);
}
@@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_exit(ref);
- wait_for_completion(&dax_pmem->cmp);
}
static void dax_pmem_percpu_kill(void *data)
@@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_kill(ref);
+ wait_for_completion(&dax_pmem->cmp);
}
static int dax_pmem_probe(struct device *dev)
@@ -61,6 +61,7 @@ static int dax_pmem_probe(struct device *dev)
int rc;
void *addr;
struct resource res;
+ struct dax_dev *dax_dev;
struct nd_pfn_sb *pfn_sb;
struct dax_pmem *dax_pmem;
struct nd_region *nd_region;
@@ -126,12 +127,12 @@ static int dax_pmem_probe(struct device *dev)
return -ENOMEM;
/* TODO: support for subdividing a dax region... */
- rc = devm_create_dax_dev(dax_region, &res, 1);
+ dax_dev = devm_create_dax_dev(dax_region, &res, 1);
/* child dax_dev instances now own the lifetime of the dax_region */
dax_region_put(dax_region);
- return rc;
+ return PTR_ERR_OR_ZERO(dax_dev);
}
static struct nd_device_driver dax_pmem_driver = {
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 478006b7764a..bf3ea7603a58 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -137,6 +137,10 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
cur_time = jiffies;
+ /* Immediately exit if previous_freq is not initialized yet. */
+ if (!devfreq->previous_freq)
+ goto out;
+
prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
if (prev_lev < 0) {
ret = prev_lev;
@@ -594,17 +598,19 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (devfreq->governor)
err = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_START, NULL);
- mutex_unlock(&devfreq_list_lock);
if (err) {
dev_err(dev, "%s: Unable to start governor for the device\n",
__func__);
goto err_init;
}
+ mutex_unlock(&devfreq_list_lock);
return devfreq;
err_init:
list_del(&devfreq->node);
+ mutex_unlock(&devfreq_list_lock);
+
device_unregister(&devfreq->dev);
err_out:
return ERR_PTR(err);
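The reordering above keeps both the governor start and the error-path
list_del() under devfreq_list_lock, so a concurrent list walker can never
observe a devfreq that is still on the list but about to be unregistered.
The invariant is simply that list membership only changes while the lock
is held.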
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index 0fdae8608961..cd949800eed9 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -17,6 +17,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
+ select REGMAP_MMIO
help
This adds the devfreq-event driver for Exynos SoCs. It provides NoC
(Network on Chip) probe counters to measure the bandwidth of the AXI bus.
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index a5841403bde8..49e712aca0c1 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -176,9 +176,6 @@ static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
return 0;
out:
- edata->load_count = 0;
- edata->total_count = 0;
-
dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
return ret;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ddaee60ae52a..cf04d249a6a4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -586,6 +586,22 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ bool write = (direction == DMA_BIDIRECTIONAL ||
+ direction == DMA_TO_DEVICE);
+ struct reservation_object *resv = dmabuf->resv;
+ long ret;
+
+ /* Wait on any implicit rendering fences */
+ ret = reservation_object_wait_timeout_rcu(resv, write, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
/**
* dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
@@ -608,6 +624,13 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (dmabuf->ops->begin_cpu_access)
ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
+ /* Ensure that all fences are waited upon - but we first allow
+ * the native handler the chance to do so more efficiently if it
+ * chooses. A double invocation here will be a reasonably cheap no-op.
+ */
+ if (ret == 0)
+ ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
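With the implicit-fence wait folded in, a CPU importer only needs the
usual bracketed access sequence. A hedged sketch (the kmap usage is an
assumption about the exporter, not part of this patch):

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(dmabuf, 0);	/* map the first page */
	if (vaddr) {
		/* ... read back the device-written data ... */
		dma_buf_kunmap(dmabuf, 0, vaddr);
	}

	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);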
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
index a8731c853da6..f1989fcaf354 100644
--- a/drivers/dma-buf/fence-array.c
+++ b/drivers/dma-buf/fence-array.c
@@ -99,6 +99,7 @@ const struct fence_ops fence_array_ops = {
.wait = fence_default_wait,
.release = fence_array_release,
};
+EXPORT_SYMBOL(fence_array_ops);
/**
* fence_array_create - Create a custom fence array
@@ -106,14 +107,14 @@ const struct fence_ops fence_array_ops = {
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
- * @signal_on_any [in] signal on any fence in the array
+ * @signal_on_any: [in] signal on any fence in the array
*
* Allocate a fence_array object and initialize the base fence with fence_init().
* In case of error it returns NULL.
*
- * The caller should allocte the fences array with num_fences size
+ * The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
- * array is take and fence_put() is used on each fence on release.
+ * array is taken and fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 9566a62ad8e3..723d8af988e5 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -205,7 +205,7 @@ done:
* @fence: the shared fence to add
*
* Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared_fence has been called.
+ * reservation_object_reserve_shared() has been called.
*/
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct fence *fence)
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index fab95204cf74..2dd4c3db6caa 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -135,10 +135,16 @@ static void sync_print_sync_file(struct seq_file *s,
int i;
seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
- sync_status_str(atomic_read(&sync_file->status)));
+ sync_status_str(!fence_is_signaled(sync_file->fence)));
- for (i = 0; i < sync_file->num_fences; ++i)
- sync_print_fence(s, sync_file->cbs[i].fence, true);
+ if (fence_is_array(sync_file->fence)) {
+ struct fence_array *array = to_fence_array(sync_file->fence);
+
+ for (i = 0; i < array->num_fences; ++i)
+ sync_print_fence(s, array->fences[i], true);
+ } else {
+ sync_print_fence(s, sync_file->fence, true);
+ }
}
static int sync_debugfs_show(struct seq_file *s, void *unused)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 9aaa608dfe01..b29a9e817320 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -28,11 +28,11 @@
static const struct file_operations sync_file_fops;
-static struct sync_file *sync_file_alloc(int size)
+static struct sync_file *sync_file_alloc(void)
{
struct sync_file *sync_file;
- sync_file = kzalloc(size, GFP_KERNEL);
+ sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
if (!sync_file)
return NULL;
@@ -45,6 +45,8 @@ static struct sync_file *sync_file_alloc(int size)
init_waitqueue_head(&sync_file->wq);
+ INIT_LIST_HEAD(&sync_file->cb.node);
+
return sync_file;
err:
@@ -54,14 +56,11 @@ err:
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
- struct sync_file_cb *check;
struct sync_file *sync_file;
- check = container_of(cb, struct sync_file_cb, cb);
- sync_file = check->sync_file;
+ sync_file = container_of(cb, struct sync_file, cb);
- if (atomic_dec_and_test(&sync_file->status))
- wake_up_all(&sync_file->wq);
+ wake_up_all(&sync_file->wq);
}
/**
@@ -76,23 +75,17 @@ struct sync_file *sync_file_create(struct fence *fence)
{
struct sync_file *sync_file;
- sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
+ sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
- sync_file->num_fences = 1;
- atomic_set(&sync_file->status, 1);
+ sync_file->fence = fence;
+
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence), fence->context,
fence->seqno);
- sync_file->cbs[0].fence = fence;
- sync_file->cbs[0].sync_file = sync_file;
- if (fence_add_callback(fence, &sync_file->cbs[0].cb,
- fence_check_cb_func))
- atomic_dec(&sync_file->status);
-
return sync_file;
}
EXPORT_SYMBOL(sync_file_create);
@@ -121,14 +114,73 @@ err:
return NULL;
}
-static void sync_file_add_pt(struct sync_file *sync_file, int *i,
- struct fence *fence)
+/**
+ * sync_file_get_fence - get the fence related to the sync_file fd
+ * @fd: sync_file fd to get the fence from
+ *
+ * Ensures @fd references a valid sync_file and returns a fence that
+ * represents all fences in the sync_file. On error NULL is returned.
+ */
+struct fence *sync_file_get_fence(int fd)
+{
+ struct sync_file *sync_file;
+ struct fence *fence;
+
+ sync_file = sync_file_fdget(fd);
+ if (!sync_file)
+ return NULL;
+
+ fence = fence_get(sync_file->fence);
+ fput(sync_file->file);
+
+ return fence;
+}
+EXPORT_SYMBOL(sync_file_get_fence);
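A typical consumer resolves a userspace fd into a fence and takes over the
returned reference. Hedged sketch, hypothetical driver code:

	struct fence *in_fence;
	long ret;

	in_fence = sync_file_get_fence(in_fence_fd);
	if (!in_fence)
		return -EINVAL;

	/* wait for it (interruptibly), or stash it as a dependency */
	ret = fence_wait(in_fence, true);
	fence_put(in_fence);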
+
+static int sync_file_set_fence(struct sync_file *sync_file,
+ struct fence **fences, int num_fences)
+{
+ struct fence_array *array;
+
+ /*
+ * The references for the fences in the new sync_file are held
+ * in add_fence() during the merge procedure, so for num_fences == 1
+ * we already own a new reference to the fence. For num_fences > 1
+ * we own the reference from the fence_array creation.
+ */
+ if (num_fences == 1) {
+ sync_file->fence = fences[0];
+ kfree(fences);
+ } else {
+ array = fence_array_create(num_fences, fences,
+ fence_context_alloc(1), 1, false);
+ if (!array)
+ return -ENOMEM;
+
+ sync_file->fence = &array->base;
+ }
+
+ return 0;
+}
+
+static struct fence **get_fences(struct sync_file *sync_file, int *num_fences)
+{
+ if (fence_is_array(sync_file->fence)) {
+ struct fence_array *array = to_fence_array(sync_file->fence);
+
+ *num_fences = array->num_fences;
+ return array->fences;
+ }
+
+ *num_fences = 1;
+ return &sync_file->fence;
+}
+
+static void add_fence(struct fence **fences, int *i, struct fence *fence)
{
- sync_file->cbs[*i].fence = fence;
- sync_file->cbs[*i].sync_file = sync_file;
+ fences[*i] = fence;
- if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
- fence_check_cb_func)) {
+ if (!fence_is_signaled(fence)) {
fence_get(fence);
(*i)++;
}
@@ -147,16 +199,24 @@ static void sync_file_add_pt(struct sync_file *sync_file, int *i,
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
struct sync_file *b)
{
- int num_fences = a->num_fences + b->num_fences;
struct sync_file *sync_file;
- int i, i_a, i_b;
- unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
+ struct fence **fences, **nfences, **a_fences, **b_fences;
+ int i, i_a, i_b, num_fences, a_num_fences, b_num_fences;
- sync_file = sync_file_alloc(size);
+ sync_file = sync_file_alloc();
if (!sync_file)
return NULL;
- atomic_set(&sync_file->status, num_fences);
+ a_fences = get_fences(a, &a_num_fences);
+ b_fences = get_fences(b, &b_num_fences);
+ if (a_num_fences > INT_MAX - b_num_fences)
+ return NULL;
+
+ num_fences = a_num_fences + b_num_fences;
+
+ fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ goto err;
/*
* Assume sync_file a and b are both ordered and have no
@@ -165,55 +225,69 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
* If a sync_file can only be created with sync_file_merge
* and sync_file_create, this is a reasonable assumption.
*/
- for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
- struct fence *pt_a = a->cbs[i_a].fence;
- struct fence *pt_b = b->cbs[i_b].fence;
+ for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
+ struct fence *pt_a = a_fences[i_a];
+ struct fence *pt_b = b_fences[i_b];
if (pt_a->context < pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_a);
+ add_fence(fences, &i, pt_a);
i_a++;
} else if (pt_a->context > pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_b);
+ add_fence(fences, &i, pt_b);
i_b++;
} else {
if (pt_a->seqno - pt_b->seqno <= INT_MAX)
- sync_file_add_pt(sync_file, &i, pt_a);
+ add_fence(fences, &i, pt_a);
else
- sync_file_add_pt(sync_file, &i, pt_b);
+ add_fence(fences, &i, pt_b);
i_a++;
i_b++;
}
}
- for (; i_a < a->num_fences; i_a++)
- sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
+ for (; i_a < a_num_fences; i_a++)
+ add_fence(fences, &i, a_fences[i_a]);
+
+ for (; i_b < b_num_fences; i_b++)
+ add_fence(fences, &i, b_fences[i_b]);
- for (; i_b < b->num_fences; i_b++)
- sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
+ if (i == 0)
+ fences[i++] = fence_get(a_fences[0]);
- if (num_fences > i)
- atomic_sub(num_fences - i, &sync_file->status);
- sync_file->num_fences = i;
+ if (num_fences > i) {
+ nfences = krealloc(fences, i * sizeof(*fences),
+ GFP_KERNEL);
+ if (!nfences)
+ goto err;
+
+ fences = nfences;
+ }
+
+ if (sync_file_set_fence(sync_file, fences, i) < 0) {
+ kfree(fences);
+ goto err;
+ }
strlcpy(sync_file->name, name, sizeof(sync_file->name));
return sync_file;
+
+err:
+ fput(sync_file->file);
+ return NULL;
+
}
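The merge walks both fence arrays like a sorted-list merge keyed on fence
context: distinct contexts are copied through, and when both sides carry
the same context the seqno comparison (wraparound-safe via the INT_MAX
check) keeps the later point. For example, merging {ctx1:seq5, ctx3:seq2}
with {ctx2:seq7, ctx3:seq4} yields {ctx1:seq5, ctx2:seq7, ctx3:seq4}. If
every fence was already signaled, one signaled fence is kept (the i == 0
case above) so the resulting sync_file is never empty.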
static void sync_file_free(struct kref *kref)
{
struct sync_file *sync_file = container_of(kref, struct sync_file,
kref);
- int i;
-
- for (i = 0; i < sync_file->num_fences; ++i) {
- fence_remove_callback(sync_file->cbs[i].fence,
- &sync_file->cbs[i].cb);
- fence_put(sync_file->cbs[i].fence);
- }
+ if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+ fence_remove_callback(sync_file->fence, &sync_file->cb);
+ fence_put(sync_file->fence);
kfree(sync_file);
}
@@ -228,17 +302,17 @@ static int sync_file_release(struct inode *inode, struct file *file)
static unsigned int sync_file_poll(struct file *file, poll_table *wait)
{
struct sync_file *sync_file = file->private_data;
- int status;
poll_wait(file, &sync_file->wq, wait);
- status = atomic_read(&sync_file->status);
+ if (!poll_does_not_wait(wait) &&
+ !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+ if (fence_add_callback(sync_file->fence, &sync_file->cb,
+ fence_check_cb_func) < 0)
+ wake_up_all(&sync_file->wq);
+ }
- if (!status)
- return POLLIN;
- if (status < 0)
- return POLLERR;
- return 0;
+ return fence_is_signaled(sync_file->fence) ? POLLIN : 0;
}
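Userspace semantics are unchanged: POLLIN means every fence behind the
sync_file has signaled. The POLL_ENABLED bit just ensures the wakeup
callback is installed at most once, and only for a blocking poll. A hedged
userspace sketch:

	struct pollfd pfd = { .fd = sync_fd, .events = POLLIN };

	ret = poll(&pfd, 1, timeout_ms);
	if (ret > 0 && (pfd.revents & POLLIN))
		/* all fences have signaled */;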
static long sync_file_ioctl_merge(struct sync_file *sync_file,
@@ -315,8 +389,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
{
struct sync_file_info info;
struct sync_fence_info *fence_info = NULL;
+ struct fence **fences;
__u32 size;
- int ret, i;
+ int num_fences, ret, i;
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
return -EFAULT;
@@ -324,6 +399,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (info.flags || info.pad)
return -EINVAL;
+ fences = get_fences(sync_file, &num_fences);
+
/*
* Passing num_fences = 0 means that userspace doesn't want to
* retrieve any sync_fence_info. If num_fences = 0 we skip filling
@@ -333,16 +410,16 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!info.num_fences)
goto no_fences;
- if (info.num_fences < sync_file->num_fences)
+ if (info.num_fences < num_fences)
return -EINVAL;
- size = sync_file->num_fences * sizeof(*fence_info);
+ size = num_fences * sizeof(*fence_info);
fence_info = kzalloc(size, GFP_KERNEL);
if (!fence_info)
return -ENOMEM;
- for (i = 0; i < sync_file->num_fences; ++i)
- sync_fill_fence_info(sync_file->cbs[i].fence, &fence_info[i]);
+ for (i = 0; i < num_fences; i++)
+ sync_fill_fence_info(fences[i], &fence_info[i]);
if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
size)) {
@@ -352,11 +429,8 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences:
strlcpy(info.name, sync_file->name, sizeof(info.name));
- info.status = atomic_read(&sync_file->status);
- if (info.status >= 0)
- info.status = !info.status;
-
- info.num_fences = sync_file->num_fences;
+ info.status = fence_is_signaled(sync_file->fence);
+ info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info)))
ret = -EFAULT;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index af63a6bcf564..141aefbe37ec 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -306,6 +306,7 @@ config MMP_TDMA
depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
select MMP_SRAM if ARCH_MMP
+ select GENERIC_ALLOCATOR
help
Support the MMP Two-Channel DMA engine.
This engine used for MMP Audio DMA and pxa910 SQU.
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index bac5f023013b..d5ba43a87a68 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -317,6 +317,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
while (val) {
u32 desc, len;
+ int error;
+
+ error = pm_runtime_get(cdd->ddev.dev);
+ if (error < 0)
+ dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
+ __func__, error);
q_num = __fls(val);
val &= ~(1 << q_num);
@@ -338,7 +344,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
dma_cookie_complete(&c->txd);
dmaengine_desc_get_callback_invoke(&c->txd, NULL);
- /* Paired with cppi41_dma_issue_pending */
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
}
@@ -362,8 +367,13 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
int error;
error = pm_runtime_get_sync(cdd->ddev.dev);
- if (error < 0)
+ if (error < 0) {
+ dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
+ __func__, error);
+ pm_runtime_put_noidle(cdd->ddev.dev);
+
return error;
+ }
dma_cookie_init(chan);
dma_async_tx_descriptor_init(&c->txd, chan);
@@ -385,8 +395,11 @@ static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
int error;
error = pm_runtime_get_sync(cdd->ddev.dev);
- if (error < 0)
+ if (error < 0) {
+ pm_runtime_put_noidle(cdd->ddev.dev);
+
return;
+ }
WARN_ON(!list_empty(&cdd->pending));
@@ -460,9 +473,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
struct cppi41_dd *cdd = c->cdd;
int error;
- /* PM runtime paired with dmaengine_desc_get_callback_invoke */
error = pm_runtime_get(cdd->ddev.dev);
if ((error != -EINPROGRESS) && error < 0) {
+ pm_runtime_put_noidle(cdd->ddev.dev);
dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
error);
@@ -473,6 +486,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
push_desc_queue(c);
else
pending_desc(c);
+
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
}
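All call sites now follow the same balanced runtime-PM rule: a failed
pm_runtime_get_sync() still raised the usage count, so it must be dropped
with pm_runtime_put_noidle(), and every successful get is paired with a
mark_last_busy + put_autosuspend. Hedged sketch of the idiom:

	error = pm_runtime_get_sync(dev);
	if (error < 0) {
		pm_runtime_put_noidle(dev);	/* undo the usage count */
		return error;
	}

	/* ... touch the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);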
static u32 get_host_pd0(u32 length)
@@ -1059,8 +1075,8 @@ err_chans:
deinit_cppi41(dev, cdd);
err_init_cppi:
pm_runtime_dont_use_autosuspend(dev);
- pm_runtime_put_sync(dev);
err_get_sync:
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
@@ -1072,7 +1088,12 @@ err_get_sync:
static int cppi41_dma_remove(struct platform_device *pdev)
{
struct cppi41_dd *cdd = platform_get_drvdata(pdev);
+ int error;
+ error = pm_runtime_get_sync(&pdev->dev);
+ if (error < 0)
+ dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
+ __func__, error);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&cdd->ddev);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e18a58068bca..77242b37ef87 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1628,6 +1628,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
if (echan->slot[0] < 0) {
dev_err(dev, "Entry slot allocation failed for channel %u\n",
EDMA_CHAN_SLOT(echan->ch_num));
+ ret = echan->slot[0];
goto err_slot;
}
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 83461994e418..a2358780ab2c 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -578,7 +578,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
burst = convert_burst(8);
width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
- v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+ v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_LINEAR_MODE |
DMA_CHAN_CFG_SRC_LINEAR_MODE |
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index ca957a5f4291..b8cde096a808 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -51,7 +51,7 @@ static void qcom_usb_extcon_detect_cable(struct work_struct *work)
if (ret)
return;
- extcon_set_state(info->edev, EXTCON_USB_HOST, !id);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id);
}
static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 309311b1faae..15475892af0c 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -73,13 +73,13 @@ struct rfc2734_header {
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
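The RFC 2734 dg_size field carries the datagram size minus one, so a
1500-byte datagram goes on the wire as 1499. Folding the +1/-1 into the
accessor macros means fwnet_set_hdr_dg_size(1500) encodes 1499 and
fwnet_get_hdr_dg_size() decodes it back to 1500, instead of every call
site having to remember the off-by-one (note the removed "??? + 1"
comment further down).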
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
int retval;
u16 ether_type;
+ if (len <= RFC2374_UNFRAG_HDR_SIZE)
+ return 0;
+
hdr.w0 = be32_to_cpu(buf[0]);
lf = fwnet_get_hdr_lf(&hdr);
if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
}
+
/* A datagram fragment has been received, now the fun begins. */
+
+ if (len <= RFC2374_FRAG_HDR_SIZE)
+ return 0;
+
hdr.w1 = ntohl(buf[1]);
buf += 2;
len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+ dg_size = fwnet_get_hdr_dg_size(&hdr);
+
+ if (fg_off + len > dg_size)
+ return 0;
spin_lock_irqsave(&dev->lock, flags);
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
fw_send_response(card, r, rcode);
}
+static int gasp_source_id(__be32 *p)
+{
+ return be32_to_cpu(p[0]) >> 16;
+}
+
+static u32 gasp_specifier_id(__be32 *p)
+{
+ return (be32_to_cpu(p[0]) & 0xffff) << 8 |
+ (be32_to_cpu(p[1]) & 0xff000000) >> 24;
+}
+
+static u32 gasp_version(__be32 *p)
+{
+ return be32_to_cpu(p[1]) & 0xffffff;
+}
+
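These helpers decode the two-quadlet GASP header: quadlet 0 holds the
source node ID in its top 16 bits and the high 16 bits of the specifier
ID below it; quadlet 1 holds the low 8 bits of the specifier ID in its
top byte and the 24-bit version in the remainder. Naming them lets the
receive path below validate length and identity before touching the
payload.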
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
__be32 *buf_ptr;
int retval;
u32 length;
- u16 source_node_id;
- u32 specifier_id;
- u32 ver;
unsigned long offset;
unsigned long flags;
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
spin_unlock_irqrestore(&dev->lock, flags);
- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
-
- if (specifier_id == IANA_SPECIFIER_ID &&
- (ver == RFC2734_SW_VERSION
+ if (length > IEEE1394_GASP_HDR_SIZE &&
+ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
+ (gasp_version(buf_ptr) == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
- || ver == RFC3146_SW_VERSION
+ || gasp_version(buf_ptr) == RFC3146_SW_VERSION
#endif
- )) {
- buf_ptr += 2;
- length -= IEEE1394_GASP_HDR_SIZE;
- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+ ))
+ fwnet_incoming_packet(dev, buf_ptr + 2,
+ length - IEEE1394_GASP_HDR_SIZE,
+ gasp_source_id(buf_ptr),
context->card->generation, true);
- }
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 631c977b0da5..180f0a96528c 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -566,6 +566,11 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
+ if (lynx->registers == NULL) {
+ dev_err(&dev->dev, "Failed to map registers\n");
+ ret = -ENOMEM;
+ goto fail_deallocate_lynx;
+ }
lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
@@ -578,7 +583,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->rcv_buffer == NULL) {
dev_err(&dev->dev, "Failed to allocate receive buffer\n");
ret = -ENOMEM;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
@@ -641,7 +646,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
dev_err(&dev->dev,
"Failed to allocate shared interrupt %d\n", dev->irq);
ret = -EIO;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->misc.parent = &dev->dev;
@@ -668,7 +673,7 @@ fail_free_irq:
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->pci_device->irq, lynx);
-fail_deallocate:
+fail_deallocate_buffers:
if (lynx->rcv_start_pcl)
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
@@ -679,6 +684,8 @@ fail_deallocate:
pci_free_consistent(lynx->pci_device, PAGE_SIZE,
lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
+
+fail_deallocate_lynx:
kfree(lynx);
fail_disable:
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 0e22f241403b..bca172d42c74 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -209,5 +209,6 @@ config HAVE_ARM_SMCCC
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
+source "drivers/firmware/meson/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 44a59dcfc398..898ac41fa8b3 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-y += broadcom/
+obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c06945160a41..5e23e2d305e7 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-mno-mmx -mno-sse
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
-fno-builtin -fpic -mno-single-pic-base
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
@@ -79,5 +79,6 @@ quiet_cmd_stubcopy = STUBCPY $@
# decompressor. So move our .data to .data.efistub, which is preserved
# explicitly by the decompressor linker script.
#
-STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
+ -R ___ksymtab+sort -R ___kcrctab+sort
STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
new file mode 100644
index 000000000000..170d7e8bcdfb
--- /dev/null
+++ b/drivers/firmware/meson/Kconfig
@@ -0,0 +1,9 @@
+#
+# Amlogic Secure Monitor driver
+#
+config MESON_SM
+ bool
+ default ARCH_MESON
+ depends on ARM64_4K_PAGES
+ help
+ Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/Makefile b/drivers/firmware/meson/Makefile
new file mode 100644
index 000000000000..9ab3884f96bc
--- /dev/null
+++ b/drivers/firmware/meson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MESON_SM) += meson_sm.o
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
new file mode 100644
index 000000000000..b0d254930ed3
--- /dev/null
+++ b/drivers/firmware/meson/meson_sm.c
@@ -0,0 +1,248 @@
+/*
+ * Amlogic Secure Monitor driver
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "meson-sm: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+struct meson_sm_cmd {
+ unsigned int index;
+ u32 smc_id;
+};
+#define CMD(d, s) { .index = (d), .smc_id = (s), }
+
+struct meson_sm_chip {
+ unsigned int shmem_size;
+ u32 cmd_shmem_in_base;
+ u32 cmd_shmem_out_base;
+ struct meson_sm_cmd cmd[];
+};
+
+static const struct meson_sm_chip gxbb_chip = {
+ .shmem_size = SZ_4K,
+ .cmd_shmem_in_base = 0x82000020,
+ .cmd_shmem_out_base = 0x82000021,
+ .cmd = {
+ CMD(SM_EFUSE_READ, 0x82000030),
+ CMD(SM_EFUSE_WRITE, 0x82000031),
+ CMD(SM_EFUSE_USER_MAX, 0x82000033),
+ { /* sentinel */ },
+ },
+};
+
+struct meson_sm_firmware {
+ const struct meson_sm_chip *chip;
+ void __iomem *sm_shmem_in_base;
+ void __iomem *sm_shmem_out_base;
+};
+
+static struct meson_sm_firmware fw;
+
+static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip,
+ unsigned int cmd_index)
+{
+ const struct meson_sm_cmd *cmd = chip->cmd;
+
+ while (cmd->smc_id && cmd->index != cmd_index)
+ cmd++;
+
+ return cmd->smc_id;
+}
+
+static u32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(cmd, arg0, arg1, arg2, arg3, arg4, 0, 0, &res);
+ return res.a0;
+}
+
+static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
+{
+ u32 sm_phy_base;
+
+ sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
+ if (!sm_phy_base)
+ return NULL;
+
+ return ioremap_cache(sm_phy_base, size);
+}
+
+/**
+ * meson_sm_call - generic SMC32 call to the secure-monitor
+ *
+ * @cmd_index: Index of the SMC32 function ID
+ * @ret: Returned value
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: 0 on success, a negative value on error
+ */
+int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 cmd, lret;
+
+ if (!fw.chip)
+ return -ENOENT;
+
+ cmd = meson_sm_get_cmd(fw.chip, cmd_index);
+ if (!cmd)
+ return -EINVAL;
+
+ lret = __meson_sm_call(cmd, arg0, arg1, arg2, arg3, arg4);
+
+ if (ret)
+ *ret = lret;
+
+ return 0;
+}
+EXPORT_SYMBOL(meson_sm_call);
+
+/**
+ * meson_sm_call_read - retrieve data from secure-monitor
+ *
+ * @buffer: Buffer to store the retrieved data
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of read data on success, a negative value on error
+ */
+int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 size;
+
+ if (!fw.chip)
+ return -ENOENT;
+
+ if (!fw.chip->cmd_shmem_out_base)
+ return -EINVAL;
+
+ if (meson_sm_call(cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (!size || size > fw.chip->shmem_size)
+ return -EINVAL;
+
+ if (buffer)
+ memcpy(buffer, fw.sm_shmem_out_base, size);
+
+ return size;
+}
+EXPORT_SYMBOL(meson_sm_call_read);
+
+/**
+ * meson_sm_call_write - send data to secure-monitor
+ *
+ * @buffer: Buffer containing data to send
+ * @size: Size of the data to send
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of sent data on success, a negative value on error
+ */
+int meson_sm_call_write(void *buffer, unsigned int size, unsigned int cmd_index,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 written;
+
+ if (!fw.chip)
+ return -ENOENT;
+
+ if (size > fw.chip->shmem_size)
+ return -EINVAL;
+
+ if (!fw.chip->cmd_shmem_in_base)
+ return -EINVAL;
+
+ memcpy(fw.sm_shmem_in_base, buffer, size);
+
+ if (meson_sm_call(cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (!written)
+ return -EINVAL;
+
+ return written;
+}
+EXPORT_SYMBOL(meson_sm_call_write);
+
+static const struct of_device_id meson_sm_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip },
+ { /* sentinel */ },
+};
+
+int __init meson_sm_init(void)
+{
+ const struct meson_sm_chip *chip;
+ const struct of_device_id *matched_np;
+ struct device_node *np;
+
+ np = of_find_matching_node_and_match(NULL, meson_sm_ids, &matched_np);
+ if (!np)
+ return -ENODEV;
+
+ chip = matched_np->data;
+ if (!chip) {
+ pr_err("unable to setup secure-monitor data\n");
+ goto out;
+ }
+
+ if (chip->cmd_shmem_in_base) {
+ fw.sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw.sm_shmem_in_base))
+ goto out;
+ }
+
+ if (chip->cmd_shmem_out_base) {
+ fw.sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw.sm_shmem_out_base))
+ goto out_in_base;
+ }
+
+ fw.chip = chip;
+ pr_info("secure-monitor enabled\n");
+
+ return 0;
+
+out_in_base:
+ iounmap(fw.sm_shmem_in_base);
+out:
+ return -EINVAL;
+}
+device_initcall(meson_sm_init);
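Once meson_sm_init() has matched the DT node and mapped the shared memory,
clients call through the command indices. A hedged sketch of a
hypothetical caller (the meaning of the returned value and the unused
arguments are assumptions):

	u32 user_max;
	int ret;

	ret = meson_sm_call(SM_EFUSE_USER_MAX, &user_max, 0, 0, 0, 0, 0);
	if (ret < 0)
		return ret;

	pr_info("efuse user area: %u bytes\n", user_max);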
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index e64a501adbf4..d95c70227c05 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1,4 +1,7 @@
-/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+/*
+ * Qualcomm SCM driver
+ *
+ * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -12,7 +15,7 @@
*
*/
#include <linux/platform_device.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
@@ -376,8 +379,6 @@ static const struct of_device_id qcom_scm_dt_match[] = {
{}
};
-MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
-
static struct platform_driver qcom_scm_driver = {
.driver = {
.name = "qcom_scm",
@@ -414,14 +415,4 @@ static int __init qcom_scm_init(void)
return platform_driver_register(&qcom_scm_driver);
}
-
subsys_initcall(qcom_scm_init);
-
-static void __exit qcom_scm_exit(void)
-{
- platform_driver_unregister(&qcom_scm_driver);
-}
-module_exit(qcom_scm_exit);
-
-MODULE_DESCRIPTION("Qualcomm SCM driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 26ee00f6bd58..ed37e5908b91 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -22,10 +22,6 @@ menuconfig GPIOLIB
if GPIOLIB
-config GPIO_DEVRES
- def_bool y
- depends on HAS_IOMEM
-
config OF_GPIO
def_bool y
depends on OF
@@ -284,7 +280,7 @@ config GPIO_MM_LANTIQ
config GPIO_MOCKUP
tristate "GPIO Testing Driver"
- depends on GPIOLIB
+ depends on GPIOLIB && SYSFS
select GPIO_SYSFS
help
This enables GPIO Testing driver, which provides a way to test GPIO
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ab28a2daeacc..d074c2299393 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,7 +2,7 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
-obj-$(CONFIG_GPIO_DEVRES) += devres.o
+obj-$(CONFIG_GPIOLIB) += devres.o
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 9457e2022bf6..dc37dbe4b46d 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -219,6 +219,7 @@ static const struct of_device_id ath79_gpio_of_match[] = {
{ .compatible = "qca,ar9340-gpio" },
{},
};
+MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 425501c39527..793518a30afe 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, h->host_data);
- irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
return 0;
}
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index cd5dc27320a2..1ed6132b993c 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -293,10 +293,10 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
- u32 mask = ~(1 << (d->irq - gc->irq_base));
+ u32 mask = d->mask;
irq_gc_lock(gc);
- writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
+ writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip));
irq_gc_unlock(gc);
}
@@ -305,7 +305,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
@@ -319,8 +319,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
@@ -333,8 +332,7 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
@@ -347,8 +345,7 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
struct irq_chip_type *ct = irq_data_get_chip_type(d);
-
- u32 mask = 1 << (d->irq - gc->irq_base);
+ u32 mask = d->mask;
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
@@ -462,7 +459,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
- irq = mvchip->irqbase + i;
+ irq = irq_find_mapping(mvchip->domain, i);
if (!(cause & (1 << i)))
continue;
@@ -655,6 +652,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
struct irq_chip_type *ct;
struct clk *clk;
unsigned int ngpios;
+ bool have_irqs;
int soc_variant;
int i, cpu, id;
int err;
@@ -665,6 +663,9 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
else
soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;
+ /* Some gpio controllers do not provide irq support */
+ have_irqs = of_irq_count(np) != 0;
+
mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
GFP_KERNEL);
if (!mvchip)
@@ -697,7 +698,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
mvchip->chip.set = mvebu_gpio_set;
- mvchip->chip.to_irq = mvebu_gpio_to_irq;
+ if (have_irqs)
+ mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
mvchip->chip.ngpio = ngpios;
mvchip->chip.can_sleep = false;
@@ -758,34 +760,30 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
/* Some gpio controllers do not provide irq support */
- if (!of_irq_count(np))
+ if (!have_irqs)
return 0;
- /* Setup the interrupt handlers. Each chip can have up to 4
- * interrupt handlers, with each handler dealing with 8 GPIO
- * pins. */
- for (i = 0; i < 4; i++) {
- int irq = platform_get_irq(pdev, i);
-
- if (irq < 0)
- continue;
- irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
- mvchip);
- }
-
- mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
- if (mvchip->irqbase < 0) {
- dev_err(&pdev->dev, "no irqs\n");
- return mvchip->irqbase;
+ mvchip->domain =
+ irq_domain_add_linear(np, ngpios, &irq_generic_chip_ops, NULL);
+ if (!mvchip->domain) {
+ dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
+ mvchip->chip.label);
+ return -ENODEV;
}
- gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
- mvchip->membase, handle_level_irq);
- if (!gc) {
- dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- return -ENOMEM;
+ err = irq_alloc_domain_generic_chips(
+ mvchip->domain, ngpios, 2, np->name, handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
+ if (err) {
+ dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
+ mvchip->chip.label);
+ goto err_domain;
}
+ /* NOTE: The common accessors cannot be used because of the percpu
+ * access to the mask registers
+ */
+ gc = irq_get_domain_generic_chip(mvchip->domain, 0);
gc->private = mvchip;
ct = &gc->chip_types[0];
ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
@@ -803,27 +801,23 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
ct->handler = handle_edge_irq;
ct->chip.name = mvchip->chip.label;
- irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
- IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+ /* Setup the interrupt handlers. Each chip can have up to 4
+ * interrupt handlers, with each handler dealing with 8 GPIO
+ * pins.
+ */
+ for (i = 0; i < 4; i++) {
+ int irq = platform_get_irq(pdev, i);
- /* Setup irq domain on top of the generic chip. */
- mvchip->domain = irq_domain_add_simple(np, mvchip->chip.ngpio,
- mvchip->irqbase,
- &irq_domain_simple_ops,
- mvchip);
- if (!mvchip->domain) {
- dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
- mvchip->chip.label);
- err = -ENODEV;
- goto err_generic_chip;
+ if (irq < 0)
+ continue;
+ irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+ mvchip);
}
return 0;
-err_generic_chip:
- irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
- IRQ_LEVEL | IRQ_NOPROBE);
- kfree(gc);
+err_domain:
+ irq_domain_remove(mvchip->domain);
return err;
}
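The conversion replaces the preallocated irq_desc range with a linear
irqdomain, so virqs are created on demand and gpiod_to_irq() resolves
through irq_find_mapping(). A hedged sketch of the generic shape:

	domain = irq_domain_add_linear(np, ngpios,
				       &irq_generic_chip_ops, NULL);
	if (!domain)
		return -ENODEV;

	err = irq_alloc_domain_generic_chips(domain, ngpios, 2, name,
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE |
					     IRQ_LEVEL, 0, 0);
	if (err) {
		irq_domain_remove(domain);
		return err;
	}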
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b9daa0bf32a4..ee1724806f46 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -308,8 +308,10 @@ static int mxs_gpio_probe(struct platform_device *pdev)
writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
- if (irq_base < 0)
- return irq_base;
+ if (irq_base < 0) {
+ err = irq_base;
+ goto out_iounmap;
+ }
port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
@@ -349,6 +351,8 @@ out_irqdomain_remove:
irq_domain_remove(port->domain);
out_irqdesc_free:
irq_free_descs(irq_base, 32);
+out_iounmap:
+ iounmap(port->base);
return err;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 5d059866d17a..fe731f094257 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
- memcpy(reg_val, chip->reg_output, NBANK(chip));
mutex_lock(&chip->i2c_lock);
+ memcpy(reg_val, chip->reg_output, NBANK(chip));
for (bank = 0; bank < NBANK(chip); bank++) {
bank_mask = mask[bank / sizeof(*mask)] >>
((bank % sizeof(*mask)) * 8);
if (bank_mask) {
bank_val = bits[bank / sizeof(*bits)] >>
((bank % sizeof(*bits)) * 8);
+ bank_val &= bank_mask;
reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
}
}
@@ -607,7 +608,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
if (client->irq && irq_base != -1
&& (chip->driver_data & PCA_INT)) {
-
ret = pca953x_read_regs(chip,
chip->regs->input, chip->irq_stat);
if (ret)
@@ -732,7 +732,7 @@ out:
static const struct of_device_id pca953x_dt_ids[];
static int pca953x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *i2c_id)
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
@@ -773,27 +773,45 @@ static int pca953x_probe(struct i2c_client *client,
}
chip->regulator = reg;
- if (id) {
- chip->driver_data = id->driver_data;
+ if (i2c_id) {
+ chip->driver_data = i2c_id->driver_data;
} else {
- const struct acpi_device_id *id;
+ const struct acpi_device_id *acpi_id;
const struct of_device_id *match;
match = of_match_device(pca953x_dt_ids, &client->dev);
if (match) {
chip->driver_data = (int)(uintptr_t)match->data;
} else {
- id = acpi_match_device(pca953x_acpi_ids, &client->dev);
- if (!id) {
+ acpi_id = acpi_match_device(pca953x_acpi_ids, &client->dev);
+ if (!acpi_id) {
ret = -ENODEV;
goto err_exit;
}
- chip->driver_data = id->driver_data;
+ chip->driver_data = acpi_id->driver_data;
}
}
mutex_init(&chip->i2c_lock);
+ /*
+ * In case we have an i2c-mux controlled by a GPIO provided by an
+ * expander using the same driver higher on the device tree, read the
+ * i2c adapter nesting depth and use the retrieved value as lockdep
+ * subclass for chip->i2c_lock.
+ *
+ * REVISIT: This solution is not complete. It protects us from lockdep
+ * false positives when the expander controlling the i2c-mux is on
+ * a different level on the device tree, but not when it's on the same
+ * level on a different branch (in which case the subclass number
+ * would be the same).
+ *
+ * TODO: Once a correct solution is developed, a similar fix should be
+ * applied to all other i2c-controlled GPIO expanders (and potentially
+ * regmap-i2c).
+ */
+ lockdep_set_subclass(&chip->i2c_lock,
+ i2c_adapter_depth(client->adapter));
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e7d422a6b90b..5b0042776ec7 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -409,7 +409,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
* 801/1801/1600, bits are cleared when read.
* Edge detect register is not present on 801/1600/1801
*/
- if (stmpe->partnum != STMPE801 || stmpe->partnum != STMPE1600 ||
+ if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 &&
stmpe->partnum != STMPE1801) {
stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
stmpe_reg_write(stmpe,
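With ||, the compound condition was a tautology (partnum can never differ
from all three IDs at once fail to differ from one of them), i.e. the
status write-back ran for every variant, including the 801/1600/1801
parts whose bits clear on read; && restricts the explicit clear to parts
that actually need it, matching the comment above.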
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 5a5a6cb00eea..d6e21f1a70a9 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -97,7 +97,7 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !!(ret & BIT(pos));
+ return !(ret & BIT(pos));
}
static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 99256115bea5..c2a80b4cbf32 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -66,6 +66,7 @@ static const struct of_device_id ts4800_gpio_of_match[] = {
{ .compatible = "technologic,ts4800-gpio", },
{},
};
+MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match);
static struct platform_driver ts4800_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 58ece201b8e6..72a4b326fd0d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -653,14 +653,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
int idx, i;
unsigned int irq_flags;
+ int ret = -ENOENT;
for (i = 0, idx = 0; idx <= index; i++) {
struct acpi_gpio_info info;
struct gpio_desc *desc;
desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
- if (IS_ERR(desc))
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
break;
+ }
if (info.gpioint && idx++ == index) {
int irq = gpiod_to_irq(desc);
@@ -679,7 +682,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
}
- return -ENOENT;
+ return ret;
}
EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ecad3f0e3b77..193f15d50bba 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -26,14 +26,18 @@
#include "gpiolib.h"
-static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
{
- return chip->gpiodev->dev.of_node == data;
+ struct of_phandle_args *gpiospec = data;
+
+ return chip->gpiodev->dev.of_node == gpiospec->np &&
+ chip->of_xlate(chip, gpiospec, NULL) >= 0;
}
-static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
+static struct gpio_chip *of_find_gpiochip_by_xlate(
+ struct of_phandle_args *gpiospec)
{
- return gpiochip_find(np, of_gpiochip_match_node);
+ return gpiochip_find(gpiospec, of_gpiochip_match_node_and_xlate);
}
static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
@@ -79,7 +83,7 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
return ERR_PTR(ret);
}
- chip = of_find_gpiochip_by_node(gpiospec.np);
+ chip = of_find_gpiochip_by_xlate(&gpiospec);
if (!chip) {
desc = ERR_PTR(-EPROBE_DEFER);
goto out;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f0fc3a0d37c8..868128a676ba 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/anon_inodes.h>
+#include <linux/file.h>
#include <linux/kfifo.h>
#include <linux/poll.h>
#include <linux/timekeeping.h>
@@ -333,6 +334,13 @@ struct linehandle_state {
u32 numdescs;
};
+#define GPIOHANDLE_REQUEST_VALID_FLAGS \
+ (GPIOHANDLE_REQUEST_INPUT | \
+ GPIOHANDLE_REQUEST_OUTPUT | \
+ GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+ GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+ GPIOHANDLE_REQUEST_OPEN_SOURCE)
+
static long linehandle_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
@@ -344,6 +352,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
/* TODO: check if descriptors are really input */
for (i = 0; i < lh->numdescs; i++) {
val = gpiod_get_value_cansleep(lh->descs[i]);
@@ -414,6 +424,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
+ struct file *file;
int fd, i, ret;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -444,6 +455,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
u32 lflags = handlereq.flags;
struct gpio_desc *desc;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
@@ -479,26 +501,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
i--;
lh->numdescs = handlereq.lines;
- fd = anon_inode_getfd("gpio-linehandle",
- &linehandle_fileops,
- lh,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_descs;
}
+ file = anon_inode_getfile("gpio-linehandle",
+ &linehandle_fileops,
+ lh,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
handlereq.fd = fd;
if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
- ret = -EFAULT;
- goto out_free_descs;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
lh->numdescs);
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_descs:
for (; i >= 0; i--)
gpiod_free(lh->descs[i]);
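Both chardev paths now publish the fd only after every fallible step. The
ordering matters: once fd_install() runs, the fd is visible to userspace
and close() owns the cleanup, so a failing copy_to_user() must instead
fput() the file (which triggers release()) and return the unused fd slot.
Hedged sketch of the idiom with hypothetical names:

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("example", &example_fops, priv,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	if (copy_to_user(uptr, &fd, sizeof(fd))) {
		fput(file);		/* runs release(), frees priv */
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);		/* point of no return */
	return 0;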
@@ -536,6 +573,10 @@ struct lineevent_state {
struct mutex read_lock;
};
+#define GPIOEVENT_REQUEST_VALID_FLAGS \
+ (GPIOEVENT_REQUEST_RISING_EDGE | \
+ GPIOEVENT_REQUEST_FALLING_EDGE)
+
static unsigned int lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
@@ -623,6 +664,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
val = gpiod_get_value_cansleep(le->desc);
if (val < 0)
return val;
@@ -695,6 +738,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
struct gpioevent_request eventreq;
struct lineevent_state *le;
struct gpio_desc *desc;
+ struct file *file;
u32 offset;
u32 lflags;
u32 eflags;
@@ -726,6 +770,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
/* This is just wrong: we don't look for events on output lines */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
ret = -EINVAL;
@@ -777,23 +833,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_desc;
- fd = anon_inode_getfd("gpio-event",
- &lineevent_fileops,
- le,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_irq;
}
+ file = anon_inode_getfile("gpio-event",
+ &lineevent_fileops,
+ le,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
eventreq.fd = fd;
if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
- ret = -EFAULT;
- goto out_free_irq;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_irq:
free_irq(le->irq, le);
out_free_desc:
@@ -823,6 +894,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
struct gpiochip_info chipinfo;
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
strncpy(chipinfo.name, dev_name(&gdev->dev),
sizeof(chipinfo.name));
chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
@@ -839,7 +912,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (lineinfo.line_offset > gdev->ngpio)
+ if (lineinfo.line_offset >= gdev->ngpio)
return -EINVAL;
desc = &gdev->descs[lineinfo.line_offset];
@@ -2664,8 +2737,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
if (IS_ERR(desc))
return PTR_ERR(desc);
- /* Flush direction if something changed behind our back */
- if (chip->get_direction) {
+ /*
+ * If it's fast: flush the direction setting if something changed
+ * behind our back
+ */
+ if (!chip->can_sleep && chip->get_direction) {
int dir = chip->get_direction(chip, offset);
if (dir)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index fc357319de35..483059a22b1b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -108,33 +108,13 @@ config DRM_KMS_CMA_HELPER
source "drivers/gpu/drm/i2c/Kconfig"
-config DRM_TDFX
- tristate "3dfx Banshee/Voodoo3+"
- depends on DRM && PCI
- help
- Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
- graphics card. If M is selected, the module will be called tdfx.
-
source "drivers/gpu/drm/arm/Kconfig"
-config DRM_R128
- tristate "ATI Rage 128"
- depends on DRM && PCI
- select FW_LOADER
- help
- Choose this option if you have an ATI Rage 128 graphics card. If M
- is selected, the module will be called r128. AGP support for
- this card is strongly suggested (unless you have a PCI version).
-
config DRM_RADEON
tristate "ATI Radeon"
depends on DRM && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select FW_LOADER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
select POWER_SUPPLY
select HWMON
@@ -153,12 +133,8 @@ source "drivers/gpu/drm/radeon/Kconfig"
config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select FW_LOADER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
select POWER_SUPPLY
select HWMON
@@ -171,55 +147,11 @@ config DRM_AMDGPU
If M is selected, the module will be called amdgpu.
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
-source "drivers/gpu/drm/amd/powerplay/Kconfig"
-
-source "drivers/gpu/drm/amd/acp/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
-config DRM_I810
- tristate "Intel I810"
- # !PREEMPT because of missing ioctl locking
- depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
- help
- Choose this option if you have an Intel I810 graphics card. If M is
- selected, the module will be called i810. AGP support is required
- for this driver to work.
-
source "drivers/gpu/drm/i915/Kconfig"
-config DRM_MGA
- tristate "Matrox g200/g400"
- depends on DRM && PCI
- select FW_LOADER
- help
- Choose this option if you have a Matrox G200, G400 or G450 graphics
- card. If M is selected, the module will be called mga. AGP
- support is required for this driver to work.
-
-config DRM_SIS
- tristate "SiS video cards"
- depends on DRM && AGP
- depends on FB_SIS || FB_SIS=n
- help
- Choose this option if you have a SiS 630 or compatible video
- chipset. If M is selected the module will be called sis. AGP
- support is required for this driver to work.
-
-config DRM_VIA
- tristate "Via unichrome video cards"
- depends on DRM && PCI
- help
- Choose this option if you have a Via unichrome or compatible video
- chipset. If M is selected the module will be called via.
-
-config DRM_SAVAGE
- tristate "Savage video cards"
- depends on DRM && PCI
- help
- Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
- chipset. If M is selected the module will be called savage.
-
config DRM_VGEM
tristate "Virtual GEM provider"
depends on DRM
@@ -290,3 +222,81 @@ source "drivers/gpu/drm/arc/Kconfig"
source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
+
+# Keep legacy drivers last
+
+menuconfig DRM_LEGACY
+ bool "Enable legacy drivers (DANGEROUS)"
+ depends on DRM
+ help
+ Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
+ APIs to user-space, which can be used to circumvent access
+ restrictions and other security measures. For backwards compatibility
+ those drivers are still available, but their use is highly
+ inadvisable and might harm your system.
+
+ We recommend using the safe modeset-only drivers instead and
+ performing 3D emulation in user-space.
+
+ Unless you have strong reasons to go rogue, say "N".
+
+if DRM_LEGACY
+
+config DRM_TDFX
+ tristate "3dfx Banshee/Voodoo3+"
+ depends on DRM && PCI
+ help
+ Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
+ graphics card. If M is selected, the module will be called tdfx.
+
+config DRM_R128
+ tristate "ATI Rage 128"
+ depends on DRM && PCI
+ select FW_LOADER
+ help
+ Choose this option if you have an ATI Rage 128 graphics card. If M
+ is selected, the module will be called r128. AGP support for
+ this card is strongly suggested (unless you have a PCI version).
+
+config DRM_I810
+ tristate "Intel I810"
+ # !PREEMPT because of missing ioctl locking
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+ help
+ Choose this option if you have an Intel I810 graphics card. If M is
+ selected, the module will be called i810. AGP support is required
+ for this driver to work.
+
+config DRM_MGA
+ tristate "Matrox g200/g400"
+ depends on DRM && PCI
+ select FW_LOADER
+ help
+ Choose this option if you have a Matrox G200, G400 or G450 graphics
+ card. If M is selected, the module will be called mga. AGP
+ support is required for this driver to work.
+
+config DRM_SIS
+ tristate "SiS video cards"
+ depends on DRM && AGP
+ depends on FB_SIS || FB_SIS=n
+ help
+ Choose this option if you have a SiS 630 or compatible video
+ chipset. If M is selected the module will be called sis. AGP
+ support is required for this driver to work.
+
+config DRM_VIA
+ tristate "Via unichrome video cards"
+ depends on DRM && PCI
+ help
+ Choose this option if you have a Via unichrome or compatible video
+ chipset. If M is selected the module will be called via.
+
+config DRM_SAVAGE
+ tristate "Savage video cards"
+ depends on DRM && PCI
+ help
+ Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+ chipset. If M is selected the module will be called savage.
+
+endif # DRM_LEGACY
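Because the drivers above are now gated behind the menuconfig, a configuration that previously set only the driver symbol must enable DRM_LEGACY as well. A hypothetical config fragment:

CONFIG_DRM_LEGACY=y
CONFIG_DRM_R128=m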
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0238bf8bc8c3..25c720454017 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,10 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
- drm_modeset_lock.o drm_atomic.o drm_bridge.o
+ drm_modeset_lock.o drm_atomic.o drm_bridge.o \
+ drm_framebuffer.o drm_connector.o drm_blend.o \
+ drm_encoder.o drm_mode_object.o drm_property.o \
+ drm_plane.o drm_color_mgmt.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -24,7 +27,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
- drm_simple_kms_helper.o drm_blend.o
+ drm_simple_kms_helper.o drm_modeset_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
@@ -46,7 +49,7 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I915) += i915/
+obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
obj-$(CONFIG_DRM_VC4) += vc4/
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 7335c0420c70..61360e27715f 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -1,3 +1,10 @@
+config DRM_AMDGPU_SI
+ bool "Enable amdgpu support for SI parts"
+ depends on DRM_AMDGPU
+ help
+ Choose this option if you want to enable experimental support
+ for SI ASICs.
+
config DRM_AMDGPU_CIK
bool "Enable amdgpu support for CIK parts"
depends on DRM_AMDGPU
@@ -25,3 +32,4 @@ config DRM_AMDGPU_GART_DEBUGFS
Selecting this option creates a debugfs file to inspect the mapped
pages. Uses more memory for housekeeping, enable only for debugging.
+source "drivers/gpu/drm/amd/acp/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7fcdcedaadb..248a05d02917 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -23,13 +23,16 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
- amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
+ amdgpu_gtt_mgr.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
amdgpu_amdkfd_gfx_v7.o
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
+
amdgpu-y += \
vi.o
@@ -50,15 +53,13 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_dpm.o \
amdgpu_powerplay.o \
- cz_smc.o cz_dpm.o \
- tonga_smc.o tonga_dpm.o \
- fiji_smc.o fiji_dpm.o \
- iceland_smc.o iceland_dpm.o
+ cz_smc.o cz_dpm.o
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o
+ dce_v11_0.o \
+ dce_virtual.o
# add GFX block
amdgpu-y += \
@@ -110,14 +111,10 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
-ifneq ($(CONFIG_DRM_AMD_POWERPLAY),)
-
include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
-endif
-
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
CFLAGS_amdgpu_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 06192698bd96..b8d66670bb17 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,6 +90,7 @@
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
+#define ENCODER_OBJECT_ID_VIRTUAL 0x28
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -119,6 +120,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
+#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
/* deleted */
@@ -147,6 +149,7 @@
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
#define GRAPH_OBJECT_ENUM_ID7 0x07
+#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -408,6 +411,10 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
+#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
+
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 700c56baf2de..496f72b134eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -51,11 +51,13 @@
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"
#include "gpu_scheduler.h"
+#include "amdgpu_virt.h"
/*
* Modules parameters.
@@ -63,6 +65,7 @@
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
+extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
@@ -91,6 +94,9 @@ extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
+extern int amdgpu_sclk_deep_sleep_en;
+extern char *amdgpu_virtual_display;
+extern unsigned amdgpu_pp_feature_mask;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -105,7 +111,7 @@ extern char *amdgpu_disable_cu;
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
-#define AMDGPU_MAX_VCE_RINGS 2
+#define AMDGPU_MAX_VCE_RINGS 3
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
@@ -248,10 +254,9 @@ struct amdgpu_vm_pte_funcs {
uint64_t pe, uint64_t src,
unsigned count);
/* write pte one entry at a time with addr mapping */
- void (*write_pte)(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+ void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr);
/* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
@@ -316,6 +321,10 @@ struct amdgpu_ring_funcs {
/* note usage for clock and power gating */
void (*begin_use)(struct amdgpu_ring *ring);
void (*end_use)(struct amdgpu_ring *ring);
+ void (*emit_switch_buffer) (struct amdgpu_ring *ring);
+ void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+ unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
+ unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
};
/*
@@ -396,48 +405,8 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
- * TTM.
+ * BO.
*/
-
-#define AMDGPU_TTM_LRU_SIZE 20
-
-struct amdgpu_mman_lru {
- struct list_head *lru[TTM_NUM_MEM_TYPES];
- struct list_head *swap_lru;
-};
-
-struct amdgpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_device bdev;
- bool mem_global_referenced;
- bool initialized;
-
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *vram;
- struct dentry *gtt;
-#endif
-
- /* buffer handling */
- const struct amdgpu_buffer_funcs *buffer_funcs;
- struct amdgpu_ring *buffer_funcs_ring;
- /* Scheduler entity for buffer moves */
- struct amd_sched_entity entity;
-
- /* custom LRU management */
- struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
- /* guard for log2_size array, don't add anything in between */
- struct amdgpu_mman_lru guard;
-};
-
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
- struct reservation_object *resv,
- struct fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
struct amdgpu_bo_list_entry {
struct amdgpu_bo *robj;
struct ttm_validate_buffer tv;
@@ -476,8 +445,6 @@ struct amdgpu_bo_va {
#define AMDGPU_GEM_DOMAIN_MAX 0x3
struct amdgpu_bo {
- /* Protected by gem.mutex */
- struct list_head list;
/* Protected by tbo.reserved */
u32 prefered_domains;
u32 allowed_domains;
@@ -492,6 +459,7 @@ struct amdgpu_bo {
u64 metadata_flags;
void *metadata;
u32 metadata_size;
+ unsigned prime_shared_count;
/* list of all virtual address to which this bo
* is associated to
*/
@@ -500,10 +468,12 @@ struct amdgpu_bo {
struct amdgpu_device *adev;
struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
+ struct amdgpu_bo *shadow;
struct ttm_bo_kmap_obj dma_buf_vmap;
struct amdgpu_mn *mn;
struct list_head mn_list;
+ struct list_head shadow_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
@@ -653,6 +623,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint32_t flags);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
/*
* GPU MC structures, functions & helpers
@@ -679,6 +650,8 @@ struct amdgpu_mc {
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
+ uint32_t srbm_soft_reset;
+ struct amdgpu_mode_mc_save save;
};
/*
@@ -723,13 +696,14 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
*/
struct amdgpu_flip_work {
- struct work_struct flip_work;
+ struct delayed_work flip_work;
struct work_struct unpin_work;
struct amdgpu_device *adev;
int crtc_id;
+ u32 target_vblank;
uint64_t base;
struct drm_pending_vblank_event *event;
- struct amdgpu_bo *old_rbo;
+ struct amdgpu_bo *old_abo;
struct fence *excl;
unsigned shared_count;
struct fence **shared;
@@ -817,13 +791,17 @@ struct amdgpu_ring {
/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16
+/* Maximum number of PTEs the hardware can write with one command */
+#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
+
/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
-#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)
+
+/* LOG2 number of continuous pages for the fragment field */
+#define AMDGPU_LOG2_PAGES_PER_FRAG 4
#define AMDGPU_PTE_VALID (1 << 0)
#define AMDGPU_PTE_SYSTEM (1 << 1)
@@ -835,10 +813,7 @@ struct amdgpu_ring {
#define AMDGPU_PTE_READABLE (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)
-/* PTE (Page Table Entry) fragment field for different page sizes */
-#define AMDGPU_PTE_FRAG_4KB (0 << 7)
-#define AMDGPU_PTE_FRAG_64KB (4 << 7)
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
+#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
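+/*
+ * The parameterised macro reproduces the removed fixed encodings:
+ * AMDGPU_PTE_FRAG(0) matches the old AMDGPU_PTE_FRAG_4KB and
+ * AMDGPU_PTE_FRAG(4) the old AMDGPU_PTE_FRAG_64KB, i.e.
+ * 2^AMDGPU_LOG2_PAGES_PER_FRAG contiguous 4 KiB pages per fragment.
+ */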
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
@@ -848,6 +823,7 @@ struct amdgpu_ring {
struct amdgpu_vm_pt {
struct amdgpu_bo_list_entry entry;
uint64_t addr;
+ uint64_t shadow_addr;
};
struct amdgpu_vm {
@@ -950,7 +926,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
@@ -959,7 +934,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- struct ttm_mem_reg *mem);
+ bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
@@ -994,6 +969,7 @@ struct amdgpu_ctx {
spinlock_t ring_lock;
struct fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
+ bool preamble_presented;
};
struct amdgpu_ctx_mgr {
@@ -1197,6 +1173,10 @@ struct amdgpu_gfx {
unsigned ce_ram_size;
struct amdgpu_cu_info cu_info;
const struct amdgpu_gfx_funcs *funcs;
+
+ /* reset mask */
+ uint32_t grbm_soft_reset;
+ uint32_t srbm_soft_reset;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1249,11 +1229,16 @@ struct amdgpu_cs_parser {
struct fence *fence;
uint64_t bytes_moved_threshold;
uint64_t bytes_moved;
+ struct amdgpu_bo_list_entry *evictable;
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
};
+#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means the command submission involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means the preamble IB is presented for the first time in its context */
+#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means a context switch occurred */
+
struct amdgpu_job {
struct amd_sched_job base;
struct amdgpu_device *adev;
@@ -1262,9 +1247,10 @@ struct amdgpu_job {
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
struct fence *fence; /* the hw fence */
+ uint32_t preamble_status;
uint32_t num_ibs;
void *owner;
- uint64_t ctx;
+ uint64_t fence_ctx; /* the fence_context this job uses */
bool vm_needs_flush;
unsigned vm_id;
uint64_t vm_pd_addr;
@@ -1685,6 +1671,7 @@ struct amdgpu_uvd {
bool address_64_bit;
bool use_ctx_buf;
struct amd_sched_entity entity;
+ uint32_t srbm_soft_reset;
};
/*
@@ -1711,6 +1698,8 @@ struct amdgpu_vce {
struct amdgpu_irq_src irq;
unsigned harvest_config;
struct amd_sched_entity entity;
+ uint32_t srbm_soft_reset;
+ unsigned num_rings;
};
/*
@@ -1728,9 +1717,14 @@ struct amdgpu_sdma_instance {
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+#ifdef CONFIG_DRM_AMDGPU_SI
+ /* SI DMA has a different trap IRQ number for the second engine */
+ struct amdgpu_irq_src trap_irq_1;
+#endif
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
int num_instances;
+ uint32_t srbm_soft_reset;
};
/*
@@ -1832,6 +1826,7 @@ struct amdgpu_asic_funcs {
bool (*read_disabled_bios)(struct amdgpu_device *adev);
bool (*read_bios_from_rom)(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes);
+ void (*detect_hw_virtualization) (struct amdgpu_device *adev);
int (*read_register)(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -1841,8 +1836,9 @@ struct amdgpu_asic_funcs {
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
- /* query virtual capabilities */
- u32 (*get_virtual_caps)(struct amdgpu_device *adev);
+ /* static power management */
+ int (*get_pcie_lanes)(struct amdgpu_device *adev);
+ void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
};
/*
@@ -1935,16 +1931,6 @@ struct amdgpu_atcs {
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
-
-/* GPU virtualization */
-#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
-#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
-struct amdgpu_virtualization {
- bool supports_sr_iov;
- bool is_virtual;
- u32 caps;
-};
-
/*
* Core structure, functions and helpers.
*/
@@ -1958,6 +1944,8 @@ struct amdgpu_ip_block_status {
bool valid;
bool sw;
bool hw;
+ bool late_initialized;
+ bool hang;
};
struct amdgpu_device {
@@ -2016,6 +2004,8 @@ struct amdgpu_device {
spinlock_t pcie_idx_lock;
amdgpu_rreg_t pcie_rreg;
amdgpu_wreg_t pcie_wreg;
+ amdgpu_rreg_t pciep_rreg;
+ amdgpu_wreg_t pciep_wreg;
/* protects concurrent UVD register access */
spinlock_t uvd_ctx_idx_lock;
amdgpu_rreg_t uvd_ctx_rreg;
@@ -2056,7 +2046,16 @@ struct amdgpu_device {
atomic64_t num_evictions;
atomic_t gpu_reset_counter;
+ /* data for buffer migration throttling */
+ struct {
+ spinlock_t lock;
+ s64 last_update_us;
+ s64 accum_us; /* accumulated microseconds */
+ u32 log2_max_MBps;
+ } mm_stats;
+
/* display */
+ bool enable_virtual_display;
struct amdgpu_mode_info mode_info;
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
@@ -2119,6 +2118,14 @@ struct amdgpu_device {
struct kfd_dev *kfd;
struct amdgpu_virtualization virtualization;
+
+ /* link all shadow bo */
+ struct list_head shadow_list;
+ struct mutex shadow_list_lock;
+ /* link all gtt */
+ spinlock_t gtt_list_lock;
+ struct list_head gtt_list;
+
};
bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2151,6 +2158,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
+#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -2194,6 +2203,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
#define REG_GET_FIELD(value, reg, field) \
(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD(reg, field, val) \
+ WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
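+
+/*
+ * Usage sketch (register and field names assumed for illustration):
+ *   WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_GFX, 1)
+ * performs a read-modify-write that touches only that field and
+ * leaves all other bits of the register intact.
+ */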
+
/*
* BIOS helpers.
*/
@@ -2237,14 +2249,17 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
+#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
+#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
+#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
+#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
@@ -2259,9 +2274,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
+#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
+#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
+#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2293,6 +2312,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+ -EINVAL)
+
#define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
@@ -2344,11 +2368,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
(adev)->pm.funcs->powergate_vce((adev), (g)))
-#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
-
#define amdgpu_dpm_get_current_power_state(adev) \
(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
@@ -2389,6 +2408,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
+bool amdgpu_need_backup(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
@@ -2397,7 +2417,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
u32 ip_instance, u32 ring,
struct amdgpu_ring **out_ring);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -2414,6 +2434,10 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
+int amdgpu_ttm_global_init(struct amdgpu_device *adev);
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
@@ -2425,11 +2449,13 @@ void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
+bool amdgpu_atpx_dgpu_req_power_for_displays(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
+static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
#endif
/*
@@ -2446,8 +2472,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -2493,6 +2519,7 @@ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo);
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
#include "amdgpu_object.h"
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 892d60fb225b..2057683f7b59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle)
{
int i, ret;
struct device *dev;
-
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* return early if no ACP */
+ if (!adev->acp.acp_genpd)
+ return 0;
+
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 5cd7b736a9de..5796539a0bcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -25,6 +25,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -333,6 +334,16 @@ int amdgpu_atif_handler(struct amdgpu_device *adev,
#endif
}
}
+ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+ if ((adev->flags & AMD_IS_PX) &&
+ amdgpu_atpx_dgpu_req_power_for_displays()) {
+ pm_runtime_get_sync(adev->ddev->dev);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(adev->ddev);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ }
+ }
/* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d080d0807a5b..dba8a5b25e66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -143,14 +143,6 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
return r;
}
-u32 pool_to_domain(enum kgd_memory_pool p)
-{
- switch (p) {
- case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
- default: return AMDGPU_GEM_DOMAIN_GTT;
- }
-}
-
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 362bedc9e507..1a0a5f7cccbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -103,11 +103,11 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout);
+ unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
unsigned int watch_point_id,
@@ -437,11 +437,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
+ int timeout = utimeout;
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -452,9 +453,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
temp = RREG32(mmCP_HQD_ACTIVE);
if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
break;
- if (timeout == 0) {
- pr_err("kfd: cp queue preemption time out (%dms)\n",
- temp);
+ if (timeout <= 0) {
+ pr_err("kfd: cp queue preemption time out.\n");
release_queue(kgd);
return -ETIME;
}
@@ -467,12 +467,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout)
+ unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_base_addr;
uint32_t temp;
+ int timeout = utimeout;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -485,7 +486,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
break;
- if (timeout == 0)
+ if (timeout <= 0)
return -ETIME;
msleep(20);
timeout -= 20;
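+ /*
+ * The signed local copy of the unsigned utimeout argument lets the
+ * subtraction above go negative instead of wrapping, so the
+ * "timeout <= 0" check always terminates the loop.
+ */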
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 04b744d64b57..6697612239c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -62,10 +62,10 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout);
+ unsigned int utimeout);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
@@ -349,11 +349,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
+ unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
+ int timeout = utimeout;
acquire_queue(kgd, pipe_id, queue_id);
@@ -363,9 +364,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
temp = RREG32(mmCP_HQD_ACTIVE);
if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
break;
- if (timeout == 0) {
- pr_err("kfd: cp queue preemption time out (%dms)\n",
- temp);
+ if (timeout <= 0) {
+ pr_err("kfd: cp queue preemption time out.\n");
release_queue(kgd);
return -ETIME;
}
@@ -378,12 +378,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout)
+ unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_base_addr;
uint32_t temp;
+ int timeout = utimeout;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -396,7 +397,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
break;
- if (timeout == 0)
+ if (timeout <= 0)
return -ETIME;
msleep(20);
timeout -= 20;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index fe872b82e619..8e6bf548d689 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -259,6 +259,33 @@ static const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown
};
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 size, data_offset;
+ u8 frev, crev;
+ ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+ ATOM_OBJECT_HEADER *obj_header;
+
+ if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+ return false;
+
+ if (crev < 2)
+ return false;
+
+ obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+ path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usDisplayPathTableOffset));
+
+ return path_obj->ucNumOfDispPath != 0;
+}
+
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
@@ -964,6 +991,48 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
return -EINVAL;
switch (crev) {
+ case 2:
+ case 3:
+ case 5:
+ /* r6xx, r7xx, evergreen, ni, si.
+ * TODO: add support for asic_type <= CHIP_RV770 */
+ if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+ args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v3.ucPostDiv;
+ dividers->enable_post_div = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v3.ucRefDiv;
+ dividers->vco_mode = (args.v3.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ } else {
+ /* for SI we use ComputeMemoryClockParam for memory plls */
+ if (adev->asic_type >= CHIP_TAHITI)
+ return -EINVAL;
+ args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+ if (strobe_mode)
+ args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ dividers->post_div = args.v5.ucPostDiv;
+ dividers->enable_post_div = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+ dividers->enable_dithen = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+ dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+ dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+ dividers->ref_div = args.v5.ucRefDiv;
+ dividers->vco_mode = (args.v5.ucCntlFlag &
+ ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+ }
+ break;
case 4:
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
@@ -1108,6 +1177,32 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+ u16 *vddc, u16 *vddci, u16 *mvdd)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ u8 frev, crev;
+ u16 data_offset;
+ union firmware_info *firmware_info;
+
+ *vddc = 0;
+ *vddci = 0;
+ *mvdd = 0;
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+ *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ if ((frev == 2) && (crev >= 2)) {
+ *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+ *mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
+ }
+ }
+}
+
union set_voltage {
struct _SET_VOLTAGE_PS_ALLOCATION alloc;
struct _SET_VOLTAGE_PARAMETERS v1;
@@ -1115,6 +1210,52 @@ union set_voltage {
struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev;
+
+ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 1:
+ return -EINVAL;
+ case 2:
+ args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+ args.v2.ucVoltageMode = 0;
+ args.v2.usVoltageLevel = 0;
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+ break;
+ case 3:
+ args.v3.ucVoltageType = voltage_type;
+ args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+ args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.v3.usVoltageLevel);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+ u16 *voltage,
+ u16 leakage_idx)
+{
+ return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
+}
+
void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
u16 voltage_level,
u8 voltage_type)
@@ -1335,6 +1476,50 @@ static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOL
return NULL;
}
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+ int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+ union voltage_object_info *voltage_info;
+ union voltage_object *voltage_object = NULL;
+
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ voltage_info = (union voltage_object_info *)
+ (adev->mode_info.atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 3:
+ switch (crev) {
+ case 1:
+ voltage_object = (union voltage_object *)
+ amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
+ voltage_type,
+ VOLTAGE_OBJ_SVID2);
+ if (voltage_object) {
+ *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+ *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
+
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
u8 voltage_type, u8 voltage_mode)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 8c2e69661799..17356151db38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -140,6 +140,8 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *
uint8_t id);
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
+bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
+
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev);
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
@@ -206,5 +208,19 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage);
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+ u16 *voltage,
+ u16 leakage_idx);
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+ u16 *vddc, u16 *vddci, u16 *mvdd);
+int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
+ u8 clock_type,
+ u32 clock,
+ bool strobe_mode,
+ struct atom_clock_dividers *dividers);
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 10b5ddf2c588..dae35a96a694 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -29,6 +29,7 @@ struct amdgpu_atpx {
acpi_handle handle;
struct amdgpu_atpx_functions functions;
bool is_hybrid;
+ bool dgpu_req_power_for_displays;
};
static struct amdgpu_atpx_priv {
@@ -73,6 +74,10 @@ bool amdgpu_is_atpx_hybrid(void) {
return amdgpu_atpx_priv.atpx.is_hybrid;
}
+bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+ return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -204,6 +209,10 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = true;
}
+ atpx->dgpu_req_power_for_displays = false;
+ if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS)
+ atpx->dgpu_req_power_for_displays = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 33e47a43ae32..345305235349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+ r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
+ false);
if (r)
goto exit_do_move;
r = fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 651115dcce12..c02db01f6583 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
+ entry->tv.shared = !entry->robj->prime_shared_count;
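+ /*
+ * Assumed rationale: a buffer that has been exported through PRIME
+ * gets an exclusive fence (tv.shared == false) so that importing
+ * devices are synchronized with our writes as well.
+ */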
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
gds_obj = entry->robj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index bc0440f7a31d..662976292535 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -616,7 +616,7 @@ static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, un
return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
-int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
{
@@ -637,7 +637,7 @@ int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
return r;
}
-int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
+static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state)
{
@@ -711,6 +711,47 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode
return -EINVAL;
}
+static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
+ enum cgs_ucode_id type)
+{
+ CGS_FUNC_ADEV;
+ uint16_t fw_version;
+
+ switch (type) {
+ case CGS_UCODE_ID_SDMA0:
+ fw_version = adev->sdma.instance[0].fw_version;
+ break;
+ case CGS_UCODE_ID_SDMA1:
+ fw_version = adev->sdma.instance[1].fw_version;
+ break;
+ case CGS_UCODE_ID_CP_CE:
+ fw_version = adev->gfx.ce_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_PFP:
+ fw_version = adev->gfx.pfp_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_ME:
+ fw_version = adev->gfx.me_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT1:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT2:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_RLC_G:
+ fw_version = adev->gfx.rlc_fw_version;
+ break;
+ default:
+ DRM_ERROR("firmware type %d do not have version\n", type);
+ fw_version = 0;
+ }
+ return fw_version;
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@@ -741,6 +782,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->mc_addr = gpu_addr;
info->image_size = data_size;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+ info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
char fw_name[30] = {0};
@@ -753,10 +795,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
case CHIP_TOPAZ:
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
+ strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
+ ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
+ strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
strcpy(fw_name, "amdgpu/fiji_smc.bin");
@@ -848,6 +899,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_GFX_SE_INFO:
sys_info->value = adev->gfx.config.max_shader_engines;
break;
+ case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
+ sys_info->value = adev->pdev->subsystem_device;
+ break;
+ case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
+ sys_info->value = adev->pdev->subsystem_vendor;
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ff0b55a65ca3..086aa5c9c634 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -168,12 +168,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
}
/* Any defined maximum tmds clock limit we must not exceed? */
- if (connector->max_tmds_clock > 0) {
+ if (connector->display_info.max_tmds_clock > 0) {
/* mode_clock is clock in kHz for mode to be modeset on this connector */
mode_clock = amdgpu_connector->pixelclock_for_modeset;
/* Maximum allowable input clock in kHz */
- max_tmds_clock = connector->max_tmds_clock * 1000;
+ max_tmds_clock = connector->display_info.max_tmds_clock;
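+ /* display_info.max_tmds_clock is already stored in kHz, unlike the
+ * removed per-connector field, which was in MHz. */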
DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
connector->name, mode_clock, max_tmds_clock);
@@ -765,12 +765,20 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
-static void amdgpu_connector_destroy(struct drm_connector *connector)
+static void amdgpu_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_connector->ddc_bus->has_aux)
+ if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) {
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
+ amdgpu_connector->ddc_bus->has_aux = false;
+ }
+}
+
+static void amdgpu_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
amdgpu_connector_free_edid(connector);
kfree(amdgpu_connector->con_priv);
drm_connector_unregister(connector);
@@ -824,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_lcd_property,
};
@@ -934,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_property,
};
@@ -1201,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
.detect = amdgpu_connector_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1491,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1500,10 +1512,93 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_lcd_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
+static struct drm_encoder *
+amdgpu_connector_virtual_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+ int i;
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
+ continue;
+
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ return encoder;
+ }
+
+ /* pick the first one */
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
+ return NULL;
+}
+
+static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
+
+ if (encoder) {
+ amdgpu_connector_add_common_modes(encoder, connector);
+ }
+
+ return 0;
+}
+
+static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static int
+amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+{
+ return 0;
+}
+
+static enum drm_connector_status
+amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static int
+amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void amdgpu_connector_virtual_force(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = {
+ .get_modes = amdgpu_connector_virtual_get_modes,
+ .mode_valid = amdgpu_connector_virtual_mode_valid,
+ .best_encoder = amdgpu_connector_virtual_encoder,
+};
+
+static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = {
+ .dpms = amdgpu_connector_virtual_dpms,
+ .detect = amdgpu_connector_virtual_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = amdgpu_connector_virtual_set_property,
+ .destroy = amdgpu_connector_destroy,
+ .force = amdgpu_connector_virtual_force,
+};
+
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
@@ -1888,6 +1983,17 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
+ case DRM_MODE_CONNECTOR_VIRTUAL:
+ amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
+ if (!amdgpu_dig_connector)
+ goto failed;
+ amdgpu_connector->con_priv = amdgpu_dig_connector;
+ drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs);
+ subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0307ff5887c5..82dc8d20e28a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -91,6 +91,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
uint32_t *offset)
{
struct drm_gem_object *gobj;
+ unsigned long size;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@@ -101,6 +102,11 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true;
p->uf_entry.user_pages = NULL;
+
+ size = amdgpu_bo_size(p->uf_entry.robj);
+ if (size != PAGE_SIZE || (data->offset + 8) > size)
+ return -EINVAL;
+
*offset = data->offset;
drm_gem_object_unreference_unlocked(gobj);
@@ -235,70 +241,212 @@ free_chunk:
return ret;
}
-/* Returns how many bytes TTM can move per IB.
+/* Convert microseconds to bytes. */
+static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
+{
+ if (us <= 0 || !adev->mm_stats.log2_max_MBps)
+ return 0;
+
+ /* Since accum_us is incremented by a million per second, just
+ * multiply it by the number of MB/s to get the number of bytes.
+ */
+ return us << adev->mm_stats.log2_max_MBps;
+}
+
+static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
+{
+ if (!adev->mm_stats.log2_max_MBps)
+ return 0;
+
+ return bytes >> adev->mm_stats.log2_max_MBps;
+}
+
+/* Returns how many bytes TTM can move right now. If no bytes can be moved,
+ * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
+ * which means it can go over the threshold once. If that happens, the driver
+ * will be in debt and no other buffer migrations can be done until that debt
+ * is repaid.
+ *
+ * This approach allows moving a buffer of any size (it's important to allow
+ * that).
+ *
+ * The currency is simply time in microseconds and it increases as the clock
+ * ticks. The accumulated microseconds (us) are converted to bytes and
+ * returned.
*/
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
- u64 real_vram_size = adev->mc.real_vram_size;
- u64 vram_usage = atomic64_read(&adev->vram_usage);
+ s64 time_us, increment_us;
+ u64 max_bytes;
+ u64 free_vram, total_vram, used_vram;
- /* This function is based on the current VRAM usage.
+ /* Allow a maximum of 200 accumulated ms. This is basically per-IB
+ * throttling.
*
- * - If all of VRAM is free, allow relocating the number of bytes that
- * is equal to 1/4 of the size of VRAM for this IB.
+ * It means that in order to get full max MBps, at least 5 IBs per
+ * second must be submitted and not more than 200ms apart from each
+ * other.
+ */
+ const s64 us_upper_bound = 200000;
- * - If more than one half of VRAM is occupied, only allow relocating
- * 1 MB of data for this IB.
- *
- * - From 0 to one half of used VRAM, the threshold decreases
- * linearly.
- * __________________
- * 1/4 of -|\ |
- * VRAM | \ |
- * | \ |
- * | \ |
- * | \ |
- * | \ |
- * | \ |
- * | \________|1 MB
- * |----------------|
- * VRAM 0 % 100 %
- * used used
- *
- * Note: It's a threshold, not a limit. The threshold must be crossed
- * for buffer relocations to stop, so any buffer of an arbitrary size
- * can be moved as long as the threshold isn't crossed before
- * the relocation takes place. We don't want to disable buffer
- * relocations completely.
+ if (!adev->mm_stats.log2_max_MBps)
+ return 0;
+
+ total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+ used_vram = atomic64_read(&adev->vram_usage);
+ free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+
+ spin_lock(&adev->mm_stats.lock);
+
+ /* Increase the amount of accumulated us. */
+ time_us = ktime_to_us(ktime_get());
+ increment_us = time_us - adev->mm_stats.last_update_us;
+ adev->mm_stats.last_update_us = time_us;
+ adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
+ us_upper_bound);
+
+ /* This prevents the short period of low performance when the VRAM
+ * usage is low and the driver is in debt or doesn't have enough
+ * accumulated us to fill VRAM quickly.
*
- * The idea is that buffers should be placed in VRAM at creation time
- * and TTM should only do a minimum number of relocations during
- * command submission. In practice, you need to submit at least
- * a dozen IBs to move all buffers to VRAM if they are in GTT.
+ * The situation can occur in these cases:
+ * - a lot of VRAM is freed by userspace
+ * - the presence of a big buffer causes a lot of evictions
+ * (solution: split buffers into smaller ones)
*
- * Also, things can get pretty crazy under memory pressure and actual
- * VRAM usage can change a lot, so playing safe even at 50% does
- * consistently increase performance.
+ * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
+ * accum_us to a positive number.
+ */
+ if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
+ s64 min_us;
+
+ /* Be more aggressive on dGPUs. Try to fill a portion of free
+ * VRAM now.
+ */
+ if (!(adev->flags & AMD_IS_APU))
+ min_us = bytes_to_us(adev, free_vram / 4);
+ else
+ min_us = 0; /* Reset accum_us on APUs. */
+
+ adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
+ }
+
+ /* This returns 0 if the driver is in debt to disallow (optional)
+ * buffer moves.
+ */
+ max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+ spin_unlock(&adev->mm_stats.lock);
+ return max_bytes;
+}
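A worked instance of this accounting, assuming the default 8 MB/s budget set up later in amdgpu_device_init() (log2_max_MBps = ilog2(8) = 3): one second of wall time adds 1,000,000 us to accum_us, which us_to_bytes() turns into 1,000,000 << 3 = 8,000,000 bytes of moves, and the 200,000 us cap above therefore bounds the per-IB budget at 1.6 MB. A minimal stand-alone sketch of the conversion (user-space; the names mirror the driver but nothing here is kernel API):

	#include <stdio.h>
	#include <stdint.h>

	/* User-space mirror of the driver's conversion, assuming the default
	 * 8 MB/s budget (log2_max_MBps = ilog2(8) = 3). Illustrative only. */
	static uint64_t us_to_bytes(int64_t us, unsigned int log2_max_MBps)
	{
		if (us <= 0 || !log2_max_MBps)
			return 0;
		/* accum_us grows by a million per second, so multiplying by
		 * the MB/s rate yields bytes */
		return (uint64_t)us << log2_max_MBps;
	}

	int main(void)
	{
		/* the 200,000 us per-IB cap at the default 8 MB/s rate */
		printf("budget = %llu bytes\n",
		       (unsigned long long)us_to_bytes(200000, 3));
		/* prints 1600000: at most ~1.6 MB of optional moves per IB */
		return 0;
	}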
+
+/* Report how many bytes have really been moved for the last command
+ * submission. This can result in a debt that can stop buffer migrations
+ * temporarily.
+ */
+static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev,
+ u64 num_bytes)
+{
+ spin_lock(&adev->mm_stats.lock);
+ adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+ spin_unlock(&adev->mm_stats.lock);
+}
+
+static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
+ struct amdgpu_bo *bo)
+{
+ u64 initial_bytes_moved;
+ uint32_t domain;
+ int r;
+
+ if (bo->pin_count)
+ return 0;
+
+ /* Don't move this buffer if we have depleted our allowance
+ * to move it. Don't move anything if the threshold is zero.
*/
+ if (p->bytes_moved < p->bytes_moved_threshold)
+ domain = bo->prefered_domains;
+ else
+ domain = bo->allowed_domains;
+
+retry:
+ amdgpu_ttm_placement_from_domain(bo, domain);
+ initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ initial_bytes_moved;
+
+ if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+ domain = bo->allowed_domains;
+ goto retry;
+ }
- u64 half_vram = real_vram_size >> 1;
- u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
- u64 bytes_moved_threshold = half_free_vram >> 1;
- return max(bytes_moved_threshold, 1024*1024ull);
+ return r;
}
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
+/* Last resort, try to evict something from the current working set */
+static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
+ struct amdgpu_bo_list_entry *lobj)
+{
+ uint32_t domain = lobj->robj->allowed_domains;
+ int r;
+
+ if (!p->evictable)
+ return false;
+
+ for (;&p->evictable->tv.head != &p->validated;
+ p->evictable = list_prev_entry(p->evictable, tv.head)) {
+
+ struct amdgpu_bo_list_entry *candidate = p->evictable;
+ struct amdgpu_bo *bo = candidate->robj;
+ u64 initial_bytes_moved;
+ uint32_t other;
+
+ /* If we reached our current BO we can forget it */
+ if (candidate == lobj)
+ break;
+
+ other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+
+ /* Check if this BO is in one of the domains we need space for */
+ if (!(other & domain))
+ continue;
+
+ /* Check if we can move this BO somewhere else */
+ other = bo->allowed_domains & ~domain;
+ if (!other)
+ continue;
+
+ /* Good we can try to move this BO somewhere else */
+ amdgpu_ttm_placement_from_domain(bo, other);
+ initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
+ initial_bytes_moved;
+
+ if (unlikely(r))
+ break;
+
+ p->evictable = list_prev_entry(p->evictable, tv.head);
+ list_move(&candidate->tv.head, &p->validated);
+
+ return true;
+ }
+
+ return false;
+}
+
+static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
struct amdgpu_bo_list_entry *lobj;
- u64 initial_bytes_moved;
int r;
list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = lobj->robj;
bool binding_userptr = false;
struct mm_struct *usermm;
- uint32_t domain;
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm && usermm != current->mm)
@@ -313,35 +461,19 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
binding_userptr = true;
}
- if (bo->pin_count)
- continue;
-
- /* Avoid moving this one if we have moved too many buffers
- * for this IB already.
- *
- * Note that this allows moving at least one buffer of
- * any size, because it doesn't take the current "bo"
- * into account. We don't want to disallow buffer moves
- * completely.
- */
- if (p->bytes_moved <= p->bytes_moved_threshold)
- domain = bo->prefered_domains;
- else
- domain = bo->allowed_domains;
-
- retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
- initial_bytes_moved;
+ if (p->evictable == lobj)
+ p->evictable = NULL;
- if (unlikely(r)) {
- if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
- domain = bo->allowed_domains;
- goto retry;
- }
+ do {
+ r = amdgpu_cs_bo_validate(p, bo);
+ } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj));
+ if (r)
return r;
+
+ if (bo->shadow) {
+ r = amdgpu_cs_bo_validate(p, bo);
+ if (r)
+ return r;
}
if (binding_userptr) {
@@ -386,8 +518,11 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
goto error_free_pages;
+ }
/* Without a BO list we don't have userptr BOs */
if (!p->bo_list)
@@ -427,9 +562,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
/* Unreserve everything again. */
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
- /* We tried to often, just abort */
+ /* We tried too many times, just abort */
if (!--tries) {
r = -EDEADLK;
+ DRM_ERROR("deadlock in %s\n", __func__);
goto error_free_pages;
}
@@ -441,11 +577,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
sizeof(struct page*));
if (!e->user_pages) {
r = -ENOMEM;
+ DRM_ERROR("calloc failure in %s\n", __func__);
goto error_free_pages;
}
r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
if (r) {
+ DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
drm_free_large(e->user_pages);
e->user_pages = NULL;
goto error_free_pages;
@@ -460,14 +598,23 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
+ p->evictable = list_last_entry(&p->validated,
+ struct amdgpu_bo_list_entry,
+ tv.head);
r = amdgpu_cs_list_validate(p, &duplicates);
- if (r)
+ if (r) {
+ DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
goto error_validate;
+ }
r = amdgpu_cs_list_validate(p, &p->validated);
- if (r)
+ if (r) {
+ DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
goto error_validate;
+ }
+
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
fpriv->vm.last_eviction_counter =
atomic64_read(&p->adev->num_evictions);
@@ -499,8 +646,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
}
- if (p->uf_entry.robj)
- p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+ if (!r && p->uf_entry.robj) {
+ struct amdgpu_bo *uf = p->uf_entry.robj;
+
+ r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
+ p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
+ }
error_validate:
if (r) {
@@ -617,7 +768,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
if (bo_va == NULL)
continue;
- r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
@@ -710,6 +861,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (r)
return r;
+ if (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
+ parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+ if (!parser->ctx->preamble_presented) {
+ parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+ parser->ctx->preamble_presented = true;
+ }
+ }
+
if (parser->job->ring && parser->job->ring != ring)
return -EINVAL;
@@ -849,7 +1008,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
job->owner = p->filp;
- job->ctx = entity->fence_context;
+ job->fence_ctx = entity->fence_context;
p->fence = fence_get(&job->base.s_fence->finished);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
@@ -1015,3 +1174,29 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
return NULL;
}
+
+/**
+ * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
+ *
+ * @parser: command submission parser context
+ *
+ * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
+ */
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
+{
+ unsigned i;
+ int r;
+
+ if (!parser->bo_list)
+ return 0;
+
+ for (i = 0; i < parser->bo_list->num_entries; i++) {
+ struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (unlikely(r))
+ return r;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 17e13621fae9..a5e2fcbef0f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
+
+ ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -60,6 +63,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
amd_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx->fences);
+ ctx->fences = NULL;
return r;
}
return 0;
@@ -77,6 +81,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
for (j = 0; j < amdgpu_sched_jobs; ++j)
fence_put(ctx->rings[i].fences[j]);
kfree(ctx->fences);
+ ctx->fences = NULL;
for (i = 0; i < adev->num_rings; i++)
amd_sched_entity_fini(&adev->rings[i]->sched,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 39c01b942ee4..3161d77bf299 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -41,16 +41,26 @@
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "si.h"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
+#include <linux/pci.h>
+#include <linux/firmware.h>
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = {
+ "TAHITI",
+ "PITCAIRN",
+ "VERDE",
+ "OLAND",
+ "HAINAN",
"BONAIRE",
"KAVERI",
"KABINI",
@@ -101,7 +111,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
bool always_indirect)
{
trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
else {
@@ -642,6 +652,33 @@ bool amdgpu_card_posted(struct amdgpu_device *adev)
}
+static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ if (amdgpu_passthrough(adev)) {
+ /* for FIJI: in the whole-GPU pass-through virtualization case, after a
+ * VM reboot some old SMC firmware still needs the driver to do a vPost,
+ * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
+ * this flaw, so force vPost for SMC versions below 22.15.
+ */
+ if (adev->asic_type == CHIP_FIJI) {
+ int err;
+ uint32_t fw_ver;
+ err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
+ /* force vPost if error occured */
+ if (err)
+ return true;
+
+ fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+ if (fw_ver < 0x00160e00)
+ return true;
+ }
+ }
+ return !amdgpu_card_posted(adev);
+}
+
/**
* amdgpu_dummy_page_init - init dummy page used by the driver
*
@@ -1026,7 +1063,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_resume_kms(dev, true, true);
+ amdgpu_device_resume(dev, true, true);
dev->pdev->d3_delay = d3_delay;
@@ -1036,7 +1073,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
printk(KERN_INFO "amdgpu: switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_suspend_kms(dev, true, true);
+ amdgpu_device_suspend(dev, true, true);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1181,10 +1218,38 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+{
+ adev->enable_virtual_display = false;
+
+ if (amdgpu_virtual_display) {
+ struct drm_device *ddev = adev->ddev;
+ const char *pci_address_name = pci_name(ddev->pdev);
+ char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+
+ pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
+ pciaddstr_tmp = pciaddstr;
+ while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+ if (!strcmp(pci_address_name, pciaddname)) {
+ adev->enable_virtual_display = true;
+ break;
+ }
+ }
+
+ DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display);
+
+ kfree(pciaddstr);
+ }
+}
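One subtlety above: strsep() advances pciaddstr_tmp as it cuts the duplicated string into tokens, which is why the untouched pciaddstr is what gets passed to kfree(). A stand-alone sketch of the same tokenization, using the address-list format the new module parameter documents (the PCI addresses here are made up):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		/* format per the virtual_display parameter: addresses
		 * separated by ';'; these addresses are hypothetical */
		char *list = strdup("0000:01:00.0;0000:02:00.0");
		char *cur = list, *tok;
		const char *me = "0000:02:00.0";

		while ((tok = strsep(&cur, ";")))
			if (!strcmp(tok, me))
				printf("virtual display enabled for %s\n", tok);

		free(list); /* 'list' still points at the allocation; 'cur' no longer does */
		return 0;
	}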
+
static int amdgpu_early_init(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_whether_enable_virtual_display(adev);
+
switch (adev->asic_type) {
case CHIP_TOPAZ:
case CHIP_TONGA:
@@ -1202,6 +1267,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
if (r)
return r;
break;
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_VERDE:
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ adev->family = AMDGPU_FAMILY_SI;
+ r = si_set_ip_blocks(adev);
+ if (r)
+ return r;
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -1318,19 +1395,25 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
continue;
- /* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
- return r;
- }
if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (r) {
DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
+ adev->ip_block_status[i].late_initialized = true;
+ }
+ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ /* enable clockgating to save power */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
}
}
@@ -1341,6 +1424,30 @@ static int amdgpu_fini(struct amdgpu_device *adev)
{
int i, r;
+ /* need to disable SMC first */
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].hw)
+ continue;
+ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ /* ungate blocks before hw fini so that we can shutdown the blocks safely */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
+ r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ }
+ adev->ip_block_status[i].hw = false;
+ break;
+ }
+ }
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].hw)
continue;
@@ -1376,8 +1483,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_block_status[i].late_initialized)
+ continue;
if (adev->ip_blocks[i].funcs->late_fini)
adev->ip_blocks[i].funcs->late_fini((void *)adev);
+ adev->ip_block_status[i].late_initialized = false;
}
return 0;
@@ -1433,13 +1543,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_device_is_virtual(void)
+static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
-#ifdef CONFIG_X86
- return boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#else
- return false;
-#endif
+ if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}
/**
@@ -1461,6 +1568,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
{
int r, i;
bool runtime = false;
+ u32 max_MBps;
adev->shutdown = false;
adev->dev = &pdev->dev;
@@ -1484,6 +1592,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->smc_wreg = &amdgpu_invalid_wreg;
adev->pcie_rreg = &amdgpu_invalid_rreg;
adev->pcie_wreg = &amdgpu_invalid_wreg;
+ adev->pciep_rreg = &amdgpu_invalid_rreg;
+ adev->pciep_wreg = &amdgpu_invalid_wreg;
adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -1520,9 +1630,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->didt_idx_lock);
spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
+ spin_lock_init(&adev->mm_stats.lock);
+
+ INIT_LIST_HEAD(&adev->shadow_list);
+ mutex_init(&adev->shadow_list_lock);
+
+ INIT_LIST_HEAD(&adev->gtt_list);
+ spin_lock_init(&adev->gtt_list_lock);
+
+ if (adev->asic_type >= CHIP_BONAIRE) {
+ adev->rmmio_base = pci_resource_start(adev->pdev, 5);
+ adev->rmmio_size = pci_resource_len(adev->pdev, 5);
+ } else {
+ adev->rmmio_base = pci_resource_start(adev->pdev, 2);
+ adev->rmmio_size = pci_resource_len(adev->pdev, 2);
+ }
- adev->rmmio_base = pci_resource_start(adev->pdev, 5);
- adev->rmmio_size = pci_resource_len(adev->pdev, 5);
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
if (adev->rmmio == NULL) {
return -ENOMEM;
@@ -1530,8 +1653,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
+ if (adev->asic_type >= CHIP_BONAIRE)
+ /* doorbell bar mapping */
+ amdgpu_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1579,25 +1703,24 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
- /* See if the asic supports SR-IOV */
- adev->virtualization.supports_sr_iov =
- amdgpu_atombios_has_gpu_virtualization_table(adev);
-
- /* Check if we are executing in a virtualized environment */
- adev->virtualization.is_virtual = amdgpu_device_is_virtual();
- adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+ /* detect if we are with an SRIOV vbios */
+ amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (!amdgpu_card_posted(adev) ||
- (adev->virtualization.is_virtual &&
- !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
+ if (amdgpu_vpost_needed(adev)) {
if (!adev->bios) {
- dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
+ dev_err(adev->dev, "no vBIOS found\n");
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU not posted. posting now...\n");
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ DRM_INFO("GPU posting now...\n");
+ r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (r) {
+ dev_err(adev->dev, "gpu post error!\n");
+ goto failed;
+ }
+ } else {
+ DRM_INFO("GPU post is not needed\n");
}
/* Initialize clocks */
@@ -1628,6 +1751,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->accel_working = true;
+ /* Initialize the buffer migration limit. */
+ if (amdgpu_moverate >= 0)
+ max_MBps = amdgpu_moverate;
+ else
+ max_MBps = 8; /* Allow 8 MB/s. */
+ /* Get a log2 for easy divisions. */
+ adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
+
amdgpu_fbdev_init(adev);
r = amdgpu_ib_pool_init(adev);
@@ -1732,7 +1863,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
+ if (adev->asic_type >= CHIP_BONAIRE)
+ amdgpu_doorbell_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
amdgpu_debugfs_remove_files(adev);
}
@@ -1742,7 +1874,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
* Suspend & resume.
*/
/**
- * amdgpu_suspend_kms - initiate device suspend
+ * amdgpu_device_suspend - initiate device suspend
*
* @pdev: drm dev pointer
* @state: suspend state
@@ -1751,7 +1883,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
struct amdgpu_device *adev;
struct drm_crtc *crtc;
@@ -1814,11 +1946,16 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
/* evict remaining vram memory */
amdgpu_bo_evict_vram(adev);
+ amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
+ } else {
+ r = amdgpu_asic_reset(adev);
+ if (r)
+ DRM_ERROR("amdgpu asic reset failed\n");
}
if (fbcon) {
@@ -1830,7 +1967,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
}
/**
- * amdgpu_resume_kms - initiate device resume
+ * amdgpu_device_resume - initiate device resume
*
* @pdev: drm dev pointer
*
@@ -1838,7 +1975,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
struct amdgpu_device *adev = dev->dev_private;
@@ -1848,22 +1985,27 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (fbcon) {
+ if (fbcon)
console_lock();
- }
+
if (resume) {
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev)) {
+ r = pci_enable_device(dev->pdev);
+ if (r) {
if (fbcon)
console_unlock();
- return -1;
+ return r;
}
}
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
- if (!amdgpu_card_posted(adev))
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (!amdgpu_card_posted(adev) || !resume) {
+ r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (r)
+ DRM_ERROR("amdgpu asic init failed\n");
+ }
r = amdgpu_resume(adev);
if (r)
@@ -1937,6 +2079,135 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
return 0;
}
+static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+{
+ int i;
+ bool asic_hang = false;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].funcs->check_soft_reset)
+ adev->ip_block_status[i].hang =
+ adev->ip_blocks[i].funcs->check_soft_reset(adev);
+ if (adev->ip_block_status[i].hang) {
+ DRM_INFO("IP block:%d is hang!\n", i);
+ asic_hang = true;
+ }
+ }
+ return asic_hang;
+}
+
+static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_block_status[i].hang &&
+ adev->ip_blocks[i].funcs->pre_soft_reset) {
+ r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_block_status[i].hang) {
+ DRM_INFO("Some block need full reset!\n");
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static int amdgpu_soft_reset(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_block_status[i].hang &&
+ adev->ip_blocks[i].funcs->soft_reset) {
+ r = adev->ip_blocks[i].funcs->soft_reset(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_block_status[i].hang &&
+ adev->ip_blocks[i].funcs->post_soft_reset)
+ r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ return amdgpu_lockup_timeout > 0 ? true : false;
+}
+
+static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct fence **fence)
+{
+ uint32_t domain;
+ int r;
+
+ if (!bo->shadow)
+ return 0;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (r)
+ return r;
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ /* if bo has been evicted, then no need to recover */
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+ NULL, fence, true);
+ if (r) {
+ DRM_ERROR("recover page table failed!\n");
+ goto err;
+ }
+ }
+err:
+ amdgpu_bo_unreserve(bo);
+ return r;
+}
+
/**
* amdgpu_gpu_reset - reset the asic
*
@@ -1949,6 +2220,12 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
int i, r;
int resched;
+ bool need_full_reset;
+
+ if (!amdgpu_check_soft_reset(adev)) {
+ DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
+ return 0;
+ }
atomic_inc(&adev->gpu_reset_counter);
@@ -1967,40 +2244,90 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(adev);
- /* save scratch */
- amdgpu_atombios_scratch_regs_save(adev);
- r = amdgpu_suspend(adev);
+ need_full_reset = amdgpu_need_full_reset(adev);
-retry:
- /* Disable fb access */
- if (adev->mode_info.num_crtc) {
- struct amdgpu_mode_mc_save save;
- amdgpu_display_stop_mc_access(adev, &save);
- amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!need_full_reset) {
+ amdgpu_pre_soft_reset(adev);
+ r = amdgpu_soft_reset(adev);
+ amdgpu_post_soft_reset(adev);
+ if (r || amdgpu_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
}
- r = amdgpu_asic_reset(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (need_full_reset) {
+ r = amdgpu_suspend(adev);
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_resume(adev);
+retry:
+ /* Disable fb access */
+ if (adev->mode_info.num_crtc) {
+ struct amdgpu_mode_mc_save save;
+ amdgpu_display_stop_mc_access(adev, &save);
+ amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+ }
+ amdgpu_atombios_scratch_regs_save(adev);
+ r = amdgpu_asic_reset(adev);
+ amdgpu_atombios_scratch_regs_restore(adev);
+ /* post card */
+ amdgpu_atom_asic_init(adev->mode_info.atom_context);
+
+ if (!r) {
+ dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_resume(adev);
+ }
}
- /* restore scratch */
- amdgpu_atombios_scratch_regs_restore(adev);
if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(adev);
+ if (need_full_reset && amdgpu_need_backup(adev)) {
+ r = amdgpu_ttm_recover_gart(adev);
+ if (r)
+ DRM_ERROR("gart recovery failed!!!\n");
+ }
r = amdgpu_ib_ring_tests(adev);
if (r) {
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
r = amdgpu_suspend(adev);
+ need_full_reset = true;
goto retry;
}
+ /* Recover the VM page tables, since we cannot depend on VRAM
+ * being consistent after a full GPU reset.
+ */
+ if (need_full_reset && amdgpu_need_backup(adev)) {
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_bo *bo, *tmp;
+ struct fence *fence = NULL, *next = NULL;
+
+ DRM_INFO("recover vram bo from shadow\n");
+ mutex_lock(&adev->shadow_list_lock);
+ list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+ amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+ if (fence) {
+ r = fence_wait(fence, false);
+ if (r) {
+ WARN(r, "recovery from shadow isn't comleted\n");
+ break;
+ }
+ }
+ fence_put(fence);
+ fence = next;
+ }
+ mutex_unlock(&adev->shadow_list_lock);
+ if (fence) {
+ r = fence_wait(fence, false);
+ if (r)
+ WARN(r, "recovery from shadow isn't comleted\n");
+ }
+ fence_put(fence);
+ }
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
+
amd_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
}
@@ -2020,7 +2347,6 @@ retry:
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
}
- amdgpu_irq_gpu_reset_resume_helper(adev);
return r;
}
@@ -2178,22 +2504,26 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
- bool use_bank;
+ bool pm_pg_lock, use_bank;
unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ /* are we reading registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
if (*pos & (1ULL << 62)) {
se_bank = (*pos >> 24) & 0x3FF;
sh_bank = (*pos >> 34) & 0x3FF;
instance_bank = (*pos >> 44) & 0x3FF;
use_bank = 1;
- *pos &= 0xFFFFFF;
} else {
use_bank = 0;
}
+ *pos &= 0x3FFFF;
+
if (use_bank) {
if (sh_bank >= adev->gfx.config.max_sh_per_se ||
se_bank >= adev->gfx.config.max_shader_engines)
@@ -2203,6 +2533,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
sh_bank, instance_bank);
}
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
while (size) {
uint32_t value;
@@ -2228,6 +2561,9 @@ end:
mutex_unlock(&adev->grbm_idx_mutex);
}
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
return result;
}
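The read path above packs its controls into the file offset: bit 62 selects a banked read, bits 24/34/44 carry the SE/SH/instance bank (10 bits each), bit 23 requests the new PM/PG lock, and the low bits that survive the mask are the register byte offset. A user-space helper that builds such an offset for pread() on the amdgpu_regs debugfs file might look like this (an illustrative sketch, not a kernel interface):

	#include <stdio.h>
	#include <stdint.h>

	/* Build a debugfs amdgpu_regs offset per the bit layout above.
	 * Illustrative helper under the assumption of a 22-bit offset field. */
	static uint64_t amdgpu_regs_pos(uint32_t reg_byte_off, unsigned se,
	                                unsigned sh, unsigned instance, int pg_lock)
	{
		uint64_t pos = reg_byte_off & ((1ULL << 22) - 1);

		pos |= (uint64_t)(se & 0x3FF) << 24;
		pos |= (uint64_t)(sh & 0x3FF) << 34;
		pos |= (uint64_t)(instance & 0x3FF) << 44;
		pos |= 1ULL << 62;              /* banked read */
		if (pg_lock)
			pos |= 1ULL << 23;      /* hold pm.mutex across the access */
		return pos;
	}

	int main(void)
	{
		/* SE 1, SH 0, instance 0, PG lock, register byte offset 0x100 */
		printf("pos = 0x%llx\n",
		       (unsigned long long)amdgpu_regs_pos(0x100, 1, 0, 0, 1));
		return 0;
	}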
@@ -2385,7 +2721,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
- value = RREG32_SMC(*pos >> 2);
+ value = RREG32_SMC(*pos);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
@@ -2416,7 +2752,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (r)
return r;
- WREG32_SMC(*pos >> 2, value);
+ WREG32_SMC(*pos, value);
result += 4;
buf += 4;
@@ -2438,12 +2774,12 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+ config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
/* version, increment each time something is added */
- config[no_regs++] = 0;
+ config[no_regs++] = 2;
config[no_regs++] = adev->gfx.config.max_shader_engines;
config[no_regs++] = adev->gfx.config.max_tile_pipes;
config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -2468,6 +2804,15 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
config[no_regs++] = adev->gfx.config.gb_addr_config;
config[no_regs++] = adev->gfx.config.num_rbs;
+ /* rev==1 */
+ config[no_regs++] = adev->rev_id;
+ config[no_regs++] = adev->pg_flags;
+ config[no_regs++] = adev->cg_flags;
+
+ /* rev==2 */
+ config[no_regs++] = adev->family;
+ config[no_regs++] = adev->external_rev_id;
+
while (size && (*pos < no_regs * 4)) {
uint32_t value;
@@ -2488,6 +2833,29 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
return result;
}
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int idx, r;
+ int32_t value;
+
+ if (size != 4 || *pos & 0x3)
+ return -EINVAL;
+
+ /* convert offset to sensor number */
+ idx = *pos >> 2;
+
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
+ else
+ return -EINVAL;
+
+ if (!r)
+ r = put_user(value, (int32_t *)buf);
+
+ return !r ? 4 : r;
+}
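Each read returns one 32-bit sensor value, with the file position selecting the sensor (idx = *pos >> 2, i.e. byte offset = index * 4). A user-space sketch of reading sensor 0 (the debugfs path and the meaning of individual indices are assumptions; the patch only defines the offset encoding):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>

	int main(void)
	{
		int32_t value;
		int idx = 0; /* sensor index; mapping is powerplay-defined */
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_sensors", O_RDONLY);

		if (fd < 0)
			return 1;
		/* reads must be exactly 4 bytes at a 4-byte-aligned offset */
		if (pread(fd, &value, sizeof(value), (off_t)idx * 4) == sizeof(value))
			printf("sensor %d = %d\n", idx, value);
		close(fd);
		return 0;
	}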
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
@@ -2520,12 +2888,19 @@ static const struct file_operations amdgpu_debugfs_gca_config_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_sensor_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
+ &amdgpu_debugfs_sensors_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2534,6 +2909,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
"amdgpu_gca_config",
+ "amdgpu_sensors",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 76f96028313d..083e2b429872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
container_of(cb, struct amdgpu_flip_work, cb);
fence_put(f);
- schedule_work(&work->flip_work);
+ schedule_work(&work->flip_work.work);
}
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
static void amdgpu_flip_work_func(struct work_struct *__work)
{
+ struct delayed_work *delayed_work =
+ container_of(__work, struct delayed_work, work);
struct amdgpu_flip_work *work =
- container_of(__work, struct amdgpu_flip_work, flip_work);
+ container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
- unsigned i, repcnt = 4;
- int vpos, hpos, stat, min_udelay = 0;
- struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+ unsigned i;
+ int vpos, hpos;
if (amdgpu_flip_handle_fence(work, &work->excl))
return;
@@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
if (amdgpu_flip_handle_fence(work, &work->shared[i]))
return;
- /* We borrow the event spin lock for protecting flip_status */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
- /* If this happens to execute within the "virtually extended" vblank
- * interval before the start of the real vblank interval then it needs
- * to delay programming the mmio flip until the real vblank is entered.
- * This prevents completing a flip too early due to the way we fudge
- * our vblank counter and vblank timestamps in order to work around the
- * problem that the hw fires vblank interrupts before actual start of
- * vblank (when line buffer refilling is done for a frame). It
- * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
- * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
- *
- * In practice this won't execute very often unless on very fast
- * machines because the time window for this to happen is very small.
+ /* Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
*/
- while (amdgpuCrtc->enabled && --repcnt) {
- /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
- * start in hpos, and to the "fudged earlier" vblank start in
- * vpos.
- */
- stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
- GET_DISTANCE_TO_VBLANKSTART,
- &vpos, &hpos, NULL, NULL,
- &crtc->hwmode);
-
- if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
- (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
- !(vpos >= 0 && hpos <= 0))
- break;
-
- /* Sleep at least until estimated real start of hw vblank */
- min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
- if (min_udelay > vblank->framedur_ns / 2000) {
- /* Don't wait ridiculously long - something is wrong */
- repcnt = 0;
- break;
- }
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- usleep_range(min_udelay, 2 * min_udelay);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (amdgpuCrtc->enabled &&
+ (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(work->target_vblank -
+ amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+ schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
+ return;
}
- if (!repcnt)
- DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
- "framedur %d, linedur %d, stat %d, vpos %d, "
- "hpos %d\n", work->crtc_id, min_udelay,
- vblank->framedur_ns / 1000,
- vblank->linedur_ns / 1000, stat, vpos, hpos);
+ /* We borrow the event spin lock for protecting flip_status */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
/* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
@@ -154,25 +123,25 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
int r;
/* unpin of the old buffer */
- r = amdgpu_bo_reserve(work->old_rbo, false);
+ r = amdgpu_bo_reserve(work->old_abo, false);
if (likely(r == 0)) {
- r = amdgpu_bo_unpin(work->old_rbo);
+ r = amdgpu_bo_unpin(work->old_abo);
if (unlikely(r != 0)) {
DRM_ERROR("failed to unpin buffer after flip\n");
}
- amdgpu_bo_unreserve(work->old_rbo);
+ amdgpu_bo_unreserve(work->old_abo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- amdgpu_bo_unref(&work->old_rbo);
+ amdgpu_bo_unref(&work->old_abo);
kfree(work->shared);
kfree(work);
}
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -181,7 +150,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
struct amdgpu_framebuffer *new_amdgpu_fb;
struct drm_gem_object *obj;
struct amdgpu_flip_work *work;
- struct amdgpu_bo *new_rbo;
+ struct amdgpu_bo *new_abo;
unsigned long flags;
u64 tiling_flags;
u64 base;
@@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
- INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+ INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
work->event = event;
@@ -204,28 +173,28 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
obj = old_amdgpu_fb->obj;
/* take a reference to the old object */
- work->old_rbo = gem_to_amdgpu_bo(obj);
- amdgpu_bo_ref(work->old_rbo);
+ work->old_abo = gem_to_amdgpu_bo(obj);
+ amdgpu_bo_ref(work->old_abo);
new_amdgpu_fb = to_amdgpu_framebuffer(fb);
obj = new_amdgpu_fb->obj;
- new_rbo = gem_to_amdgpu_bo(obj);
+ new_abo = gem_to_amdgpu_bo(obj);
/* pin the new buffer */
- r = amdgpu_bo_reserve(new_rbo, false);
+ r = amdgpu_bo_reserve(new_abo, false);
if (unlikely(r != 0)) {
- DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+ DRM_ERROR("failed to reserve new abo buffer before flip\n");
goto cleanup;
}
- r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
+ r = amdgpu_bo_pin_restricted(new_abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
if (unlikely(r != 0)) {
r = -EINVAL;
- DRM_ERROR("failed to pin new rbo buffer before flip\n");
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
}
- r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+ r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
@@ -233,16 +202,12 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
goto unpin;
}
- amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
- amdgpu_bo_unreserve(new_rbo);
+ amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
+ amdgpu_bo_unreserve(new_abo);
work->base = base;
-
- r = drm_crtc_vblank_get(crtc);
- if (r) {
- DRM_ERROR("failed to get vblank before flip\n");
- goto pflip_cleanup;
- }
+ work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
/* we borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -250,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
r = -EBUSY;
- goto vblank_cleanup;
+ goto pflip_cleanup;
}
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
@@ -262,26 +227,23 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- amdgpu_flip_work_func(&work->flip_work);
+ amdgpu_flip_work_func(&work->flip_work.work);
return 0;
-vblank_cleanup:
- drm_crtc_vblank_put(crtc);
-
pflip_cleanup:
- if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
- DRM_ERROR("failed to reserve new rbo in error path\n");
+ if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+ DRM_ERROR("failed to reserve new abo in error path\n");
goto cleanup;
}
unpin:
- if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
- DRM_ERROR("failed to unpin new rbo in error path\n");
+ if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+ DRM_ERROR("failed to unpin new abo in error path\n");
}
unreserve:
- amdgpu_bo_unreserve(new_rbo);
+ amdgpu_bo_unreserve(new_abo);
cleanup:
- amdgpu_bo_unref(&work->old_rbo);
+ amdgpu_bo_unref(&work->old_abo);
fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
fence_put(work->shared[i]);
@@ -335,7 +297,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set)
return ret;
}
-static const char *encoder_names[38] = {
+static const char *encoder_names[41] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
@@ -374,6 +336,9 @@ static const char *encoder_names[38] = {
"TRAVIS",
"INTERNAL_VCE",
"INTERNAL_UNIPHY3",
+ "HDMI_ANX9805",
+ "INTERNAL_AMCLK",
+ "VIRTUAL",
};
static const char *hpd_names[6] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index fe36caf1b7d7..14f57d9915e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
printk("\n");
}
+
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ vblank_in_pixels =
+ amdgpu_crtc->hw_mode.crtc_htotal *
+ (amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ (amdgpu_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
break;
}
}
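A worked example of why this reordering helps, using illustrative 1080p60 CEA timings (htotal = 2200, crtc_vblank_end - crtc_vdisplay = 45 lines, 148,500 kHz clock, no borders): the old code first truncated line_time_us to (2200 * 1000) / 148500 = 14 us and reported 14 * 45 = 630 us, while multiplying first gives 2200 * 45 * 1000 / 148500 ≈ 666 us, keeping the precision the intermediate division threw away.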
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 9aa533cf4ad1..02ff0747197c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -53,13 +53,19 @@
* - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
* at the end of IBs.
* - 3.3.0 - Add VM support for UVD on supported hardware.
+ * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
+ * - 3.5.0 - Add support for new UVD_NO_OP register.
+ * - 3.6.0 - KMD uses CONTEXT_CONTROL in the ring buffer.
+ * - 3.7.0 - Add support for VCE clock list packet
+ * - 3.8.0 - Add support for raster config init in the kernel
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 3
+#define KMS_DRIVER_MINOR 8
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
+int amdgpu_moverate = -1; /* auto */
int amdgpu_benchmarking = 0;
int amdgpu_testing = 0;
int amdgpu_audio = -1;
@@ -84,11 +90,14 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
int amdgpu_powercontainment = 1;
+int amdgpu_sclk_deep_sleep_en = 1;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff;
unsigned amdgpu_pg_mask = 0xffffffff;
char *amdgpu_disable_cu = NULL;
+char *amdgpu_virtual_display = NULL;
+unsigned amdgpu_pp_feature_mask = 0xffffffff;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -96,6 +105,9 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
module_param_named(gartsize, amdgpu_gart_size, int, 0600);
+MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1 = auto, 0 or 1 = disabled)");
+module_param_named(moverate, amdgpu_moverate, int, 0600);
+
MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
@@ -162,13 +174,17 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
-#ifdef CONFIG_DRM_AMD_POWERPLAY
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
-#endif
+
+MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
+
+MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)");
+module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444);
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
@@ -185,7 +201,84 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)");
+module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
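With the parsing done in amdgpu_whether_enable_virtual_display() above, a load-time setting such as amdgpu.virtual_display=0000:01:00.0;0000:02:00.0 on the kernel command line (addresses illustrative) enables the DRM_MODE_CONNECTOR_VIRTUAL path in amdgpu_connector_add() for each matching device.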
+
static const struct pci_device_id pciidlist[] = {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
+ {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+ {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+ {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
+ {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
+ {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
+ {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
+ {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
+ {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
+ {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+ {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@@ -341,7 +434,7 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
kfree(ap);
return 0;
@@ -383,32 +476,70 @@ amdgpu_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
+static void
+amdgpu_pci_shutdown(struct pci_dev *pdev)
+{
+	/* If we are running in a VM, make sure the device
+	 * is torn down properly on reboot/shutdown.
+	 * Unfortunately we can't detect certain
+	 * hypervisors, so just do this all the time.
+	 */
+ amdgpu_pci_remove(pdev);
+}
+
static int amdgpu_pmops_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_suspend_kms(drm_dev, true, true);
+ return amdgpu_device_suspend(drm_dev, true, true);
}
static int amdgpu_pmops_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_resume_kms(drm_dev, true, true);
+
+ /* GPU comes up enabled by the bios on resume */
+ if (amdgpu_device_is_px(drm_dev)) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return amdgpu_device_resume(drm_dev, true, true);
}
static int amdgpu_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_suspend_kms(drm_dev, false, true);
+ return amdgpu_device_suspend(drm_dev, false, true);
}
static int amdgpu_pmops_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return amdgpu_device_resume(drm_dev, false, true);
+}
+
+static int amdgpu_pmops_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ return amdgpu_device_suspend(drm_dev, true, true);
+}
+
+static int amdgpu_pmops_restore(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_resume_kms(drm_dev, false, true);
+ return amdgpu_device_resume(drm_dev, false, true);
}
static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -426,7 +557,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
- ret = amdgpu_suspend_kms(drm_dev, false, false);
+ ret = amdgpu_device_suspend(drm_dev, false, false);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
@@ -459,7 +590,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- ret = amdgpu_resume_kms(drm_dev, false, false);
+ ret = amdgpu_device_resume(drm_dev, false, false);
drm_kms_helper_poll_enable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
@@ -513,8 +644,8 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
.resume = amdgpu_pmops_resume,
.freeze = amdgpu_pmops_freeze,
.thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_freeze,
- .restore = amdgpu_pmops_resume,
+ .poweroff = amdgpu_pmops_poweroff,
+ .restore = amdgpu_pmops_restore,
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -596,6 +727,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.id_table = pciidlist,
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
+ .shutdown = amdgpu_pci_shutdown,
.driver.pm = &amdgpu_pm_ops,
};
@@ -603,8 +735,20 @@ static struct pci_driver amdgpu_kms_pci_driver = {
static int __init amdgpu_init(void)
{
- amdgpu_sync_init();
- amdgpu_fence_slab_init();
+ int r;
+
+ r = amdgpu_sync_init();
+ if (r)
+ goto error_sync;
+
+ r = amdgpu_fence_slab_init();
+ if (r)
+ goto error_fence;
+
+ r = amd_sched_fence_slab_init();
+ if (r)
+ goto error_sched;
+
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -616,6 +760,15 @@ static int __init amdgpu_init(void)
amdgpu_register_atpx_handler();
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
+
+error_sched:
+ amdgpu_fence_slab_fini();
+
+error_fence:
+ amdgpu_sync_fini();
+
+error_sync:
+ return r;
}
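
The reworked init path above uses the kernel's standard goto-based unwind: each step that succeeded is torn down in reverse order when a later step fails, with the error labels falling through into one another. A minimal standalone sketch of the same idiom (all names hypothetical, failure simulated):

	#include <stdio.h>

	static int init_sync(void)  { return 0; }   /* stand-in: succeeds */
	static int init_fence(void) { return -1; }  /* stand-in: simulate failure */
	static void fini_sync(void) { puts("fini_sync"); }

	static int init_example(void)
	{
		int r;

		r = init_sync();
		if (r)
			goto error_sync;

		r = init_fence();
		if (r)
			goto error_fence;   /* unwind everything done so far */

		return 0;

	error_fence:
		fini_sync();                /* undo init_sync(), then fall through */
	error_sync:
		return r;
	}

	int main(void)
	{
		return init_example() ? 1 : 0;
	}
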
static void __exit amdgpu_exit(void)
@@ -624,6 +777,7 @@ static void __exit amdgpu_exit(void)
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
+ amd_sched_fence_slab_fini();
amdgpu_fence_slab_fini();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 919146780a15..9fb8aa4d6bae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -25,7 +25,7 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@@ -48,8 +48,35 @@ struct amdgpu_fbdev {
struct amdgpu_device *adev;
};
+static int
+amdgpufb_open(struct fb_info *info, int user)
+{
+ struct amdgpu_fbdev *rfbdev = info->par;
+ struct amdgpu_device *adev = rfbdev->adev;
+ int ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+amdgpufb_release(struct fb_info *info, int user)
+{
+ struct amdgpu_fbdev *rfbdev = info->par;
+ struct amdgpu_device *adev = rfbdev->adev;
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return 0;
+}
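
The new fb_open/fb_release hooks hold a runtime-PM reference for as long as the fbdev node is open. Note that pm_runtime_get_sync() raises the usage count even when it fails, which is why the error path in fb_open still does a put. A toy standalone sketch of the balancing invariant (a plain counter stands in for the real PM calls):

	#include <stdio.h>

	/* Toy stand-ins for pm_runtime_get_sync()/pm_runtime_put_autosuspend();
	 * the real calls adjust the device's runtime-PM usage count. */
	static int usage_count;

	static int get_sync(void)
	{
		usage_count++;          /* incremented even on failure */
		return 0;               /* pretend the resume succeeded */
	}

	static void put_autosuspend(void)
	{
		usage_count--;
	}

	static int fb_open_example(void)
	{
		int ret = get_sync();
		if (ret < 0) {          /* failure: drop the reference we took */
			put_autosuspend();
			return ret;
		}
		return 0;
	}

	static void fb_release_example(void)
	{
		put_autosuspend();      /* balances the get in open */
	}

	int main(void)
	{
		if (!fb_open_example())
			fb_release_example();
		printf("usage_count = %d\n", usage_count);  /* expect 0 */
		return 0;
	}
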
+
static struct fb_ops amdgpufb_ops = {
.owner = THIS_MODULE,
+ .fb_open = amdgpufb_open,
+ .fb_release = amdgpufb_release,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
@@ -88,14 +115,14 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
{
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(gobj);
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
int ret;
- ret = amdgpu_bo_reserve(rbo, false);
+ ret = amdgpu_bo_reserve(abo, false);
if (likely(ret == 0)) {
- amdgpu_bo_kunmap(rbo);
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_kunmap(abo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
drm_gem_object_unreference_unlocked(gobj);
}
@@ -106,7 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
{
struct amdgpu_device *adev = rfbdev->adev;
struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *rbo = NULL;
+ struct amdgpu_bo *abo = NULL;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
int ret;
@@ -132,30 +159,30 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
aligned_size);
return -ENOMEM;
}
- rbo = gem_to_amdgpu_bo(gobj);
+ abo = gem_to_amdgpu_bo(gobj);
if (fb_tiled)
tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
- ret = amdgpu_bo_reserve(rbo, false);
+ ret = amdgpu_bo_reserve(abo, false);
if (unlikely(ret != 0))
goto out_unref;
if (tiling_flags) {
- ret = amdgpu_bo_set_tiling_flags(rbo,
+ ret = amdgpu_bo_set_tiling_flags(abo,
tiling_flags);
if (ret)
dev_err(adev->dev, "FB failed to set tiling flags\n");
}
- ret = amdgpu_bo_pin_restricted(rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
+ ret = amdgpu_bo_pin_restricted(abo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, NULL);
if (ret) {
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
goto out_unref;
}
- ret = amdgpu_bo_kmap(rbo, NULL);
- amdgpu_bo_unreserve(rbo);
+ ret = amdgpu_bo_kmap(abo, NULL);
+ amdgpu_bo_unreserve(abo);
if (ret) {
goto out_unref;
}
@@ -177,7 +204,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *rbo = NULL;
+ struct amdgpu_bo *abo = NULL;
int ret;
unsigned long tmp;
@@ -196,7 +223,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
return ret;
}
- rbo = gem_to_amdgpu_bo(gobj);
+ abo = gem_to_amdgpu_bo(gobj);
	/* okay, we have an object; now allocate the framebuffer */
info = drm_fb_helper_alloc_fbi(helper);
@@ -219,7 +246,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
/* setup helper */
rfbdev->helper.fb = fb;
- memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
+ memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo));
strcpy(info->fix.id, "amdgpudrmfb");
@@ -228,11 +255,11 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &amdgpufb_ops;
- tmp = amdgpu_bo_gpu_offset(rbo) - adev->mc.vram_start;
+ tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
info->fix.smem_start = adev->mc.aper_base + tmp;
- info->fix.smem_len = amdgpu_bo_size(rbo);
- info->screen_base = rbo->kptr;
- info->screen_size = amdgpu_bo_size(rbo);
+ info->fix.smem_len = amdgpu_bo_size(abo);
+ info->screen_base = abo->kptr;
+ info->screen_size = amdgpu_bo_size(abo);
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -249,7 +276,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(rbo));
+ DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
@@ -259,7 +286,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unref:
- if (rbo) {
+ if (abo) {
}
if (fb && ret) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 0b109aebfec6..77b34ec92632 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -68,6 +68,7 @@ int amdgpu_fence_slab_init(void)
void amdgpu_fence_slab_fini(void)
{
+ rcu_barrier();
kmem_cache_destroy(amdgpu_fence_slab);
}
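
The rcu_barrier() added above matters because amdgpu fences are released through call_rcu(), so callbacks that free into the slab can still be queued when the driver unloads; the barrier waits for all of them before the cache is destroyed. A hedged kernel-style sketch of the general pattern (struct and function names hypothetical):

	#include <linux/slab.h>
	#include <linux/rcupdate.h>

	struct my_obj {
		struct rcu_head rcu;
		/* ... payload ... */
	};

	static struct kmem_cache *my_slab;

	static void my_obj_free_rcu(struct rcu_head *rcu)
	{
		struct my_obj *obj = container_of(rcu, struct my_obj, rcu);

		kmem_cache_free(my_slab, obj);  /* runs after a grace period */
	}

	static void my_obj_release(struct my_obj *obj)
	{
		call_rcu(&obj->rcu, my_obj_free_rcu);  /* deferred free */
	}

	static void my_slab_fini(void)
	{
		rcu_barrier();                 /* flush every queued my_obj_free_rcu() */
		kmem_cache_destroy(my_slab);   /* now no callback can touch the cache */
	}
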
/*
@@ -454,6 +455,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
+ ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 0feea347f680..21a1242fc13b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -238,7 +238,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = NULL;
#endif
page_base = adev->dummy_page.addr;
@@ -286,7 +286,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = pagelist[i];
#endif
if (adev->gart.ptr) {
@@ -331,7 +331,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
/* Allocate pages table */
adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
if (adev->gart.pages == NULL) {
@@ -357,7 +357,7 @@ void amdgpu_gart_fini(struct amdgpu_device *adev)
amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
}
adev->gart.ready = false;
-#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
vfree(adev->gart.pages);
adev->gart.pages = NULL;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index 503d54098128..e73728d90388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -31,14 +31,6 @@
#define AMDGPU_GWS_SHIFT PAGE_SHIFT
#define AMDGPU_OA_SHIFT PAGE_SHIFT
-#define AMDGPU_PL_GDS TTM_PL_PRIV0
-#define AMDGPU_PL_GWS TTM_PL_PRIV1
-#define AMDGPU_PL_OA TTM_PL_PRIV2
-
-#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0
-#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1
-#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2
-
struct amdgpu_ring;
struct amdgpu_bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 88fbed2389c0..a7ea9a3b454e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -118,23 +118,23 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
*/
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = rbo->adev;
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = abo->adev;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
int r;
- r = amdgpu_bo_reserve(rbo, false);
+ r = amdgpu_bo_reserve(abo, false);
if (r)
return r;
- bo_va = amdgpu_vm_bo_find(vm, rbo);
+ bo_va = amdgpu_vm_bo_find(vm, abo);
if (!bo_va) {
- bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
+ bo_va = amdgpu_vm_bo_add(adev, vm, abo);
} else {
++bo_va->ref_count;
}
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
return 0;
}
@@ -528,7 +528,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error_unreserve;
if (operation == AMDGPU_VA_OP_MAP)
- r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
error_unreserve:
ttm_eu_backoff_reservation(&ticket, &list);
@@ -547,7 +547,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
struct ttm_validate_buffer tv, tv_pd;
struct ww_acquire_ctx ticket;
@@ -587,10 +587,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- rbo = gem_to_amdgpu_bo(gobj);
+ abo = gem_to_amdgpu_bo(gobj);
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
- tv.bo = &rbo->tbo;
+ tv.bo = &abo->tbo;
tv.shared = true;
list_add(&tv.head, &list);
@@ -604,7 +604,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return r;
}
- bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
+ bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
if (!bo_va) {
ttm_eu_backoff_reservation(&ticket, &list);
drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
new file mode 100644
index 000000000000..f86c84427778
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+struct amdgpu_gtt_mgr {
+ struct drm_mm mm;
+ spinlock_t lock;
+ uint64_t available;
+};
+
+/**
+ * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
+ *
+ * @man: TTM memory type manager
+ * @p_size: maximum size of GTT
+ *
+ * Allocate and initialize the GTT manager.
+ */
+static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
+ unsigned long p_size)
+{
+ struct amdgpu_gtt_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ drm_mm_init(&mgr->mm, 0, p_size);
+ spin_lock_init(&mgr->lock);
+ mgr->available = p_size;
+ man->priv = mgr;
+ return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_fini - free and destroy GTT manager
+ *
+ * @man: TTM memory type manager
+ *
+ * Destroy and free the GTT manager; returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ if (!drm_mm_clean(&mgr->mm)) {
+ spin_unlock(&mgr->lock);
+ return -EBUSY;
+ }
+
+ drm_mm_takedown(&mgr->mm);
+ spin_unlock(&mgr->lock);
+ kfree(mgr);
+ man->priv = NULL;
+ return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_alloc - allocate new ranges
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Allocate the address space for a node.
+ */
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct drm_mm_node *node = mem->mm_node;
+ enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
+ enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+ unsigned long fpfn, lpfn;
+ int r;
+
+ if (node->start != AMDGPU_BO_INVALID_OFFSET)
+ return 0;
+
+ if (place)
+ fpfn = place->fpfn;
+ else
+ fpfn = 0;
+
+ if (place && place->lpfn)
+ lpfn = place->lpfn;
+ else
+ lpfn = man->size;
+
+ if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
+ sflags = DRM_MM_SEARCH_BELOW;
+ aflags = DRM_MM_CREATE_TOP;
+ }
+
+ spin_lock(&mgr->lock);
+ r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
+ mem->page_alignment, 0,
+ fpfn, lpfn, sflags, aflags);
+ spin_unlock(&mgr->lock);
+
+ if (!r) {
+ mem->start = node->start;
+ if (&tbo->mem == mem)
+ tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
+ tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
+ }
+
+ return r;
+}
+
+/**
+ * amdgpu_gtt_mgr_new - allocate a new node
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Dummy allocation: create the node, but don't assign address space yet.
+ */
+static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct drm_mm_node *node;
+ int r;
+
+ spin_lock(&mgr->lock);
+ if (mgr->available < mem->num_pages) {
+ spin_unlock(&mgr->lock);
+ return 0;
+ }
+ mgr->available -= mem->num_pages;
+ spin_unlock(&mgr->lock);
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->start = AMDGPU_BO_INVALID_OFFSET;
+ mem->mm_node = node;
+
+ if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
+ r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
+ if (unlikely(r)) {
+ kfree(node);
+ mem->mm_node = NULL;
+ }
+ } else {
+ mem->start = node->start;
+ }
+
+ return 0;
+}
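
So amdgpu_gtt_mgr_new only does the accounting and, unless the placement is constrained, hands back a node parked at AMDGPU_BO_INVALID_OFFSET; the actual range is assigned later through amdgpu_gtt_mgr_alloc() when a GART binding is really needed. A hedged sketch of the caller's side (in this series that caller is the TTM bind path; function name and error handling here are illustrative):

	static int bind_sketch(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *tbo,
			       struct ttm_mem_reg *mem)
	{
		int r;

		if (mem->start == AMDGPU_BO_INVALID_OFFSET) {
			/* late placement: pick the GTT range only now */
			r = amdgpu_gtt_mgr_alloc(man, tbo, NULL, mem);
			if (r)
				return r;
		}

		/* ... program the GART entries for mem->start here ... */
		return 0;
	}
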
+
+/**
+ * amdgpu_gtt_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object
+ *
+ * Free the allocated GTT again.
+ */
+static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct drm_mm_node *node = mem->mm_node;
+
+ if (!node)
+ return;
+
+ spin_lock(&mgr->lock);
+ if (node->start != AMDGPU_BO_INVALID_OFFSET)
+ drm_mm_remove_node(node);
+ mgr->available += mem->num_pages;
+ spin_unlock(&mgr->lock);
+
+ kfree(node);
+ mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_gtt_mgr_debug - dump GTT table
+ *
+ * @man: TTM memory type manager
+ * @prefix: text prefix
+ *
+ * Dump the table content using printk.
+ */
+static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
+ const char *prefix)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+
+ spin_lock(&mgr->lock);
+ drm_mm_debug_table(&mgr->mm, prefix);
+ spin_unlock(&mgr->lock);
+}
+
+const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
+ amdgpu_gtt_mgr_init,
+ amdgpu_gtt_mgr_fini,
+ amdgpu_gtt_mgr_new,
+ amdgpu_gtt_mgr_del,
+ amdgpu_gtt_mgr_debug
+};
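
For context (hedged, since the corresponding hunk is not part of this excerpt): elsewhere in this series the TT memory type is switched from TTM's generic range manager to this implementation, roughly:

	/* in amdgpu's TTM init path, when setting up TTM_PL_TT: */
	case TTM_PL_TT:
		man->func = &amdgpu_gtt_mgr_func;  /* was &ttm_bo_manager_func */
		man->gpu_offset = adev->mc.gtt_start;
		break;
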
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 31a676376d73..91d367399956 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -158,8 +158,8 @@ static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
};
struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
- struct amdgpu_i2c_bus_rec *rec,
- const char *name)
+ const struct amdgpu_i2c_bus_rec *rec,
+ const char *name)
{
struct amdgpu_i2c_chan *i2c;
int ret;
@@ -186,10 +186,8 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
ret = i2c_add_adapter(&i2c->adapter);
- if (ret) {
- DRM_ERROR("Failed to register hw i2c %s\n", name);
+ if (ret)
goto out_free;
- }
} else {
/* set the amdgpu bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
@@ -222,6 +220,7 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
{
if (!i2c)
return;
+ WARN_ON(i2c->has_aux);
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
@@ -251,8 +250,8 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
/* Add additional buses */
void amdgpu_i2c_add(struct amdgpu_device *adev,
- struct amdgpu_i2c_bus_rec *rec,
- const char *name)
+ const struct amdgpu_i2c_bus_rec *rec,
+ const char *name)
{
struct drm_device *dev = adev->ddev;
int i;
@@ -268,7 +267,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev,
/* looks up bus based on id */
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
- struct amdgpu_i2c_bus_rec *i2c_bus)
+ const struct amdgpu_i2c_bus_rec *i2c_bus)
{
int i;
@@ -338,7 +337,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
/* ddc router switching */
void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
{
u8 val;
@@ -367,7 +366,7 @@ amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector)
/* clock/data router switching */
void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector)
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector)
{
u8 val;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index d81e19b53973..63c2ff7499e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -25,20 +25,20 @@
#define __AMDGPU_I2C_H__
struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
- struct amdgpu_i2c_bus_rec *rec,
- const char *name);
+ const struct amdgpu_i2c_bus_rec *rec,
+ const char *name);
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
void amdgpu_i2c_init(struct amdgpu_device *adev);
void amdgpu_i2c_fini(struct amdgpu_device *adev);
void amdgpu_i2c_add(struct amdgpu_device *adev,
- struct amdgpu_i2c_bus_rec *rec,
- const char *name);
+ const struct amdgpu_i2c_bus_rec *rec,
+ const char *name);
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
- struct amdgpu_i2c_bus_rec *i2c_bus);
+ const struct amdgpu_i2c_bus_rec *i2c_bus);
void
-amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *connector);
void
-amdgpu_i2c_router_select_cd_port(struct amdgpu_connector *amdgpu_connector);
+amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ec1282af2479..6a6c86c9c169 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -124,7 +124,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
- uint64_t ctx;
+ uint64_t fence_ctx;
+ uint32_t status = 0, alloc_size;
unsigned i;
int r = 0;
@@ -135,14 +136,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* ring tests don't use a job */
if (job) {
vm = job->vm;
- ctx = job->ctx;
+ fence_ctx = job->fence_ctx;
} else {
vm = NULL;
- ctx = 0;
+ fence_ctx = 0;
}
if (!ring->ready) {
- dev_err(adev->dev, "couldn't schedule ib\n");
+ dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
@@ -151,7 +152,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- r = amdgpu_ring_alloc(ring, 256 * num_ibs);
+ alloc_size = amdgpu_ring_get_dma_frame_size(ring) +
+ num_ibs * amdgpu_ring_get_emit_ib_size(ring);
+
+ r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
return r;
@@ -174,13 +178,22 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
- skip_preamble = ring->current_ctx == ctx;
- need_ctx_switch = ring->current_ctx != ctx;
+ skip_preamble = ring->current_ctx == fence_ctx;
+ need_ctx_switch = ring->current_ctx != fence_ctx;
+ if (job && ring->funcs->emit_cntxcntl) {
+ if (need_ctx_switch)
+ status |= AMDGPU_HAVE_CTX_SWITCH;
+ status |= job->preamble_status;
+ amdgpu_ring_emit_cntxcntl(ring, status);
+ }
+
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
/* drop preamble IBs if we don't have a context switch */
- if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
+ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
+ skip_preamble &&
+ !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST))
continue;
amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
@@ -209,7 +222,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
- ring->current_ctx = ctx;
+ ring->current_ctx = fence_ctx;
+ if (ring->funcs->emit_switch_buffer)
+ amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_commit(ring);
return 0;
}
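
The old flat 256 * num_ibs allocation is replaced by an exact per-ring budget. A worked example with hypothetical ring numbers:

	/* Suppose, for some ring:
	 *   amdgpu_ring_get_dma_frame_size(ring) == 64   (fixed frame overhead)
	 *   amdgpu_ring_get_emit_ib_size(ring)   == 8    (per-IB cost)
	 * Scheduling num_ibs = 3 then reserves
	 *   alloc_size = 64 + 3 * 8 = 88 dwords
	 * instead of the old flat 256 * 3 = 768. */
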
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 534fc04e80fd..3ab4c65ecc8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -40,32 +40,15 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
/* Allocate ring buffer */
if (adev->irq.ih.ring_obj == NULL) {
- r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, NULL, &adev->irq.ih.ring_obj);
+ r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->irq.ih.ring_obj,
+ &adev->irq.ih.gpu_addr,
+ (void **)&adev->irq.ih.ring);
if (r) {
DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->irq.ih.ring_obj,
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->irq.ih.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
- DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(adev->irq.ih.ring_obj,
- (void **)&adev->irq.ih.ring);
- amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
- if (r) {
- DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r);
- return r;
- }
}
return 0;
}
@@ -136,8 +119,6 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
*/
void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
{
- int r;
-
if (adev->irq.ih.use_bus_addr) {
if (adev->irq.ih.ring) {
/* add 8 bytes for the rptr/wptr shadows and
@@ -149,17 +130,9 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
adev->irq.ih.ring = NULL;
}
} else {
- if (adev->irq.ih.ring_obj) {
- r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(adev->irq.ih.ring_obj);
- amdgpu_bo_unpin(adev->irq.ih.ring_obj);
- amdgpu_bo_unreserve(adev->irq.ih.ring_obj);
- }
- amdgpu_bo_unref(&adev->irq.ih.ring_obj);
- adev->irq.ih.ring = NULL;
- adev->irq.ih.ring_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
+ &adev->irq.ih.gpu_addr,
+ (void **)&adev->irq.ih.ring);
amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 278708f5a744..9fa809876339 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -239,6 +239,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r) {
adev->irq.installed = false;
flush_work(&adev->hotplug_work);
+ cancel_work_sync(&adev->reset_work);
return r;
}
@@ -264,6 +265,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
if (adev->irq.msi_enabled)
pci_disable_msi(adev->pdev);
flush_work(&adev->hotplug_work);
+ cancel_work_sync(&adev->reset_work);
}
for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 7ef09352e534..f016464035b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -70,6 +70,7 @@ struct amdgpu_irq {
/* gen irq stuff */
struct irq_domain *domain; /* GPU irq controller domain */
unsigned virq[AMDGPU_MAX_IRQ_SRC_ID];
+ uint32_t srbm_soft_reset;
};
void amdgpu_irq_preinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 6674d40eb3ab..8c5807994073 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -91,7 +91,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
-void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -124,7 +124,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
return r;
job->owner = owner;
- job->ctx = entity->fence_context;
+ job->fence_ctx = entity->fence_context;
*f = fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d942654a1de0..3938fca1ea8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((amdgpu_runtime_pm != 0) &&
amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
((flags & AMD_IS_APU) == 0))
flags |= AMD_IS_PX;
@@ -292,14 +294,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
type = AMD_IP_BLOCK_TYPE_UVD;
ring_mask = adev->uvd.ring.ready ? 1 : 0;
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
- for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
+ for (i = 0; i < adev->vce.num_rings; i++)
ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ib_size_alignment = 1;
break;
default:
return -EINVAL;
@@ -373,6 +375,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_NUM_BYTES_MOVED:
ui64 = atomic64_read(&adev->num_bytes_moved);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+ case AMDGPU_INFO_NUM_EVICTIONS:
+ ui64 = atomic64_read(&adev->num_evictions);
+ return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
ui64 = atomic64_read(&adev->vram_usage);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
@@ -456,10 +461,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
/* return all clocks in KHz */
dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
if (adev->pm.dpm_enabled) {
- dev_info.max_engine_clock =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
- dev_info.max_memory_clock =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
+ dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->pm.default_sclk * 10;
dev_info.max_memory_clock = adev->pm.default_mclk * 10;
@@ -539,12 +542,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
- if (unlikely(!fpriv))
- return -ENOMEM;
+ if (unlikely(!fpriv)) {
+ r = -ENOMEM;
+ goto out_suspend;
+ }
r = amdgpu_vm_init(adev, &fpriv->vm);
- if (r)
- goto error_free;
+ if (r) {
+ kfree(fpriv);
+ goto out_suspend;
+ }
mutex_init(&fpriv->bo_list_lock);
idr_init(&fpriv->bo_list_handles);
@@ -553,12 +560,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
file_priv->driver_priv = fpriv;
+out_suspend:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
- return 0;
-
-error_free:
- kfree(fpriv);
return r;
}
@@ -597,6 +601,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
}
/**
@@ -611,6 +618,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
+ pm_runtime_get_sync(dev->dev);
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6b1d7d306564..7b0eff7d060b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -39,6 +39,8 @@
#include <drm/drm_plane_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/hrtimer.h>
+#include "amdgpu_irq.h"
struct amdgpu_bo;
struct amdgpu_device;
@@ -339,6 +341,8 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
+ struct hrtimer vblank_timer;
+ enum amdgpu_interrupt_state vsync_timer_enabled;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -587,10 +591,10 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
void amdgpu_print_display_setup(struct drm_device *dev);
int amdgpu_modeset_create_props(struct amdgpu_device *adev);
int amdgpu_crtc_set_config(struct drm_mode_set *set);
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target);
extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6f0873c75a25..f3efb1c5dae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -38,20 +38,17 @@
#include "amdgpu_trace.h"
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
struct ttm_mem_reg *mem)
{
- u64 ret = 0;
- if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
- ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
- adev->mc.visible_vram_size ?
- adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
- mem->size;
- }
- return ret;
+ if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
+ return 0;
+
+ return ((mem->start << PAGE_SHIFT) + mem->size) >
+ adev->mc.visible_vram_size ?
+ adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
+ mem->size;
}
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
@@ -99,6 +96,11 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
+ if (!list_empty(&bo->shadow_list)) {
+ mutex_lock(&bo->adev->shadow_list_lock);
+ list_del_init(&bo->shadow_list);
+ mutex_unlock(&bo->adev->shadow_list_lock);
+ }
kfree(bo->metadata);
kfree(bo);
}
@@ -112,90 +114,99 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
struct ttm_placement *placement,
- struct ttm_place *placements,
+ struct ttm_place *places,
u32 domain, u64 flags)
{
- u32 c = 0, i;
-
- placement->placement = placements;
- placement->busy_placement = placements;
+ u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+ unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+
if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
- adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- placements[c].fpfn =
- adev->mc.visible_vram_size >> PAGE_SHIFT;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
+ !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+ adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+ places[c].fpfn = visible_pfn;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_WC |
+ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
+ TTM_PL_FLAG_TOPDOWN;
+ c++;
}
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
- if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
- placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ places[c].lpfn = visible_pfn;
+ else
+ places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+ c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_TT;
+ if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
- } else {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
- }
+ else
+ places[c].flags |= TTM_PL_FLAG_CACHED;
+ c++;
}
if (domain & AMDGPU_GEM_DOMAIN_CPU) {
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_SYSTEM;
+ if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
- } else {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
- }
+ else
+ places[c].flags |= TTM_PL_FLAG_CACHED;
+ c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GDS) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- AMDGPU_PL_FLAG_GDS;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+ c++;
}
+
if (domain & AMDGPU_GEM_DOMAIN_GWS) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- AMDGPU_PL_FLAG_GWS;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+ c++;
}
+
if (domain & AMDGPU_GEM_DOMAIN_OA) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- AMDGPU_PL_FLAG_OA;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+ c++;
}
if (!c) {
- placements[c].fpfn = 0;
- placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ c++;
}
+
placement->num_placement = c;
- placement->num_busy_placement = c;
+ placement->placement = places;
- for (i = 0; i < c; i++) {
- if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
- (placements[i].flags & TTM_PL_FLAG_VRAM) &&
- !placements[i].fpfn)
- placements[i].lpfn =
- adev->mc.visible_vram_size >> PAGE_SHIFT;
- else
- placements[i].lpfn = 0;
- }
+ placement->num_busy_placement = c;
+ placement->busy_placement = places;
}
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
- amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
- rbo->placements, domain, rbo->flags);
+ amdgpu_ttm_placement_init(abo->adev, &abo->placement,
+ abo->placements, domain, abo->flags);
}
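
A worked example of what the rewritten helper now produces (sizes hypothetical): with 256 MB of CPU-visible VRAM and 4 KB pages, visible_pfn is 0x10000, and a VRAM request with AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED yields a single placement clamped below the aperture instead of one carrying the TOPDOWN hint:

	/* domain = AMDGPU_GEM_DOMAIN_VRAM,
	 * flags  = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED */
	places[0].fpfn  = 0;
	places[0].lpfn  = 0x10000;  /* visible_pfn = 256 MB >> PAGE_SHIFT */
	places[0].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			  TTM_PL_FLAG_VRAM;
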
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
@@ -211,6 +222,98 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
bo->placement.busy_placement = bo->placements;
}
+/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
+{
+ int r;
+
+ r = amdgpu_bo_create(adev, size, align, true, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL, bo_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(*bo_ptr, false);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
+ goto error_free;
+ }
+
+ r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+ goto error_unreserve;
+ }
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
+ goto error_unreserve;
+ }
+ }
+
+ amdgpu_bo_unreserve(*bo_ptr);
+
+ return 0;
+
+error_unreserve:
+ amdgpu_bo_unreserve(*bo_ptr);
+
+error_free:
+ amdgpu_bo_unref(bo_ptr);
+
+ return r;
+}
+
+/**
+ * amdgpu_bo_free_kernel - free BO for kernel use
+ *
+ * @bo: amdgpu BO to free
+ * @gpu_addr: GPU address the BO was pinned at, cleared on free; may be NULL
+ * @cpu_addr: CPU address mapping, cleared on free; may be NULL
+ *
+ * Unmaps and unpins a BO for kernel internal use.
+ */
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+ void **cpu_addr)
+{
+ if (*bo == NULL)
+ return;
+
+ if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
+ if (cpu_addr)
+ amdgpu_bo_kunmap(*bo);
+
+ amdgpu_bo_unpin(*bo);
+ amdgpu_bo_unreserve(*bo);
+ }
+ amdgpu_bo_unref(bo);
+
+ if (gpu_addr)
+ *gpu_addr = 0;
+
+ if (cpu_addr)
+ *cpu_addr = NULL;
+}
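
A hedged usage sketch of the new pair (struct and function names hypothetical), mirroring what the IH ring conversion earlier in this patch does: one call allocates, pins and maps in one go, the other undoes all three and clears the caller's pointers:

	struct my_ring {
		struct amdgpu_bo *obj;
		u64 gpu_addr;
		void *cpu_ptr;
	};

	static int my_ring_alloc(struct amdgpu_device *adev,
				 struct my_ring *ring, unsigned size)
	{
		return amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_GTT,
					       &ring->obj, &ring->gpu_addr,
					       &ring->cpu_ptr);
	}

	static void my_ring_free(struct my_ring *ring)
	{
		amdgpu_bo_free_kernel(&ring->obj, &ring->gpu_addr,
				      &ring->cpu_ptr);
	}
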
+
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
@@ -249,7 +352,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
return r;
}
bo->adev = adev;
- INIT_LIST_HEAD(&bo->list);
+ INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
@@ -277,11 +380,79 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
if (unlikely(r != 0)) {
return r;
}
+
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+ bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+ struct fence *fence;
+
+ if (adev->mman.buffer_funcs_ring == NULL ||
+ !adev->mman.buffer_funcs_ring->ready) {
+ r = -EBUSY;
+ goto fail_free;
+ }
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ goto fail_free;
+
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r != 0))
+ goto fail_unreserve;
+
+ amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ amdgpu_bo_fence(bo, fence, false);
+ amdgpu_bo_unreserve(bo);
+ fence_put(bo->tbo.moving);
+ bo->tbo.moving = fence_get(fence);
+ fence_put(fence);
+ }
*bo_ptr = bo;
trace_amdgpu_bo_create(bo);
return 0;
+
+fail_unreserve:
+ amdgpu_bo_unreserve(bo);
+fail_free:
+ amdgpu_bo_unref(&bo);
+ return r;
+}
+
+static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
+ unsigned long size, int byte_align,
+ struct amdgpu_bo *bo)
+{
+ struct ttm_placement placement = {0};
+ struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ int r;
+
+ if (bo->shadow)
+ return 0;
+
+ bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
+ memset(&placements, 0,
+ (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+
+ amdgpu_ttm_placement_init(adev, &placement,
+ placements, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+
+ r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+ NULL, &placement,
+ bo->tbo.resv,
+ &bo->shadow);
+ if (!r) {
+ bo->shadow->parent = amdgpu_bo_ref(bo);
+ mutex_lock(&adev->shadow_list_lock);
+ list_add_tail(&bo->shadow_list, &adev->shadow_list);
+ mutex_unlock(&adev->shadow_list_lock);
+ }
+
+ return r;
}
int amdgpu_bo_create(struct amdgpu_device *adev,
@@ -293,6 +464,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
{
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ int r;
memset(&placements, 0,
(AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
@@ -300,9 +472,83 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
amdgpu_ttm_placement_init(adev, &placement,
placements, domain, flags);
- return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
- domain, flags, sg, &placement,
- resv, bo_ptr);
+ r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+ domain, flags, sg, &placement,
+ resv, bo_ptr);
+ if (r)
+ return r;
+
+ if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
+ r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+ if (r)
+ amdgpu_bo_unref(bo_ptr);
+ }
+
+ return r;
+}
+
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence,
+ bool direct)
+
+{
+ struct amdgpu_bo *shadow = bo->shadow;
+ uint64_t bo_addr, shadow_addr;
+ int r;
+
+ if (!shadow)
+ return -EINVAL;
+
+ bo_addr = amdgpu_bo_gpu_offset(bo);
+ shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+ r = reservation_object_reserve_shared(bo->tbo.resv);
+ if (r)
+ goto err;
+
+ r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+ amdgpu_bo_size(bo), resv, fence,
+ direct);
+ if (!r)
+ amdgpu_bo_fence(bo, *fence, true);
+
+err:
+ return r;
+}
+
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence,
+ bool direct)
+
+{
+ struct amdgpu_bo *shadow = bo->shadow;
+ uint64_t bo_addr, shadow_addr;
+ int r;
+
+ if (!shadow)
+ return -EINVAL;
+
+ bo_addr = amdgpu_bo_gpu_offset(bo);
+ shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+ r = reservation_object_reserve_shared(bo->tbo.resv);
+ if (r)
+ goto err;
+
+ r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+ amdgpu_bo_size(bo), resv, fence,
+ direct);
+ if (!r)
+ amdgpu_bo_fence(bo, *fence, true);
+
+err:
+ return r;
}
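
A hedged sketch of how the shadow pair is meant to be used (ring selection, locking and full fence handling elided): create the BO with AMDGPU_GEM_CREATE_SHADOW, back its VRAM contents up into the GTT shadow, and copy the shadow back when VRAM contents are lost, e.g. across a GPU reset:

	struct fence *f = NULL;
	int r;

	/* save the current VRAM contents into the GTT shadow */
	r = amdgpu_bo_backup_to_shadow(adev, ring, bo, bo->tbo.resv, &f, true);
	if (!r)
		fence_put(f);

	/* ... after VRAM was lost (e.g. GPU reset) ... */
	f = NULL;
	r = amdgpu_bo_restore_from_shadow(adev, ring, bo, bo->tbo.resv, &f, true);
	if (!r)
		fence_put(f);
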
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -380,16 +626,17 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
if (bo->pin_count) {
+ uint32_t mem_type = bo->tbo.mem.mem_type;
+
+ if (domain != amdgpu_mem_type_to_domain(mem_type))
+ return -EINVAL;
+
bo->pin_count++;
if (gpu_addr)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
if (max_offset != 0) {
- u64 domain_start;
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
- domain_start = bo->adev->mc.vram_start;
- else
- domain_start = bo->adev->mc.gtt_start;
+ u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
WARN_ON_ONCE(max_offset <
(amdgpu_bo_gpu_offset(bo) - domain_start));
}
@@ -401,7 +648,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
- (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
+ (!max_offset || max_offset >
+ bo->adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
bo->adev->mc.visible_vram_size))
return -EINVAL;
@@ -420,19 +668,28 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (likely(r == 0)) {
- bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- bo->adev->vram_pin_size += amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
- } else
- bo->adev->gart_pin_size += amdgpu_bo_size(bo);
- } else {
+ if (unlikely(r)) {
dev_err(bo->adev->dev, "%p pin failed\n", bo);
+ goto error;
+ }
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (unlikely(r)) {
+ dev_err(bo->adev->dev, "%p bind failed\n", bo);
+ goto error;
}
+
+ bo->pin_count = 1;
+ if (gpu_addr != NULL)
+ *gpu_addr = amdgpu_bo_gpu_offset(bo);
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ bo->adev->vram_pin_size += amdgpu_bo_size(bo);
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+ bo->adev->gart_pin_size += amdgpu_bo_size(bo);
+ }
+
+error:
return r;
}
@@ -457,16 +714,20 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (likely(r == 0)) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
- } else
- bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
- } else {
+ if (unlikely(r)) {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
+ goto error;
}
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
+ }
+
+error:
return r;
}
@@ -493,6 +754,10 @@ static const char *amdgpu_vram_names[] = {
int amdgpu_bo_init(struct amdgpu_device *adev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(adev->mc.aper_base,
+ adev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
adev->mc.aper_size);
@@ -508,6 +773,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
arch_phys_wc_del(adev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
@@ -588,23 +854,23 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
- rbo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(rbo->adev, rbo);
+ abo = container_of(bo, struct amdgpu_bo, tbo);
+ amdgpu_vm_bo_invalidate(abo->adev, abo);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+ amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem);
- trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
+ trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -637,7 +903,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
for (i = 0; i < abo->placement.num_placement; i++) {
/* Force into visible VRAM */
if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
+ (!abo->placements[i].lpfn ||
+ abo->placements[i].lpfn > lpfn))
abo->placements[i].lpfn = lpfn;
}
r = ttm_bo_validate(bo, &abo->placement, false, false);
@@ -674,3 +941,24 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
else
reservation_object_add_excl_fence(resv, fence);
}
+
+/**
+ * amdgpu_bo_gpu_offset - return GPU offset of bo
+ * @bo: amdgpu object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: the object must be either pinned or reserved when calling this
+ * function; the WARN_ON_ONCE checks below catch misuse during debugging.
+ */
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
+{
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+ WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
+ !amdgpu_ttm_is_bound(bo->tbo.ttm));
+ WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+ !bo->pin_count);
+ WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
+
+ return bo->tbo.offset;
+}
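Since the function now warns when its pinned-or-reserved precondition is violated, a conforming caller looks roughly like this sketch (example_query_offset is hypothetical; amdgpu_bo_reserve()/amdgpu_bo_unreserve() are the existing helpers):

static int example_query_offset(struct amdgpu_bo *bo, u64 *offset)
{
	int r = amdgpu_bo_reserve(bo, false);

	if (unlikely(r))
		return r;
	*offset = amdgpu_bo_gpu_offset(bo);	/* resv lock held: no WARN */
	amdgpu_bo_unreserve(bo);
	return 0;
}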
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index bdb01d932548..8255034d73eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -31,6 +31,8 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
@@ -85,21 +87,6 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
ttm_bo_unreserve(&bo->tbo);
}
-/**
- * amdgpu_bo_gpu_offset - return GPU offset of bo
- * @bo: amdgpu object for which we query the offset
- *
- * Returns current GPU offset of the object.
- *
- * Note: object should either be pinned or reserved when calling this
- * function, it might be useful to add check for this for debugging.
- */
-static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
-{
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
- return bo->tbo.offset;
-}
-
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
return bo->tbo.num_pages << PAGE_SHIFT;
@@ -139,6 +126,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct ttm_placement *placement,
struct reservation_object *resv,
struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr);
+void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+ void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
@@ -165,6 +158,19 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
bool shared);
+u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence, bool direct);
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct reservation_object *resv,
+ struct fence **fence,
+ bool direct);
+
/*
* sub allocation
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index d15314957732..8e67c1210d7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 5cc7052e391d..accc908bdc88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1103,54 +1103,46 @@ force:
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled)
+ if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+ /* enable/disable UVD */
+ mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_uvd(adev, !enable);
- else {
- if (adev->pm.funcs->powergate_uvd) {
+ mutex_unlock(&adev->pm.mutex);
+ } else {
+ if (enable) {
mutex_lock(&adev->pm.mutex);
- /* enable/disable UVD */
- amdgpu_dpm_powergate_uvd(adev, !enable);
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
mutex_unlock(&adev->pm.mutex);
} else {
- if (enable) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- mutex_unlock(&adev->pm.mutex);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.uvd_active = false;
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_pm_compute_clocks(adev);
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.uvd_active = false;
+ mutex_unlock(&adev->pm.mutex);
}
-
+ amdgpu_pm_compute_clocks(adev);
}
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled)
+ if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+ /* enable/disable VCE */
+ mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_vce(adev, !enable);
- else {
- if (adev->pm.funcs->powergate_vce) {
+ mutex_unlock(&adev->pm.mutex);
+ } else {
+ if (enable) {
mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_vce(adev, !enable);
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
- if (enable) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
- mutex_unlock(&adev->pm.mutex);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.vce_active = false;
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_pm_compute_clocks(adev);
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.vce_active = false;
+ mutex_unlock(&adev->pm.mutex);
}
+ amdgpu_pm_compute_clocks(adev);
}
}
@@ -1330,6 +1322,64 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
+{
+ int32_t value;
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* GPU Clocks */
+ seq_printf(m, "GFX Clocks and Power:\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
+ seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
+ seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
+ seq_printf(m, "\t%u mV (VDDGFX)\n", value);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
+ seq_printf(m, "\t%u mV (VDDNB)\n", value);
+ seq_printf(m, "\n");
+
+ /* GPU Temp */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
+ seq_printf(m, "GPU Temperature: %u C\n", value/1000);
+
+ /* GPU Load */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
+ seq_printf(m, "GPU Load: %u %%\n", value);
+ seq_printf(m, "\n");
+
+ /* UVD clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
+ if (!value) {
+ seq_printf(m, "UVD: Disabled\n");
+ } else {
+ seq_printf(m, "UVD: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
+ seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
+ seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ }
+ }
+ seq_printf(m, "\n");
+
+ /* VCE clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
+ if (!value) {
+ seq_printf(m, "VCE: Disabled\n");
+ } else {
+ seq_printf(m, "VCE: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
+ seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+ }
+ }
+
+ return 0;
+}
+
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1345,11 +1395,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
} else if (adev->pp_enabled) {
- amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+ return amdgpu_debugfs_pm_info_pp(m, adev);
} else {
mutex_lock(&adev->pm.mutex);
if (adev->pm.funcs->debugfs_print_current_performance_level)
- amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+ adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index c5738a22b690..7532ff822aa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -30,6 +30,7 @@
#include "amdgpu_pm.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_powerplay.h"
+#include "si_dpm.h"
#include "cik_dpm.h"
#include "vi_dpm.h"
@@ -41,7 +42,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
amd_pp = &(adev->powerplay);
if (adev->pp_enabled) {
-#ifdef CONFIG_DRM_AMD_POWERPLAY
struct amd_pp_init *pp_init;
pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL);
@@ -52,15 +52,21 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
- pp_init->powercontainment_enabled = amdgpu_powercontainment;
-
ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
-#endif
} else {
amd_pp->pp_handle = (void *)adev;
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ amd_pp->ip_funcs = &si_dpm_ip_funcs;
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
@@ -72,15 +78,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
amd_pp->ip_funcs = &kv_dpm_ip_funcs;
break;
#endif
- case CHIP_TOPAZ:
- amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
- break;
- case CHIP_TONGA:
- amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
- break;
- case CHIP_FIJI:
- amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
- break;
case CHIP_CARRIZO:
case CHIP_STONEY:
amd_pp->ip_funcs = &cz_dpm_ip_funcs;
@@ -98,19 +95,17 @@ static int amdgpu_pp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
-#ifdef CONFIG_DRM_AMD_POWERPLAY
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
- adev->pp_enabled = true;
- break;
case CHIP_TONGA:
case CHIP_FIJI:
- adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
+ case CHIP_TOPAZ:
+ adev->pp_enabled = true;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
- adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+ adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
break;
/* These chips don't have powerplay implementations */
case CHIP_BONAIRE:
@@ -118,14 +113,10 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
- case CHIP_TOPAZ:
default:
adev->pp_enabled = false;
break;
}
-#else
- adev->pp_enabled = false;
-#endif
ret = amdgpu_powerplay_init(adev);
if (ret)
@@ -147,12 +138,11 @@ static int amdgpu_pp_late_init(void *handle)
ret = adev->powerplay.ip_funcs->late_init(
adev->powerplay.pp_handle);
-#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled && adev->pm.dpm_enabled) {
amdgpu_pm_sysfs_init(adev);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
}
-#endif
+
return ret;
}
@@ -165,10 +155,8 @@ static int amdgpu_pp_sw_init(void *handle)
ret = adev->powerplay.ip_funcs->sw_init(
adev->powerplay.pp_handle);
-#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled)
adev->pm.dpm_enabled = true;
-#endif
return ret;
}
@@ -219,7 +207,6 @@ static int amdgpu_pp_hw_fini(void *handle)
static void amdgpu_pp_late_fini(void *handle)
{
-#ifdef CONFIG_DRM_AMD_POWERPLAY
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pp_enabled) {
@@ -230,7 +217,6 @@ static void amdgpu_pp_late_fini(void *handle)
if (adev->powerplay.ip_funcs->late_fini)
adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle);
-#endif
}
static int amdgpu_pp_suspend(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 7700dc22f243..3826d5aea0a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
return ERR_PTR(ret);
+ bo->prime_shared_count = 1;
return &bo->gem_base;
}
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int ret = 0;
+ long ret = 0;
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret != 0))
return ret;
+ /*
+ * Wait for all shared fences to complete before we switch to using
+ * the exclusive fence on this prime-shared bo.
+ */
+ ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(ret < 0)) {
+ DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
+ amdgpu_bo_unreserve(bo);
+ return ret;
+ }
+
/* pin buffer into GTT */
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
amdgpu_bo_unreserve(bo);
return ret;
}
@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
return;
amdgpu_bo_unpin(bo);
+ if (bo->prime_shared_count)
+ bo->prime_shared_count--;
amdgpu_bo_unreserve(bo);
}
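For reference, a minimal sketch of the wait used in amdgpu_gem_prime_pin() above, assuming only that bo->tbo.resv is valid; reservation_object_wait_timeout_rcu() returns a negative error or the remaining jiffies on success, so with MAX_SCHEDULE_TIMEOUT it blocks until every shared fence has signaled:

	long ret;

	ret = reservation_object_wait_timeout_rcu(bo->tbo.resv,
						  true,  /* wait_all: shared fences too */
						  false, /* not interruptible */
						  MAX_SCHEDULE_TIMEOUT);
	if (unlikely(ret < 0))
		return ret;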
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 85aeb0a804bb..3cb5e903cd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -222,33 +222,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, NULL, &ring->ring_obj);
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = amdgpu_bo_reserve(ring->ring_obj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
- &ring->gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(ring->ring_obj);
- dev_err(adev->dev, "(%d) ring pin failed\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(ring->ring_obj,
- (void **)&ring->ring);
-
memset((void *)ring->ring, 0, ring->ring_size);
-
- amdgpu_bo_unreserve(ring->ring_obj);
- if (r) {
- dev_err(adev->dev, "(%d) ring map failed\n", r);
- return r;
- }
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->max_dw = max_dw;
@@ -269,29 +252,20 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- int r;
- struct amdgpu_bo *ring_obj;
-
- ring_obj = ring->ring_obj;
ring->ready = false;
- ring->ring = NULL;
- ring->ring_obj = NULL;
amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
- if (ring_obj) {
- r = amdgpu_bo_reserve(ring_obj, false);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(ring_obj);
- amdgpu_bo_unpin(ring_obj);
- amdgpu_bo_unreserve(ring_obj);
- }
- amdgpu_bo_unref(&ring_obj);
- }
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
+
amdgpu_debugfs_ring_fini(ring);
+
+ ring->adev->rings[ring->idx] = NULL;
}
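The conversion above is the pattern this series applies wherever a kernel-internal BO is needed; a sketch assuming adev and size are in scope (one call replaces the old create/reserve/pin/kmap sequence, and one call undoes it):

	struct amdgpu_bo *bo;
	u64 gpu_addr;
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &bo, &gpu_addr, &cpu_ptr);
	if (r)
		return r;
	/* ... use cpu_ptr and gpu_addr ... */
	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);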
/*
@@ -371,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
ent = debugfs_create_file(name,
S_IFREG | S_IRUGO, root,
ring, &amdgpu_debugfs_ring_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
i_size_write(ent->d_inode, ring->ring_size + 12);
ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 05a53f4fc334..b827c75e95de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]);
r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
- size, NULL, &fence);
+ size, NULL, &fence, false);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(vram_obj);
r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
- size, NULL, &fence);
+ size, NULL, &fence, false);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 0d8d65eb46cd..067e5e683bb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -247,7 +247,7 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
TP_ARGS(mapping)
);
-TRACE_EVENT(amdgpu_vm_set_page,
+TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags),
TP_ARGS(pe, addr, count, incr, flags),
@@ -271,6 +271,24 @@ TRACE_EVENT(amdgpu_vm_set_page,
__entry->flags, __entry->count)
);
+TRACE_EVENT(amdgpu_vm_copy_ptes,
+ TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
+ TP_ARGS(pe, src, count),
+ TP_STRUCT__entry(
+ __field(u64, pe)
+ __field(u64, src)
+ __field(u32, count)
+ ),
+
+ TP_fast_assign(
+ __entry->pe = pe;
+ __entry->src = src;
+ __entry->count = count;
+ ),
+ TP_printk("pe=%010Lx, src=%010Lx, count=%u",
+ __entry->pe, __entry->src, __entry->count)
+);
+
TRACE_EVENT(amdgpu_vm_flush,
TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
TP_ARGS(pd_addr, ring, id),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 716f2afeb6a9..dcaf691f56b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -34,6 +34,7 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_memory.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
@@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
ttm_mem_global_release(ref->object);
}
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
+int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
struct amdgpu_ring *ring;
@@ -88,10 +89,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
global_ref->init = &amdgpu_ttm_mem_global_init;
global_ref->release = &amdgpu_ttm_mem_global_release;
r = drm_global_item_ref(global_ref);
- if (r != 0) {
+ if (r) {
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
- return r;
+ goto error_mem;
}
adev->mman.bo_global_ref.mem_glob =
@@ -102,26 +103,30 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
r = drm_global_item_ref(global_ref);
- if (r != 0) {
+ if (r) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&adev->mman.mem_global_ref);
- return r;
+ goto error_bo;
}
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
rq, amdgpu_sched_jobs);
- if (r != 0) {
+ if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
- drm_global_item_unref(&adev->mman.mem_global_ref);
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
- return r;
+ goto error_entity;
}
adev->mman.mem_global_referenced = true;
return 0;
+
+error_entity:
+ drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+error_bo:
+ drm_global_item_unref(&adev->mman.mem_global_ref);
+error_mem:
+ return r;
}
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
@@ -155,7 +160,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
- man->func = &ttm_bo_manager_func;
+ man->func = &amdgpu_gtt_mgr_func;
man->gpu_offset = adev->mc.gtt_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
@@ -190,12 +195,13 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
static struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
+ unsigned i;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
@@ -204,28 +210,44 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1;
return;
}
- rbo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->adev->mman.buffer_funcs_ring->ready == false)
- amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
- else
- amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
+ if (abo->adev->mman.buffer_funcs_ring->ready == false) {
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else {
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+ for (i = 0; i < abo->placement.num_placement; ++i) {
+ if (!(abo->placements[i].flags &
+ TTM_PL_FLAG_TT))
+ continue;
+
+ if (abo->placements[i].lpfn)
+ continue;
+
+ /* set an upper limit to force directly
+ * allocating address space for the BO.
+ */
+ abo->placements[i].lpfn =
+ abo->adev->mc.gtt_size >> PAGE_SHIFT;
+ }
+ }
break;
case TTM_PL_TT:
default:
- amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
}
- *placement = rbo->placement;
+ *placement = abo->placement;
}
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ return drm_vma_node_verify_access(&abo->gem_base.vma_node,
+ filp->private_data);
}
static void amdgpu_move_null(struct ttm_buffer_object *bo,
@@ -251,26 +273,30 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
adev = amdgpu_get_adev(bo->bdev);
ring = adev->mman.buffer_funcs_ring;
- old_start = (u64)old_mem->start << PAGE_SHIFT;
- new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
- case TTM_PL_VRAM:
- old_start += adev->mc.vram_start;
- break;
case TTM_PL_TT:
- old_start += adev->mc.gtt_start;
+ r = amdgpu_ttm_bind(bo, old_mem);
+ if (r)
+ return r;
+
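+ /* fall through: the offset is computed from the manager's gpu_offset */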
+ case TTM_PL_VRAM:
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
switch (new_mem->mem_type) {
- case TTM_PL_VRAM:
- new_start += adev->mc.vram_start;
- break;
case TTM_PL_TT:
- new_start += adev->mc.gtt_start;
+ r = amdgpu_ttm_bind(bo, new_mem);
+ if (r)
+ return r;
+
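+ /* fall through: the offset is computed from the manager's gpu_offset */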
+ case TTM_PL_VRAM:
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
+ new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -285,7 +311,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_copy_buffer(ring, old_start, new_start,
new_mem->num_pages * PAGE_SIZE, /* bytes */
- bo->resv, &fence);
+ bo->resv, &fence, false);
if (r)
return r;
@@ -314,7 +340,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = 0;
+ placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
@@ -335,7 +361,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -361,14 +387,14 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = 0;
+ placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -435,8 +461,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
if (r) {
return r;
}
@@ -524,15 +549,19 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
+ struct list_head list;
};
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ unsigned int flags = 0;
unsigned pinned = 0;
int r;
+ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+ flags |= FOLL_WRITE;
+
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@@ -555,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
list_add(&guptask.list, &gtt->guptasks);
spin_unlock(&gtt->guptasklock);
- r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+ r = get_user_pages(userptr, num_pages, flags, p, NULL);
spin_lock(&gtt->guptasklock);
list_del(&guptask.list);
@@ -641,7 +670,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct amdgpu_ttm_tt *gtt = (void*)ttm;
- uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
int r;
if (gtt->userptr) {
@@ -651,7 +679,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
return r;
}
}
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
@@ -662,14 +689,71 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
+ return 0;
+}
+
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+ return gtt && !list_empty(&gtt->list);
+}
+
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
+{
+ struct ttm_tt *ttm = bo->ttm;
+ struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ uint32_t flags;
+ int r;
+
+ if (!ttm || amdgpu_ttm_is_bound(ttm))
+ return 0;
+
+ r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
+ NULL, bo_mem);
+ if (r) {
+ DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
+ return r;
+ }
+
+ flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
- ttm->num_pages, (unsigned)gtt->offset);
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ ttm->num_pages, gtt->offset);
return r;
}
+ spin_lock(&gtt->adev->gtt_list_lock);
+ list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+ spin_unlock(&gtt->adev->gtt_list_lock);
+ return 0;
+}
+
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+{
+ struct amdgpu_ttm_tt *gtt, *tmp;
+ struct ttm_mem_reg bo_mem;
+ uint32_t flags;
+ int r;
+
+ bo_mem.mem_type = TTM_PL_TT;
+ spin_lock(&adev->gtt_list_lock);
+ list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+ flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
+ r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+ gtt->ttm.ttm.pages, gtt->ttm.dma_address,
+ flags);
+ if (r) {
+ spin_unlock(&adev->gtt_list_lock);
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ gtt->ttm.ttm.num_pages, gtt->offset);
+ return r;
+ }
+ }
+ spin_unlock(&adev->gtt_list_lock);
return 0;
}
@@ -677,12 +761,19 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ if (gtt->userptr)
+ amdgpu_ttm_tt_unpin_userptr(ttm);
+
+ if (!amdgpu_ttm_is_bound(ttm))
+ return 0;
+
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
if (gtt->adev->gart.ready)
amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
- if (gtt->userptr)
- amdgpu_ttm_tt_unpin_userptr(ttm);
+ spin_lock(&gtt->adev->gtt_list_lock);
+ list_del_init(&gtt->list);
+ spin_unlock(&gtt->adev->gtt_list_lock);
return 0;
}
@@ -720,6 +811,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
kfree(gtt);
return NULL;
}
+ INIT_LIST_HEAD(&gtt->list);
return &gtt->ttm.ttm;
}
@@ -991,10 +1083,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
unsigned i, j;
int r;
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
adev->mman.bo_global_ref.ref.object,
@@ -1159,7 +1247,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv,
- struct fence **fence)
+ struct fence **fence, bool direct_submit)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1203,8 +1291,79 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
+ if (direct_submit) {
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
+ NULL, NULL, fence);
+ job->fence = fence_get(*fence);
+ if (r)
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ amdgpu_job_free(job);
+ } else {
+ r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ if (r)
+ goto error_free;
+ }
+
+ return r;
+
+error_free:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct reservation_object *resv,
+ struct fence **fence)
+{
+ struct amdgpu_device *adev = bo->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+
+ uint32_t max_bytes, byte_count;
+ uint64_t dst_offset;
+ unsigned int num_loops, num_dw;
+ unsigned int i;
+ int r;
+
+ byte_count = bo->tbo.num_pages << PAGE_SHIFT;
+ max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+ num_loops = DIV_ROUND_UP(byte_count, max_bytes);
+ num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+ /* for IB padding */
+ while (num_dw & 0x7)
+ num_dw++;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ if (r)
+ return r;
+
+ if (resv) {
+ r = amdgpu_sync_resv(adev, &job->sync, resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
+ if (r) {
+ DRM_ERROR("sync failed (%d).\n", r);
+ goto error_free;
+ }
+ }
+
+ dst_offset = bo->tbo.mem.start << PAGE_SHIFT;
+ for (i = 0; i < num_loops; i++) {
+ uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+ dst_offset, cur_size_in_bytes);
+
+ dst_offset += cur_size_in_bytes;
+ byte_count -= cur_size_in_bytes;
+ }
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
r = amdgpu_job_submit(job, ring, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+ AMDGPU_FENCE_OWNER_UNDEFINED, fence);
if (r)
goto error_free;
@@ -1395,3 +1554,8 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
#endif
}
+
+u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev)
+{
+ return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object);
+}
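A sketch of the new direct-submission path, assuming ring, src_offset, dst_offset and byte_count are in scope; direct_submit = true bypasses the scheduler entity, which is what reset and recovery paths need:

	struct fence *fence = NULL;
	int r;

	r = amdgpu_copy_buffer(ring, src_offset, dst_offset, byte_count,
			       NULL /* no reservation object to sync to */,
			       &fence, true /* direct_submit */);
	if (!r && fence) {
		fence_wait(fence, false);
		fence_put(fence);
	}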
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
new file mode 100644
index 000000000000..9812c805326c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_TTM_H__
+#define __AMDGPU_TTM_H__
+
+#include "gpu_scheduler.h"
+
+#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
+#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
+#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
+
+#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0)
+#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
+#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)
+
+#define AMDGPU_TTM_LRU_SIZE 20
+
+struct amdgpu_mman_lru {
+ struct list_head *lru[TTM_NUM_MEM_TYPES];
+ struct list_head *swap_lru;
+};
+
+struct amdgpu_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_device bdev;
+ bool mem_global_referenced;
+ bool initialized;
+
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *vram;
+ struct dentry *gtt;
+#endif
+
+ /* buffer handling */
+ const struct amdgpu_buffer_funcs *buffer_funcs;
+ struct amdgpu_ring *buffer_funcs_ring;
+ /* Scheduler entity for buffer moves */
+ struct amd_sched_entity entity;
+
+ /* custom LRU management */
+ struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
+ /* guard for log2_size array, don't add anything in between */
+ struct amdgpu_mman_lru guard;
+};
+
+extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
+
+int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem);
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ uint32_t byte_count,
+ struct reservation_object *resv,
+ struct fence **fence, bool direct_submit);
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct reservation_object *resv,
+ struct fence **fence);
+
+int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
+int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
+
+#endif
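A hypothetical caller of the amdgpu_fill_buffer() declared above, clearing a BO to zero and waiting for the SDMA fill fence:

	struct fence *f = NULL;
	int r;

	r = amdgpu_fill_buffer(bo, 0x00000000, bo->tbo.resv, &f);
	if (!r && f) {
		fence_wait(f, false);
		fence_put(f);
	}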
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5cc95f1a7dab..cb3d252f3c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,40 +247,32 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
- err = -ENOMEM;
goto failed;
}
err = amdgpu_bo_reserve(*bo, false);
if (err) {
- amdgpu_bo_unref(bo);
dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
- goto failed;
+ goto failed_reserve;
}
err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr);
if (err) {
- amdgpu_bo_unreserve(*bo);
- amdgpu_bo_unref(bo);
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
- goto failed;
+ goto failed_pin;
}
err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
- amdgpu_bo_unpin(*bo);
- amdgpu_bo_unreserve(*bo);
- amdgpu_bo_unref(bo);
- goto failed;
+ goto failed_kmap;
}
amdgpu_bo_unreserve(*bo);
- fw_offset = 0;
for (i = 0; i < AMDGPU_UCODE_ID_MAXIMUM; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
@@ -290,10 +282,16 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
+ return 0;
+failed_kmap:
+ amdgpu_bo_unpin(*bo);
+failed_pin:
+ amdgpu_bo_unreserve(*bo);
+failed_reserve:
+ amdgpu_bo_unref(bo);
failed:
- if (err)
- adev->firmware.smu_load = false;
+ adev->firmware.smu_load = false;
return err;
}
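The reordered labels above follow the usual kernel unwind idiom: each label releases exactly what was acquired before the failing step. Schematically (step names hypothetical):

	err = acquire_a();
	if (err)
		goto failed;
	err = acquire_b();
	if (err)
		goto failed_release_a;
	return 0;

failed_release_a:
	release_a();
failed:
	return err;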
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 4aa993d19018..e3281cacc586 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -201,39 +201,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
- r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, &adev->uvd.vcpu_bo);
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
+ &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (r) {
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
- dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
- return r;
- }
-
- r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->uvd.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
- dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
- if (r) {
- dev_err(adev->dev, "(%d) UVD map failed\n", r);
- return r;
- }
-
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-
ring = &adev->uvd.ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
@@ -274,22 +249,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
- int r;
-
kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
- if (adev->uvd.vcpu_bo) {
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (!r) {
- amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
- amdgpu_bo_unpin(adev->uvd.vcpu_bo);
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- }
-
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
- }
+ amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
+ &adev->uvd.gpu_addr,
+ (void **)&adev->uvd.cpu_addr);
amdgpu_ring_fini(&adev->uvd.ring);
@@ -323,7 +289,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.saved_bo)
return -ENOMEM;
- memcpy(adev->uvd.saved_bo, ptr, size);
+ memcpy_fromio(adev->uvd.saved_bo, ptr, size);
return 0;
}
@@ -340,7 +306,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr = adev->uvd.cpu_addr;
if (adev->uvd.saved_bo != NULL) {
- memcpy(ptr, adev->uvd.saved_bo, size);
+ memcpy_toio(ptr, adev->uvd.saved_bo, size);
kfree(adev->uvd.saved_bo);
adev->uvd.saved_bo = NULL;
} else {
@@ -349,11 +315,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
- (adev->uvd.fw->size) - offset);
+ memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
- memset(ptr, 0, size);
+ memset_io(ptr, 0, size);
}
return 0;
@@ -385,12 +351,12 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
}
-static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
int i;
- for (i = 0; i < rbo->placement.num_placement; ++i) {
- rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
- rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+ for (i = 0; i < abo->placement.num_placement; ++i) {
+ abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
+ abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}
}
@@ -843,6 +809,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
return r;
break;
case mmUVD_ENGINE_CNTL:
+ case mmUVD_NO_OP:
break;
default:
DRM_ERROR("Invalid reg 0x%X!\n", reg);
@@ -915,6 +882,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
+ r = amdgpu_cs_sysvm_access_required(parser);
+ if (r)
+ return r;
+
ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;
@@ -981,8 +952,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->ptr[3] = addr >> 32;
ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
ib->ptr[5] = 0;
- for (i = 6; i < 16; ++i)
- ib->ptr[i] = PACKET2(0);
+ for (i = 6; i < 16; i += 2) {
+ ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0);
+ ib->ptr[i+1] = 0;
+ }
ib->length_dw = 16;
if (direct) {
@@ -1114,15 +1087,9 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, uvd.idle_work.work);
- unsigned i, fences, handles = 0;
-
- fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
-
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.handles[i]))
- ++handles;
+ unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
- if (fences == 0 && handles == 0) {
+ if (fences == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 05865ce35351..7fe8fd884f06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -210,6 +210,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
*/
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
+ unsigned i;
+
if (adev->vce.vcpu_bo == NULL)
return 0;
@@ -217,8 +219,8 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
amdgpu_bo_unref(&adev->vce.vcpu_bo);
- amdgpu_ring_fini(&adev->vce.ring[0]);
- amdgpu_ring_fini(&adev->vce.ring[1]);
+ for (i = 0; i < adev->vce.num_rings; i++)
+ amdgpu_ring_fini(&adev->vce.ring[i]);
release_firmware(adev->vce.fw);
mutex_destroy(&adev->vce.idle_mutex);
@@ -282,8 +284,8 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy(cpu_addr, (adev->vce.fw->data) + offset,
- (adev->vce.fw->size) - offset);
+ memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+ adev->vce.fw->size - offset);
amdgpu_bo_kunmap(adev->vce.vcpu_bo);
@@ -303,9 +305,12 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, vce.idle_work.work);
+ unsigned i, count = 0;
+
+ for (i = 0; i < adev->vce.num_rings; i++)
+ count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
- if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
- (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
+ if (count == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_vce(adev, false);
} else {
@@ -634,7 +639,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
- int i, r = 0, idx = 0;
+ int i, r, idx = 0;
+
+ r = amdgpu_cs_sysvm_access_required(p);
+ if (r)
+ return r;
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
@@ -687,6 +696,21 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
case 0x04000008: /* rdo */
case 0x04000009: /* vui */
case 0x05000002: /* auxiliary buffer */
+ case 0x05000009: /* clock table */
+ break;
+
+ case 0x0500000c: /* hw config */
+ switch (p->adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_KAVERI:
+ case CHIP_MULLINS:
+#endif
+ case CHIP_CARRIZO:
+ break;
+ default:
+ r = -EINVAL;
+ goto out;
+ }
break;
case 0x03000001: /* encode */
@@ -799,6 +823,18 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
amdgpu_ring_write(ring, VCE_CMD_END);
}
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* amdgpu_vce_ring_emit_ib */
+}
+
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
+}
+
/**
* amdgpu_vce_ring_test_ring - test if VCE ring is working
*
@@ -850,8 +886,8 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct fence *fence = NULL;
long r;
- /* skip vce ring1 ib test for now, since it's not reliable */
- if (ring == &ring->adev->vce.ring[1])
+ /* skip vce ring1/2 ib test for now, since it's not reliable */
+ if (ring != &ring->adev->vce.ring[0])
return 0;
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 63f83d0d985c..12729d2852df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -42,5 +42,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
+unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 88d68cb6e89d..2c37a374917f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -19,22 +19,39 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Author: Monk.liu@amd.com
*/
+#ifndef AMDGPU_VIRT_H
+#define AMDGPU_VIRT_H
-#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
-#define _POLARIS10_CLOCK_POWER_GATING_H_
+#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
+#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
+#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
+#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
+/* GPU virtualization */
+struct amdgpu_virtualization {
+ uint32_t virtual_caps;
+};
-#include "polaris10_hwmgr.h"
-#include "pp_asicblocks.h"
+#define amdgpu_sriov_enabled(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
- const uint32_t *msg_id);
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+#define amdgpu_sriov_vf(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
-#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */
+#define amdgpu_sriov_bios(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
+
+#define amdgpu_passthrough(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
+
+static inline bool is_virtual_machine(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
+#endif
\ No newline at end of file
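How these caps are meant to be consumed, as a sketch; the code that actually populates virtual_caps lives elsewhere in the series, so the assignment below is only illustrative:

	if (is_virtual_machine())
		adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;

	if (amdgpu_sriov_vf(adev)) {
		/* virtual function: the host performs the heavy lifting */
		return 0;
	}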
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 80120fa4092c..968c4260d7a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -51,19 +51,22 @@
* SI supports 16.
*/
-/* Special value that no flush is necessary */
-#define AMDGPU_VM_NO_FLUSH (~0ll)
-
/* Local structure. Encapsulate some VM table update parameters to reduce
* the number of function parameters
*/
-struct amdgpu_vm_update_params {
+struct amdgpu_pte_update_params {
+ /* amdgpu device we do this update for */
+ struct amdgpu_device *adev;
/* address where to copy page table entries from */
uint64_t src;
- /* DMA addresses to use for mapping */
- dma_addr_t *pages_addr;
/* indirect buffer to fill with commands */
struct amdgpu_ib *ib;
+ /* Function which actually does the update */
+ void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint32_t flags);
+ /* indicate update pt or its shadow */
+ bool shadow;
};
/**
@@ -467,10 +470,9 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_update_pages - helper to call the right asic function
+ * amdgpu_vm_do_set_ptes - helper to call the right asic function
*
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -480,32 +482,46 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* Traces the parameters and calls the right asic functions
* to setup the page table using the DMA.
*/
-static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
- struct amdgpu_vm_update_params
- *vm_update_params,
+static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
+ uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr,
+ uint32_t flags)
+{
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
+ if (count < 3) {
+ amdgpu_vm_write_pte(params->adev, params->ib, pe,
+ addr | flags, count, incr);
+
+ } else {
+ amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
+ * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint32_t flags)
{
- trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
-
- if (vm_update_params->src) {
- amdgpu_vm_copy_pte(adev, vm_update_params->ib,
- pe, (vm_update_params->src + (addr >> 12) * 8), count);
+ uint64_t src = (params->src + (addr >> 12) * 8);
- } else if (vm_update_params->pages_addr) {
- amdgpu_vm_write_pte(adev, vm_update_params->ib,
- vm_update_params->pages_addr,
- pe, addr, count, incr, flags);
- } else if (count < 3) {
- amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
- count, incr, flags);
+ trace_amdgpu_vm_copy_ptes(pe, src, count);
- } else {
- amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
- count, incr, flags);
- }
+ amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
}
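With both callbacks in place a caller fills an amdgpu_pte_update_params once and the page-table walker simply invokes params->func(); use_gart_copy is a hypothetical flag standing in for the series' actual selection logic:

	struct amdgpu_pte_update_params params = {
		.adev = adev,
		.ib   = &job->ibs[0],
		.func = use_gart_copy ? amdgpu_vm_do_copy_ptes
				      : amdgpu_vm_do_set_ptes,
	};

	params.func(&params, pe, addr, count, incr, flags);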
/**
@@ -523,12 +539,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
struct fence *fence = NULL;
struct amdgpu_job *job;
- struct amdgpu_vm_update_params vm_update_params;
+ struct amdgpu_pte_update_params params;
unsigned entries;
uint64_t addr;
int r;
- memset(&vm_update_params, 0, sizeof(vm_update_params));
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -539,6 +554,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (r)
+ goto error;
+
addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
@@ -546,9 +565,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
- vm_update_params.ib = &job->ibs[0];
- amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
- 0, 0);
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.ib = &job->ibs[0];
+ amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > 64);
@@ -577,55 +597,46 @@ error:
* Look up the physical address of the page that the pte resolves
* to and return the pointer for the page table entry.
*/
-uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
uint64_t result;
- if (pages_addr) {
- /* page table offset */
- result = pages_addr[addr >> PAGE_SHIFT];
-
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
+ /* page table offset */
+ result = pages_addr[addr >> PAGE_SHIFT];
- } else {
- /* No mapping required */
- result = addr;
- }
+ /* in case cpu page size != gpu page size*/
+ result |= addr & (~PAGE_MASK);
result &= 0xFFFFFFFFFFFFF000ULL;
return result;
}
-/**
- * amdgpu_vm_update_pdes - make sure that page directory is valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- *
- * Allocates new page tables if necessary
- * and updates the page directory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ bool shadow)
{
struct amdgpu_ring *ring;
- struct amdgpu_bo *pd = vm->page_directory;
- uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
+ struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow :
+ vm->page_directory;
+ uint64_t pd_addr;
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
- struct amdgpu_vm_update_params vm_update_params;
+ struct amdgpu_pte_update_params params;
struct fence *fence = NULL;
int r;
- memset(&vm_update_params, 0, sizeof(vm_update_params));
+ if (!pd)
+ return 0;
+
+ r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem);
+ if (r)
+ return r;
+
+ pd_addr = amdgpu_bo_gpu_offset(pd);
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
/* padding, etc. */
@@ -638,7 +649,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (r)
return r;
- vm_update_params.ib = &job->ibs[0];
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -648,20 +661,34 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (bo == NULL)
continue;
+ if (bo->shadow) {
+ struct amdgpu_bo *shadow = bo->shadow;
+
+ r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ if (r)
+ return r;
+ }
+
pt = amdgpu_bo_gpu_offset(bo);
- if (vm->page_tables[pt_idx].addr == pt)
- continue;
- vm->page_tables[pt_idx].addr = pt;
+ if (!shadow) {
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+ vm->page_tables[pt_idx].addr = pt;
+ } else {
+ if (vm->page_tables[pt_idx].shadow_addr == pt)
+ continue;
+ vm->page_tables[pt_idx].shadow_addr = pt;
+ }
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
- ((last_pt + incr * count) != pt)) {
+ ((last_pt + incr * count) != pt) ||
+ (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
if (count) {
- amdgpu_vm_update_pages(adev, &vm_update_params,
- last_pde, last_pt,
- count, incr,
- AMDGPU_PTE_VALID);
+ amdgpu_vm_do_set_ptes(&params, last_pde,
+ last_pt, count, incr,
+ AMDGPU_PTE_VALID);
}
count = 1;
@@ -673,15 +700,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
- amdgpu_vm_update_pages(adev, &vm_update_params,
- last_pde, last_pt,
- count, incr, AMDGPU_PTE_VALID);
+ amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
- if (vm_update_params.ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+ if (params.ib->length_dw != 0) {
+ amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(vm_update_params.ib->length_dw > ndw);
+ WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
@@ -703,92 +729,33 @@ error_free:
return r;
}
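To see what the new (count == AMDGPU_VM_MAX_UPDATE_SIZE) test contributes, here is a minimal user-space sketch of the same run-merging loop; the emit() callback and the 64-entry cap are illustrative stand-ins, not values taken from the patch. Contiguous PDE/PT runs are coalesced into one span and flushed whenever the run breaks or hits the cap:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAX_UPDATE 64 /* stand-in for AMDGPU_VM_MAX_UPDATE_SIZE */

static void emit(uint64_t pde, uint64_t pt, unsigned count, uint32_t incr)
{
	printf("set_ptes: pde=%#llx pt=%#llx count=%u incr=%u\n",
	       (unsigned long long)pde, (unsigned long long)pt, count, incr);
}

/* Merge contiguous page-directory updates into spans. */
static void update_pdes_sketch(uint64_t pd_addr, const uint64_t *pt_addr,
			       unsigned npdes, uint32_t incr)
{
	uint64_t last_pde = ~0ULL, last_pt = ~0ULL;
	unsigned count = 0, pt_idx;

	for (pt_idx = 0; pt_idx < npdes; ++pt_idx) {
		uint64_t pde = pd_addr + pt_idx * 8;

		if (last_pde + 8 * count != pde ||
		    last_pt + (uint64_t)incr * count != pt_addr[pt_idx] ||
		    count == SKETCH_MAX_UPDATE) {
			if (count)
				emit(last_pde, last_pt, count, incr);
			count = 1;
			last_pde = pde;
			last_pt = pt_addr[pt_idx];
		} else {
			++count;
		}
	}
	if (count)
		emit(last_pde, last_pt, count, incr);
}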
-/**
- * amdgpu_vm_frag_ptes - add fragment information to PTEs
+/*
+ * amdgpu_vm_update_page_directory - make sure that page directory is valid
*
* @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
- * @pe_start: first PTE to handle
- * @pe_end: last PTE to handle
- * @addr: addr those PTEs should point to
- * @flags: hw mapping flags
+ * @vm: requested vm
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory.
+ * Returns 0 for success, error for failure.
*/
-static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
- struct amdgpu_vm_update_params
- *vm_update_params,
- uint64_t pe_start, uint64_t pe_end,
- uint64_t addr, uint32_t flags)
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
- /**
- * The MC L1 TLB supports variable sized pages, based on a fragment
- * field in the PTE. When this field is set to a non-zero value, page
- * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
- * flags are considered valid for all PTEs within the fragment range
- * and corresponding mappings are assumed to be physically contiguous.
- *
- * The L1 TLB can store a single PTE for the whole fragment,
- * significantly increasing the space available for translation
- * caching. This leads to large improvements in throughput when the
- * TLB is under pressure.
- *
- * The L2 TLB distributes small and large fragments into two
- * asymmetric partitions. The large fragment cache is significantly
- * larger. Thus, we try to use large fragments wherever possible.
- * Userspace can support this by aligning virtual base address and
- * allocation size to the fragment size.
- */
-
- /* SI and newer are optimized for 64KB */
- uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
- uint64_t frag_align = 0x80;
-
- uint64_t frag_start = ALIGN(pe_start, frag_align);
- uint64_t frag_end = pe_end & ~(frag_align - 1);
-
- unsigned count;
-
- /* Abort early if there isn't anything to do */
- if (pe_start == pe_end)
- return;
-
- /* system pages are non continuously */
- if (vm_update_params->src || vm_update_params->pages_addr ||
- !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
-
- count = (pe_end - pe_start) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
- addr, count, AMDGPU_GPU_PAGE_SIZE,
- flags);
- return;
- }
-
- /* handle the 4K area at the beginning */
- if (pe_start != frag_start) {
- count = (frag_start - pe_start) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
- count, AMDGPU_GPU_PAGE_SIZE, flags);
- addr += AMDGPU_GPU_PAGE_SIZE * count;
- }
-
- /* handle the area in the middle */
- count = (frag_end - frag_start) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
- AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
+ int r;
- /* handle the 4K area at the end */
- if (frag_end != pe_end) {
- addr += AMDGPU_GPU_PAGE_SIZE * count;
- count = (pe_end - frag_end) / 8;
- amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
- count, AMDGPU_GPU_PAGE_SIZE, flags);
- }
+ r = amdgpu_vm_update_pd_or_shadow(adev, vm, true);
+ if (r)
+ return r;
+ return amdgpu_vm_update_pd_or_shadow(adev, vm, false);
}
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
- * @adev: amdgpu_device pointer
- * @vm_update_params: see amdgpu_vm_update_params definition
+ * @params: see amdgpu_pte_update_params definition
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
@@ -797,16 +764,14 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
*
* Update the page tables in the range @start - @end.
*/
-static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
- struct amdgpu_vm_update_params
- *vm_update_params,
+static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
- uint64_t cur_pe_start, cur_pe_end, cur_dst;
+ uint64_t cur_pe_start, cur_nptes, cur_dst;
uint64_t addr; /* next GPU address to be updated */
uint64_t pt_idx;
struct amdgpu_bo *pt;
@@ -817,7 +782,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
addr = start;
pt_idx = addr >> amdgpu_vm_block_size;
pt = vm->page_tables[pt_idx].entry.robj;
-
+ if (params->shadow) {
+ if (!pt->shadow)
+ return;
+ pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ }
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
@@ -825,7 +794,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
cur_pe_start = amdgpu_bo_gpu_offset(pt);
cur_pe_start += (addr & mask) * 8;
- cur_pe_end = cur_pe_start + 8 * nptes;
+ cur_nptes = nptes;
cur_dst = dst;
/* for next ptb*/
@@ -836,6 +805,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
while (addr < end) {
pt_idx = addr >> amdgpu_vm_block_size;
pt = vm->page_tables[pt_idx].entry.robj;
+ if (params->shadow) {
+ if (!pt->shadow)
+ return;
+ pt = vm->page_tables[pt_idx].entry.robj->shadow;
+ }
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -845,19 +819,19 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
next_pe_start = amdgpu_bo_gpu_offset(pt);
next_pe_start += (addr & mask) * 8;
- if (cur_pe_end == next_pe_start) {
+ if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
+ ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
/* The next ptb is consecutive to current ptb.
- * Don't call amdgpu_vm_frag_ptes now.
+ * Don't call the update function now.
* Will update two ptbs together in future.
*/
- cur_pe_end += 8 * nptes;
+ cur_nptes += nptes;
} else {
- amdgpu_vm_frag_ptes(adev, vm_update_params,
- cur_pe_start, cur_pe_end,
- cur_dst, flags);
+ params->func(params, cur_pe_start, cur_dst, cur_nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
cur_pe_start = next_pe_start;
- cur_pe_end = next_pe_start + 8 * nptes;
+ cur_nptes = nptes;
cur_dst = dst;
}
@@ -866,8 +840,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
- amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
- cur_pe_end, cur_dst, flags);
+ params->func(params, cur_pe_start, cur_dst, cur_nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
+}
+
+/*
+ * amdgpu_vm_frag_ptes - add fragment information to PTEs
+ *
+ * @params: see amdgpu_pte_update_params definition
+ * @vm: requested vm
+ * @start: first PTE to handle
+ * @end: last PTE to handle
+ * @dst: addr those PTEs should point to
+ * @flags: hw mapping flags
+ */
+static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_vm *vm,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
+{
+ /**
+ * The MC L1 TLB supports variable sized pages, based on a fragment
+ * field in the PTE. When this field is set to a non-zero value, page
+ * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+ * flags are considered valid for all PTEs within the fragment range
+ * and corresponding mappings are assumed to be physically contiguous.
+ *
+ * The L1 TLB can store a single PTE for the whole fragment,
+ * significantly increasing the space available for translation
+ * caching. This leads to large improvements in throughput when the
+ * TLB is under pressure.
+ *
+ * The L2 TLB distributes small and large fragments into two
+ * asymmetric partitions. The large fragment cache is significantly
+ * larger. Thus, we try to use large fragments wherever possible.
+ * Userspace can support this by aligning virtual base address and
+ * allocation size to the fragment size.
+ */
+
+ /* SI and newer are optimized for 64KB */
+ uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
+ uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
+
+ uint64_t frag_start = ALIGN(start, frag_align);
+ uint64_t frag_end = end & ~(frag_align - 1);
+
+ /* system pages are not physically contiguous */
+ if (params->src || !(flags & AMDGPU_PTE_VALID) ||
+ (frag_start >= frag_end)) {
+
+ amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
+ return;
+ }
+
+ /* handle the 4K area at the beginning */
+ if (start != frag_start) {
+ amdgpu_vm_update_ptes(params, vm, start, frag_start,
+ dst, flags);
+ dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
+ }
+
+ /* handle the area in the middle */
+ amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
+ flags | frag_flags);
+
+ /* handle the 4K area at the end */
+ if (frag_end != end) {
+ dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
+ amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
+ }
}
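A quick worked example of the three-way split above, assuming AMDGPU_LOG2_PAGES_PER_FRAG is 4 (16 pages, i.e. 64 KiB fragments; the constant's real value lives in the amdgpu headers):

#include <stdint.h>
#include <stdio.h>

#define SKETCH_LOG2_PAGES_PER_FRAG 4 /* assumed value */

int main(void)
{
	uint64_t start = 3, end = 70; /* PTE indices to update */
	uint64_t frag_align = 1ULL << SKETCH_LOG2_PAGES_PER_FRAG;
	uint64_t frag_start = (start + frag_align - 1) & ~(frag_align - 1);
	uint64_t frag_end = end & ~(frag_align - 1);

	/* head [3,16) as 4K PTEs, middle [16,64) with the fragment
	 * flag set, tail [64,70) as 4K PTEs again */
	printf("head [%llu,%llu)\n",
	       (unsigned long long)start, (unsigned long long)frag_start);
	printf("mid  [%llu,%llu) + frag flag\n",
	       (unsigned long long)frag_start, (unsigned long long)frag_end);
	printf("tail [%llu,%llu)\n",
	       (unsigned long long)frag_end, (unsigned long long)end);
	return 0;
}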
/**
@@ -900,14 +941,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
- struct amdgpu_vm_update_params vm_update_params;
+ struct amdgpu_pte_update_params params;
struct fence *f = NULL;
int r;
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
- memset(&vm_update_params, 0, sizeof(vm_update_params));
- vm_update_params.src = src;
- vm_update_params.pages_addr = pages_addr;
+
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.src = src;
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
@@ -924,30 +970,53 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
- if (vm_update_params.src) {
+ if (src) {
/* only copy commands needed */
ndw += ncmds * 7;
- } else if (vm_update_params.pages_addr) {
- /* header for write data commands */
- ndw += ncmds * 4;
+ params.func = amdgpu_vm_do_copy_ptes;
+
+ } else if (pages_addr) {
+ /* copy commands needed */
+ ndw += ncmds * 7;
- /* body of write data command */
+ /* and also PTEs */
ndw += nptes * 2;
+ params.func = amdgpu_vm_do_copy_ptes;
+
} else {
/* set page commands needed */
ndw += ncmds * 10;
/* two extra commands for begin/end of fragment */
ndw += 2 * 10;
+
+ params.func = amdgpu_vm_do_set_ptes;
}
r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
if (r)
return r;
- vm_update_params.ib = &job->ibs[0];
+ params.ib = &job->ibs[0];
+
+ if (!src && pages_addr) {
+ uint64_t *pte;
+ unsigned i;
+
+ /* Put the PTEs at the end of the IB. */
+ i = ndw - nptes * 2;
+ pte = (uint64_t *)&(job->ibs->ptr[i]);
+ params.src = job->ibs->gpu_addr + i * 4;
+
+ for (i = 0; i < nptes; ++i) {
+ pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
+ AMDGPU_GPU_PAGE_SIZE);
+ pte[i] |= flags;
+ }
+ addr = 0;
+ }
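The staging block above relies on IB dwords being 4 bytes wide: nptes 64-bit PTE values occupy the last nptes * 2 dwords of the ndw-dword IB, and the GPU copy source falls out of the same offset. A hedged stand-alone sketch of that layout (the struct and names are hypothetical):

#include <stdint.h>

struct ib_sketch {
	uint32_t *ptr;     /* CPU view of the IB */
	uint64_t gpu_addr; /* GPU view of the same memory */
};

/* Write PTE values into the IB tail and return the GPU address the
 * copy command should read them from. The real IB allocation is
 * suitably aligned for the 64-bit stores. */
static uint64_t stage_ptes(struct ib_sketch *ib, unsigned ndw,
			   const uint64_t *values, unsigned nptes)
{
	unsigned i = ndw - nptes * 2; /* first dword of the tail */
	uint64_t *pte = (uint64_t *)&ib->ptr[i];
	unsigned n;

	for (n = 0; n < nptes; ++n)
		pte[n] = values[n];

	return ib->gpu_addr + i * 4; /* dwords are 4 bytes */
}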
r = amdgpu_sync_fence(adev, &job->sync, exclusive);
if (r)
@@ -962,11 +1031,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
- last + 1, addr, flags);
+ params.shadow = true;
+ amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
+ params.shadow = false;
+ amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
- amdgpu_ring_pad_ib(ring, vm_update_params.ib);
- WARN_ON(vm_update_params.ib->length_dw > ndw);
+ amdgpu_ring_pad_ib(ring, params.ib);
+ WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &f);
if (r)
@@ -1062,28 +1133,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @bo_va: requested BO and VM object
- * @mem: ttm mem
+ * @clear: if true clear the entries
*
* Fill in the page table entries for @bo_va.
* Returns 0 for success, -EINVAL for failure.
- *
- * Object have to be reserved and mutex must be locked!
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- struct ttm_mem_reg *mem)
+ bool clear)
{
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
+ struct ttm_mem_reg *mem;
struct fence *exclusive;
uint64_t addr;
int r;
- if (mem) {
+ if (clear) {
+ mem = NULL;
+ addr = 0;
+ exclusive = NULL;
+ } else {
struct ttm_dma_tt *ttm;
+ mem = &bo_va->bo->tbo.mem;
addr = (u64)mem->start << PAGE_SHIFT;
switch (mem->mem_type) {
case TTM_PL_TT:
@@ -1101,13 +1176,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
}
exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
- } else {
- addr = 0;
- exclusive = NULL;
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
+ gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+ adev == bo_va->bo->adev) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -1134,7 +1207,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->vm_status);
- if (!mem)
+ if (clear)
list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
@@ -1197,7 +1270,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
struct amdgpu_bo_va, vm_status);
spin_unlock(&vm->status_lock);
- r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, true);
if (r)
return r;
@@ -1342,7 +1415,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_SHADOW,
NULL, resv, &pt);
if (r)
goto error_free;
@@ -1354,10 +1428,20 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
r = amdgpu_vm_clear_bo(adev, vm, pt);
if (r) {
+ amdgpu_bo_unref(&pt->shadow);
amdgpu_bo_unref(&pt);
goto error_free;
}
+ if (pt->shadow) {
+ r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
+ if (r) {
+ amdgpu_bo_unref(&pt->shadow);
+ amdgpu_bo_unref(&pt);
+ goto error_free;
+ }
+ }
+
entry->robj = pt;
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
@@ -1541,7 +1625,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_SHADOW,
NULL, NULL, &vm->page_directory);
if (r)
goto error_free_sched_entity;
@@ -1551,14 +1636,25 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
goto error_free_page_directory;
r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
- amdgpu_bo_unreserve(vm->page_directory);
if (r)
- goto error_free_page_directory;
+ goto error_unreserve;
+
+ if (vm->page_directory->shadow) {
+ r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
+ if (r)
+ goto error_unreserve;
+ }
+
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+ amdgpu_bo_unreserve(vm->page_directory);
return 0;
+error_unreserve:
+ amdgpu_bo_unreserve(vm->page_directory);
+
error_free_page_directory:
+ amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
vm->page_directory = NULL;
@@ -1600,10 +1696,18 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
kfree(mapping);
}
- for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
- amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
+ for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
+ struct amdgpu_bo *pt = vm->page_tables[i].entry.robj;
+
+ if (!pt)
+ continue;
+
+ amdgpu_bo_unref(&pt->shadow);
+ amdgpu_bo_unref(&pt);
+ }
drm_free_large(vm->page_tables);
+ amdgpu_bo_unref(&vm->page_directory->shadow);
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
}
@@ -1654,5 +1758,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
fence_put(id->flushed_updates);
+ fence_put(id->last_flush);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 49a39b1a0a96..f7d236f95e74 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -497,7 +497,13 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
* SetPixelClock provides the dividers
*/
args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
- args.v6.ucPpll = ATOM_EXT_PLL1;
+ if (adev->asic_type == CHIP_TAHITI ||
+ adev->asic_type == CHIP_PITCAIRN ||
+ adev->asic_type == CHIP_VERDE ||
+ adev->asic_type == CHIP_OLAND)
+ args.v6.ucPpll = ATOM_PPLL0;
+ else
+ args.v6.ucPpll = ATOM_EXT_PLL1;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 7f85c2c1d681..f81068ba4cc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -88,7 +88,6 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
/* timeout */
if (args.v2.ucReplyStatus == 1) {
- DRM_DEBUG_KMS("dp_aux_ch timeout\n");
r = -ETIMEDOUT;
goto done;
}
@@ -339,22 +338,21 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
{
struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
- int ret, i;
+ int ret;
- for (i = 0; i < 7; i++) {
- ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg,
- DP_DPCD_SIZE);
- if (ret == DP_DPCD_SIZE) {
- memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+ ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV,
+ msg, DP_DPCD_SIZE);
+ if (ret == DP_DPCD_SIZE) {
+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
- dig_connector->dpcd);
+ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+ dig_connector->dpcd);
- amdgpu_atombios_dp_probe_oui(amdgpu_connector);
+ amdgpu_atombios_dp_probe_oui(amdgpu_connector);
- return 0;
- }
+ return 0;
}
+
dig_connector->dpcd[0] = 0;
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index bc56c8a181e6..b374653bd6cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -27,6 +27,7 @@
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
#define TARGET_HW_I2C_CLOCK 50
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a5c94b482459..5be788b269e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -4075,7 +4075,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
}
} else {
- if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ if (pi->uvd_enabled) {
pi->uvd_enabled = false;
pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
amdgpu_ci_send_msg_to_smc_with_parameter(adev,
@@ -5396,7 +5396,7 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- ci_dpm_powergate_uvd(adev, false);
+ ci_dpm_powergate_uvd(adev, true);
if (!amdgpu_ci_is_smc_running(adev))
return;
@@ -5874,7 +5874,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->pcie_dpm_key_disabled = 0;
pi->thermal_sclk_dpm_enabled = 0;
- pi->caps_sclk_ds = true;
+ if (amdgpu_sclk_deep_sleep_en)
+ pi->caps_sclk_ds = true;
+ else
+ pi->caps_sclk_ds = false;
pi->mclk_strobe_mode_threshold = 40000;
pi->mclk_stutter_mode_threshold = 40000;
@@ -6033,7 +6036,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->caps_dynamic_ac_timing = true;
- pi->uvd_power_gated = false;
+ pi->uvd_power_gated = true;
/* make sure dc limits are valid */
if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
@@ -6176,8 +6179,6 @@ static int ci_dpm_late_init(void *handle)
if (ret)
return ret;
- ci_dpm_powergate_uvd(adev, true);
-
return 0;
}
@@ -6235,6 +6236,8 @@ static int ci_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
ci_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4efc901f658c..a845b6a93b79 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -67,6 +67,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
+#include "dce_virtual.h"
/*
* Indirect registers accessor
@@ -962,12 +963,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
-{
- /* CIK does not support SR-IOV */
- return 0;
-}
-
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
@@ -1640,6 +1635,12 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
+static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1708,6 +1709,74 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1776,6 +1845,74 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1844,6 +1981,74 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1912,6 +2117,74 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1980,32 +2253,128 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gfx_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cik_sdma_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 4,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &uvd_v4_2_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v2_0_ip_funcs,
+ },
+};
+
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- adev->ip_blocks = bonaire_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
- break;
- case CHIP_HAWAII:
- adev->ip_blocks = hawaii_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
- break;
- case CHIP_KAVERI:
- adev->ip_blocks = kaveri_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
- break;
- case CHIP_KABINI:
- adev->ip_blocks = kabini_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
- break;
- case CHIP_MULLINS:
- adev->ip_blocks = mullins_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
+ if (adev->enable_virtual_display) {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->ip_blocks = bonaire_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd);
+ break;
+ case CHIP_HAWAII:
+ adev->ip_blocks = hawaii_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd);
+ break;
+ case CHIP_KAVERI:
+ adev->ip_blocks = kaveri_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd);
+ break;
+ case CHIP_KABINI:
+ adev->ip_blocks = kabini_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd);
+ break;
+ case CHIP_MULLINS:
+ adev->ip_blocks = mullins_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ adev->ip_blocks = bonaire_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks);
+ break;
+ case CHIP_HAWAII:
+ adev->ip_blocks = hawaii_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks);
+ break;
+ case CHIP_KAVERI:
+ adev->ip_blocks = kaveri_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks);
+ break;
+ case CHIP_KABINI:
+ adev->ip_blocks = kabini_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks);
+ break;
+ case CHIP_MULLINS:
+ adev->ip_blocks = mullins_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
}
return 0;
@@ -2015,13 +2384,13 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
.read_bios_from_rom = &cik_read_bios_from_rom,
+ .detect_hw_virtualization = cik_detect_hw_virtualization,
.read_register = &cik_read_register,
.reset = &cik_asic_reset,
.set_vga_state = &cik_vga_set_state,
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
- .get_virtual_caps = &cik_get_virtual_caps,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 77fdd9911c3c..cb952acc7133 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -695,24 +695,16 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
- SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
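For reference, the unrolled helper now emits exactly seven dwords per copy command, which is what the ndw += ncmds * 7 budget in amdgpu_vm_bo_update_mapping accounts for. A user-space sketch of the layout, with a stand-in header word (the real one comes from the SDMA_PACKET macro):

#include <stdint.h>

#define SKETCH_COPY_HDR 0x00000001u /* stand-in, not the real opcode */

/* One linear-copy command: header, byte count, endian word, 64-bit
 * source address, 64-bit destination address. */
static unsigned emit_copy_pte_sketch(uint32_t *ib, uint64_t pe,
				     uint64_t src, unsigned count)
{
	unsigned n = 0;

	ib[n++] = SKETCH_COPY_HDR;
	ib[n++] = count * 8;             /* bytes to copy */
	ib[n++] = 0;                     /* src/dst endian swap */
	ib[n++] = (uint32_t)src;         /* lower_32_bits(src) */
	ib[n++] = (uint32_t)(src >> 32); /* upper_32_bits(src) */
	ib[n++] = (uint32_t)pe;          /* lower_32_bits(pe) */
	ib[n++] = (uint32_t)(pe >> 32);  /* upper_32_bits(pe) */
	return n;                        /* always 7 dwords */
}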
/**
@@ -720,39 +712,27 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
-static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
- SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- value = amdgpu_vm_map_gart(pages_addr, addr);
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
}
}
@@ -768,40 +748,21 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
*
* Update the page tables using sDMA (CIK).
*/
-static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
- uint64_t pe,
+static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & AMDGPU_PTE_VALID)
- value = addr;
- else
- value = 0;
-
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count; /* number of entries */
}
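Likewise, the GENERATE_PTE_PDE form above is a fixed ten-dword command, matching the ncmds * 10 sizing (plus two extra commands for the fragment begin/end) in amdgpu_vm_bo_update_mapping. A sketch with a stand-in header word:

#include <stdint.h>

#define SKETCH_PTE_PDE_HDR 0x0000000cu /* stand-in, not the real opcode */

/* One generate-PTE/PDE command: header, 64-bit destination, 64-bit
 * mask, 64-bit first value, increment pair, entry count. */
static unsigned emit_set_pte_pde_sketch(uint32_t *ib, uint64_t pe,
					uint64_t addr, unsigned count,
					uint32_t incr, uint32_t flags)
{
	unsigned n = 0;

	ib[n++] = SKETCH_PTE_PDE_HDR;
	ib[n++] = (uint32_t)pe;           /* dst addr, low */
	ib[n++] = (uint32_t)(pe >> 32);   /* dst addr, high */
	ib[n++] = flags;                  /* mask */
	ib[n++] = 0;
	ib[n++] = (uint32_t)addr;         /* first value, low */
	ib[n++] = (uint32_t)(addr >> 32); /* first value, high */
	ib[n++] = incr;                   /* increment size */
	ib[n++] = 0;
	ib[n++] = count;                  /* number of entries */
	return n;                         /* always 10 dwords */
}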
/**
@@ -887,6 +848,22 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
+static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 7 + 4; /* cik_sdma_ring_emit_ib */
+}
+
+static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 6 + /* cik_sdma_ring_emit_hdp_flush */
+ 3 + /* cik_sdma_ring_emit_hdp_invalidate */
+ 6 + /* cik_sdma_ring_emit_pipeline_sync */
+ 12 + /* cik_sdma_ring_emit_vm_flush */
+ 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
+}
+
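These helpers give the ring layer fixed worst-case sizes to budget against before any packet is written. A hedged sketch of how such numbers might be combined when reserving ring space (the function and its callers are hypothetical, not part of the patch):

/* Check that a frame plus its IBs fits in the free dwords. */
static int ring_reserve_sketch(unsigned free_dw, unsigned num_ibs,
			       unsigned frame_size, unsigned emit_ib_size)
{
	unsigned need = frame_size + num_ibs * emit_ib_size;

	return free_dw >= need ? 0 : -1; /* the kernel would return -ENOSPC */
}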
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
@@ -1262,6 +1239,8 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
+ .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size,
+ .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index c4f6f00d62bc..8659852aea9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -562,4 +562,40 @@ enum {
MTYPE_NONCACHED = 3
};
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x) ((x) << 0)
+#define RB_MAP_PKR0_MASK (0x3 << 0)
+#define RB_MAP_PKR1(x) ((x) << 2)
+#define RB_MAP_PKR1_MASK (0x3 << 2)
+#define RB_XSEL2(x) ((x) << 4)
+#define RB_XSEL2_MASK (0x3 << 4)
+#define RB_XSEL (1 << 6)
+#define RB_YSEL (1 << 7)
+#define PKR_MAP(x) ((x) << 8)
+#define PKR_MAP_MASK (0x3 << 8)
+#define PKR_XSEL(x) ((x) << 10)
+#define PKR_XSEL_MASK (0x3 << 10)
+#define PKR_YSEL(x) ((x) << 12)
+#define PKR_YSEL_MASK (0x3 << 12)
+#define SC_MAP(x) ((x) << 16)
+#define SC_MAP_MASK (0x3 << 16)
+#define SC_XSEL(x) ((x) << 18)
+#define SC_XSEL_MASK (0x3 << 18)
+#define SC_YSEL(x) ((x) << 20)
+#define SC_YSEL_MASK (0x3 << 20)
+#define SE_MAP(x) ((x) << 24)
+#define SE_MAP_MASK (0x3 << 24)
+#define SE_XSEL(x) ((x) << 26)
+#define SE_XSEL_MASK (0x3 << 26)
+#define SE_YSEL(x) ((x) << 28)
+#define SE_YSEL_MASK (0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x) ((x) << 0)
+#define SE_PAIR_MAP_MASK (0x3 << 0)
+#define SE_PAIR_XSEL(x) ((x) << 2)
+#define SE_PAIR_XSEL_MASK (0x3 << 2)
+#define SE_PAIR_YSEL(x) ((x) << 4)
+#define SE_PAIR_YSEL_MASK (0x3 << 4)
+
#endif
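The new masks pack 2-bit fields into mmPA_SC_RASTER_CONFIG. A small stand-alone example of composing a value and reading one field back; the field values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define RB_MAP_PKR0(x) ((x) << 0) /* as defined in cikd.h above */
#define PKR_MAP(x)     ((x) << 8)
#define SE_MAP(x)      ((x) << 24)
#define SE_MAP_MASK    (0x3 << 24)

int main(void)
{
	uint32_t raster = RB_MAP_PKR0(2) | PKR_MAP(1) | SE_MAP(3);

	printf("SE_MAP field = %u\n", (raster & SE_MAP_MASK) >> 24);
	return 0;
}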
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 2a11413ed54a..3c082e143730 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -44,6 +44,7 @@
static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
+static void cz_dpm_fini(struct amdgpu_device *adev);
static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
{
@@ -350,6 +351,8 @@ static int cz_parse_power_table(struct amdgpu_device *adev)
ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
if (ps == NULL) {
+ for (j = 0; j < i; j++)
+ kfree(adev->pm.dpm.ps[j].ps_priv);
kfree(adev->pm.dpm.ps);
return -ENOMEM;
}
@@ -409,11 +412,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
ret = amdgpu_get_platform_caps(adev);
if (ret)
- return ret;
+ goto err;
ret = amdgpu_parse_extended_power_table(adev);
if (ret)
- return ret;
+ goto err;
pi->sram_end = SMC_RAM_END;
@@ -435,7 +438,11 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->caps_td_ramping = true;
pi->caps_tcp_ramping = true;
}
- pi->caps_sclk_ds = true;
+ if (amdgpu_sclk_deep_sleep_en)
+ pi->caps_sclk_ds = true;
+ else
+ pi->caps_sclk_ds = false;
+
pi->voting_clients = 0x00c00033;
pi->auto_thermal_throttling_enabled = true;
pi->bapm_enabled = false;
@@ -463,23 +470,26 @@ static int cz_dpm_init(struct amdgpu_device *adev)
ret = cz_parse_sys_info_table(adev);
if (ret)
- return ret;
+ goto err;
cz_patch_voltage_values(adev);
cz_construct_boot_state(adev);
ret = cz_parse_power_table(adev);
if (ret)
- return ret;
+ goto err;
ret = cz_process_firmware_header(adev);
if (ret)
- return ret;
+ goto err;
pi->dpm_enabled = true;
pi->uvd_dynamic_pg = false;
return 0;
+err:
+ cz_dpm_fini(adev);
+ return ret;
}
static void cz_dpm_fini(struct amdgpu_device *adev)
@@ -668,17 +678,12 @@ static void cz_reset_ap_mask(struct amdgpu_device *adev)
struct cz_power_info *pi = cz_get_pi(adev);
pi->active_process_mask = 0;
-
}
static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
void **table)
{
- int ret = 0;
-
- ret = cz_smu_download_pptable(adev, table);
-
- return ret;
+ return cz_smu_download_pptable(adev, table);
}
static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
@@ -818,9 +823,9 @@ static void cz_init_sclk_limit(struct amdgpu_device *adev)
pi->sclk_dpm.hard_min_clk = 0;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
level = cz_get_argument(adev);
- if (level < table->count)
+ if (level < table->count) {
clock = table->entries[level].clk;
- else {
+ } else {
DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].clk;
}
@@ -846,9 +851,9 @@ static void cz_init_uvd_limit(struct amdgpu_device *adev)
pi->uvd_dpm.hard_min_clk = 0;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
level = cz_get_argument(adev);
- if (level < table->count)
+ if (level < table->count) {
clock = table->entries[level].vclk;
- else {
+ } else {
DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].vclk;
}
@@ -874,9 +879,9 @@ static void cz_init_vce_limit(struct amdgpu_device *adev)
pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
level = cz_get_argument(adev);
- if (level < table->count)
+ if (level < table->count) {
clock = table->entries[level].ecclk;
- else {
+ } else {
/* future BIOS would fix this error */
DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].ecclk;
@@ -903,9 +908,9 @@ static void cz_init_acp_limit(struct amdgpu_device *adev)
pi->acp_dpm.hard_min_clk = 0;
cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
level = cz_get_argument(adev);
- if (level < table->count)
+ if (level < table->count) {
clock = table->entries[level].clk;
- else {
+ } else {
DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
clock = table->entries[table->count - 1].clk;
}
@@ -930,7 +935,6 @@ static void cz_init_sclk_threshold(struct amdgpu_device *adev)
struct cz_power_info *pi = cz_get_pi(adev);
pi->low_sclk_interrupt_threshold = 0;
-
}
static void cz_dpm_setup_asic(struct amdgpu_device *adev)
@@ -1203,7 +1207,7 @@ static int cz_enable_didt(struct amdgpu_device *adev, bool enable)
int ret;
if (pi->caps_sq_ramping || pi->caps_db_ramping ||
- pi->caps_td_ramping || pi->caps_tcp_ramping) {
+ pi->caps_td_ramping || pi->caps_tcp_ramping) {
if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) {
ret = cz_disable_cgpg(adev);
if (ret) {
@@ -1277,7 +1281,7 @@ static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
ps->force_high = false;
ps->need_dfs_bypass = true;
pi->video_start = new_rps->dclk || new_rps->vclk ||
- new_rps->evclk || new_rps->ecclk;
+ new_rps->evclk || new_rps->ecclk;
if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
@@ -1335,7 +1339,6 @@ static int cz_dpm_enable(struct amdgpu_device *adev)
}
cz_reset_acp_boot_level(adev);
-
cz_update_current_ps(adev, adev->pm.dpm.boot_ps);
return 0;
@@ -1511,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle,
return 0;
}
-/* borrowed from KV, need future unify */
static int cz_dpm_get_temperature(struct amdgpu_device *adev)
{
int actual_temp = 0;
- uint32_t temp = RREG32_SMC(0xC0300E0C);
+ uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
+ uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
- if (temp)
+ if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
actual_temp = 1000 * ((temp / 8) - 49);
+ else
+ actual_temp = 1000 * (temp / 8);
return actual_temp;
}
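A worked example of the corrected conversion: a raw CUR_TEMP of 584 with the range-select bit set yields 1000 * ((584 / 8) - 49) = 24000 millidegrees (24 °C), while the same raw value with the bit clear yields 1000 * (584 / 8) = 73000. The replaced code applied the -49 offset unconditionally and left actual_temp at zero whenever the raw register read back as zero.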
@@ -1665,7 +1670,6 @@ static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
struct amdgpu_ps *ps = &pi->requested_rps;
cz_update_current_ps(adev, ps);
-
}
static int cz_dpm_force_highest(struct amdgpu_device *adev)
@@ -2108,29 +2112,58 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
/* disable clockgating so we can properly shut down the block */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n");
+ return;
+ }
+
/* shutdown the UVD block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
- /* XXX: check for errors */
+
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n");
+ return;
+ }
}
cz_update_uvd_dpm(adev, gate);
- if (pi->caps_uvd_pg)
+ if (pi->caps_uvd_pg) {
/* power off the UVD block */
- cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+ ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF);
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n");
+ return;
+ }
+ }
} else {
if (pi->caps_uvd_pg) {
/* power on the UVD block */
if (pi->uvd_dynamic_pg)
- cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
+ ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1);
else
- cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+ ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0);
+
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n");
+ return;
+ }
+
/* re-init the UVD block */
ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
+
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n");
+ return;
+ }
+
/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
- /* XXX: check for errors */
+ if (ret) {
+ DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n");
+ return;
+ }
}
cz_update_uvd_dpm(adev, gate);
}
@@ -2168,7 +2201,6 @@ static int cz_update_vce_dpm(struct amdgpu_device *adev)
/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
if (pi->caps_stable_power_state) {
pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk;
-
} else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */
/* leave it as set by user */
/*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index ac7fee7b7eca..aed7033c0973 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -29,6 +29,8 @@
#include "cz_smumgr.h"
#include "smu_ucode_xfer_cz.h"
#include "amdgpu_ucode.h"
+#include "cz_dpm.h"
+#include "vi_dpm.h"
#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
@@ -48,7 +50,7 @@ static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
return priv;
}
-int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
+static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
{
int i;
u32 content = 0, tmp;
@@ -99,13 +101,6 @@ int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
return 0;
}
-int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
- u16 msg, u32 parameter)
-{
- WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
- return cz_send_msg_to_smc_async(adev, msg);
-}
-
int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
u16 msg, u32 parameter)
{
@@ -140,7 +135,7 @@ int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
return 0;
}
-int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
u32 value, u32 limit)
{
int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c1b04e9aab57..9260caef74fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -221,7 +221,7 @@ static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
*/
static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
- unsigned i = 0;
+ unsigned i = 100;
if (crtc >= adev->mode_info.num_crtc)
return;
@@ -233,14 +233,16 @@ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
* wait for another frame.
*/
while (dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v10_0_is_counter_moving(adev, crtc))
break;
}
}
while (!dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v10_0_is_counter_moving(adev, crtc))
break;
}
@@ -425,16 +427,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- continue;
- }
-
switch (amdgpu_connector->hpd.hpd) {
case AMDGPU_HPD_1:
idx = 0;
@@ -458,6 +450,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* Don't try to enable hpd on eDP or LVDS: this avoids breaking the
+ * aux dp channel on iMacs and helps (but does not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * It also avoids interrupt storms during dpms.
+ */
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ continue;
+ }
+
tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
@@ -646,8 +651,8 @@ static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
if (save->crtc_enabled[i]) {
tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
+ if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
+ tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
@@ -712,6 +717,45 @@ static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
+static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ num_crtc = 6;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v10_0_disable_dce(struct amdgpu_device *adev)
+{
+ /* Disable VGA rendering and any enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v10_0_set_vga_render_state(adev, false);
+
+ /* Disable CRTCs */
+ for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -2063,7 +2107,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels;
u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2071,6 +2115,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2090,23 +2135,23 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = amdgpu_fb->obj;
- rbo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(rbo);
+ fb_location = amdgpu_bo_gpu_offset(abo);
} else {
- r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
- amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
@@ -2182,8 +2227,9 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -2275,17 +2321,17 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
/* Bytes per pixel may have changed */
@@ -2698,7 +2744,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.gamma_set = dce_v10_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config,
.destroy = dce_v10_0_crtc_destroy,
- .page_flip = amdgpu_crtc_page_flip,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
};
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2765,16 +2811,16 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r))
- DRM_ERROR("failed to reserve rbo before unpin\n");
+ DRM_ERROR("failed to reserve abo before unpin\n");
else {
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
}
/* disable the GRPH */
@@ -2962,10 +3008,11 @@ static int dce_v10_0_early_init(void *handle)
dce_v10_0_set_display_funcs(adev);
dce_v10_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_FIJI:
case CHIP_TONGA:
- adev->mode_info.num_crtc = 6; /* XXX 7??? */
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
break;
@@ -3104,10 +3151,6 @@ static int dce_v10_0_hw_fini(void *handle)
static int dce_v10_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v10_0_hw_fini(handle);
}
@@ -3118,8 +3161,6 @@ static int dce_v10_0_resume(void *handle)
ret = dce_v10_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3141,6 +3182,13 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
+static bool dce_v10_0_check_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return dce_v10_0_is_display_hung(adev);
+}
+
static int dce_v10_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@@ -3512,6 +3560,7 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.resume = dce_v10_0_resume,
.is_idle = dce_v10_0_is_idle,
.wait_for_idle = dce_v10_0_wait_for_idle,
+ .check_soft_reset = dce_v10_0_check_soft_reset,
.soft_reset = dce_v10_0_soft_reset,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
index 1bfa48ddd8a6..e3dc04d293e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs dce_v10_0_ip_funcs;
+void dce_v10_0_disable_dce(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d4bf133908b1..367739bd1927 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -443,16 +443,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- continue;
- }
-
switch (amdgpu_connector->hpd.hpd) {
case AMDGPU_HPD_1:
idx = 0;
@@ -476,6 +466,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
+			 * aux dp channel on iMacs; this helps (but does not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * and also avoids interrupt storms during dpms.
+ */
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ continue;
+ }
+
tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
@@ -673,6 +676,53 @@ static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
+static int dce_v11_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ num_crtc = 3;
+ break;
+ case CHIP_STONEY:
+ num_crtc = 2;
+ break;
+ case CHIP_POLARIS10:
+ num_crtc = 6;
+ break;
+ case CHIP_POLARIS11:
+ num_crtc = 5;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v11_0_disable_dce(struct amdgpu_device *adev)
+{
+	/* Disable VGA rendering and any enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v11_0_set_vga_render_state(adev, false);
+
+		/* Disable the CRTCs */
+ for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -2038,7 +2088,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels;
u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
@@ -2046,6 +2096,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2065,23 +2116,23 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = amdgpu_fb->obj;
- rbo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(rbo);
+ fb_location = amdgpu_bo_gpu_offset(abo);
} else {
- r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
- amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
@@ -2157,8 +2208,9 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -2250,17 +2302,17 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
/* Bytes per pixel may have changed */
@@ -2708,7 +2760,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
.gamma_set = dce_v11_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config,
.destroy = dce_v11_0_crtc_destroy,
- .page_flip = amdgpu_crtc_page_flip,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
};
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2775,16 +2827,16 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r))
- DRM_ERROR("failed to reserve rbo before unpin\n");
+ DRM_ERROR("failed to reserve abo before unpin\n");
else {
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
}
/* disable the GRPH */
@@ -2999,24 +3051,22 @@ static int dce_v11_0_early_init(void *handle)
dce_v11_0_set_display_funcs(adev);
dce_v11_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_CARRIZO:
- adev->mode_info.num_crtc = 3;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_STONEY:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_POLARIS10:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_POLARIS11:
- adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
@@ -3109,6 +3159,7 @@ static int dce_v11_0_sw_fini(void *handle)
dce_v11_0_afmt_fini(adev);
+ drm_mode_config_cleanup(adev->ddev);
adev->mode_info.mode_config_initialized = false;
return 0;
@@ -3164,10 +3215,6 @@ static int dce_v11_0_hw_fini(void *handle)
static int dce_v11_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v11_0_hw_fini(handle);
}
@@ -3178,8 +3225,6 @@ static int dce_v11_0_resume(void *handle)
ret = dce_v11_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
index 84e4618f5253..1f58a65ba2ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs dce_v11_0_ip_funcs;
+void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
new file mode 100644
index 000000000000..15f9fc0514b2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -0,0 +1,3170 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_atombios.h"
+#include "atombios_crtc.h"
+#include "atombios_encoders.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#include "si/si_reg.h"
+#include "si/sid.h"
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+
+static const u32 crtc_offsets[6] =
+{
+ SI_CRTC0_REGISTER_OFFSET,
+ SI_CRTC1_REGISTER_OFFSET,
+ SI_CRTC2_REGISTER_OFFSET,
+ SI_CRTC3_REGISTER_OFFSET,
+ SI_CRTC4_REGISTER_OFFSET,
+ SI_CRTC5_REGISTER_OFFSET
+};
+
+static const uint32_t dig_offsets[] = {
+ SI_CRTC0_REGISTER_OFFSET,
+ SI_CRTC1_REGISTER_OFFSET,
+ SI_CRTC2_REGISTER_OFFSET,
+ SI_CRTC3_REGISTER_OFFSET,
+ SI_CRTC4_REGISTER_OFFSET,
+ SI_CRTC5_REGISTER_OFFSET,
+ (0x13830 - 0x7030) >> 2,
+};
+
+static const struct {
+ uint32_t reg;
+ uint32_t vblank;
+ uint32_t vline;
+ uint32_t hpd;
+} interrupt_status_offsets[6] = { {
+ .reg = DISP_INTERRUPT_STATUS,
+ .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE2,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE3,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE4,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
+}, {
+ .reg = DISP_INTERRUPT_STATUS_CONTINUE5,
+ .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
+ .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
+ .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
+} };
+
+static const uint32_t hpd_int_control_offsets[6] = {
+ DC_HPD1_INT_CONTROL,
+ DC_HPD2_INT_CONTROL,
+ DC_HPD3_INT_CONTROL,
+ DC_HPD4_INT_CONTROL,
+ DC_HPD5_INT_CONTROL,
+ DC_HPD6_INT_CONTROL,
+};
+
+static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
+ u32 block_offset, u32 reg)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
+ return 0;
+}
+
+static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
+ u32 block_offset, u32 reg, u32 v)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
+}
+
+static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
+{
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v6_0_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+ unsigned i = 100;
+
+ if (crtc >= adev->mode_info.num_crtc)
+ return;
+
+ if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (dce_v6_0_is_in_vblank(adev, crtc)) {
+ if (i++ == 100) {
+ i = 0;
+ if (!dce_v6_0_is_counter_moving(adev, crtc))
+ break;
+ }
+ }
+
+ while (!dce_v6_0_is_in_vblank(adev, crtc)) {
+ if (i++ == 100) {
+ i = 0;
+ if (!dce_v6_0_is_counter_moving(adev, crtc))
+ break;
+ }
+ }
+}
+
+static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else
+ return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ /* Enable pflip interrupts */
+ for (i = 0; i < adev->mode_info.num_crtc; i++)
+ amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ /* Disable pflip interrupts */
+ for (i = 0; i < adev->mode_info.num_crtc; i++)
+ amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
+/**
+ * dce_v6_0_page_flip - pageflip callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ * @async: asynchronous flip
+ *
+ * Does the actual pageflip (evergreen+): selects hsync (async) or
+ * vsync flipping, programs the new scanout base address, and posts
+ * the write so the double buffered update takes place in hardware.
+ */
+static void dce_v6_0_page_flip(struct amdgpu_device *adev,
+ int crtc_id, u64 crtc_base, bool async)
+{
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+ /* flip at hsync for async, default is vsync */
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+ EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+ /* update the scanout addresses */
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* post the write */
+ RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
+}
+
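+/*
+ * A worked example of the 64-bit base split above, assuming a
+ * hypothetical crtc_base of 0x123456000: upper_32_bits() yields 0x1 for
+ * the ..._SURFACE_ADDRESS_HIGH register and the (u32) cast yields
+ * 0x23456000 for the low register; the final RREG32 only posts the
+ * writes.
+ */
+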
+static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+ *vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+ *position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ return 0;
+}
+
+/**
+ * dce_v6_0_hpd_sense - hpd sense callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ bool connected = false;
+
+ switch (hpd) {
+ case AMDGPU_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case AMDGPU_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+
+ return connected;
+}
+
+/**
+ * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = dce_v6_0_hpd_sense(adev, hpd);
+
+ switch (hpd) {
+ case AMDGPU_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
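+/*
+ * The six cases above differ only in the register offset; a minimal
+ * table-driven sketch of the same update, assuming the per-pin
+ * interrupt-control registers are uniform and indexed by the
+ * hpd_int_control_offsets[] table defined earlier in this file, would
+ * look like:
+ *
+ *	u32 tmp = RREG32(hpd_int_control_offsets[hpd]);
+ *
+ *	if (connected)
+ *		tmp &= ~DC_HPDx_INT_POLARITY;
+ *	else
+ *		tmp |= DC_HPDx_INT_POLARITY;
+ *	WREG32(hpd_int_control_offsets[hpd], tmp);
+ */
+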
+/**
+ * dce_v6_0_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+ DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ break;
+ case AMDGPU_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
+			 * aux dp channel on iMacs; this helps (but does not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * and also avoids interrupt storms during dpms.
+ */
+ u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_2:
+ dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_3:
+ dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_4:
+ dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_5:
+ dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_6:
+ dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+ break;
+ default:
+ continue;
+ }
+
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ continue;
+ }
+
+ dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+ amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+ }
+}
+
+/**
+ * dce_v6_0_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ break;
+ case AMDGPU_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+ amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+ }
+}
+
+static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+ return SI_DC_GPIO_HPD_A;
+}
+
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
+{
+ DRM_INFO("xxxx: dce_v6_0_is_display_hung ----no imp!!!!!\n");
+
+ return true;
+}
+
+static u32 evergreen_get_vblank_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else
+ return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 crtc_enabled, tmp, frame_count;
+ int i, j;
+
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+ /* disable VGA render */
+ WREG32(VGA_RENDER_CONTROL, 0);
+
+ /* blank the display controllers */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+ if (crtc_enabled) {
+ save->crtc_enabled[i] = true;
+ tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+
+ if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+ dce_v6_0_vblank_wait(adev, i);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ /* wait for the next frame */
+ frame_count = evergreen_get_vblank_counter(adev, i);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (evergreen_get_vblank_counter(adev, i) != frame_count)
+ break;
+ udelay(1);
+ }
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ } else {
+ save->crtc_enabled[i] = false;
+ }
+ }
+}
+
+static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 tmp;
+ int i, j;
+
+ /* update crtc base addresses */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(adev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(adev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)adev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)adev->mc.vram_start);
+ }
+
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
+
+ /* unlock regs and wait for update */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x7) != 3) {
+ tmp &= ~0x7;
+ tmp |= 0x3;
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < adev->usec_timeout; j++) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
+ /* Unlock vga access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
+ bool render)
+{
+ if (!render)
+ WREG32(R_000300_VGA_RENDER_CONTROL,
+ RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
+static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
+{
+
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+ int bpc = 0;
+ u32 tmp = 0;
+ enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
+
+ if (connector) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ bpc = amdgpu_connector_get_monitor_bpc(connector);
+ dither = amdgpu_connector->dither;
+ }
+
+ /* LVDS FMT is set up by atom */
+ if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return;
+
+ if (bpc == 0)
+ return;
+
+
+ switch (bpc) {
+ case 6:
+ if (dither == AMDGPU_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN);
+ else
+ tmp |= FMT_TRUNCATE_EN;
+ break;
+ case 8:
+ if (dither == AMDGPU_FMT_DITHER_ENABLE)
+ /* XXX sort out optimal dither settings */
+ tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+ FMT_RGB_RANDOM_ENABLE |
+ FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+ else
+ tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+ break;
+ case 10:
+ default:
+ /* not needed */
+ break;
+ }
+
+ WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+}
+
+/**
+ * si_get_number_of_dram_channels - get the number of dram channels
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up the number of video ram channels (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the number of dram channels
+ */
+static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+ switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
+ case 0:
+ default:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 8;
+ case 4:
+ return 3;
+ case 5:
+ return 6;
+ case 6:
+ return 10;
+ case 7:
+ return 12;
+ case 8:
+ return 16;
+ }
+}
+
+struct dce6_wm_params {
+ u32 dram_channels; /* number of dram channels */
+ u32 yclk; /* bandwidth per dram data pin in kHz */
+ u32 sclk; /* engine clock in kHz */
+ u32 disp_clk; /* display clock in kHz */
+ u32 src_width; /* viewport width */
+ u32 active_time; /* active display time in ns */
+ u32 blank_time; /* blank time in ns */
+ bool interlaced; /* mode is interlaced */
+ fixed20_12 vsc; /* vertical scale ratio */
+ u32 num_heads; /* number of active crtcs */
+ u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+ u32 lb_size; /* line buffer allocated to pipe */
+ u32 vtaps; /* vertical scaler taps */
+};
+
+/**
+ * dce_v6_0_dram_bandwidth - get the dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the raw dram bandwidth (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate raw DRAM Bandwidth */
+ fixed20_12 dram_efficiency; /* 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ dram_efficiency.full = dfixed_const(7);
+ dram_efficiency.full = dfixed_div(dram_efficiency, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
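+/*
+ * For instance, with an assumed yclk of 1,000,000 kHz and two dram
+ * channels, the helper above computes (1,000,000 / 1000) * (2 * 4) *
+ * 0.7 = 5600 MBytes/s of raw dram bandwidth.
+ */
+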
+/**
+ * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dram bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the dram bandwidth for display in MBytes/s
+ */
+static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+ /* Calculate DRAM Bandwidth and the part allocated to display. */
+ fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+ fixed20_12 yclk, dram_channels, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ yclk.full = dfixed_const(wm->yclk);
+ yclk.full = dfixed_div(yclk, a);
+ dram_channels.full = dfixed_const(wm->dram_channels * 4);
+ a.full = dfixed_const(10);
+ disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
+ disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+ bandwidth.full = dfixed_mul(dram_channels, yclk);
+ bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+ return dfixed_trunc(bandwidth);
+}
+
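+/*
+ * With the same assumed yclk of 1,000,000 kHz and two channels, the
+ * worst-case 0.3 display allocation leaves (1,000,000 / 1000) * (2 * 4)
+ * * 0.3 = 2400 MBytes/s of dram bandwidth for the display.
+ */
+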
+/**
+ * dce_v6_0_data_return_bandwidth - get the data return bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the data return bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the data return bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the display Data return Bandwidth */
+ fixed20_12 return_efficiency; /* 0.8 */
+ fixed20_12 sclk, bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ sclk.full = dfixed_const(wm->sclk);
+ sclk.full = dfixed_div(sclk, a);
+ a.full = dfixed_const(10);
+ return_efficiency.full = dfixed_const(8);
+ return_efficiency.full = dfixed_div(return_efficiency, a);
+ a.full = dfixed_const(32);
+ bandwidth.full = dfixed_mul(a, sclk);
+ bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the dmif bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the dmif bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the DMIF Request Bandwidth */
+ fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+ fixed20_12 disp_clk, bandwidth;
+ fixed20_12 a, b;
+
+ a.full = dfixed_const(1000);
+ disp_clk.full = dfixed_const(wm->disp_clk);
+ disp_clk.full = dfixed_div(disp_clk, a);
+ a.full = dfixed_const(32);
+ b.full = dfixed_mul(a, disp_clk);
+
+ a.full = dfixed_const(10);
+ disp_clk_request_efficiency.full = dfixed_const(8);
+ disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+ bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
+
+ return dfixed_trunc(bandwidth);
+}
+
+/**
+ * dce_v6_0_available_bandwidth - get the min available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the min available bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the min available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the available bandwidth. The display can use this temporarily but not on average. */
+ u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
+ u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
+ u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
+
+ return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
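+/*
+ * Continuing the assumed numbers from above (5600 MBytes/s of raw dram
+ * bandwidth), an sclk of 800,000 kHz gives a data return bandwidth of
+ * 32 * 800 * 0.8 = 20480 MBytes/s, a disp_clk of 300,000 kHz gives a
+ * dmif request bandwidth of 32 * 300 * 0.8 = 7680 MBytes/s, and the
+ * available bandwidth is therefore min(5600, 20480, 7680) = 5600.
+ */
+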
+/**
+ * dce_v6_0_average_bandwidth - get the average available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the average available bandwidth used for display (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the average available bandwidth in MBytes/s
+ */
+static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
+{
+ /* Calculate the display mode Average Bandwidth
+ * DisplayMode should contain the source and destination dimensions,
+ * timing, etc.
+ */
+ fixed20_12 bpp;
+ fixed20_12 line_time;
+ fixed20_12 src_width;
+ fixed20_12 bandwidth;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1000);
+ line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+ line_time.full = dfixed_div(line_time, a);
+ bpp.full = dfixed_const(wm->bytes_per_pixel);
+ src_width.full = dfixed_const(wm->src_width);
+ bandwidth.full = dfixed_mul(src_width, bpp);
+ bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+ bandwidth.full = dfixed_div(bandwidth, line_time);
+
+ return dfixed_trunc(bandwidth);
+}
+
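+/*
+ * As a rough example, a 1920 pixel wide source at 4 bytes per pixel,
+ * vsc = 1 and a total line time (active + blank) of 14800 ns works out
+ * to 1920 * 4 / 14.8 us, i.e. roughly 519 MBytes/s of average bandwidth.
+ */
+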
+/**
+ * dce_v6_0_latency_watermark - get the latency watermark
+ *
+ * @wm: watermark calculation data
+ *
+ * Calculate the latency watermark (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns the latency watermark in ns
+ */
+static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
+{
+ /* First calculate the latency in ns */
+ u32 mc_latency = 2000; /* 2000 ns. */
+ u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
+ u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+ u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+ u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+ u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+ (wm->num_heads * cursor_line_pair_return_time);
+ u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+ u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+ u32 tmp, dmif_size = 12288;
+ fixed20_12 a, b, c;
+
+ if (wm->num_heads == 0)
+ return 0;
+
+ a.full = dfixed_const(2);
+ b.full = dfixed_const(1);
+ if ((wm->vsc.full > a.full) ||
+ ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+ (wm->vtaps >= 5) ||
+ ((wm->vsc.full >= a.full) && wm->interlaced))
+ max_src_lines_per_dst_line = 4;
+ else
+ max_src_lines_per_dst_line = 2;
+
+ a.full = dfixed_const(available_bandwidth);
+ b.full = dfixed_const(wm->num_heads);
+ a.full = dfixed_div(a, b);
+
+ b.full = dfixed_const(mc_latency + 512);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(b, c);
+
+ c.full = dfixed_const(dmif_size);
+ b.full = dfixed_div(c, b);
+
+ tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(wm->disp_clk);
+ b.full = dfixed_div(c, b);
+ c.full = dfixed_const(wm->bytes_per_pixel);
+ b.full = dfixed_mul(b, c);
+
+ lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+ a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+ b.full = dfixed_const(1000);
+ c.full = dfixed_const(lb_fill_bw);
+ b.full = dfixed_div(c, b);
+ a.full = dfixed_div(a, b);
+ line_fill_time = dfixed_trunc(a);
+
+ if (line_fill_time < wm->active_time)
+ return latency;
+ else
+ return latency + (line_fill_time - wm->active_time);
+}
+
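+/*
+ * With the assumed numbers used above (5600 MBytes/s available, a
+ * disp_clk of 300,000 kHz and one head), the fixed part of the latency
+ * is roughly 2000 ns of mc_latency + ~1550 ns of other-head chunk and
+ * cursor return time + ~133 ns of dc pipe latency, i.e. about 3.7 us;
+ * the result is then stretched by any shortfall of line_fill_time
+ * versus the active time.
+ */
+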
+/**
+ * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
+ * average and available dram bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * dram bandwidth (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+ if (dce_v6_0_average_bandwidth(wm) <=
+ (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
+ * average and available bandwidth
+ *
+ * @wm: watermark calculation data
+ *
+ * Check if the display average bandwidth fits in the display
+ * available bandwidth (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+ if (dce_v6_0_average_bandwidth(wm) <=
+ (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
+ return true;
+ else
+ return false;
+}
+
+/**
+ * dce_v6_0_check_latency_hiding - check latency hiding
+ *
+ * @wm: watermark calculation data
+ *
+ * Check latency hiding (SI).
+ * Used for display watermark bandwidth calculations
+ * Returns true if the display fits, false if not.
+ */
+static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
+{
+ u32 lb_partitions = wm->lb_size / wm->src_width;
+ u32 line_time = wm->active_time + wm->blank_time;
+ u32 latency_tolerant_lines;
+ u32 latency_hiding;
+ fixed20_12 a;
+
+ a.full = dfixed_const(1);
+ if (wm->vsc.full > a.full)
+ latency_tolerant_lines = 1;
+ else {
+ if (lb_partitions <= (wm->vtaps + 1))
+ latency_tolerant_lines = 1;
+ else
+ latency_tolerant_lines = 2;
+ }
+
+ latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+ if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
+ return true;
+ else
+ return false;
+}
+
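+/*
+ * Example with assumed numbers: lb_size = 16384 and src_width = 1920
+ * give 8 line buffer partitions; with vsc <= 1 and vtaps = 1 that
+ * tolerates 2 lines, so latency_hiding = 2 * line_time + blank_time.
+ * At a 14800 ns line with 1900 ns of blank that is a ~31.5 us budget
+ * for the latency watermark.
+ */
+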
+/**
+ * dce_v6_0_program_watermarks - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @lb_size: line buffer size
+ * @num_heads: number of display controllers in use
+ *
+ * Calculate and program the display watermarks for the
+ * selected display controller (SI).
+ */
+static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
+ struct amdgpu_crtc *amdgpu_crtc,
+ u32 lb_size, u32 num_heads)
+{
+ struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
+ struct dce6_wm_params wm_low, wm_high;
+ u32 dram_channels;
+ u32 pixel_period;
+ u32 line_time = 0;
+ u32 latency_watermark_a = 0, latency_watermark_b = 0;
+ u32 priority_a_mark = 0, priority_b_mark = 0;
+ u32 priority_a_cnt = PRIORITY_OFF;
+ u32 priority_b_cnt = PRIORITY_OFF;
+ u32 tmp, arb_control3;
+ fixed20_12 a, b, c;
+
+ if (amdgpu_crtc->base.enabled && num_heads && mode) {
+ pixel_period = 1000000 / (u32)mode->clock;
+ line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ priority_a_cnt = 0;
+ priority_b_cnt = 0;
+
+ dram_channels = si_get_number_of_dram_channels(adev);
+
+ /* watermark for high clocks */
+ if (adev->pm.dpm_enabled) {
+ wm_high.yclk =
+ amdgpu_dpm_get_mclk(adev, false) * 10;
+ wm_high.sclk =
+ amdgpu_dpm_get_sclk(adev, false) * 10;
+ } else {
+ wm_high.yclk = adev->pm.current_mclk * 10;
+ wm_high.sclk = adev->pm.current_sclk * 10;
+ }
+
+ wm_high.disp_clk = mode->clock;
+ wm_high.src_width = mode->crtc_hdisplay;
+ wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.blank_time = line_time - wm_high.active_time;
+ wm_high.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm_high.interlaced = true;
+ wm_high.vsc = amdgpu_crtc->vsc;
+ wm_high.vtaps = 1;
+ if (amdgpu_crtc->rmx_type != RMX_OFF)
+ wm_high.vtaps = 2;
+ wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_high.lb_size = lb_size;
+ wm_high.dram_channels = dram_channels;
+ wm_high.num_heads = num_heads;
+
+ if (adev->pm.dpm_enabled) {
+ /* watermark for low clocks */
+ wm_low.yclk =
+ amdgpu_dpm_get_mclk(adev, true) * 10;
+ wm_low.sclk =
+ amdgpu_dpm_get_sclk(adev, true) * 10;
+ } else {
+ wm_low.yclk = adev->pm.current_mclk * 10;
+ wm_low.sclk = adev->pm.current_sclk * 10;
+ }
+
+ wm_low.disp_clk = mode->clock;
+ wm_low.src_width = mode->crtc_hdisplay;
+ wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.blank_time = line_time - wm_low.active_time;
+ wm_low.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm_low.interlaced = true;
+ wm_low.vsc = amdgpu_crtc->vsc;
+ wm_low.vtaps = 1;
+ if (amdgpu_crtc->rmx_type != RMX_OFF)
+ wm_low.vtaps = 2;
+ wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_low.lb_size = lb_size;
+ wm_low.dram_channels = dram_channels;
+ wm_low.num_heads = num_heads;
+
+ /* set for high clocks */
+ latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
+ /* set for low clocks */
+ latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+ !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+ !dce_v6_0_check_latency_hiding(&wm_high) ||
+ (adev->mode_info.disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ priority_a_cnt |= PRIORITY_ALWAYS_ON;
+ priority_b_cnt |= PRIORITY_ALWAYS_ON;
+ }
+ if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+ !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+ !dce_v6_0_check_latency_hiding(&wm_low) ||
+ (adev->mode_info.disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ priority_a_cnt |= PRIORITY_ALWAYS_ON;
+ priority_b_cnt |= PRIORITY_ALWAYS_ON;
+ }
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_a);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_a_mark = dfixed_trunc(c);
+ priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+ a.full = dfixed_const(1000);
+ b.full = dfixed_const(mode->clock);
+ b.full = dfixed_div(b, a);
+ c.full = dfixed_const(latency_watermark_b);
+ c.full = dfixed_mul(c, b);
+ c.full = dfixed_mul(c, amdgpu_crtc->hsc);
+ c.full = dfixed_div(c, a);
+ a.full = dfixed_const(16);
+ c.full = dfixed_div(c, a);
+ priority_b_mark = dfixed_trunc(c);
+ priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+ }
+
+ /* select wm A */
+ arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+ tmp = arb_control3;
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(1);
+ WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* select wm B */
+ tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+ tmp &= ~LATENCY_WATERMARK_MASK(3);
+ tmp |= LATENCY_WATERMARK_MASK(2);
+ WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
+ (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+ LATENCY_HIGH_WATERMARK(line_time)));
+ /* restore original selection */
+ WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
+
+ /* write the priority marks */
+ WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
+ WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
+
+ /* save values for DPM */
+ amdgpu_crtc->line_time = line_time;
+ amdgpu_crtc->wm_high = latency_watermark_a;
+}
+
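+/*
+ * The priority mark arithmetic above converts a latency watermark into
+ * 16-pixel units: mark = latency_ns * pixel_clock_MHz * hsc / 1000 / 16.
+ * With an assumed 4000 ns watermark, a 148,500 kHz pixel clock and
+ * hsc = 1, that is 4000 * 148.5 / 1000 = 594 pixels, i.e. a mark of 37.
+ */
+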
+/* watermark setup */
+static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
+ struct amdgpu_crtc *amdgpu_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *other_mode)
+{
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
+ /*
+ * Line Buffer Setup
+ * There are 3 line buffers, each one shared by 2 display controllers.
+ * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers. The partitioning is done via one of four
+	 * preset allocations specified in bits 21:20 (only presets 0 and 2
+	 * are used here):
+ * 0 - half lb
+ * 2 - whole lb, other crtc must be disabled
+ */
+ /* this can get tricky if we have two large displays on a paired group
+ * of crtcs. Ideally for multiple large displays we'd assign them to
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (amdgpu_crtc->base.enabled && mode) {
+ if (other_mode) {
+ tmp = 0; /* 1/2 */
+ buffer_alloc = 1;
+ } else {
+ tmp = 2; /* whole */
+ buffer_alloc = 2;
+ }
+ } else {
+ tmp = 0;
+ buffer_alloc = 0;
+ }
+
+ WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
+ DC_LB_MEMORY_CONFIG(tmp));
+
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
+ if (amdgpu_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+ default:
+ return 4096 * 2;
+ case 2:
+ return 8192 * 2;
+ }
+ }
+
+ /* controller not enabled, so no lb used */
+ return 0;
+}
+
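+/*
+ * dce_v6_0_bandwidth_update() below calls this helper for both crtcs of
+ * a paired group: if both have a mode, each gets the half-lb preset and
+ * an lb_size of 4096 * 2; if only one does, it gets the whole line
+ * buffer and 8192 * 2; a disabled controller reports 0.
+ */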
+
+/**
+ * dce_v6_0_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (SI).
+ */
+static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
+{
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ u32 num_heads = 0, lb_size;
+ int i;
+
+ if (!adev->mode_info.mode_config_initialized)
+ return;
+
+ amdgpu_update_display_priority(adev);
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->mode_info.crtcs[i]->base.enabled)
+ num_heads++;
+ }
+ for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
+ mode0 = &adev->mode_info.crtcs[i]->base.mode;
+ mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
+ lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
+ dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
+ lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
+ dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
+ }
+}
+/*
+static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
+{
+ int i;
+ u32 offset, tmp;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ offset = adev->mode_info.audio.pin[i].offset;
+ tmp = RREG32_AUDIO_ENDPT(offset,
+ AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+ if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+ adev->mode_info.audio.pin[i].connected = false;
+ else
+ adev->mode_info.audio.pin[i].connected = true;
+ }
+
+}
+
+static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
+{
+ int i;
+
+ dce_v6_0_audio_get_connected_pins(adev);
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ if (adev->mode_info.audio.pin[i].connected)
+ return &adev->mode_info.audio.pin[i];
+ }
+ DRM_ERROR("No connected audio pins found!\n");
+ return NULL;
+}
+
+static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+{
+ struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
+
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->offset;
+
+ WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+ AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+
+}
+
+static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
+
+}
+*/
+static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
+ struct amdgpu_audio_pin *pin,
+ bool enable)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
+}
+
+static const u32 pin_offsets[7] =
+{
+ (0x1780 - 0x1780),
+ (0x1786 - 0x1780),
+ (0x178c - 0x1780),
+ (0x1792 - 0x1780),
+ (0x1798 - 0x1780),
+ (0x179d - 0x1780),
+ (0x17a4 - 0x1780),
+};
+
+static int dce_v6_0_audio_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
+{
+}
+
+/*
+static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
+}
+*/
+/*
+ * build a HDMI Video Info Frame
+ */
+/*
+static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
+ void *buffer, size_t size)
+{
+ DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
+}
+
+static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+ DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
+}
+*/
+/*
+ * update the info frames with the data from the current display mode
+ */
+static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
+}
+
+static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+ if (!dig || !dig->afmt)
+ return;
+
+	/* silently return if already in the requested state */
+ if (enable && dig->afmt->enabled)
+ return;
+ if (!enable && !dig->afmt->enabled)
+ return;
+
+ if (!enable && dig->afmt->pin) {
+ dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
+ dig->afmt->pin = NULL;
+ }
+
+ dig->afmt->enabled = enable;
+
+ DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
+ enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
+}
+
+static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < adev->mode_info.num_dig; i++)
+ adev->mode_info.afmt[i] = NULL;
+
+ /* DCE6 has audio blocks tied to DIG encoders */
+ for (i = 0; i < adev->mode_info.num_dig; i++) {
+ adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
+ if (adev->mode_info.afmt[i]) {
+ adev->mode_info.afmt[i]->offset = dig_offsets[i];
+ adev->mode_info.afmt[i]->id = i;
+ } else {
+ for (j = 0; j < i; j++) {
+ kfree(adev->mode_info.afmt[j]);
+ adev->mode_info.afmt[j] = NULL;
+ }
+ DRM_ERROR("Out of memory allocating afmt table\n");
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->mode_info.num_dig; i++) {
+ kfree(adev->mode_info.afmt[i]);
+ adev->mode_info.afmt[i] = NULL;
+ }
+}
+
+static const u32 vga_control_regs[6] =
+{
+ AVIVO_D1VGA_CONTROL,
+ AVIVO_D2VGA_CONTROL,
+ EVERGREEN_D3VGA_CONTROL,
+ EVERGREEN_D4VGA_CONTROL,
+ EVERGREEN_D5VGA_CONTROL,
+ EVERGREEN_D6VGA_CONTROL,
+};
+
+static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ u32 vga_control;
+
+ vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
+ WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
+}
+
+static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
+}
+
+static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_framebuffer *target_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *abo;
+ uint64_t fb_location, tiling_flags;
+ uint32_t fb_format, fb_pitch_pixels, pipe_config;
+ u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+ u32 viewport_w, viewport_h;
+ int r;
+	bool bypass_lut = false;
+	char *format_name;
+
+ /* no fb bound */
+ if (!atomic && !crtc->primary->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+
+ if (atomic) {
+ amdgpu_fb = to_amdgpu_framebuffer(fb);
+ target_fb = fb;
+ } else {
+ amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ target_fb = crtc->primary->fb;
+ }
+
+ /* If atomic, assume fb object is pinned & idle & fenced and
+ * just update base pointers
+ */
+ obj = amdgpu_fb->obj;
+ abo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(abo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ if (atomic) {
+ fb_location = amdgpu_bo_gpu_offset(abo);
+ } else {
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(abo);
+ return -EINVAL;
+ }
+ }
+
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
+
+ switch (target_fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+ break;
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_BGRA5551:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_RGB565:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ /* fbs with more than 8 bpc need to bypass the hw lut to retain precision */
+ bypass_lut = true;
+ break;
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_BGRA1010102:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ /* fbs with more than 8 bpc need to bypass the hw lut to retain precision */
+ bypass_lut = true;
+ break;
+ default:
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
+ return -EINVAL;
+ }
+
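+ /* for tiled surfaces the macrotile geometry (bank width/height, aspect,
+ * tile split and bank count) is programmed into GRPH_CONTROL as well
+ */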
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
+ unsigned bankw, bankh, mtaspect, tile_split, num_banks;
+
+ bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+ num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+ fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+ fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+ fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+ } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+ }
+
+ pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+ fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
+
+ dce_v6_0_vga_enable(crtc, false);
+
+ /* Make sure surface address is updated at vertical blank rather than
+ * horizontal blank
+ */
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+ WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
+
+ /*
+ * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the
+ * LUT for > 8 bpc scanout to avoid truncating fb indices to the 8 MSBs
+ * and retain full precision throughout the pipeline.
+ */
+ WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
+ (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+ ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+ if (bypass_lut)
+ DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+ WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+
+ dce_v6_0_grph_enable(crtc, true);
+
+ WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+ target_fb->height);
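+ /* align the viewport start: x to a multiple of 4, y to a multiple of 2
+ * (presumably a hw alignment requirement)
+ */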
+ x &= ~3;
+ y &= ~1;
+ WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
+ (x << 16) | y);
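+ /* the viewport height has to stay even; round odd display heights up */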
+ viewport_w = crtc->mode.hdisplay;
+ viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+
+ WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+ (viewport_w << 16) | viewport_h);
+
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
+
+ if (!atomic && fb && fb != crtc->primary->fb) {
+ amdgpu_fb = to_amdgpu_framebuffer(fb);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
+ if (unlikely(r != 0))
+ return r;
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
+ }
+
+ /* Bytes per pixel may have changed */
+ dce_v6_0_bandwidth_update(adev);
+
+ return 0;
+}
+
+static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+}
+
+static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
+
+ WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+ NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+ WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+ NI_GRPH_PRESCALE_BYPASS);
+ WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+ NI_OVL_PRESCALE_BYPASS);
+ WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+ NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+ WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
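+ /* upload all 256 gamma entries, one packed 10:10:10 RGB word each */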
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+ (amdgpu_crtc->lut_r[i] << 20) |
+ (amdgpu_crtc->lut_g[i] << 10) |
+ (amdgpu_crtc->lut_b[i] << 0));
+ }
+
+ WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+ NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+ WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+ NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+ WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+ NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+ WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+ (NI_OUTPUT_CSC_GRPH_MODE(0) |
+ NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+ /* XXX match this to the depth of the crtc fmt block, move to modeset? */
+ WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
+}
+
+static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
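+ /* link B of a UNIPHY pair maps to the odd DIG instance; UNIPHY3 only
+ * exposes a single link
+ */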
+ switch (amdgpu_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ return dig->linkb ? 1 : 0;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ return dig->linkb ? 3 : 2;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return dig->linkb ? 5 : 4;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ return 6;
+ default:
+ DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+ return 0;
+ }
+}
+
+/**
+ * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders. For non-DP
+ * monitors a dedicated PPLL must be used. If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself. If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ */
+static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ u32 pll_in_use;
+ int pll;
+
+ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
+ if (adev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ else
+ return ATOM_PPLL0;
+ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+
+ /* otherwise, pick a free PPLL: PPLL1 or PPLL2 */
+ pll_in_use = amdgpu_pll_get_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
+}
+
+static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ uint32_t cur_lock;
+
+ cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+ else
+ cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+ WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+}
+
+static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+
+ WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+ upper_32_bits(amdgpu_crtc->cursor_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+ lower_32_bits(amdgpu_crtc->cursor_addr));
+
+ WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
+ EVERGREEN_CURSOR_EN |
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+}
+
+static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ int xorigin = 0, yorigin = 0;
+ int w = amdgpu_crtc->cursor_width;
+
+ /* avivo cursors are offset into the total surface */
+ x += crtc->x;
+ y += crtc->y;
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
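+ /* the cursor position registers are unsigned, so clamp negative
+ * coordinates to zero and shift the hotspot to compensate
+ */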
+ if (x < 0) {
+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+ y = 0;
+ }
+
+ WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+ WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
+
+ amdgpu_crtc->cursor_x = x;
+ amdgpu_crtc->cursor_y = y;
+ return 0;
+}
+
+static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ int ret;
+
+ dce_v6_0_lock_cursor(crtc, true);
+ ret = dce_v6_0_cursor_move_locked(crtc, x, y);
+ dce_v6_0_lock_cursor(crtc, false);
+
+ return ret;
+}
+
+static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height,
+ int32_t hot_x,
+ int32_t hot_y)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *aobj;
+ int ret;
+
+ if (!handle) {
+ /* turn off cursor */
+ dce_v6_0_hide_cursor(crtc);
+ obj = NULL;
+ goto unpin;
+ }
+
+ if ((width > amdgpu_crtc->max_cursor_width) ||
+ (height > amdgpu_crtc->max_cursor_height)) {
+ DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+ return -ENOENT;
+ }
+
+ aobj = gem_to_amdgpu_bo(obj);
+ ret = amdgpu_bo_reserve(aobj, false);
+ if (ret != 0) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ amdgpu_bo_unreserve(aobj);
+ if (ret) {
+ DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+ }
+
+ amdgpu_crtc->cursor_width = width;
+ amdgpu_crtc->cursor_height = height;
+
+ dce_v6_0_lock_cursor(crtc, true);
+
+ if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ hot_y != amdgpu_crtc->cursor_hot_y) {
+ int x, y;
+
+ x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+ y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+ dce_v6_0_cursor_move_locked(crtc, x, y);
+
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
+ }
+
+ dce_v6_0_show_cursor(crtc);
+ dce_v6_0_lock_cursor(crtc, false);
+
+unpin:
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ ret = amdgpu_bo_reserve(aobj, false);
+ if (likely(ret == 0)) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ }
+
+ amdgpu_crtc->cursor_bo = obj;
+ return 0;
+}
+
+static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo) {
+ dce_v6_0_lock_cursor(crtc, true);
+
+ dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+ amdgpu_crtc->cursor_y);
+
+ dce_v6_0_show_cursor(crtc);
+ dce_v6_0_lock_cursor(crtc, false);
+ }
+}
+
+static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int i;
+
+ /* userspace palettes are always correct as is */
+ for (i = 0; i < size; i++) {
+ amdgpu_crtc->lut_r[i] = red[i] >> 6;
+ amdgpu_crtc->lut_g[i] = green[i] >> 6;
+ amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+ }
+ dce_v6_0_crtc_load_lut(crtc);
+
+ return 0;
+}
+
+static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
+ .cursor_set2 = dce_v6_0_crtc_cursor_set2,
+ .cursor_move = dce_v6_0_crtc_cursor_move,
+ .gamma_set = dce_v6_0_crtc_gamma_set,
+ .set_config = amdgpu_crtc_set_config,
+ .destroy = dce_v6_0_crtc_destroy,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ amdgpu_crtc->enabled = true;
+ amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
+ amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
+ /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
+ amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+ drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ dce_v6_0_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ if (amdgpu_crtc->enabled)
+ amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
+ amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
+ amdgpu_crtc->enabled = false;
+ break;
+ }
+ /* adjust pm to dpms */
+ amdgpu_pm_compute_clocks(adev);
+}
+
+static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
+{
+ /* disable crtc pair power gating before programming */
+ amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
+ amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
+ dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
+{
+ dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
+}
+
+static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_atom_ss ss;
+ int i;
+
+ dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->primary->fb) {
+ int r;
+ struct amdgpu_framebuffer *amdgpu_fb;
+ struct amdgpu_bo *abo;
+
+ amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve abo before unpin\n");
+ else {
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
+ }
+ }
+ /* disable the GRPH */
+ dce_v6_0_grph_enable(crtc, false);
+
+ amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->mode_info.crtcs[i] &&
+ adev->mode_info.crtcs[i]->enabled &&
+ i != amdgpu_crtc->crtc_id &&
+ amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
+ /* another crtc is using this pll; don't turn
+ * it off
+ */
+ goto done;
+ }
+ }
+
+ switch (amdgpu_crtc->pll_id) {
+ case ATOM_PPLL1:
+ case ATOM_PPLL2:
+ /* disable the ppll */
+ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ break;
+ default:
+ break;
+ }
+done:
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->adjusted_clock = 0;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+}
+
+static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (!amdgpu_crtc->adjusted_clock)
+ return -EINVAL;
+
+ amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+ amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
+ dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
+ amdgpu_atombios_crtc_scaler_setup(crtc);
+ dce_v6_0_cursor_reset(crtc);
+ /* update the hw mode for dpm */
+ amdgpu_crtc->hw_mode = *adjusted_mode;
+
+ return 0;
+}
+
+static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ amdgpu_crtc->encoder = encoder;
+ amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+ break;
+ }
+ }
+ if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ return false;
+ }
+ if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ return false;
+ if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
+ return false;
+ /* pick pll */
+ amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
+ /* if we can't get a PPLL for a non-DP encoder, fail */
+ if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
+ !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+ return false;
+
+ return true;
+}
+
+static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
+ .dpms = dce_v6_0_crtc_dpms,
+ .mode_fixup = dce_v6_0_crtc_mode_fixup,
+ .mode_set = dce_v6_0_crtc_mode_set,
+ .mode_set_base = dce_v6_0_crtc_set_base,
+ .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
+ .prepare = dce_v6_0_crtc_prepare,
+ .commit = dce_v6_0_crtc_commit,
+ .load_lut = dce_v6_0_crtc_load_lut,
+ .disable = dce_v6_0_crtc_disable,
+};
+
+static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ int i;
+
+ amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+ (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (amdgpu_crtc == NULL)
+ return -ENOMEM;
+
+ drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+ amdgpu_crtc->crtc_id = index;
+ adev->mode_info.crtcs[index] = amdgpu_crtc;
+
+ amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
+ amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
+ adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+
+ for (i = 0; i < 256; i++) {
+ amdgpu_crtc->lut_r[i] = i << 2;
+ amdgpu_crtc->lut_g[i] = i << 2;
+ amdgpu_crtc->lut_b[i] = i << 2;
+ }
+
+ amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->adjusted_clock = 0;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int dce_v6_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
+ adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
+
+ dce_v6_0_set_display_funcs(adev);
+ dce_v6_0_set_irq_funcs(adev);
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_OLAND:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dce_v6_0_sw_init(void *handle)
+{
+ int r, i;
+ bool ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+ if (r)
+ return r;
+ }
+
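+ /* pageflip interrupt sources for the six crtcs sit at the even IDs 8..18 */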
+ for (i = 8; i < 20; i += 2) {
+ r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+ }
+
+ /* HPD hotplug */
+ r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+ if (r)
+ return r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev->ddev->mode_config.async_page_flip = true;
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ /* allocate crtcs */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = dce_v6_0_crtc_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
+ if (ret)
+ amdgpu_print_display_setup(adev->ddev);
+ else
+ return -EINVAL;
+
+ /* setup afmt */
+ r = dce_v6_0_afmt_init(adev);
+ if (r)
+ return r;
+
+ r = dce_v6_0_audio_init(adev);
+ if (r)
+ return r;
+
+ drm_kms_helper_poll_init(adev->ddev);
+
+ return r;
+}
+
+static int dce_v6_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ kfree(adev->mode_info.bios_hardcoded_edid);
+
+ drm_kms_helper_poll_fini(adev->ddev);
+
+ dce_v6_0_audio_fini(adev);
+ dce_v6_0_afmt_fini(adev);
+
+ drm_mode_config_cleanup(adev->ddev);
+ adev->mode_info.mode_config_initialized = false;
+
+ return 0;
+}
+
+static int dce_v6_0_hw_init(void *handle)
+{
+ int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* init dig PHYs, disp eng pll */
+ amdgpu_atombios_encoder_init_dig(adev);
+ amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+
+ /* initialize hpd */
+ dce_v6_0_hpd_init(adev);
+
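+ /* keep all audio pins disabled until an encoder brings one up */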
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+ }
+
+ dce_v6_0_pageflip_interrupt_init(adev);
+
+ return 0;
+}
+
+static int dce_v6_0_hw_fini(void *handle)
+{
+ int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ dce_v6_0_hpd_fini(adev);
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+ }
+
+ dce_v6_0_pageflip_interrupt_fini(adev);
+
+ return 0;
+}
+
+static int dce_v6_0_suspend(void *handle)
+{
+ return dce_v6_0_hw_fini(handle);
+}
+
+static int dce_v6_0_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret;
+
+ ret = dce_v6_0_hw_init(handle);
+
+ /* turn on the BL */
+ if (adev->mode_info.bl_encoder) {
+ u8 bl_level = amdgpu_display_backlight_get_level(adev,
+ adev->mode_info.bl_encoder);
+ amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
+ bl_level);
+ }
+
+ return ret;
+}
+
+static bool dce_v6_0_is_idle(void *handle)
+{
+ return true;
+}
+
+static int dce_v6_0_wait_for_idle(void *handle)
+{
+ return 0;
+}
+
+static int dce_v6_0_soft_reset(void *handle)
+{
+ DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
+ return 0;
+}
+
+static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ u32 reg_block, interrupt_mask;
+
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ switch (crtc) {
+ case 0:
+ reg_block = SI_CRTC0_REGISTER_OFFSET;
+ break;
+ case 1:
+ reg_block = SI_CRTC1_REGISTER_OFFSET;
+ break;
+ case 2:
+ reg_block = SI_CRTC2_REGISTER_OFFSET;
+ break;
+ case 3:
+ reg_block = SI_CRTC3_REGISTER_OFFSET;
+ break;
+ case 4:
+ reg_block = SI_CRTC4_REGISTER_OFFSET;
+ break;
+ case 5:
+ reg_block = SI_CRTC5_REGISTER_OFFSET;
+ break;
+ default:
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ interrupt_mask = RREG32(INT_MASK + reg_block);
+ interrupt_mask &= ~VBLANK_INT_MASK;
+ WREG32(INT_MASK + reg_block, interrupt_mask);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ interrupt_mask = RREG32(INT_MASK + reg_block);
+ interrupt_mask |= VBLANK_INT_MASK;
+ WREG32(INT_MASK + reg_block, interrupt_mask);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+
+}
+
+static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+ switch (type) {
+ case AMDGPU_HPD_1:
+ dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_2:
+ dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_3:
+ dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_4:
+ dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_5:
+ dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_6:
+ dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL;
+ break;
+ default:
+ DRM_DEBUG("invalid hdp %d\n", type);
+ return 0;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CRTC_IRQ_VBLANK1:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK2:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK3:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK4:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK5:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VBLANK6:
+ dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE1:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE2:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE3:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE4:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE5:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
+ break;
+ case AMDGPU_CRTC_IRQ_VLINE6:
+ dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned crtc = entry->src_id - 1;
+ uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
+ unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+
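+ /* src_data selects the event within this crtc: 0 = vblank, 1 = vline */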
+ switch (entry->src_data) {
+ case 0: /* vblank */
+ if (disp_int & interrupt_status_offsets[crtc].vblank)
+ WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ if (amdgpu_irq_enabled(adev, source, irq_type))
+ drm_handle_vblank(adev->ddev, crtc);
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ break;
+ case 1: /* vline */
+ if (disp_int & interrupt_status_offsets[crtc].vline)
+ WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ else
+ DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+ DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+ break;
+ default:
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+ break;
+ }
+
+ return 0;
+}
+
+static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 reg;
+
+ if (type >= adev->mode_info.num_crtc) {
+ DRM_ERROR("invalid pageflip crtc %d\n", type);
+ return -EINVAL;
+ }
+
+ reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
+ if (state == AMDGPU_IRQ_STATE_DISABLE)
+ WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+ reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+ else
+ WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+ reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+
+ return 0;
+}
+
+static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned long flags;
+ unsigned crtc_id;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_flip_work *works;
+
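+ /* recover the crtc index from the interleaved pageflip source IDs (8..18) */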
+ crtc_id = (entry->src_id - 8) >> 1;
+
+ if (crtc_id >= adev->mode_info.num_crtc) {
+ DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+ return -EINVAL;
+ }
+ amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+ if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
+ GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+ WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
+ GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+
+ /* the IRQ can fire before the crtc is fully initialized */
+ if (amdgpu_crtc == NULL)
+ return 0;
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ works = amdgpu_crtc->pflip_works;
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+ "AMDGPU_FLIP_SUBMITTED(%d)\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return 0;
+ }
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ amdgpu_crtc->pflip_works = NULL;
+
+ /* wake up userspace */
+ if (works->event)
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ schedule_work(&works->unpin_work);
+
+ return 0;
+}
+
+static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t disp_int, mask, int_control, tmp;
+ unsigned hpd;
+
+ if (entry->src_data >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+ return 0;
+ }
+
+ hpd = entry->src_data;
+ disp_int = RREG32(interrupt_status_offsets[hpd].reg);
+ mask = interrupt_status_offsets[hpd].hpd;
+ int_control = hpd_int_control_offsets[hpd];
+
+ if (disp_int & mask) {
+ tmp = RREG32(int_control);
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+ WREG32(int_control, tmp);
+ schedule_work(&adev->hotplug_work);
+ DRM_INFO("IH: HPD%d\n", hpd + 1);
+ }
+
+ return 0;
+}
+
+static int dce_v6_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dce_v6_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs dce_v6_0_ip_funcs = {
+ .name = "dce_v6_0",
+ .early_init = dce_v6_0_early_init,
+ .late_init = NULL,
+ .sw_init = dce_v6_0_sw_init,
+ .sw_fini = dce_v6_0_sw_fini,
+ .hw_init = dce_v6_0_hw_init,
+ .hw_fini = dce_v6_0_hw_fini,
+ .suspend = dce_v6_0_suspend,
+ .resume = dce_v6_0_resume,
+ .is_idle = dce_v6_0_is_idle,
+ .wait_for_idle = dce_v6_0_wait_for_idle,
+ .soft_reset = dce_v6_0_soft_reset,
+ .set_clockgating_state = dce_v6_0_set_clockgating_state,
+ .set_powergating_state = dce_v6_0_set_powergating_state,
+};
+
+static void
+dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->pixel_clock = adjusted_mode->clock;
+
+ /* need to call this here rather than in prepare() since we need some crtc info */
+ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ /* set scaler clears this on some chips */
+ dce_v6_0_set_interleave(encoder->crtc, mode);
+
+ if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ dce_v6_0_afmt_enable(encoder, true);
+ dce_v6_0_afmt_setmode(encoder, adjusted_mode);
+ }
+}
+
+static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+
+ if ((amdgpu_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
+ ENCODER_OBJECT_ID_NONE)) {
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ if (dig) {
+ dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
+ if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
+ dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
+ }
+ }
+
+ amdgpu_atombios_scratch_regs_lock(adev, true);
+
+ if (connector) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+ /* select the clock/data port if it uses a router */
+ if (amdgpu_connector->router.cd_valid)
+ amdgpu_i2c_router_select_cd_port(amdgpu_connector);
+
+ /* turn eDP panel on for mode set */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_atombios_encoder_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ }
+
+ /* this is needed for the pll/ss setup to work correctly in some cases */
+ amdgpu_atombios_encoder_set_crtc_source(encoder);
+ /* set up the FMT blocks */
+ dce_v6_0_program_fmt(encoder);
+}
+
+static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ /* need to call this here as we need the crtc set up */
+ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ amdgpu_atombios_scratch_regs_lock(adev, false);
+}
+
+static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig;
+
+ amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (amdgpu_atombios_encoder_is_digital(encoder)) {
+ if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ dce_v6_0_afmt_enable(encoder, false);
+ dig = amdgpu_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
+ amdgpu_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
+ .dpms = dce_v6_0_ext_dpms,
+ .mode_fixup = dce_v6_0_ext_mode_fixup,
+ .prepare = dce_v6_0_ext_prepare,
+ .mode_set = dce_v6_0_ext_mode_set,
+ .commit = dce_v6_0_ext_commit,
+ .disable = dce_v6_0_ext_disable,
+ /* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
+ .dpms = amdgpu_atombios_encoder_dpms,
+ .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+ .prepare = dce_v6_0_encoder_prepare,
+ .mode_set = dce_v6_0_encoder_mode_set,
+ .commit = dce_v6_0_encoder_commit,
+ .disable = dce_v6_0_encoder_disable,
+ .detect = amdgpu_atombios_encoder_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
+ .dpms = amdgpu_atombios_encoder_dpms,
+ .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
+ .prepare = dce_v6_0_encoder_prepare,
+ .mode_set = dce_v6_0_encoder_mode_set,
+ .commit = dce_v6_0_encoder_commit,
+ .detect = amdgpu_atombios_encoder_dac_detect,
+};
+
+static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
+ kfree(amdgpu_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
+ .destroy = dce_v6_0_encoder_destroy,
+};
+
+static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (amdgpu_encoder->encoder_enum == encoder_enum) {
+ amdgpu_encoder->devices |= supported_device;
+ return;
+ }
+ }
+
+ /* add a new one */
+ amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+ if (!amdgpu_encoder)
+ return;
+
+ encoder = &amdgpu_encoder->base;
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ encoder->possible_crtcs = 0x1;
+ break;
+ case 2:
+ default:
+ encoder->possible_crtcs = 0x3;
+ break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
+
+ amdgpu_encoder->enc_priv = NULL;
+ amdgpu_encoder->encoder_enum = encoder_enum;
+ amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ amdgpu_encoder->devices = supported_device;
+ amdgpu_encoder->rmx_type = RMX_OFF;
+ amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+ amdgpu_encoder->is_ext_encoder = false;
+ amdgpu_encoder->caps = caps;
+
+ switch (amdgpu_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ amdgpu_encoder->rmx_type = RMX_FULL;
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
+ amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
+ } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+ } else {
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
+ }
+ drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
+ break;
+ case ENCODER_OBJECT_ID_SI170B:
+ case ENCODER_OBJECT_ID_CH7303:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+ case ENCODER_OBJECT_ID_TITFP513:
+ case ENCODER_OBJECT_ID_VT1623:
+ case ENCODER_OBJECT_ID_HDMI_SI1930:
+ case ENCODER_OBJECT_ID_TRAVIS:
+ case ENCODER_OBJECT_ID_NUTMEG:
+ /* these are handled by the primary encoders */
+ amdgpu_encoder->is_ext_encoder = true;
+ if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
+ else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ else
+ drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
+ break;
+ }
+}
+
+static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
+ .set_vga_render_state = &dce_v6_0_set_vga_render_state,
+ .bandwidth_update = &dce_v6_0_bandwidth_update,
+ .vblank_get_counter = &dce_v6_0_vblank_get_counter,
+ .vblank_wait = &dce_v6_0_vblank_wait,
+ .is_display_hung = &dce_v6_0_is_display_hung,
+ .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
+ .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
+ .hpd_sense = &dce_v6_0_hpd_sense,
+ .hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
+ .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
+ .page_flip = &dce_v6_0_page_flip,
+ .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
+ .add_encoder = &dce_v6_0_encoder_add,
+ .add_connector = &amdgpu_connector_add,
+ .stop_mc_access = &dce_v6_0_stop_mc_access,
+ .resume_mc_access = &dce_v6_0_resume_mc_access,
+};
+
+static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dce_v6_0_display_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
+ .set = dce_v6_0_set_crtc_interrupt_state,
+ .process = dce_v6_0_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
+ .set = dce_v6_0_set_pageflip_interrupt_state,
+ .process = dce_v6_0_pageflip_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
+ .set = dce_v6_0_set_hpd_interrupt_state,
+ .process = dce_v6_0_hpd_irq,
+};
+
+static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
+
+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
+
+ adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
index c031ff99fe3e..6a5528105bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,22 +21,9 @@
*
*/
-#ifndef TONGA_SMUMGR_H
-#define TONGA_SMUMGR_H
+#ifndef __DCE_V6_0_H__
+#define __DCE_V6_0_H__
-#include "tonga_ppsmc.h"
-
-int tonga_smu_init(struct amdgpu_device *adev);
-int tonga_smu_fini(struct amdgpu_device *adev);
-int tonga_smu_start(struct amdgpu_device *adev);
-
-struct tonga_smu_private_data
-{
- uint8_t *header;
- uint32_t smu_buffer_addr_high;
- uint32_t smu_buffer_addr_low;
- uint32_t header_addr_high;
- uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs dce_v6_0_ip_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4fdfab1e9200..8c4d808db0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -170,7 +170,7 @@ static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
*/
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
- unsigned i = 0;
+ unsigned i = 100;
if (crtc >= adev->mode_info.num_crtc)
return;
@@ -182,14 +182,16 @@ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
* wait for another frame.
*/
while (dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v8_0_is_counter_moving(adev, crtc))
break;
}
}
while (!dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v8_0_is_counter_moving(adev, crtc))
break;
}
@@ -395,15 +397,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- continue;
- }
switch (amdgpu_connector->hpd.hpd) {
case AMDGPU_HPD_1:
WREG32(mmDC_HPD1_CONTROL, tmp);
@@ -426,6 +419,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
default:
break;
}
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS, to avoid breaking the
+ * aux dp channel on imac and to help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * and also to avoid interrupt storms during dpms.
+ */
+ u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_2:
+ dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_3:
+ dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_4:
+ dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_5:
+ dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_6:
+ dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
+ break;
+ default:
+ continue;
+ }
+
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ continue;
+ }
+
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
@@ -604,6 +636,52 @@ static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
+static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
+{
+ int num_crtc = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ num_crtc = 6;
+ break;
+ case CHIP_KAVERI:
+ num_crtc = 4;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ num_crtc = 2;
+ break;
+ default:
+ num_crtc = 0;
+ }
+ return num_crtc;
+}
+
+void dce_v8_0_disable_dce(struct amdgpu_device *adev)
+{
+ /* disable VGA render and enabled CRTCs, if the ASIC has a DCE engine */
+ if (amdgpu_atombios_has_dce_engine_info(adev)) {
+ u32 tmp;
+ int crtc_enabled, i;
+
+ dce_v8_0_set_vga_render_state(adev, false);
+
+ /* disable CRTCs */
+ for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
+ CRTC_CONTROL, CRTC_MASTER_EN);
+ if (crtc_enabled) {
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ }
+ }
+ }
+}
+
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -1501,13 +1579,13 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
- value = (sad->channels <<
- AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
- (sad->byte2 <<
- AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
- (sad->freq <<
- AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
- max_channels = sad->channels;
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
+ max_channels = sad->channels;
}
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
@@ -1613,7 +1691,7 @@ static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
- WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
+ WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
@@ -1693,6 +1771,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
+
offset = dig->afmt->offset;
/* hdmi deep color mode general control packets setup, if bpc > 8 */
@@ -1817,7 +1896,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
- HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */
+ HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */
WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
(2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
@@ -1826,13 +1905,12 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
- /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
- /* enable audio after to setting up hw */
+ /* enable audio after setting up hw */
dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
}
@@ -1944,7 +2022,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels;
u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
@@ -1952,6 +2030,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1971,23 +2050,23 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = amdgpu_fb->obj;
- rbo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(rbo);
+ fb_location = amdgpu_bo_gpu_offset(abo);
} else {
- r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
- amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ amdgpu_bo_unreserve(abo);
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
@@ -1999,7 +2078,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
- (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
@@ -2056,8 +2135,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
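[review note] This hunk adapts to drm_get_format_name() returning a kmalloc'd string that the caller owns, hence the kfree(). A minimal sketch of the expected pattern, assuming the allocating (kasprintf-based) variant of that era:

	char *name = drm_get_format_name(target_fb->pixel_format); /* may return NULL */

	DRM_DEBUG_KMS("pixel format %s\n", name ? name : "(unknown)");
	kfree(name); /* kfree(NULL) is a no-op */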
@@ -2137,17 +2217,17 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
/* Bytes per pixel may have changed */
@@ -2552,7 +2632,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
.gamma_set = dce_v8_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config,
.destroy = dce_v8_0_crtc_destroy,
- .page_flip = amdgpu_crtc_page_flip,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
};
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
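[review note] .page_flip_target extends the legacy hook with an absolute vblank sequence number at which the flip should complete. A sketch of the two callback signatures for comparison, assuming the drm_crtc_funcs layout of this kernel generation (the modeset acquire-context argument came later):

	int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			 struct drm_pending_vblank_event *event, uint32_t flags);
	int (*page_flip_target)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags, uint32_t target);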
@@ -2619,16 +2699,16 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
if (crtc->primary->fb) {
int r;
struct amdgpu_framebuffer *amdgpu_fb;
- struct amdgpu_bo *rbo;
+ struct amdgpu_bo *abo;
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(rbo, false);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
if (unlikely(r))
- DRM_ERROR("failed to reserve rbo before unpin\n");
+ DRM_ERROR("failed to reserve abo before unpin\n");
else {
- amdgpu_bo_unpin(rbo);
- amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
}
}
/* disable the GRPH */
@@ -2653,7 +2733,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL2:
/* disable the ppll */
amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
case ATOM_PPLL0:
/* disable the ppll */
@@ -2803,21 +2883,20 @@ static int dce_v8_0_early_init(void *handle)
dce_v8_0_set_display_funcs(adev);
dce_v8_0_set_irq_funcs(adev);
+ adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
+
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
- adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_KAVERI:
- adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
- adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6; /* ? */
break;
@@ -2954,10 +3033,6 @@ static int dce_v8_0_hw_fini(void *handle)
static int dce_v8_0_suspend(void *handle)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_atombios_scratch_regs_save(adev);
-
return dce_v8_0_hw_fini(handle);
}
@@ -2968,8 +3043,6 @@ static int dce_v8_0_resume(void *handle)
ret = dce_v8_0_hw_init(handle);
- amdgpu_atombios_scratch_regs_restore(adev);
-
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3236,7 +3309,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
drm_handle_vblank(adev->ddev, crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
@@ -3245,7 +3317,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
index 77016852b252..7d0770c3a49b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs dce_v8_0_ip_funcs;
+void dce_v8_0_disable_dce(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
new file mode 100644
index 000000000000..c2bd9f045532
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_i2c.h"
+#include "atom.h"
+#include "amdgpu_pll.h"
+#include "amdgpu_connectors.h"
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "dce_v8_0.h"
+#endif
+#include "dce_v10_0.h"
+#include "dce_v11_0.h"
+#include "dce_virtual.h"
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+
+/**
+ * dce_virtual_vblank_wait - vblank wait asic callback.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
+{
+ return;
+}
+
+static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ return 0;
+}
+
+static void dce_virtual_page_flip(struct amdgpu_device *adev,
+ int crtc_id, u64 crtc_base, bool async)
+{
+ return;
+}
+
+static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
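+	/* no real scanout hardware; failing here makes DRM fall back to irq-time vblank timestamps */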
+ *vbl = 0;
+ *position = 0;
+
+ return -EINVAL;
+}
+
+static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ return true;
+}
+
+static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
+ enum amdgpu_hpd_id hpd)
+{
+ return;
+}
+
+static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
+{
+ return false;
+}
+
+static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dce_v8_0_disable_dce(adev);
+ break;
+#endif
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ dce_v10_0_disable_dce(adev);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ dce_v11_0_disable_dce(adev);
+ break;
+ case CHIP_TOPAZ:
+ /* no DCE */
+ return;
+ default:
+ DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+ }
+}
+
+static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ return;
+}
+
+static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
+ bool render)
+{
+ return;
+}
+
+/**
+ * dce_virtual_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line
+ * buffer allocation (CIK).
+ */
+static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
+{
+ return;
+}
+
+static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t size)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int i;
+
+ /* userspace palettes are always correct as is */
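+	/* truncate each 16-bit component to the 10-bit LUT range (>> 6) */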
+ for (i = 0; i < size; i++) {
+ amdgpu_crtc->lut_r[i] = red[i] >> 6;
+ amdgpu_crtc->lut_g[i] = green[i] >> 6;
+ amdgpu_crtc->lut_b[i] = blue[i] >> 6;
+ }
+
+ return 0;
+}
+
+static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(amdgpu_crtc);
+}
+
+static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
+ .cursor_set2 = NULL,
+ .cursor_move = NULL,
+ .gamma_set = dce_virtual_crtc_gamma_set,
+ .set_config = amdgpu_crtc_set_config,
+ .destroy = dce_virtual_crtc_destroy,
+ .page_flip_target = amdgpu_crtc_page_flip_target,
+};
+
+static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ unsigned type;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ amdgpu_crtc->enabled = true;
+ /* Make sure VBLANK and PFLIP interrupts are still enabled */
+ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_update(adev, &adev->crtc_irq, type);
+ amdgpu_irq_update(adev, &adev->pageflip_irq, type);
+ drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ amdgpu_crtc->enabled = false;
+ break;
+ }
+}
+
+static void dce_virtual_crtc_prepare(struct drm_crtc *crtc)
+{
+ dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
+{
+ dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->primary->fb) {
+ int r;
+ struct amdgpu_framebuffer *amdgpu_fb;
+ struct amdgpu_bo *abo;
+
+ amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ r = amdgpu_bo_reserve(abo, false);
+ if (unlikely(r))
+ DRM_ERROR("failed to reserve abo before unpin\n");
+ else {
+ amdgpu_bo_unpin(abo);
+ amdgpu_bo_unreserve(abo);
+ }
+ }
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+}
+
+static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	/* update the hw version for dpm */
+ amdgpu_crtc->hw_mode = *adjusted_mode;
+
+ return 0;
+}
+
+static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ amdgpu_crtc->encoder = encoder;
+ amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
+ break;
+ }
+ }
+ if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return 0;
+}
+
+static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc)
+{
+ return;
+}
+
+static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ return 0;
+}
+
+static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
+ .dpms = dce_virtual_crtc_dpms,
+ .mode_fixup = dce_virtual_crtc_mode_fixup,
+ .mode_set = dce_virtual_crtc_mode_set,
+ .mode_set_base = dce_virtual_crtc_set_base,
+ .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
+ .prepare = dce_virtual_crtc_prepare,
+ .commit = dce_virtual_crtc_commit,
+ .load_lut = dce_virtual_crtc_load_lut,
+ .disable = dce_virtual_crtc_disable,
+};
+
+static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ int i;
+
+ amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
+ (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+ if (amdgpu_crtc == NULL)
+ return -ENOMEM;
+
+ drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
+ amdgpu_crtc->crtc_id = index;
+ adev->mode_info.crtcs[index] = amdgpu_crtc;
+
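+	/* seed a linear gamma ramp, widening the 8-bit index to 10 bits (i << 2) */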
+ for (i = 0; i < 256; i++) {
+ amdgpu_crtc->lut_r[i] = i << 2;
+ amdgpu_crtc->lut_g[i] = i << 2;
+ amdgpu_crtc->lut_b[i] = i << 2;
+ }
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
+
+ return 0;
+}
+
+static int dce_virtual_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
+ dce_virtual_set_display_funcs(adev);
+ dce_virtual_set_irq_funcs(adev);
+
+ adev->mode_info.num_crtc = 1;
+ adev->mode_info.num_hpd = 1;
+ adev->mode_info.num_dig = 1;
+ return 0;
+}
+
+static bool dce_virtual_get_connector_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_i2c_bus_rec ddc_bus;
+ struct amdgpu_router router;
+ struct amdgpu_hpd hpd;
+
+ /* look up gpio for ddc, hpd */
+ ddc_bus.valid = false;
+ hpd.hpd = AMDGPU_HPD_NONE;
+ /* needed for aux chan transactions */
+ ddc_bus.hpd = hpd.hpd;
+
+ memset(&router, 0, sizeof(router));
+ router.ddc_valid = false;
+ router.cd_valid = false;
+ amdgpu_display_add_connector(adev,
+ 0,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus,
+ CONNECTOR_OBJECT_ID_VIRTUAL,
+ &hpd,
+ &router);
+
+ amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 0);
+
+ amdgpu_link_encoder_connector(adev->ddev);
+
+ return true;
+}
+
+static int dce_virtual_sw_init(void *handle)
+{
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
+ if (r)
+ return r;
+
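+	/* no readable hardware frame counter (dce_virtual_vblank_get_counter always returns 0) */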
+ adev->ddev->max_vblank_count = 0;
+
+ adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ /* allocate crtcs */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = dce_virtual_crtc_init(adev, i);
+ if (r)
+ return r;
+ }
+
+ dce_virtual_get_connector_info(adev);
+ amdgpu_print_display_setup(adev->ddev);
+
+ drm_kms_helper_poll_init(adev->ddev);
+
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
+}
+
+static int dce_virtual_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ kfree(adev->mode_info.bios_hardcoded_edid);
+
+ drm_kms_helper_poll_fini(adev->ddev);
+
+ drm_mode_config_cleanup(adev->ddev);
+ adev->mode_info.mode_config_initialized = false;
+ return 0;
+}
+
+static int dce_virtual_hw_init(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_hw_fini(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_suspend(void *handle)
+{
+ return dce_virtual_hw_fini(handle);
+}
+
+static int dce_virtual_resume(void *handle)
+{
+ return dce_virtual_hw_init(handle);
+}
+
+static bool dce_virtual_is_idle(void *handle)
+{
+ return true;
+}
+
+static int dce_virtual_wait_for_idle(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static int dce_virtual_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dce_virtual_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs dce_virtual_ip_funcs = {
+ .name = "dce_virtual",
+ .early_init = dce_virtual_early_init,
+ .late_init = NULL,
+ .sw_init = dce_virtual_sw_init,
+ .sw_fini = dce_virtual_sw_fini,
+ .hw_init = dce_virtual_hw_init,
+ .hw_fini = dce_virtual_hw_fini,
+ .suspend = dce_virtual_suspend,
+ .resume = dce_virtual_resume,
+ .is_idle = dce_virtual_is_idle,
+ .wait_for_idle = dce_virtual_wait_for_idle,
+ .soft_reset = dce_virtual_soft_reset,
+ .set_clockgating_state = dce_virtual_set_clockgating_state,
+ .set_powergating_state = dce_virtual_set_powergating_state,
+};
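+/*
+ * Review note: a table like this gets wired into an ASIC's IP-block list; a
+ * hedged sketch of such an entry, assuming the amdgpu_ip_block_version layout
+ * of this kernel generation (version numbers illustrative):
+ *
+ *	{
+ *		.type = AMD_IP_BLOCK_TYPE_DCE,
+ *		.major = 11, .minor = 0, .rev = 0,
+ *		.funcs = &dce_virtual_ip_funcs,
+ *	},
+ */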
+
+/* a virtual encoder has no hardware to program; these hooks are intentional no-ops */
+static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
+{
+ return;
+}
+
+static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
+{
+ return;
+}
+
+static void
+dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return;
+}
+
+static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
+{
+ return;
+}
+
+static void
+dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ return;
+}
+
+static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* set the active encoder to connector routing */
+ amdgpu_encoder_set_active_device(encoder);
+
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
+ .dpms = dce_virtual_encoder_dpms,
+ .mode_fixup = dce_virtual_encoder_mode_fixup,
+ .prepare = dce_virtual_encoder_prepare,
+ .mode_set = dce_virtual_encoder_mode_set,
+ .commit = dce_virtual_encoder_commit,
+ .disable = dce_virtual_encoder_disable,
+};
+
+static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ kfree(amdgpu_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(amdgpu_encoder);
+}
+
+static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
+ .destroy = dce_virtual_encoder_destroy,
+};
+
+static void dce_virtual_encoder_add(struct amdgpu_device *adev,
+ uint32_t encoder_enum,
+ uint32_t supported_device,
+ u16 caps)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ /* see if we already added it */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+ if (amdgpu_encoder->encoder_enum == encoder_enum) {
+ amdgpu_encoder->devices |= supported_device;
+ return;
+ }
+ }
+
+ /* add a new one */
+ amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
+ if (!amdgpu_encoder)
+ return;
+
+ encoder = &amdgpu_encoder->base;
+ encoder->possible_crtcs = 0x1;
+ amdgpu_encoder->enc_priv = NULL;
+ amdgpu_encoder->encoder_enum = encoder_enum;
+ amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ amdgpu_encoder->devices = supported_device;
+ amdgpu_encoder->rmx_type = RMX_OFF;
+ amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
+ amdgpu_encoder->is_ext_encoder = false;
+ amdgpu_encoder->caps = caps;
+
+ drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
+ DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id);
+}
+
+static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
+ .set_vga_render_state = &dce_virtual_set_vga_render_state,
+ .bandwidth_update = &dce_virtual_bandwidth_update,
+ .vblank_get_counter = &dce_virtual_vblank_get_counter,
+ .vblank_wait = &dce_virtual_vblank_wait,
+ .is_display_hung = &dce_virtual_is_display_hung,
+ .backlight_set_level = NULL,
+ .backlight_get_level = NULL,
+ .hpd_sense = &dce_virtual_hpd_sense,
+ .hpd_set_polarity = &dce_virtual_hpd_set_polarity,
+ .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
+ .page_flip = &dce_virtual_page_flip,
+ .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
+ .add_encoder = &dce_virtual_encoder_add,
+ .add_connector = &amdgpu_connector_add,
+ .stop_mc_access = &dce_virtual_stop_mc_access,
+ .resume_mc_access = &dce_virtual_resume_mc_access,
+};
+
+static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dce_virtual_display_funcs;
+}
+
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
+{
+	struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info, vblank_timer);
+	struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device, mode_info);
+	unsigned crtc = 0;
+
+ drm_handle_vblank(adev->ddev, crtc);
+ dce_virtual_pageflip_irq(adev, NULL, NULL);
+ hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
+}
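+/*
+ * Review note: re-arming with hrtimer_start() from the handler and returning
+ * HRTIMER_NORESTART works, but each period drifts by the handler's own
+ * latency. A minimal alternative sketch using the core's forwarding helper
+ * (assumption: no other behavior change intended):
+ *
+ *	hrtimer_forward_now(vblank_timer, ns_to_ktime(DCE_VIRTUAL_VBLANK_PERIOD));
+ *	return HRTIMER_RESTART;
+ */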
+
+static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
+ int crtc,
+ enum amdgpu_interrupt_state state)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+
+ if (state && !adev->mode_info.vsync_timer_enabled) {
+ DRM_DEBUG("Enable software vsync timer\n");
+ hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+ adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle;
+ hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL);
+ } else if (!state && adev->mode_info.vsync_timer_enabled) {
+ DRM_DEBUG("Disable software vsync timer\n");
+ hrtimer_cancel(&adev->mode_info.vblank_timer);
+ }
+
+ adev->mode_info.vsync_timer_enabled = state;
+ DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
+}
+
+static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CRTC_IRQ_VBLANK1:
+ dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev,
+ int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc) {
+ DRM_DEBUG("invalid crtc %d\n", crtc);
+ return;
+ }
+}
+
+static int dce_virtual_crtc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned crtc = 0;
+ unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1;
+
+ dce_virtual_crtc_vblank_int_ack(adev, crtc);
+
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(adev->ddev, crtc);
+ }
+ dce_virtual_pageflip_irq(adev, NULL, NULL);
+ DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+ return 0;
+}
+
+static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ if (type >= adev->mode_info.num_crtc) {
+ DRM_ERROR("invalid pageflip crtc %d\n", type);
+ return -EINVAL;
+ }
+ DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state);
+
+ return 0;
+}
+
+static int dce_virtual_pageflip_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ unsigned long flags;
+ unsigned crtc_id = 0;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_flip_work *works;
+
+ crtc_id = 0;
+ amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+ if (crtc_id >= adev->mode_info.num_crtc) {
+ DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+ return -EINVAL;
+ }
+
+	/* the pageflip IRQ can fire before the crtc is fully initialized */
+ if (amdgpu_crtc == NULL)
+ return 0;
+
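+	/* event_lock serializes pflip_status/pflip_works and must be held around drm_crtc_send_vblank_event() */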
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ works = amdgpu_crtc->pflip_works;
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+ "AMDGPU_FLIP_SUBMITTED(%d)\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return 0;
+ }
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ amdgpu_crtc->pflip_works = NULL;
+
+	/* wake up userspace */
+ if (works->event)
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ schedule_work(&works->unpin_work);
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
+ .set = dce_virtual_set_crtc_irq_state,
+ .process = dce_virtual_crtc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = {
+ .set = dce_virtual_set_pageflip_irq_state,
+ .process = dce_virtual_pageflip_irq,
+};
+
+static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
+
+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
index 5983e3150cc5..e239243f6ebc 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
@@ -21,21 +21,11 @@
*
*/
-#ifndef ICELAND_SMUM_H
-#define ICELAND_SMUM_H
+#ifndef __DCE_VIRTUAL_H__
+#define __DCE_VIRTUAL_H__
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
- uint8_t *header;
- uint8_t *mec_image;
- uint32_t header_addr_high;
- uint32_t header_addr_low;
-};
+extern const struct amd_ip_funcs dce_virtual_ip_funcs;
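+/* 16666666 ns = 10^9 / 60: a fixed ~60 Hz software vblank period */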
+#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
deleted file mode 100644
index ed03b75175d4..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_smum.h"
-
-MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int fiji_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- fiji_dpm_set_funcs(adev);
-
- return 0;
-}
-
-static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30] = "amdgpu/fiji_smc.bin";
- int err;
-
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int fiji_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = fiji_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int fiji_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int fiji_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- ret = fiji_smu_init(adev);
- if (ret) {
- DRM_ERROR("SMU initialization failed\n");
- goto fail;
- }
-
- ret = fiji_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
- mutex_unlock(&adev->pm.mutex);
- return 0;
-
-fail:
- adev->firmware.smu_load = false;
- mutex_unlock(&adev->pm.mutex);
- return -EINVAL;
-}
-
-static int fiji_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- mutex_lock(&adev->pm.mutex);
- fiji_smu_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- return 0;
-}
-
-static int fiji_dpm_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- fiji_dpm_hw_fini(adev);
-
- return 0;
-}
-
-static int fiji_dpm_resume(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- fiji_dpm_hw_init(adev);
-
- return 0;
-}
-
-static int fiji_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int fiji_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-const struct amd_ip_funcs fiji_dpm_ip_funcs = {
- .name = "fiji_dpm",
- .early_init = fiji_dpm_early_init,
- .late_init = NULL,
- .sw_init = fiji_dpm_sw_init,
- .sw_fini = fiji_dpm_sw_fini,
- .hw_init = fiji_dpm_hw_init,
- .hw_fini = fiji_dpm_hw_fini,
- .suspend = fiji_dpm_suspend,
- .resume = fiji_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = fiji_dpm_set_clockgating_state,
- .set_powergating_state = fiji_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
- .get_temperature = NULL,
- .pre_set_power_state = NULL,
- .set_power_state = NULL,
- .post_set_power_state = NULL,
- .display_configuration_changed = NULL,
- .get_sclk = NULL,
- .get_mclk = NULL,
- .print_power_state = NULL,
- .debugfs_print_current_performance_level = NULL,
- .force_performance_level = NULL,
- .vblank_too_short = NULL,
- .powergate_uvd = NULL,
-};
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->pm.funcs)
- adev->pm.funcs = &fiji_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
deleted file mode 100644
index b3e19ba4c57f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ /dev/null
@@ -1,863 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_ppsmc.h"
-#include "fiji_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-#define FIJI_SMC_SIZE 0x20000
-
-static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
- uint32_t val;
-
- if (smc_address & 3)
- return -EINVAL;
-
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- return 0;
-}
-
-static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
- unsigned long flags;
-
- if (smc_start_address & 3)
- return -EINVAL;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* Bytes are written into the SMC addres space with the MSB first */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = fiji_set_smc_sram_address(adev, addr, limit);
-
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
- /* Now write odd bytes left, do a read modify write cycle */
- data = 0;
-
- result = fiji_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- orig_data = RREG32(mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = fiji_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-out:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int fiji_program_jump_on_start(struct amdgpu_device *adev)
-{
- static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
- fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
- return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32(mmSMC_RESP_0);
- if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, 0x20000);
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- if (!fiji_is_smc_ram_running(adev))
- {
- return -EINVAL;
- }
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg,
- uint32_t parameter)
-{
- if (!fiji_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return fiji_send_msg_to_smc(adev, msg);
-}
-
-static int fiji_send_msg_to_smc_with_parameter_without_waiting(
- struct amdgpu_device *adev,
- PPSMC_Msg msg, uint32_t parameter)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return fiji_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- if (!fiji_is_smc_ram_running(adev))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
-static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- uint32_t ucode_size;
- uint32_t ucode_start_address;
- const uint8_t *src;
- uint32_t val;
- uint32_t byte_count;
- uint32_t *data;
- unsigned long flags;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- /* Skip SMC ucode loading on SR-IOV capable boards.
- * vbios does this for us in asic_init in that case.
- */
- if (adev->virtualization.supports_sr_iov)
- return 0;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- src = (const uint8_t *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3) {
- DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (ucode_size > FIJI_SMC_SIZE) {
- DRM_ERROR("SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- byte_count = ucode_size;
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- WREG32(mmSMC_IND_DATA_0, data[0]);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-#if 0 /* not used yet */
-static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t *value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = fiji_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = fiji_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int fiji_smu_stop_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- return AMDGPU_UCODE_ID_SDMA0;
- case UCODE_ID_SDMA1:
- return AMDGPU_UCODE_ID_SDMA1;
- case UCODE_ID_CP_CE:
- return AMDGPU_UCODE_ID_CP_CE;
- case UCODE_ID_CP_PFP:
- return AMDGPU_UCODE_ID_CP_PFP;
- case UCODE_ID_CP_ME:
- return AMDGPU_UCODE_ID_CP_ME;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- case UCODE_ID_CP_MEC_JT2:
- return AMDGPU_UCODE_ID_CP_MEC1;
- case UCODE_ID_RLC_G:
- return AMDGPU_UCODE_ID_RLC_G;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return AMDGPU_UCODE_ID_MAXIMUM;
- }
-}
-
-static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header = NULL;
- uint64_t gpu_addr;
- uint32_t data_size;
-
- if (ucode->fw == NULL)
- return -EINVAL;
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
- (fw_type == UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
- data_size = le32_to_cpu(header->jt_size) << 2;
- }
-
- entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = upper_32_bits(gpu_addr);
- entry->image_addr_low = lower_32_bits(gpu_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = data_size;
- entry->num_register_entries = 0;
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
-
- return 0;
-}
-
-static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
- struct SMU_DRAMData_TOC *toc;
- uint32_t fw_to_load;
-
- WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)private->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- if (!adev->firmware.smu_load)
- return 0;
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for RLC\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for CE\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for PFP\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for ME\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA0\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA1\n");
- return -EINVAL;
- }
-
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_MASK;
-
- if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
- DRM_ERROR("Fail to request SMU load ucode\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case AMDGPU_UCODE_ID_SDMA0:
- return UCODE_ID_SDMA0_MASK;
- case AMDGPU_UCODE_ID_SDMA1:
- return UCODE_ID_SDMA1_MASK;
- case AMDGPU_UCODE_ID_CP_CE:
- return UCODE_ID_CP_CE_MASK;
- case AMDGPU_UCODE_ID_CP_PFP:
- return UCODE_ID_CP_PFP_MASK;
- case AMDGPU_UCODE_ID_CP_ME:
- return UCODE_ID_CP_ME_MASK;
- case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_CP_MEC2:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_RLC_G:
- return UCODE_ID_RLC_G_MASK;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return 0;
- }
-}
-
-static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_type)
-{
- uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
- int i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
- int i;
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = fiji_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Clear status */
- WREG32_SMC(ixSMU_STATUS, 0);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Set SMU Auto Start */
- val = RREG32_SMC(ixSMU_INPUT_DATA);
- val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
- WREG32_SMC(ixSMU_INPUT_DATA, val);
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Interrupt is not enabled by firmware\n");
- return -EINVAL;
- }
-
- /* Call Test SMU message with 0x20000 offset
- * to trigger SMU start
- */
- fiji_send_msg_to_smc_offset(adev);
- DRM_INFO("[FM]try triger smu start\n");
- /* Wait for done bit to be set */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMU_STATUS);
- if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMU start\n");
- return -EINVAL;
- }
-
- /* Check pass/failed indicator */
- val = RREG32_SMC(ixSMU_STATUS);
- if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
- DRM_ERROR("SMU Firmware start failed\n");
- return -EINVAL;
- }
- DRM_INFO("[FM]smu started\n");
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMU firmware initialization failed\n");
- return -EINVAL;
- }
- DRM_INFO("[FM]smu initialized\n");
-
- return 0;
-}
-
-static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
- int i, result;
- uint32_t val;
-
- /* wait for smc boot up */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
- if (val)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMC boot sequence is not completed\n");
- return -EINVAL;
- }
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = fiji_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Set smc instruct start point at 0x0 */
- fiji_program_jump_on_start(adev);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMC firmware initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int fiji_smu_start(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
-
- if (!fiji_is_smc_ram_running(adev)) {
- val = RREG32_SMC(ixSMU_FIRMWARE);
- if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
- DRM_INFO("[FM]start smu in nonprotection mode\n");
- result = fiji_smu_start_in_non_protection_mode(adev);
- if (result)
- return result;
- } else {
- DRM_INFO("[FM]start smu in protection mode\n");
- result = fiji_smu_start_in_protection_mode(adev);
- if (result)
- return result;
- }
- }
-
- return fiji_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
- .check_fw_load_finish = fiji_smu_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-int fiji_smu_init(struct amdgpu_device *adev)
-{
- struct fiji_smu_private_data *private;
- uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- uint32_t smu_internal_buffer_size = 200*4096;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
- uint64_t mc_addr;
- void *toc_buf_ptr;
- void *smu_buf_ptr;
- int ret;
-
- private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
- if (NULL == private)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = private;
- adev->smu.fw_flags = 0;
-
- /* Allocate FW image data structure and header buffer */
- ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, toc_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for TOC buffer\n");
- return -ENOMEM;
- }
-
- /* Allocate buffer for SMU internal buffer */
- ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, smu_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
- return -ENOMEM;
- }
-
- /* Retrieve GPU address for header buffer and internal buffer */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the TOC buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- private->header_addr_low = lower_32_bits(mc_addr);
- private->header_addr_high = upper_32_bits(mc_addr);
- private->header = toc_buf_ptr;
-
- ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the SMU internal buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- private->smu_buffer_addr_low = lower_32_bits(mc_addr);
- private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
- adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
-
- return 0;
-}
-
-int fiji_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.fw_buf)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
new file mode 100644
index 000000000000..40abb6b81c09
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -0,0 +1,3362 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_ucode.h"
+#include "si/clearstate_si.h"
+#include "si/sid.h"
+
+#define GFX6_NUM_GFX_RINGS 1
+#define GFX6_NUM_COMPUTE_RINGS 2
+#define STATIC_PER_CU_PG_ENABLE (1 << 3)
+#define DYN_PER_CU_PG_ENABLE (1 << 2)
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
+
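+/*
+ * RLC save/restore register list: pairs of a selector word
+ * ((GRBM instance select << 16) | (register dword offset)) and a
+ * placeholder value word, zero-terminated. The format is presumed to
+ * follow the radeon SI RLC save/restore convention.
+ */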
+static const u32 verde_rlc_save_restore_register_list[] =
+{
+ (0x8000 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0xe80 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0xe80 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x98f0 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xe7c >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9148 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9148 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9150 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x897c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8d8c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac54 >> 2),
+	0x00000000,
+ 0x3,
+ (0x9c00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9910 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9914 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9918 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x991c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9920 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9924 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9928 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x992c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9930 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9934 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9938 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x993c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9940 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9944 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9948 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x994c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9950 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9954 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9958 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x995c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9960 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9964 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9968 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x996c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9970 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9974 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9978 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x997c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9980 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9984 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9988 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x998c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c14 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c08 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0xe84 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0xe84 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x914c >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x914c >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9354 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9354 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9060 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9364 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x913c >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x90e0 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x90e4 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x90e0 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x90e4 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8e50 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8c0c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8e58 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8e5c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9508 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x950c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9494 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x88cc >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x89b0 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8b10 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9830 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9838 >> 2),
+ 0x00000000,
+ (0x9c00 << 16) | (0x9a10 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9870 >> 2),
+ 0x00000000,
+ (0x8000 << 16) | (0x9874 >> 2),
+ 0x00000000,
+ (0x8001 << 16) | (0x9870 >> 2),
+ 0x00000000,
+ (0x8001 << 16) | (0x9874 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9870 >> 2),
+ 0x00000000,
+ (0x8040 << 16) | (0x9874 >> 2),
+ 0x00000000,
+ (0x8041 << 16) | (0x9870 >> 2),
+ 0x00000000,
+ (0x8041 << 16) | (0x9874 >> 2),
+ 0x00000000,
+ 0x00000000
+};
+
+static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err;
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ const struct rlc_firmware_header_v1_0 *rlc_hdr;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_name = "tahiti";
+ break;
+ case CHIP_PITCAIRN:
+ chip_name = "pitcairn";
+ break;
+ case CHIP_VERDE:
+ chip_name = "verde";
+ break;
+ case CHIP_OLAND:
+ chip_name = "oland";
+ break;
+ case CHIP_HAINAN:
+ chip_name = "hainan";
+ break;
+	default:
+		BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+	if (err)
+		goto out;
+ rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+
+out:
+ if (err) {
+ printk(KERN_ERR
+ "gfx6: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ }
+ return err;
+}
+
+static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
+{
+ const u32 num_tile_mode_states = 32;
+ u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
+ switch (adev->gfx.config.mem_row_size_in_kb) {
+ case 1:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+ break;
+ case 2:
+ default:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+ break;
+ case 4:
+ split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+ break;
+ }
+
+ if (adev->asic_type == CHIP_VERDE ||
+ adev->asic_type == CHIP_OLAND ||
+ adev->asic_type == CHIP_HAINAN) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 1:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 2:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 3:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 4:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 5:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 6:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 7:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 8:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 9:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 10:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 11:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 12:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 13:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 14:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 15:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 16:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 17:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 21:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 22:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 23:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 24:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 25:
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+ } else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+ switch (reg_offset) {
+ case 0: /* non-AA compressed depth or any compressed stencil */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 1: /* 2xAA/4xAA compressed depth only */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 2: /* 8xAA compressed depth only */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 8: /* 1D and 1D Array Surfaces */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 9: /* Displayable maps. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 10: /* Display 8bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 11: /* Display 16bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 12: /* Display 32bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 13: /* Thin. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 14: /* Thin 8 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 15: /* Thin 16 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 16: /* Thin 32 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 17: /* Thin 64 bpp. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ case 21: /* 8 bpp PRT. */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 22: /* 16 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ break;
+ case 23: /* 32 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 24: /* 64 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ break;
+ case 25: /* 128 bpp PRT */
+ gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ break;
+ default:
+ gb_tile_moden = 0;
+ break;
+ }
+ adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+ WREG32(GB_TILE_MODE0 + reg_offset, gb_tile_moden);
+ }
+	} else {
+		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+	}
+}
+
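+/*
+ * Program GRBM_GFX_INDEX so that subsequent register accesses target a
+ * specific shader engine/array/instance; 0xffffffff selects broadcast.
+ */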
+static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 instance)
+{
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = INSTANCE_BROADCAST_WRITES;
+ else
+ data = INSTANCE_INDEX(instance);
+
+ if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+ data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ else if (se_num == 0xffffffff)
+ data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+ else if (sh_num == 0xffffffff)
+ data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+ else
+ data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+ WREG32(GRBM_GFX_INDEX, data);
+}
+
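+/* Build a mask with bit_width consecutive low bits set (e.g. 4 -> 0xf). */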
+static u32 gfx_v6_0_create_bitmask(u32 bit_width)
+{
+ return (u32)(((u64)1 << bit_width) - 1);
+}
+
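+/*
+ * Return the mask of disabled render backends for the currently selected
+ * SE/SH, merging the fused-off (CC_RB_BACKEND_DISABLE) and user-disabled
+ * (GC_USER_RB_BACKEND_DISABLE) bits.
+ */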
+static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
+ u32 max_rb_num_per_se,
+ u32 sh_per_se)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_RB_BACKEND_DISABLE);
+ data &= BACKEND_DISABLE_MASK;
+ data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+ data >>= BACKEND_DISABLE_SHIFT;
+
+ mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se);
+
+ return data & mask;
+}
+
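+/* OR in the per-ASIC default PA_SC_RASTER_CONFIG packer/RB routing bits. */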
+static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
+{
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ *rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
+ SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
+ break;
+ case CHIP_VERDE:
+ *rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
+ break;
+ case CHIP_OLAND:
+ *rconf |= RB_YSEL;
+ break;
+ case CHIP_HAINAN:
+ *rconf |= 0x0;
+ break;
+ default:
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ break;
+ }
+}
+
+static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+ u32 raster_config, unsigned rb_mask,
+ unsigned num_rb)
+{
+ unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+ unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+ unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+ unsigned rb_per_se = num_rb / num_se;
+ unsigned se_mask[4];
+ unsigned se;
+
+ se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+ se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+ se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+ se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+ WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+ WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+ WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+ for (se = 0; se < num_se; se++) {
+ unsigned raster_config_se = raster_config;
+ unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+ unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+ int idx = (se / 2) * 2;
+
+ if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+ raster_config_se &= ~SE_MAP_MASK;
+
+ if (!se_mask[idx]) {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ } else {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ }
+ }
+
+ pkr0_mask &= rb_mask;
+ pkr1_mask &= rb_mask;
+ if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+ raster_config_se &= ~PKR_MAP_MASK;
+
+ if (!pkr0_mask) {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ } else {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ }
+ }
+
+ if (rb_per_se >= 2) {
+ unsigned rb0_mask = 1 << (se * rb_per_se);
+ unsigned rb1_mask = rb0_mask << 1;
+
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+
+ if (rb_per_se > 2) {
+ rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+ rb1_mask = rb0_mask << 1;
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+ }
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on SI */
+ gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on SI */
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
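+/*
+ * Determine which render backends survived harvesting, cache the enabled
+ * mask, and program PA_SC_RASTER_CONFIG per shader engine, using the
+ * harvested-config writer when some RBs are disabled.
+ */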
+static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_per_se,
+ u32 max_rb_num_per_se)
+{
+ int i, j;
+ u32 data, mask;
+ u32 disabled_rbs = 0;
+ u32 enabled_rbs = 0;
+ unsigned num_rb_pipes;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se);
+ disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+ }
+ }
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ mask = 1;
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
+ if (!(disabled_rbs & mask))
+ enabled_rbs |= mask;
+ mask <<= 1;
+ }
+
+ adev->gfx.config.backend_enable_mask = enabled_rbs;
+ adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+
+ num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines, 16);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < se_num; i++) {
+ gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
+ data = 0;
+ for (j = 0; j < sh_per_se; j++) {
+ switch (enabled_rbs & 3) {
+ case 1:
+ data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+ break;
+ case 2:
+ data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+ break;
+ case 3:
+ default:
+ data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+ break;
+ }
+ enabled_rbs >>= 2;
+ }
+ gfx_v6_0_raster_config(adev, &data);
+
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes)
+ WREG32(PA_SC_RASTER_CONFIG, data);
+ else
+ gfx_v6_0_write_harvested_raster_configs(adev, data,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
+ }
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+/*
+static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+}
+*/
+
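+/*
+ * Return the bitmask of active compute units in the currently selected
+ * shader array; inactive bits come from CC/GC_USER_SHADER_ARRAY_CONFIG.
+ */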
+static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ data &= INACTIVE_CUS_MASK;
+ data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+ data >>= INACTIVE_CUS_SHIFT;
+
+ mask = gfx_v6_0_create_bitmask(cu_per_sh);
+
+ return ~data & mask;
+}
+
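+/*
+ * For each shader array, clear the first active CU's bit in
+ * SPI_STATIC_THREAD_MGMT_3 (mirrors the radeon si_setup_spi() logic).
+ */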
+static void gfx_v6_0_setup_spi(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_per_se,
+ u32 cu_per_sh)
+{
+ int i, j, k;
+ u32 data, mask;
+ u32 active_cu = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+ active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh);
+
+			/* find the first active CU in this SH and clear it */
+			for (k = 0; k < 16; k++) {
+				mask = 1 << k;
+ if (active_cu & mask) {
+ data &= ~mask;
+ WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+ break;
+ }
+ }
+ }
+ }
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
+{
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 sx_debug_1;
+ u32 hdp_host_path_cntl;
+ u32 tmp;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ adev->gfx.config.max_shader_engines = 2;
+ adev->gfx.config.max_tile_pipes = 12;
+ adev->gfx.config.max_cu_per_sh = 8;
+ adev->gfx.config.max_sh_per_se = 2;
+ adev->gfx.config.max_backends_per_se = 4;
+ adev->gfx.config.max_texture_channel_caches = 12;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_PITCAIRN:
+ adev->gfx.config.max_shader_engines = 2;
+ adev->gfx.config.max_tile_pipes = 8;
+ adev->gfx.config.max_cu_per_sh = 5;
+ adev->gfx.config.max_sh_per_se = 2;
+ adev->gfx.config.max_backends_per_se = 4;
+ adev->gfx.config.max_texture_channel_caches = 8;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+ break;
+
+ case CHIP_VERDE:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 4;
+ adev->gfx.config.max_cu_per_sh = 5;
+ adev->gfx.config.max_sh_per_se = 2;
+ adev->gfx.config.max_backends_per_se = 4;
+ adev->gfx.config.max_texture_channel_caches = 4;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_OLAND:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 4;
+ adev->gfx.config.max_cu_per_sh = 6;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 2;
+ adev->gfx.config.max_texture_channel_caches = 4;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 16;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_HAINAN:
+ adev->gfx.config.max_shader_engines = 1;
+ adev->gfx.config.max_tile_pipes = 4;
+ adev->gfx.config.max_cu_per_sh = 5;
+ adev->gfx.config.max_sh_per_se = 1;
+ adev->gfx.config.max_backends_per_se = 1;
+ adev->gfx.config.max_texture_channel_caches = 2;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 16;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+ WREG32(SRBM_INT_CNTL, 1);
+ WREG32(SRBM_INT_ACK, 1);
+
+ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+ adev->gfx.config.mem_max_burst_length_bytes = 256;
+ tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+ adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (adev->gfx.config.mem_row_size_in_kb > 4)
+ adev->gfx.config.mem_row_size_in_kb = 4;
+ adev->gfx.config.shader_engine_tile_size = 32;
+ adev->gfx.config.num_gpus = 1;
+ adev->gfx.config.multi_gpu_tile_size = 64;
+
+ gb_addr_config &= ~ROW_SIZE_MASK;
+ switch (adev->gfx.config.mem_row_size_in_kb) {
+ case 1:
+ default:
+ gb_addr_config |= ROW_SIZE(0);
+ break;
+ case 2:
+ gb_addr_config |= ROW_SIZE(1);
+ break;
+ case 4:
+ gb_addr_config |= ROW_SIZE(2);
+ break;
+ }
+ adev->gfx.config.gb_addr_config = gb_addr_config;
+
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+ WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+#if 0
+ if (adev->has_uvd) {
+ WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+ WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+ }
+#endif
+ gfx_v6_0_tiling_mode_table_init(adev);
+
+ gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se,
+ adev->gfx.config.max_backends_per_se);
+
+ gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se,
+ adev->gfx.config.max_cu_per_sh);
+
+ gfx_v6_0_get_cu_info(adev);
+
+ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+ ROQ_IB2_START(0x2b)));
+ WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_frontend) |
+ SC_BACKEND_PRIM_FIFO_SIZE(adev->gfx.config.sc_prim_fifo_size_backend) |
+ SC_HIZ_TILE_FIFO_SIZE(adev->gfx.config.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(adev->gfx.config.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+ WREG32(CP_PERFMON_CNTL, 0);
+ WREG32(SQ_CONFIG, 0);
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+ AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+ WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+ WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+ WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+ WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+}
+
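+/* Set up the CP scratch register pool: 7 registers starting at SCRATCH_REG0. */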
+static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.reg_base = SCRATCH_REG0;
+ for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
+ adev->gfx.scratch.free[i] = true;
+ adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i;
+ }
+}
+
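+/**
+ * gfx_v6_0_ring_test_ring - basic gfx ring test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Writes 0xDEADBEEF to a scratch register through the ring and polls
+ * until the value reads back, verifying that the CP can fetch and
+ * execute ring commands.
+ * Returns 0 on success, error on failure.
+ */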
+static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+ ring->idx, scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
+static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ /* flush hdp cache */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Emits an hdp invalidate on the cp.
+ */
+static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, HDP_DEBUG0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0x1);
+}
+
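+/*
+ * Emit a fence: flush the read caches with a SURFACE_SYNC, then an
+ * EVENT_WRITE_EOP that writes @seq to @addr and, depending on @flags,
+ * raises an interrupt.
+ */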
+static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags)
+{
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+ bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ /* flush read cache over gart */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ amdgpu_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA);
+ amdgpu_ring_write(ring, 0xFFFFFFFF);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 10); /* poll interval */
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+}
+
+static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
+{
+ u32 header, control = 0;
+
+ /* insert SWITCH_BUFFER packet before first IB in the ring frame */
+ if (ctx_switch) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+
+ if (ib->flags & AMDGPU_IB_FLAG_CE)
+ header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+ else
+ header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+ control |= ib->length_dw | (vm_id << 24);
+
+ amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+ amdgpu_ring_write(ring, control);
+}
+
+/**
+ * gfx_v6_0_ring_test_ib - basic ring IB test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Allocate an IB and execute it on the gfx ring (SI).
+ * Provides a basic gfx ring test to verify that IBs are working.
+ * Returns 0 on success, error on failure.
+ */
+static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct fence *f = NULL;
+ uint32_t scratch;
+ uint32_t tmp = 0;
+ long r;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err1;
+ }
+ ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
+ ib.ptr[2] = 0xDEADBEEF;
+ ib.length_dw = 3;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ if (r)
+ goto err2;
+
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
+ goto err2;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto err2;
+ }
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ } else {
+ DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
+ scratch, tmp);
+ r = -EINVAL;
+ }
+
+err2:
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
+err1:
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
+static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+{
+	int i;
+
+	if (enable) {
+		WREG32(CP_ME_CNTL, 0);
+	} else {
+ WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+ WREG32(SCRATCH_UMSK, 0);
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ adev->gfx.gfx_ring[i].ready = false;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ adev->gfx.compute_ring[i].ready = false;
+ }
+ udelay(50);
+}
+
+static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+{
+ unsigned i;
+ const struct gfx_firmware_header_v1_0 *pfp_hdr;
+ const struct gfx_firmware_header_v1_0 *ce_hdr;
+ const struct gfx_firmware_header_v1_0 *me_hdr;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+ return -EINVAL;
+
+ gfx_v6_0_cp_gfx_enable(adev, false);
+ pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+
+ amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+ amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+ amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+	fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
+static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
+{
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
+ int r, i;
+
+ r = amdgpu_ring_alloc(ring, 7 + 4);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ amdgpu_ring_write(ring, 0x1);
+ amdgpu_ring_write(ring, 0x0);
+ amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1);
+ amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+ amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+ amdgpu_ring_write(ring, 0xc000);
+ amdgpu_ring_write(ring, 0xe000);
+ amdgpu_ring_commit(ring);
+
+ gfx_v6_0_cp_gfx_enable(adev, true);
+
+ r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ amdgpu_ring_write(ring,
+ PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+ for (i = 0; i < ext->reg_count; i++)
+ amdgpu_ring_write(ring, ext->extent[i]);
+ }
+ }
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ amdgpu_ring_write(ring, 0);
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ amdgpu_ring_write(ring, 0x00000316);
+ amdgpu_ring_write(ring, 0x0000000e);
+ amdgpu_ring_write(ring, 0x00000010);
+
+ amdgpu_ring_commit(ring);
+
+ return 0;
+}
+
+static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+ u64 rptr_addr;
+
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
+ WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ WREG32(CP_DEBUG, 0);
+ WREG32(SCRATCH_ADDR, 0);
+
+ /* ring 0 - compute and gfx */
+ /* Set ring buffer size */
+ ring = &adev->gfx.gfx_ring[0];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB0_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+ ring->wptr = 0;
+ WREG32(CP_RB0_WPTR, ring->wptr);
+
+ /* set the wb address whether it's enabled or not */
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32(CP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+ WREG32(SCRATCH_UMSK, 0);
+
+ mdelay(1);
+ WREG32(CP_RB0_CNTL, tmp);
+
+ WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+ /* start the rings */
+ gfx_v6_0_cp_gfx_start(adev);
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ return 0;
+}
+
+static u32 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ return ring->adev->wb.wb[ring->rptr_offs];
+}
+
+static u32 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->gfx.gfx_ring[0])
+ return RREG32(CP_RB0_WPTR);
+ else if (ring == &adev->gfx.compute_ring[0])
+ return RREG32(CP_RB1_WPTR);
+ else if (ring == &adev->gfx.compute_ring[1])
+ return RREG32(CP_RB2_WPTR);
+ else
+ BUG();
+}
+
+static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ WREG32(CP_RB0_WPTR, ring->wptr);
+ (void)RREG32(CP_RB0_WPTR);
+}
+
+static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->gfx.compute_ring[0]) {
+ WREG32(CP_RB1_WPTR, ring->wptr);
+ (void)RREG32(CP_RB1_WPTR);
+ } else if (ring == &adev->gfx.compute_ring[1]) {
+ WREG32(CP_RB2_WPTR, ring->wptr);
+ (void)RREG32(CP_RB2_WPTR);
+ } else {
+ BUG();
+ }
+}
+
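+/*
+ * Bring up the two compute rings on CP_RB1/CP_RB2: program ring size,
+ * read/write pointers and base address, then run a ring test on each.
+ */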
+static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+ u64 rptr_addr;
+
+ /* ring1 - compute only */
+ /* Set ring buffer size */
+
+ ring = &adev->gfx.compute_ring[0];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB1_CNTL, tmp);
+
+ WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+ ring->wptr = 0;
+ WREG32(CP_RB1_WPTR, ring->wptr);
+
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32(CP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB1_CNTL, tmp);
+ WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+ ring = &adev->gfx.compute_ring[1];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB2_CNTL, tmp);
+
+ WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+ ring->wptr = 0;
+ WREG32(CP_RB2_WPTR, ring->wptr);
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32(CP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB2_CNTL, tmp);
+ WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+ adev->gfx.compute_ring[0].ready = true;
+ adev->gfx.compute_ring[1].ready = true;
+
+ r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[0]);
+ if (r) {
+ adev->gfx.compute_ring[0].ready = false;
+ return r;
+ }
+
+ r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[1]);
+ if (r) {
+ adev->gfx.compute_ring[1].ready = false;
+ return r;
+ }
+
+ return 0;
+}
+
+static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable)
+{
+ gfx_v6_0_cp_gfx_enable(adev, enable);
+}
+
+static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
+{
+ return gfx_v6_0_cp_gfx_load_microcode(adev);
+}
+
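+/*
+ * Gate the CP context busy/empty interrupts.  When disabling, also poll
+ * RLC_STAT until the RLC reports the expected clock/power status.
+ */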
+static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp = RREG32(CP_INT_CNTL_RING0);
+ u32 mask;
+ int i;
+
+ if (enable)
+ tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ else
+ tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ WREG32(CP_INT_CNTL_RING0, tmp);
+
+ if (!enable) {
+ /* read a gfx register */
+ tmp = RREG32(DB_DEPTH_INFO);
+
+ mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
+ break;
+ udelay(1);
+ }
+ }
+}
+
+static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
+{
+ int r;
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+ r = gfx_v6_0_cp_load_microcode(adev);
+ if (r)
+ return r;
+
+ r = gfx_v6_0_cp_gfx_resume(adev);
+ if (r)
+ return r;
+ r = gfx_v6_0_cp_compute_resume(adev);
+ if (r)
+ return r;
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+ return 0;
+}
+
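+/*
+ * Emit a WAIT_REG_MEM packet that stalls the engine (PFP on gfx rings,
+ * ME on compute) until the fence location in memory reaches the latest
+ * synced sequence number.
+ */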
+static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, 4); /* poll interval */
+
+ if (usepfp) {
+ /* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
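+/*
+ * Flush the VM TLB: write the new page directory base for this VM
+ * context, request an invalidate for its context bit, then wait for
+ * the invalidate to complete before (on gfx rings) re-syncing PFP/CE.
+ */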
+static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+
+ /* write new base address */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(0)));
+ if (vm_id < 8) {
+ amdgpu_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ } else {
+ amdgpu_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+ }
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ /* bits 0-15 are the VM contexts0-15 */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ WRITE_DATA_DST_SEL(0)));
+ amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 1 << vm_id);
+
+ /* wait for the invalidate to complete */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0); /* ref */
+ amdgpu_ring_write(ring, 0); /* mask */
+ amdgpu_ring_write(ring, 0x20); /* poll interval */
+
+ if (usepfp) {
+ /* sync PFP to ME, otherwise we might get invalid PFP reads */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ amdgpu_ring_write(ring, 0x0);
+
+ /* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
+static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->gfx.rlc.save_restore_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
+ adev->gfx.rlc.save_restore_obj = NULL;
+ }
+
+ if (adev->gfx.rlc.clear_state_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+ adev->gfx.rlc.clear_state_obj = NULL;
+ }
+
+ if (adev->gfx.rlc.cp_table_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+ adev->gfx.rlc.cp_table_obj = NULL;
+ }
+}
+
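+/*
+ * Allocate and fill the RLC buffers: the save/restore block (the register
+ * list programmed via RLC_SAVE_AND_RESTORE_BASE for power gating) and the
+ * clear state block generated by gfx_v6_0_get_csb_buffer().
+ */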
+static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
+{
+ const u32 *src_ptr;
+ volatile u32 *dst_ptr;
+ u32 dws, i;
+ u64 reg_list_mc_addr;
+ const struct cs_section_def *cs_data;
+ int r;
+
+ adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list;
+ adev->gfx.rlc.reg_list_size =
+ (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+
+ adev->gfx.rlc.cs_data = si_cs_data;
+ src_ptr = adev->gfx.rlc.reg_list;
+ dws = adev->gfx.rlc.reg_list_size;
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (src_ptr) {
+ /* save restore block */
+ if (adev->gfx.rlc.save_restore_obj == NULL) {
+
+ r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL,
+ &adev->gfx.rlc.save_restore_obj);
+
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+ if (unlikely(r != 0)) {
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ /* write the sr buffer */
+ dst_ptr = adev->gfx.rlc.sr_ptr;
+ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+ }
+
+ if (cs_data) {
+ /* clear state block */
+ adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
+ dws = adev->gfx.rlc.clear_state_size + (256 / 4);
+
+ if (adev->gfx.rlc.clear_state_obj == NULL) {
+ r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL,
+ &adev->gfx.rlc.clear_state_obj);
+
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ }
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0)) {
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+ gfx_v6_0_rlc_fini(adev);
+ return r;
+ }
+ /* set up the cs buffer */
+ dst_ptr = adev->gfx.rlc.cs_ptr;
+ reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
+ dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+ dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+ dst_ptr[2] = cpu_to_le32(adev->gfx.rlc.clear_state_size);
+ gfx_v6_0_get_csb_buffer(adev, &dst_ptr[(256/4)]);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ return 0;
+}
+
+static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp;
+
+ tmp = RREG32(RLC_LB_CNTL);
+ if (enable)
+ tmp |= LOAD_BALANCE_ENABLE;
+ else
+ tmp &= ~LOAD_BALANCE_ENABLE;
+ WREG32(RLC_LB_CNTL, tmp);
+
+ if (!enable) {
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ WREG32(SPI_LB_CU_MASK, 0x00ff);
+ }
+}
+
+static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
+ break;
+ udelay(1);
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
+{
+ u32 tmp;
+
+ tmp = RREG32(RLC_CNTL);
+ if (tmp != rlc)
+ WREG32(RLC_CNTL, rlc);
+}
+
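+/*
+ * Disable the RLC and wait for it to go idle.  Returns the previous
+ * RLC_CNTL value so the caller can restore it with gfx_v6_0_update_rlc().
+ */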
+static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_CNTL);
+
+ if (data & RLC_ENABLE) {
+ data &= ~RLC_ENABLE;
+ WREG32(RLC_CNTL, data);
+
+ gfx_v6_0_wait_for_rlc_serdes(adev);
+ }
+
+ return orig;
+}
+
+static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
+{
+ WREG32(RLC_CNTL, 0);
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+ gfx_v6_0_wait_for_rlc_serdes(adev);
+}
+
+static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
+{
+ WREG32(RLC_CNTL, RLC_ENABLE);
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+ udelay(50);
+}
+
+static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(GRBM_SOFT_RESET);
+
+ tmp |= SOFT_RESET_RLC;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ udelay(50);
+ tmp &= ~SOFT_RESET_RLC;
+ WREG32(GRBM_SOFT_RESET, tmp);
+ udelay(50);
+}
+
+static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ /* Enable LBPW only for DDR3 */
+ tmp = RREG32(MC_SEQ_MISC0);
+ if ((tmp & 0xF0000000) == 0xB0000000)
+ return true;
+ return false;
+}
+
+static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
+{
+}
+
+static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
+{
+ u32 i;
+ const struct rlc_firmware_header_v1_0 *hdr;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ if (!adev->gfx.rlc_fw)
+ return -EINVAL;
+
+ gfx_v6_0_rlc_stop(adev);
+ gfx_v6_0_rlc_reset(adev);
+ gfx_v6_0_init_pg(adev);
+ gfx_v6_0_init_cg(adev);
+
+ WREG32(RLC_RL_BASE, 0);
+ WREG32(RLC_RL_SIZE, 0);
+ WREG32(RLC_LB_CNTL, 0);
+ WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+ WREG32(RLC_LB_CNTR_INIT, 0);
+ WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+
+ WREG32(RLC_MC_CNTL, 0);
+ WREG32(RLC_UCODE_CNTL, 0);
+
+ hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ fw_data = (const __le32 *)
+ (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ amdgpu_ucode_print_rlc_hdr(&hdr->header);
+
+ for (i = 0; i < fw_size; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+ }
+ WREG32(RLC_UCODE_ADDR, 0);
+
+ gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
+ gfx_v6_0_rlc_start(adev);
+
+ return 0;
+}
+
+static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig, tmp;
+
+ orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+ WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
+
+ tmp = gfx_v6_0_halt_rlc(adev);
+
+ WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
+
+ gfx_v6_0_wait_for_rlc_serdes(adev);
+ gfx_v6_0_update_rlc(adev, tmp);
+
+ WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
+
+ data |= CGCG_EN | CGLS_EN;
+ } else {
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+
+ data &= ~(CGCG_EN | CGLS_EN);
+ }
+
+ if (orig != data)
+ WREG32(RLC_CGCG_CGLS_CTRL, data);
+}
+
+static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig, tmp = 0;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data = 0x96940200;
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+ orig = data = RREG32(CP_MEM_SLP_CNTL);
+ data |= CP_MEM_LS_EN;
+ if (orig != data)
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data &= 0xffffffc0;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ tmp = gfx_v6_0_halt_rlc(adev);
+
+ WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+
+ gfx_v6_0_update_rlc(adev, tmp);
+ } else {
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000003;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ data = RREG32(CP_MEM_SLP_CNTL);
+ if (data & CP_MEM_LS_EN) {
+ data &= ~CP_MEM_LS_EN;
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data |= LS_OVERRIDE | OVERRIDE;
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+
+ tmp = gfx_v6_0_halt_rlc(adev);
+
+ WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
+ WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
+
+ gfx_v6_0_update_rlc(adev, tmp);
+ }
+}
+
+/*
+static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
+ bool enable)
+{
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+ if (enable) {
+ gfx_v6_0_enable_mgcg(adev, true);
+ gfx_v6_0_enable_cgcg(adev, true);
+ } else {
+ gfx_v6_0_enable_cgcg(adev, false);
+ gfx_v6_0_enable_mgcg(adev, false);
+ }
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+}
+*/
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
+ bool enable)
+{
+}
+
+static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
+ bool enable)
+{
+}
+
+static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
+ data &= ~0x8000;
+ else
+ data |= 0x8000;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
+{
+}
+
+/*
+static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ if (adev->asic_type == CHIP_KAVERI)
+ max_me = 5;
+
+ if (adev->gfx.rlc.cp_table_ptr == NULL)
+ return;
+
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i ++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+*/
+
+static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp;
+
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+ tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
+ WREG32(RLC_TTOP_D, tmp);
+
+ tmp = RREG32(RLC_PG_CNTL);
+ tmp |= GFX_PG_ENABLE;
+ WREG32(RLC_PG_CNTL, tmp);
+
+ tmp = RREG32(RLC_AUTO_PG_CTRL);
+ tmp |= AUTO_PG_EN;
+ WREG32(RLC_AUTO_PG_CTRL, tmp);
+ } else {
+ tmp = RREG32(RLC_AUTO_PG_CTRL);
+ tmp &= ~AUTO_PG_EN;
+ WREG32(RLC_AUTO_PG_CTRL, tmp);
+
+ tmp = RREG32(DB_RENDER_CONTROL); /* dummy read; the value is not used */
+ }
+}
+
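+/*
+ * Derive the active-CU bitmap for one shader array: the shader array
+ * config registers carry disabled-CU bits, so combine them, invert and
+ * mask down to max_cu_per_sh.
+ */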
+static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev,
+ u32 se, u32 sh)
+{
+ u32 mask = 0, tmp, tmp1;
+ int i;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff);
+ tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ tmp &= 0xffff0000;
+ tmp |= tmp1;
+ tmp >>= 16;
+
+ for (i = 0; i < adev->gfx.config.max_cu_per_sh; i++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+
+ return (~tmp) & mask;
+}
+
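+/*
+ * Mark up to two active CUs per shader array as "always on" and program
+ * the RLC with that mask and the total number of active CUs.
+ */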
+static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
+{
+ u32 i, j, k, active_cu_number = 0;
+ u32 mask, counter, cu_bitmap;
+ u32 tmp = 0;
+
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ cu_bitmap = 0;
+ counter = 0;
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) {
+ if (counter < 2)
+ cu_bitmap |= mask;
+ counter++;
+ }
+ mask <<= 1;
+ }
+
+ active_cu_number += counter;
+ tmp |= (cu_bitmap << (i * 16 + j * 8));
+ }
+ }
+
+ WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+ tmp = RREG32(RLC_MAX_PG_CU);
+ tmp &= ~MAX_PU_CU_MASK;
+ tmp |= MAX_PU_CU(active_cu_number);
+ WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
+ data |= STATIC_PER_CU_PG_ENABLE;
+ else
+ data &= ~STATIC_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
+ data |= DYN_PER_CU_PG_ENABLE;
+ else
+ data &= ~DYN_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+
+ tmp = RREG32(RLC_PG_CNTL);
+ tmp |= GFX_PG_SRC;
+ WREG32(RLC_PG_CNTL, tmp);
+
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+
+ tmp = RREG32(RLC_AUTO_PG_CTRL);
+
+ tmp &= ~GRBM_REG_SGIT_MASK;
+ tmp |= GRBM_REG_SGIT(0x700);
+ tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
+ WREG32(RLC_AUTO_PG_CTRL, tmp);
+}
+
+static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
+{
+ gfx_v6_0_enable_gfx_cgpg(adev, enable);
+ gfx_v6_0_enable_gfx_static_mgpg(adev, enable);
+ gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable);
+}
+
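+/*
+ * Size of the clear state buffer in dwords; the accounting here must
+ * stay in sync with the packets emitted by gfx_v6_0_get_csb_buffer().
+ */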
+static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (adev->gfx.rlc.cs_data == NULL)
+ return 0;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
+ }
+ /* pa_sc_raster_config */
+ count += 3;
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
+
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
+ volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (adev->gfx.rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] =
+ cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ } else {
+ return;
+ }
+ }
+ }
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ buffer[count++] = cpu_to_le32(0x2a00126a);
+ break;
+ case CHIP_VERDE:
+ buffer[count++] = cpu_to_le32(0x0000124a);
+ break;
+ case CHIP_OLAND:
+ buffer[count++] = cpu_to_le32(0x00000082);
+ break;
+ case CHIP_HAINAN:
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ default:
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ }
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
+{
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v6_0_enable_sclk_slowdown_on_pu(adev, true);
+ gfx_v6_0_enable_sclk_slowdown_on_pd(adev, true);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+ gfx_v6_0_init_gfx_cgpg(adev);
+ gfx_v6_0_enable_cp_pg(adev, true);
+ gfx_v6_0_enable_gds_pg(adev, true);
+ } else {
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+ }
+ gfx_v6_0_init_ao_cu_mask(adev);
+ gfx_v6_0_update_gfx_pg(adev, true);
+ } else {
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
+ }
+}
+
+static void gfx_v6_0_fini_pg(struct amdgpu_device *adev)
+{
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v6_0_update_gfx_pg(adev, false);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+ gfx_v6_0_enable_cp_pg(adev, false);
+ gfx_v6_0_enable_gds_pg(adev, false);
+ }
+ }
+}
+
+static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+ uint64_t clock;
+
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
+ WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ return clock;
+}
+
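+/*
+ * Unlike the gfx v7 variant later in this patch, no per-flag load bits
+ * are set here; only load_enable (0x80000000) is emitted in the
+ * CONTEXT_CONTROL packet.
+ */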
+static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ amdgpu_ring_write(ring, 0x80000000);
+ amdgpu_ring_write(ring, 0);
+}
+
+static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 6; /* gfx_v6_0_ring_emit_ib */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+ 3; /* gfx_v6_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 5 + /* gfx_v6_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v6_0_ring_emit_vm_flush */
+ 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v6_0_select_se_sh,
+};
+
+static int gfx_v6_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
+ adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+ gfx_v6_0_set_ring_funcs(adev);
+ gfx_v6_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+static int gfx_v6_0_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, r;
+
+ r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+
+ gfx_v6_0_scratch_init(adev);
+
+ r = gfx_v6_0_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load gfx firmware!\n");
+ return r;
+ }
+
+ r = gfx_v6_0_rlc_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init rlc BOs!\n");
+ return r;
+ }
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ ring->ring_obj = NULL;
+ sprintf(ring->name, "gfx");
+ r = amdgpu_ring_init(adev, ring, 1024,
+ 0x80000000, 0xf,
+ &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
+ AMDGPU_RING_TYPE_GFX);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ unsigned irq_type;
+
+ if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
+ DRM_ERROR("Too many (%d) compute rings!\n", i);
+ break;
+ }
+ ring = &adev->gfx.compute_ring[i];
+ ring->ring_obj = NULL;
+ ring->use_doorbell = false;
+ ring->doorbell_index = 0;
+ ring->me = 1;
+ ring->pipe = i;
+ ring->queue = i;
+ sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
+ r = amdgpu_ring_init(adev, ring, 1024,
+ 0x80000000, 0xf,
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_TYPE_COMPUTE);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static int gfx_v6_0_sw_fini(void *handle)
+{
+ int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
+ amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
+ amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+ gfx_v6_0_rlc_fini(adev);
+
+ return 0;
+}
+
+static int gfx_v6_0_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gfx_v6_0_gpu_init(adev);
+
+ r = gfx_v6_0_rlc_resume(adev);
+ if (r)
+ return r;
+
+ r = gfx_v6_0_cp_resume(adev);
+ if (r)
+ return r;
+
+ adev->gfx.ce_ram_size = 0x8000;
+
+ return r;
+}
+
+static int gfx_v6_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gfx_v6_0_cp_enable(adev, false);
+ gfx_v6_0_rlc_stop(adev);
+ gfx_v6_0_fini_pg(adev);
+
+ return 0;
+}
+
+static int gfx_v6_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return gfx_v6_0_hw_fini(adev);
+}
+
+static int gfx_v6_0_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return gfx_v6_0_hw_init(adev);
+}
+
+static bool gfx_v6_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return !(RREG32(GRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK);
+}
+
+static int gfx_v6_0_wait_for_idle(void *handle)
+{
+ unsigned i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (gfx_v6_0_is_idle(handle))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int gfx_v6_0_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+ int ring,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ if (ring == 0) {
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+ cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+ } else {
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+ cp_int_cntl &= ~CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+ }
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ if (ring == 0) {
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING1);
+ cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING1, cp_int_cntl);
+ } else {
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING2);
+ cp_int_cntl |= CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING2, cp_int_cntl);
+ }
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cp_int_cntl;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cp_int_cntl = RREG32(CP_INT_CNTL_RING0);
+ cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
+ WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (type) {
+ case AMDGPU_CP_IRQ_GFX_EOP:
+ gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+ gfx_v6_0_set_compute_eop_interrupt_state(adev, 0, state);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+ gfx_v6_0_set_compute_eop_interrupt_state(adev, 1, state);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ switch (entry->ring_id) {
+ case 0:
+ amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
+ break;
+ case 1:
+ case 2:
+ amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id - 1]);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal register access in command stream\n");
+ schedule_work(&adev->reset_work);
+ return 0;
+}
+
+static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal instruction in command stream\n");
+ schedule_work(&adev->reset_work);
+ return 0;
+}
+
+static int gfx_v6_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ bool gate = false;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (state == AMD_CG_STATE_GATE)
+ gate = true;
+
+ gfx_v6_0_enable_gui_idle_interrupt(adev, false);
+ if (gate) {
+ gfx_v6_0_enable_mgcg(adev, true);
+ gfx_v6_0_enable_cgcg(adev, true);
+ } else {
+ gfx_v6_0_enable_cgcg(adev, false);
+ gfx_v6_0_enable_mgcg(adev, false);
+ }
+ gfx_v6_0_enable_gui_idle_interrupt(adev, true);
+
+ return 0;
+}
+
+static int gfx_v6_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ bool gate = false;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (state == AMD_PG_STATE_GATE)
+ gate = true;
+
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v6_0_update_gfx_pg(adev, gate);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
+ gfx_v6_0_enable_cp_pg(adev, gate);
+ gfx_v6_0_enable_gds_pg(adev, gate);
+ }
+ }
+
+ return 0;
+}
+
+const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
+ .name = "gfx_v6_0",
+ .early_init = gfx_v6_0_early_init,
+ .late_init = NULL,
+ .sw_init = gfx_v6_0_sw_init,
+ .sw_fini = gfx_v6_0_sw_fini,
+ .hw_init = gfx_v6_0_hw_init,
+ .hw_fini = gfx_v6_0_hw_fini,
+ .suspend = gfx_v6_0_suspend,
+ .resume = gfx_v6_0_resume,
+ .is_idle = gfx_v6_0_is_idle,
+ .wait_for_idle = gfx_v6_0_wait_for_idle,
+ .soft_reset = gfx_v6_0_soft_reset,
+ .set_clockgating_state = gfx_v6_0_set_clockgating_state,
+ .set_powergating_state = gfx_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
+ .get_rptr = gfx_v6_0_ring_get_rptr,
+ .get_wptr = gfx_v6_0_ring_get_wptr,
+ .set_wptr = gfx_v6_0_ring_set_wptr_gfx,
+ .parse_cs = NULL,
+ .emit_ib = gfx_v6_0_ring_emit_ib,
+ .emit_fence = gfx_v6_0_ring_emit_fence,
+ .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+ .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+ .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+ .test_ring = gfx_v6_0_ring_test_ring,
+ .test_ib = gfx_v6_0_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
+ .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx,
+};
+
+static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
+ .get_rptr = gfx_v6_0_ring_get_rptr,
+ .get_wptr = gfx_v6_0_ring_get_wptr,
+ .set_wptr = gfx_v6_0_ring_set_wptr_compute,
+ .parse_cs = NULL,
+ .emit_ib = gfx_v6_0_ring_emit_ib,
+ .emit_fence = gfx_v6_0_ring_emit_fence,
+ .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
+ .emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
+ .emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
+ .test_ring = gfx_v6_0_ring_test_ring,
+ .test_ib = gfx_v6_0_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute,
+};
+
+static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ adev->gfx.gfx_ring[i].funcs = &gfx_v6_0_ring_funcs_gfx;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ adev->gfx.compute_ring[i].funcs = &gfx_v6_0_ring_funcs_compute;
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs = {
+ .set = gfx_v6_0_set_eop_interrupt_state,
+ .process = gfx_v6_0_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs = {
+ .set = gfx_v6_0_set_priv_reg_fault_state,
+ .process = gfx_v6_0_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs = {
+ .set = gfx_v6_0_set_priv_inst_fault_state,
+ .process = gfx_v6_0_priv_inst_irq,
+};
+
+static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+ adev->gfx.eop_irq.funcs = &gfx_v6_0_eop_irq_funcs;
+
+ adev->gfx.priv_reg_irq.num_types = 1;
+ adev->gfx.priv_reg_irq.funcs = &gfx_v6_0_priv_reg_irq_funcs;
+
+ adev->gfx.priv_inst_irq.num_types = 1;
+ adev->gfx.priv_inst_irq.funcs = &gfx_v6_0_priv_inst_irq_funcs;
+}
+
+static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
+{
+ int i, j, k, counter, active_cu_number = 0;
+ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+
+ memset(cu_info, 0, sizeof(*cu_info));
+
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ ao_bitmap = 0;
+ counter = 0;
+ bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j);
+ cu_info->bitmap[i][j] = bitmap;
+
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (bitmap & mask) {
+ if (counter < 2)
+ ao_bitmap |= mask;
+ counter++;
+ }
+ mask <<= 1;
+ }
+ active_cu_number += counter;
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ }
+ }
+
+ cu_info->number = active_cu_number;
+ cu_info->ao_cu_mask = ao_cu_mask;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
new file mode 100644
index 000000000000..b9657e72b248
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V6_0_H__
+#define __GFX_V6_0_H__
+
+extern const struct amd_ip_funcs gfx_v6_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 425413fcaf02..71116da9e782 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1645,6 +1645,147 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask;
}
+static void
+gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+ SE_XSEL(1) | SE_YSEL(1);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_HAWAII:
+ *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+ RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
+ PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
+ SE_YSEL(3);
+ *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+ SE_PAIR_YSEL(2);
+ break;
+ case CHIP_KAVERI:
+ *rconf |= RB_MAP_PKR0(2);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ *rconf |= 0x0;
+ *rconf1 |= 0x0;
+ break;
+ default:
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ break;
+ }
+}
+
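+/*
+ * When some RBs are harvested the default raster config would reference
+ * disabled backends; recompute the per-SE raster config so the SE/PKR/RB
+ * map fields only cover RBs present in rb_mask.
+ */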
+static void
+gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+ u32 raster_config, u32 raster_config_1,
+ unsigned rb_mask, unsigned num_rb)
+{
+ unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+ unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+ unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+ unsigned rb_per_se = num_rb / num_se;
+ unsigned se_mask[4];
+ unsigned se;
+
+ se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+ se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+ se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+ se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+ WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+ WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+ WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+ if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+ (!se_mask[2] && !se_mask[3]))) {
+ raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+ if (!se_mask[0] && !se_mask[1]) {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+ } else {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+ }
+ }
+
+ for (se = 0; se < num_se; se++) {
+ unsigned raster_config_se = raster_config;
+ unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+ unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+ int idx = (se / 2) * 2;
+
+ if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+ raster_config_se &= ~SE_MAP_MASK;
+
+ if (!se_mask[idx]) {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ } else {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ }
+ }
+
+ pkr0_mask &= rb_mask;
+ pkr1_mask &= rb_mask;
+ if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+ raster_config_se &= ~PKR_MAP_MASK;
+
+ if (!pkr0_mask) {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ } else {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ }
+ }
+
+ if (rb_per_se >= 2) {
+ unsigned rb0_mask = 1 << (se * rb_per_se);
+ unsigned rb1_mask = rb0_mask << 1;
+
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+
+ if (rb_per_se > 2) {
+ rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+ rb1_mask = rb0_mask << 1;
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+ }
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on CI+ */
+ gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on CI+ */
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
/**
* gfx_v7_0_setup_rb - setup the RBs on the asic
*
@@ -1658,9 +1799,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
+ u32 raster_config = 0, raster_config_1 = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -1672,10 +1815,25 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
}
}
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+ num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines, 16);
+
+ gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
+
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes) {
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ } else {
+ gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
+ }
+ mutex_unlock(&adev->grbm_idx_mutex);
}
/**
@@ -2096,6 +2254,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}
+static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+ uint32_t dw2 = 0;
+
+ dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
+ if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+ /* set load_global_config & load_global_uconfig */
+ dw2 |= 0x8001;
+ /* set load_cs_sh_regs */
+ dw2 |= 0x01000000;
+ /* set load_per_context_state & load_gfx_sh_regs */
+ dw2 |= 0x10002;
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ amdgpu_ring_write(ring, dw2);
+ amdgpu_ring_write(ring, 0);
+}
+
/**
* gfx_v7_0_ring_test_ib - basic ring IB test
*
@@ -2443,7 +2620,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
return 0;
}
-static u32 gfx_v7_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->rptr_offs];
}
@@ -2463,11 +2640,6 @@ static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
(void)RREG32(mmCP_RB0_WPTR);
}
-static u32 gfx_v7_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
- return ring->adev->wb.wb[ring->rptr_offs];
-}
-
static u32 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
/* XXX check if swapping is necessary on BE */
@@ -4182,6 +4354,41 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* gfx_v7_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+ 3; /* gfx_v7_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* gfx_v7_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 20 + /* gfx_v7_0_ring_emit_gds_switch */
+ 7 + /* gfx_v7_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v7_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v7_0_ring_emit_vm_flush */
+ 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v7_0_select_se_sh,
@@ -4471,24 +4678,21 @@ static int gfx_v7_0_sw_init(void *handle)
}
/* reserve GDS, GWS and OA resource for gfx */
- r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GDS, 0,
- NULL, NULL, &adev->gds.gds_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+ &adev->gds.gds_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GWS, 0,
- NULL, NULL, &adev->gds.gws_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+ &adev->gds.gws_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_OA, 0,
- NULL, NULL, &adev->gds.oa_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+ &adev->gds.oa_gfx_bo, NULL, NULL);
if (r)
return r;
@@ -4504,9 +4708,9 @@ static int gfx_v7_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
- amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
- amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+ amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4937,7 +5141,7 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
- .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
+ .get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.parse_cs = NULL,
@@ -4952,10 +5156,13 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
+ .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx,
+ .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
- .get_rptr = gfx_v7_0_ring_get_rptr_compute,
+ .get_rptr = gfx_v7_0_ring_get_rptr,
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
.parse_cs = NULL,
@@ -4970,6 +5177,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute,
+ .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b8184617ca25..bb97182dc749 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -640,7 +640,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
- mmATC_MISC_CG, 0xffffffff, 0x000c0200,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -703,7 +702,10 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
polaris10_golden_common_all,
(const u32)ARRAY_SIZE(polaris10_golden_common_all));
WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
- if (adev->pdev->revision == 0xc7) {
+ if (adev->pdev->revision == 0xc7 &&
+ ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
+ (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
+ (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) {
amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
}
@@ -1233,10 +1235,9 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
if (adev->gfx.rlc.clear_state_obj) {
r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
adev->gfx.rlc.clear_state_obj = NULL;
}
@@ -1248,7 +1249,6 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
adev->gfx.rlc.cp_table_obj = NULL;
}
@@ -1290,14 +1290,14 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
&adev->gfx.rlc.clear_state_gpu_addr);
if (r) {
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
gfx_v8_0_rlc_fini(adev);
return r;
}
r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
gfx_v8_0_rlc_fini(adev);
return r;
}
@@ -1332,7 +1332,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
&adev->gfx.rlc.cp_table_gpu_addr);
if (r) {
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
return r;
}
r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
@@ -1345,7 +1345,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
}
return 0;
@@ -1361,7 +1360,6 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
adev->gfx.mec.hpd_eop_obj = NULL;
}
@@ -2082,24 +2080,21 @@ static int gfx_v8_0_sw_init(void *handle)
}
/* reserve GDS, GWS and OA resource for gfx */
- r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
- NULL, &adev->gds.gds_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
+ &adev->gds.gds_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
- NULL, &adev->gds.gws_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
+ &adev->gds.gws_gfx_bo, NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_OA, 0, NULL,
- NULL, &adev->gds.oa_gfx_bo);
+ r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
+ &adev->gds.oa_gfx_bo, NULL, NULL);
if (r)
return r;
@@ -2117,9 +2112,9 @@ static int gfx_v8_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_bo_unref(&adev->gds.oa_gfx_bo);
- amdgpu_bo_unref(&adev->gds.gws_gfx_bo);
- amdgpu_bo_unref(&adev->gds.gds_gfx_bo);
+ amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2127,9 +2122,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
gfx_v8_0_mec_fini(adev);
-
gfx_v8_0_rlc_fini(adev);
-
gfx_v8_0_free_microcode(adev);
return 0;
@@ -3465,19 +3458,16 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
else
data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
- if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (se_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (sh_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- } else {
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- }
+
WREG32(mmGRBM_GFX_INDEX, data);
}
@@ -3490,11 +3480,10 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
- data = RREG32(mmCC_RB_BACKEND_DISABLE);
- data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ data = RREG32(mmCC_RB_BACKEND_DISABLE) |
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+ data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se);
@@ -3502,13 +3491,163 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask;
}
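
The hunk above folds an explicit mask-and-shift into REG_GET_FIELD. A self-contained illustration of that extraction idiom (the mask, shift, and register values here are invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define BACKEND_DISABLE_MASK  0x00ff0000u
#define BACKEND_DISABLE_SHIFT 16

static uint32_t get_field(uint32_t reg, uint32_t mask, unsigned shift)
{
        return (reg & mask) >> shift;           /* isolate, then align */
}

int main(void)
{
        uint32_t cc = 0x00030000;               /* two RBs fused off */
        uint32_t mask = (1u << 4) - 1;          /* 4 possible RBs */

        /* invert the disable bits to get the active bitmap: prints 0xc */
        printf("active rbs: 0x%x\n",
               (~get_field(cc, BACKEND_DISABLE_MASK,
                           BACKEND_DISABLE_SHIFT)) & mask);
        return 0;
}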
+static void
+gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+ RB_XSEL2(1) | PKR_MAP(2) |
+ PKR_XSEL(1) | PKR_YSEL(1) |
+ SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
+ *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+ SE_PAIR_YSEL(2);
+ break;
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+ SE_XSEL(1) | SE_YSEL(1);
+ *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
+ SE_PAIR_YSEL(2);
+ break;
+ case CHIP_TOPAZ:
+ case CHIP_CARRIZO:
+ *rconf |= RB_MAP_PKR0(2);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_POLARIS11:
+ *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+ SE_XSEL(1) | SE_YSEL(1);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_STONEY:
+ *rconf |= 0x0;
+ *rconf1 |= 0x0;
+ break;
+ default:
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ break;
+ }
+}
+
+static void
+gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+ u32 raster_config, u32 raster_config_1,
+ unsigned rb_mask, unsigned num_rb)
+{
+ unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+ unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+ unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+ unsigned rb_per_se = num_rb / num_se;
+ unsigned se_mask[4];
+ unsigned se;
+
+ se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+ se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+ se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+ se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+ WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+ WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+ WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+ if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+ (!se_mask[2] && !se_mask[3]))) {
+ raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+ if (!se_mask[0] && !se_mask[1]) {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+ } else {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+ }
+ }
+
+ for (se = 0; se < num_se; se++) {
+ unsigned raster_config_se = raster_config;
+ unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+ unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+ int idx = (se / 2) * 2;
+
+ if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+ raster_config_se &= ~SE_MAP_MASK;
+
+ if (!se_mask[idx]) {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ } else {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ }
+ }
+
+ pkr0_mask &= rb_mask;
+ pkr1_mask &= rb_mask;
+ if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+ raster_config_se &= ~PKR_MAP_MASK;
+
+ if (!pkr0_mask) {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ } else {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ }
+ }
+
+ if (rb_per_se >= 2) {
+ unsigned rb0_mask = 1 << (se * rb_per_se);
+ unsigned rb1_mask = rb0_mask << 1;
+
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+
+ if (rb_per_se > 2) {
+ rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+ rb1_mask = rb0_mask << 1;
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+ }
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on VI */
+ gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on VI */
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
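
To make the per-SE mask split above concrete, here is a standalone run of the same chained-shift computation for num_rb = 8, num_se = 4 with one RB fused off (the harvest mask is an invented example):

#include <stdio.h>

int main(void)
{
        unsigned rb_per_se = 8 / 4;             /* num_rb / num_se = 2 */
        unsigned rb_mask = 0xfb;                /* RB 2 fused off */
        unsigned se_mask[4];
        unsigned i;

        se_mask[0] = ((1u << rb_per_se) - 1) & rb_mask;
        for (i = 1; i < 4; i++)                 /* chained, as above */
                se_mask[i] = (se_mask[i - 1] << rb_per_se) & rb_mask;

        for (i = 0; i < 4; i++)                 /* 0x3, 0x8, 0x20, 0x80 */
                printf("se_mask[%u] = 0x%x\n", i, se_mask[i]);
        return 0;
}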
static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
+ u32 raster_config = 0, raster_config_1 = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -3520,10 +3659,26 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+ num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines, 16);
+
+ gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
+
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes) {
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ } else {
+ gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
+ }
+
+ mutex_unlock(&adev->grbm_idx_mutex);
}
/**
@@ -3576,16 +3731,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- tmp = RREG32(mmGRBM_CNTL);
- tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
- WREG32(mmGRBM_CNTL, tmp);
-
+ WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
gfx_v8_0_tiling_mode_table_init(adev);
-
gfx_v8_0_setup_rb(adev);
gfx_v8_0_get_cu_info(adev);
@@ -3769,9 +3920,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
sizeof(indirect_start_offsets)/sizeof(int));
/* save and restore list */
- temp = RREG32(mmRLC_SRM_CNTL);
- temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
- WREG32(mmRLC_SRM_CNTL, temp);
+ WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
WREG32(mmRLC_SRM_ARAM_ADDR, 0);
for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
@@ -3808,11 +3957,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
- uint32_t data;
-
- data = RREG32(mmRLC_SRM_CNTL);
- data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
- WREG32(mmRLC_SRM_CNTL, data);
+ WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
}
static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
@@ -3822,75 +3967,34 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG)) {
- data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
- data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
- data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
- WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
-
- data = 0;
- data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
- data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
- data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
- data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
- WREG32(mmRLC_PG_DELAY, data);
+ WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
- data = RREG32(mmRLC_PG_DELAY_2);
- data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
- data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
- WREG32(mmRLC_PG_DELAY_2, data);
+ data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+ data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+ WREG32(mmRLC_PG_DELAY, data);
- data = RREG32(mmRLC_AUTO_PG_CTRL);
- data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
- data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
- WREG32(mmRLC_AUTO_PG_CTRL, data);
+ WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+ WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
}
}
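
Many hunks in this patch collapse open-coded read-modify-write sequences into WREG32_FIELD. Roughly, the macro expands to a one-line RMW like the sketch below (the EX_-prefixed names are illustrative; see amdgpu.h for the authoritative definition):

#define EX_REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define EX_REG_FIELD_MASK(reg, field)  reg##__##field##_MASK
#define EX_WREG32_FIELD(reg, field, val)                                \
        WREG32(mm##reg,                                                 \
               (RREG32(mm##reg) & ~EX_REG_FIELD_MASK(reg, field)) |    \
               ((val) << EX_REG_FIELD_SHIFT(reg, field)))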
static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
}
static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
}
static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
- else
- data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
}
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
@@ -3927,36 +4031,26 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
}
}
-void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmRLC_CNTL);
-
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
- WREG32(mmRLC_CNTL, tmp);
+ WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
-
gfx_v8_0_wait_for_rlc_serdes(adev);
}
static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmGRBM_SOFT_RESET);
-
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- WREG32(mmGRBM_SOFT_RESET, tmp);
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
- WREG32(mmGRBM_SOFT_RESET, tmp);
+
+ WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmRLC_CNTL);
-
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
- WREG32(mmRLC_CNTL, tmp);
+ WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
/* carrizo: enable the CP interrupt only after CP init */
if (!(adev->flags & AMD_IS_APU))
@@ -3992,20 +4086,26 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
+ u32 tmp;
gfx_v8_0_rlc_stop(adev);
/* disable CG */
- WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+ tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+ RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10)
- WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
+ adev->asic_type == CHIP_POLARIS10) {
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+ tmp &= ~0x3;
+ WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+ }
/* disable PG */
WREG32(mmRLC_PG_CNTL, 0);
gfx_v8_0_rlc_reset(adev);
-
gfx_v8_0_init_pg(adev);
if (!adev->pp_enabled) {
@@ -4300,12 +4400,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
gfx_v8_0_cp_gfx_start(adev);
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
- if (r) {
+ if (r)
ring->ready = false;
- return r;
- }
- return 0;
+ return r;
}
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4980,7 +5078,6 @@ static int gfx_v8_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v8_0_init_golden_registers(adev);
-
gfx_v8_0_gpu_init(adev);
r = gfx_v8_0_rlc_resume(adev);
@@ -4988,8 +5085,6 @@ static int gfx_v8_0_hw_init(void *handle)
return r;
r = gfx_v8_0_cp_resume(adev);
- if (r)
- return r;
return r;
}
@@ -5037,25 +5132,22 @@ static bool gfx_v8_0_is_idle(void *handle)
static int gfx_v8_0_wait_for_idle(void *handle)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;
-
- if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+ if (gfx_v8_0_is_idle(handle))
return 0;
+
udelay(1);
}
return -ETIMEDOUT;
}
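
gfx_v8_0_wait_for_idle() now reuses is_idle() inside the usual bounded-poll shape. As a generic kernel-style sketch (the helper and callback are invented):

static int example_poll_until(bool (*done)(void), unsigned usec_timeout)
{
        unsigned i;

        for (i = 0; i < usec_timeout; i++) {
                if (done())
                        return 0;
                udelay(1);      /* busy-wait 1 us per attempt */
        }
        return -ETIMEDOUT;      /* condition never became true */
}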
-static int gfx_v8_0_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* GRBM_STATUS */
tmp = RREG32(mmGRBM_STATUS);
@@ -5064,16 +5156,12 @@ static int gfx_v8_0_soft_reset(void *handle)
GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
- GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
+ GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
+ GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
- }
-
- if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
- grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
- GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
}
@@ -5084,73 +5172,200 @@ static int gfx_v8_0_soft_reset(void *handle)
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+ if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
+ REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
+ REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+ SOFT_RESET_CPF, 1);
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+ SOFT_RESET_CPC, 1);
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
+ SOFT_RESET_CPG, 1);
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
+ SOFT_RESET_GRBM, 1);
+ }
+
/* SRBM_STATUS */
tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
+ if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
if (grbm_soft_reset || srbm_soft_reset) {
- /* stop the rlc */
- gfx_v8_0_rlc_stop(adev);
+ adev->gfx.grbm_soft_reset = grbm_soft_reset;
+ adev->gfx.srbm_soft_reset = srbm_soft_reset;
+ return true;
+ } else {
+ adev->gfx.grbm_soft_reset = 0;
+ adev->gfx.srbm_soft_reset = 0;
+ return false;
+ }
+}
+
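
check_soft_reset() above only records the pending reset bits in adev->gfx.{grbm,srbm}_soft_reset; the pre/soft/post hooks added below consume them. A hedged sketch of the intended caller ordering (the wrapper is invented; the hook names come from the amd_ip_funcs table at the end of this patch):

static int example_gfx_reset(void *handle, const struct amd_ip_funcs *f)
{
        int r = 0;

        if (!f->check_soft_reset(handle))       /* records pending bits */
                return 0;                       /* nothing hung */

        r = f->pre_soft_reset(handle);          /* stop RLC/CP, drain HQDs */
        if (!r)
                r = f->soft_reset(handle);      /* pulse GRBM/SRBM resets */
        if (!r)
                r = f->post_soft_reset(handle); /* re-init HQDs, restart */
        return r;
}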
+static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ int i;
+
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+ u32 tmp;
+ tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
+ DEQUEUE_REQ, 2);
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ udelay(1);
+ }
+ }
+}
+static int gfx_v8_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
+ return 0;
+
+ grbm_soft_reset = adev->gfx.grbm_soft_reset;
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+ /* stop the rlc */
+ gfx_v8_0_rlc_stop(adev);
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
/* Disable GFX parsing/prefetching */
gfx_v8_0_cp_gfx_enable(adev, false);
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+ int i;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ gfx_v8_0_inactive_hqd(adev, ring);
+ }
/* Disable MEC parsing/prefetching */
gfx_v8_0_cp_compute_enable(adev, false);
+ }
- if (grbm_soft_reset || srbm_soft_reset) {
- tmp = RREG32(mmGMCON_DEBUG);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_STALL, 1);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_CLEAR, 1);
- WREG32(mmGMCON_DEBUG, tmp);
+ return 0;
+}
- udelay(50);
- }
+static int gfx_v8_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 tmp;
- if (grbm_soft_reset) {
- tmp = RREG32(mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
+ return 0;
- udelay(50);
+ grbm_soft_reset = adev->gfx.grbm_soft_reset;
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
- tmp &= ~grbm_soft_reset;
- WREG32(mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmGRBM_SOFT_RESET);
- }
+ if (grbm_soft_reset || srbm_soft_reset) {
+ tmp = RREG32(mmGMCON_DEBUG);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
+ WREG32(mmGMCON_DEBUG, tmp);
+ udelay(50);
+ }
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
+ if (grbm_soft_reset) {
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32(mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmGRBM_SOFT_RESET);
+ }
- if (grbm_soft_reset || srbm_soft_reset) {
- tmp = RREG32(mmGMCON_DEBUG);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_STALL, 0);
- tmp = REG_SET_FIELD(tmp,
- GMCON_DEBUG, GFX_CLEAR, 0);
- WREG32(mmGMCON_DEBUG, tmp);
- }
+ if (srbm_soft_reset) {
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
- /* Wait a little for things to settle down */
udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
}
+
+ if (grbm_soft_reset || srbm_soft_reset) {
+ tmp = RREG32(mmGMCON_DEBUG);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
+ tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
+ WREG32(mmGMCON_DEBUG, tmp);
+ }
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+
+ return 0;
+}
+
+static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
+ WREG32(mmCP_HQD_PQ_RPTR, 0);
+ WREG32(mmCP_HQD_PQ_WPTR, 0);
+ vi_srbm_select(adev, 0, 0, 0, 0);
+}
+
+static int gfx_v8_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
+ return 0;
+
+ grbm_soft_reset = adev->gfx.grbm_soft_reset;
+ srbm_soft_reset = adev->gfx.srbm_soft_reset;
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
+ gfx_v8_0_cp_gfx_resume(adev);
+
+ if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
+ REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
+ int i;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ gfx_v8_0_init_hqd(adev, ring);
+ }
+ gfx_v8_0_cp_compute_resume(adev);
+ }
+ gfx_v8_0_rlc_start(adev);
+
return 0;
}
@@ -5269,8 +5484,6 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- uint32_t data, temp;
-
if (adev->asic_type == CHIP_POLARIS11)
/* Send msg to SMU via Powerplay */
amdgpu_set_powergating_state(adev,
@@ -5278,83 +5491,35 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
enable ?
AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
- temp = data = RREG32(mmRLC_PG_CNTL);
- /* Enable static MGPG */
- if (enable)
- data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- uint32_t data, temp;
-
- temp = data = RREG32(mmRLC_PG_CNTL);
- /* Enable dynamic MGPG */
- if (enable)
- data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- uint32_t data, temp;
-
- temp = data = RREG32(mmRLC_PG_CNTL);
- /* Enable quick PG */
- if (enable)
- data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
}
static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
}
static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
bool enable)
{
- u32 data, orig;
-
- orig = data = RREG32(mmRLC_PG_CNTL);
-
- if (enable)
- data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-
- if (orig != data)
- WREG32(mmRLC_PG_CNTL, data);
+ WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
/* Read any GFX register to wake up GFX. */
if (!enable)
- data = RREG32(mmDB_RENDER_CONTROL);
+ RREG32(mmDB_RENDER_CONTROL);
}
static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -5430,15 +5595,15 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
data = RREG32(mmRLC_SERDES_WR_CTRL);
if (adev->asic_type == CHIP_STONEY)
- data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
- RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
- RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
- RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
- RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
- RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
- RLC_SERDES_WR_CTRL__POWER_UP_MASK |
- RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
- RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
+ data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+ RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
+ RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
+ RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
+ RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
+ RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
+ RLC_SERDES_WR_CTRL__POWER_UP_MASK |
+ RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
+ RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
else
data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
@@ -5461,10 +5626,10 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
#define MSG_ENTER_RLC_SAFE_MODE 1
#define MSG_EXIT_RLC_SAFE_MODE 0
-
-#define RLC_GPR_REG2__REQ_MASK 0x00000001
-#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
-#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
+#define RLC_GPR_REG2__REQ_MASK 0x00000001
+#define RLC_GPR_REG2__REQ__SHIFT 0
+#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
+#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
@@ -5494,7 +5659,7 @@ static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
break;
udelay(1);
}
@@ -5522,7 +5687,7 @@ static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ))
break;
udelay(1);
}
@@ -5554,7 +5719,7 @@ static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
@@ -5581,7 +5746,7 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
@@ -5622,21 +5787,12 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
- if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
/* 1 - RLC memory Light sleep */
- temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
- data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
- if (temp != data)
- WREG32(mmRLC_MEM_SLP_CNTL, data);
- }
+ WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
- if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
- /* 2 - CP memory Light sleep */
- temp = data = RREG32(mmCP_MEM_SLP_CNTL);
- data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
- if (temp != data)
- WREG32(mmCP_MEM_SLP_CNTL, data);
- }
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
+ WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
}
/* 3 - RLC_CGTT_MGCG_OVERRIDE */
@@ -5834,6 +5990,76 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ uint32_t msg_id, pp_state;
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ return 0;
+}
+
+static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ uint32_t msg_id, pp_state;
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_3D,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_RLC,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CP,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ return 0;
+}
+
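
The Tonga and Polaris helpers above differ only in which GFX sub-blocks they message. A table-driven sketch of the same pattern (the array and function names are illustrative):

static const uint32_t ex_polaris_cg_blocks[] = {
        PP_BLOCK_GFX_CG, PP_BLOCK_GFX_3D, PP_BLOCK_GFX_MG,
        PP_BLOCK_GFX_RLC, PP_BLOCK_GFX_CP,
};

static void ex_set_gfx_cg(void *pp_handle, uint32_t pp_state)
{
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(ex_polaris_cg_blocks); i++)
                amd_set_clockgating_by_smu(pp_handle,
                        PP_CG_MSG_ID(PP_GROUP_GFX, ex_polaris_cg_blocks[i],
                                     PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
                                     pp_state));
}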
static int gfx_v8_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -5846,33 +6072,33 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
gfx_v8_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
+ case CHIP_TONGA:
+ gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
+ break;
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
+ break;
default:
break;
}
return 0;
}
-static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+static u32 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
{
- u32 rptr;
-
- rptr = ring->adev->wb.wb[ring->rptr_offs];
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs];
}
static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 wptr;
if (ring->use_doorbell)
/* XXX check if swapping is necessary on BE */
- wptr = ring->adev->wb.wb[ring->wptr_offs];
+ return ring->adev->wb.wb[ring->wptr_offs];
else
- wptr = RREG32(mmCP_RB0_WPTR);
-
- return wptr;
+ return RREG32(mmCP_RB0_WPTR);
}
static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
@@ -5939,12 +6165,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
{
u32 header, control = 0;
- /* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (ctx_switch) {
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- }
-
if (ib->flags & AMDGPU_IB_FLAG_CE)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
else
@@ -5971,9 +6191,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
- (2 << 0) |
+ (2 << 0) |
#endif
- (ib->gpu_addr & 0xFFFFFFFC));
+ (ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}
@@ -6014,14 +6234,6 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
-
- if (usepfp) {
- /* synce CE with ME to prevent CE fetch CEIB before context switch done */
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- }
}
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
@@ -6029,6 +6241,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ /* GFX8 emits 128 dw nop to prevent the DE from doing vm_flush before the CE finishes the CEIB */
+ if (usepfp)
+ amdgpu_ring_insert_nop(ring, 128);
+
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)) |
@@ -6068,18 +6284,11 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
+ /* GFX8 emits 128 dw nop to prevent the CE from accessing the VM before vm_flush finishes */
+ amdgpu_ring_insert_nop(ring, 128);
}
}
-static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
-{
- return ring->adev->wb.wb[ring->rptr_offs];
-}
-
static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
return ring->adev->wb.wb[ring->wptr_offs];
@@ -6115,36 +6324,88 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, upper_32_bits(seq));
}
-static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
- enum amdgpu_interrupt_state state)
+static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
- u32 cp_int_cntl;
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+}
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 0);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl =
- REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 1);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- default:
- break;
+static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+{
+ uint32_t dw2 = 0;
+
+ dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
+ if (flags & AMDGPU_HAVE_CTX_SWITCH) {
+ /* set load_global_config & load_global_uconfig */
+ dw2 |= 0x8001;
+ /* set load_cs_sh_regs */
+ dw2 |= 0x01000000;
+ /* set load_per_context_state & load_gfx_sh_regs for GFX */
+ dw2 |= 0x10002;
+
+ /* set load_ce_ram if preamble presented */
+ if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
+ dw2 |= 0x10000000;
+ } else {
+ /* still load_ce_ram if this is the first time the preamble is
+ * presented, even though no context switch happens.
+ */
+ if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
+ dw2 |= 0x10000000;
}
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ amdgpu_ring_write(ring, dw2);
+ amdgpu_ring_write(ring, 0);
+}
+
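
For readability, the magic dw2 bits used by gfx_v8_ring_emit_cntxcntl() above can be named; the values below are read straight from the code, but the EX_ names are invented, not PM4 header defines:

#define EX_CTXCNTL_LOAD_ENABLE          0x80000000u
#define EX_CTXCNTL_LOAD_GLOBAL_CONFIG   0x00008001u /* + global_uconfig */
#define EX_CTXCNTL_LOAD_CS_SH_REGS      0x01000000u
#define EX_CTXCNTL_LOAD_PER_CTX_STATE   0x00010002u /* + gfx_sh_regs */
#define EX_CTXCNTL_LOAD_CE_RAM          0x10000000u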
+static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* gfx_v8_0_ring_emit_ib_gfx */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring)
+{
+ return
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 6 + 6 + 6 + /* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */
+ 2 + /* gfx_v8_ring_emit_sb */
+ 3; /* gfx_v8_ring_emit_cntxcntl */
+}
+
+static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* gfx_v8_0_ring_emit_ib_compute */
+}
+
+static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring)
+{
+ return
+ 20 + /* gfx_v8_0_ring_emit_gds_switch */
+ 7 + /* gfx_v8_0_ring_emit_hdp_flush */
+ 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+ 7 + /* gfx_v8_0_ring_emit_pipeline_sync */
+ 17 + /* gfx_v8_0_ring_emit_vm_flush */
+ 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+}
+
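
As a quick sanity check, the per-frame dword budgets above sum as follows (standalone arithmetic, not driver code):

#include <stdio.h>

int main(void)
{
        int gfx = 20 + 7 + 5 + (6 + 6 + 6) + 7 + (256 + 19) + 2 + 3;
        int compute = 20 + 7 + 5 + 7 + 17 + (7 + 7 + 7);

        printf("gfx frame: %d dw, compute frame: %d dw\n", gfx, compute);
        /* prints: gfx frame: 337 dw, compute frame: 77 dw */
        return 0;
}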
+static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+ enum amdgpu_interrupt_state state)
+{
+ WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}
static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
int me, int pipe,
enum amdgpu_interrupt_state state)
{
- u32 mec_int_cntl, mec_int_cntl_reg;
-
/*
* amdgpu controls only pipe 0 of MEC1. That's why this function only
* handles the setting of interrupts for this specific pipe. All other
@@ -6154,7 +6415,6 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
if (me == 1) {
switch (pipe) {
case 0:
- mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
break;
default:
DRM_DEBUG("invalid pipe %d\n", pipe);
@@ -6165,22 +6425,8 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
return;
}
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
- mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
- TIME_STAMP_INT_ENABLE, 0);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- mec_int_cntl = RREG32(mec_int_cntl_reg);
- mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
- TIME_STAMP_INT_ENABLE, 1);
- WREG32(mec_int_cntl_reg, mec_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}
static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
@@ -6188,24 +6434,8 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 0);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 1);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
return 0;
}
@@ -6215,24 +6445,8 @@ static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 0);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 1);
- WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
- break;
- default:
- break;
- }
+ WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
return 0;
}
@@ -6338,13 +6552,16 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.resume = gfx_v8_0_resume,
.is_idle = gfx_v8_0_is_idle,
.wait_for_idle = gfx_v8_0_wait_for_idle,
+ .check_soft_reset = gfx_v8_0_check_soft_reset,
+ .pre_soft_reset = gfx_v8_0_pre_soft_reset,
.soft_reset = gfx_v8_0_soft_reset,
+ .post_soft_reset = gfx_v8_0_post_soft_reset,
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
- .get_rptr = gfx_v8_0_ring_get_rptr_gfx,
+ .get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.parse_cs = NULL,
@@ -6359,10 +6576,14 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_switch_buffer = gfx_v8_ring_emit_sb,
+ .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
+ .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx,
+ .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
- .get_rptr = gfx_v8_0_ring_get_rptr_compute,
+ .get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.parse_cs = NULL,
@@ -6377,6 +6598,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute,
+ .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -6479,15 +6702,12 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
- data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
- data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
-
- data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
- data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
+ RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh);
- return (~data) & mask;
+ return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index bc82c794312c..ebed1f829297 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -26,6 +26,4 @@
extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
new file mode 100644
index 000000000000..b13c8aaec078
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -0,0 +1,1071 @@
+
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "gmc_v6_0.h"
+#include "amdgpu_ucode.h"
+#include "si/sid.h"
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v6_0_wait_for_idle(void *handle);
+
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+
+static const u32 crtc_offsets[6] =
+{
+ SI_CRTC0_REGISTER_OFFSET,
+ SI_CRTC1_REGISTER_OFFSET,
+ SI_CRTC2_REGISTER_OFFSET,
+ SI_CRTC3_REGISTER_OFFSET,
+ SI_CRTC4_REGISTER_OFFSET,
+ SI_CRTC5_REGISTER_OFFSET
+};
+
+static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 blackout;
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_stop_mc_access(adev, save);
+
+ gmc_v6_0_wait_for_idle((void *)adev);
+
+ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
+ /* Block CPU access */
+ WREG32(BIF_FB_EN, 0);
+ /* blackout the MC */
+ blackout = REG_SET_FIELD(blackout,
+ mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
+ /* wait for the MC to settle */
+ udelay(100);
+
+}
+
+static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+ u32 tmp;
+
+ /* unblackout the MC */
+ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
+ WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+ /* allow CPU access */
+ tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
+ tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
+ WREG32(BIF_FB_EN, tmp);
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_resume_mc_access(adev, save);
+
+}
+
+static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_name = "tahiti";
+ break;
+ case CHIP_PITCAIRN:
+ chip_name = "pitcairn";
+ break;
+ case CHIP_VERDE:
+ chip_name = "verde";
+ break;
+ case CHIP_OLAND:
+ chip_name = "oland";
+ break;
+ case CHIP_HAINAN:
+ chip_name = "hainan";
+ break;
+ default: BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->mc.fw);
+
+out:
+ if (err) {
+ dev_err(adev->dev,
+ "si_mc: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
+ }
+ return err;
+}
+
+static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
+{
+ const __le32 *new_fw_data = NULL;
+ u32 running;
+ const __le32 *new_io_mc_regs = NULL;
+ int i, regs_size, ucode_size;
+ const struct mc_firmware_header_v1_0 *hdr;
+
+ if (!adev->mc.fw)
+ return -EINVAL;
+
+ hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+
+ amdgpu_ucode_print_mc_hdr(&hdr->header);
+
+ adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+ if (running == 0) {
+
+ /* reset the engine and set to writable */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+ /* load mc io regs */
+ for (i = 0; i < regs_size; i++) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ }
+ /* load the MC ucode */
+ for (i = 0; i < ucode_size; i++) {
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ }
+
+ /* put the engine back into the active state */
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+ WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+ /* wait for training to complete */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+ break;
+ udelay(1);
+ }
+
+ }
+
+ return 0;
+}
+
+static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc)
+{
+ if (mc->mc_vram_size > 0xFFC0000000ULL) {
+ dev_warn(adev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xFFC0000000ULL;
+ mc->mc_vram_size = 0xFFC0000000ULL;
+ }
+ amdgpu_vram_location(adev, &adev->mc, 0);
+ adev->mc.gtt_base_align = 0;
+ amdgpu_gtt_location(adev, mc);
+}
+
+static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x6) {
+ WREG32((0xb05 + j), 0x00000000);
+ WREG32((0xb06 + j), 0x00000000);
+ WREG32((0xb07 + j), 0x00000000);
+ WREG32((0xb08 + j), 0x00000000);
+ WREG32((0xb09 + j), 0x00000000);
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ gmc_v6_0_mc_stop(adev, &save);
+
+ if (gmc_v6_0_wait_for_idle((void *)adev)) {
+ dev_warn(adev->dev, "Wait for MC idle timedout !\n");
+ }
+
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ /* Update configuration */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->mc.vram_end >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+ adev->vram_scratch.gpu_addr >> 12);
+ tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(MC_VM_FB_LOCATION, tmp);
+ /* XXX double check these! */
+ WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ WREG32(MC_VM_AGP_BASE, 0);
+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+
+ if (gmc_v6_0_wait_for_idle((void *)adev)) {
+ dev_warn(adev->dev, "Wait for MC idle timedout !\n");
+ }
+ gmc_v6_0_mc_resume(adev, &save);
+ amdgpu_display_set_vga_render_state(adev, false);
+}
+
+static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
+{
+
+ u32 tmp;
+ int chansize, numchan;
+
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
+ /* Could aper size report 0? */
+ adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+ /* size in MB on si */
+ adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+ adev->mc.visible_vram_size = adev->mc.aper_size;
+
+ /* unless the user has overridden it, size the GART to 1024 MB
+ * or the VRAM size, whichever is larger.
+ */
+ if (amdgpu_gart_size == -1)
+ adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
+ else
+ adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
+
+ gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+
+ return 0;
+}
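
The channel decode in gmc_v6_0_mc_init() above multiplies channel count by channel width to get the bus width. A tiny standalone check with plausible SI numbers (e.g. a 12-channel part with 32-bit channels gives a 384-bit interface):

#include <stdio.h>

int main(void)
{
        int chansize = 32;      /* bits per channel (CHANSIZE decode) */
        int numchan = 12;       /* NOOFCHAN = 7 decodes to 12 channels */

        printf("vram_width = %d bits\n", numchan * chansize); /* 384 */
        return 0;
}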
+
+static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
+ uint32_t vmid)
+{
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+
+ WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
+ void *cpu_pt_addr,
+ uint32_t gpu_page_idx,
+ uint64_t addr,
+ uint32_t flags)
+{
+ void __iomem *ptr = (void *)cpu_pt_addr;
+ uint64_t value;
+
+ value = addr & 0xFFFFFFFFFFFFF000ULL;
+ value |= flags;
+ writeq(value, ptr + (gpu_page_idx * 8));
+
+ return 0;
+}
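
gmc_v6_0_gart_set_pte_pde() above packs a PTE by masking the address to 4 KiB alignment and OR-ing the flag bits into the freed low bits. A standalone illustration (the address and flag values are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x123456789000ULL;      /* page-aligned GPU address */
        uint32_t flags = 0x3;                   /* e.g. valid + system bits */
        uint64_t pte = (addr & 0xFFFFFFFFFFFFF000ULL) | flags;

        printf("pte = 0x%llx\n", (unsigned long long)pte);
        return 0;
}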
+
+static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
+ bool value)
+{
+ u32 tmp;
+
+ tmp = RREG32(VM_CONTEXT1_CNTL);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
+ xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ WREG32(VM_CONTEXT1_CNTL, tmp);
+}
+
+static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ if (adev->gart.robj == NULL) {
+ dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = amdgpu_gart_table_vram_pin(adev);
+ if (r)
+ return r;
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL,
+ (0xA << 7) |
+ ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ ENABLE_ADVANCED_DRIVER_MODEL |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ BANK_SELECT(4) |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+ /* setup context0 */
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(adev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT0_CNTL2, 0);
+ WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+ WREG32(0x575, 0);
+ WREG32(0x576, 0);
+ WREG32(0x577, 0);
+
+ /* empty context1-15 */
+ /* set vm size, must be a multiple of 4 */
+ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
+ /* Assign the pt base to something valid for now; the pts used for
+ * the VMs are determined by the application and set up and assigned
+ * on the fly in the vm part of radeon_gart.c
+ */
+ for (i = 1; i < 16; i++) {
+ if (i < 8)
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+ adev->gart.table_addr >> 12);
+ else
+ WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+ adev->gart.table_addr >> 12);
+ }
+
+ /* enable context1-15 */
+ WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(adev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT1_CNTL2, 4);
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+ PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
+ RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+ PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+ VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+ READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+ WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+ gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+ dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned long long)adev->gart.table_addr);
+ adev->gart.ready = true;
+ return 0;
+}
+
+static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->gart.robj) {
+ dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
+ return 0;
+ }
+ r = amdgpu_gart_init(adev);
+ if (r)
+ return r;
+ adev->gart.table_size = adev->gart.num_gpu_pages * 8;
+ return amdgpu_gart_table_vram_alloc(adev);
+}
+
+static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+{
+ /*unsigned i;
+
+ for (i = 1; i < 16; ++i) {
+ uint32_t reg;
+ if (i < 8)
+ reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
+ else
+ reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
+ adev->vm_manager.saved_table_addr[i] = RREG32(reg);
+ }*/
+
+ /* Disable all tables */
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+ amdgpu_gart_table_vram_unpin(adev);
+}
+
+static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
+{
+ amdgpu_gart_table_vram_free(adev);
+ amdgpu_gart_fini(adev);
+}
+
+static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
+{
+ /*
+ * number of VMs
+ * VMID 0 is reserved for System
+ * amdgpu graphics/compute will use VMIDs 1-7
+ * amdkfd will use VMIDs 8-15
+ */
+ adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+ amdgpu_vm_manager_init(adev);
+
+ /* base offset of vram pages */
+ if (adev->flags & AMD_IS_APU) {
+ u64 tmp = RREG32(MC_VM_FB_OFFSET);
+ tmp <<= 22;
+ adev->vm_manager.vram_base_offset = tmp;
+ } else
+ adev->vm_manager.vram_base_offset = 0;
+
+ return 0;
+}
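+
+/* A hypothetical helper making the VMID split above explicit - VMID 0 is
+ * reserved for the system, 1-7 belong to graphics/compute, 8-15 to amdkfd:
+ *
+ *	static bool vmid_belongs_to_amdkfd(unsigned vmid)
+ *	{
+ *		return vmid >= 8 && vmid <= 15;
+ *	}
+ */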
+
+static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
+{
+}
+
+static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
+ u32 status, u32 addr, u32 mc_client)
+{
+ u32 mc_id;
+ u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
+ u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ xxPROTECTIONS);
+ char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
+ (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+
+ mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ xxMEMORY_CLIENT_ID);
+
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ protections, vmid, addr,
+ REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ xxMEMORY_CLIENT_RW) ?
+ "write" : "read", block, mc_client, mc_id);
+}
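+
+/* Example decode with a hypothetical client id: mc_client = 0x43423120 is the
+ * bytes 'C' 'B' '1' ' ' from MSB to LSB, so the block[] initializer above
+ * produces the NUL-terminated string "CB1 ".
+ */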
+
+/*
+static const u32 mc_cg_registers[] = {
+ MC_HUB_MISC_HUB_CG,
+ MC_HUB_MISC_SIP_CG,
+ MC_HUB_MISC_VM_CG,
+ MC_XPB_CLK_GAT,
+ ATC_MISC_CG,
+ MC_CITF_MISC_WR_CG,
+ MC_CITF_MISC_RD_CG,
+ MC_CITF_MISC_VM_CG,
+ VM_L2_CG,
+};
+
+static const u32 mc_cg_ls_en[] = {
+ MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
+ MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
+ MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+ MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
+ ATC_MISC_CG__MEM_LS_ENABLE_MASK,
+ MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
+ MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
+ MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
+ VM_L2_CG__MEM_LS_ENABLE_MASK,
+};
+
+static const u32 mc_cg_en[] = {
+ MC_HUB_MISC_HUB_CG__ENABLE_MASK,
+ MC_HUB_MISC_SIP_CG__ENABLE_MASK,
+ MC_HUB_MISC_VM_CG__ENABLE_MASK,
+ MC_XPB_CLK_GAT__ENABLE_MASK,
+ ATC_MISC_CG__ENABLE_MASK,
+ MC_CITF_MISC_WR_CG__ENABLE_MASK,
+ MC_CITF_MISC_RD_CG__ENABLE_MASK,
+ MC_CITF_MISC_VM_CG__ENABLE_MASK,
+ VM_L2_CG__ENABLE_MASK,
+};
+
+static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ data |= mc_cg_ls_en[i];
+ else
+ data &= ~mc_cg_ls_en[i];
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ data |= mc_cg_en[i];
+ else
+ data &= ~mc_cg_en[i];
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32_PCIE(ixPCIE_CNTL2);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
+ } else {
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
+ data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
+ }
+
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_CNTL2, data);
+}
+
+static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
+ else
+ data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
+
+ if (orig != data)
+ WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_MEM_POWER_LS);
+
+ if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
+ else
+ data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
+
+ if (orig != data)
+ WREG32(HDP_MEM_POWER_LS, data);
+}
+*/
+
+static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
+{
+ switch (mc_seq_vram_type) {
+ case MC_SEQ_MISC0__MT__GDDR1:
+ return AMDGPU_VRAM_TYPE_GDDR1;
+ case MC_SEQ_MISC0__MT__DDR2:
+ return AMDGPU_VRAM_TYPE_DDR2;
+ case MC_SEQ_MISC0__MT__GDDR3:
+ return AMDGPU_VRAM_TYPE_GDDR3;
+ case MC_SEQ_MISC0__MT__GDDR4:
+ return AMDGPU_VRAM_TYPE_GDDR4;
+ case MC_SEQ_MISC0__MT__GDDR5:
+ return AMDGPU_VRAM_TYPE_GDDR5;
+ case MC_SEQ_MISC0__MT__DDR3:
+ return AMDGPU_VRAM_TYPE_DDR3;
+ default:
+ return AMDGPU_VRAM_TYPE_UNKNOWN;
+ }
+}
+
+static int gmc_v6_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gmc_v6_0_set_gart_funcs(adev);
+ gmc_v6_0_set_irq_funcs(adev);
+
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp = RREG32(MC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+ }
+
+ return 0;
+}
+
+static int gmc_v6_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
+static int gmc_v6_0_sw_init(void *handle)
+{
+ int r;
+ int dma_bits;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+ if (r)
+ return r;
+
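+	/* amdgpu_vm_size is in GB: GB -> bytes is << 30, bytes -> 4KB pages is >> 12, hence << 18 */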
+ adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+
+ adev->mc.mc_mask = 0xffffffffffULL;
+
+ adev->need_dma32 = false;
+ dma_bits = adev->need_dma32 ? 32 : 40;
+ r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ adev->need_dma32 = true;
+ dma_bits = 32;
+ dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+ }
+ r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
+ dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
+ }
+
+ r = gmc_v6_0_init_microcode(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to load mc firmware!\n");
+ return r;
+ }
+
+ r = amdgpu_ttm_global_init(adev);
+ if (r)
+ return r;
+
+ r = gmc_v6_0_mc_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_init(adev);
+ if (r)
+ return r;
+
+ r = gmc_v6_0_gart_init(adev);
+ if (r)
+ return r;
+
+ if (!adev->vm_manager.enabled) {
+ r = gmc_v6_0_vm_init(adev);
+ if (r) {
+ dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+ return r;
+ }
+ adev->vm_manager.enabled = true;
+ }
+
+ return r;
+}
+
+static int gmc_v6_0_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->vm_manager.enabled) {
+ gmc_v6_0_vm_fini(adev);
+ adev->vm_manager.enabled = false;
+ }
+ gmc_v6_0_gart_fini(adev);
+ amdgpu_gem_force_release(adev);
+ amdgpu_bo_fini(adev);
+
+ return 0;
+}
+
+static int gmc_v6_0_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gmc_v6_0_mc_program(adev);
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = gmc_v6_0_mc_load_microcode(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to load MC firmware!\n");
+ return r;
+ }
+ }
+
+ r = gmc_v6_0_gart_enable(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int gmc_v6_0_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+ gmc_v6_0_gart_disable(adev);
+
+ return 0;
+}
+
+static int gmc_v6_0_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->vm_manager.enabled) {
+ gmc_v6_0_vm_fini(adev);
+ adev->vm_manager.enabled = false;
+ }
+ gmc_v6_0_hw_fini(adev);
+
+ return 0;
+}
+
+static int gmc_v6_0_resume(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = gmc_v6_0_hw_init(adev);
+ if (r)
+ return r;
+
+ if (!adev->vm_manager.enabled) {
+ r = gmc_v6_0_vm_init(adev);
+ if (r) {
+ dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
+ return r;
+ }
+ adev->vm_manager.enabled = true;
+ }
+
+ return r;
+}
+
+static bool gmc_v6_0_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
+ return false;
+
+ return true;
+}
+
+static int gmc_v6_0_wait_for_idle(void *handle)
+{
+ unsigned i;
+ u32 tmp;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+ SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK |
+ SRBM_STATUS__MCD_BUSY_MASK |
+ SRBM_STATUS__VMC_BUSY_MASK);
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int gmc_v6_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_mc_save save;
+ u32 srbm_soft_reset = 0;
+ u32 tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
+
+ if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+ SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
+ if (!(adev->flags & AMD_IS_APU))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
+ mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
+ }
+
+ if (srbm_soft_reset) {
+ gmc_v6_0_mc_stop(adev, &save);
+ if (gmc_v6_0_wait_for_idle(adev))
+ dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ gmc_v6_0_mc_resume(adev, &save);
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp;
+ u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ tmp = RREG32(VM_CONTEXT0_CNTL);
+ tmp &= ~bits;
+ WREG32(VM_CONTEXT0_CNTL, tmp);
+ tmp = RREG32(VM_CONTEXT1_CNTL);
+ tmp &= ~bits;
+ WREG32(VM_CONTEXT1_CNTL, tmp);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ tmp = RREG32(VM_CONTEXT0_CNTL);
+ tmp |= bits;
+ WREG32(VM_CONTEXT0_CNTL, tmp);
+ tmp = RREG32(VM_CONTEXT1_CNTL);
+ tmp |= bits;
+ WREG32(VM_CONTEXT1_CNTL, tmp);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
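+
+/* This .set hook is what the amdgpu_irq_get() call in gmc_v6_0_late_init()
+ * presumably reaches via the irq source's funcs table, i.e. roughly:
+ *
+ *	src->funcs->set(adev, src, 0, AMDGPU_IRQ_STATE_ENABLE);
+ */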
+
+static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 addr, status;
+
+ addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
+ status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
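+	/* WREG32_P(reg, val, mask) writes (old & mask) | val, so this sets only
+	 * bit 0, clearing the latched protection fault address/status */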
+
+ if (!addr && !status)
+ return 0;
+
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+ gmc_v6_0_set_fault_enable_default(adev, false);
+
+ dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+ entry->src_id, entry->src_data);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ addr);
+ dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ status);
+ gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+
+ return 0;
+}
+
+static int gmc_v6_0_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int gmc_v6_0_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
+ .name = "gmc_v6_0",
+ .early_init = gmc_v6_0_early_init,
+ .late_init = gmc_v6_0_late_init,
+ .sw_init = gmc_v6_0_sw_init,
+ .sw_fini = gmc_v6_0_sw_fini,
+ .hw_init = gmc_v6_0_hw_init,
+ .hw_fini = gmc_v6_0_hw_fini,
+ .suspend = gmc_v6_0_suspend,
+ .resume = gmc_v6_0_resume,
+ .is_idle = gmc_v6_0_is_idle,
+ .wait_for_idle = gmc_v6_0_wait_for_idle,
+ .soft_reset = gmc_v6_0_soft_reset,
+ .set_clockgating_state = gmc_v6_0_set_clockgating_state,
+ .set_powergating_state = gmc_v6_0_set_powergating_state,
+};
+
+static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
+ .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
+ .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+};
+
+static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
+ .set = gmc_v6_0_vm_fault_interrupt_state,
+ .process = gmc_v6_0_process_interrupt,
+};
+
+static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+{
+ if (adev->gart.gart_funcs == NULL)
+ adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+}
+
+static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->mc.vm_fault.num_types = 1;
+ adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
new file mode 100644
index 000000000000..42c4fc676cd4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GMC_V6_0_H__
+#define __GMC_V6_0_H__
+
+extern const struct amd_ip_funcs gmc_v6_0_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0b0f08641eed..aa0c4b964621 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -183,7 +183,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 running, blackout = 0;
+ u32 running;
int i, ucode_size, regs_size;
if (!adev->mc.fw)
@@ -203,11 +203,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
if (running == 0) {
- if (running) {
- blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
- }
-
/* reset the engine and set to writable */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -239,9 +234,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
break;
udelay(1);
}
-
- if (running)
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -393,7 +385,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 * size equal to 1024MB or the VRAM size, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -953,6 +945,11 @@ static int gmc_v7_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_ttm_global_init(adev);
+ if (r) {
+ return r;
+ }
+
r = gmc_v7_0_mc_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 2aee2c6f3cd5..a16b2201d52c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -100,6 +100,7 @@ static const u32 cz_mgcg_cgcg_init[] =
static const u32 stoney_mgcg_cgcg_init[] =
{
+ mmATC_MISC_CG, 0xffffffff, 0x000c0200,
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
@@ -261,7 +262,7 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 running, blackout = 0;
+ u32 running;
int i, ucode_size, regs_size;
if (!adev->mc.fw)
@@ -269,8 +270,10 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
/* Skip MC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
+ * Skip MC ucode loading on VF, because the hypervisor will do that
+ * for this adapter.
*/
- if (adev->virtualization.supports_sr_iov)
+ if (amdgpu_sriov_bios(adev))
return 0;
hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
@@ -287,11 +290,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
if (running == 0) {
- if (running) {
- blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
- }
-
/* reset the engine and set to writable */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
@@ -323,9 +321,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
break;
udelay(1);
}
-
- if (running)
- WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -477,7 +472,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 * size equal to 1024MB or the VRAM size, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -957,6 +952,11 @@ static int gmc_v8_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_ttm_global_init(adev);
+ if (r) {
+ return r;
+ }
+
r = gmc_v8_0_mc_init(adev);
if (r)
return r;
@@ -1100,9 +1100,8 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static int gmc_v8_0_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
{
- struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1117,13 +1116,41 @@ static int gmc_v8_0_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
-
if (srbm_soft_reset) {
- gmc_v8_0_mc_stop(adev, &save);
- if (gmc_v8_0_wait_for_idle((void *)adev)) {
- dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
- }
+ adev->mc.srbm_soft_reset = srbm_soft_reset;
+ return true;
+ } else {
+ adev->mc.srbm_soft_reset = 0;
+ return false;
+ }
+}
+static int gmc_v8_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->mc.srbm_soft_reset)
+ return 0;
+
+ gmc_v8_0_mc_stop(adev, &adev->mc.save);
+ if (gmc_v8_0_wait_for_idle(adev)) {
+ dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
+ }
+
+ return 0;
+}
+
+static int gmc_v8_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+ if (!adev->mc.srbm_soft_reset)
+ return 0;
+ srbm_soft_reset = adev->mc.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
@@ -1139,14 +1166,22 @@ static int gmc_v8_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- gmc_v8_0_mc_resume(adev, &save);
- udelay(50);
}
return 0;
}
+static int gmc_v8_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->mc.srbm_soft_reset)
+ return 0;
+
+ gmc_v8_0_mc_resume(adev, &adev->mc.save);
+ return 0;
+}
+
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -1414,7 +1449,10 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.resume = gmc_v8_0_resume,
.is_idle = gmc_v8_0_is_idle,
.wait_for_idle = gmc_v8_0_wait_for_idle,
+ .check_soft_reset = gmc_v8_0_check_soft_reset,
+ .pre_soft_reset = gmc_v8_0_pre_soft_reset,
.soft_reset = gmc_v8_0_soft_reset,
+ .post_soft_reset = gmc_v8_0_post_soft_reset,
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
deleted file mode 100644
index 2f078ad6095c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "iceland_smum.h"
-
-MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int iceland_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- iceland_dpm_set_funcs(adev);
-
- return 0;
-}
-
-static int iceland_dpm_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30] = "amdgpu/topaz_smc.bin";
- int err;
-
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int iceland_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = iceland_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int iceland_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int iceland_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- /* smu init only needs to be called at startup, not resume.
- * It should be in sw_init, but requires the fw info gathered
- * in sw_init from other IP modules.
- */
- ret = iceland_smu_init(adev);
- if (ret) {
- DRM_ERROR("SMU initialization failed\n");
- goto fail;
- }
-
- ret = iceland_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
- mutex_unlock(&adev->pm.mutex);
- return 0;
-
-fail:
- adev->firmware.smu_load = false;
- mutex_unlock(&adev->pm.mutex);
- return -EINVAL;
-}
-
-static int iceland_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
- /* smu fini only needs to be called at teardown, not suspend.
- * It should be in sw_fini, but we put it here for symmetry
- * with smu init.
- */
- iceland_smu_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- return 0;
-}
-
-static int iceland_dpm_suspend(void *handle)
-{
- return 0;
-}
-
-static int iceland_dpm_resume(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- ret = iceland_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
-fail:
- mutex_unlock(&adev->pm.mutex);
- return ret;
-}
-
-static int iceland_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int iceland_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-const struct amd_ip_funcs iceland_dpm_ip_funcs = {
- .name = "iceland_dpm",
- .early_init = iceland_dpm_early_init,
- .late_init = NULL,
- .sw_init = iceland_dpm_sw_init,
- .sw_fini = iceland_dpm_sw_fini,
- .hw_init = iceland_dpm_hw_init,
- .hw_fini = iceland_dpm_hw_fini,
- .suspend = iceland_dpm_suspend,
- .resume = iceland_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = iceland_dpm_set_clockgating_state,
- .set_powergating_state = iceland_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs iceland_dpm_funcs = {
- .get_temperature = NULL,
- .pre_set_power_state = NULL,
- .set_power_state = NULL,
- .post_set_power_state = NULL,
- .display_configuration_changed = NULL,
- .get_sclk = NULL,
- .get_mclk = NULL,
- .print_power_state = NULL,
- .debugfs_print_current_performance_level = NULL,
- .force_performance_level = NULL,
- .vblank_too_short = NULL,
- .powergate_uvd = NULL,
-};
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->pm.funcs)
- adev->pm.funcs = &iceland_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
deleted file mode 100644
index 211839913728..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "ppsmc.h"
-#include "iceland_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
-
-#define ICELAND_SMC_SIZE 0x20000
-
-static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
- uint32_t smc_address, uint32_t limit)
-{
- uint32_t val;
-
- if (smc_address & 3)
- return -EINVAL;
-
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- return 0;
-}
-
-static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
- uint32_t smc_start_address,
- const uint8_t *src,
- uint32_t byte_count, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
- unsigned long flags;
-
- if (smc_start_address & 3)
- return -EINVAL;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* Bytes are written into the SMC addres space with the MSB first */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = iceland_set_smc_sram_address(adev, addr, limit);
-
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
- /* Now write odd bytes left, do a read modify write cycle */
- data = 0;
-
- result = iceland_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- orig_data = RREG32(mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = iceland_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-out:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-void iceland_start_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-void iceland_reset_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-static int iceland_program_jump_on_start(struct amdgpu_device *adev)
-{
- static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
- iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-void iceland_stop_smc_clock(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-void iceland_start_smc_clock(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
- return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32(mmSMC_RESP_0);
- if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-
-static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg,
- uint32_t parameter)
-{
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return iceland_send_msg_to_smc(adev, msg);
-}
-
-static int iceland_send_msg_to_smc_with_parameter_without_waiting(
- struct amdgpu_device *adev,
- PPSMC_Msg msg, uint32_t parameter)
-{
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return iceland_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
-static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- uint32_t ucode_size;
- uint32_t ucode_start_address;
- const uint8_t *src;
- uint32_t val;
- uint32_t byte_count;
- uint32_t data;
- unsigned long flags;
- int i;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- /* Skip SMC ucode loading on SR-IOV capable boards.
- * vbios does this for us in asic_init in that case.
- */
- if (adev->virtualization.supports_sr_iov)
- return 0;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- src = (const uint8_t *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3) {
- DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (ucode_size > ICELAND_SMC_SIZE) {
- DRM_ERROR("SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
- break;
- udelay(1);
- }
- val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
- WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);
-
- iceland_stop_smc_clock(adev);
- iceland_reset_smc(adev);
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- byte_count = ucode_size;
- while (byte_count >= 4) {
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
- WREG32(mmSMC_IND_DATA_0, data);
- src += 4;
- byte_count -= 4;
- }
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-#if 0 /* not used yet */
-static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t *value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = iceland_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = iceland_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int iceland_smu_stop_smc(struct amdgpu_device *adev)
-{
- iceland_reset_smc(adev);
- iceland_stop_smc_clock(adev);
-
- return 0;
-}
-#endif
-
-static int iceland_smu_start_smc(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- iceland_program_jump_on_start(adev);
- iceland_start_smc_clock(adev);
- iceland_start_smc(adev);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
- break;
- udelay(1);
- }
- return 0;
-}
-
-static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- return AMDGPU_UCODE_ID_SDMA0;
- case UCODE_ID_SDMA1:
- return AMDGPU_UCODE_ID_SDMA1;
- case UCODE_ID_CP_CE:
- return AMDGPU_UCODE_ID_CP_CE;
- case UCODE_ID_CP_PFP:
- return AMDGPU_UCODE_ID_CP_PFP;
- case UCODE_ID_CP_ME:
- return AMDGPU_UCODE_ID_CP_ME;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- return AMDGPU_UCODE_ID_CP_MEC1;
- case UCODE_ID_CP_MEC_JT2:
- return AMDGPU_UCODE_ID_CP_MEC2;
- case UCODE_ID_RLC_G:
- return AMDGPU_UCODE_ID_RLC_G;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return AMDGPU_UCODE_ID_MAXIMUM;
- }
-}
-
-static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case AMDGPU_UCODE_ID_SDMA0:
- return UCODE_ID_SDMA0_MASK;
- case AMDGPU_UCODE_ID_SDMA1:
- return UCODE_ID_SDMA1_MASK;
- case AMDGPU_UCODE_ID_CP_CE:
- return UCODE_ID_CP_CE_MASK;
- case AMDGPU_UCODE_ID_CP_PFP:
- return UCODE_ID_CP_PFP_MASK;
- case AMDGPU_UCODE_ID_CP_ME:
- return UCODE_ID_CP_ME_MASK;
- case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
- case AMDGPU_UCODE_ID_CP_MEC2:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_RLC_G:
- return UCODE_ID_RLC_G_MASK;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return 0;
- }
-}
-
-static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header = NULL;
- uint64_t gpu_addr;
- uint32_t data_size;
-
- if (ucode->fw == NULL)
- return -EINVAL;
-
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = upper_32_bits(gpu_addr);
- entry->image_addr_low = lower_32_bits(gpu_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = data_size;
- entry->num_register_entries = 0;
- entry->flags = 0;
-
- return 0;
-}
-
-static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
- struct SMU_DRAMData_TOC *toc;
- uint32_t fw_to_load;
-
- toc = (struct SMU_DRAMData_TOC *)private->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- if (!adev->firmware.smu_load)
- return 0;
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for RLC\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for CE\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for PFP\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for ME\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA0\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA1\n");
- return -EINVAL;
- }
-
- iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
- iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_MASK |
- UCODE_ID_CP_MEC_JT1_MASK;
-
-
- if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
- DRM_ERROR("Fail to request SMU load ucode\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_type)
-{
- uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
- int i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int iceland_smu_start(struct amdgpu_device *adev)
-{
- int result;
-
- result = iceland_smu_upload_firmware_image(adev);
- if (result)
- return result;
- result = iceland_smu_start_smc(adev);
- if (result)
- return result;
-
- return iceland_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
- .check_fw_load_finish = iceland_smu_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-int iceland_smu_init(struct amdgpu_device *adev)
-{
- struct iceland_smu_private_data *private;
- uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- uint64_t mc_addr;
- void *toc_buf_ptr;
- int ret;
-
- private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
- if (NULL == private)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = private;
- adev->smu.fw_flags = 0;
-
- /* Allocate FW image data structure and header buffer */
- ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, toc_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for TOC buffer\n");
- return -ENOMEM;
- }
-
- /* Retrieve GPU address for header buffer and internal buffer */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the TOC buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- private->header_addr_low = lower_32_bits(mc_addr);
- private->header_addr_high = upper_32_bits(mc_addr);
- private->header = toc_buf_ptr;
-
- adev->smu.smumgr_funcs = &iceland_smumgr_funcs;
-
- return 0;
-}
-
-int iceland_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.fw_buf)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a845e883f5fa..71d2856222fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2845,7 +2845,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
- pi->caps_sclk_ds = true;
+ if (amdgpu_sclk_deep_sleep_en)
+ pi->caps_sclk_ds = true;
+ else
+ pi->caps_sclk_ds = false;
+
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (amdgpu_bapm == 0)
@@ -3059,6 +3063,8 @@ static int kv_dpm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ flush_work(&adev->pm.dpm.thermal.work);
+
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
kv_dpm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
new file mode 100644
index 000000000000..055321f61ca7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __R600_DPM_H__
+#define __R600_DPM_H__
+
+#define R600_ASI_DFLT 10000
+#define R600_BSP_DFLT 0x41EB
+#define R600_BSU_DFLT 0x2
+#define R600_AH_DFLT 5
+#define R600_RLP_DFLT 25
+#define R600_RMP_DFLT 65
+#define R600_LHP_DFLT 40
+#define R600_LMP_DFLT 15
+#define R600_TD_DFLT 0
+#define R600_UTC_DFLT_00 0x24
+#define R600_UTC_DFLT_01 0x22
+#define R600_UTC_DFLT_02 0x22
+#define R600_UTC_DFLT_03 0x22
+#define R600_UTC_DFLT_04 0x22
+#define R600_UTC_DFLT_05 0x22
+#define R600_UTC_DFLT_06 0x22
+#define R600_UTC_DFLT_07 0x22
+#define R600_UTC_DFLT_08 0x22
+#define R600_UTC_DFLT_09 0x22
+#define R600_UTC_DFLT_10 0x22
+#define R600_UTC_DFLT_11 0x22
+#define R600_UTC_DFLT_12 0x22
+#define R600_UTC_DFLT_13 0x22
+#define R600_UTC_DFLT_14 0x22
+#define R600_DTC_DFLT_00 0x24
+#define R600_DTC_DFLT_01 0x22
+#define R600_DTC_DFLT_02 0x22
+#define R600_DTC_DFLT_03 0x22
+#define R600_DTC_DFLT_04 0x22
+#define R600_DTC_DFLT_05 0x22
+#define R600_DTC_DFLT_06 0x22
+#define R600_DTC_DFLT_07 0x22
+#define R600_DTC_DFLT_08 0x22
+#define R600_DTC_DFLT_09 0x22
+#define R600_DTC_DFLT_10 0x22
+#define R600_DTC_DFLT_11 0x22
+#define R600_DTC_DFLT_12 0x22
+#define R600_DTC_DFLT_13 0x22
+#define R600_DTC_DFLT_14 0x22
+#define R600_VRC_DFLT 0x0000C003
+#define R600_VOLTAGERESPONSETIME_DFLT 1000
+#define R600_BACKBIASRESPONSETIME_DFLT 1000
+#define R600_VRU_DFLT 0x3
+#define R600_SPLLSTEPTIME_DFLT 0x1000
+#define R600_SPLLSTEPUNIT_DFLT 0x3
+#define R600_TPU_DFLT 0
+#define R600_TPC_DFLT 0x200
+#define R600_SSTU_DFLT 0
+#define R600_SST_DFLT 0x00C8
+#define R600_GICST_DFLT 0x200
+#define R600_FCT_DFLT 0x0400
+#define R600_FCTU_DFLT 0
+#define R600_CTXCGTT3DRPHC_DFLT 0x20
+#define R600_CTXCGTT3DRSDC_DFLT 0x40
+#define R600_VDDC3DOORPHC_DFLT 0x100
+#define R600_VDDC3DOORSDC_DFLT 0x7
+#define R600_VDDC3DOORSU_DFLT 0
+#define R600_MPLLLOCKTIME_DFLT 100
+#define R600_MPLLRESETTIME_DFLT 150
+#define R600_VCOSTEPPCT_DFLT 20
+#define R600_ENDINGVCOSTEPPCT_DFLT 5
+#define R600_REFERENCEDIVIDER_DFLT 4
+
+#define R600_PM_NUMBER_OF_TC 15
+#define R600_PM_NUMBER_OF_SCLKS 20
+#define R600_PM_NUMBER_OF_MCLKS 4
+#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define R600_TEMP_RANGE_MIN (90 * 1000)
+#define R600_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum r600_power_level {
+ R600_POWER_LEVEL_LOW = 0,
+ R600_POWER_LEVEL_MEDIUM = 1,
+ R600_POWER_LEVEL_HIGH = 2,
+ R600_POWER_LEVEL_CTXSW = 3,
+};
+
+enum r600_td {
+ R600_TD_AUTO,
+ R600_TD_UP,
+ R600_TD_DOWN,
+};
+
+enum r600_display_watermark {
+ R600_DISPLAY_WATERMARK_LOW = 0,
+ R600_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum r600_display_gap
+{
+ R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+ R600_PM_DISPLAY_GAP_VBLANK = 1,
+ R600_PM_DISPLAY_GAP_WATERMARK = 2,
+ R600_PM_DISPLAY_GAP_IGNORE = 3,
+};
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a64715d90503..565dab3c7218 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -190,12 +190,8 @@ out:
*/
static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
- u32 rptr;
-
/* XXX check if swapping is necessary on BE */
- rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}
/**
@@ -749,24 +745,16 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
@@ -774,39 +762,27 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
-static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- value = amdgpu_vm_map_gart(pages_addr, addr);
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
}
}
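With the loop removed, sdma_v2_4_vm_write_pte() emits exactly one SDMA_OP_WRITE packet, so any splitting of oversized updates is now left to the caller. For count = 2, incr = 0x1000 and a starting value V, the packet body built above becomes:

	/* header:  SDMA_OP_WRITE | SDMA_SUBOP_COPY_LINEAR */
	/* dst:     lower_32_bits(pe), upper_32_bits(pe)   */
	/* ndw:     4                                      */
	/* data:    V          -> PTE at pe                */
	/*          V + 0x1000 -> PTE at pe + 8            */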
@@ -822,40 +798,21 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib,
*
* Update the page tables using sDMA (CIK).
*/
-static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
- uint64_t pe,
+static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & AMDGPU_PTE_VALID)
- value = addr;
- else
- value = 0;
-
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count; /* number of entries */
}
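GEN_PTEPDE, by contrast, is a fixed ten-dword packet: the engine generates the count entries itself from addr, incr and the flags mask, so IB usage no longer scales with count at all. Roughly (illustrative constant, derived from the ten stores above):

	/* header, dst lo/hi, mask, 0, value lo/hi, incr, 0, count */
	#define SDMA_PTEPDE_PKT_NDW 10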
/**
@@ -945,6 +902,22 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 7 + 6; /* sdma_v2_4_ring_emit_ib */
+}
+
+static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 6 + /* sdma_v2_4_ring_emit_hdp_flush */
+ 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v2_4_ring_emit_pipeline_sync */
+ 12 + /* sdma_v2_4_ring_emit_vm_flush */
+ 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
+}
+
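A sketch of how the submission path is assumed to consume the two new callbacks when reserving ring space before emitting a frame (only the callback names come from this patch; the surrounding logic is illustrative):

	unsigned ndw = ring->funcs->get_dma_frame_size(ring) +
		       num_ibs * ring->funcs->get_emit_ib_size(ring);
	int r = amdgpu_ring_alloc(ring, ndw);

	if (r)
		return r;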
static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1263,6 +1236,8 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
+ .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size,
+ .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 653ce5ed55ae..a9d10941fb53 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -335,12 +335,8 @@ out:
*/
static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
- u32 rptr;
-
/* XXX check if swapping is necessary on BE */
- rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;
-
- return rptr;
+ return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}
/**
@@ -499,31 +495,6 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
-unsigned init_cond_exec(struct amdgpu_ring *ring)
-{
- unsigned ret;
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
- amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
- amdgpu_ring_write(ring, 1);
- ret = ring->wptr;/* this is the offset we need patch later */
- amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
- return ret;
-}
-
-void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
-{
- unsigned cur;
- BUG_ON(ring->ring[offset] != 0x55aa55aa);
-
- cur = ring->wptr - 1;
- if (likely(cur > offset))
- ring->ring[offset] = cur - offset;
- else
- ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
-}
-
-
/**
* sdma_v3_0_gfx_stop - stop the gfx async dma engines
*
@@ -976,24 +947,16 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
@@ -1001,39 +964,27 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
-static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
- const dma_addr_t *pages_addr, uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
-{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- value = amdgpu_vm_map_gart(pages_addr, addr);
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
+static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
+{
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
}
}
@@ -1049,40 +1000,21 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
*
* Update the page tables using sDMA (CIK).
*/
-static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
- uint64_t pe,
+static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
- uint64_t value;
- unsigned ndw;
-
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & AMDGPU_PTE_VALID)
- value = addr;
- else
- value = 0;
-
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
@@ -1172,6 +1104,22 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 7 + 6; /* sdma_v3_0_ring_emit_ib */
+}
+
+static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 6 + /* sdma_v3_0_ring_emit_hdp_flush */
+ 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+ 6 + /* sdma_v3_0_ring_emit_pipeline_sync */
+ 12 + /* sdma_v3_0_ring_emit_vm_flush */
+ 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
+}
+
static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1320,28 +1268,77 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v3_0_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
{
- u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS2);
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
+ (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
- }
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
}
if (srbm_soft_reset) {
+ adev->sdma.srbm_soft_reset = srbm_soft_reset;
+ return true;
+ } else {
+ adev->sdma.srbm_soft_reset = 0;
+ return false;
+ }
+}
+
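The one-shot soft reset is thus split into a three-phase handshake around the latched adev->sdma.srbm_soft_reset. The assumed call order, driven by the IP core rather than spelled out in this patch:

	/*
	 * if (ip_funcs->check_soft_reset(handle))    -- latch srbm_soft_reset
	 *         ip_funcs->pre_soft_reset(handle);  -- halt ctx switch + engines
	 * ip_funcs->soft_reset(handle);              -- pulse SRBM_SOFT_RESET
	 * ip_funcs->post_soft_reset(handle);         -- resume gfx/rlc rings
	 */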
+static int sdma_v3_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+
+ if (!adev->sdma.srbm_soft_reset)
+ return 0;
+
+ srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+ if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+ REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
+ }
+
+ return 0;
+}
+
+static int sdma_v3_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+
+ if (!adev->sdma.srbm_soft_reset)
+ return 0;
+
+ srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+ if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
+ REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
+ sdma_v3_0_gfx_resume(adev);
+ sdma_v3_0_rlc_resume(adev);
+ }
+
+ return 0;
+}
+
+static int sdma_v3_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+ u32 tmp;
+
+ if (!adev->sdma.srbm_soft_reset)
+ return 0;
+
+ srbm_soft_reset = adev->sdma.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1559,6 +1556,9 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.resume = sdma_v3_0_resume,
.is_idle = sdma_v3_0_is_idle,
.wait_for_idle = sdma_v3_0_wait_for_idle,
+ .check_soft_reset = sdma_v3_0_check_soft_reset,
+ .pre_soft_reset = sdma_v3_0_pre_soft_reset,
+ .post_soft_reset = sdma_v3_0_post_soft_reset,
.soft_reset = sdma_v3_0_soft_reset,
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
@@ -1579,6 +1579,8 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
+ .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
new file mode 100644
index 000000000000..dc9511c5ecb8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -0,0 +1,1965 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_atombios.h"
+#include "amdgpu_ih.h"
+#include "amdgpu_uvd.h"
+#include "amdgpu_vce.h"
+#include "atom.h"
+#include "amdgpu_powerplay.h"
+#include "si/sid.h"
+#include "si_ih.h"
+#include "gfx_v6_0.h"
+#include "gmc_v6_0.h"
+#include "si_dma.h"
+#include "dce_v6_0.h"
+#include "si.h"
+
+static const u32 tahiti_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0x00200000, 0x50100000,
+ 0x1c0c, 0x31000311, 0x00000011,
+ 0x09df, 0x00000003, 0x000007ff,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x2a00126a,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x23a2, 0x01ff1f3f, 0x00000000,
+ 0x23a1, 0x01ff1f3f, 0x00000000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b05, 0x00000200, 0x000002fb,
+ 0x2b04, 0xffffffff, 0x0000543b,
+ 0x2b03, 0xffffffff, 0xa9210876,
+ 0x2234, 0xffffffff, 0x000fff40,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0504, 0x20000000, 0x20fffed8,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
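Each golden-register table in this file is consumed three dwords at a time as { offset, and_mask, or_mask }. amdgpu_program_register_sequence(), invoked from si_init_golden_registers() near the bottom of the file, is assumed to apply each triplet as a read-modify-write with a full-mask fast path, roughly:

	for (i = 0; i < array_size; i += 3) {
		reg      = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask  = registers[i + 2];

		if (and_mask == 0xffffffff)
			tmp = or_mask;	/* full overwrite */
		else
			tmp = (RREG32(reg) & ~and_mask) | or_mask;
		WREG32(reg, tmp);
	}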
+static const u32 tahiti_golden_registers2[] =
+{
+ 0x0319, 0x00000001, 0x00000001
+};
+
+static const u32 tahiti_golden_rlc_registers[] =
+{
+ 0x3109, 0xffffffff, 0x00601005,
+ 0x311f, 0xffffffff, 0x10104040,
+ 0x3122, 0xffffffff, 0x0100000a,
+ 0x30c5, 0xffffffff, 0x00000800,
+ 0x30c3, 0xffffffff, 0x800000f4,
+ 0x3d2a, 0xffffffff, 0x00000000
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0x00200000, 0x50100000,
+ 0x1c0c, 0x31000311, 0x00000011,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x2a00126a,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b05, 0x000003ff, 0x000000f7,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b03, 0xffffffff, 0x32761054,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+ 0x3109, 0xffffffff, 0x00601004,
+ 0x311f, 0xffffffff, 0x10102020,
+ 0x3122, 0xffffffff, 0x01000020,
+ 0x30c5, 0xffffffff, 0x00000800,
+ 0x30c3, 0xffffffff, 0x800000a4
+};
+
+static const u32 verde_pg_init[] =
+{
+ 0xd4f, 0xffffffff, 0x40000,
+ 0xd4e, 0xffffffff, 0x200010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x7007,
+ 0xd4e, 0xffffffff, 0x300010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x400000,
+ 0xd4e, 0xffffffff, 0x100010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x120200,
+ 0xd4e, 0xffffffff, 0x500010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x1e1e16,
+ 0xd4e, 0xffffffff, 0x600010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x171f1e,
+ 0xd4e, 0xffffffff, 0x700010ff,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4f, 0xffffffff, 0x0,
+ 0xd4e, 0xffffffff, 0x9ff,
+ 0xd40, 0xffffffff, 0x0,
+ 0xd41, 0xffffffff, 0x10000800,
+ 0xd41, 0xffffffff, 0xf,
+ 0xd41, 0xffffffff, 0xf,
+ 0xd40, 0xffffffff, 0x4,
+ 0xd41, 0xffffffff, 0x1000051e,
+ 0xd41, 0xffffffff, 0xffff,
+ 0xd41, 0xffffffff, 0xffff,
+ 0xd40, 0xffffffff, 0x8,
+ 0xd41, 0xffffffff, 0x80500,
+ 0xd40, 0xffffffff, 0x12,
+ 0xd41, 0xffffffff, 0x9050c,
+ 0xd40, 0xffffffff, 0x1d,
+ 0xd41, 0xffffffff, 0xb052c,
+ 0xd40, 0xffffffff, 0x2a,
+ 0xd41, 0xffffffff, 0x1053e,
+ 0xd40, 0xffffffff, 0x2d,
+ 0xd41, 0xffffffff, 0x10546,
+ 0xd40, 0xffffffff, 0x30,
+ 0xd41, 0xffffffff, 0xa054e,
+ 0xd40, 0xffffffff, 0x3c,
+ 0xd41, 0xffffffff, 0x1055f,
+ 0xd40, 0xffffffff, 0x3f,
+ 0xd41, 0xffffffff, 0x10567,
+ 0xd40, 0xffffffff, 0x42,
+ 0xd41, 0xffffffff, 0x1056f,
+ 0xd40, 0xffffffff, 0x45,
+ 0xd41, 0xffffffff, 0x10572,
+ 0xd40, 0xffffffff, 0x48,
+ 0xd41, 0xffffffff, 0x20575,
+ 0xd40, 0xffffffff, 0x4c,
+ 0xd41, 0xffffffff, 0x190801,
+ 0xd40, 0xffffffff, 0x67,
+ 0xd41, 0xffffffff, 0x1082a,
+ 0xd40, 0xffffffff, 0x6a,
+ 0xd41, 0xffffffff, 0x1b082d,
+ 0xd40, 0xffffffff, 0x87,
+ 0xd41, 0xffffffff, 0x310851,
+ 0xd40, 0xffffffff, 0xba,
+ 0xd41, 0xffffffff, 0x891,
+ 0xd40, 0xffffffff, 0xbc,
+ 0xd41, 0xffffffff, 0x893,
+ 0xd40, 0xffffffff, 0xbe,
+ 0xd41, 0xffffffff, 0x20895,
+ 0xd40, 0xffffffff, 0xc2,
+ 0xd41, 0xffffffff, 0x20899,
+ 0xd40, 0xffffffff, 0xc6,
+ 0xd41, 0xffffffff, 0x2089d,
+ 0xd40, 0xffffffff, 0xca,
+ 0xd41, 0xffffffff, 0x8a1,
+ 0xd40, 0xffffffff, 0xcc,
+ 0xd41, 0xffffffff, 0x8a3,
+ 0xd40, 0xffffffff, 0xce,
+ 0xd41, 0xffffffff, 0x308a5,
+ 0xd40, 0xffffffff, 0xd3,
+ 0xd41, 0xffffffff, 0x6d08cd,
+ 0xd40, 0xffffffff, 0x142,
+ 0xd41, 0xffffffff, 0x2000095a,
+ 0xd41, 0xffffffff, 0x1,
+ 0xd40, 0xffffffff, 0x144,
+ 0xd41, 0xffffffff, 0x301f095b,
+ 0xd40, 0xffffffff, 0x165,
+ 0xd41, 0xffffffff, 0xc094d,
+ 0xd40, 0xffffffff, 0x173,
+ 0xd41, 0xffffffff, 0xf096d,
+ 0xd40, 0xffffffff, 0x184,
+ 0xd41, 0xffffffff, 0x15097f,
+ 0xd40, 0xffffffff, 0x19b,
+ 0xd41, 0xffffffff, 0xc0998,
+ 0xd40, 0xffffffff, 0x1a9,
+ 0xd41, 0xffffffff, 0x409a7,
+ 0xd40, 0xffffffff, 0x1af,
+ 0xd41, 0xffffffff, 0xcdc,
+ 0xd40, 0xffffffff, 0x1b1,
+ 0xd41, 0xffffffff, 0x800,
+ 0xd42, 0xffffffff, 0x6c9b2000,
+ 0xd44, 0xfc00, 0x2000,
+ 0xd51, 0xffffffff, 0xfc0,
+ 0xa35, 0x00000100, 0x100
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+ 0x3109, 0xffffffff, 0x033f1005,
+ 0x311f, 0xffffffff, 0x10808020,
+ 0x3122, 0xffffffff, 0x00800008,
+ 0x30c5, 0xffffffff, 0x00001000,
+ 0x30c3, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f0, 0x00200000, 0x50100000,
+
+ 0x1c0c, 0x31000311, 0x00000011,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x2285, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+
+ 0xa293, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x0000124a,
+ 0xa0d4, 0x3f3f3fff, 0x0000124a,
+ 0xa0d4, 0x3f3f3fff, 0x0000124a,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x23a2, 0x01ff1f3f, 0x00000000,
+ 0x23a3, 0x01ff1f3f, 0x00000000,
+ 0x23a2, 0x01ff1f3f, 0x00000000,
+ 0x23a1, 0x01ff1f3f, 0x00000000,
+ 0x23a1, 0x01ff1f3f, 0x00000000,
+
+ 0x23a1, 0x01ff1f3f, 0x00000000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b01, 0x000003ff, 0x00000003,
+ 0x2b05, 0x000003ff, 0x00000003,
+ 0x2b05, 0x000003ff, 0x00000003,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b03, 0xffffffff, 0x00001032,
+ 0x2b03, 0xffffffff, 0x00001032,
+ 0x2b03, 0xffffffff, 0x00001032,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x031e, 0x00000080, 0x00000000,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x16ec, 0x000000f0, 0x00000070,
+ 0x16f9, 0x00200000, 0x50100000,
+ 0x1c0c, 0x31000311, 0x00000011,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+ 0xa293, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x00000082,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x07ffffff, 0x03000000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b05, 0x000003ff, 0x000000f3,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b03, 0xffffffff, 0x00003210,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+ 0x3109, 0xffffffff, 0x00601005,
+ 0x311f, 0xffffffff, 0x10104040,
+ 0x3122, 0xffffffff, 0x0100000a,
+ 0x30c5, 0xffffffff, 0x00000800,
+ 0x30c3, 0xffffffff, 0x800000f4
+};
+
+static const u32 hainan_golden_registers[] =
+{
+ 0x2684, 0x00010000, 0x00018208,
+ 0x260c, 0xffffffff, 0x00000000,
+ 0x260d, 0xf00fffff, 0x00000400,
+ 0x260e, 0x0002021c, 0x00020200,
+ 0x4595, 0xff000fff, 0x00000100,
+ 0x340c, 0x000300c0, 0x00800040,
+ 0x3630, 0xff000fff, 0x00000100,
+ 0x360c, 0x000300c0, 0x00800040,
+ 0x0ab9, 0x00073ffe, 0x000022a2,
+ 0x0903, 0x000007ff, 0x00000000,
+ 0x2285, 0xf000001f, 0x00000007,
+ 0x22c9, 0xffffffff, 0x00ffffff,
+ 0x22c4, 0x0000ff0f, 0x00000000,
+ 0xa393, 0x07ffffff, 0x4e000000,
+ 0xa0d4, 0x3f3f3fff, 0x00000000,
+ 0x000c, 0x000000ff, 0x0040,
+ 0x000d, 0x00000040, 0x00004040,
+ 0x2440, 0x03e00000, 0x03600000,
+ 0x2418, 0x0000007f, 0x00000020,
+ 0x2542, 0x00010000, 0x00010000,
+ 0x2b05, 0x000003ff, 0x000000f1,
+ 0x2b04, 0xffffffff, 0x00000000,
+ 0x2b03, 0xffffffff, 0x00003210,
+ 0x2235, 0x0000001f, 0x00000010,
+ 0x0570, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+ 0x263e, 0xffffffff, 0x02010001
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2471, 0xffffffff, 0x00060005,
+ 0x2472, 0xffffffff, 0x00080007,
+ 0x2473, 0xffffffff, 0x0000000b,
+ 0x2474, 0xffffffff, 0x000a0009,
+ 0x2475, 0xffffffff, 0x000d000c,
+ 0x2476, 0xffffffff, 0x00070006,
+ 0x2477, 0xffffffff, 0x00090008,
+ 0x2478, 0xffffffff, 0x0000000c,
+ 0x2479, 0xffffffff, 0x000b000a,
+ 0x247a, 0xffffffff, 0x000e000d,
+ 0x247b, 0xffffffff, 0x00080007,
+ 0x247c, 0xffffffff, 0x000a0009,
+ 0x247d, 0xffffffff, 0x0000000d,
+ 0x247e, 0xffffffff, 0x000c000b,
+ 0x247f, 0xffffffff, 0x000f000e,
+ 0x2480, 0xffffffff, 0x00090008,
+ 0x2481, 0xffffffff, 0x000b000a,
+ 0x2482, 0xffffffff, 0x000c000f,
+ 0x2483, 0xffffffff, 0x000e000d,
+ 0x2484, 0xffffffff, 0x00110010,
+ 0x2485, 0xffffffff, 0x000a0009,
+ 0x2486, 0xffffffff, 0x000c000b,
+ 0x2487, 0xffffffff, 0x0000000f,
+ 0x2488, 0xffffffff, 0x000e000d,
+ 0x2489, 0xffffffff, 0x00110010,
+ 0x248a, 0xffffffff, 0x000b000a,
+ 0x248b, 0xffffffff, 0x000d000c,
+ 0x248c, 0xffffffff, 0x00000010,
+ 0x248d, 0xffffffff, 0x000f000e,
+ 0x248e, 0xffffffff, 0x00120011,
+ 0x248f, 0xffffffff, 0x000c000b,
+ 0x2490, 0xffffffff, 0x000e000d,
+ 0x2491, 0xffffffff, 0x00000011,
+ 0x2492, 0xffffffff, 0x0010000f,
+ 0x2493, 0xffffffff, 0x00130012,
+ 0x2494, 0xffffffff, 0x000d000c,
+ 0x2495, 0xffffffff, 0x000f000e,
+ 0x2496, 0xffffffff, 0x00100013,
+ 0x2497, 0xffffffff, 0x00120011,
+ 0x2498, 0xffffffff, 0x00150014,
+ 0x2499, 0xffffffff, 0x000e000d,
+ 0x249a, 0xffffffff, 0x0010000f,
+ 0x249b, 0xffffffff, 0x00000013,
+ 0x249c, 0xffffffff, 0x00120011,
+ 0x249d, 0xffffffff, 0x00150014,
+ 0x249e, 0xffffffff, 0x000f000e,
+ 0x249f, 0xffffffff, 0x00110010,
+ 0x24a0, 0xffffffff, 0x00000014,
+ 0x24a1, 0xffffffff, 0x00130012,
+ 0x24a2, 0xffffffff, 0x00160015,
+ 0x24a3, 0xffffffff, 0x0010000f,
+ 0x24a4, 0xffffffff, 0x00120011,
+ 0x24a5, 0xffffffff, 0x00000015,
+ 0x24a6, 0xffffffff, 0x00140013,
+ 0x24a7, 0xffffffff, 0x00170016,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x40b, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x993, 0x000c0000, 0x000c0000,
+ 0x992, 0x000c0000, 0x000c0000,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0x157a, 0x00000001, 0x00000001,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2480, 0xffffffff, 0x00090008,
+ 0x2481, 0xffffffff, 0x000b000a,
+ 0x2482, 0xffffffff, 0x000c000f,
+ 0x2483, 0xffffffff, 0x000e000d,
+ 0x2484, 0xffffffff, 0x00110010,
+ 0x2485, 0xffffffff, 0x000a0009,
+ 0x2486, 0xffffffff, 0x000c000b,
+ 0x2487, 0xffffffff, 0x0000000f,
+ 0x2488, 0xffffffff, 0x000e000d,
+ 0x2489, 0xffffffff, 0x00110010,
+ 0x248a, 0xffffffff, 0x000b000a,
+ 0x248b, 0xffffffff, 0x000d000c,
+ 0x248c, 0xffffffff, 0x00000010,
+ 0x248d, 0xffffffff, 0x000f000e,
+ 0x248e, 0xffffffff, 0x00120011,
+ 0x248f, 0xffffffff, 0x000c000b,
+ 0x2490, 0xffffffff, 0x000e000d,
+ 0x2491, 0xffffffff, 0x00000011,
+ 0x2492, 0xffffffff, 0x0010000f,
+ 0x2493, 0xffffffff, 0x00130012,
+ 0x2494, 0xffffffff, 0x000d000c,
+ 0x2495, 0xffffffff, 0x000f000e,
+ 0x2496, 0xffffffff, 0x00100013,
+ 0x2497, 0xffffffff, 0x00120011,
+ 0x2498, 0xffffffff, 0x00150014,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x40b, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0x157a, 0x00000001, 0x00000001,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2480, 0xffffffff, 0x00090008,
+ 0x2481, 0xffffffff, 0x000b000a,
+ 0x2482, 0xffffffff, 0x000c000f,
+ 0x2483, 0xffffffff, 0x000e000d,
+ 0x2484, 0xffffffff, 0x00110010,
+ 0x2485, 0xffffffff, 0x000a0009,
+ 0x2486, 0xffffffff, 0x000c000b,
+ 0x2487, 0xffffffff, 0x0000000f,
+ 0x2488, 0xffffffff, 0x000e000d,
+ 0x2489, 0xffffffff, 0x00110010,
+ 0x248a, 0xffffffff, 0x000b000a,
+ 0x248b, 0xffffffff, 0x000d000c,
+ 0x248c, 0xffffffff, 0x00000010,
+ 0x248d, 0xffffffff, 0x000f000e,
+ 0x248e, 0xffffffff, 0x00120011,
+ 0x248f, 0xffffffff, 0x000c000b,
+ 0x2490, 0xffffffff, 0x000e000d,
+ 0x2491, 0xffffffff, 0x00000011,
+ 0x2492, 0xffffffff, 0x0010000f,
+ 0x2493, 0xffffffff, 0x00130012,
+ 0x2494, 0xffffffff, 0x000d000c,
+ 0x2495, 0xffffffff, 0x000f000e,
+ 0x2496, 0xffffffff, 0x00100013,
+ 0x2497, 0xffffffff, 0x00120011,
+ 0x2498, 0xffffffff, 0x00150014,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x40b, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x993, 0x000c0000, 0x000c0000,
+ 0x992, 0x000c0000, 0x000c0000,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0x157a, 0x00000001, 0x00000001,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2471, 0xffffffff, 0x00060005,
+ 0x2472, 0xffffffff, 0x00080007,
+ 0x2473, 0xffffffff, 0x0000000b,
+ 0x2474, 0xffffffff, 0x000a0009,
+ 0x2475, 0xffffffff, 0x000d000c,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x40b, 0x00000101, 0x00000000,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x993, 0x000c0000, 0x000c0000,
+ 0x992, 0x000c0000, 0x000c0000,
+ 0x1579, 0xff000fff, 0x00000100,
+ 0x157a, 0x00000001, 0x00000001,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+ 0x3100, 0xffffffff, 0xfffffffc,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2698, 0xffffffff, 0x00000100,
+ 0x24a9, 0xffffffff, 0x00000100,
+ 0x3059, 0xffffffff, 0x00000100,
+ 0x25dd, 0xffffffff, 0x00000100,
+ 0x2261, 0xffffffff, 0x06000100,
+ 0x2286, 0xffffffff, 0x00000100,
+ 0x24a8, 0xffffffff, 0x00000100,
+ 0x30e0, 0xffffffff, 0x00000100,
+ 0x22ca, 0xffffffff, 0x00000100,
+ 0x2451, 0xffffffff, 0x00000100,
+ 0x2362, 0xffffffff, 0x00000100,
+ 0x2363, 0xffffffff, 0x00000100,
+ 0x240c, 0xffffffff, 0x00000100,
+ 0x240d, 0xffffffff, 0x00000100,
+ 0x240e, 0xffffffff, 0x00000100,
+ 0x240f, 0xffffffff, 0x00000100,
+ 0x2b60, 0xffffffff, 0x00000100,
+ 0x2b15, 0xffffffff, 0x00000100,
+ 0x225f, 0xffffffff, 0x06000100,
+ 0x261a, 0xffffffff, 0x00000100,
+ 0x2544, 0xffffffff, 0x00000100,
+ 0x2bc1, 0xffffffff, 0x00000100,
+ 0x2b81, 0xffffffff, 0x00000100,
+ 0x2527, 0xffffffff, 0x00000100,
+ 0x200b, 0xffffffff, 0xe0000000,
+ 0x2458, 0xffffffff, 0x00010000,
+ 0x2459, 0xffffffff, 0x00030002,
+ 0x245a, 0xffffffff, 0x00040007,
+ 0x245b, 0xffffffff, 0x00060005,
+ 0x245c, 0xffffffff, 0x00090008,
+ 0x245d, 0xffffffff, 0x00020001,
+ 0x245e, 0xffffffff, 0x00040003,
+ 0x245f, 0xffffffff, 0x00000007,
+ 0x2460, 0xffffffff, 0x00060005,
+ 0x2461, 0xffffffff, 0x00090008,
+ 0x2462, 0xffffffff, 0x00030002,
+ 0x2463, 0xffffffff, 0x00050004,
+ 0x2464, 0xffffffff, 0x00000008,
+ 0x2465, 0xffffffff, 0x00070006,
+ 0x2466, 0xffffffff, 0x000a0009,
+ 0x2467, 0xffffffff, 0x00040003,
+ 0x2468, 0xffffffff, 0x00060005,
+ 0x2469, 0xffffffff, 0x00000009,
+ 0x246a, 0xffffffff, 0x00080007,
+ 0x246b, 0xffffffff, 0x000b000a,
+ 0x246c, 0xffffffff, 0x00050004,
+ 0x246d, 0xffffffff, 0x00070006,
+ 0x246e, 0xffffffff, 0x0008000b,
+ 0x246f, 0xffffffff, 0x000a0009,
+ 0x2470, 0xffffffff, 0x000d000c,
+ 0x2471, 0xffffffff, 0x00060005,
+ 0x2472, 0xffffffff, 0x00080007,
+ 0x2473, 0xffffffff, 0x0000000b,
+ 0x2474, 0xffffffff, 0x000a0009,
+ 0x2475, 0xffffffff, 0x000d000c,
+ 0x2454, 0xffffffff, 0x96940200,
+ 0x21c2, 0xffffffff, 0x00900100,
+ 0x311e, 0xffffffff, 0x00000080,
+ 0x3101, 0xffffffff, 0x0020003f,
+ 0xc, 0xffffffff, 0x0000001c,
+ 0xd, 0x000f0000, 0x000f0000,
+ 0x583, 0xffffffff, 0x00000100,
+ 0x409, 0xffffffff, 0x00000100,
+ 0x82a, 0xffffffff, 0x00000104,
+ 0x993, 0x000c0000, 0x000c0000,
+ 0x992, 0x000c0000, 0x000c0000,
+ 0xbd4, 0x00000001, 0x00000001,
+ 0xc33, 0xc0000fff, 0x00000104,
+ 0x3079, 0x00000001, 0x00000001,
+ 0x3430, 0xfffffff0, 0x00000100,
+ 0x3630, 0xfffffff0, 0x00000100
+};
+
+static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(AMDGPU_PCIE_INDEX, reg);
+ (void)RREG32(AMDGPU_PCIE_INDEX);
+ r = RREG32(AMDGPU_PCIE_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(AMDGPU_PCIE_INDEX, reg);
+ (void)RREG32(AMDGPU_PCIE_INDEX);
+ WREG32(AMDGPU_PCIE_DATA, v);
+ (void)RREG32(AMDGPU_PCIE_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
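Both helpers funnel every PCIE register access through one INDEX/DATA pair under pcie_idx_lock; the throwaway RREG32 after each WREG32 flushes the posted write before the paired access. Usage is then a plain read-modify-write (reg and bit are placeholders):

	u32 v = si_pcie_rreg(adev, reg);
	si_pcie_wreg(adev, reg, v | bit);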
+u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+ (void)RREG32(PCIE_PORT_INDEX);
+ r = RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+ (void)RREG32(PCIE_PORT_INDEX);
+ WREG32(PCIE_PORT_DATA, (v));
+ (void)RREG32(PCIE_PORT_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(SMC_IND_INDEX_0, (reg));
+ r = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+ return r;
+}
+
+static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(SMC_IND_INDEX_0, (reg));
+ WREG32(SMC_IND_DATA_0, (v));
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
+static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
+ {GRBM_STATUS, false},
+ {GB_ADDR_CONFIG, false},
+ {MC_ARB_RAMCFG, false},
+ {GB_TILE_MODE0, false},
+ {GB_TILE_MODE1, false},
+ {GB_TILE_MODE2, false},
+ {GB_TILE_MODE3, false},
+ {GB_TILE_MODE4, false},
+ {GB_TILE_MODE5, false},
+ {GB_TILE_MODE6, false},
+ {GB_TILE_MODE7, false},
+ {GB_TILE_MODE8, false},
+ {GB_TILE_MODE9, false},
+ {GB_TILE_MODE10, false},
+ {GB_TILE_MODE11, false},
+ {GB_TILE_MODE12, false},
+ {GB_TILE_MODE13, false},
+ {GB_TILE_MODE14, false},
+ {GB_TILE_MODE15, false},
+ {GB_TILE_MODE16, false},
+ {GB_TILE_MODE17, false},
+ {GB_TILE_MODE18, false},
+ {GB_TILE_MODE19, false},
+ {GB_TILE_MODE20, false},
+ {GB_TILE_MODE21, false},
+ {GB_TILE_MODE22, false},
+ {GB_TILE_MODE23, false},
+ {GB_TILE_MODE24, false},
+ {GB_TILE_MODE25, false},
+ {GB_TILE_MODE26, false},
+ {GB_TILE_MODE27, false},
+ {GB_TILE_MODE28, false},
+ {GB_TILE_MODE29, false},
+ {GB_TILE_MODE30, false},
+ {GB_TILE_MODE31, false},
+ {CC_RB_BACKEND_DISABLE, false, true},
+ {GC_USER_RB_BACKEND_DISABLE, false, true},
+ {PA_SC_RASTER_CONFIG, false, true},
+};
+
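The mixed two- and three-field initializers above line up with amdgpu's allowed-register entry, assumed here (field names as in amdgpu.h) to be:

	struct amdgpu_allowed_register_entry {
		u32 reg_offset;
		bool untouched;		/* report 0 instead of reading */
		bool grbm_indexed;	/* read under GRBM se/sh selection */
	};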
+static uint32_t si_read_indexed_register(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num,
+ u32 reg_offset)
+{
+ uint32_t val;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+
+ val = RREG32(reg_offset);
+
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+}
+
+static int si_read_register(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 reg_offset, u32 *value)
+{
+ uint32_t i;
+
+ *value = 0;
+ for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) {
+ if (reg_offset != si_allowed_read_registers[i].reg_offset)
+ continue;
+
+ if (!si_allowed_read_registers[i].untouched)
+ *value = si_allowed_read_registers[i].grbm_indexed ?
+ si_read_indexed_register(adev, se_num,
+ sh_num, reg_offset) :
+ RREG32(reg_offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
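A caller sketch; 0xffffffff in se_num/sh_num means broadcast, per si_read_indexed_register() above:

	u32 val;

	if (!si_read_register(adev, 0xffffffff, 0xffffffff, GRBM_STATUS, &val))
		DRM_INFO("GRBM_STATUS=0x%08x\n", val);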
+static bool si_read_disabled_bios(struct amdgpu_device *adev)
+{
+ u32 bus_cntl;
+ u32 d1vga_control = 0;
+ u32 d2vga_control = 0;
+ u32 vga_render_control = 0;
+ u32 rom_cntl;
+ bool r;
+
+ bus_cntl = RREG32(R600_BUS_CNTL);
+ if (adev->mode_info.num_crtc) {
+ d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+ d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+ vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ }
+ rom_cntl = RREG32(R600_ROM_CNTL);
+
+ /* enable the rom */
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+ if (adev->mode_info.num_crtc) {
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(VGA_RENDER_CONTROL,
+ (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
+ }
+ WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+ r = amdgpu_read_bios(adev);
+
+ /* restore regs */
+ WREG32(R600_BUS_CNTL, bus_cntl);
+ if (adev->mode_info.num_crtc) {
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(VGA_RENDER_CONTROL, vga_render_control);
+ }
+ WREG32(R600_ROM_CNTL, rom_cntl);
+ return r;
+}
+
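The function is the classic save/flip/restore dance, summarized:

	/*
	 * 1. snapshot BUS_CNTL, D1/D2VGA_CONTROL, VGA_RENDER_CONTROL, ROM_CNTL
	 * 2. un-gate the ROM (clear R600_BIOS_ROM_DIS, set R600_SCK_OVERWRITE)
	 *    and switch VGA decode off so the ROM aperture is readable
	 * 3. amdgpu_read_bios(adev)
	 * 4. write every saved register back, whatever the outcome
	 */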
+//xxx: not implemented
+static int si_asic_reset(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static void si_vga_set_state(struct amdgpu_device *adev, bool state)
+{
+ uint32_t temp;
+
+ temp = RREG32(CONFIG_CNTL);
+ if (state == false) {
+ temp &= ~(1<<0);
+ temp |= (1<<1);
+ } else {
+ temp &= ~(1<<1);
+ }
+ WREG32(CONFIG_CNTL, temp);
+}
+
+static u32 si_get_xclk(struct amdgpu_device *adev)
+{
+ u32 reference_clock = adev->clock.spll.reference_freq;
+ u32 tmp;
+
+ tmp = RREG32(CG_CLKPIN_CNTL_2);
+ if (tmp & MUX_TCLK_TO_XCLK)
+ return TCLK;
+
+ tmp = RREG32(CG_CLKPIN_CNTL);
+ if (tmp & XTALIN_DIVIDE)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
+
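A worked example under an assumed 100 MHz reference (the real value comes from the ATOM clock tables): MUX_TCLK_TO_XCLK forces the fixed TCLK constant regardless of the reference; otherwise XTALIN_DIVIDE yields 100 / 4 = 25 MHz, and with neither bit set the reference is returned unchanged.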
+//xxx:not implemented
+static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
+{
+ return 0;
+}
+
+static void si_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
+static const struct amdgpu_asic_funcs si_asic_funcs =
+{
+ .read_disabled_bios = &si_read_disabled_bios,
+ .detect_hw_virtualization = si_detect_hw_virtualization,
+ .read_register = &si_read_register,
+ .reset = &si_asic_reset,
+ .set_vga_state = &si_vga_set_state,
+ .get_xclk = &si_get_xclk,
+ .set_uvd_clocks = &si_set_uvd_clocks,
+ .set_vce_clocks = NULL,
+};
+
+static uint32_t si_get_rev_id(struct amdgpu_device *adev)
+{
+ return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
+ >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+}
+
+static int si_common_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->smc_rreg = &si_smc_rreg;
+ adev->smc_wreg = &si_smc_wreg;
+ adev->pcie_rreg = &si_pcie_rreg;
+ adev->pcie_wreg = &si_pcie_wreg;
+ adev->pciep_rreg = &si_pciep_rreg;
+ adev->pciep_wreg = &si_pciep_wreg;
+ adev->uvd_ctx_rreg = NULL;
+ adev->uvd_ctx_wreg = NULL;
+ adev->didt_rreg = NULL;
+ adev->didt_wreg = NULL;
+
+ adev->asic_funcs = &si_asic_funcs;
+
+ adev->rev_id = si_get_rev_id(adev);
+ adev->external_rev_id = 0xFF;
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ break;
+ case CHIP_PITCAIRN:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ break;
+
+ case CHIP_VERDE:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ //???
+ adev->external_rev_id = adev->rev_id + 0x14;
+ break;
+ case CHIP_OLAND:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ break;
+ case CHIP_HAINAN:
+ adev->cg_flags =
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
+ adev->pg_flags = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int si_common_sw_init(void *handle)
+{
+ return 0;
+}
+
+static int si_common_sw_fini(void *handle)
+{
+ return 0;
+}
+
+static void si_init_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ amdgpu_program_register_sequence(adev,
+ tahiti_golden_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ tahiti_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+ amdgpu_program_register_sequence(adev,
+ tahiti_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ tahiti_golden_registers2,
+ (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+ break;
+ case CHIP_PITCAIRN:
+ amdgpu_program_register_sequence(adev,
+ pitcairn_golden_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ pitcairn_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+ amdgpu_program_register_sequence(adev,
+ pitcairn_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ break;
+ case CHIP_VERDE:
+ amdgpu_program_register_sequence(adev,
+ verde_golden_registers,
+ (const u32)ARRAY_SIZE(verde_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ verde_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+ amdgpu_program_register_sequence(adev,
+ verde_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ verde_pg_init,
+ (const u32)ARRAY_SIZE(verde_pg_init));
+ break;
+ case CHIP_OLAND:
+ amdgpu_program_register_sequence(adev,
+ oland_golden_registers,
+ (const u32)ARRAY_SIZE(oland_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ oland_golden_rlc_registers,
+ (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+ amdgpu_program_register_sequence(adev,
+ oland_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+ break;
+ case CHIP_HAINAN:
+ amdgpu_program_register_sequence(adev,
+ hainan_golden_registers,
+ (const u32)ARRAY_SIZE(hainan_golden_registers));
+ amdgpu_program_register_sequence(adev,
+ hainan_golden_registers2,
+ (const u32)ARRAY_SIZE(hainan_golden_registers2));
+ amdgpu_program_register_sequence(adev,
+ hainan_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+static void si_pcie_gen3_enable(struct amdgpu_device *adev)
+{
+ struct pci_dev *root = adev->pdev->bus->self;
+ int bridge_pos, gpu_pos;
+ u32 speed_cntl, mask, current_data_rate;
+ int ret, i;
+ u16 tmp16;
+
+ if (pci_is_root_bus(adev->pdev->bus))
+ return;
+
+ if (amdgpu_pcie_gen2 == 0)
+ return;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+ if (ret != 0)
+ return;
+
+ if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ return;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+ LC_CURRENT_DATA_RATE_SHIFT;
+ if (mask & DRM_PCIE_SPEED_80) {
+ if (current_data_rate == 2) {
+ DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ } else if (mask & DRM_PCIE_SPEED_50) {
+ if (current_data_rate == 1) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
+ }
+
+ bridge_pos = pci_pcie_cap(root);
+ if (!bridge_pos)
+ return;
+
+ gpu_pos = pci_pcie_cap(adev->pdev);
+ if (!gpu_pos)
+ return;
+
+ if (mask & DRM_PCIE_SPEED_80) {
+ if (current_data_rate != 2) {
+ u16 bridge_cfg, gpu_cfg;
+ u16 bridge_cfg2, gpu_cfg2;
+ u32 max_lw, current_lw, tmp;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp = RREG32_PCIE(PCIE_LC_STATUS1);
+ max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+ current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+ if (current_lw < max_lw) {
+ tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & LC_RENEGOTIATION_SUPPORT) {
+ tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+ tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+ tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ }
+ }
+
+ for (i = 0; i < 10; i++) {
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+ break;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_REDO_EQ;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ mdelay(100);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp &= ~LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ }
+ }
+ }
+
+ speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+ speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
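+ /* program the target link speed: 3 = 8.0 GT/s, 2 = 5.0 GT/s, 1 = 2.5 GT/s */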
+ pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~0xf;
+ if (mask & DRM_PCIE_SPEED_80)
+ tmp16 |= 3;
+ else if (mask & DRM_PCIE_SPEED_50)
+ tmp16 |= 2;
+ else
+ tmp16 |= 1;
+ pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
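+ /* wait for the hardware to clear the speed-change request */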
+ for (i = 0; i < adev->usec_timeout; i++) {
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ break;
+ udelay(1);
+ }
+}
+
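+/* The PIF PHY registers sit behind an index/data pair; pcie_idx_lock
+ * keeps the index write and the data access from being interleaved.
+ */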
+static inline u32 si_pif_phy0_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(EVERGREEN_PIF_PHY0_INDEX, reg & 0xffff);
+ r = RREG32(EVERGREEN_PIF_PHY0_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static inline void si_pif_phy0_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(EVERGREEN_PIF_PHY0_INDEX, reg & 0xffff);
+ WREG32(EVERGREEN_PIF_PHY0_DATA, v);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static inline u32 si_pif_phy1_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(EVERGREEN_PIF_PHY1_INDEX, reg & 0xffff);
+ r = RREG32(EVERGREEN_PIF_PHY1_DATA);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
+
+static inline void si_pif_phy1_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(EVERGREEN_PIF_PHY1_INDEX, reg & 0xffff);
+ WREG32(EVERGREEN_PIF_PHY1_DATA, v);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+static void si_program_aspm(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+ bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+ bool disable_clkreq = false;
+
+ if (amdgpu_aspm == 0)
+ return;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+ data &= ~LC_XMIT_N_FTS_MASK;
+ data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+ data |= LC_GO_TO_RECOVERY;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+ orig = data = RREG32_PCIE(PCIE_P_CNTL);
+ data |= P_IGNORE_EDB_ERR;
+ if (orig != data)
+ WREG32_PCIE(PCIE_P_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+ data |= LC_PMI_TO_L1_DIS;
+ if (!disable_l0s)
+ data |= LC_L0S_INACTIVITY(7);
+
+ if (!disable_l1) {
+ data |= LC_L1_INACTIVITY(7);
+ data &= ~LC_PMI_TO_L1_DIS;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+ if (!disable_plloff_in_l1) {
+ bool clk_req_support;
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+ if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
+ data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
+ data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_2);
+ data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_2, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_3);
+ data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_3, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
+ data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
+ data &= ~PLL_RAMP_UP_TIME_1_MASK;
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_2);
+ data &= ~PLL_RAMP_UP_TIME_2_MASK;
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_2, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_3);
+ data &= ~PLL_RAMP_UP_TIME_3_MASK;
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_3, data);
+ }
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+ data |= LC_DYN_LANES_PWR_STATE(3);
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+ orig = data = si_pif_phy0_rreg(adev, PB0_PIF_CNTL);
+ data &= ~LS2_EXIT_TIME_MASK;
+ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+ data |= LS2_EXIT_TIME(5);
+ if (orig != data)
+ si_pif_phy0_wreg(adev, PB0_PIF_CNTL, data);
+
+ orig = data = si_pif_phy1_rreg(adev, PB1_PIF_CNTL);
+ data &= ~LS2_EXIT_TIME_MASK;
+ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+ data |= LS2_EXIT_TIME(5);
+ if (orig != data)
+ si_pif_phy1_wreg(adev, PB1_PIF_CNTL, data);
+
+ if (!disable_clkreq &&
+ !pci_is_root_bus(adev->pdev->bus)) {
+ struct pci_dev *root = adev->pdev->bus->self;
+ u32 lnkcap;
+
+ clk_req_support = false;
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+ clk_req_support = true;
+ } else {
+ clk_req_support = false;
+ }
+
+ if (clk_req_support) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+ data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+ orig = data = RREG32(THM_CLK_CNTL);
+ data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+ data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ if (orig != data)
+ WREG32(THM_CLK_CNTL, data);
+
+ orig = data = RREG32(MISC_CLK_CNTL);
+ data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+ data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ if (orig != data)
+ WREG32(MISC_CLK_CNTL, data);
+
+ orig = data = RREG32(CG_CLKPIN_CNTL);
+ data &= ~BCLK_AS_XCLK;
+ if (orig != data)
+ WREG32(CG_CLKPIN_CNTL, data);
+
+ orig = data = RREG32(CG_CLKPIN_CNTL_2);
+ data &= ~FORCE_BIF_REFCLK_EN;
+ if (orig != data)
+ WREG32(CG_CLKPIN_CNTL_2, data);
+
+ orig = data = RREG32(MPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_CLKOUT_SEL_MASK;
+ data |= MPLL_CLKOUT_SEL(4);
+ if (orig != data)
+ WREG32(MPLL_BYPASSCLK_SEL, data);
+
+ orig = data = RREG32(SPLL_CNTL_MODE);
+ data &= ~SPLL_REFCLK_SEL_MASK;
+ if (orig != data)
+ WREG32(SPLL_CNTL_MODE, data);
+ }
+ }
+ } else {
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+
+ orig = data = RREG32_PCIE(PCIE_CNTL2);
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ if (orig != data)
+ WREG32_PCIE(PCIE_CNTL2, data);
+
+ if (!disable_l0s) {
+ data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+ if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+ data = RREG32_PCIE(PCIE_LC_STATUS1);
+ if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~LC_L0S_INACTIVITY_MASK;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+ }
+ }
+}
+
+static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
+{
+ int readrq;
+ u16 v;
+
+ readrq = pcie_get_readrq(adev->pdev);
+ v = ffs(readrq) - 8;
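+ /* if the BIOS or OS left MAX_READ_REQUEST_SIZE at an invalid value,
+ * fall back to 512 bytes to avoid hangs or performance problems
+ */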
+ if ((v == 0) || (v == 6) || (v == 7))
+ pcie_set_readrq(adev->pdev, 512);
+}
+
+static int si_common_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_fix_pci_max_read_req_size(adev);
+ si_init_golden_registers(adev);
+ si_pcie_gen3_enable(adev);
+ si_program_aspm(adev);
+
+ return 0;
+}
+
+static int si_common_hw_fini(void *handle)
+{
+ return 0;
+}
+
+static int si_common_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_common_hw_fini(adev);
+}
+
+static int si_common_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_common_hw_init(adev);
+}
+
+static bool si_common_is_idle(void *handle)
+{
+ return true;
+}
+
+static int si_common_wait_for_idle(void *handle)
+{
+ return 0;
+}
+
+static int si_common_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static int si_common_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int si_common_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs si_common_ip_funcs = {
+ .name = "si_common",
+ .early_init = si_common_early_init,
+ .late_init = NULL,
+ .sw_init = si_common_sw_init,
+ .sw_fini = si_common_sw_fini,
+ .hw_init = si_common_hw_init,
+ .hw_fini = si_common_hw_fini,
+ .suspend = si_common_suspend,
+ .resume = si_common_resume,
+ .is_idle = si_common_is_idle,
+ .wait_for_idle = si_common_wait_for_idle,
+ .soft_reset = si_common_soft_reset,
+ .set_clockgating_state = si_common_set_clockgating_state,
+ .set_powergating_state = si_common_set_powergating_state,
+};
+
+static const struct amdgpu_ip_block_version verde_ip_blocks[] =
+{
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dma_ip_funcs,
+ },
+/* {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &si_null_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_null_ip_funcs,
+ },
+ */
+};
+
+static const struct amdgpu_ip_block_version hainan_ip_blocks[] =
+{
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &si_dma_ip_funcs,
+ },
+};
+
+int si_set_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VERDE:
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_OLAND:
+ adev->ip_blocks = verde_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks);
+ break;
+ case CHIP_HAINAN:
+ adev->ip_blocks = hainan_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks);
+ break;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h b/drivers/gpu/drm/amd/amdgpu/si.h
index 1cef03deeac3..959d7b63e0e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smum.h
+++ b/drivers/gpu/drm/amd/amdgpu/si.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,22 +21,13 @@
*
*/
-#ifndef FIJI_SMUMGR_H
-#define FIJI_SMUMGR_H
+#ifndef __SI_H__
+#define __SI_H__
-#include "fiji_ppsmc.h"
+extern const struct amd_ip_funcs si_common_ip_funcs;
-int fiji_smu_init(struct amdgpu_device *adev);
-int fiji_smu_fini(struct amdgpu_device *adev);
-int fiji_smu_start(struct amdgpu_device *adev);
-
-struct fiji_smu_private_data
-{
- uint8_t *header;
- uint32_t smu_buffer_addr_high;
- uint32_t smu_buffer_addr_low;
- uint32_t header_addr_high;
- uint32_t header_addr_low;
-};
+void si_srbm_select(struct amdgpu_device *adev,
+ u32 me, u32 pipe, u32 queue, u32 vmid);
+int si_set_ip_blocks(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
new file mode 100644
index 000000000000..de358193a8f9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -0,0 +1,915 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+#include "si/sid.h"
+
+const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
+{
+ DMA0_REGISTER_OFFSET,
+ DMA1_REGISTER_OFFSET
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
+
+static uint32_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ return ring->adev->wb.wb[ring->rptr_offs>>2];
+}
+
+static uint32_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+ return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+}
+
+static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
+
+ WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
+}
+
+static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
+{
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+ amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
+ amdgpu_ring_write(ring, 1);
+}
+
+static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
+ * si_dma_ring_emit_fence - emit a fence on the DMA ring
+ *
+ * @ring: amdgpu ring pointer
+ * @addr: GPU address where the fence sequence number is written
+ * @seq: fence sequence number
+ * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (SI).
+ */
+static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+ unsigned flags)
+{
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+ /* write the fence */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ amdgpu_ring_write(ring, seq);
+ /* optionally write high bits as well */
+ if (write64bit) {
+ addr += 4;
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+ }
+ /* generate an interrupt */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
+}
+
+static void si_dma_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 rb_cntl;
+ unsigned i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ ring->ready = false;
+ }
+}
+
+static int si_dma_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
+ int i, r;
+ uint64_t rptr_addr;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
+ WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
+
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+
+ WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
+ WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);
+
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
+ WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+ }
+
+ return 0;
+}
+
+/**
+ * si_dma_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value
+ * to memory (SI).
+ * Returns 0 for success, error for failure.
+ */
+static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned i;
+ unsigned index;
+ int r;
+ u32 tmp;
+ u64 gpu_addr;
+
+ r = amdgpu_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ return r;
+ }
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+
+ r = amdgpu_ring_alloc(ring, 4);
+ if (r) {
+ DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ amdgpu_wb_free(adev, index);
+ return r;
+ }
+
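+ /* emit a single WRITE packet that stores 0xDEADBEEF in the wb slot */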
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
+ amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_wb_free(adev, index);
+
+ return r;
+}
+
+/**
+ * si_dma_ring_test_ib - test an IB on the DMA engine
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @timeout: fence wait timeout in jiffies
+ *
+ * Test a simple IB in the DMA ring (SI).
+ * Returns 0 on success, error on failure.
+ */
+static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct fence *f = NULL;
+ unsigned index;
+ u32 tmp = 0;
+ u64 gpu_addr;
+ long r;
+
+ r = amdgpu_wb_get(adev, &index);
+ if (r) {
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ return r;
+ }
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ tmp = 0xCAFEDEAD;
+ adev->wb.wb[index] = cpu_to_le32(tmp);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ goto err0;
+ }
+
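+ /* build a one-packet IB that writes 0xDEADBEEF to the wb slot */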
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
+ if (r)
+ goto err1;
+
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
+ goto err1;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ } else {
+ DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+
+err1:
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
+err0:
+ amdgpu_wb_free(adev, index);
+ return r;
+}
+
+/**
+ * si_dma_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using DMA (SI).
+ */
+static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ unsigned bytes = count * 8;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, bytes);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+}
+
+/**
+ * si_dma_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using DMA (SI).
+ */
+static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr)
+{
+ unsigned ndw = count * 2;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ for (; ndw > 0; ndw -= 2) {
+ ib->ptr[ib->length_dw++] = lower_32_bits(value);
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ value += incr;
+ }
+}
+
+/**
+ * si_dma_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (SI).
+ */
+static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
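+ /* each packet can update at most 0xFFFFE dwords; loop in chunks */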
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & AMDGPU_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * si_dma_ring_pad_ib - pad the IB to the required number of dw
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to fill with padding
+ */
+static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (SI).
+ */
+static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
+ (1 << 27)); /* Poll memory */
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ amdgpu_ring_write(ring, seq); /* value */
+ amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
+}
+
+/**
+ * si_dma_ring_emit_vm_flush - vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm_id: VM ID to flush
+ * @pd_addr: page directory base address
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (SI).
+ */
+static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm_id < 8)
+ amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ else
+ amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
+ amdgpu_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+ amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
+ amdgpu_ring_write(ring, 0xff << 16); /* retry */
+ amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, 0); /* value */
+ amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
+}
+
+static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 7 + 3; /* si_dma_ring_emit_ib */
+}
+
+static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 3 + /* si_dma_ring_emit_hdp_flush */
+ 3 + /* si_dma_ring_emit_hdp_invalidate */
+ 6 + /* si_dma_ring_emit_pipeline_sync */
+ 12 + /* si_dma_ring_emit_vm_flush */
+ 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */
+}
+
+static int si_dma_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->sdma.num_instances = 2;
+
+ si_dma_set_ring_funcs(adev);
+ si_dma_set_buffer_funcs(adev);
+ si_dma_set_vm_pte_funcs(adev);
+ si_dma_set_irq_funcs(adev);
+
+ return 0;
+}
+
+static int si_dma_sw_init(void *handle)
+{
+ struct amdgpu_ring *ring;
+ int r, i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* DMA0 trap event */
+ r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+ if (r)
+ return r;
+
+ /* DMA1 trap event */
+ r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = false;
+ sprintf(ring->name, "sdma%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024,
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf,
+ &adev->sdma.trap_irq,
+ (i == 0) ?
+ AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+ AMDGPU_RING_TYPE_SDMA);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static int si_dma_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+
+ return 0;
+}
+
+static int si_dma_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_dma_start(adev);
+}
+
+static int si_dma_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_dma_stop(adev);
+
+ return 0;
+}
+
+static int si_dma_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_dma_hw_fini(adev);
+}
+
+static int si_dma_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_dma_hw_init(adev);
+}
+
+static bool si_dma_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 tmp = RREG32(SRBM_STATUS2);
+
+ if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
+ return false;
+
+ return true;
+}
+
+static int si_dma_wait_for_idle(void *handle)
+{
+ unsigned i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (si_dma_is_idle(handle))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int si_dma_soft_reset(void *handle)
+{
+ DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
+ return 0;
+}
+
+static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_cntl;
+
+ switch (type) {
+ case AMDGPU_SDMA_IRQ_TRAP0:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl &= ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
+ sdma_cntl |= TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
+ break;
+ default:
+ break;
+ }
+ break;
+ case AMDGPU_SDMA_IRQ_TRAP1:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl &= ~TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
+ sdma_cntl |= TRAP_ENABLE;
+ WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int si_dma_process_trap_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ amdgpu_fence_process(&adev->sdma.instance[0].ring);
+
+ return 0;
+}
+
+static int si_dma_process_trap_irq_1(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ amdgpu_fence_process(&adev->sdma.instance[1].ring);
+
+ return 0;
+}
+
+static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal instruction in SDMA command stream\n");
+ schedule_work(&adev->reset_work);
+ return 0;
+}
+
+static int si_dma_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ u32 orig, data, offset;
+ int i;
+ bool enable;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ enable = (state == AMD_CG_STATE_GATE);
+
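+ /* when gating, clear the memory power override and program
+ * DMA_CLK_CTRL for gated clocks; otherwise force both on
+ */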
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data &= ~MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ }
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data |= MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+
+ orig = data = RREG32(DMA_CLK_CTRL + offset);
+ data = 0xff000000;
+ if (data != orig)
+ WREG32(DMA_CLK_CTRL + offset, data);
+ }
+ }
+
+ return 0;
+}
+
+static int si_dma_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ u32 tmp;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
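+ /* hardware-specific power-gating FSM programming sequence */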
+ WREG32(DMA_PGFSM_WRITE, 0x00002000);
+ WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
+
+ for (tmp = 0; tmp < 5; tmp++)
+ WREG32(DMA_PGFSM_WRITE, 0);
+
+ return 0;
+}
+
+const struct amd_ip_funcs si_dma_ip_funcs = {
+ .name = "si_dma",
+ .early_init = si_dma_early_init,
+ .late_init = NULL,
+ .sw_init = si_dma_sw_init,
+ .sw_fini = si_dma_sw_fini,
+ .hw_init = si_dma_hw_init,
+ .hw_fini = si_dma_hw_fini,
+ .suspend = si_dma_suspend,
+ .resume = si_dma_resume,
+ .is_idle = si_dma_is_idle,
+ .wait_for_idle = si_dma_wait_for_idle,
+ .soft_reset = si_dma_soft_reset,
+ .set_clockgating_state = si_dma_set_clockgating_state,
+ .set_powergating_state = si_dma_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
+ .get_rptr = si_dma_ring_get_rptr,
+ .get_wptr = si_dma_ring_get_wptr,
+ .set_wptr = si_dma_ring_set_wptr,
+ .parse_cs = NULL,
+ .emit_ib = si_dma_ring_emit_ib,
+ .emit_fence = si_dma_ring_emit_fence,
+ .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
+ .emit_vm_flush = si_dma_ring_emit_vm_flush,
+ .emit_hdp_flush = si_dma_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
+ .test_ring = si_dma_ring_test_ring,
+ .test_ib = si_dma_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = si_dma_ring_pad_ib,
+ .get_emit_ib_size = si_dma_ring_get_emit_ib_size,
+ .get_dma_frame_size = si_dma_ring_get_dma_frame_size,
+};
+
+static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
+ .set = si_dma_set_trap_irq_state,
+ .process = si_dma_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = {
+ .set = si_dma_set_trap_irq_state,
+ .process = si_dma_process_trap_irq_1,
+};
+
+static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
+ .process = si_dma_process_illegal_inst_irq,
+};
+
+static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
+ adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1;
+ adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
+}
+
+/**
+ * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Copy GPU buffers using the DMA engine (SI).
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ uint32_t byte_count)
+{
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, byte_count);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
+}
+
+/**
+ * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill with commands
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine (SI).
+ */
+static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
+ uint32_t src_data,
+ uint64_t dst_offset,
+ uint32_t byte_count)
+{
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
+ 0, 0, 0, byte_count / 4);
+ ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+ ib->ptr[ib->length_dw++] = src_data;
+ ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
+}
+
+static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
+ .copy_max_bytes = 0xffff8,
+ .copy_num_dw = 5,
+ .emit_copy_buffer = si_dma_emit_copy_buffer,
+
+ .fill_max_bytes = 0xffff8,
+ .fill_num_dw = 4,
+ .emit_fill_buffer = si_dma_emit_fill_buffer,
+};
+
+static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
+{
+ if (adev->mman.buffer_funcs == NULL) {
+ adev->mman.buffer_funcs = &si_dma_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ }
+}
+
+static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+ .copy_pte = si_dma_vm_copy_pte,
+ .write_pte = si_dma_vm_write_pte,
+ .set_pte_pde = si_dma_vm_set_pte_pde,
+};
+
+static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ if (adev->vm_manager.vm_pte_funcs == NULL) {
+ adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->vm_manager.vm_pte_rings[i] =
+ &adev->sdma.instance[i].ring;
+
+ adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h
new file mode 100644
index 000000000000..3a3e0c78a54b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_DMA_H__
+#define __SI_DMA_H__
+
+extern const struct amd_ip_funcs si_dma_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
new file mode 100644
index 000000000000..d6f85b1a0b93
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -0,0 +1,8041 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_atombios.h"
+#include "si/sid.h"
+#include "r600_dpm.h"
+#include "si_dpm.h"
+#include "atom.h"
+#include "../include/pptable.h"
+#include <linux/math64.h>
+#include <linux/seq_file.h>
+#include <linux/firmware.h>
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x20000
+
+#define SCLK_MIN_DEEPSLEEP_FREQ 1350
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+
+#define BIOS_SCRATCH_4 0x5cd
+
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+ struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+union fan_info {
+ struct _ATOM_PPLIB_FANTABLE fan;
+ struct _ATOM_PPLIB_FANTABLE2 fan2;
+ struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+ struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
+static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
+{
+ R600_UTC_DFLT_00,
+ R600_UTC_DFLT_01,
+ R600_UTC_DFLT_02,
+ R600_UTC_DFLT_03,
+ R600_UTC_DFLT_04,
+ R600_UTC_DFLT_05,
+ R600_UTC_DFLT_06,
+ R600_UTC_DFLT_07,
+ R600_UTC_DFLT_08,
+ R600_UTC_DFLT_09,
+ R600_UTC_DFLT_10,
+ R600_UTC_DFLT_11,
+ R600_UTC_DFLT_12,
+ R600_UTC_DFLT_13,
+ R600_UTC_DFLT_14,
+};
+
+static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
+{
+ R600_DTC_DFLT_00,
+ R600_DTC_DFLT_01,
+ R600_DTC_DFLT_02,
+ R600_DTC_DFLT_03,
+ R600_DTC_DFLT_04,
+ R600_DTC_DFLT_05,
+ R600_DTC_DFLT_06,
+ R600_DTC_DFLT_07,
+ R600_DTC_DFLT_08,
+ R600_DTC_DFLT_09,
+ R600_DTC_DFLT_10,
+ R600_DTC_DFLT_11,
+ R600_DTC_DFLT_12,
+ R600_DTC_DFLT_13,
+ R600_DTC_DFLT_14,
+};
+
+static const struct si_cac_config_reg cac_weights_tahiti[] =
+{
+ { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_tahiti[] =
+{
+ { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
+ { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_tahiti[] =
+{
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_tahiti =
+{
+ ((1 << 16) | 27027),
+ 6,
+ 0,
+ 4,
+ 95,
+ {
+ 0UL,
+ 0UL,
+ 4521550UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 40
+ },
+ 595000000UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static const struct si_dte_data dte_data_tahiti =
+{
+ { 1159409, 0, 0, 0, 0 },
+ { 777, 0, 0, 0, 0 },
+ 2,
+ 54000,
+ 127000,
+ 25,
+ 2,
+ 10,
+ 13,
+ { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
+ { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
+ { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
+ 85,
+ false
+};
+
+#if 0
+static const struct si_dte_data dte_data_tahiti_le =
+{
+ { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
+ { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
+ 0x5,
+ 0xAFC8,
+ 0x64,
+ 0x32,
+ 1,
+ 0,
+ 0x10,
+ { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
+ { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
+ { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
+ 85,
+ true
+};
+#endif
+
+static const struct si_dte_data dte_data_tahiti_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_new_zealand =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
+ { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
+ 0x5,
+ 0xAFC8,
+ 0x69,
+ 0x32,
+ 1,
+ 0,
+ 0x10,
+ { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
+ 85,
+ true
+};
+
+static const struct si_dte_data dte_data_aruba_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_malta =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_cac_config_reg cac_weights_pitcairn[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_pitcairn[] =
+{
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_pitcairn[] =
+{
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_pitcairn =
+{
+ ((1 << 16) | 27027),
+ 5,
+ 0,
+ 6,
+ 100,
+ {
+ 51600000UL,
+ 1800000UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static const struct si_dte_data dte_data_pitcairn =
+{
+ { 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0 },
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ 0,
+ false
+};
+
+static const struct si_dte_data dte_data_curacao_xt =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_curacao_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_neptune_xt =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 45000,
+ 100,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_heathrow[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_cape_verde[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_cape_verde[] =
+{
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_cape_verde[] =
+{
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_cape_verde =
+{
+ ((1 << 16) | 0x6993),
+ 5,
+ 0,
+ 7,
+ 105,
+ {
+ 0UL,
+ 0UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
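
Aside: the packed first field here, ((1 << 16) | 0x6993), is numerically identical to the Tahiti/Pitcairn form above, since 0x6993 = 6*4096 + 9*256 + 9*16 + 3 = 27027; only the notation (hexadecimal versus decimal) differs between boards.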
+
+static const struct si_dte_data dte_data_cape_verde =
+{
+ { 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0 },
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ 0,
+ false
+};
+
+static const struct si_dte_data dte_data_venus_xtx =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
+ 5,
+ 55000,
+ 0x69,
+ 0xA,
+ 1,
+ 0,
+ 0x3,
+ { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_venus_xt =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
+ 5,
+ 55000,
+ 0x69,
+ 0xA,
+ 1,
+ 0,
+ 0x3,
+ { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_venus_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
+ 5,
+ 55000,
+ 0x69,
+ 0xA,
+ 1,
+ 0,
+ 0x3,
+ { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_cac_config_reg cac_weights_oland[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_pro[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_mars_xt[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_pro[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_weights_oland_xt[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_oland[] =
+{
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg lcac_mars_pro[] =
+{
+ { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
+ { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
+ { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_cac_config_reg cac_override_oland[] =
+{
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_oland =
+{
+ ((1 << 16) | 0x6993),
+ 5,
+ 0,
+ 7,
+ 105,
+ {
+ 0UL,
+ 0UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static const struct si_powertune_data powertune_data_mars_pro =
+{
+ ((1 << 16) | 0x6993),
+ 5,
+ 0,
+ 7,
+ 105,
+ {
+ 0UL,
+ 0UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static const struct si_dte_data dte_data_oland =
+{
+ { 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0 },
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ 0,
+ false
+};
+
+static const struct si_dte_data dte_data_mars_pro =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 55000,
+ 105,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_dte_data dte_data_sun_xt =
+{
+ { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 5,
+ 55000,
+ 105,
+ 0xA,
+ 1,
+ 0,
+ 0x10,
+ { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
+ { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
+ { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
+ 90,
+ true
+};
+
+static const struct si_cac_config_reg cac_weights_hainan[] =
+{
+ { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
+ { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
+ { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
+ { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
+ { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
+ { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
+ { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
+ { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
+ { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
+ { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
+ { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
+ { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
+ { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
+ { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
+ { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
+ { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
+ { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
+ { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
+ { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
+ { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
+ { 0xFFFFFFFF }
+};
+
+static const struct si_powertune_data powertune_data_hainan =
+{
+ ((1 << 16) | 0x6993),
+ 5,
+ 0,
+ 9,
+ 105,
+ {
+ 0UL,
+ 0UL,
+ 7194395UL,
+ 309631529UL,
+ -1270850L,
+ 4513710L,
+ 100
+ },
+ 117830498UL,
+ 12,
+ {
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ },
+ true
+};
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
+static struct si_ps *si_get_ps(struct amdgpu_ps *rps);
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+ const struct atom_voltage_table *table,
+ u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+ SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+ u16 *std_voltage);
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 value);
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+ struct rv7xx_pl *pl,
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+ u32 engine_clock,
+ SISLANDS_SMC_SCLK_VALUE *sclk);
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
+
+static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
+{
+ struct si_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
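+/*
+ * Evaluate the leakage model in drm 32.32 fixed point:
+ *   kt = exp(tmp * T) / exp(tmp * T_ref),  tmp = t_slope * V + t_intercept
+ *   kv = av * exp(bv * V)
+ *   leakage = I_leakage * kt * kv * V
+ * Coefficients are stored scaled by 1e8; V and T arrive in mV and
+ * millidegrees and are converted to volts/degrees before use.
+ */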
+static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
+ u16 v, s32 t, u32 ileakage, u32 *leakage)
+{
+ s64 kt, kv, leakage_w, i_leakage, vddc;
+ s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+ s64 tmp;
+
+ i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+ vddc = div64_s64(drm_int2fixp(v), 1000);
+ temperature = div64_s64(drm_int2fixp(t), 1000);
+
+ t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
+ t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
+ av = div64_s64(drm_int2fixp(coeff->av), 100000000);
+ bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
+ t_ref = drm_int2fixp(coeff->t_ref);
+
+ tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+ kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+ kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
+ kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
+
+ leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+ *leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
+ const struct ni_leakage_coeffients *coeff,
+ u16 v,
+ s32 t,
+ u32 i_leakage,
+ u32 *leakage)
+{
+ si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
+}
+
+static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
+ const u32 fixed_kt, u16 v,
+ u32 ileakage, u32 *leakage)
+{
+ s64 kt, kv, leakage_w, i_leakage, vddc;
+
+ i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
+ vddc = div64_s64(drm_int2fixp(v), 1000);
+
+ kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
+ kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
+ drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
+
+ leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
+
+ *leakage = drm_fixp2int(leakage_w * 1000);
+}
+
+static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
+ const struct ni_leakage_coeffients *coeff,
+ const u32 fixed_kt,
+ u16 v,
+ u32 i_leakage,
+ u32 *leakage)
+{
+ si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
+}
+
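+/*
+ * Rescale the DTE thermal-resistance table from the PL2 (near-TDP) power
+ * limit: each stage spreads the temperature headroom (max_t in degrees
+ * minus t0, which is stored in millidegrees) across the limit in 2^14
+ * fixed point.  The t_split weights assume k == 5 filter stages, and the
+ * temperature-dependent entries are derived from the last stage r[4].
+ */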
+static void si_update_dte_from_pl2(struct amdgpu_device *adev,
+ struct si_dte_data *dte_data)
+{
+ u32 p_limit1 = adev->pm.dpm.tdp_limit;
+ u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
+ u32 k = dte_data->k;
+ u32 t_max = dte_data->max_t;
+ u32 t_split[5] = { 10, 15, 20, 25, 30 };
+ u32 t_0 = dte_data->t0;
+ u32 i;
+
+ if (p_limit2 != 0 && p_limit2 <= p_limit1) {
+ dte_data->tdep_count = 3;
+
+ for (i = 0; i < k; i++) {
+ dte_data->r[i] =
+ (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
+ (p_limit2 * (u32)100);
+ }
+
+ dte_data->tdep_r[1] = dte_data->r[4] * 2;
+
+ for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
+ dte_data->tdep_r[i] = dte_data->r[4];
+ }
+ } else {
+ DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
+ }
+}
+
+static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
+static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
+{
+ struct ni_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
+static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
+{
+ struct si_ps *ps = aps->ps_priv;
+
+ return ps;
+}
+
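+/*
+ * Pick the CAC weight, LCAC, CAC override, powertune and DTE tables for
+ * the detected ASIC (refined by PCI device ID for the many SI variants),
+ * then derive the power containment / CAC / SQ ramping / DTE enable flags
+ * used by the rest of the powertune code.
+ */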
+static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ bool update_dte_from_pl2 = false;
+
+ if (adev->asic_type == CHIP_TAHITI) {
+ si_pi->cac_weights = cac_weights_tahiti;
+ si_pi->lcac_config = lcac_tahiti;
+ si_pi->cac_override = cac_override_tahiti;
+ si_pi->powertune_data = &powertune_data_tahiti;
+ si_pi->dte_data = dte_data_tahiti;
+
+ switch (adev->pdev->device) {
+ case 0x6798:
+ si_pi->dte_data.enable_dte_by_default = true;
+ break;
+ case 0x6799:
+ si_pi->dte_data = dte_data_new_zealand;
+ break;
+ case 0x6790:
+ case 0x6791:
+ case 0x6792:
+ case 0x679E:
+ si_pi->dte_data = dte_data_aruba_pro;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x679B:
+ si_pi->dte_data = dte_data_malta;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x679A:
+ si_pi->dte_data = dte_data_tahiti_pro;
+ update_dte_from_pl2 = true;
+ break;
+ default:
+ if (si_pi->dte_data.enable_dte_by_default)
+ DRM_ERROR("DTE is not enabled!\n");
+ break;
+ }
+ } else if (adev->asic_type == CHIP_PITCAIRN) {
+ si_pi->cac_weights = cac_weights_pitcairn;
+ si_pi->lcac_config = lcac_pitcairn;
+ si_pi->cac_override = cac_override_pitcairn;
+ si_pi->powertune_data = &powertune_data_pitcairn;
+
+ switch (adev->pdev->device) {
+ case 0x6810:
+ case 0x6818:
+ si_pi->dte_data = dte_data_curacao_xt;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6819:
+ case 0x6811:
+ si_pi->dte_data = dte_data_curacao_pro;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6800:
+ case 0x6806:
+ si_pi->dte_data = dte_data_neptune_xt;
+ update_dte_from_pl2 = true;
+ break;
+ default:
+ si_pi->dte_data = dte_data_pitcairn;
+ break;
+ }
+ } else if (adev->asic_type == CHIP_VERDE) {
+ si_pi->lcac_config = lcac_cape_verde;
+ si_pi->cac_override = cac_override_cape_verde;
+ si_pi->powertune_data = &powertune_data_cape_verde;
+
+ switch (adev->pdev->device) {
+ case 0x683B:
+ case 0x683F:
+ case 0x6829:
+ case 0x6835:
+ si_pi->cac_weights = cac_weights_cape_verde_pro;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ case 0x682C:
+ si_pi->cac_weights = cac_weights_cape_verde_pro;
+ si_pi->dte_data = dte_data_sun_xt;
+ break;
+ case 0x6825:
+ case 0x6827:
+ si_pi->cac_weights = cac_weights_heathrow;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ case 0x6824:
+ case 0x682D:
+ si_pi->cac_weights = cac_weights_chelsea_xt;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ case 0x682F:
+ si_pi->cac_weights = cac_weights_chelsea_pro;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ case 0x6820:
+ si_pi->cac_weights = cac_weights_heathrow;
+ si_pi->dte_data = dte_data_venus_xtx;
+ break;
+ case 0x6821:
+ si_pi->cac_weights = cac_weights_heathrow;
+ si_pi->dte_data = dte_data_venus_xt;
+ break;
+ case 0x6823:
+ case 0x682B:
+ case 0x6822:
+ case 0x682A:
+ si_pi->cac_weights = cac_weights_chelsea_pro;
+ si_pi->dte_data = dte_data_venus_pro;
+ break;
+ default:
+ si_pi->cac_weights = cac_weights_cape_verde;
+ si_pi->dte_data = dte_data_cape_verde;
+ break;
+ }
+ } else if (adev->asic_type == CHIP_OLAND) {
+ si_pi->lcac_config = lcac_mars_pro;
+ si_pi->cac_override = cac_override_oland;
+ si_pi->powertune_data = &powertune_data_mars_pro;
+ si_pi->dte_data = dte_data_mars_pro;
+
+ switch (adev->pdev->device) {
+ case 0x6601:
+ case 0x6621:
+ case 0x6603:
+ case 0x6605:
+ si_pi->cac_weights = cac_weights_mars_pro;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6600:
+ case 0x6606:
+ case 0x6620:
+ case 0x6604:
+ si_pi->cac_weights = cac_weights_mars_xt;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6611:
+ case 0x6613:
+ case 0x6608:
+ si_pi->cac_weights = cac_weights_oland_pro;
+ update_dte_from_pl2 = true;
+ break;
+ case 0x6610:
+ si_pi->cac_weights = cac_weights_oland_xt;
+ update_dte_from_pl2 = true;
+ break;
+ default:
+ si_pi->cac_weights = cac_weights_oland;
+ si_pi->lcac_config = lcac_oland;
+ si_pi->cac_override = cac_override_oland;
+ si_pi->powertune_data = &powertune_data_oland;
+ si_pi->dte_data = dte_data_oland;
+ break;
+ }
+ } else if (adev->asic_type == CHIP_HAINAN) {
+ si_pi->cac_weights = cac_weights_hainan;
+ si_pi->lcac_config = lcac_oland;
+ si_pi->cac_override = cac_override_oland;
+ si_pi->powertune_data = &powertune_data_hainan;
+ si_pi->dte_data = dte_data_sun_xt;
+ update_dte_from_pl2 = true;
+ } else {
+ DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
+ return;
+ }
+
+ ni_pi->enable_power_containment = false;
+ ni_pi->enable_cac = false;
+ ni_pi->enable_sq_ramping = false;
+ si_pi->enable_dte = false;
+
+ if (si_pi->powertune_data->enable_powertune_by_default) {
+ ni_pi->enable_power_containment = true;
+ ni_pi->enable_cac = true;
+ if (si_pi->dte_data.enable_dte_by_default) {
+ si_pi->enable_dte = true;
+ if (update_dte_from_pl2)
+ si_update_dte_from_pl2(adev, &si_pi->dte_data);
+
+ }
+ ni_pi->enable_sq_ramping = true;
+ }
+
+ ni_pi->driver_calculate_cac_leakage = true;
+ ni_pi->cac_configuration_required = true;
+
+ if (ni_pi->cac_configuration_required) {
+ ni_pi->support_cac_long_term_average = true;
+ si_pi->dyn_powertune_data.l2_lta_window_size =
+ si_pi->powertune_data->l2_lta_window_size_default;
+ si_pi->dyn_powertune_data.lts_truncate =
+ si_pi->powertune_data->lts_truncate_default;
+ } else {
+ ni_pi->support_cac_long_term_average = false;
+ si_pi->dyn_powertune_data.l2_lta_window_size = 0;
+ si_pi->dyn_powertune_data.lts_truncate = 0;
+ }
+
+ si_pi->dyn_powertune_data.disable_uvd_powertune = false;
+}
+
+static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
+{
+ return 1;
+}
+
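+/*
+ * CG_CAC_CTRL packs the CAC window as two 16-bit halves that multiply out
+ * to a cycle count.  xclk is in 10 kHz units (xclk / 100 is ticks per
+ * microsecond), so the result is the window length in microseconds.
+ */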
+static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
+{
+ u32 xclk;
+ u32 wintime;
+ u32 cac_window;
+ u32 cac_window_size;
+
+ xclk = amdgpu_asic_get_xclk(adev);
+
+ if (xclk == 0)
+ return 0;
+
+ cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
+ cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
+
+ wintime = (cac_window_size * 100) / xclk;
+
+ return wintime;
+}
+
+static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
+{
+ return power_in_watts;
+}
+
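+/*
+ * Apply the user's percentage TDP adjustment in the direction given by
+ * adjust_polarity and move the near-TDP limit by the same delta.  The
+ * result is rejected if it is zero, exceeds twice the default TDP, or
+ * leaves the near-TDP limit above the TDP limit.
+ */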
+static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
+ bool adjust_polarity,
+ u32 tdp_adjustment,
+ u32 *tdp_limit,
+ u32 *near_tdp_limit)
+{
+ u32 adjustment_delta, max_tdp_limit;
+
+ if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
+ return -EINVAL;
+
+ max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
+
+ if (adjust_polarity) {
+ *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+ *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
+ } else {
+ *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
+ adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit;
+ if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
+ *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
+ else
+ *near_tdp_limit = 0;
+ }
+
+ if ((*tdp_limit == 0) || (*tdp_limit > max_tdp_limit))
+ return -EINVAL;
+ if ((*near_tdp_limit == 0) || (*near_tdp_limit > *tdp_limit))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (ni_pi->enable_power_containment) {
+ SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+ PP_SIslands_PAPMParameters *papm_parm;
+ struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
+ u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+ u32 tdp_limit;
+ u32 near_tdp_limit;
+ int ret;
+
+ if (scaling_factor == 0)
+ return -EINVAL;
+
+ memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+ ret = si_calculate_adjusted_tdp_limits(adev,
+ false, /* adjust_polarity: false => adjustment scales the limit down */
+ adev->pm.dpm.tdp_adjustment,
+ &tdp_limit,
+ &near_tdp_limit);
+ if (ret)
+ return ret;
+
+ smc_table->dpm2Params.TDPLimit =
+ cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
+ smc_table->dpm2Params.NearTDPLimit =
+ cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
+ smc_table->dpm2Params.SafePowerLimit =
+ cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+ offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
+ (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
+ sizeof(u32) * 3,
+ si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (si_pi->enable_ppm) {
+ papm_parm = &si_pi->papm_parm;
+ memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
+ papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
+ papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
+ papm_parm->dGPU_T_Warning = cpu_to_be32(95);
+ papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
+ papm_parm->PlatformPowerLimit = 0xffffffff;
+ papm_parm->NearTDPLimitPAPM = 0xffffffff;
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
+ (u8 *)papm_parm,
+ sizeof(PP_SIslands_PAPMParameters),
+ si_pi->sram_end);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (ni_pi->enable_power_containment) {
+ SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
+ u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
+ int ret;
+
+ memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
+
+ smc_table->dpm2Params.NearTDPLimit =
+ cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
+ smc_table->dpm2Params.SafePowerLimit =
+ cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ (si_pi->state_table_start +
+ offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
+ offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
+ (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
+ sizeof(u32) * 2,
+ si_pi->sram_end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
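+/*
+ * Dynamic power scales roughly with VDDC^2, so the ratio between two
+ * levels is computed as 1024 * (curr/prev)^2 plus a per-mille safety
+ * margin, returned as a x1024 fixed-point u16 (0 on overflow or if
+ * either voltage is unknown).
+ */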
+static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
+ const u16 prev_std_vddc,
+ const u16 curr_std_vddc)
+{
+ u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
+ u64 prev_vddc = (u64)prev_std_vddc;
+ u64 curr_vddc = (u64)curr_std_vddc;
+ u64 pwr_efficiency_ratio, n, d;
+
+ if ((prev_vddc == 0) || (curr_vddc == 0))
+ return 0;
+
+ n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
+ d = prev_vddc * prev_vddc;
+ pwr_efficiency_ratio = div64_u64(n, d);
+
+ if (pwr_efficiency_ratio > (u64)0xFFFF)
+ return 0;
+
+ return (u16)pwr_efficiency_ratio;
+}
+
+static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
+ amdgpu_state->vclk && amdgpu_state->dclk)
+ return true;
+
+ return false;
+}
+
+static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *pi = adev->pm.dpm.priv;
+
+ return pi;
+}
+
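+/*
+ * Fill the per-level DPM2 power containment fields: level 0 is left
+ * unthrottled, and each higher level gets a maximum pulse-skip percentage
+ * derived from its sclk span plus the V^2 efficiency ratio against the
+ * previous level's standard VDDC.
+ */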
+static int si_populate_power_containment_values(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ SISLANDS_SMC_VOLTAGE_VALUE vddc;
+ u32 prev_sclk;
+ u32 max_sclk;
+ u32 min_sclk;
+ u16 prev_std_vddc;
+ u16 curr_std_vddc;
+ int i;
+ u16 pwr_efficiency_ratio;
+ u8 max_ps_percent;
+ bool disable_uvd_power_tune;
+ int ret;
+
+ if (!ni_pi->enable_power_containment)
+ return 0;
+
+ if (state->performance_level_count == 0)
+ return -EINVAL;
+
+ if (smc_state->levelCount != state->performance_level_count)
+ return -EINVAL;
+
+ disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
+
+ smc_state->levels[0].dpm2.MaxPS = 0;
+ smc_state->levels[0].dpm2.NearTDPDec = 0;
+ smc_state->levels[0].dpm2.AboveSafeInc = 0;
+ smc_state->levels[0].dpm2.BelowSafeInc = 0;
+ smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+ for (i = 1; i < state->performance_level_count; i++) {
+ prev_sclk = state->performance_levels[i-1].sclk;
+ max_sclk = state->performance_levels[i].sclk;
+ if (i == 1)
+ max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
+ else
+ max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
+
+ if (prev_sclk > max_sclk)
+ return -EINVAL;
+
+ if ((max_ps_percent == 0) ||
+ (prev_sclk == max_sclk) ||
+ disable_uvd_power_tune)
+ min_sclk = max_sclk;
+ else if (i == 1)
+ min_sclk = prev_sclk;
+ else
+ min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
+
+ if (min_sclk < state->performance_levels[0].sclk)
+ min_sclk = state->performance_levels[0].sclk;
+
+ if (min_sclk == 0)
+ return -EINVAL;
+
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ state->performance_levels[i-1].vddc, &vddc);
+ if (ret)
+ return ret;
+
+ ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
+ if (ret)
+ return ret;
+
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ state->performance_levels[i].vddc, &vddc);
+ if (ret)
+ return ret;
+
+ ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
+ if (ret)
+ return ret;
+
+ pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
+ prev_std_vddc, curr_std_vddc);
+
+ smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
+ smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
+ smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
+ smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
+ smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
+ }
+
+ return 0;
+}
+
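+/*
+ * Program per-level SQ power throttling.  Ramping is used only if every
+ * compile-time ramp parameter fits its register field and the level's
+ * sclk is at or above the ramping threshold; otherwise the throttle
+ * fields are saturated (all mask bits set).
+ */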
+static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ u32 sq_power_throttle, sq_power_throttle2;
+ bool enable_sq_ramping = ni_pi->enable_sq_ramping;
+ int i;
+
+ if (state->performance_level_count == 0)
+ return -EINVAL;
+
+ if (smc_state->levelCount != state->performance_level_count)
+ return -EINVAL;
+
+ if (adev->pm.dpm.sq_ramping_threshold == 0)
+ return -EINVAL;
+
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
+ enable_sq_ramping = false;
+
+ if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
+ enable_sq_ramping = false;
+
+ if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
+ enable_sq_ramping = false;
+
+ if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
+ enable_sq_ramping = false;
+
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ enable_sq_ramping = false;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ sq_power_throttle = 0;
+ sq_power_throttle2 = 0;
+
+ if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
+ enable_sq_ramping) {
+ sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
+ sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
+ sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
+ sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
+ sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
+ } else {
+ sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
+ sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ }
+
+ smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
+ smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
+ }
+
+ return 0;
+}
+
+static int si_enable_power_containment(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ bool enable)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (ni_pi->enable_power_containment) {
+ if (enable) {
+ if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ ni_pi->pc_enabled = false;
+ } else {
+ ni_pi->pc_enabled = true;
+ }
+ }
+ } else {
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ ni_pi->pc_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
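+/*
+ * Marshal the driver's DTE data into the big-endian SMC configuration
+ * block, clamping the filter-stage and temperature-dependent counts to
+ * the firmware table sizes, and upload it to SMC SRAM.
+ */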
+static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int ret = 0;
+ struct si_dte_data *dte_data = &si_pi->dte_data;
+ Smc_SIslands_DTE_Configuration *dte_tables = NULL;
+ u32 table_size;
+ u8 tdep_count;
+ u32 i;
+
+ if (dte_data == NULL)
+ si_pi->enable_dte = false;
+
+ if (!si_pi->enable_dte)
+ return 0;
+
+ if (dte_data->k <= 0)
+ return -EINVAL;
+
+ dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
+ if (dte_tables == NULL) {
+ si_pi->enable_dte = false;
+ return -ENOMEM;
+ }
+
+ table_size = dte_data->k;
+
+ if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
+ table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
+
+ tdep_count = dte_data->tdep_count;
+ if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
+ tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
+
+ dte_tables->K = cpu_to_be32(table_size);
+ dte_tables->T0 = cpu_to_be32(dte_data->t0);
+ dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
+ dte_tables->WindowSize = dte_data->window_size;
+ dte_tables->temp_select = dte_data->temp_select;
+ dte_tables->DTE_mode = dte_data->dte_mode;
+ dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
+
+ if (tdep_count > 0)
+ table_size--;
+
+ for (i = 0; i < table_size; i++) {
+ dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
+ dte_tables->R[i] = cpu_to_be32(dte_data->r[i]);
+ }
+
+ dte_tables->Tdep_count = tdep_count;
+
+ for (i = 0; i < (u32)tdep_count; i++) {
+ dte_tables->T_limits[i] = dte_data->t_limits[i];
+ dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
+ dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
+ }
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
+ (u8 *)dte_tables,
+ sizeof(Smc_SIslands_DTE_Configuration),
+ si_pi->sram_end);
+ kfree(dte_tables);
+
+ return ret;
+}
+
+static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
+ u16 *max, u16 *min)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct amdgpu_cac_leakage_table *table =
+ &adev->pm.dpm.dyn_state.cac_leakage_table;
+ u32 i;
+ u32 v0_loadline;
+
+ if (table == NULL)
+ return -EINVAL;
+
+ *max = 0;
+ *min = 0xFFFF;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].vddc > *max)
+ *max = table->entries[i].vddc;
+ if (table->entries[i].vddc < *min)
+ *min = table->entries[i].vddc;
+ }
+
+ if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
+ return -EINVAL;
+
+ v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
+
+ if (v0_loadline > 0xFFFFUL)
+ return -EINVAL;
+
+ *min = (u16)v0_loadline;
+
+ if ((*min > *max) || (*max == 0) || (*min == 0))
+ return -EINVAL;
+
+ return 0;
+}
+
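+/* Split the [min, max] VDDC range into LUT-sized steps, rounding up. */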
+static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
+{
+ return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
+ SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
+}
+
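+/*
+ * Fill the 2-D leakage LUT: rows step through temperature from t0 in
+ * t_step increments (converted to millidegrees) and columns step down
+ * from vddc_max; each entry is the scaled leakage divided by 4, capped
+ * at 0xFFFF and stored big-endian.
+ */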
+static int si_init_dte_leakage_table(struct amdgpu_device *adev,
+ PP_SIslands_CacConfig *cac_tables,
+ u16 vddc_max, u16 vddc_min, u16 vddc_step,
+ u16 t0, u16 t_step)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 leakage;
+ unsigned int i, j;
+ s32 t;
+ u32 smc_leakage;
+ u32 scaling_factor;
+ u16 voltage;
+
+ scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+ for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
+ t = (1000 * (i * t_step + t0));
+
+ for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+ voltage = vddc_max - (vddc_step * j);
+
+ si_calculate_leakage_for_v_and_t(adev,
+ &si_pi->powertune_data->leakage_coefficients,
+ voltage,
+ t,
+ si_pi->dyn_powertune_data.cac_leakage,
+ &leakage);
+
+ smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+ if (smc_leakage > 0xFFFF)
+ smc_leakage = 0xFFFF;
+
+ cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+ cpu_to_be16((u16)smc_leakage);
+ }
+ }
+ return 0;
+}
+
+static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
+ PP_SIslands_CacConfig *cac_tables,
+ u16 vddc_max, u16 vddc_min, u16 vddc_step)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 leakage;
+ unsigned int i, j;
+ u32 smc_leakage;
+ u32 scaling_factor;
+ u16 voltage;
+
+ scaling_factor = si_get_smc_power_scaling_factor(adev);
+
+ for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
+ voltage = vddc_max - (vddc_step * j);
+
+ si_calculate_leakage_for_v(adev,
+ &si_pi->powertune_data->leakage_coefficients,
+ si_pi->powertune_data->fixed_kt,
+ voltage,
+ si_pi->dyn_powertune_data.cac_leakage,
+ &leakage);
+
+ smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
+
+ if (smc_leakage > 0xFFFF)
+ smc_leakage = 0xFFFF;
+
+ for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
+ cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
+ cpu_to_be16((u16)smc_leakage);
+ }
+ return 0;
+}
+
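+/*
+ * Program the CAC window, build the leakage LUT (temperature-dependent
+ * when DTE or driver-side leakage calculation is active, simplified
+ * otherwise) and upload the CAC configuration to SMC SRAM.  On any
+ * failure CAC and power containment are disabled rather than left
+ * half-configured.
+ */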
+static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PP_SIslands_CacConfig *cac_tables = NULL;
+ u16 vddc_max, vddc_min, vddc_step;
+ u16 t0, t_step;
+ u32 load_line_slope, reg;
+ int ret = 0;
+ u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
+
+ if (!ni_pi->enable_cac)
+ return 0;
+
+ cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
+ if (!cac_tables)
+ return -ENOMEM;
+
+ reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
+ reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
+ WREG32(CG_CAC_CTRL, reg);
+
+ si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
+ si_pi->dyn_powertune_data.dc_pwr_value =
+ si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
+ si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
+ si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
+
+ si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
+
+ ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
+ if (ret)
+ goto done_free;
+
+ vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
+ vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
+ t_step = 4;
+ t0 = 60;
+
+ if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
+ ret = si_init_dte_leakage_table(adev, cac_tables,
+ vddc_max, vddc_min, vddc_step,
+ t0, t_step);
+ else
+ ret = si_init_simplified_leakage_table(adev, cac_tables,
+ vddc_max, vddc_min, vddc_step);
+ if (ret)
+ goto done_free;
+
+ load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
+
+ cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
+ cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
+ cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
+ cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
+ cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
+ cac_tables->R_LL = cpu_to_be32(load_line_slope);
+ cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
+ cac_tables->calculation_repeats = cpu_to_be32(2);
+ cac_tables->dc_cac = cpu_to_be32(0);
+ cac_tables->log2_PG_LKG_SCALE = 12;
+ cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
+ cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
+ cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
+ (u8 *)cac_tables,
+ sizeof(PP_SIslands_CacConfig),
+ si_pi->sram_end);
+
+ if (ret)
+ goto done_free;
+
+ ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
+
+done_free:
+ if (ret) {
+ ni_pi->enable_cac = false;
+ ni_pi->enable_power_containment = false;
+ }
+
+ kfree(cac_tables);
+
+ return ret;
+}
+
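+/*
+ * Walk a 0xFFFFFFFF-terminated register list, read-modify-writing each
+ * entry and going through the SMC indirect space for CGIND registers.
+ */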
+static int si_program_cac_config_registers(struct amdgpu_device *adev,
+ const struct si_cac_config_reg *cac_config_regs)
+{
+ const struct si_cac_config_reg *config_regs = cac_config_regs;
+ u32 data = 0, offset;
+
+ if (!config_regs)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ switch (config_regs->type) {
+ case SISLANDS_CACCONFIG_CGIND:
+ offset = SMC_CG_IND_START + config_regs->offset;
+ if (offset < SMC_CG_IND_END)
+ data = RREG32_SMC(offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+
+ switch (config_regs->type) {
+ case SISLANDS_CACCONFIG_CGIND:
+ offset = SMC_CG_IND_START + config_regs->offset;
+ if (offset < SMC_CG_IND_END)
+ WREG32_SMC(offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset, data);
+ break;
+ }
+ config_regs++;
+ }
+ return 0;
+}
+
+static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int ret;
+
+ if (!ni_pi->enable_cac || !ni_pi->cac_configuration_required)
+ return 0;
+
+ ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
+ if (ret)
+ return ret;
+ ret = si_program_cac_config_registers(adev, si_pi->cac_override);
+ if (ret)
+ return ret;
+ ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int si_enable_smc_cac(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ bool enable)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (ni_pi->enable_cac) {
+ if (enable) {
+ if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
+ if (ni_pi->support_cac_long_term_average) {
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
+ if (smc_result != PPSMC_Result_OK)
+ ni_pi->support_cac_long_term_average = false;
+ }
+
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ ni_pi->cac_enabled = false;
+ } else {
+ ni_pi->cac_enabled = true;
+ }
+
+ if (si_pi->enable_dte) {
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ }
+ }
+ } else if (ni_pi->cac_enabled) {
+ if (si_pi->enable_dte)
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
+
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
+
+ ni_pi->cac_enabled = false;
+
+ if (ni_pi->support_cac_long_term_average)
+ smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
+ }
+ }
+ return ret;
+}
+
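+/*
+ * Build the 256-entry SPLL divider table the SMC uses to program engine
+ * clocks, one entry per 512 units of sclk (5.12 MHz steps if sclk is in
+ * the usual 10 kHz units), packing the post/feedback dividers and spread
+ * spectrum parameters for each step.
+ */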
+static int si_init_smc_spll_table(struct amdgpu_device *adev)
+{
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
+ SISLANDS_SMC_SCLK_VALUE sclk_params;
+ u32 fb_div, p_div;
+ u32 clk_s, clk_v;
+ u32 sclk = 0;
+ int ret = 0;
+ u32 tmp;
+ int i;
+
+ if (si_pi->spll_table_start == 0)
+ return -EINVAL;
+
+ spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
+ if (spll_table == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < 256; i++) {
+ ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
+ if (ret)
+ break;
+ p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
+ fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
+ clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
+ clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
+
+ fb_div &= ~0x00001FFF;
+ fb_div >>= 1;
+ clk_v >>= 6;
+
+ if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
+ ret = -EINVAL;
+ if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
+ ret = -EINVAL;
+ if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+ ret = -EINVAL;
+ if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
+ ret = -EINVAL;
+
+ if (ret)
+ break;
+
+ tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
+ ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
+ spll_table->freq[i] = cpu_to_be32(tmp);
+
+ tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
+ ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
+ spll_table->ss[i] = cpu_to_be32(tmp);
+
+ sclk += 512;
+ }
+
+ if (!ret)
+ ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
+ (u8 *)spll_table,
+ sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
+ si_pi->sram_end);
+
+ if (ret)
+ ni_pi->enable_power_containment = false;
+
+ kfree(spll_table);
+
+ return ret;
+}
+
+struct si_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 max_sclk;
+ u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
+ { 0, 0, 0, 0 },
+};
+
+static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
+ u16 vce_voltage)
+{
+ u16 highest_leakage = 0;
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int i;
+
+ for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+ if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
+ highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
+ }
+
+ if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
+ return highest_leakage;
+
+ return vce_voltage;
+}
+
+static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
+ u32 evclk, u32 ecclk, u16 *voltage)
+{
+ u32 i;
+ int ret = -EINVAL;
+ struct amdgpu_vce_clock_voltage_dependency_table *table =
+ &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ if (((evclk == 0) && (ecclk == 0)) ||
+ (table && (table->count == 0))) {
+ *voltage = 0;
+ return 0;
+ }
+
+ for (i = 0; i < table->count; i++) {
+ if ((evclk <= table->entries[i].evclk) &&
+ (ecclk <= table->entries[i].ecclk)) {
+ *voltage = table->entries[i].v;
+ ret = 0;
+ break;
+ }
+ }
+
+ /* if no match return the highest voltage */
+ if (ret)
+ *voltage = table->entries[table->count - 1].v;
+
+ *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
+
+ return ret;
+}
+
+static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+{
+ u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+
+ return vblank_time < switch_limit;
+}
+
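+/*
+ * Copy the DRAM timing and burst-time parameters from one MC arbitration
+ * register set to another, then request that the memory controller switch
+ * to the destination set.
+ */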
+static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
+ u32 arb_freq_src, u32 arb_freq_dest)
+{
+ u32 mc_arb_dram_timing;
+ u32 mc_arb_dram_timing2;
+ u32 burst_time;
+ u32 mc_cg_config;
+
+ switch (arb_freq_src) {
+ case MC_CG_ARB_FREQ_F0:
+ mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+ mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+ burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
+ mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
+ burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
+ break;
+ case MC_CG_ARB_FREQ_F2:
+ mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
+ mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
+ burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
+ break;
+ case MC_CG_ARB_FREQ_F3:
+ mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
+ mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
+ burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (arb_freq_dest) {
+ case MC_CG_ARB_FREQ_F0:
+ WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+ WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+ WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+ WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+ WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
+ break;
+ case MC_CG_ARB_FREQ_F2:
+ WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
+ WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
+ WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
+ break;
+ case MC_CG_ARB_FREQ_F3:
+ WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
+ WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
+ WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
+ WREG32(MC_CG_CONFIG, mc_cg_config);
+ WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
+
+ return 0;
+}
+
+static void ni_update_current_ps(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct si_ps *new_ps = si_get_ps(rps);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+ eg_pi->current_rps = *rps;
+ ni_pi->current_ps = *new_ps;
+ eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
+}
+
+static void ni_update_requested_ps(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct si_ps *new_ps = si_get_ps(rps);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+
+ eg_pi->requested_rps = *rps;
+ ni_pi->requested_ps = *new_ps;
+ eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
+}
+
+static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_ps,
+ struct amdgpu_ps *old_ps)
+{
+ struct si_ps *new_state = si_get_ps(new_ps);
+ struct si_ps *current_state = si_get_ps(old_ps);
+
+ if ((new_ps->vclk == old_ps->vclk) &&
+ (new_ps->dclk == old_ps->dclk))
+ return;
+
+ if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
+ current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+ return;
+
+ amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
+ struct amdgpu_ps *new_ps,
+ struct amdgpu_ps *old_ps)
+{
+ struct si_ps *new_state = si_get_ps(new_ps);
+ struct si_ps *current_state = si_get_ps(old_ps);
+
+ if ((new_ps->vclk == old_ps->vclk) &&
+ (new_ps->dclk == old_ps->dclk))
+ return;
+
+ if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
+ current_state->performance_levels[current_state->performance_level_count - 1].sclk)
+ return;
+
+ amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
+}
+
+static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
+{
+ unsigned int i;
+
+ for (i = 0; i < table->count; i++)
+ if (voltage <= table->entries[i].value)
+ return table->entries[i].value;
+
+ return table->entries[table->count - 1].value;
+}
+
+static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
+ u32 max_clock, u32 requested_clock)
+{
+ unsigned int i;
+
+ if ((clocks == NULL) || (clocks->count == 0))
+ return (requested_clock < max_clock) ? requested_clock : max_clock;
+
+ for (i = 0; i < clocks->count; i++) {
+ if (clocks->values[i] >= requested_clock)
+ return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
+ }
+
+ return (clocks->values[clocks->count - 1] < max_clock) ?
+ clocks->values[clocks->count - 1] : max_clock;
+}
+
+static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
+ u32 max_mclk, u32 requested_mclk)
+{
+ return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
+ max_mclk, requested_mclk);
+}
+
+static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
+ u32 max_sclk, u32 requested_sclk)
+{
+ return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
+ max_sclk, requested_sclk);
+}
+
+static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
+ u32 *max_clock)
+{
+ u32 i, clock = 0;
+
+ if ((table == NULL) || (table->count == 0)) {
+ *max_clock = clock;
+ return;
+ }
+
+ for (i = 0; i < table->count; i++) {
+ if (clock < table->entries[i].clk)
+ clock = table->entries[i].clk;
+ }
+ *max_clock = clock;
+}
+
+static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
+ u32 clock, u16 max_voltage, u16 *voltage)
+{
+ u32 i;
+
+ if ((table == NULL) || (table->count == 0))
+ return;
+
+ for (i = 0; i < table->count; i++) {
+ if (clock <= table->entries[i].clk) {
+ if (*voltage < table->entries[i].v)
+ *voltage = (u16)((table->entries[i].v < max_voltage) ?
+ table->entries[i].v : max_voltage);
+ return;
+ }
+ }
+
+ *voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
+}
+
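+/*
+ * Enforce the platform's mclk:sclk ratio (when mclk leads) and maximum
+ * sclk-mclk delta (when sclk leads) by bumping the lagging clock to the
+ * nearest valid value.
+ */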
+static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
+ const struct amdgpu_clock_and_voltage_limits *max_limits,
+ struct rv7xx_pl *pl)
+{
+ if ((pl->mclk == 0) || (pl->sclk == 0))
+ return;
+
+ if (pl->mclk == pl->sclk)
+ return;
+
+ if (pl->mclk > pl->sclk) {
+ if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
+ pl->sclk = btc_get_valid_sclk(adev,
+ max_limits->sclk,
+ (pl->mclk +
+ (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
+ adev->pm.dpm.dyn_state.mclk_sclk_ratio);
+ } else {
+ if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
+ pl->mclk = btc_get_valid_mclk(adev,
+ max_limits->mclk,
+ pl->sclk -
+ adev->pm.dpm.dyn_state.sclk_mclk_delta);
+ }
+}
+
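+/*
+ * Keep VDDC and VDDCI within the platform's allowed delta by raising the
+ * lower rail to the nearest supported voltage, clamped to its maximum.
+ */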
+static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
+ u16 max_vddc, u16 max_vddci,
+ u16 *vddc, u16 *vddci)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ u16 new_voltage;
+
+ if ((*vddc == 0) || (*vddci == 0))
+ return;
+
+ if (*vddc > *vddci) {
+ if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+ new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
+ (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+ *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
+ }
+ } else {
+ if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
+ new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
+ (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
+ *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
+ }
+ }
+}
+
+static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
+ u32 sys_mask,
+ enum amdgpu_pcie_gen asic_gen,
+ enum amdgpu_pcie_gen default_gen)
+{
+ switch (asic_gen) {
+ case AMDGPU_PCIE_GEN1:
+ return AMDGPU_PCIE_GEN1;
+ case AMDGPU_PCIE_GEN2:
+ return AMDGPU_PCIE_GEN2;
+ case AMDGPU_PCIE_GEN3:
+ return AMDGPU_PCIE_GEN3;
+ default:
+ if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+ return AMDGPU_PCIE_GEN3;
+ else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+ return AMDGPU_PCIE_GEN2;
+ else
+ return AMDGPU_PCIE_GEN1;
+ }
+ return AMDGPU_PCIE_GEN1;
+}
+
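+/*
+ * Derive the (p, u) timing parameters from percentage i of reference
+ * count r_c: u is roughly half the bit width of (i * r_c / 100) >> p_b,
+ * and p is i_c scaled down by 4^u.
+ */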
+static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
+ u32 *p, u32 *u)
+{
+ u32 b_c = 0;
+ u32 i_c;
+ u32 tmp;
+
+ i_c = (i * r_c) / 100;
+ tmp = i_c >> p_b;
+
+ while (tmp) {
+ b_c++;
+ tmp >>= 1;
+ }
+
+ *u = (b_c + 1) / 2;
+ *p = i_c / (1 << (2 * (*u)));
+}
+
+static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
+{
+ u32 k, a, ah, al;
+ u32 t1;
+
+ if ((fl == 0) || (fh == 0) || (fl > fh))
+ return -EINVAL;
+
+ k = (100 * fh) / fl;
+ t1 = (t * (k - 100));
+ a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
+ a = (a + 5) / 10;
+ ah = ((a * t) + 5000) / 10000;
+ al = a - ah;
+
+ *th = t - ah;
+ *tl = t + al;
+
+ return 0;
+}
+
+static bool r600_is_uvd_state(u32 class, u32 class2)
+{
+ if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ return true;
+ if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+ return true;
+ if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+ return true;
+ if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+ return true;
+ if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+ return true;
+ return false;
+}
+
+static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
+{
+ return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
+}
+
+static void rv770_get_max_vddc(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ u16 vddc;
+
+ if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
+ pi->max_vddc = 0;
+ else
+ pi->max_vddc = vddc;
+}
+
+static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct amdgpu_atom_ss ss;
+
+ pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ if (pi->sclk_ss || pi->mclk_ss)
+ pi->dynamic_ss = true;
+ else
+ pi->dynamic_ss = false;
+}
+
+static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct si_ps *ps = si_get_ps(rps);
+ struct amdgpu_clock_and_voltage_limits *max_limits;
+ bool disable_mclk_switching = false;
+ bool disable_sclk_switching = false;
+ u32 mclk, sclk;
+ u16 vddc, vddci, min_vce_voltage = 0;
+ u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+ u32 max_sclk = 0, max_mclk = 0;
+ int i;
+ struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+ /* limit all SI kickers */
+ if (adev->asic_type == CHIP_PITCAIRN) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811) ||
+ (adev->pdev->device == 0x6816) ||
+ (adev->pdev->device == 0x6817) ||
+ (adev->pdev->device == 0x6806))
+ max_mclk = 120000;
+ } else if (adev->asic_type == CHIP_VERDE) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
+ (adev->pdev->device == 0x6820) ||
+ (adev->pdev->device == 0x6821) ||
+ (adev->pdev->device == 0x6822) ||
+ (adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682A) ||
+ (adev->pdev->device == 0x682B)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (adev->asic_type == CHIP_OLAND) {
+ if ((adev->pdev->revision == 0xC7) ||
+ (adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (adev->asic_type == CHIP_HAINAN) {
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6664) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ }
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (adev->pdev->vendor == p->chip_vendor &&
+ adev->pdev->device == p->chip_device &&
+ adev->pdev->subsystem_vendor == p->subsys_vendor &&
+ adev->pdev->subsystem_device == p->subsys_device) {
+ max_sclk = p->max_sclk;
+ max_mclk = p->max_mclk;
+ break;
+ }
+ ++p;
+ }
+
+ if (rps->vce_active) {
+ rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
+ rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
+ si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
+ &min_vce_voltage);
+ } else {
+ rps->evclk = 0;
+ rps->ecclk = 0;
+ }
+
+ if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+ si_dpm_vblank_too_short(adev))
+ disable_mclk_switching = true;
+
+ if (rps->vclk || rps->dclk) {
+ disable_mclk_switching = true;
+ disable_sclk_switching = true;
+ }
+
+ if (adev->pm.dpm.ac_power)
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
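+ /* clamp vddc so that it never decreases from one performance level to the next */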
+ for (i = ps->performance_level_count - 2; i >= 0; i--) {
+ if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
+ ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
+ }
+ if (!adev->pm.dpm.ac_power) {
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk > max_limits->mclk)
+ ps->performance_levels[i].mclk = max_limits->mclk;
+ if (ps->performance_levels[i].sclk > max_limits->sclk)
+ ps->performance_levels[i].sclk = max_limits->sclk;
+ if (ps->performance_levels[i].vddc > max_limits->vddc)
+ ps->performance_levels[i].vddc = max_limits->vddc;
+ if (ps->performance_levels[i].vddci > max_limits->vddci)
+ ps->performance_levels[i].vddci = max_limits->vddci;
+ }
+ }
+
+ /* limit clocks to max supported clocks based on voltage dependency tables */
+ btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ &max_sclk_vddc);
+ btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &max_mclk_vddci);
+ btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &max_mclk_vddc);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (max_sclk_vddc) {
+ if (ps->performance_levels[i].sclk > max_sclk_vddc)
+ ps->performance_levels[i].sclk = max_sclk_vddc;
+ }
+ if (max_mclk_vddci) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddci)
+ ps->performance_levels[i].mclk = max_mclk_vddci;
+ }
+ if (max_mclk_vddc) {
+ if (ps->performance_levels[i].mclk > max_mclk_vddc)
+ ps->performance_levels[i].mclk = max_mclk_vddc;
+ }
+ if (max_mclk) {
+ if (ps->performance_levels[i].mclk > max_mclk)
+ ps->performance_levels[i].mclk = max_mclk;
+ }
+ if (max_sclk) {
+ if (ps->performance_levels[i].sclk > max_sclk)
+ ps->performance_levels[i].sclk = max_sclk;
+ }
+ }
+
+ /* XXX validate the min clocks required for display */
+
+ if (disable_mclk_switching) {
+ mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
+ vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
+ } else {
+ mclk = ps->performance_levels[0].mclk;
+ vddci = ps->performance_levels[0].vddci;
+ }
+
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+ vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+ } else {
+ sclk = ps->performance_levels[0].sclk;
+ vddc = ps->performance_levels[0].vddc;
+ }
+
+ if (rps->vce_active) {
+ if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
+ sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
+ if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
+ mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
+ }
+
+ /* adjusted low state */
+ ps->performance_levels[0].sclk = sclk;
+ ps->performance_levels[0].mclk = mclk;
+ ps->performance_levels[0].vddc = vddc;
+ ps->performance_levels[0].vddci = vddci;
+
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[0].sclk;
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (sclk < ps->performance_levels[i].sclk)
+ sclk = ps->performance_levels[i].sclk;
+ }
+ for (i = 0; i < ps->performance_level_count; i++) {
+ ps->performance_levels[i].sclk = sclk;
+ ps->performance_levels[i].vddc = vddc;
+ }
+ } else {
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+ ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+ if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+ ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ }
+ }
+
+ if (disable_mclk_switching) {
+ mclk = ps->performance_levels[0].mclk;
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (mclk < ps->performance_levels[i].mclk)
+ mclk = ps->performance_levels[i].mclk;
+ }
+ for (i = 0; i < ps->performance_level_count; i++) {
+ ps->performance_levels[i].mclk = mclk;
+ ps->performance_levels[i].vddci = vddci;
+ }
+ } else {
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
+ ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
+ if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
+ ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
+ }
+ }
+
+ for (i = 0; i < ps->performance_level_count; i++)
+ btc_adjust_clock_combinations(adev, max_limits,
+ &ps->performance_levels[i]);
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].vddc < min_vce_voltage)
+ ps->performance_levels[i].vddc = min_vce_voltage;
+ btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ ps->performance_levels[i].sclk,
+ max_limits->vddc, &ps->performance_levels[i].vddc);
+ btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ ps->performance_levels[i].mclk,
+ max_limits->vddci, &ps->performance_levels[i].vddci);
+ btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ ps->performance_levels[i].mclk,
+ max_limits->vddc, &ps->performance_levels[i].vddc);
+ btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+ adev->clock.current_dispclk,
+ max_limits->vddc, &ps->performance_levels[i].vddc);
+ }
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ btc_apply_voltage_delta_rules(adev,
+ max_limits->vddc, max_limits->vddci,
+ &ps->performance_levels[i].vddc,
+ &ps->performance_levels[i].vddci);
+ }
+
+ ps->dc_compatible = true;
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
+ ps->dc_compatible = false;
+ }
+}
+
+#if 0
+static int si_read_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 *value)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ return amdgpu_si_read_smc_sram_dword(adev,
+ si_pi->soft_regs_start + reg_offset, value,
+ si_pi->sram_end);
+}
+#endif
+
+static int si_write_smc_soft_register(struct amdgpu_device *adev,
+ u16 reg_offset, u32 value)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ return amdgpu_si_write_smc_sram_dword(adev,
+ si_pi->soft_regs_start + reg_offset,
+ value, si_pi->sram_end);
+}
+
+static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
+{
+ bool ret = false;
+ u32 tmp, width, row, column, bank, density;
+ bool is_memory_gddr5, is_special;
+
+ tmp = RREG32(MC_SEQ_MISC0);
+ is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
+ is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
+ && (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
+
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
+ width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
+
+ tmp = RREG32(MC_ARB_RAMCFG);
+ row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
+ column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
+ bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
+
+ density = (1 << (row + column - 20 + bank)) * width;
+
+ if ((adev->pdev->device == 0x6819) &&
+ is_memory_gddr5 && is_special && (density == 0x400))
+ ret = true;
+
+ return ret;
+}
+
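+/* Leakage voltages live in the vbios under virtual voltage IDs in the
+ * 0xffxx range (SISLANDS_LEAKAGE_INDEX0 + i); cache the real voltage for
+ * each populated index so it can be looked up later.
+ */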
+static void si_get_leakage_vddc(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u16 vddc, count = 0;
+ int i, ret;
+
+ for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
+
+ if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
+ si_pi->leakage_voltage.entries[count].voltage = vddc;
+ si_pi->leakage_voltage.entries[count].leakage_index =
+ SISLANDS_LEAKAGE_INDEX0 + i;
+ count++;
+ }
+ }
+ si_pi->leakage_voltage.count = count;
+}
+
+static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
+ u32 index, u16 *leakage_voltage)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int i;
+
+ if (leakage_voltage == NULL)
+ return -EINVAL;
+
+ if ((index & 0xff00) != 0xff00)
+ return -EINVAL;
+
+ if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
+ return -EINVAL;
+
+ if (index < SISLANDS_LEAKAGE_INDEX0)
+ return -EINVAL;
+
+ for (i = 0; i < si_pi->leakage_voltage.count; i++) {
+ if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
+ *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
+ return 0;
+ }
+ }
+ return -EAGAIN;
+}
+
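+/* Map the set of active auto-throttle sources onto a single hardware DPM
+ * event source and enable/disable thermal protection to match.
+ */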
+static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ bool want_thermal_protection;
+ enum amdgpu_dpm_event_src dpm_event_src;
+
+ switch (sources) {
+ case 0:
+ default:
+ want_thermal_protection = false;
+ break;
+ case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ want_thermal_protection = true;
+ dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+ break;
+ case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ want_thermal_protection = true;
+ dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+ break;
+ case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+ want_thermal_protection = true;
+ dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+ break;
+ }
+
+ if (want_thermal_protection) {
+ WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
+ if (pi->thermal_protection)
+ WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ } else {
+ WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+ }
+}
+
+static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
+ enum amdgpu_dpm_auto_throttle_src source,
+ bool enable)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+ if (enable) {
+ if (!(pi->active_auto_throttle_sources & (1 << source))) {
+ pi->active_auto_throttle_sources |= 1 << source;
+ si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+ }
+ } else {
+ if (pi->active_auto_throttle_sources & (1 << source)) {
+ pi->active_auto_throttle_sources &= ~(1 << source);
+ si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
+ }
+ }
+}
+
+static void si_start_dpm(struct amdgpu_device *adev)
+{
+ WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_stop_dpm(struct amdgpu_device *adev)
+{
+ WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
+}
+
+static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
+{
+ if (enable)
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
+ else
+ WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+}
+
+#if 0
+static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
+ u32 thermal_level)
+{
+ PPSMC_Result ret;
+
+ if (thermal_level == 0) {
+ ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+ if (ret == PPSMC_Result_OK)
+ return 0;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
+{
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
+}
+#endif
+
+#if 0
+static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
+{
+ if (ac_power)
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+
+ return 0;
+}
+#endif
+
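+/* message parameters are handed to the SMC through the SMC_SCRATCH0
+ * register, written just before the message itself is issued
+ */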
+static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+ PPSMC_Msg msg, u32 parameter)
+{
+ WREG32(SMC_SCRATCH0, parameter);
+ return amdgpu_si_send_msg_to_smc(adev, msg);
+}
+
+static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
+{
+ if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+ enum amdgpu_dpm_forced_level level)
+{
+ struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
+ struct si_ps *ps = si_get_ps(rps);
+ u32 levels = ps->performance_level_count;
+
+ if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
+ return -EINVAL;
+ } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
+ return -EINVAL;
+ } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ adev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
+#if 0
+static int si_set_boot_state(struct amdgpu_device *adev)
+{
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+#endif
+
+static int si_set_sw_state(struct amdgpu_device *adev)
+{
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int si_halt_smc(struct amdgpu_device *adev)
+{
+ if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int si_resume_smc(struct amdgpu_device *adev)
+{
+ if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static void si_dpm_start_smc(struct amdgpu_device *adev)
+{
+ amdgpu_si_program_jump_on_start(adev);
+ amdgpu_si_start_smc(adev);
+ amdgpu_si_smc_clock(adev, true);
+}
+
+static void si_dpm_stop_smc(struct amdgpu_device *adev)
+{
+ amdgpu_si_reset_smc(adev);
+ amdgpu_si_smc_clock(adev, false);
+}
+
+static int si_process_firmware_header(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->state_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->soft_regs_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->mc_reg_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->fan_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->arb_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->cac_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->dte_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->spll_table_start = tmp;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev,
+ SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
+ SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ si_pi->papm_cfg_table_start = tmp;
+
+ return ret;
+}
+
+static void si_read_clock_registers(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
+ si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
+ si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
+ si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
+ si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
+ si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
+ si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+ si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+ si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+ si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+ si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+ si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+ si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+ si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+ si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void si_enable_thermal_protection(struct amdgpu_device *adev,
+ bool enable)
+{
+ if (enable)
+ WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
+ else
+ WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
+}
+
+static void si_enable_acpi_power_management(struct amdgpu_device *adev)
+{
+ WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
+}
+
+#if 0
+static int si_enter_ulp_state(struct amdgpu_device *adev)
+{
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+ udelay(25000);
+
+ return 0;
+}
+
+static int si_exit_ulp_state(struct amdgpu_device *adev)
+{
+ int i;
+
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+ udelay(7000);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(SMC_RESP_0) == 1)
+ break;
+ udelay(1000);
+ }
+
+ return 0;
+}
+#endif
+
+static int si_notify_smc_display_change(struct amdgpu_device *adev,
+ bool has_display)
+{
+ PPSMC_Msg msg = has_display ?
+ PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+ return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static void si_program_response_times(struct amdgpu_device *adev)
+{
+ u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+ u32 vddc_dly, acpi_dly, vbi_dly;
+ u32 reference_clock;
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
+
+ voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
+ backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
+
+ if (voltage_response_time == 0)
+ voltage_response_time = 1000;
+
+ acpi_delay_time = 15000;
+ vbi_time_out = 100000;
+
+ reference_clock = amdgpu_asic_get_xclk(adev);
+
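+ /* xclk is in 10 kHz units and the timeouts above are presumably in us,
+ * so ticks = us * xclk / 100
+ */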
+ vddc_dly = (voltage_response_time * reference_clock) / 100;
+ acpi_dly = (acpi_delay_time * reference_clock) / 100;
+ vbi_dly = (vbi_time_out * reference_clock) / 100;
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
+}
+
+static void si_program_ds_registers(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ u32 tmp;
+
+ /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
+ if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
+ tmp = 0x10;
+ else
+ tmp = 0x1;
+
+ if (eg_pi->sclk_deep_sleep) {
+ WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
+ WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
+ ~AUTOSCALE_ON_SS_CLEAR);
+ }
+}
+
+static void si_program_display_gap(struct amdgpu_device *adev)
+{
+ u32 tmp, pipe;
+ int i;
+
+ tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ if (adev->pm.dpm.new_active_crtc_count > 0)
+ tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ else
+ tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+ if (adev->pm.dpm.new_active_crtc_count > 1)
+ tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ else
+ tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+ WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+
+ tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
+ pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
+
+ if ((adev->pm.dpm.new_active_crtc_count > 0) &&
+ (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
+ /* find the first active crtc */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->pm.dpm.new_active_crtcs & (1 << i))
+ break;
+ }
+ if (i == adev->mode_info.num_crtc)
+ pipe = 0;
+ else
+ pipe = i;
+
+ tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
+ tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
+ WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
+ }
+
+ /* Setting this to false forces the performance state to low if the crtcs
+ * are disabled. This can be a problem on PowerXpress systems or if you
+ * want to use the card for offscreen rendering or compute while no crtcs
+ * are enabled.
+ */
+ si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+}
+
+static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+ if (enable) {
+ if (pi->sclk_ss)
+ WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
+ } else {
+ WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
+ WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
+ }
+}
+
+static void si_setup_bsp(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ u32 xclk = amdgpu_asic_get_xclk(adev);
+
+ r600_calculate_u_and_p(pi->asi,
+ xclk,
+ 16,
+ &pi->bsp,
+ &pi->bsu);
+
+ r600_calculate_u_and_p(pi->pasi,
+ xclk,
+ 16,
+ &pi->pbsp,
+ &pi->pbsu);
+
+ pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+ pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
+
+ WREG32(CG_BSP, pi->dsp);
+}
+
+static void si_program_git(struct amdgpu_device *adev)
+{
+ WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
+}
+
+static void si_program_tp(struct amdgpu_device *adev)
+{
+ int i;
+ enum r600_td td = R600_TD_DFLT;
+
+ for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
+ WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
+
+ if (td == R600_TD_AUTO)
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
+ else
+ WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
+
+ if (td == R600_TD_UP)
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
+
+ if (td == R600_TD_DOWN)
+ WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
+}
+
+static void si_program_tpp(struct amdgpu_device *adev)
+{
+ WREG32(CG_TPC, R600_TPC_DFLT);
+}
+
+static void si_program_sstp(struct amdgpu_device *adev)
+{
+ WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void si_enable_display_gap(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+
+ tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
+ tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
+ tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
+ DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
+ WREG32(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void si_program_vc(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+ WREG32(CG_FTV, pi->vrc);
+}
+
+static void si_clear_vc(struct amdgpu_device *adev)
+{
+ WREG32(CG_FTV, 0);
+}
+
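+/* Memory clocks here are in 10 kHz units: index 0 covers anything below
+ * 100 MHz, 0x0f anything at or above 800 MHz, in 50 MHz steps between.
+ */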
+static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+{
+ u8 mc_para_index;
+
+ if (memory_clock < 10000)
+ mc_para_index = 0;
+ else if (memory_clock >= 80000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
+ return mc_para_index;
+}
+
+static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+{
+ u8 mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500)
+ mc_para_index = 0x00;
+ else if (memory_clock > 47500)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (u8)((memory_clock - 10000) / 2500);
+ } else {
+ if (memory_clock < 65000)
+ mc_para_index = 0x00;
+ else if (memory_clock > 135000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (u8)((memory_clock - 60000) / 5000);
+ }
+ return mc_para_index;
+}
+
+static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ bool strobe_mode = false;
+ u8 result = 0;
+
+ if (mclk <= pi->mclk_strobe_mode_threshold)
+ strobe_mode = true;
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
+ else
+ result = si_get_ddr3_mclk_frequency_ratio(mclk);
+
+ if (strobe_mode)
+ result |= SISLANDS_SMC_STROBE_ENABLE;
+
+ return result;
+}
+
+static int si_upload_firmware(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ amdgpu_si_reset_smc(adev);
+ amdgpu_si_smc_clock(adev, false);
+
+ return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
+}
+
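+/* With n bits set in the phase-shedding GPIO mask, the voltage table must
+ * have exactly 2^n entries and the limits table one per transition (2^n - 1).
+ */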
+static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
+ const struct atom_voltage_table *table,
+ const struct amdgpu_phase_shedding_limits_table *limits)
+{
+ u32 data, num_bits, num_levels;
+
+ if ((table == NULL) || (limits == NULL))
+ return false;
+
+ data = table->mask_low;
+
+ num_bits = hweight32(data);
+
+ if (num_bits == 0)
+ return false;
+
+ num_levels = (1 << num_bits);
+
+ if (table->count != num_levels)
+ return false;
+
+ if (limits->count != (num_levels - 1))
+ return false;
+
+ return true;
+}
+
+static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
+ u32 max_voltage_steps,
+ struct atom_voltage_table *voltage_table)
+{
+ unsigned int i, diff;
+
+ if (voltage_table->count <= max_voltage_steps)
+ return;
+
+ diff = voltage_table->count - max_voltage_steps;
+
+ for (i = 0; i < max_voltage_steps; i++)
+ voltage_table->entries[i] = voltage_table->entries[i + diff];
+
+ voltage_table->count = max_voltage_steps;
+}
+
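+/* With SVI2 the regulator is programmed by voltage value rather than via a
+ * GPIO lookup table, so only the values are copied; mask/smio stay zero.
+ */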
+static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
+ struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
+static int si_construct_voltage_tables(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int ret;
+
+ if (pi->voltage_control) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+
+ if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(adev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddc_voltage_table);
+ } else if (si_pi->voltage_control_svi2) {
+ ret = si_get_svi2_voltage_table(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+
+ if (eg_pi->vddci_control) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+
+ if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(adev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddci_voltage_table);
+ }
+ if (si_pi->vddci_control_svi2) {
+ ret = si_get_svi2_voltage_table(adev,
+ &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &eg_pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->mvdd_control) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
+
+ if (ret) {
+ pi->mvdd_control = false;
+ return ret;
+ }
+
+ if (si_pi->mvdd_voltage_table.count == 0) {
+ pi->mvdd_control = false;
+ return -EINVAL;
+ }
+
+ if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(adev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &si_pi->mvdd_voltage_table);
+ }
+
+ if (si_pi->vddc_phase_shed_control) {
+ ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
+ if (ret)
+ si_pi->vddc_phase_shed_control = false;
+
+ if ((si_pi->vddc_phase_shed_table.count == 0) ||
+ (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
+ si_pi->vddc_phase_shed_control = false;
+ }
+
+ return 0;
+}
+
+static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
+ const struct atom_voltage_table *voltage_table,
+ SISLANDS_SMC_STATETABLE *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < voltage_table->count; i++)
+ table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
+}
+
+static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
+ SISLANDS_SMC_STATETABLE *table)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u8 i;
+
+ if (si_pi->voltage_control_svi2) {
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+ si_pi->svc_gpio_id);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+ si_pi->svd_gpio_id);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+ 2);
+ } else {
+ if (eg_pi->vddc_voltage_table.count) {
+ si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+ for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+ if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+ table->maxVDDCIndexInPPTable = i;
+ break;
+ }
+ }
+ }
+
+ if (eg_pi->vddci_voltage_table.count) {
+ si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
+
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ }
+
+ if (si_pi->mvdd_voltage_table.count) {
+ si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
+
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+ cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+ }
+
+ if (si_pi->vddc_phase_shed_control) {
+ if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+ si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
+
+ table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
+ cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+ (u32)si_pi->vddc_phase_shed_table.phase_delay);
+ } else {
+ si_pi->vddc_phase_shed_control = false;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int si_populate_voltage_value(struct amdgpu_device *adev,
+ const struct atom_voltage_table *table,
+ u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+ unsigned int i;
+
+ for (i = 0; i < table->count; i++) {
+ if (value <= table->entries[i].value) {
+ voltage->index = (u8)i;
+ voltage->value = cpu_to_be16(table->entries[i].value);
+ break;
+ }
+ }
+
+ if (i >= table->count)
+ return -EINVAL;
+
+ return 0;
+}
+
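+/* MVDD is a simple two-point scheme: the lowest table entry at or below
+ * the split frequency, the highest entry above it.
+ */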
+static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
+ SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (pi->mvdd_control) {
+ if (mclk <= pi->mvdd_split_frequency)
+ voltage->index = 0;
+ else
+ voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
+
+ voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
+ }
+ return 0;
+}
+
+static int si_get_std_voltage_value(struct amdgpu_device *adev,
+ SISLANDS_SMC_VOLTAGE_VALUE *voltage,
+ u16 *std_voltage)
+{
+ u16 v_index;
+ bool voltage_found = false;
+ *std_voltage = be16_to_cpu(voltage->value);
+
+ if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
+ if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+ return -EINVAL;
+
+ for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (be16_to_cpu(voltage->value) ==
+ (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+ *std_voltage =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+ else
+ *std_voltage =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+ break;
+ }
+ }
+
+ if (!voltage_found) {
+ for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (be16_to_cpu(voltage->value) <=
+ (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+ *std_voltage =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
+ else
+ *std_voltage =
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
+ break;
+ }
+ }
+ }
+ } else {
+ if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
+ *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
+ }
+ }
+
+ return 0;
+}
+
+static int si_populate_std_voltage_value(struct amdgpu_device *adev,
+ u16 value, u8 index,
+ SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+ voltage->index = index;
+ voltage->value = cpu_to_be16(value);
+
+ return 0;
+}
+
+static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
+ const struct amdgpu_phase_shedding_limits_table *limits,
+ u16 voltage, u32 sclk, u32 mclk,
+ SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
+{
+ unsigned int i;
+
+ for (i = 0; i < limits->count; i++) {
+ if ((voltage <= limits->entries[i].voltage) &&
+ (sclk <= limits->entries[i].sclk) &&
+ (mclk <= limits->entries[i].mclk))
+ break;
+ }
+
+ smc_voltage->phase_settings = (u8)i;
+
+ return 0;
+}
+
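+/* The top byte of the first dword of the arb table selects the active MC
+ * arbitration register set; initial, driver and ULV states each use their
+ * own set.
+ */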
+static int si_init_arb_table_index(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+ return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
+ tmp, si_pi->sram_end);
+}
+
+static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
+{
+ return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int si_reset_to_default(struct amdgpu_device *adev)
+{
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+ int ret;
+
+ ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
+ &tmp, si_pi->sram_end);
+ if (ret)
+ return ret;
+
+ tmp = (tmp >> 24) & 0xff;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
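+/* Derive the refresh rate the arbiter needs, in engine-clock ticks, from
+ * the DRAM row count (MC_ARB_RAMCFG) and the refresh interval selector
+ * (MC_SEQ_MISC0).
+ */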
+static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
+ u32 engine_clock)
+{
+ u32 dram_rows;
+ u32 dram_refresh_rate;
+ u32 mc_arb_rfsh_rate;
+ u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+
+ if (tmp >= 4)
+ dram_rows = 16384;
+ else
+ dram_rows = 1 << (tmp + 10);
+
+ dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
+ mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
+
+ return mc_arb_rfsh_rate;
+}
+
+static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
+ struct rv7xx_pl *pl,
+ SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
+{
+ u32 dram_timing;
+ u32 dram_timing2;
+ u32 burst_time;
+
+ arb_regs->mc_arb_rfsh_rate =
+ (u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
+
+ amdgpu_atombios_set_engine_dram_timings(adev,
+ pl->sclk,
+ pl->mclk);
+
+ dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+ dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+ burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+ arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
+ arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
+ arb_regs->mc_arb_burst_time = (u8)burst_time;
+
+ return 0;
+}
+
+static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ unsigned int first_arb_set)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+ int i, ret = 0;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
+ if (ret)
+ break;
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ si_pi->arb_table_start +
+ offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+ sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
+ (u8 *)&arb_regs,
+ sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+ si_pi->sram_end);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state)
+{
+ return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
+ SISLANDS_DRIVER_STATE_ARB_INDEX);
+}
+
+static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
+ struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (pi->mvdd_control)
+ return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
+ si_pi->mvdd_bootup_value, voltage);
+
+ return 0;
+}
+
+static int si_populate_smc_initial_state(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_initial_state,
+ SISLANDS_SMC_STATETABLE *table)
+{
+ struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 reg;
+ int ret;
+
+ table->initialState.levels[0].mclk.vDLL_CNTL =
+ cpu_to_be32(si_pi->clock_registers.dll_cntl);
+ table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
+ table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
+ table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
+ table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
+ table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
+ table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
+ table->initialState.levels[0].mclk.vMPLL_SS =
+ cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+ table->initialState.levels[0].mclk.vMPLL_SS2 =
+ cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+ table->initialState.levels[0].mclk.mclk_value =
+ cpu_to_be32(initial_state->performance_levels[0].mclk);
+
+ table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
+ table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
+ table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
+ table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
+ table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
+ table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
+
+ table->initialState.levels[0].sclk.sclk_value =
+ cpu_to_be32(initial_state->performance_levels[0].sclk);
+
+ table->initialState.levels[0].arbRefreshState =
+ SISLANDS_INITIAL_STATE_ARB_INDEX;
+
+ table->initialState.levels[0].ACIndex = 0;
+
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ initial_state->performance_levels[0].vddc,
+ &table->initialState.levels[0].vddc);
+
+ if (!ret) {
+ u16 std_vddc;
+
+ ret = si_get_std_voltage_value(adev,
+ &table->initialState.levels[0].vddc,
+ &std_vddc);
+ if (!ret)
+ si_populate_std_voltage_value(adev, std_vddc,
+ table->initialState.levels[0].vddc.index,
+ &table->initialState.levels[0].std_vddc);
+ }
+
+ if (eg_pi->vddci_control)
+ si_populate_voltage_value(adev,
+ &eg_pi->vddci_voltage_table,
+ initial_state->performance_levels[0].vddci,
+ &table->initialState.levels[0].vddci);
+
+ if (si_pi->vddc_phase_shed_control)
+ si_populate_phase_shedding_value(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ initial_state->performance_levels[0].vddc,
+ initial_state->performance_levels[0].sclk,
+ initial_state->performance_levels[0].mclk,
+ &table->initialState.levels[0].vddc);
+
+ si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
+
+ reg = CG_R(0xffff) | CG_L(0);
+ table->initialState.levels[0].aT = cpu_to_be32(reg);
+ table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+ table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ table->initialState.levels[0].strobeMode =
+ si_get_strobe_mode_settings(adev,
+ initial_state->performance_levels[0].mclk);
+
+ if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
+ table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+ else
+ table->initialState.levels[0].mcFlags = 0;
+ }
+
+ table->initialState.levelCount = 1;
+
+ table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
+
+ table->initialState.levels[0].dpm2.MaxPS = 0;
+ table->initialState.levels[0].dpm2.NearTDPDec = 0;
+ table->initialState.levels[0].dpm2.AboveSafeInc = 0;
+ table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+ table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+ reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+ reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+ return 0;
+}
+
+static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
+ SISLANDS_SMC_STATETABLE *table)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+ u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+ u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+ u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+ u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+ u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+ u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+ u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+ u32 reg;
+ int ret;
+
+ table->ACPIState = table->initialState;
+
+ table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (pi->acpi_vddc) {
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+ if (!ret) {
+ u16 std_vddc;
+
+ ret = si_get_std_voltage_value(adev,
+ &table->ACPIState.levels[0].vddc, &std_vddc);
+ if (!ret)
+ si_populate_std_voltage_value(adev, std_vddc,
+ table->ACPIState.levels[0].vddc.index,
+ &table->ACPIState.levels[0].std_vddc);
+ }
+ table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+
+ if (si_pi->vddc_phase_shed_control) {
+ si_populate_phase_shedding_value(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ pi->acpi_vddc,
+ 0,
+ 0,
+ &table->ACPIState.levels[0].vddc);
+ }
+ } else {
+ ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
+ pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+ if (!ret) {
+ u16 std_vddc;
+
+ ret = si_get_std_voltage_value(adev,
+ &table->ACPIState.levels[0].vddc, &std_vddc);
+
+ if (!ret)
+ si_populate_std_voltage_value(adev, std_vddc,
+ table->ACPIState.levels[0].vddc.index,
+ &table->ACPIState.levels[0].std_vddc);
+ }
+ table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ AMDGPU_PCIE_GEN1);
+
+ if (si_pi->vddc_phase_shed_control)
+ si_populate_phase_shedding_value(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ pi->min_vddc_in_table,
+ 0,
+ 0,
+ &table->ACPIState.levels[0].vddc);
+ }
+
+ if (pi->acpi_vddc) {
+ if (eg_pi->acpi_vddci)
+ si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+ eg_pi->acpi_vddci,
+ &table->ACPIState.levels[0].vddci);
+ }
+
+ mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+ spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+ table->ACPIState.levels[0].mclk.vDLL_CNTL =
+ cpu_to_be32(dll_cntl);
+ table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ cpu_to_be32(mclk_pwrmgt_cntl);
+ table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ cpu_to_be32(mpll_ad_func_cntl);
+ table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ cpu_to_be32(mpll_dq_func_cntl);
+ table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ cpu_to_be32(mpll_func_cntl);
+ table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ cpu_to_be32(mpll_func_cntl_1);
+ table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ cpu_to_be32(mpll_func_cntl_2);
+ table->ACPIState.levels[0].mclk.vMPLL_SS =
+ cpu_to_be32(si_pi->clock_registers.mpll_ss1);
+ table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+ cpu_to_be32(si_pi->clock_registers.mpll_ss2);
+
+ table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ cpu_to_be32(spll_func_cntl);
+ table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ cpu_to_be32(spll_func_cntl_2);
+ table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ cpu_to_be32(spll_func_cntl_3);
+ table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ cpu_to_be32(spll_func_cntl_4);
+
+ table->ACPIState.levels[0].mclk.mclk_value = 0;
+ table->ACPIState.levels[0].sclk.sclk_value = 0;
+
+ si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
+
+ if (eg_pi->dynamic_ac_timing)
+ table->ACPIState.levels[0].ACIndex = 0;
+
+ table->ACPIState.levels[0].dpm2.MaxPS = 0;
+ table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
+ table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
+ table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+ table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+
+ reg = MIN_POWER_MASK | MAX_POWER_MASK;
+ table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+
+ reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
+ table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+
+ return 0;
+}
+
+static int si_populate_ulv_state(struct amdgpu_device *adev,
+ SISLANDS_SMC_SWSTATE *state)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+ u32 sclk_in_sr = 1350; /* ??? */
+ int ret;
+
+ ret = si_convert_power_level_to_smc(adev, &ulv->pl,
+ &state->levels[0]);
+ if (!ret) {
+ if (eg_pi->sclk_deep_sleep) {
+ if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+ state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+ else
+ state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+ }
+ if (ulv->one_pcie_lane_in_ulv)
+ state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
+ state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+ state->levels[0].ACIndex = 1;
+ state->levels[0].std_vddc = state->levels[0].vddc;
+ state->levelCount = 1;
+
+ state->flags |= PPSMC_SWSTATE_FLAG_DC;
+ }
+
+ return ret;
+}
+
+static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+ SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
+ int ret;
+
+ ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
+ &arb_regs);
+ if (ret)
+ return ret;
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
+ ulv->volt_change_delay);
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ si_pi->arb_table_start +
+ offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
+ sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
+ (u8 *)&arb_regs,
+ sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
+ si_pi->sram_end);
+
+ return ret;
+}
+
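+/* clocks are in 10 kHz units, so the split point below is 300 MHz */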
+static void si_get_mvdd_configuration(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+
+ pi->mvdd_split_frequency = 30000;
+}
+
+static int si_init_smc_table(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
+ const struct si_ulv_param *ulv = &si_pi->ulv;
+ SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
+ int ret;
+ u32 lane_width;
+ u32 vr_hot_gpio;
+
+ si_populate_smc_voltage_tables(adev, table);
+
+ switch (adev->pm.int_thermal_type) {
+ case THERMAL_TYPE_SI:
+ case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+ table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
+ break;
+ case THERMAL_TYPE_NONE:
+ table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
+ break;
+ default:
+ table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
+ break;
+ }
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+ table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
+ if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
+ table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
+ }
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+ table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
+ table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
+ table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
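+ /* backbias_response_time appears to be repurposed to carry the VR_HOT GPIO id */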
+ vr_hot_gpio = adev->pm.dpm.backbias_response_time;
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
+ vr_hot_gpio);
+ }
+
+ ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
+ if (ret)
+ return ret;
+
+ ret = si_populate_smc_acpi_state(adev, table);
+ if (ret)
+ return ret;
+
+ table->driverState = table->initialState;
+
+ ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
+ SISLANDS_INITIAL_STATE_ARB_INDEX);
+ if (ret)
+ return ret;
+
+ if (ulv->supported && ulv->pl.vddc) {
+ ret = si_populate_ulv_state(adev, &table->ULVState);
+ if (ret)
+ return ret;
+
+ ret = si_program_ulv_memory_timing_parameters(adev);
+ if (ret)
+ return ret;
+
+ WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
+ WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+
+ lane_width = amdgpu_get_pcie_lanes(adev);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+ } else {
+ table->ULVState = table->initialState;
+ }
+
+ return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
+ (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
+ si_pi->sram_end);
+}
+
+static int si_calculate_sclk_params(struct amdgpu_device *adev,
+ u32 engine_clock,
+ SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct atom_clock_dividers dividers;
+ u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
+ u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
+ u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
+ u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
+ u64 tmp;
+ u32 reference_clock = adev->clock.spll.reference_freq;
+ u32 reference_divider;
+ u32 fbdiv;
+ int ret;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ engine_clock, false, &dividers);
+ if (ret)
+ return ret;
+
+ reference_divider = 1 + dividers.ref_div;
+
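+ /* feedback divider in fixed point with 14 fractional bits (the 16384
+ * factor); the fraction is used with dithering enabled (SPLL_DITHEN)
+ */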
+ tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
+ do_div(tmp, reference_clock);
+ fbdiv = (u32) tmp;
+
+ spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
+ spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
+ spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
+
+ spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= SCLK_MUX_SEL(2);
+
+ spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+ spll_func_cntl_3 |= SPLL_DITHEN;
+
+ if (pi->sclk_ss) {
+ struct amdgpu_atom_ss ss;
+ u32 vco_freq = engine_clock * dividers.post_div;
+
+ if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+ u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+ u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum &= ~CLK_S_MASK;
+ cg_spll_spread_spectrum |= CLK_S(clk_s);
+ cg_spll_spread_spectrum |= SSEN;
+
+ cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ }
+ }
+
+ sclk->sclk_value = engine_clock;
+ sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
+ sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
+ sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
+ sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
+ sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
+ sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
+
+ return 0;
+}
+
+static int si_populate_sclk_value(struct amdgpu_device *adev,
+ u32 engine_clock,
+ SISLANDS_SMC_SCLK_VALUE *sclk)
+{
+ SISLANDS_SMC_SCLK_VALUE sclk_tmp;
+ int ret;
+
+ ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
+ if (!ret) {
+ sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
+ sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
+ sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
+ sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
+ sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
+ sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
+ sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
+ }
+
+ return ret;
+}
+
+static int si_populate_mclk_value(struct amdgpu_device *adev,
+ u32 engine_clock,
+ u32 memory_clock,
+ SISLANDS_SMC_MCLK_VALUE *mclk,
+ bool strobe_mode,
+ bool dll_state_on)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 dll_cntl = si_pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
+ u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
+ u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
+ u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
+ u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
+ u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
+ struct atom_mpll_param mpll_param;
+ int ret;
+
+ ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
+ if (ret)
+ return ret;
+
+ mpll_func_cntl &= ~BWCTRL_MASK;
+ mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+ mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+ mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+ CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+ mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+ mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+ mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+ YCLK_POST_DIV(mpll_param.post_div);
+ }
+
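+	/* Memory SS uses the nominal data rate: 4x the memory clock for GDDR5, 2x otherwise. */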
+ if (pi->mclk_ss) {
+ struct amdgpu_atom_ss ss;
+ u32 freq_nom;
+ u32 tmp;
+ u32 reference_clock = adev->clock.mpll.reference_freq;
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+ freq_nom = memory_clock * 4;
+ else
+ freq_nom = memory_clock * 2;
+
+ tmp = freq_nom / reference_clock;
+ tmp = tmp * tmp;
+ if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+ u32 clks = reference_clock * 5 / ss.rate;
+ u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 &= ~CLKV_MASK;
+ mpll_ss1 |= CLKV(clkv);
+
+ mpll_ss2 &= ~CLKS_MASK;
+ mpll_ss2 |= CLKS(clks);
+ }
+ }
+
+ mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+ mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+ if (dll_state_on)
+ mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+ else
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ mclk->mclk_value = cpu_to_be32(memory_clock);
+ mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
+ mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
+ mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
+ mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+ mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+ mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+ mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
+ mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
+ mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
+
+ return 0;
+}
+
+static void si_populate_smc_sp(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct si_ps *ps = si_get_ps(amdgpu_state);
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ int i;
+
+ for (i = 0; i < ps->performance_level_count - 1; i++)
+ smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
+
+ smc_state->levels[ps->performance_level_count - 1].bSP =
+ cpu_to_be32(pi->psp);
+}
+
+static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
+ struct rv7xx_pl *pl,
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ int ret;
+ bool dll_state_on;
+ u16 std_vddc;
+ bool gmc_pg = false;
+
+ if (eg_pi->pcie_performance_request &&
+ (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
+ level->gen2PCIE = (u8)si_pi->force_pcie_gen;
+ else
+ level->gen2PCIE = (u8)pl->pcie_gen;
+
+ ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
+ if (ret)
+ return ret;
+
+ level->mcFlags = 0;
+
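+	/* MCLK stutter is only enabled below the threshold clock, with UVD idle, stutter allowed in DPG, and at most two active CRTCs. */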
+ if (pi->mclk_stutter_mode_threshold &&
+ (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+ !eg_pi->uvd_enabled &&
+ (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+ (adev->pm.dpm.new_active_crtc_count <= 2)) {
+ level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
+
+ if (gmc_pg)
+ level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
+ }
+
+ if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+ if (pl->mclk > pi->mclk_edc_enable_threshold)
+ level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
+
+ if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
+ level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
+
+ level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
+
+ if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
+ if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
+ ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ else
+ dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+ } else {
+ dll_state_on = false;
+ }
+ } else {
+ level->strobeMode = si_get_strobe_mode_settings(adev,
+ pl->mclk);
+
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ }
+
+ ret = si_populate_mclk_value(adev,
+ pl->sclk,
+ pl->mclk,
+ &level->mclk,
+ (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
+ if (ret)
+ return ret;
+
+ ret = si_populate_voltage_value(adev,
+ &eg_pi->vddc_voltage_table,
+ pl->vddc, &level->vddc);
+ if (ret)
+ return ret;
+
+ ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
+ if (ret)
+ return ret;
+
+ ret = si_populate_std_voltage_value(adev, std_vddc,
+ level->vddc.index, &level->std_vddc);
+ if (ret)
+ return ret;
+
+ if (eg_pi->vddci_control) {
+ ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
+ pl->vddci, &level->vddci);
+ if (ret)
+ return ret;
+ }
+
+ if (si_pi->vddc_phase_shed_control) {
+ ret = si_populate_phase_shedding_value(adev,
+ &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ pl->vddc,
+ pl->sclk,
+ pl->mclk,
+ &level->vddc);
+ if (ret)
+ return ret;
+ }
+
+ level->MaxPoweredUpCU = si_pi->max_cu;
+
+ ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
+
+ return ret;
+}
+
+static int si_populate_smc_t(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ u32 a_t;
+ u32 t_l, t_h;
+ u32 high_bsp;
+ int i, ret;
+
+ if (state->performance_level_count >= 9)
+ return -EINVAL;
+
+ if (state->performance_level_count < 2) {
+ a_t = CG_R(0xffff) | CG_L(0);
+ smc_state->levels[0].aT = cpu_to_be32(a_t);
+ return 0;
+ }
+
+ smc_state->levels[0].aT = cpu_to_be32(0);
+
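+	/* CG_R of level i carries t_l scaled by the switch period (bsp); CG_L of level i+1 carries t_h, using pbsp for the final transition. */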
+ for (i = 0; i <= state->performance_level_count - 2; i++) {
+ ret = r600_calculate_at(
+ (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
+ 100 * R600_AH_DFLT,
+ state->performance_levels[i + 1].sclk,
+ state->performance_levels[i].sclk,
+ &t_l,
+ &t_h);
+
+ if (ret) {
+ t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
+ t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
+ }
+
+ a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
+ a_t |= CG_R(t_l * pi->bsp / 20000);
+ smc_state->levels[i].aT = cpu_to_be32(a_t);
+
+ high_bsp = (i == state->performance_level_count - 2) ?
+ pi->pbsp : pi->bsp;
+ a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
+ smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
+ }
+
+ return 0;
+}
+
+static int si_disable_ulv(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+
+ if (ulv->supported)
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+
+ return 0;
+}
+
+static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ const struct si_power_info *si_pi = si_get_pi(adev);
+ const struct si_ulv_param *ulv = &si_pi->ulv;
+ const struct si_ps *state = si_get_ps(amdgpu_state);
+ int i;
+
+ if (state->performance_levels[0].mclk != ulv->pl.mclk)
+ return false;
+
+ /* XXX validate against display requirements! */
+
+ for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
+ if (adev->clock.current_dispclk <=
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
+ if (ulv->pl.vddc <
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
+ return false;
+ }
+ }
+
+ if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
+ return false;
+
+ return true;
+}
+
+static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state)
+{
+ const struct si_power_info *si_pi = si_get_pi(adev);
+ const struct si_ulv_param *ulv = &si_pi->ulv;
+
+ if (ulv->supported) {
+ if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+ return 0;
+}
+
+static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SISLANDS_SMC_SWSTATE *smc_state)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct ni_power_info *ni_pi = ni_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ int i, ret;
+ u32 threshold;
+ u32 sclk_in_sr = 1350; /* ??? */
+
+ if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
+ return -EINVAL;
+
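+	/* The watermark threshold is the top level's sclk (the * 100 / 100 factor leaves it unchanged). */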
+ threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
+
+ if (amdgpu_state->vclk && amdgpu_state->dclk) {
+ eg_pi->uvd_enabled = true;
+ if (eg_pi->smu_uvd_hs)
+ smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
+ } else {
+ eg_pi->uvd_enabled = false;
+ }
+
+ if (state->dc_compatible)
+ smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
+
+ smc_state->levelCount = 0;
+ for (i = 0; i < state->performance_level_count; i++) {
+ if (eg_pi->sclk_deep_sleep) {
+ if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
+ if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
+ smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+ else
+ smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+ }
+ }
+
+		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
+						    &smc_state->levels[i]);
+		if (ret)
+			return ret;
+
+		smc_state->levels[i].arbRefreshState =
+			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
+
+ if (ni_pi->enable_power_containment)
+ smc_state->levels[i].displayWatermark =
+ (state->performance_levels[i].sclk < threshold) ?
+ PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+ else
+ smc_state->levels[i].displayWatermark = (i < 2) ?
+ PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ if (eg_pi->dynamic_ac_timing)
+ smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
+ else
+ smc_state->levels[i].ACIndex = 0;
+
+ smc_state->levelCount++;
+ }
+
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_watermark_threshold,
+ threshold / 512);
+
+ si_populate_smc_sp(adev, amdgpu_state, smc_state);
+
+ ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
+ if (ret)
+ ni_pi->enable_power_containment = false;
+
+ ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
+ if (ret)
+ ni_pi->enable_sq_ramping = false;
+
+ return si_populate_smc_t(adev, amdgpu_state, smc_state);
+}
+
+static int si_upload_sw_state(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+ int ret;
+ u32 address = si_pi->state_table_start +
+ offsetof(SISLANDS_SMC_STATETABLE, driverState);
+ u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
+ ((new_state->performance_level_count - 1) *
+ sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
+ SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
+
+ memset(smc_state, 0, state_size);
+
+ ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
+ if (ret)
+ return ret;
+
+ return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+ state_size, si_pi->sram_end);
+}
+
+static int si_upload_ulv_state(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+ int ret = 0;
+
+ if (ulv->supported && ulv->pl.vddc) {
+ u32 address = si_pi->state_table_start +
+ offsetof(SISLANDS_SMC_STATETABLE, ULVState);
+ SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
+ u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+
+ memset(smc_state, 0, state_size);
+
+ ret = si_populate_ulv_state(adev, smc_state);
+ if (!ret)
+ ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
+ state_size, si_pi->sram_end);
+ }
+
+ return ret;
+}
+
+static int si_upload_smc_data(struct amdgpu_device *adev)
+{
+ struct amdgpu_crtc *amdgpu_crtc = NULL;
+ int i;
+
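+	/* Report the first active CRTC's timing to the SMC so mclk changes can be scheduled around it; write failures are non-fatal. */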
+ if (adev->pm.dpm.new_active_crtc_count == 0)
+ return 0;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
+ amdgpu_crtc = adev->mode_info.crtcs[i];
+ break;
+ }
+ }
+
+ if (amdgpu_crtc == NULL)
+ return 0;
+
+ if (amdgpu_crtc->line_time <= 0)
+ return 0;
+
+ if (si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_crtc_index,
+ amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
+ return 0;
+
+ if (si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+ amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+ return 0;
+
+ if (si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+ amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
+ return 0;
+
+ return 0;
+}
+
+static int si_set_mc_special_registers(struct amdgpu_device *adev,
+ struct si_mc_reg_table *table)
+{
+ u8 i, j, k;
+ u32 temp_reg;
+
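+	/* Append derived *_LP shadow entries after the existing table (j starts at table->last) for MRS/EMRS-class registers. */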
+ for (i = 0, j = table->last; i < table->last; i++) {
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ switch (table->mc_reg_address[i].s1) {
+ case MC_SEQ_MISC1:
+ temp_reg = RREG32(MC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ j++;
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ temp_reg = RREG32(MC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+ table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ j++;
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ }
+ break;
+ case MC_SEQ_RESERVE_M:
+ temp_reg = RREG32(MC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
+			for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ j++;
+ if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+ bool result = true;
+ switch (in_reg) {
+ case MC_SEQ_RAS_TIMING:
+ *out_reg = MC_SEQ_RAS_TIMING_LP;
+ break;
+ case MC_SEQ_CAS_TIMING:
+ *out_reg = MC_SEQ_CAS_TIMING_LP;
+ break;
+ case MC_SEQ_MISC_TIMING:
+ *out_reg = MC_SEQ_MISC_TIMING_LP;
+ break;
+ case MC_SEQ_MISC_TIMING2:
+ *out_reg = MC_SEQ_MISC_TIMING2_LP;
+ break;
+ case MC_SEQ_RD_CTL_D0:
+ *out_reg = MC_SEQ_RD_CTL_D0_LP;
+ break;
+ case MC_SEQ_RD_CTL_D1:
+ *out_reg = MC_SEQ_RD_CTL_D1_LP;
+ break;
+ case MC_SEQ_WR_CTL_D0:
+ *out_reg = MC_SEQ_WR_CTL_D0_LP;
+ break;
+ case MC_SEQ_WR_CTL_D1:
+ *out_reg = MC_SEQ_WR_CTL_D1_LP;
+ break;
+ case MC_PMG_CMD_EMRS:
+ *out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+ case MC_PMG_CMD_MRS:
+ *out_reg = MC_SEQ_PMG_CMD_MRS_LP;
+ break;
+ case MC_PMG_CMD_MRS1:
+ *out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+ case MC_SEQ_PMG_TIMING:
+ *out_reg = MC_SEQ_PMG_TIMING_LP;
+ break;
+ case MC_PMG_CMD_MRS2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+ case MC_SEQ_WR_CTL_2:
+ *out_reg = MC_SEQ_WR_CTL_2_LP;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static void si_set_valid_flag(struct si_mc_reg_table *table)
+{
+ u8 i, j;
+
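+	/* Flag only registers whose value differs between AC timing entries; constant registers need no per-state programming. */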
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
+ table->valid_flag |= 1 << i;
+ break;
+ }
+ }
+ }
+}
+
+static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
+{
+ u32 i;
+ u16 address;
+
+ for (i = 0; i < table->last; i++)
+ table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+ address : table->mc_reg_address[i].s1;
+}
+
+static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
+ struct si_mc_reg_table *si_table)
+{
+ u8 i, j;
+
+ if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+ return -EINVAL;
+
+ for (i = 0; i < table->last; i++)
+ si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+ si_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ si_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ si_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+ si_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct atom_mc_reg_table *table;
+ struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
+ u8 module_index = rv770_get_memory_module_index(adev);
+ int ret;
+
+ table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
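+	/* Seed the _LP shadow registers from the live MC sequencer state before reading the VBIOS table. */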
+ WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+ WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+ WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+ WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+ WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+ WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+ WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+ WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+ WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+ WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+ WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+ WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+ ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
+ if (ret)
+ goto init_mc_done;
+
+ ret = si_copy_vbios_mc_reg_table(table, si_table);
+ if (ret)
+ goto init_mc_done;
+
+ si_set_s0_mc_reg_index(si_table);
+
+ ret = si_set_mc_special_registers(adev, si_table);
+ if (ret)
+ goto init_mc_done;
+
+ si_set_valid_flag(si_table);
+
+init_mc_done:
+ kfree(table);
+
+	return ret;
+}
+
+static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
+ SMC_SIslands_MCRegisters *mc_reg_table)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 i, j;
+
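+	/* Emit addresses only for registers flagged in valid_flag, capped at the SMC register array size. */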
+ for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
+ if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
+ if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+ break;
+ mc_reg_table->address[i].s0 =
+ cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+ mc_reg_table->last = (u8)i;
+}
+
+static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
+ SMC_SIslands_MCRegisterSet *data,
+ u32 num_entries, u32 valid_flag)
+{
+ u32 i, j;
+
+	for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & (1 << j)) {
+ data->value[i] = cpu_to_be32(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
+ struct rv7xx_pl *pl,
+ SMC_SIslands_MCRegisterSet *mc_reg_table_data)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+	u32 i;
+
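+	/* Use the first entry whose mclk_max covers this level's memory clock, falling back to the last entry. */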
+ for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
+ if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+ break;
+ }
+
+ if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, si_pi->mc_reg_table.last,
+ si_pi->mc_reg_table.valid_flag);
+}
+
+static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state,
+ SMC_SIslands_MCRegisters *mc_reg_table)
+{
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ int i;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ si_convert_mc_reg_table_entry_to_smc(adev,
+ &state->performance_levels[i],
+ &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
+ }
+}
+
+static int si_populate_mc_reg_table(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_boot_state)
+{
+ struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ulv_param *ulv = &si_pi->ulv;
+ SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+ memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
+
+ si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
+
+ si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
+ &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
+
+ si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+ &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
+ si_pi->mc_reg_table.last,
+ si_pi->mc_reg_table.valid_flag);
+
+ if (ulv->supported && ulv->pl.vddc != 0)
+ si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
+ &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
+ else
+ si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
+ &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
+ si_pi->mc_reg_table.last,
+ si_pi->mc_reg_table.valid_flag);
+
+ si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
+
+ return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
+ (u8 *)smc_mc_reg_table,
+ sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
+}
+
+static int si_upload_mc_reg_table(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state)
+{
+ struct si_ps *new_state = si_get_ps(amdgpu_new_state);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 address = si_pi->mc_reg_table_start +
+ offsetof(SMC_SIslands_MCRegisters,
+ data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
+ SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
+
+ memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
+
+ si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
+
+ return amdgpu_si_copy_bytes_to_smc(adev, address,
+ (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
+ sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
+ si_pi->sram_end);
+}
+
+static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
+{
+ if (enable)
+ WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ else
+ WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+}
+
+static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
+{
+ struct si_ps *state = si_get_ps(amdgpu_state);
+ int i;
+ u16 pcie_speed, max_speed = 0;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ pcie_speed = state->performance_levels[i].pcie_gen;
+ if (max_speed < pcie_speed)
+ max_speed = pcie_speed;
+ }
+ return max_speed;
+}
+
+static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
+{
+ u32 speed_cntl;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+ return (u16)speed_cntl;
+}
+
+static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+ enum amdgpu_pcie_gen current_link_speed;
+
+ if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+ current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
+ else
+ current_link_speed = si_pi->force_pcie_gen;
+
+ si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->pspp_notify_required = false;
+ if (target_link_speed > current_link_speed) {
+ switch (target_link_speed) {
+#if defined(CONFIG_ACPI)
+ case AMDGPU_PCIE_GEN3:
+ if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+ break;
+ si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
+ if (current_link_speed == AMDGPU_PCIE_GEN2)
+ break;
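+		/* fall through: retry with a GEN2 request */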
+ case AMDGPU_PCIE_GEN2:
+ if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+ break;
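+		/* fall through to default if the GEN2 request also fails */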
+#endif
+ default:
+ si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
+ break;
+ }
+ } else {
+ if (target_link_speed < current_link_speed)
+ si_pi->pspp_notify_required = true;
+ }
+}
+
+static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+ u8 request;
+
+ if (si_pi->pspp_notify_required) {
+ if (target_link_speed == AMDGPU_PCIE_GEN3)
+ request = PCIE_PERF_REQ_PECI_GEN3;
+ else if (target_link_speed == AMDGPU_PCIE_GEN2)
+ request = PCIE_PERF_REQ_PECI_GEN2;
+ else
+ request = PCIE_PERF_REQ_PECI_GEN1;
+
+ if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+ (si_get_current_pcie_speed(adev) > 0))
+ return;
+
+#if defined(CONFIG_ACPI)
+ amdgpu_acpi_pcie_performance_request(adev, request, false);
+#endif
+ }
+}
+
+#if 0
+static int si_ds_request(struct amdgpu_device *adev,
+ bool ds_status_on, u32 count_write)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+
+ if (eg_pi->sclk_deep_sleep) {
+ if (ds_status_on)
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
+ PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ else
+ return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
+ PPSMC_Result_OK) ? 0 : -EINVAL;
+ }
+ return 0;
+}
+#endif
+
+static void si_set_max_cu_value(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+
+ if (adev->asic_type == CHIP_VERDE) {
+ switch (adev->pdev->device) {
+ case 0x6820:
+ case 0x6825:
+ case 0x6821:
+ case 0x6823:
+ case 0x6827:
+ si_pi->max_cu = 10;
+ break;
+ case 0x682D:
+ case 0x6824:
+ case 0x682F:
+ case 0x6826:
+ si_pi->max_cu = 8;
+ break;
+ case 0x6828:
+ case 0x6830:
+ case 0x6831:
+ case 0x6838:
+ case 0x6839:
+ case 0x683D:
+ si_pi->max_cu = 10;
+ break;
+ case 0x683B:
+ case 0x683F:
+ case 0x6829:
+ si_pi->max_cu = 8;
+ break;
+ default:
+ si_pi->max_cu = 0;
+ break;
+ }
+ } else {
+ si_pi->max_cu = 0;
+ }
+}
+
+static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
+ struct amdgpu_clock_voltage_dependency_table *table)
+{
+ u32 i;
+ int j;
+ u16 leakage_voltage;
+
+ if (table) {
+ for (i = 0; i < table->count; i++) {
+ switch (si_get_leakage_voltage_from_leakage_index(adev,
+ table->entries[i].v,
+ &leakage_voltage)) {
+ case 0:
+ table->entries[i].v = leakage_voltage;
+ break;
+ case -EAGAIN:
+ return -EINVAL;
+ case -EINVAL:
+ default:
+ break;
+ }
+ }
+
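+		/* Sweep down the table, clamping each voltage to its successor so entries stay monotonically non-decreasing. */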
+ for (j = (table->count - 2); j >= 0; j--) {
+ table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
+ table->entries[j].v : table->entries[j + 1].v;
+ }
+ }
+ return 0;
+}
+
+static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ ret = si_patch_single_dependency_table_based_on_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+ if (ret)
+ DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
+ ret = si_patch_single_dependency_table_based_on_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+ if (ret)
+ DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
+ ret = si_patch_single_dependency_table_based_on_leakage(adev,
+ &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+ if (ret)
+ DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
+ return ret;
+}
+
+static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_new_state,
+ struct amdgpu_ps *amdgpu_current_state)
+{
+ u32 lane_width;
+ u32 new_lane_width =
+ (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+ u32 current_lane_width =
+ (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
+
+ if (new_lane_width != current_lane_width) {
+ amdgpu_set_pcie_lanes(adev, new_lane_width);
+ lane_width = amdgpu_get_pcie_lanes(adev);
+ si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
+ }
+}
+
+static void si_dpm_setup_asic(struct amdgpu_device *adev)
+{
+ si_read_clock_registers(adev);
+ si_enable_acpi_power_management(adev);
+}
+
+static int si_thermal_enable_alert(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 thermal_int = RREG32(CG_THERMAL_INT);
+
+ if (enable) {
+ PPSMC_Result result;
+
+ thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+ WREG32(CG_THERMAL_INT, thermal_int);
+ result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
+ if (result != PPSMC_Result_OK) {
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+ return -EINVAL;
+ }
+ } else {
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ WREG32(CG_THERMAL_INT, thermal_int);
+ }
+
+ return 0;
+}
+
+static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+
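+	/* Clamp the range into the controller's 0..255 C window; dpm state keeps millidegrees while the registers take whole degrees. */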
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
+ WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
+ WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
+ WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
+
+ adev->pm.dpm.thermal.min_temp = low_temp;
+ adev->pm.dpm.thermal.max_temp = high_temp;
+
+ return 0;
+}
+
+static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+
+ if (si_pi->fan_ctrl_is_in_default_mode) {
+ tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
+ si_pi->fan_ctrl_default_mode = tmp;
+ tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
+ si_pi->t_min = tmp;
+ si_pi->fan_ctrl_is_in_default_mode = false;
+ }
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(0);
+ WREG32(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(mode);
+ WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
+ u32 duty100;
+ u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ u16 fdo_min, slope1, slope2;
+ u32 reference_clock, tmp;
+ int ret;
+ u64 tmp64;
+
+ if (!si_pi->fan_table_start) {
+ adev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0) {
+ adev->pm.dpm.fan.ucode_fan_control = false;
+ return 0;
+ }
+
+ tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (u16)tmp64;
+
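+	/* Two-segment fan curve: slope1/slope2 give PWM duty per degree over (t_min, t_med) and (t_med, t_high), rounded. */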
+ t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
+ t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
+
+ pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
+ pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
+
+ slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
+ fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
+ fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
+ fan_table.slope1 = cpu_to_be16(slope1);
+ fan_table.slope2 = cpu_to_be16(slope2);
+ fan_table.fdo_min = cpu_to_be16(fdo_min);
+ fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
+ fan_table.hys_up = cpu_to_be16(1);
+ fan_table.hys_slope = cpu_to_be16(1);
+ fan_table.temp_resp_lim = cpu_to_be16(5);
+ reference_clock = amdgpu_asic_get_xclk(adev);
+
+ fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
+ reference_clock) / 1600);
+ fan_table.fdo_max = cpu_to_be16((u16)duty100);
+
+ tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
+ fan_table.temp_src = (uint8_t)tmp;
+
+ ret = amdgpu_si_copy_bytes_to_smc(adev,
+ si_pi->fan_table_start,
+ (u8 *)(&fan_table),
+ sizeof(fan_table),
+ si_pi->sram_end);
+
+ if (ret) {
+ DRM_ERROR("Failed to load fan table to the SMC.");
+ adev->pm.dpm.fan.ucode_fan_control = false;
+ }
+
+ return ret;
+}
+
+static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PPSMC_Result ret;
+
+ ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
+ if (ret == PPSMC_Result_OK) {
+ si_pi->fan_is_controlled_by_smc = true;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ PPSMC_Result ret;
+
+ ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
+
+ if (ret == PPSMC_Result_OK) {
+ si_pi->fan_is_controlled_by_smc = false;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+ u32 *speed)
+{
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+ duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)duty * 100;
+ do_div(tmp64, duty100);
+ *speed = (u32)tmp64;
+
+ if (*speed > 100)
+ *speed = 100;
+
+ return 0;
+}
+
+static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+ u32 speed)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+ u32 duty, duty100;
+ u64 tmp64;
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ if (si_pi->fan_is_controlled_by_smc)
+ return -EINVAL;
+
+ if (speed > 100)
+ return -EINVAL;
+
+ duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (u64)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (u32)tmp64;
+
+ tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
+ tmp |= FDO_STATIC_DUTY(duty);
+ WREG32(CG_FDO_CTRL0, tmp);
+
+ return 0;
+}
+
+static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+{
+ if (mode) {
+ /* stop auto-manage */
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ si_fan_ctrl_stop_smc_fan_control(adev);
+ si_fan_ctrl_set_static_mode(adev, mode);
+ } else {
+ /* restart auto-manage */
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ si_thermal_start_smc_fan_control(adev);
+ else
+ si_fan_ctrl_set_default_mode(adev);
+ }
+}
+
+static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+
+ if (si_pi->fan_is_controlled_by_smc)
+ return 0;
+
+ tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+ return (tmp >> FDO_PWM_MODE_SHIFT);
+}
+
+#if 0
+static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
+ u32 *speed)
+{
+ u32 tach_period;
+ u32 xclk = amdgpu_asic_get_xclk(adev);
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ if (adev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
+ if (tach_period == 0)
+ return -ENOENT;
+
+ *speed = 60 * xclk * 10000 / tach_period;
+
+ return 0;
+}
+
+static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
+ u32 speed)
+{
+ u32 tach_period, tmp;
+ u32 xclk = amdgpu_asic_get_xclk(adev);
+
+ if (adev->pm.no_fan)
+ return -ENOENT;
+
+ if (adev->pm.fan_pulses_per_revolution == 0)
+ return -ENOENT;
+
+ if ((speed < adev->pm.fan_min_rpm) ||
+ (speed > adev->pm.fan_max_rpm))
+ return -EINVAL;
+
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ si_fan_ctrl_stop_smc_fan_control(adev);
+
+ tach_period = 60 * xclk * 10000 / (8 * speed);
+ tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
+ tmp |= TARGET_PERIOD(tach_period);
+ WREG32(CG_TACH_CTRL, tmp);
+
+ si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
+
+ return 0;
+}
+#endif
+
+static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
+{
+ struct si_power_info *si_pi = si_get_pi(adev);
+ u32 tmp;
+
+ if (!si_pi->fan_ctrl_is_in_default_mode) {
+ tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
+ tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
+ WREG32(CG_FDO_CTRL2, tmp);
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
+ tmp |= TMIN(si_pi->t_min);
+ WREG32(CG_FDO_CTRL2, tmp);
+ si_pi->fan_ctrl_is_in_default_mode = true;
+ }
+}
+
+static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm.fan.ucode_fan_control) {
+ si_fan_ctrl_start_smc_fan_control(adev);
+ si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
+ }
+}
+
+static void si_thermal_initialize(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ if (adev->pm.fan_pulses_per_revolution) {
+ tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
+		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
+ WREG32(CG_TACH_CTRL, tmp);
+ }
+
+ tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
+ tmp |= TACH_PWM_RESP_RATE(0x28);
+ WREG32(CG_FDO_CTRL2, tmp);
+}
+
+static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
+{
+ int ret;
+
+ si_thermal_initialize(adev);
+ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+ if (adev->pm.dpm.fan.ucode_fan_control) {
+ ret = si_halt_smc(adev);
+ if (ret)
+ return ret;
+ ret = si_thermal_setup_fan_table(adev);
+ if (ret)
+ return ret;
+ ret = si_resume_smc(adev);
+ if (ret)
+ return ret;
+ si_thermal_start_smc_fan_control(adev);
+ }
+
+ return 0;
+}
+
+static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
+{
+ if (!adev->pm.no_fan) {
+ si_fan_ctrl_set_default_mode(adev);
+ si_fan_ctrl_stop_smc_fan_control(adev);
+ }
+}
+
+static int si_dpm_enable(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+ int ret;
+
+ if (amdgpu_si_is_smc_running(adev))
+ return -EINVAL;
+ if (pi->voltage_control || si_pi->voltage_control_svi2)
+ si_enable_voltage_control(adev, true);
+ if (pi->mvdd_control)
+ si_get_mvdd_configuration(adev);
+ if (pi->voltage_control || si_pi->voltage_control_svi2) {
+ ret = si_construct_voltage_tables(adev);
+ if (ret) {
+ DRM_ERROR("si_construct_voltage_tables failed\n");
+ return ret;
+ }
+ }
+ if (eg_pi->dynamic_ac_timing) {
+ ret = si_initialize_mc_reg_table(adev);
+ if (ret)
+ eg_pi->dynamic_ac_timing = false;
+ }
+ if (pi->dynamic_ss)
+ si_enable_spread_spectrum(adev, true);
+ if (pi->thermal_protection)
+ si_enable_thermal_protection(adev, true);
+ si_setup_bsp(adev);
+ si_program_git(adev);
+ si_program_tp(adev);
+ si_program_tpp(adev);
+ si_program_sstp(adev);
+ si_enable_display_gap(adev);
+ si_program_vc(adev);
+ ret = si_upload_firmware(adev);
+ if (ret) {
+ DRM_ERROR("si_upload_firmware failed\n");
+ return ret;
+ }
+ ret = si_process_firmware_header(adev);
+ if (ret) {
+ DRM_ERROR("si_process_firmware_header failed\n");
+ return ret;
+ }
+ ret = si_initial_switch_from_arb_f0_to_f1(adev);
+ if (ret) {
+ DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
+ return ret;
+ }
+ ret = si_init_smc_table(adev);
+ if (ret) {
+ DRM_ERROR("si_init_smc_table failed\n");
+ return ret;
+ }
+ ret = si_init_smc_spll_table(adev);
+ if (ret) {
+ DRM_ERROR("si_init_smc_spll_table failed\n");
+ return ret;
+ }
+ ret = si_init_arb_table_index(adev);
+ if (ret) {
+ DRM_ERROR("si_init_arb_table_index failed\n");
+ return ret;
+ }
+ if (eg_pi->dynamic_ac_timing) {
+ ret = si_populate_mc_reg_table(adev, boot_ps);
+ if (ret) {
+ DRM_ERROR("si_populate_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = si_initialize_smc_cac_tables(adev);
+ if (ret) {
+ DRM_ERROR("si_initialize_smc_cac_tables failed\n");
+ return ret;
+ }
+ ret = si_initialize_hardware_cac_manager(adev);
+ if (ret) {
+ DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
+ return ret;
+ }
+ ret = si_initialize_smc_dte_tables(adev);
+ if (ret) {
+ DRM_ERROR("si_initialize_smc_dte_tables failed\n");
+ return ret;
+ }
+ ret = si_populate_smc_tdp_limits(adev, boot_ps);
+ if (ret) {
+ DRM_ERROR("si_populate_smc_tdp_limits failed\n");
+ return ret;
+ }
+ ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
+ if (ret) {
+ DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
+ return ret;
+ }
+ si_program_response_times(adev);
+ si_program_ds_registers(adev);
+ si_dpm_start_smc(adev);
+ ret = si_notify_smc_display_change(adev, false);
+ if (ret) {
+ DRM_ERROR("si_notify_smc_display_change failed\n");
+ return ret;
+ }
+ si_enable_sclk_control(adev, true);
+ si_start_dpm(adev);
+
+ si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_thermal_start_thermal_controller(adev);
+ ni_update_current_ps(adev, boot_ps);
+
+ return 0;
+}
+
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = si_thermal_enable_alert(adev, false);
+ if (ret)
+ return ret;
+ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static void si_dpm_disable(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
+
+ if (!amdgpu_si_is_smc_running(adev))
+ return;
+ si_thermal_stop_thermal_controller(adev);
+ si_disable_ulv(adev);
+ si_clear_vc(adev);
+ if (pi->thermal_protection)
+ si_enable_thermal_protection(adev, false);
+ si_enable_power_containment(adev, boot_ps, false);
+ si_enable_smc_cac(adev, boot_ps, false);
+ si_enable_spread_spectrum(adev, false);
+ si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+ si_stop_dpm(adev);
+ si_reset_to_default(adev);
+ si_dpm_stop_smc(adev);
+ si_force_switch_to_arb_f0(adev);
+
+ ni_update_current_ps(adev, boot_ps);
+}
+
+static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
+ struct amdgpu_ps *new_ps = &requested_ps;
+
+ ni_update_requested_ps(adev, new_ps);
+ si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
+
+ return 0;
+}
+
+static int si_power_control_set_level(struct amdgpu_device *adev)
+{
+ struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
+ int ret;
+
+ ret = si_restrict_performance_levels_before_switch(adev);
+ if (ret)
+ return ret;
+ ret = si_halt_smc(adev);
+ if (ret)
+ return ret;
+ ret = si_populate_smc_tdp_limits(adev, new_ps);
+ if (ret)
+ return ret;
+ ret = si_populate_smc_tdp_limits_2(adev, new_ps);
+ if (ret)
+ return ret;
+ ret = si_resume_smc(adev);
+ if (ret)
+ return ret;
+ ret = si_set_sw_state(adev);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int si_dpm_set_power_state(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+ struct amdgpu_ps *old_ps = &eg_pi->current_rps;
+ int ret;
+
+ ret = si_disable_ulv(adev);
+ if (ret) {
+ DRM_ERROR("si_disable_ulv failed\n");
+ return ret;
+ }
+ ret = si_restrict_performance_levels_before_switch(adev);
+ if (ret) {
+ DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
+ return ret;
+ }
+ if (eg_pi->pcie_performance_request)
+ si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
+ ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
+ ret = si_enable_power_containment(adev, new_ps, false);
+ if (ret) {
+ DRM_ERROR("si_enable_power_containment failed\n");
+ return ret;
+ }
+ ret = si_enable_smc_cac(adev, new_ps, false);
+ if (ret) {
+ DRM_ERROR("si_enable_smc_cac failed\n");
+ return ret;
+ }
+ ret = si_halt_smc(adev);
+ if (ret) {
+ DRM_ERROR("si_halt_smc failed\n");
+ return ret;
+ }
+ ret = si_upload_sw_state(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("si_upload_sw_state failed\n");
+ return ret;
+ }
+ ret = si_upload_smc_data(adev);
+ if (ret) {
+ DRM_ERROR("si_upload_smc_data failed\n");
+ return ret;
+ }
+ ret = si_upload_ulv_state(adev);
+ if (ret) {
+ DRM_ERROR("si_upload_ulv_state failed\n");
+ return ret;
+ }
+ if (eg_pi->dynamic_ac_timing) {
+ ret = si_upload_mc_reg_table(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("si_upload_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = si_program_memory_timing_parameters(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("si_program_memory_timing_parameters failed\n");
+ return ret;
+ }
+ si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
+
+ ret = si_resume_smc(adev);
+ if (ret) {
+ DRM_ERROR("si_resume_smc failed\n");
+ return ret;
+ }
+ ret = si_set_sw_state(adev);
+ if (ret) {
+ DRM_ERROR("si_set_sw_state failed\n");
+ return ret;
+ }
+ ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
+ if (eg_pi->pcie_performance_request)
+ si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
+ ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
+ if (ret) {
+ DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
+ return ret;
+ }
+ ret = si_enable_smc_cac(adev, new_ps, true);
+ if (ret) {
+ DRM_ERROR("si_enable_smc_cac failed\n");
+ return ret;
+ }
+ ret = si_enable_power_containment(adev, new_ps, true);
+ if (ret) {
+ DRM_ERROR("si_enable_power_containment failed\n");
+ return ret;
+ }
+
+ ret = si_power_control_set_level(adev);
+ if (ret) {
+ DRM_ERROR("si_power_control_set_level failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
+
+ ni_update_current_ps(adev, new_ps);
+}
+
+#if 0
+void si_dpm_reset_asic(struct amdgpu_device *adev)
+{
+ si_restrict_performance_levels_before_switch(adev);
+ si_disable_ulv(adev);
+ si_set_boot_state(adev);
+}
+#endif
+
+static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+{
+ si_program_display_gap(adev);
+}
+
+static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else if (r600_is_uvd_state(rps->class, rps->class2)) {
+ rps->vclk = RV770_DEFAULT_VCLK_FREQ;
+ rps->dclk = RV770_DEFAULT_DCLK_FREQ;
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+ adev->pm.dpm.boot_ps = rps;
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ adev->pm.dpm.uvd_ps = rps;
+}
+
+static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(adev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_power_info *si_pi = si_get_pi(adev);
+ struct si_ps *ps = si_get_ps(rps);
+ u16 leakage_voltage;
+ struct rv7xx_pl *pl = &ps->performance_levels[index];
+ int ret;
+
+ ps->performance_level_count = index + 1;
+
+ pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+ pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
+ pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+ pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
+
+ pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
+ pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
+ pl->flags = le32_to_cpu(clock_info->si.ulFlags);
+ pl->pcie_gen = r600_get_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ clock_info->si.ucPCIEGen);
+
+ /* patch up vddc if necessary */
+ ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
+ &leakage_voltage);
+ if (ret == 0)
+ pl->vddc = leakage_voltage;
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+ pi->acpi_vddc = pl->vddc;
+ eg_pi->acpi_vddci = pl->vddci;
+ si_pi->acpi_pcie_gen = pl->pcie_gen;
+ }
+
+ if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
+ index == 0) {
+ /* XXX disable for A0 tahiti */
+ si_pi->ulv.supported = false;
+ si_pi->ulv.pl = *pl;
+ si_pi->ulv.one_pcie_lane_in_ulv = false;
+ si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
+ si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
+ si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
+ }
+
+ if (pi->min_vddc_in_table > pl->vddc)
+ pi->min_vddc_in_table = pl->vddc;
+
+ if (pi->max_vddc_in_table < pl->vddc)
+ pi->max_vddc_in_table = pl->vddc;
+
+ /* patch up boot state */
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ u16 vddc, vddci, mvdd;
+ amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
+ pl->mclk = adev->clock.default_mclk;
+ pl->sclk = adev->clock.default_sclk;
+ pl->vddc = vddc;
+ pl->vddci = vddci;
+ si_pi->mvdd_bootup_value = mvdd;
+ }
+
+ if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+ ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
+ }
+}
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static int si_parse_power_table(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct si_ps *ps;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ amdgpu_add_thermal_controller(adev);
+
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!adev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
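+	/* Walk the state array: allocate per-state si_ps data, parse non-clock info, then up to SISLANDS_MAX_HARDWARE_POWERLEVELS clock levels. */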
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(adev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.ps[i].ps_priv = ps;
+ si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ si_parse_pplib_clock_info(adev,
+ &adev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ adev->pm.dpm.num_ps = state_array->ucNumEntries;
+
+ /* fill in the vce power states */
+ for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
+ u32 sclk, mclk;
+ clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+ sclk |= clock_info->si.ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+ mclk |= clock_info->si.ucMemoryClockHigh << 16;
+ adev->pm.dpm.vce_states[i].sclk = sclk;
+ adev->pm.dpm.vce_states[i].mclk = mclk;
+ }
+
+ return 0;
+}
+
+static int si_dpm_init(struct amdgpu_device *adev)
+{
+ struct rv7xx_power_info *pi;
+ struct evergreen_power_info *eg_pi;
+ struct ni_power_info *ni_pi;
+ struct si_power_info *si_pi;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 mask;
+
+ si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
+ if (si_pi == NULL)
+ return -ENOMEM;
+ adev->pm.dpm.priv = si_pi;
+ ni_pi = &si_pi->ni;
+ eg_pi = &ni_pi->eg;
+ pi = &eg_pi->rv7xx;
+
+ ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+ if (ret)
+ si_pi->sys_pcie_mask = 0;
+ else
+ si_pi->sys_pcie_mask = mask;
+ si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
+
+ si_set_max_cu_value(adev);
+
+ rv770_get_max_vddc(adev);
+ si_get_leakage_vddc(adev);
+ si_patch_dependency_tables_based_on_leakage(adev);
+
+ pi->acpi_vddc = 0;
+ eg_pi->acpi_vddci = 0;
+ pi->min_vddc_in_table = 0;
+ pi->max_vddc_in_table = 0;
+
+ ret = amdgpu_get_platform_caps(adev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_parse_extended_power_table(adev);
+ if (ret)
+ return ret;
+
+ ret = si_parse_power_table(adev);
+ if (ret)
+ return ret;
+
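+ /* default vddc vs. dispclk table: clocks in 10 kHz units, voltages in mV */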
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+ kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+ if (adev->pm.dpm.voltage_response_time == 0)
+ adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
+ if (adev->pm.dpm.backbias_response_time == 0)
+ adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
+
+ ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
+ 0, false, &dividers);
+ if (ret)
+ pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
+ else
+ pi->ref_div = dividers.ref_div + 1;
+
+ eg_pi->smu_uvd_hs = false;
+
+ pi->mclk_strobe_mode_threshold = 40000;
+ if (si_is_special_1gb_platform(adev))
+ pi->mclk_stutter_mode_threshold = 0;
+ else
+ pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
+ pi->mclk_edc_enable_threshold = 40000;
+ eg_pi->mclk_edc_wr_enable_threshold = 40000;
+
+ ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
+
+ pi->voltage_control =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!pi->voltage_control) {
+ si_pi->voltage_control_svi2 =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_SVID2);
+ if (si_pi->voltage_control_svi2)
+ amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+ }
+
+ pi->mvdd_control =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
+
+ eg_pi->vddci_control =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!eg_pi->vddci_control)
+ si_pi->vddci_control_svi2 =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_SVID2);
+
+ si_pi->vddc_phase_shed_control =
+ amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_PHASE_LUT);
+
+ rv770_get_engine_memory_ss(adev);
+
+ pi->asi = RV770_ASI_DFLT;
+ pi->pasi = CYPRESS_HASI_DFLT;
+ pi->vrc = SISLANDS_VRC_DFLT;
+
+ pi->gfx_clock_gating = true;
+
+ eg_pi->sclk_deep_sleep = true;
+ si_pi->sclk_deep_sleep_above_low = false;
+
+ if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+ pi->thermal_protection = true;
+ else
+ pi->thermal_protection = false;
+
+ eg_pi->dynamic_ac_timing = true;
+
+ eg_pi->light_sleep = true;
+#if defined(CONFIG_ACPI)
+ eg_pi->pcie_performance_request =
+ amdgpu_acpi_is_pcie_performance_request_supported(adev);
+#else
+ eg_pi->pcie_performance_request = false;
+#endif
+
+ si_pi->sram_end = SMC_RAM_END;
+
+ adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+ adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+ adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+ adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+ adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+ adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+ adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+ si_initialize_powertune_defaults(adev);
+
+ /* make sure dc limits are valid */
+ if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ si_pi->fan_ctrl_is_in_default_mode = true;
+
+ return 0;
+}
+
+static void si_dpm_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->pm.dpm.ps)
+ for (i = 0; i < adev->pm.dpm.num_ps; i++)
+ kfree(adev->pm.dpm.ps[i].ps_priv);
+ kfree(adev->pm.dpm.ps);
+ kfree(adev->pm.dpm.priv);
+ kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+ amdgpu_free_extended_power_table(adev);
+}
+
+static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ struct seq_file *m)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct amdgpu_ps *rps = &eg_pi->current_rps;
+ struct si_ps *ps = si_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
+ CURRENT_STATE_INDEX_SHIFT;
+
+ if (current_index >= ps->performance_level_count) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ pl = &ps->performance_levels[current_index];
+ seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+ }
+}
+
+static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 cg_thermal_int;
+
+ switch (type) {
+ case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ cg_thermal_int |= THERM_INT_MASK_HIGH;
+ WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ cg_thermal_int &= ~THERM_INT_MASK_HIGH;
+ WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ cg_thermal_int |= THERM_INT_MASK_LOW;
+ WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
+ cg_thermal_int &= ~THERM_INT_MASK_LOW;
+ WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int si_dpm_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ bool queue_thermal = false;
+
+ if (entry == NULL)
+ return -EINVAL;
+
+ switch (entry->src_id) {
+ case 230: /* thermal low to high */
+ DRM_DEBUG("IH: thermal low to high\n");
+ adev->pm.dpm.thermal.high_to_low = false;
+ queue_thermal = true;
+ break;
+ case 231: /* thermal high to low */
+ DRM_DEBUG("IH: thermal high to low\n");
+ adev->pm.dpm.thermal.high_to_low = true;
+ queue_thermal = true;
+ break;
+ default:
+ break;
+ }
+
+ if (queue_thermal)
+ schedule_work(&adev->pm.dpm.thermal.work);
+
+ return 0;
+}
+
+static int si_dpm_late_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!amdgpu_dpm)
+ return 0;
+
+ /* init the sysfs and debugfs files late */
+ ret = amdgpu_pm_sysfs_init(adev);
+ if (ret)
+ return ret;
+
+ ret = si_set_temperature_range(adev);
+ if (ret)
+ return ret;
+#if 0 /* TODO: also powergate UVD here? */
+ si_dpm_powergate_uvd(adev, true);
+#endif
+ return 0;
+}
+
+/**
+ * si_dpm_init_microcode - load ucode images from disk
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use the firmware interface to load the ucode images into
+ * the driver; they are only cached here, not yet loaded
+ * into the hardware.
+ * Returns 0 on success, error on failure.
+ */
+static int si_dpm_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ char fw_name[30];
+ int err;
+
+ DRM_DEBUG("\n");
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_name = "tahiti";
+ break;
+ case CHIP_PITCAIRN:
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811) ||
+ (adev->pdev->device == 0x6816) ||
+ (adev->pdev->device == 0x6817) ||
+ (adev->pdev->device == 0x6806))
+ chip_name = "pitcairn_k";
+ else
+ chip_name = "pitcairn";
+ break;
+ case CHIP_VERDE:
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
+ (adev->pdev->device == 0x6820) ||
+ (adev->pdev->device == 0x6821) ||
+ (adev->pdev->device == 0x6822) ||
+ (adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682A) ||
+ (adev->pdev->device == 0x682B))
+ chip_name = "verde_k";
+ else
+ chip_name = "verde";
+ break;
+ case CHIP_OLAND:
+ if ((adev->pdev->revision == 0xC7) ||
+ (adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605))
+ chip_name = "oland_k";
+ else
+ chip_name = "oland";
+ break;
+ case CHIP_HAINAN:
+ if ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6664) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667))
+ chip_name = "hainan_k";
+ else
+ chip_name = "hainan";
+ break;
+ default:
+ BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+ if (err) {
+ DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
+ err, fw_name);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ }
+ return err;
+}
+
+static int si_dpm_sw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
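+ /* src_ids 230/231 are the thermal low-to-high/high-to-low interrupts,
+ * decoded in si_dpm_process_interrupt()
+ */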
+ ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+ if (ret)
+ return ret;
+
+ /* default to balanced state */
+ adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
+ adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+ adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
+ adev->pm.default_sclk = adev->clock.default_sclk;
+ adev->pm.default_mclk = adev->clock.default_mclk;
+ adev->pm.current_sclk = adev->clock.default_sclk;
+ adev->pm.current_mclk = adev->clock.default_mclk;
+ adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
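+ /* bail early when dpm support is disabled (amdgpu_dpm module parameter) */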
+ if (amdgpu_dpm == 0)
+ return 0;
+
+ ret = si_dpm_init_microcode(adev);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
+ mutex_lock(&adev->pm.mutex);
+ ret = si_dpm_init(adev);
+ if (ret)
+ goto dpm_failed;
+ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ if (amdgpu_dpm == 1)
+ amdgpu_pm_print_power_states(adev);
+ mutex_unlock(&adev->pm.mutex);
+ DRM_INFO("amdgpu: dpm initialized\n");
+
+ return 0;
+
+dpm_failed:
+ si_dpm_fini(adev);
+ mutex_unlock(&adev->pm.mutex);
+ DRM_ERROR("amdgpu: dpm initialization failed\n");
+ return ret;
+}
+
+static int si_dpm_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ flush_work(&adev->pm.dpm.thermal.work);
+
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_pm_sysfs_fini(adev);
+ si_dpm_fini(adev);
+ mutex_unlock(&adev->pm.mutex);
+
+ return 0;
+}
+
+static int si_dpm_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!amdgpu_dpm)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ si_dpm_setup_asic(adev);
+ ret = si_dpm_enable(adev);
+ if (ret)
+ adev->pm.dpm_enabled = false;
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+static int si_dpm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ si_dpm_disable(adev);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return 0;
+}
+
+static int si_dpm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ /* disable dpm */
+ si_dpm_disable(adev);
+ /* reset the power state */
+ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
+ }
+ return 0;
+}
+
+static int si_dpm_resume(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pm.dpm_enabled) {
+ /* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
+ si_dpm_setup_asic(adev);
+ ret = si_dpm_enable(adev);
+ if (ret)
+ adev->pm.dpm_enabled = false;
+ else
+ adev->pm.dpm_enabled = true;
+ mutex_unlock(&adev->pm.mutex);
+ if (adev->pm.dpm_enabled)
+ amdgpu_pm_compute_clocks(adev);
+ }
+ return 0;
+}
+
+static bool si_dpm_is_idle(void *handle)
+{
+ /* XXX */
+ return true;
+}
+
+static int si_dpm_wait_for_idle(void *handle)
+{
+ /* XXX */
+ return 0;
+}
+
+static int si_dpm_soft_reset(void *handle)
+{
+ return 0;
+}
+
+static int si_dpm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int si_dpm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* get temperature in millidegrees */
+static int si_dpm_get_temp(struct amdgpu_device *adev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+ CTF_TEMP_SHIFT;
+
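+ /* CTF_TEMP is a 9 bit field; a set bit 9 presumably flags an
+ * out-of-range reading, which is clamped to 255 C
+ */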
+ if (temp & 0x200)
+ actual_temp = 255;
+ else
+ actual_temp = temp & 0x1ff;
+
+ actual_temp = (actual_temp * 1000);
+
+ return actual_temp;
+}
+
+static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].sclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
+ struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].mclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
+
+static void si_dpm_print_power_state(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ struct si_ps *ps = si_get_ps(rps);
+ struct rv7xx_pl *pl;
+ int i;
+
+ amdgpu_dpm_print_class_info(rps->class, rps->class2);
+ amdgpu_dpm_print_cap_info(rps->caps);
+ DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->performance_level_count; i++) {
+ pl = &ps->performance_levels[i];
+ if (adev->asic_type >= CHIP_TAHITI)
+ DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
+ i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
+ else
+ DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ }
+ amdgpu_dpm_print_ps_status(adev, rps);
+}
+
+static int si_dpm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_dpm_set_dpm_funcs(adev);
+ si_dpm_set_irq_funcs(adev);
+ return 0;
+}
+
+const struct amd_ip_funcs si_dpm_ip_funcs = {
+ .name = "si_dpm",
+ .early_init = si_dpm_early_init,
+ .late_init = si_dpm_late_init,
+ .sw_init = si_dpm_sw_init,
+ .sw_fini = si_dpm_sw_fini,
+ .hw_init = si_dpm_hw_init,
+ .hw_fini = si_dpm_hw_fini,
+ .suspend = si_dpm_suspend,
+ .resume = si_dpm_resume,
+ .is_idle = si_dpm_is_idle,
+ .wait_for_idle = si_dpm_wait_for_idle,
+ .soft_reset = si_dpm_soft_reset,
+ .set_clockgating_state = si_dpm_set_clockgating_state,
+ .set_powergating_state = si_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs si_dpm_funcs = {
+ .get_temperature = &si_dpm_get_temp,
+ .pre_set_power_state = &si_dpm_pre_set_power_state,
+ .set_power_state = &si_dpm_set_power_state,
+ .post_set_power_state = &si_dpm_post_set_power_state,
+ .display_configuration_changed = &si_dpm_display_configuration_changed,
+ .get_sclk = &si_dpm_get_sclk,
+ .get_mclk = &si_dpm_get_mclk,
+ .print_power_state = &si_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &si_dpm_force_performance_level,
+ .vblank_too_short = &si_dpm_vblank_too_short,
+ .set_fan_control_mode = &si_dpm_set_fan_control_mode,
+ .get_fan_control_mode = &si_dpm_get_fan_control_mode,
+ .set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
+ .get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
+};
+
+static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
+{
+ if (adev->pm.funcs == NULL)
+ adev->pm.funcs = &si_dpm_funcs;
+}
+
+static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
+ .set = si_dpm_set_interrupt_state,
+ .process = si_dpm_process_interrupt,
+};
+
+static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
+ adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
new file mode 100644
index 000000000000..51ce21c5f4fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -0,0 +1,1015 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SI_DPM_H__
+#define __SI_DPM_H__
+
+#include "amdgpu_atombios.h"
+#include "sislands_smc.h"
+
+#define MC_CG_CONFIG 0x96f
+#define MC_ARB_CG 0x9fa
+#define CG_ARB_REQ(x) ((x) << 0)
+#define CG_ARB_REQ_MASK (0xff << 0)
+
+#define MC_ARB_DRAM_TIMING_1 0x9fc
+#define MC_ARB_DRAM_TIMING_2 0x9fd
+#define MC_ARB_DRAM_TIMING_3 0x9fe
+#define MC_ARB_DRAM_TIMING2_1 0x9ff
+#define MC_ARB_DRAM_TIMING2_2 0xa00
+#define MC_ARB_DRAM_TIMING2_3 0xa01
+
+#define MAX_NO_OF_MVDD_VALUES 2
+#define MAX_NO_VREG_STEPS 32
+#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
+#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+#define RV770_ASI_DFLT 1000
+#define CYPRESS_HASI_DFLT 400000
+#define PCIE_PERF_REQ_PECI_GEN1 2
+#define PCIE_PERF_REQ_PECI_GEN2 3
+#define PCIE_PERF_REQ_PECI_GEN3 4
+#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */
+#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */
+
+#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
+
+#define RV770_SMC_TABLE_ADDRESS 0xB000
+#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
+
+#define SMC_STROBE_RATIO 0x0F
+#define SMC_STROBE_ENABLE 0x10
+
+#define SMC_MC_EDC_RD_FLAG 0x01
+#define SMC_MC_EDC_WR_FLAG 0x02
+#define SMC_MC_RTT_ENABLE 0x04
+#define SMC_MC_STUTTER_EN 0x08
+
+#define RV770_SMC_VOLTAGEMASK_VDDC 0
+#define RV770_SMC_VOLTAGEMASK_MVDD 1
+#define RV770_SMC_VOLTAGEMASK_VDDCI 2
+#define RV770_SMC_VOLTAGEMASK_MAX 4
+
+#define NISLANDS_SMC_STROBE_RATIO 0x0F
+#define NISLANDS_SMC_STROBE_ENABLE 0x10
+
+#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01
+#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02
+#define NISLANDS_SMC_MC_RTT_ENABLE 0x04
+#define NISLANDS_SMC_MC_STUTTER_EN 0x08
+
+#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
+#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
+#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
+
+#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0
+#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1
+#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
+#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3
+
+#define SISLANDS_LEAKAGE_INDEX0 0xff01
+#define SISLANDS_MAX_LEAKAGE_COUNT 4
+
+#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5
+#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
+#define SISLANDS_ACPI_STATE_ARB_INDEX 1
+#define SISLANDS_ULV_STATE_ARB_INDEX 2
+#define SISLANDS_DRIVER_STATE_ARB_INDEX 3
+
+#define SISLANDS_DPM2_MAX_PULSE_SKIP 256
+
+#define SISLANDS_DPM2_NEAR_TDP_DEC 10
+#define SISLANDS_DPM2_ABOVE_SAFE_INC 5
+#define SISLANDS_DPM2_BELOW_SAFE_INC 20
+
+#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80
+
+#define SISLANDS_DPM2_MAXPS_PERCENT_H 99
+#define SISLANDS_DPM2_MAXPS_PERCENT_M 99
+
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
+#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12
+#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
+#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E
+#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF
+
+#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10
+
+#define SISLANDS_VRC_DFLT 0xC000B3
+#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687
+#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035
+#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550
+
+#define SI_ASI_DFLT 10000
+#define SI_BSP_DFLT 0x41EB
+#define SI_BSU_DFLT 0x2
+#define SI_AH_DFLT 5
+#define SI_RLP_DFLT 25
+#define SI_RMP_DFLT 65
+#define SI_LHP_DFLT 40
+#define SI_LMP_DFLT 15
+#define SI_TD_DFLT 0
+#define SI_UTC_DFLT_00 0x24
+#define SI_UTC_DFLT_01 0x22
+#define SI_UTC_DFLT_02 0x22
+#define SI_UTC_DFLT_03 0x22
+#define SI_UTC_DFLT_04 0x22
+#define SI_UTC_DFLT_05 0x22
+#define SI_UTC_DFLT_06 0x22
+#define SI_UTC_DFLT_07 0x22
+#define SI_UTC_DFLT_08 0x22
+#define SI_UTC_DFLT_09 0x22
+#define SI_UTC_DFLT_10 0x22
+#define SI_UTC_DFLT_11 0x22
+#define SI_UTC_DFLT_12 0x22
+#define SI_UTC_DFLT_13 0x22
+#define SI_UTC_DFLT_14 0x22
+#define SI_DTC_DFLT_00 0x24
+#define SI_DTC_DFLT_01 0x22
+#define SI_DTC_DFLT_02 0x22
+#define SI_DTC_DFLT_03 0x22
+#define SI_DTC_DFLT_04 0x22
+#define SI_DTC_DFLT_05 0x22
+#define SI_DTC_DFLT_06 0x22
+#define SI_DTC_DFLT_07 0x22
+#define SI_DTC_DFLT_08 0x22
+#define SI_DTC_DFLT_09 0x22
+#define SI_DTC_DFLT_10 0x22
+#define SI_DTC_DFLT_11 0x22
+#define SI_DTC_DFLT_12 0x22
+#define SI_DTC_DFLT_13 0x22
+#define SI_DTC_DFLT_14 0x22
+#define SI_VRC_DFLT 0x0000C003
+#define SI_VOLTAGERESPONSETIME_DFLT 1000
+#define SI_BACKBIASRESPONSETIME_DFLT 1000
+#define SI_VRU_DFLT 0x3
+#define SI_SPLLSTEPTIME_DFLT 0x1000
+#define SI_SPLLSTEPUNIT_DFLT 0x3
+#define SI_TPU_DFLT 0
+#define SI_TPC_DFLT 0x200
+#define SI_SSTU_DFLT 0
+#define SI_SST_DFLT 0x00C8
+#define SI_GICST_DFLT 0x200
+#define SI_FCT_DFLT 0x0400
+#define SI_FCTU_DFLT 0
+#define SI_CTXCGTT3DRPHC_DFLT 0x20
+#define SI_CTXCGTT3DRSDC_DFLT 0x40
+#define SI_VDDC3DOORPHC_DFLT 0x100
+#define SI_VDDC3DOORSDC_DFLT 0x7
+#define SI_VDDC3DOORSU_DFLT 0
+#define SI_MPLLLOCKTIME_DFLT 100
+#define SI_MPLLRESETTIME_DFLT 150
+#define SI_VCOSTEPPCT_DFLT 20
+#define SI_ENDINGVCOSTEPPCT_DFLT 5
+#define SI_REFERENCEDIVIDER_DFLT 4
+
+#define SI_PM_NUMBER_OF_TC 15
+#define SI_PM_NUMBER_OF_SCLKS 20
+#define SI_PM_NUMBER_OF_MCLKS 4
+#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4
+#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3
+
+/* XXX are these ok? */
+#define SI_TEMP_RANGE_MIN (90 * 1000)
+#define SI_TEMP_RANGE_MAX (120 * 1000)
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+enum ni_dc_cac_level
+{
+ NISLANDS_DCCAC_LEVEL_0 = 0,
+ NISLANDS_DCCAC_LEVEL_1,
+ NISLANDS_DCCAC_LEVEL_2,
+ NISLANDS_DCCAC_LEVEL_3,
+ NISLANDS_DCCAC_LEVEL_4,
+ NISLANDS_DCCAC_LEVEL_5,
+ NISLANDS_DCCAC_LEVEL_6,
+ NISLANDS_DCCAC_LEVEL_7,
+ NISLANDS_DCCAC_MAX_LEVELS
+};
+
+enum si_cac_config_reg_type
+{
+ SISLANDS_CACCONFIG_MMR = 0,
+ SISLANDS_CACCONFIG_CGIND,
+ SISLANDS_CACCONFIG_MAX
+};
+
+enum si_power_level {
+ SI_POWER_LEVEL_LOW = 0,
+ SI_POWER_LEVEL_MEDIUM = 1,
+ SI_POWER_LEVEL_HIGH = 2,
+ SI_POWER_LEVEL_CTXSW = 3,
+};
+
+enum si_td {
+ SI_TD_AUTO,
+ SI_TD_UP,
+ SI_TD_DOWN,
+};
+
+enum si_display_watermark {
+ SI_DISPLAY_WATERMARK_LOW = 0,
+ SI_DISPLAY_WATERMARK_HIGH = 1,
+};
+
+enum si_display_gap
+{
+ SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
+ SI_PM_DISPLAY_GAP_VBLANK = 1,
+ SI_PM_DISPLAY_GAP_WATERMARK = 2,
+ SI_PM_DISPLAY_GAP_IGNORE = 3,
+};
+
+extern const struct amd_ip_funcs si_dpm_ip_funcs;
+
+struct ni_leakage_coeffients
+{
+ u32 at;
+ u32 bt;
+ u32 av;
+ u32 bv;
+ s32 t_slope;
+ s32 t_intercept;
+ u32 t_ref;
+};
+
+struct SMC_Evergreen_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
+
+struct evergreen_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct evergreen_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_Evergreen_MCRegisterSet
+{
+ uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
+
+struct SMC_Evergreen_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
+ SMC_Evergreen_MCRegisterSet data[5];
+};
+
+typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
+
+struct SMC_NIslands_MCRegisterSet
+{
+ uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
+
+struct ni_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct SMC_NIslands_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
+
+struct SMC_NIslands_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+ SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
+
+struct evergreen_ulv_param {
+ bool supported;
+ struct rv7xx_pl *pl;
+};
+
+struct evergreen_arb_registers {
+ u32 mc_arb_dram_timing;
+ u32 mc_arb_dram_timing2;
+ u32 mc_arb_rfsh_rate;
+ u32 mc_arb_burst_time;
+};
+
+struct at {
+ u32 rlp;
+ u32 rmp;
+ u32 lhp;
+ u32 lmp;
+};
+
+struct ni_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 mclk_pwrmgt_cntl;
+ u32 dll_cntl;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_ad_func_cntl_2;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_dq_func_cntl_2;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct RV770_SMC_SCLK_VALUE
+{
+ uint32_t vCG_SPLL_FUNC_CNTL;
+ uint32_t vCG_SPLL_FUNC_CNTL_2;
+ uint32_t vCG_SPLL_FUNC_CNTL_3;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t sclk_value;
+};
+
+typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
+
+struct RV770_SMC_MCLK_VALUE
+{
+ uint32_t vMPLL_AD_FUNC_CNTL;
+ uint32_t vMPLL_AD_FUNC_CNTL_2;
+ uint32_t vMPLL_DQ_FUNC_CNTL;
+ uint32_t vMPLL_DQ_FUNC_CNTL_2;
+ uint32_t vMCLK_PWRMGT_CNTL;
+ uint32_t vDLL_CNTL;
+ uint32_t vMPLL_SS;
+ uint32_t vMPLL_SS2;
+ uint32_t mclk_value;
+};
+
+typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
+
+struct RV730_SMC_MCLK_VALUE
+{
+ uint32_t vMCLK_PWRMGT_CNTL;
+ uint32_t vDLL_CNTL;
+ uint32_t vMPLL_FUNC_CNTL;
+ uint32_t vMPLL_FUNC_CNTL2;
+ uint32_t vMPLL_FUNC_CNTL3;
+ uint32_t vMPLL_SS;
+ uint32_t vMPLL_SS2;
+ uint32_t mclk_value;
+};
+
+typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
+
+struct RV770_SMC_VOLTAGE_VALUE
+{
+ uint16_t value;
+ uint8_t index;
+ uint8_t padding;
+};
+
+typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
+
+union RV7XX_SMC_MCLK_VALUE
+{
+ RV770_SMC_MCLK_VALUE mclk770;
+ RV730_SMC_MCLK_VALUE mclk730;
+};
+
+typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
+
+struct RV770_SMC_HW_PERFORMANCE_LEVEL
+{
+ uint8_t arbValue;
+ union{
+ uint8_t seqValue;
+ uint8_t ACIndex;
+ };
+ uint8_t displayWatermark;
+ uint8_t gen2PCIE;
+ uint8_t gen2XSP;
+ uint8_t backbias;
+ uint8_t strobeMode;
+ uint8_t mcFlags;
+ uint32_t aT;
+ uint32_t bSP;
+ RV770_SMC_SCLK_VALUE sclk;
+ RV7XX_SMC_MCLK_VALUE mclk;
+ RV770_SMC_VOLTAGE_VALUE vddc;
+ RV770_SMC_VOLTAGE_VALUE mvdd;
+ RV770_SMC_VOLTAGE_VALUE vddci;
+ uint8_t reserved1;
+ uint8_t reserved2;
+ uint8_t stateFlags;
+ uint8_t padding;
+};
+
+typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
+
+struct RV770_SMC_SWSTATE
+{
+ uint8_t flags;
+ uint8_t padding1;
+ uint8_t padding2;
+ uint8_t padding3;
+ RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
+
+struct RV770_SMC_VOLTAGEMASKTABLE
+{
+ uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
+ uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
+
+struct RV770_SMC_STATETABLE
+{
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint8_t highSMIO[MAX_NO_VREG_STEPS];
+ uint32_t lowSMIO[MAX_NO_VREG_STEPS];
+ RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ RV770_SMC_SWSTATE initialState;
+ RV770_SMC_SWSTATE ACPIState;
+ RV770_SMC_SWSTATE driverState;
+ RV770_SMC_SWSTATE ULVState;
+};
+
+typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
+
+struct vddc_table_entry {
+ u16 vddc;
+ u8 vddc_index;
+ u8 high_smio;
+ u32 low_smio;
+};
+
+struct rv770_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_ad_func_cntl_2;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_dq_func_cntl_2;
+ u32 mclk_pwrmgt_cntl;
+ u32 dll_cntl;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct rv730_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 mclk_pwrmgt_cntl;
+ u32 dll_cntl;
+ u32 mpll_func_cntl;
+ u32 mpll_func_cntl2;
+ u32 mpll_func_cntl3;
+ u32 mpll_ss;
+ u32 mpll_ss2;
+};
+
+union r7xx_clock_registers {
+ struct rv770_clock_registers rv770;
+ struct rv730_clock_registers rv730;
+};
+
+struct rv7xx_power_info {
+ /* flags */
+ bool mem_gddr5;
+ bool pcie_gen2;
+ bool dynamic_pcie_gen2;
+ bool acpi_pcie_gen2;
+ bool boot_in_gen2;
+ bool voltage_control; /* vddc */
+ bool mvdd_control;
+ bool sclk_ss;
+ bool mclk_ss;
+ bool dynamic_ss;
+ bool gfx_clock_gating;
+ bool mg_clock_gating;
+ bool mgcgtssm;
+ bool power_gating;
+ bool thermal_protection;
+ bool display_gap;
+ bool dcodt;
+ bool ulps;
+ /* registers */
+ union r7xx_clock_registers clk_regs;
+ u32 s0_vid_lower_smio_cntl;
+ /* voltage */
+ u32 vddc_mask_low;
+ u32 mvdd_mask_low;
+ u32 mvdd_split_frequency;
+ u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
+ u16 max_vddc;
+ u16 max_vddc_in_table;
+ u16 min_vddc_in_table;
+ struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
+ u8 valid_vddc_entries;
+ /* dc odt */
+ u32 mclk_odt_threshold;
+ u8 odt_value_0[2];
+ u8 odt_value_1[2];
+ /* stored values */
+ u32 boot_sclk;
+ u16 acpi_vddc;
+ u32 ref_div;
+ u32 active_auto_throttle_sources;
+ u32 mclk_stutter_mode_threshold;
+ u32 mclk_strobe_mode_threshold;
+ u32 mclk_edc_enable_threshold;
+ u32 bsp;
+ u32 bsu;
+ u32 pbsp;
+ u32 pbsu;
+ u32 dsp;
+ u32 psp;
+ u32 asi;
+ u32 pasi;
+ u32 vrc;
+ u32 restricted_levels;
+ u32 rlp;
+ u32 rmp;
+ u32 lhp;
+ u32 lmp;
+ /* smc offsets */
+ u16 state_table_start;
+ u16 soft_regs_start;
+ u16 sram_end;
+ /* scratch structs */
+ RV770_SMC_STATETABLE smc_statetable;
+};
+
+struct rv7xx_pl {
+ u32 sclk;
+ u32 mclk;
+ u16 vddc;
+ u16 vddci; /* eg+ only */
+ u32 flags;
+ enum amdgpu_pcie_gen pcie_gen; /* si+ only */
+};
+
+struct rv7xx_ps {
+ struct rv7xx_pl high;
+ struct rv7xx_pl medium;
+ struct rv7xx_pl low;
+ bool dc_compatible;
+};
+
+struct si_ps {
+ u16 performance_level_count;
+ bool dc_compatible;
+ struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
+};
+
+struct ni_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ni_cac_data
+{
+ struct ni_leakage_coeffients leakage_coefficients;
+ u32 i_leakage;
+ s32 leakage_minimum_temperature;
+ u32 pwr_const;
+ u32 dc_cac_value;
+ u32 bif_cac_value;
+ u32 lkge_pwr;
+ u8 mc_wr_weight;
+ u8 mc_rd_weight;
+ u8 allow_ovrflw;
+ u8 num_win_tdp;
+ u8 l2num_win_tdp;
+ u8 lts_truncate_n;
+};
+
+struct evergreen_power_info {
+ /* must be first! */
+ struct rv7xx_power_info rv7xx;
+ /* flags */
+ bool vddci_control;
+ bool dynamic_ac_timing;
+ bool abm;
+ bool mcls;
+ bool light_sleep;
+ bool memory_transition;
+ bool pcie_performance_request;
+ bool pcie_performance_request_registered;
+ bool sclk_deep_sleep;
+ bool dll_default_on;
+ bool ls_clock_gating;
+ bool smu_uvd_hs;
+ bool uvd_enabled;
+ /* stored values */
+ u16 acpi_vddci;
+ u8 mvdd_high_index;
+ u8 mvdd_low_index;
+ u32 mclk_edc_wr_enable_threshold;
+ struct evergreen_mc_reg_table mc_reg_table;
+ struct atom_voltage_table vddc_voltage_table;
+ struct atom_voltage_table vddci_voltage_table;
+ struct evergreen_arb_registers bootup_arb_registers;
+ struct evergreen_ulv_param ulv;
+ struct at ats[2];
+ /* smc offsets */
+ u16 mc_reg_table_start;
+ struct amdgpu_ps current_rps;
+ struct rv7xx_ps current_ps;
+ struct amdgpu_ps requested_rps;
+ struct rv7xx_ps requested_ps;
+};
+
+struct PP_NIslands_Dpm2PerfLevel
+{
+ uint8_t MaxPS;
+ uint8_t TgtAct;
+ uint8_t MaxPS_StepInc;
+ uint8_t MaxPS_StepDec;
+ uint8_t PSST;
+ uint8_t NearTDPDec;
+ uint8_t AboveSafeInc;
+ uint8_t BelowSafeInc;
+ uint8_t PSDeltaLimit;
+ uint8_t PSDeltaWin;
+ uint8_t Reserved[6];
+};
+
+typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
+
+struct PP_NIslands_DPM2Parameters
+{
+ uint32_t TDPLimit;
+ uint32_t NearTDPLimit;
+ uint32_t SafePowerLimit;
+ uint32_t PowerBoostLimit;
+};
+typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
+
+struct NISLANDS_SMC_SCLK_VALUE
+{
+ uint32_t vCG_SPLL_FUNC_CNTL;
+ uint32_t vCG_SPLL_FUNC_CNTL_2;
+ uint32_t vCG_SPLL_FUNC_CNTL_3;
+ uint32_t vCG_SPLL_FUNC_CNTL_4;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t sclk_value;
+};
+
+typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
+
+struct NISLANDS_SMC_MCLK_VALUE
+{
+ uint32_t vMPLL_FUNC_CNTL;
+ uint32_t vMPLL_FUNC_CNTL_1;
+ uint32_t vMPLL_FUNC_CNTL_2;
+ uint32_t vMPLL_AD_FUNC_CNTL;
+ uint32_t vMPLL_AD_FUNC_CNTL_2;
+ uint32_t vMPLL_DQ_FUNC_CNTL;
+ uint32_t vMPLL_DQ_FUNC_CNTL_2;
+ uint32_t vMCLK_PWRMGT_CNTL;
+ uint32_t vDLL_CNTL;
+ uint32_t vMPLL_SS;
+ uint32_t vMPLL_SS2;
+ uint32_t mclk_value;
+};
+
+typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
+
+struct NISLANDS_SMC_VOLTAGE_VALUE
+{
+ uint16_t value;
+ uint8_t index;
+ uint8_t padding;
+};
+
+typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
+
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+ uint8_t arbValue;
+ uint8_t ACIndex;
+ uint8_t displayWatermark;
+ uint8_t gen2PCIE;
+ uint8_t reserved1;
+ uint8_t reserved2;
+ uint8_t strobeMode;
+ uint8_t mcFlags;
+ uint32_t aT;
+ uint32_t bSP;
+ NISLANDS_SMC_SCLK_VALUE sclk;
+ NISLANDS_SMC_MCLK_VALUE mclk;
+ NISLANDS_SMC_VOLTAGE_VALUE vddc;
+ NISLANDS_SMC_VOLTAGE_VALUE mvdd;
+ NISLANDS_SMC_VOLTAGE_VALUE vddci;
+ NISLANDS_SMC_VOLTAGE_VALUE std_vddc;
+ uint32_t powergate_en;
+ uint8_t hUp;
+ uint8_t hDown;
+ uint8_t stateFlags;
+ uint8_t arbRefreshState;
+ uint32_t SQPowerThrottle;
+ uint32_t SQPowerThrottle_2;
+ uint32_t reserved[2];
+ PP_NIslands_Dpm2PerfLevel dpm2;
+};
+
+typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct NISLANDS_SMC_SWSTATE
+{
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
+};
+
+typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
+
+struct NISLANDS_SMC_VOLTAGEMASKTABLE
+{
+ uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+ uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define NISLANDS_MAX_NO_VREG_STEPS 32
+
+struct NISLANDS_SMC_STATETABLE
+{
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+ uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+ NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ PP_NIslands_DPM2Parameters dpm2Params;
+ NISLANDS_SMC_SWSTATE initialState;
+ NISLANDS_SMC_SWSTATE ACPIState;
+ NISLANDS_SMC_SWSTATE ULVState;
+ NISLANDS_SMC_SWSTATE driverState;
+ NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
+
+struct ni_power_info {
+ /* must be first! */
+ struct evergreen_power_info eg;
+ struct ni_clock_registers clock_registers;
+ struct ni_mc_reg_table mc_reg_table;
+ u32 mclk_rtt_mode_threshold;
+ /* flags */
+ bool use_power_boost_limit;
+ bool support_cac_long_term_average;
+ bool cac_enabled;
+ bool cac_configuration_required;
+ bool driver_calculate_cac_leakage;
+ bool pc_enabled;
+ bool enable_power_containment;
+ bool enable_cac;
+ bool enable_sq_ramping;
+ /* smc offsets */
+ u16 arb_table_start;
+ u16 fan_table_start;
+ u16 cac_table_start;
+ u16 spll_table_start;
+ /* CAC stuff */
+ struct ni_cac_data cac_data;
+ u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
+ const struct ni_cac_weights *cac_weights;
+ u8 lta_window_size;
+ u8 lts_truncate;
+ struct si_ps current_ps;
+ struct si_ps requested_ps;
+ /* scratch structs */
+ SMC_NIslands_MCRegisters smc_mc_reg_table;
+ NISLANDS_SMC_STATETABLE smc_statetable;
+};
+
+struct si_cac_config_reg
+{
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum si_cac_config_reg_type type;
+};
+
+struct si_powertune_data
+{
+ u32 cac_window;
+ u32 l2_lta_window_size_default;
+ u8 lts_truncate_default;
+ u8 shift_n_default;
+ u8 operating_temp;
+ struct ni_leakage_coeffients leakage_coefficients;
+ u32 fixed_kt;
+ u32 lkge_lut_v0_percent;
+ u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS];
+ bool enable_powertune_by_default;
+};
+
+struct si_dyn_powertune_data
+{
+ u32 cac_leakage;
+ s32 leakage_minimum_temperature;
+ u32 wintime;
+ u32 l2_lta_window_size;
+ u8 lts_truncate;
+ u8 shift_n;
+ u8 dc_pwr_value;
+ bool disable_uvd_powertune;
+};
+
+struct si_dte_data
+{
+ u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+ u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+ u32 k;
+ u32 t0;
+ u32 max_t;
+ u8 window_size;
+ u8 temp_select;
+ u8 dte_mode;
+ u8 tdep_count;
+ u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ u32 t_threshold;
+ bool enable_dte_by_default;
+};
+
+struct si_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 dll_cntl;
+ u32 mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_func_cntl;
+ u32 mpll_func_cntl_1;
+ u32 mpll_func_cntl_2;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct si_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct si_leakage_voltage_entry
+{
+ u16 voltage;
+ u16 leakage_index;
+};
+
+struct si_leakage_voltage
+{
+ u16 count;
+ struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct si_ulv_param {
+ bool supported;
+ u32 cg_ulv_control;
+ u32 cg_ulv_parameter;
+ u32 volt_change_delay;
+ struct rv7xx_pl pl;
+ bool one_pcie_lane_in_ulv;
+};
+
+struct si_power_info {
+ /* must be first! */
+ struct ni_power_info ni;
+ struct si_clock_registers clock_registers;
+ struct si_mc_reg_table mc_reg_table;
+ struct atom_voltage_table mvdd_voltage_table;
+ struct atom_voltage_table vddc_phase_shed_table;
+ struct si_leakage_voltage leakage_voltage;
+ u16 mvdd_bootup_value;
+ struct si_ulv_param ulv;
+ u32 max_cu;
+ /* pcie gen */
+ enum amdgpu_pcie_gen force_pcie_gen;
+ enum amdgpu_pcie_gen boot_pcie_gen;
+ enum amdgpu_pcie_gen acpi_pcie_gen;
+ u32 sys_pcie_mask;
+ /* flags */
+ bool enable_dte;
+ bool enable_ppm;
+ bool vddc_phase_shed_control;
+ bool pspp_notify_required;
+ bool sclk_deep_sleep_above_low;
+ bool voltage_control_svi2;
+ bool vddci_control_svi2;
+ /* smc offsets */
+ u32 sram_end;
+ u32 state_table_start;
+ u32 soft_regs_start;
+ u32 mc_reg_table_start;
+ u32 arb_table_start;
+ u32 cac_table_start;
+ u32 dte_table_start;
+ u32 spll_table_start;
+ u32 papm_cfg_table_start;
+ u32 fan_table_start;
+ /* CAC stuff */
+ const struct si_cac_config_reg *cac_weights;
+ const struct si_cac_config_reg *lcac_config;
+ const struct si_cac_config_reg *cac_override;
+ const struct si_powertune_data *powertune_data;
+ struct si_dyn_powertune_data dyn_powertune_data;
+ /* DTE stuff */
+ struct si_dte_data dte_data;
+ /* scratch structs */
+ SMC_SIslands_MCRegisters smc_mc_reg_table;
+ SISLANDS_SMC_STATETABLE smc_statetable;
+ PP_SIslands_PAPMParameters papm_parm;
+ /* SVI2 */
+ u8 svd_gpio_id;
+ u8 svc_gpio_id;
+ /* fan control */
+ bool fan_ctrl_is_in_default_mode;
+ u32 t_min;
+ u32 fan_ctrl_default_mode;
+ bool fan_is_controlled_by_smc;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
new file mode 100644
index 000000000000..8fae3d4a2360
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "amdgpu.h"
+#include "amdgpu_ih.h"
+#include "si/sid.h"
+#include "si_ih.h"
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev);
+
+static void si_ih_enable_interrupts(struct amdgpu_device *adev)
+{
+ u32 ih_cntl = RREG32(IH_CNTL);
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+ ih_cntl |= ENABLE_INTR;
+ ih_rb_cntl |= IH_RB_ENABLE;
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ adev->irq.ih.enabled = true;
+}
+
+static void si_ih_disable_interrupts(struct amdgpu_device *adev)
+{
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+ u32 ih_cntl = RREG32(IH_CNTL);
+
+ ih_rb_cntl &= ~IH_RB_ENABLE;
+ ih_cntl &= ~ENABLE_INTR;
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+ adev->irq.ih.enabled = false;
+ adev->irq.ih.rptr = 0;
+}
+
+static int si_ih_irq_init(struct amdgpu_device *adev)
+{
+ int rb_bufsz;
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+ u64 wptr_off;
+
+ si_ih_disable_interrupts(adev);
+ WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+ WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
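+ /* the ring base register takes a 256 byte aligned address and the
+ * size field the log2 of the ring size in dwords
+ */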
+ WREG32(IH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
+ rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
+
+ ih_rb_cntl = IH_WPTR_OVERFLOW_ENABLE |
+ IH_WPTR_OVERFLOW_CLEAR |
+ (rb_bufsz << 1) |
+ IH_WPTR_WRITEBACK_ENABLE;
+
+ wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
+ WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
+ WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+
+ ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+ if (adev->irq.msi_enabled)
+ ih_cntl |= RPTR_REARM;
+ WREG32(IH_CNTL, ih_cntl);
+
+ pci_set_master(adev->pdev);
+ si_ih_enable_interrupts(adev);
+
+ return 0;
+}
+
+static void si_ih_irq_disable(struct amdgpu_device *adev)
+{
+ si_ih_disable_interrupts(adev);
+ mdelay(1);
+}
+
+static u32 si_ih_get_wptr(struct amdgpu_device *adev)
+{
+ u32 wptr, tmp;
+
+ wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+
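+ /* on overflow, resync the read pointer just past the newest entry and
+ * clear the overflow flag so processing can continue
+ */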
+ if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
+ wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
+ adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ return (wptr & adev->irq.ih.ptr_mask);
+}
+
+static void si_ih_decode_iv(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
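+ /* rptr is a byte offset into the ring; each IV entry is 16 bytes (4 dwords) */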
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ uint32_t dw[4];
+
+ dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+
+ entry->src_id = dw[0] & 0xff;
+ entry->src_data = dw[1] & 0xfffffff;
+ entry->ring_id = dw[2] & 0xff;
+ entry->vm_id = (dw[2] >> 8) & 0xff;
+
+ adev->irq.ih.rptr += 16;
+}
+
+static void si_ih_set_rptr(struct amdgpu_device *adev)
+{
+ WREG32(IH_RB_RPTR, adev->irq.ih.rptr);
+}
+
+static int si_ih_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_ih_set_interrupt_funcs(adev);
+
+ return 0;
+}
+
+static int si_ih_sw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ if (r)
+ return r;
+
+ return amdgpu_irq_init(adev);
+}
+
+static int si_ih_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev);
+
+ return 0;
+}
+
+static int si_ih_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_ih_irq_init(adev);
+}
+
+static int si_ih_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ si_ih_irq_disable(adev);
+
+ return 0;
+}
+
+static int si_ih_suspend(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_ih_hw_fini(adev);
+}
+
+static int si_ih_resume(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ return si_ih_hw_init(adev);
+}
+
+static bool si_ih_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+ return false;
+
+ return true;
+}
+
+static int si_ih_wait_for_idle(void *handle)
+{
+ unsigned i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (si_ih_is_idle(handle))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int si_ih_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+ u32 tmp = RREG32(SRBM_STATUS);
+
+ if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(SRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(SRBM_SOFT_RESET, tmp);
+ tmp = RREG32(SRBM_SOFT_RESET);
+
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int si_ih_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int si_ih_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+const struct amd_ip_funcs si_ih_ip_funcs = {
+ .name = "si_ih",
+ .early_init = si_ih_early_init,
+ .late_init = NULL,
+ .sw_init = si_ih_sw_init,
+ .sw_fini = si_ih_sw_fini,
+ .hw_init = si_ih_hw_init,
+ .hw_fini = si_ih_hw_fini,
+ .suspend = si_ih_suspend,
+ .resume = si_ih_resume,
+ .is_idle = si_ih_is_idle,
+ .wait_for_idle = si_ih_wait_for_idle,
+ .soft_reset = si_ih_soft_reset,
+ .set_clockgating_state = si_ih_set_clockgating_state,
+ .set_powergating_state = si_ih_set_powergating_state,
+};
+
+static const struct amdgpu_ih_funcs si_ih_funcs = {
+ .get_wptr = si_ih_get_wptr,
+ .decode_iv = si_ih_decode_iv,
+ .set_rptr = si_ih_set_rptr
+};
+
+static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
+{
+ if (adev->irq.ih_funcs == NULL)
+ adev->irq.ih_funcs = &si_ih_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h
new file mode 100644
index 000000000000..f3e3a954369c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SI_IH_H__
+#define __SI_IH_H__
+
+extern const struct amd_ip_funcs si_ih_ip_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
new file mode 100644
index 000000000000..668ba99d6c05
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "si/sid.h"
+#include "ppsmc.h"
+#include "amdgpu_ucode.h"
+#include "sislands_smc.h"
+
+static int si_set_smc_sram_address(struct amdgpu_device *adev,
+ u32 smc_address, u32 limit)
+{
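+ /* SMC SRAM accesses must be dword aligned and must not run past the limit */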
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(SMC_IND_INDEX_0, smc_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 data, original_data, addr, extra_shift;
+
+ if (smc_start_address & 3)
+ return -EINVAL;
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+
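+ /* SMC_IND_INDEX_0/SMC_IND_DATA_0 form a single indirect port, so the
+ * index lock must be held across the whole transfer
+ */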
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ ret = si_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ goto done;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = si_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ goto done;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* SMC address space is BE */
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = si_set_smc_sram_address(adev, addr, limit);
+ if (ret)
+ goto done;
+
+ WREG32(SMC_IND_DATA_0, data);
+ }
+
+done:
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return ret;
+}
+
+void amdgpu_si_start_smc(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+ tmp &= ~RST_REG;
+
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void amdgpu_si_reset_smc(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
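+ /* dummy reads, presumably to flush any outstanding writes before the
+ * reset is asserted
+ */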
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+
+ tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
+ RST_REG;
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
+{
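+ /* plant what is presumably a jump to the ucode entry point at SMC
+ * address 0 so the firmware starts once released from reset
+ */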
+ static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
+
+ return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ if (enable)
+ tmp &= ~CK_DISABLE;
+ else
+ tmp |= CK_DISABLE;
+
+ WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
+{
+ u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+ u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ if (!(rst & RST_REG) && !(clk & CK_DISABLE))
+ return true;
+
+ return false;
+}
+
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
+ PPSMC_Msg msg)
+{
+ u32 tmp;
+ int i;
+
+ if (!amdgpu_si_is_smc_running(adev))
+ return PPSMC_Result_Failed;
+
+ WREG32(SMC_MESSAGE_0, msg);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(SMC_RESP_0);
+ if (tmp != 0)
+ break;
+ udelay(1);
+ }
+
+ return (PPSMC_Result)RREG32(SMC_RESP_0);
+}
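+
amdgpu_si_send_msg_to_smc() implements the SMC mailbox handshake: the message ID is written to SMC_MESSAGE_0 and SMC_RESP_0 is polled until the firmware posts a nonzero result, which is then returned as the PPSMC_Result. A hedged usage sketch (the message ID is illustrative; PPSMC_Result_OK comes from ppsmc.h):

	PPSMC_Result result;

	result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt);
	if (result != PPSMC_Result_OK)
		DRM_ERROR("SMC message failed: 0x%x\n", result);

Note that a poll timeout falls through to one final read of SMC_RESP_0, so a stuck firmware surfaces as whatever value (typically 0) is left in the response register.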
+
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ int i;
+
+ if (!amdgpu_si_is_smc_running(adev))
+ return PPSMC_Result_OK;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ if ((tmp & CKEN) == 0)
+ break;
+ udelay(1);
+ }
+
+ return PPSMC_Result_OK;
+}
+
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
+{
+ const struct smc_firmware_header_v1_0 *hdr;
+ unsigned long flags;
+ u32 ucode_start_address;
+ u32 ucode_size;
+ const u8 *src;
+ u32 data;
+
+ if (!adev->pm.fw)
+ return -EINVAL;
+
+ hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ if (ucode_size & 3)
+ return -EINVAL;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ WREG32(SMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ while (ucode_size >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ ucode_size -= 4;
+ }
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return 0;
+}
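+
amdgpu_si_load_smc_ucode() programs SMC_IND_INDEX_0 once and sets AUTO_INCREMENT_IND_0, so every subsequent SMC_IND_DATA_0 write lands at the next dword; the index register is never rewritten inside the loop. A hypothetical host-side model of that access pattern (names and sizes are illustrative, not hardware-accurate):

#include <stdint.h>

struct ind_regs {
	uint32_t sram[0x20000 / 4];
	uint32_t index;          /* models SMC_IND_INDEX_0 */
	int auto_increment;      /* models AUTO_INCREMENT_IND_0 */
};

/* Models a write to SMC_IND_DATA_0: store at the current index, then
 * let the "hardware" advance the index when auto-increment is on. */
static void ind_write_data(struct ind_regs *r, uint32_t val)
{
	r->sram[r->index / 4] = val;
	if (r->auto_increment)
		r->index += 4;
}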
+
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 *value, u32 limit)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ ret = si_set_smc_sram_address(adev, smc_address, limit);
+ if (ret == 0)
+ *value = RREG32(SMC_IND_DATA_0);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return ret;
+}
+
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 value, u32 limit)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&adev->smc_idx_lock, flags);
+ ret = si_set_smc_sram_address(adev, smc_address, limit);
+ if (ret == 0)
+ WREG32(SMC_IND_DATA_0, value);
+ spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
new file mode 100644
index 000000000000..d2930eceaf3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef PP_SISLANDS_SMC_H
+#define PP_SISLANDS_SMC_H
+
+#include "ppsmc.h"
+
+#pragma pack(push, 1)
+
+#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
+
+struct PP_SIslands_Dpm2PerfLevel
+{
+ uint8_t MaxPS;
+ uint8_t TgtAct;
+ uint8_t MaxPS_StepInc;
+ uint8_t MaxPS_StepDec;
+ uint8_t PSSamplingTime;
+ uint8_t NearTDPDec;
+ uint8_t AboveSafeInc;
+ uint8_t BelowSafeInc;
+ uint8_t PSDeltaLimit;
+ uint8_t PSDeltaWin;
+ uint16_t PwrEfficiencyRatio;
+ uint8_t Reserved[4];
+};
+
+typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel;
+
+struct PP_SIslands_DPM2Status
+{
+ uint32_t dpm2Flags;
+ uint8_t CurrPSkip;
+ uint8_t CurrPSkipPowerShift;
+ uint8_t CurrPSkipTDP;
+ uint8_t CurrPSkipOCP;
+ uint8_t MaxSPLLIndex;
+ uint8_t MinSPLLIndex;
+ uint8_t CurrSPLLIndex;
+ uint8_t InfSweepMode;
+ uint8_t InfSweepDir;
+ uint8_t TDPexceeded;
+ uint8_t reserved;
+ uint8_t SwitchDownThreshold;
+ uint32_t SwitchDownCounter;
+ uint32_t SysScalingFactor;
+};
+
+typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status;
+
+struct PP_SIslands_DPM2Parameters
+{
+ uint32_t TDPLimit;
+ uint32_t NearTDPLimit;
+ uint32_t SafePowerLimit;
+ uint32_t PowerBoostLimit;
+ uint32_t MinLimitDelta;
+};
+typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters;
+
+struct PP_SIslands_PAPMStatus
+{
+ uint32_t EstimatedDGPU_T;
+ uint32_t EstimatedDGPU_P;
+ uint32_t EstimatedAPU_T;
+ uint32_t EstimatedAPU_P;
+ uint8_t dGPU_T_Limit_Exceeded;
+ uint8_t reserved[3];
+};
+typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus;
+
+struct PP_SIslands_PAPMParameters
+{
+ uint32_t NearTDPLimitTherm;
+ uint32_t NearTDPLimitPAPM;
+ uint32_t PlatformPowerLimit;
+ uint32_t dGPU_T_Limit;
+ uint32_t dGPU_T_Warning;
+ uint32_t dGPU_T_Hysteresis;
+};
+typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters;
+
+struct SISLANDS_SMC_SCLK_VALUE
+{
+ uint32_t vCG_SPLL_FUNC_CNTL;
+ uint32_t vCG_SPLL_FUNC_CNTL_2;
+ uint32_t vCG_SPLL_FUNC_CNTL_3;
+ uint32_t vCG_SPLL_FUNC_CNTL_4;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t sclk_value;
+};
+
+typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE;
+
+struct SISLANDS_SMC_MCLK_VALUE
+{
+ uint32_t vMPLL_FUNC_CNTL;
+ uint32_t vMPLL_FUNC_CNTL_1;
+ uint32_t vMPLL_FUNC_CNTL_2;
+ uint32_t vMPLL_AD_FUNC_CNTL;
+ uint32_t vMPLL_DQ_FUNC_CNTL;
+ uint32_t vMCLK_PWRMGT_CNTL;
+ uint32_t vDLL_CNTL;
+ uint32_t vMPLL_SS;
+ uint32_t vMPLL_SS2;
+ uint32_t mclk_value;
+};
+
+typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE;
+
+struct SISLANDS_SMC_VOLTAGE_VALUE
+{
+ uint16_t value;
+ uint8_t index;
+ uint8_t phase_settings;
+};
+
+typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE;
+
+struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL
+{
+ uint8_t ACIndex;
+ uint8_t displayWatermark;
+ uint8_t gen2PCIE;
+ uint8_t UVDWatermark;
+ uint8_t VCEWatermark;
+ uint8_t strobeMode;
+ uint8_t mcFlags;
+ uint8_t padding;
+ uint32_t aT;
+ uint32_t bSP;
+ SISLANDS_SMC_SCLK_VALUE sclk;
+ SISLANDS_SMC_MCLK_VALUE mclk;
+ SISLANDS_SMC_VOLTAGE_VALUE vddc;
+ SISLANDS_SMC_VOLTAGE_VALUE mvdd;
+ SISLANDS_SMC_VOLTAGE_VALUE vddci;
+ SISLANDS_SMC_VOLTAGE_VALUE std_vddc;
+ uint8_t hysteresisUp;
+ uint8_t hysteresisDown;
+ uint8_t stateFlags;
+ uint8_t arbRefreshState;
+ uint32_t SQPowerThrottle;
+ uint32_t SQPowerThrottle_2;
+ uint32_t MaxPoweredUpCU;
+ SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc;
+ SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc;
+ uint32_t reserved[2];
+ PP_SIslands_Dpm2PerfLevel dpm2;
+};
+
+#define SISLANDS_SMC_STROBE_RATIO 0x0F
+#define SISLANDS_SMC_STROBE_ENABLE 0x10
+
+#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01
+#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02
+#define SISLANDS_SMC_MC_RTT_ENABLE 0x04
+#define SISLANDS_SMC_MC_STUTTER_EN 0x08
+#define SISLANDS_SMC_MC_PG_EN 0x10
+
+typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL;
+
+struct SISLANDS_SMC_SWSTATE
+{
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[1];
+};
+
+typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
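+
SISLANDS_SMC_SWSTATE uses the pre-C99 trailing-array idiom: levels[1] is declared with a single element, levelCount records how many are actually present, and SISLANDS_SMC_STATETABLE (below) reserves the remaining storage by placing dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] directly after driverState. A size helper sketched under that assumption (the helper itself is hypothetical):

#include <stddef.h>

/* Byte size of a SWSTATE carrying n levels (n >= 1); one level is
 * already counted inside sizeof(SISLANDS_SMC_SWSTATE). */
static inline size_t si_swstate_size(unsigned int n)
{
	return sizeof(SISLANDS_SMC_SWSTATE) +
	       (n - 1) * sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL);
}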
+
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
+#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
+#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
+#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
+
+struct SISLANDS_SMC_VOLTAGEMASKTABLE
+{
+ uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX];
+};
+
+typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
+
+#define SISLANDS_MAX_NO_VREG_STEPS 32
+
+struct SISLANDS_SMC_STATETABLE
+{
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+ SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
+ PP_SIslands_DPM2Parameters dpm2Params;
+ SISLANDS_SMC_SWSTATE initialState;
+ SISLANDS_SMC_SWSTATE ACPIState;
+ SISLANDS_SMC_SWSTATE ULVState;
+ SISLANDS_SMC_SWSTATE driverState;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+};
+
+typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
+
+#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0
+#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC
+#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28
+#define SI_SMC_SOFT_REGISTER_seq_index 0x5C
+#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60
+#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70
+#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78
+#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88
+#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C
+#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98
+#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8
+#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8
+#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC
+#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
+#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
+#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
+
+struct PP_SIslands_FanTable
+{
+ uint8_t fdo_mode;
+ uint8_t padding;
+ int16_t temp_min;
+ int16_t temp_med;
+ int16_t temp_max;
+ int16_t slope1;
+ int16_t slope2;
+ int16_t fdo_min;
+ int16_t hys_up;
+ int16_t hys_down;
+ int16_t hys_slope;
+ int16_t temp_resp_lim;
+ int16_t temp_curr;
+ int16_t slope_curr;
+ int16_t pwm_curr;
+ uint32_t refresh_period;
+ int16_t fdo_max;
+ uint8_t temp_src;
+ int8_t padding2;
+};
+
+typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
+
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
+
+#define SMC_SISLANDS_SCALE_I 7
+#define SMC_SISLANDS_SCALE_R 12
+
+struct PP_SIslands_CacConfig
+{
+ uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
+ uint32_t lkge_lut_V0;
+ uint32_t lkge_lut_Vstep;
+ uint32_t WinTime;
+ uint32_t R_LL;
+ uint32_t calculation_repeats;
+ uint32_t l2numWin_TDP;
+ uint32_t dc_cac;
+ uint8_t lts_truncate_n;
+ uint8_t SHIFT_N;
+ uint8_t log2_PG_LKG_SCALE;
+ uint8_t cac_temp;
+ uint32_t lkge_lut_T0;
+ uint32_t lkge_lut_Tstep;
+};
+
+typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig;
+
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16
+#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
+
+struct SMC_SIslands_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress;
+
+struct SMC_SIslands_MCRegisterSet
+{
+ uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet;
+
+struct SMC_SIslands_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE];
+ SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters;
+
+struct SMC_SIslands_MCArbDramTimingRegisterSet
+{
+ uint32_t mc_arb_dram_timing;
+ uint32_t mc_arb_dram_timing2;
+ uint8_t mc_arb_rfsh_rate;
+ uint8_t mc_arb_burst_time;
+ uint8_t padding[2];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet;
+
+struct SMC_SIslands_MCArbDramTimingRegisters
+{
+ uint8_t arb_current;
+ uint8_t reserved[3];
+ SMC_SIslands_MCArbDramTimingRegisterSet data[16];
+};
+
+typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters;
+
+struct SMC_SISLANDS_SPLL_DIV_TABLE
+{
+ uint32_t freq[256];
+ uint32_t ss[256];
+};
+
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000
+#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20
+
+typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE;
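+
Each freq[] entry packs the SPLL feedback and post dividers, and each ss[] entry packs the spread-spectrum clk_v/clk_s values, per the masks above. Hypothetical accessors showing the intended decoding:

static inline uint32_t spll_fbdiv(uint32_t freq)
{
	return (freq & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) >>
	       SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT;
}

static inline uint32_t spll_pdiv(uint32_t freq)
{
	return (freq & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK) >>
	       SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT;
}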
+
+#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5
+
+#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16
+
+struct Smc_SIslands_DTE_Configuration
+{
+ uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+ uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
+ uint32_t K;
+ uint32_t T0;
+ uint32_t MaxT;
+ uint8_t WindowSize;
+ uint8_t Tdep_count;
+ uint8_t temp_select;
+ uint8_t DTE_mode;
+ uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE];
+ uint32_t Tthreshold;
+};
+
+typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration;
+
+#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000
+
+#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0
+#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4
+#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC
+#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10
+#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 0x14
+#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24
+#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30
+#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38
+#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40
+#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48
+
+#pragma pack(pop)
+
+int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+void amdgpu_si_start_smc(struct amdgpu_device *adev);
+void amdgpu_si_reset_smc(struct amdgpu_device *adev);
+int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev);
+void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable);
+bool amdgpu_si_is_smc_running(struct amdgpu_device *adev);
+PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
+PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev);
+int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
+int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 *value, u32 limit);
+int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
+ u32 value, u32 limit);
+
+#endif
+
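The SISLANDS_SMC_FIRMWARE_HEADER_* offsets locate tables inside SMC RAM: each is a dword slot, relative to SISLANDS_SMC_FIRMWARE_HEADER_LOCATION, holding the start address of the corresponding table. A hedged lookup sketch ('sram_end' is an assumed caller-tracked SMC address-space limit):

	u32 state_table_start;
	int ret;

	ret = amdgpu_si_read_smc_sram_dword(adev,
			SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
			SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
			&state_table_start, sram_end);
	if (ret)
		return ret;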
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
deleted file mode 100644
index f06f6f4dc3a8..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_smum.h"
-
-MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int tonga_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- tonga_dpm_set_funcs(adev);
-
- return 0;
-}
-
-static int tonga_dpm_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30] = "amdgpu/tonga_smc.bin";
- int err;
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int tonga_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = tonga_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int tonga_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int tonga_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- /* smu init only needs to be called at startup, not resume.
- * It should be in sw_init, but requires the fw info gathered
- * in sw_init from other IP modules.
- */
- ret = tonga_smu_init(adev);
- if (ret) {
- DRM_ERROR("SMU initialization failed\n");
- goto fail;
- }
-
- ret = tonga_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
- mutex_unlock(&adev->pm.mutex);
- return 0;
-
-fail:
- adev->firmware.smu_load = false;
- mutex_unlock(&adev->pm.mutex);
- return -EINVAL;
-}
-
-static int tonga_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
- /* smu fini only needs to be called at teardown, not suspend.
- * It should be in sw_fini, but we put it here for symmetry
- * with smu init.
- */
- tonga_smu_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- return 0;
-}
-
-static int tonga_dpm_suspend(void *handle)
-{
- return tonga_dpm_hw_fini(handle);
-}
-
-static int tonga_dpm_resume(void *handle)
-{
- return tonga_dpm_hw_init(handle);
-}
-
-static int tonga_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int tonga_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-const struct amd_ip_funcs tonga_dpm_ip_funcs = {
- .name = "tonga_dpm",
- .early_init = tonga_dpm_early_init,
- .late_init = NULL,
- .sw_init = tonga_dpm_sw_init,
- .sw_fini = tonga_dpm_sw_fini,
- .hw_init = tonga_dpm_hw_init,
- .hw_fini = tonga_dpm_hw_fini,
- .suspend = tonga_dpm_suspend,
- .resume = tonga_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = tonga_dpm_set_clockgating_state,
- .set_powergating_state = tonga_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs tonga_dpm_funcs = {
- .get_temperature = NULL,
- .pre_set_power_state = NULL,
- .set_power_state = NULL,
- .post_set_power_state = NULL,
- .display_configuration_changed = NULL,
- .get_sclk = NULL,
- .get_mclk = NULL,
- .print_power_state = NULL,
- .debugfs_print_current_performance_level = NULL,
- .force_performance_level = NULL,
- .vblank_too_short = NULL,
- .powergate_uvd = NULL,
-};
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->pm.funcs)
- adev->pm.funcs = &tonga_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index c92055805a45..b4ea229bb449 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,10 +373,10 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int tonga_ih_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
{
- u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -384,6 +384,46 @@ static int tonga_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
+ adev->irq.srbm_soft_reset = srbm_soft_reset;
+ return true;
+ } else {
+ adev->irq.srbm_soft_reset = 0;
+ return false;
+ }
+}
+
+static int tonga_ih_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->irq.srbm_soft_reset)
+ return 0;
+
+ return tonga_ih_hw_fini(adev);
+}
+
+static int tonga_ih_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->irq.srbm_soft_reset)
+ return 0;
+
+ return tonga_ih_hw_init(adev);
+}
+
+static int tonga_ih_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+ if (!adev->irq.srbm_soft_reset)
+ return 0;
+ srbm_soft_reset = adev->irq.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -427,7 +467,10 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = {
.resume = tonga_ih_resume,
.is_idle = tonga_ih_is_idle,
.wait_for_idle = tonga_ih_wait_for_idle,
+ .check_soft_reset = tonga_ih_check_soft_reset,
+ .pre_soft_reset = tonga_ih_pre_soft_reset,
.soft_reset = tonga_ih_soft_reset,
+ .post_soft_reset = tonga_ih_post_soft_reset,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
};
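The single soft_reset hook is split into a four-stage sequence here; the assumed ordering, driven by the core reset path (sketch only, the real loop and its function names live in the amdgpu core):

	/* Illustrative only -- not the actual core code. */
	if (funcs->check_soft_reset(adev)) {  /* cache SRBM bits, report hang */
		funcs->pre_soft_reset(adev);  /* quiesce: tonga_ih_hw_fini() */
		funcs->soft_reset(adev);      /* pulse SRBM_SOFT_RESET */
		funcs->post_soft_reset(adev); /* re-init: tonga_ih_hw_init() */
	}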
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
deleted file mode 100644
index 940de1836f8f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_ppsmc.h"
-#include "tonga_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#define TONGA_SMC_SIZE 0x20000
-
-static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
- uint32_t val;
-
- if (smc_address & 3)
- return -EINVAL;
-
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- return 0;
-}
-
-static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
- unsigned long flags;
-
- if (smc_start_address & 3)
- return -EINVAL;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* Bytes are written into the SMC address space with the MSB first */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = tonga_set_smc_sram_address(adev, addr, limit);
-
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
- /* Now write the remaining odd bytes using a read-modify-write cycle */
- data = 0;
-
- result = tonga_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- orig_data = RREG32(mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = tonga_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-out:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int tonga_program_jump_on_start(struct amdgpu_device *adev)
-{
- static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
- tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
- return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32(mmSMC_RESP_0);
- if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, 0x20000);
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- if (!tonga_is_smc_ram_running(adev))
- {
- return -EINVAL;
- }
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg,
- uint32_t parameter)
-{
- if (!tonga_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc(adev, msg);
-}
-
-static int tonga_send_msg_to_smc_with_parameter_without_waiting(
- struct amdgpu_device *adev,
- PPSMC_Msg msg, uint32_t parameter)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- if (!tonga_is_smc_ram_running(adev))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
-static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- uint32_t ucode_size;
- uint32_t ucode_start_address;
- const uint8_t *src;
- uint32_t val;
- uint32_t byte_count;
- uint32_t *data;
- unsigned long flags;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- /* Skip SMC ucode loading on SR-IOV capable boards.
- * vbios does this for us in asic_init in that case.
- */
- if (adev->virtualization.supports_sr_iov)
- return 0;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- src = (const uint8_t *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3) {
- DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (ucode_size > TONGA_SMC_SIZE) {
- DRM_ERROR("SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- byte_count = ucode_size;
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- WREG32(mmSMC_IND_DATA_0, data[0]);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-#if 0 /* not used yet */
-static int tonga_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t *value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = tonga_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int tonga_write_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = tonga_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int tonga_smu_stop_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- return AMDGPU_UCODE_ID_SDMA0;
- case UCODE_ID_SDMA1:
- return AMDGPU_UCODE_ID_SDMA1;
- case UCODE_ID_CP_CE:
- return AMDGPU_UCODE_ID_CP_CE;
- case UCODE_ID_CP_PFP:
- return AMDGPU_UCODE_ID_CP_PFP;
- case UCODE_ID_CP_ME:
- return AMDGPU_UCODE_ID_CP_ME;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- return AMDGPU_UCODE_ID_CP_MEC1;
- case UCODE_ID_CP_MEC_JT2:
- return AMDGPU_UCODE_ID_CP_MEC2;
- case UCODE_ID_RLC_G:
- return AMDGPU_UCODE_ID_RLC_G;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return AMDGPU_UCODE_ID_MAXIMUM;
- }
-}
-
-static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header = NULL;
- uint64_t gpu_addr;
- uint32_t data_size;
-
- if (ucode->fw == NULL)
- return -EINVAL;
-
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
- (fw_type == UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
- data_size = le32_to_cpu(header->jt_size) << 2;
- }
-
- entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = upper_32_bits(gpu_addr);
- entry->image_addr_low = lower_32_bits(gpu_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = data_size;
- entry->num_register_entries = 0;
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
-
- return 0;
-}
-
-static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv;
- struct SMU_DRAMData_TOC *toc;
- uint32_t fw_to_load;
-
- WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)private->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- if (!adev->firmware.smu_load)
- return 0;
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for RLC\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for CE\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for PFP\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for ME\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA0\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA1\n");
- return -EINVAL;
- }
-
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_MASK;
-
- if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
- DRM_ERROR("Fail to request SMU load ucode\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case AMDGPU_UCODE_ID_SDMA0:
- return UCODE_ID_SDMA0_MASK;
- case AMDGPU_UCODE_ID_SDMA1:
- return UCODE_ID_SDMA1_MASK;
- case AMDGPU_UCODE_ID_CP_CE:
- return UCODE_ID_CP_CE_MASK;
- case AMDGPU_UCODE_ID_CP_PFP:
- return UCODE_ID_CP_PFP_MASK;
- case AMDGPU_UCODE_ID_CP_ME:
- return UCODE_ID_CP_ME_MASK;
- case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_CP_MEC2:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_RLC_G:
- return UCODE_ID_RLC_G_MASK;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return 0;
- }
-}
-
-static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_type)
-{
- uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
- int i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
- int i;
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = tonga_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Clear status */
- WREG32_SMC(ixSMU_STATUS, 0);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Set SMU Auto Start */
- val = RREG32_SMC(ixSMU_INPUT_DATA);
- val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
- WREG32_SMC(ixSMU_INPUT_DATA, val);
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Interrupt is not enabled by firmware\n");
- return -EINVAL;
- }
-
- /* Call Test SMU message with 0x20000 offset
- * to trigger SMU start
- */
- tonga_send_msg_to_smc_offset(adev);
-
- /* Wait for done bit to be set */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMU_STATUS);
- if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMU start\n");
- return -EINVAL;
- }
-
- /* Check pass/failed indicator */
- val = RREG32_SMC(ixSMU_STATUS);
- if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
- DRM_ERROR("SMU Firmware start failed\n");
- return -EINVAL;
- }
-
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMU firmware initialization failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
- int i, result;
- uint32_t val;
-
- /* wait for smc boot up */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
- if (val)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMC boot sequence is not completed\n");
- return -EINVAL;
- }
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = tonga_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Set the SMC instruction start point at 0x0 */
- tonga_program_jump_on_start(adev);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMC firmware initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int tonga_smu_start(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
-
- if (!tonga_is_smc_ram_running(adev)) {
- val = RREG32_SMC(ixSMU_FIRMWARE);
- if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
- result = tonga_smu_start_in_non_protection_mode(adev);
- if (result)
- return result;
- } else {
- result = tonga_smu_start_in_protection_mode(adev);
- if (result)
- return result;
- }
- }
-
- return tonga_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = {
- .check_fw_load_finish = tonga_smu_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-int tonga_smu_init(struct amdgpu_device *adev)
-{
- struct tonga_smu_private_data *private;
- uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- uint32_t smu_internal_buffer_size = 200*4096;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
- uint64_t mc_addr;
- void *toc_buf_ptr;
- void *smu_buf_ptr;
- int ret;
-
- private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL);
- if (NULL == private)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = private;
- adev->smu.fw_flags = 0;
-
- /* Allocate FW image data structure and header buffer */
- ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, toc_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for TOC buffer\n");
- return -ENOMEM;
- }
-
- /* Allocate buffer for SMU internal buffer */
- ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, smu_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
- return -ENOMEM;
- }
-
- /* Retrieve GPU address for header buffer and internal buffer */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the TOC buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- private->header_addr_low = lower_32_bits(mc_addr);
- private->header_addr_high = upper_32_bits(mc_addr);
- private->header = toc_buf_ptr;
-
- ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the SMU internal buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- private->smu_buffer_addr_low = lower_32_bits(mc_addr);
- private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
- adev->smu.smumgr_funcs = &tonga_smumgr_funcs;
-
- return 0;
-}
-
-int tonga_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.fw_buf)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 132e613ed674..f6c941550b8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,7 +116,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -526,6 +526,20 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
+static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 4; /* uvd_v4_2_ring_emit_ib */
+}
+
+static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 2 + /* uvd_v4_2_ring_emit_hdp_flush */
+ 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
+ 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */
+}
+
/**
* uvd_v4_2_mc_resume - memory controller programming
*
@@ -756,6 +770,8 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size,
+ .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 101de136ba63..400c16fe579e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -112,7 +112,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -577,6 +577,20 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
+static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 6; /* uvd_v5_0_ring_emit_ib */
+}
+
+static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 2 + /* uvd_v5_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
+ 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */
+}
+
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -807,6 +821,8 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7f21102bfb99..ab3df6d75656 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -116,7 +116,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -396,21 +396,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
uvd_v6_0_mc_resume(adev);
- /* Set dynamic clock gating in S/W control mode */
- if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
- uvd_v6_0_set_sw_clock_gating(adev);
- } else {
- /* disable clock gating */
- uint32_t data = RREG32(mmUVD_CGC_CTRL);
- data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
- WREG32(mmUVD_CGC_CTRL, data);
- }
+ /* disable clock gating */
+ WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0);
/* disable interrupt */
- WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
/* stall UMC and register bus before resetting VCPU */
- WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+ WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
mdelay(1);
/* put LMI, VCPU, RBC etc... into reset */
@@ -426,7 +419,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
mdelay(5);
/* take UVD block out of reset */
- WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
+ WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
mdelay(5);
/* initialize UVD memory controller */
@@ -461,7 +454,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable UMC */
- WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+ WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
/* boot up the VCPU */
WREG32(mmUVD_SOFT_RESET, 0);
@@ -481,11 +474,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
break;
DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
- WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
mdelay(10);
- WREG32_P(mmUVD_SOFT_RESET, 0,
- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
mdelay(10);
r = -1;
}
@@ -502,15 +493,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
/* clear the bit 4 of UVD_STATUS */
WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
- tmp = 0;
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- /* force RBC into idle state */
WREG32(mmUVD_RBC_RB_CNTL, tmp);
/* set the write pointer delay */
@@ -531,7 +521,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
- WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
+ WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
return 0;
}
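The conversions above rely on WREG32_FIELD to collapse the open-coded read-modify-write sequences. Quoted from memory and therefore an approximation (check amdgpu.h for the authoritative definition), the helper expands to roughly:

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
	       (val) << REG_FIELD_SHIFT(reg, field))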
@@ -735,6 +725,31 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
+static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 8; /* uvd_v6_0_ring_emit_ib */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */
+}
+
+static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+ return
+ 2 + /* uvd_v6_0_ring_emit_hdp_flush */
+ 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */
+ 20 + /* uvd_v6_0_ring_emit_vm_flush */
+ 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */
+}
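+
These accounting hooks let the ring layer reserve a whole frame's worth of dwords up front instead of guessing; the VM variant above budgets 2 + 2 + 10 + 20 + 14 + 14 = 62 dwords. An assumed usage sketch (the exact call site in the ring code may differ):

	unsigned ndw = ring->funcs->get_dma_frame_size(ring) +
		       ring->funcs->get_emit_ib_size(ring) * num_ibs;

	r = amdgpu_ring_alloc(ring, ndw);
	if (r)
		return r;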
+
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -748,20 +763,82 @@ static int uvd_v6_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
+ if (uvd_v6_0_is_idle(handle))
return 0;
}
return -ETIMEDOUT;
}
-static int uvd_v6_0_soft_reset(void *handle)
+#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
+static bool uvd_v6_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+ u32 tmp = RREG32(mmSRBM_STATUS);
+
+ if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
+ REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
+ (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
+
+ if (srbm_soft_reset) {
+ adev->uvd.srbm_soft_reset = srbm_soft_reset;
+ return true;
+ } else {
+ adev->uvd.srbm_soft_reset = 0;
+ return false;
+ }
+}
+
+static int uvd_v6_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->uvd.srbm_soft_reset)
+ return 0;
uvd_v6_0_stop(adev);
+ return 0;
+}
+
+static int uvd_v6_0_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset;
+
+ if (!adev->uvd.srbm_soft_reset)
+ return 0;
+ srbm_soft_reset = adev->uvd.srbm_soft_reset;
+
+ if (srbm_soft_reset) {
+ u32 tmp;
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int uvd_v6_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->uvd.srbm_soft_reset)
+ return 0;
- WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
- ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
mdelay(5);
return uvd_v6_0_start(adev);
@@ -902,21 +979,15 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- static int curstate = -1;
if (adev->asic_type == CHIP_FIJI ||
- adev->asic_type == CHIP_POLARIS10)
- uvd_v6_set_bypass_mode(adev, enable);
+ adev->asic_type == CHIP_POLARIS10)
+ uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (curstate == state)
- return 0;
-
- curstate = state;
- if (enable) {
+ if (state == AMD_CG_STATE_GATE) {
/* disable HW gating and enable Sw gating */
uvd_v6_0_set_sw_clock_gating(adev);
} else {
@@ -946,6 +1017,8 @@ static int uvd_v6_0_set_powergating_state(void *handle,
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
+ WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
+
if (state == AMD_PG_STATE_GATE) {
uvd_v6_0_stop(adev);
return 0;
@@ -966,7 +1039,10 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.resume = uvd_v6_0_resume,
.is_idle = uvd_v6_0_is_idle,
.wait_for_idle = uvd_v6_0_wait_for_idle,
+ .check_soft_reset = uvd_v6_0_check_soft_reset,
+ .pre_soft_reset = uvd_v6_0_pre_soft_reset,
.soft_reset = uvd_v6_0_soft_reset,
+ .post_soft_reset = uvd_v6_0_post_soft_reset,
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
};
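The four hooks wired up above split a recovery cycle into detect/quiesce/reset/restart stages. A minimal sketch of how a caller could drive them in order; the do_ip_soft_reset() name and the direct use of uvd_v6_0_ip_funcs are illustrative assumptions, not the actual amdgpu_device core code:

static int do_ip_soft_reset(struct amdgpu_device *adev)
{
	const struct amd_ip_funcs *funcs = &uvd_v6_0_ip_funcs;
	void *handle = adev;
	int r;

	/* 1) latch the SRBM reset mask if the block looks hung */
	if (!funcs->check_soft_reset(handle))
		return 0;	/* nothing to do */

	/* 2) quiesce: uvd_v6_0_pre_soft_reset() stops the UVD block */
	r = funcs->pre_soft_reset(handle);
	if (r)
		return r;

	/* 3) pulse the latched bits in SRBM_SOFT_RESET */
	r = funcs->soft_reset(handle);
	if (r)
		return r;

	/* 4) restart: uvd_v6_0_post_soft_reset() calls uvd_v6_0_start() */
	return funcs->post_soft_reset(handle);
}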
@@ -986,6 +1062,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@@ -1005,6 +1083,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
+ .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 80a37a602181..76e64ad04a53 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -30,16 +30,17 @@
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
-
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
-
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#define VCE_V2_0_FW_SIZE (256 * 1024)
#define VCE_V2_0_STACK_SIZE (64 * 1024)
#define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES)
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -96,6 +97,49 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmVCE_RB_WPTR2, ring->wptr);
}
+static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ uint32_t status = RREG32(mmVCE_LMI_STATUS);
+
+ if (status & 0x337f)
+ return 0;
+ mdelay(10);
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ uint32_t status = RREG32(mmVCE_STATUS);
+
+ if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
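Both vce_v2_0_lmi_clean() and vce_v2_0_firmware_loaded() are instances of the same bounded-poll pattern: the inner loop polls up to 100 times with a 10 ms delay (roughly 1 s), and the outer loop retries up to 10 times (with an ECPU reset between attempts in the firmware case). A generic sketch of the pattern; poll_register_set() is a hypothetical helper, not part of the driver:

/* Hypothetical helper: spin until (reg & mask) becomes non-zero, or
 * give up after tries * delay_ms milliseconds. */
static int poll_register_set(struct amdgpu_device *adev, u32 reg,
			     u32 mask, unsigned int tries,
			     unsigned int delay_ms)
{
	unsigned int i;

	for (i = 0; i < tries; ++i) {
		if (RREG32(reg) & mask)
			return 0;
		mdelay(delay_ms);
	}

	return -ETIMEDOUT;
}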
+
/**
* vce_v2_0_start - start VCE block
*
@@ -106,7 +150,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
static int vce_v2_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int i, j, r;
+ int r;
vce_v2_0_mc_resume(adev);
@@ -127,36 +171,12 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
+ WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
mdelay(100);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
- for (i = 0; i < 10; ++i) {
- uint32_t status;
- for (j = 0; j < 100; ++j) {
- status = RREG32(mmVCE_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
-
- DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
- WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- r = -1;
- }
+ r = vce_v2_0_firmware_loaded(adev);
/* clear BUSY flag */
WREG32_P(mmVCE_STATUS, 0, ~1);
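The WREG32_FIELD() calls that replace the open-coded WREG32_P() mask arithmetic come from amdgpu.h; to a close approximation they expand to a read-modify-write built from the generated __SHIFT/_MASK constants. Shown here for reference only, not as part of this patch:

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field)  reg##__##field##_MASK

/* read-modify-write one named field of a register */
#define WREG32_FIELD(reg, field, val)					\
	WREG32(mm##reg,							\
	       (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) |	\
	       ((val) << REG_FIELD_SHIFT(reg, field)))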
@@ -173,6 +193,8 @@ static int vce_v2_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->vce.num_rings = 2;
+
vce_v2_0_set_ring_funcs(adev);
vce_v2_0_set_irq_funcs(adev);
@@ -182,7 +204,7 @@ static int vce_v2_0_early_init(void *handle)
static int vce_v2_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- int r;
+ int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCE */
@@ -199,19 +221,14 @@ static int vce_v2_0_sw_init(void *handle)
if (r)
return r;
- ring = &adev->vce.ring[0];
- sprintf(ring->name, "vce0");
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
- if (r)
- return r;
-
- ring = &adev->vce.ring[1];
- sprintf(ring->name, "vce1");
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
- if (r)
- return r;
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ ring = &adev->vce.ring[i];
+ sprintf(ring->name, "vce%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+ &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ if (r)
+ return r;
+ }
return r;
}
@@ -234,29 +251,23 @@ static int vce_v2_0_sw_fini(void *handle)
static int vce_v2_0_hw_init(void *handle)
{
- struct amdgpu_ring *ring;
- int r;
+ int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = vce_v2_0_start(adev);
+ /* this error means the VCPU is not in a running state, so just skip the ring test rather than stopping driver initialization */
if (r)
-/* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */
return 0;
- ring = &adev->vce.ring[0];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
- return r;
- }
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].ready = false;
- ring = &adev->vce.ring[1];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
- return r;
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ if (r)
+ return r;
+ else
+ adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
@@ -338,47 +349,50 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
- u32 orig, tmp;
+ if (vce_v2_0_wait_for_idle(adev)) {
+ DRM_INFO("VCE is busy, Can't set clock gateing");
+ return;
+ }
- if (gated) {
- if (vce_v2_0_wait_for_idle(adev)) {
- DRM_INFO("VCE is busy, Can't set clock gateing");
- return;
- }
- WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
- WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(100);
- WREG32(mmVCE_STATUS, 0);
- } else {
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
- WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(100);
+ WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);
+
+ if (vce_v2_0_lmi_clean(adev)) {
+ DRM_INFO("LMI is busy, Can't set clock gateing");
+ return;
}
- tmp = RREG32(mmVCE_CLOCK_GATING_B);
- tmp &= ~0x00060006;
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32(mmVCE_STATUS, 0);
+
+ if (gated)
+ WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+ /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */
if (gated) {
- tmp |= 0xe10000;
+ /* Force CLOCK OFF, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
+ WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
} else {
- tmp |= 0xe1;
- tmp &= ~0xe10000;
+ /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
+ WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
}
- WREG32(mmVCE_CLOCK_GATING_B, tmp);
- orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
- tmp &= ~0x1fe000;
- tmp &= ~0xff000000;
- if (tmp != orig)
- WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+ /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0} */
+ WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);
- orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
- tmp &= ~0x3fc;
- if (tmp != orig)
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
- if (gated)
- WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
- WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
+ if (!gated) {
+ WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ mdelay(100);
+ WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+ vce_v2_0_firmware_loaded(adev);
+ WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+ }
}
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
@@ -458,9 +472,7 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
- WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
vce_v2_0_init_cg(adev);
}
@@ -474,11 +486,11 @@ static bool vce_v2_0_is_idle(void *handle)
static int vce_v2_0_wait_for_idle(void *handle)
{
- unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ unsigned i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+ if (vce_v2_0_is_idle(handle))
return 0;
}
return -ETIMEDOUT;
@@ -488,8 +500,7 @@ static int vce_v2_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
- ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+ WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
mdelay(5);
return vce_v2_0_start(adev);
@@ -516,10 +527,8 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
DRM_DEBUG("IH: VCE\n");
switch (entry->src_data) {
case 0:
- amdgpu_fence_process(&adev->vce.ring[0]);
- break;
case 1:
- amdgpu_fence_process(&adev->vce.ring[1]);
+ amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -530,11 +539,28 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+ else
+ tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int vce_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+
+ vce_v2_0_set_bypass_mode(adev, enable);
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -596,12 +622,16 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
+ .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size,
+ .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
- adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
- adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
+ int i;
+
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
}
static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index c271abffd8dd..6feed726e299 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,6 +37,9 @@
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -49,6 +52,8 @@
#define VCE_V3_0_STACK_SIZE (64 * 1024)
#define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
+#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
+
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -67,8 +72,10 @@ static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
if (ring == &adev->vce.ring[0])
return RREG32(mmVCE_RB_RPTR);
- else
+ else if (ring == &adev->vce.ring[1])
return RREG32(mmVCE_RB_RPTR2);
+ else
+ return RREG32(mmVCE_RB_RPTR3);
}
/**
@@ -84,8 +91,10 @@ static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
if (ring == &adev->vce.ring[0])
return RREG32(mmVCE_RB_WPTR);
- else
+ else if (ring == &adev->vce.ring[1])
return RREG32(mmVCE_RB_WPTR2);
+ else
+ return RREG32(mmVCE_RB_WPTR3);
}
/**
@@ -101,108 +110,80 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
if (ring == &adev->vce.ring[0])
WREG32(mmVCE_RB_WPTR, ring->wptr);
- else
+ else if (ring == &adev->vce.ring[1])
WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ else
+ WREG32(mmVCE_RB_WPTR3, ring->wptr);
}
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
- u32 tmp, data;
-
- tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
- if (override)
- data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
- else
- data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
- if (tmp != data)
- WREG32(mmVCE_RB_ARB_CTRL, data);
+ WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
bool gated)
{
- u32 tmp, data;
+ u32 data;
+
/* Set Override to disable Clock Gating */
vce_v3_0_override_vce_clock_gating(adev, true);
- if (!gated) {
- /* Force CLOCK ON for VCE_CLOCK_GATING_B,
- * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
- * VREG can be FORCE ON or set to Dynamic, but can't be OFF
- */
- tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+ /* This function enables MGCG, which is controlled by firmware.
+ * With the clocks in the gated state the core is still accessible,
+ * but the firmware will throttle the clocks on the fly as necessary.
+ */
+ if (gated) {
+ data = RREG32(mmVCE_CLOCK_GATING_B);
data |= 0x1ff;
data &= ~0xef0000;
- if (tmp != data)
- WREG32(mmVCE_CLOCK_GATING_B, data);
+ WREG32(mmVCE_CLOCK_GATING_B, data);
- /* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
- * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
- */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+ data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0x3ff000;
data &= ~0xffc00000;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING, data);
+ WREG32(mmVCE_UENC_CLOCK_GATING, data);
- /* set VCE_UENC_CLOCK_GATING_2 */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+ data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x2;
- data &= ~0x2;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+ data &= ~0x00010000;
+ WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
- /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
- tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data |= 0x37f;
- if (tmp != data)
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
- /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
- tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+ data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8;
- if (tmp != data)
- WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+ VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+ VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
+ 0x8;
+ WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
} else {
- /* Force CLOCK OFF for VCE_CLOCK_GATING_B,
- * {*, *_FORCE_OFF} = {*, 1}
- * set VREG to Dynamic, as it can't be OFF
- */
- tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
+ data = RREG32(mmVCE_CLOCK_GATING_B);
data &= ~0x80010;
data |= 0xe70008;
- if (tmp != data)
- WREG32(mmVCE_CLOCK_GATING_B, data);
- /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
- * Force ClOCK OFF takes precedent over Force CLOCK ON setting.
- * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
- */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
+ WREG32(mmVCE_CLOCK_GATING_B, data);
+
+ data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0xffc00000;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING, data);
- /* Set VCE_UENC_CLOCK_GATING_2 */
- tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
+ WREG32(mmVCE_UENC_CLOCK_GATING, data);
+
+ data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x10000;
- if (tmp != data)
- WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
- /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
- tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
+
+ data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data &= ~0xffc00000;
- if (tmp != data)
- WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
- /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
- tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
+
+ data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8);
- if (tmp != data)
- WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
+ VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
+ VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
+ 0x8);
+ WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
}
vce_v3_0_override_vce_clock_gating(adev, false);
}
@@ -221,12 +202,9 @@ static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
}
DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
mdelay(10);
- WREG32_P(mmVCE_SOFT_RESET, 0,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
mdelay(10);
}
@@ -259,43 +237,34 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+ ring = &adev->vce.ring[2];
+ WREG32(mmVCE_RB_RPTR3, ring->wptr);
+ WREG32(mmVCE_RB_WPTR3, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
if (adev->vce.harvest_config & (1 << idx))
continue;
- if (idx == 0)
- WREG32_P(mmGRBM_GFX_INDEX, 0,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
- else
- WREG32_P(mmGRBM_GFX_INDEX,
- GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
-
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
vce_v3_0_mc_resume(adev, idx);
-
- WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
- ~VCE_STATUS__JOB_BUSY_MASK);
+ WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
else
- WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
- ~VCE_VCPU_CNTL__CLK_EN_MASK);
-
- WREG32_P(mmVCE_SOFT_RESET, 0,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
mdelay(100);
r = vce_v3_0_firmware_loaded(adev);
/* clear BUSY flag */
- WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
-
- /* Set Clock-Gating off */
- if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
- vce_v3_0_set_vce_sw_clock_gating(adev, false);
+ WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
if (r) {
DRM_ERROR("VCE not responding, giving up!!!\n");
@@ -304,7 +273,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
}
}
- WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -319,33 +288,25 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
- if (idx == 0)
- WREG32_P(mmGRBM_GFX_INDEX, 0,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
- else
- WREG32_P(mmGRBM_GFX_INDEX,
- GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
else
- WREG32_P(mmVCE_VCPU_CNTL, 0,
- ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
+
/* hold on ECPU */
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
/* clear BUSY flag */
- WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+ WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
/* Set Clock-Gating off */
if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
}
- WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -399,6 +360,8 @@ static int vce_v3_0_early_init(void *handle)
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
return -ENOENT;
+ adev->vce.num_rings = 3;
+
vce_v3_0_set_ring_funcs(adev);
vce_v3_0_set_irq_funcs(adev);
@@ -409,7 +372,7 @@ static int vce_v3_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int r;
+ int r, i;
/* VCE */
r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
@@ -421,23 +384,22 @@ static int vce_v3_0_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_vce_resume(adev);
- if (r)
- return r;
+ /* 52.8.3 required for 3 ring support */
+ if (adev->vce.fw_version < FW_52_8_3)
+ adev->vce.num_rings = 2;
- ring = &adev->vce.ring[0];
- sprintf(ring->name, "vce0");
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ r = amdgpu_vce_resume(adev);
if (r)
return r;
- ring = &adev->vce.ring[1];
- sprintf(ring->name, "vce1");
- r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
- &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
- if (r)
- return r;
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ ring = &adev->vce.ring[i];
+ sprintf(ring->name, "vce%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
+ &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
+ if (r)
+ return r;
+ }
return r;
}
@@ -467,10 +429,10 @@ static int vce_v3_0_hw_init(void *handle)
if (r)
return r;
- adev->vce.ring[0].ready = false;
- adev->vce.ring[1].ready = false;
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].ready = false;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < adev->vce.num_rings; i++) {
r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
if (r)
return r;
@@ -534,7 +496,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
- WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
+ WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
WREG32(mmVCE_LMI_CTRL, 0x00398000);
WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
@@ -573,9 +535,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
}
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
-
- WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
static bool vce_v3_0_is_idle(void *handle)
@@ -601,20 +561,107 @@ static int vce_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
+#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
+#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
+#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
+#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
+ VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
+
+static bool vce_v3_0_check_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0;
+
+ /* According to the VCE team, we should use VCE_STATUS instead of
+ * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
+ * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
+ * instance's registers are accessed
+ * (0 for the 1st instance, 0x10 for the 2nd instance).
+ *
+ * VCE_STATUS
+ * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
+ * |----+----+-----------+----+----+----+----------+---------+----|
+ * |bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |   bit1  |bit0|
+ *
+ * The VCE team suggests using bits 3-6 for the busy status check.
+ */
+ mutex_lock(&adev->grbm_idx_mutex);
+ WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+ }
+ WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+ if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
+ srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
+ }
+ WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (srbm_soft_reset) {
+ adev->vce.srbm_soft_reset = srbm_soft_reset;
+ return true;
+ } else {
+ adev->vce.srbm_soft_reset = 0;
+ return false;
+ }
+}
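Because GRBM_GFX_INDEX.INSTANCE_INDEX steers which VCE instance's register bank RREG32() sees, every per-instance access has to happen under grbm_idx_mutex with the index restored afterwards, exactly as the function above does. A sketch of that access pattern as a standalone helper; vce_v3_0_read_status() is hypothetical:

/* Illustrative only: read one instance's VCE_STATUS under the GRBM
 * index lock. instance is 0 for the first VCE, 0x10 for the second,
 * matching the values used in vce_v3_0_check_soft_reset(). */
static u32 vce_v3_0_read_status(struct amdgpu_device *adev, u32 instance)
{
	u32 status;

	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
	status = RREG32(mmVCE_STATUS);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return status;
}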
+
static int vce_v3_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 mask = 0;
+ u32 srbm_soft_reset;
+
+ if (!adev->vce.srbm_soft_reset)
+ return 0;
+ srbm_soft_reset = adev->vce.srbm_soft_reset;
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+ if (srbm_soft_reset) {
+ u32 tmp;
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
+
+ return 0;
+}
+
+static int vce_v3_0_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->vce.srbm_soft_reset)
+ return 0;
- WREG32_P(mmSRBM_SOFT_RESET, mask,
- ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
- SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
mdelay(5);
- return vce_v3_0_start(adev);
+ return vce_v3_0_suspend(adev);
+}
+
+static int vce_v3_0_post_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->vce.srbm_soft_reset)
+ return 0;
+
+ mdelay(5);
+
+ return vce_v3_0_resume(adev);
}
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -637,13 +684,12 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
{
DRM_DEBUG("IH: VCE\n");
- WREG32_P(mmVCE_SYS_INT_STATUS,
- VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
- ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+ WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
switch (entry->src_data) {
case 0:
case 1:
+ case 2:
amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
break;
default:
@@ -655,7 +701,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
@@ -674,8 +720,10 @@ static int vce_v3_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
- if (adev->asic_type == CHIP_POLARIS10)
- vce_v3_set_bypass_mode(adev, enable);
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_TONGA) ||
+ (adev->asic_type == CHIP_FIJI))
+ vce_v3_0_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
@@ -686,13 +734,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
if (adev->vce.harvest_config & (1 << i))
continue;
- if (i == 0)
- WREG32_P(mmGRBM_GFX_INDEX, 0,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
- else
- WREG32_P(mmGRBM_GFX_INDEX,
- GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
- ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -711,7 +753,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
- WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -739,6 +781,60 @@ static int vce_v3_0_set_powergating_state(void *handle,
return vce_v3_0_start(adev);
}
+static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+ amdgpu_ring_write(ring, VCE_CMD_IB_VM);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+}
+
+static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vm_id, uint64_t pd_addr)
+{
+ amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, VCE_CMD_END);
+}
+
+static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+}
+
+static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
+{
+ return
+ 5; /* vce_v3_0_ring_emit_ib */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
+{
+ return
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */
+}
+
+static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring)
+{
+ return
+ 6 + /* vce_v3_0_emit_vm_flush */
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */
+}
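The get_emit_ib_size/get_dma_frame_size callbacks report how many ring dwords one IB emission and one command frame consume, so the ring layer can reserve space up front instead of risking an overrun mid-emit. A sketch of how a caller might budget a frame; reserve_vce_frame() is hypothetical, while amdgpu_ring_alloc() is the existing ring-space reservation helper:

/* Hypothetical: reserve enough dwords for one frame with num_ibs IBs. */
static int reserve_vce_frame(struct amdgpu_ring *ring, unsigned int num_ibs)
{
	unsigned int ndw = ring->funcs->get_dma_frame_size(ring) +
			   num_ibs * ring->funcs->get_emit_ib_size(ring);

	return amdgpu_ring_alloc(ring, ndw);
}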
+
const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
@@ -751,12 +847,15 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.resume = vce_v3_0_resume,
.is_idle = vce_v3_0_is_idle,
.wait_for_idle = vce_v3_0_wait_for_idle,
+ .check_soft_reset = vce_v3_0_check_soft_reset,
+ .pre_soft_reset = vce_v3_0_pre_soft_reset,
.soft_reset = vce_v3_0_soft_reset,
+ .post_soft_reset = vce_v3_0_post_soft_reset,
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
};
-static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
+static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
@@ -769,12 +868,42 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
+ .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size,
+};
+
+static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+ .get_rptr = vce_v3_0_ring_get_rptr,
+ .get_wptr = vce_v3_0_ring_get_wptr,
+ .set_wptr = vce_v3_0_ring_set_wptr,
+ .parse_cs = NULL,
+ .emit_ib = vce_v3_0_ring_emit_ib,
+ .emit_vm_flush = vce_v3_0_emit_vm_flush,
+ .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
+ .emit_fence = amdgpu_vce_ring_emit_fence,
+ .test_ring = amdgpu_vce_ring_test_ring,
+ .test_ib = amdgpu_vce_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
+ .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size,
+ .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
- adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
- adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
+ int i;
+
+ if (adev->asic_type >= CHIP_STONEY) {
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+ DRM_INFO("VCE enabled in VM mode\n");
+ } else {
+ for (i = 0; i < adev->vce.num_rings; i++)
+ adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+ DRM_INFO("VCE enabled in physical mode\n");
+ }
}
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 03a31c53aec3..f62f1a74f890 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -77,7 +77,13 @@
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
+#include "dce_virtual.h"
+MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
@@ -444,18 +450,21 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
- u32 caps = 0;
- u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
-
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
- caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
-
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
- caps |= AMDGPU_VIRT_CAPS_IS_VF;
-
- return caps;
+ uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ /* bit 0: 0 means PF, 1 means VF */
+ /* bit 31: 0 means IOV disabled, 1 means enabled */
+ if (reg & 1)
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+ if (reg & 0x80000000)
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+ if (reg == 0) {
+ if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
}
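The raw bit tests mirror the named fields that the removed vi_get_virtual_caps() decoded with REG_GET_FIELD(); an equivalent sketch using those field helpers (same behavior, alternative spelling, not part of the patch):

static void vi_detect_hw_virtualization_alt(struct amdgpu_device *adev)
{
	u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);

	/* FUNC_IDENTIFIER is bit 0: 0 = PF, 1 = VF */
	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	/* IOV_ENABLE is bit 31: 0 = IOV disabled, 1 = enabled */
	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	/* an all-zero register plus hypervisor detection implies passthrough */
	if (reg == 0 && is_virtual_machine())
		adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
}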
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
@@ -822,6 +831,60 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 7,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gmc_v7_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &iceland_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 2,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &sdma_v2_4_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -890,6 +953,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 5,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v5_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -958,6 +1089,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 5,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 10,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1026,6 +1225,74 @@ static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -1103,34 +1370,142 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] =
#endif
};
+static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &dce_virtual_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+#if defined(CONFIG_DRM_AMD_ACP)
+ {
+ .type = AMD_IP_BLOCK_TYPE_ACP,
+ .major = 2,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &acp_ip_funcs,
+ },
+#endif
+};
+
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_TOPAZ:
- adev->ip_blocks = topaz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
- break;
- case CHIP_FIJI:
- adev->ip_blocks = fiji_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
- break;
- case CHIP_TONGA:
- adev->ip_blocks = tonga_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- adev->ip_blocks = polaris11_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->ip_blocks = cz_ip_blocks;
- adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
+ if (adev->enable_virtual_display) {
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ adev->ip_blocks = topaz_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd);
+ break;
+ case CHIP_FIJI:
+ adev->ip_blocks = fiji_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd);
+ break;
+ case CHIP_TONGA:
+ adev->ip_blocks = tonga_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ adev->ip_blocks = polaris11_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd);
+ break;
+
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ adev->ip_blocks = cz_ip_blocks_vd;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ adev->ip_blocks = topaz_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
+ break;
+ case CHIP_FIJI:
+ adev->ip_blocks = fiji_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+ break;
+ case CHIP_TONGA:
+ adev->ip_blocks = tonga_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ adev->ip_blocks = polaris11_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ adev->ip_blocks = cz_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
}
return 0;
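The two switch statements above differ only in which table pair they assign, so the per-ASIC bodies could be factored through a small helper. A sketch of that alternative; set_ip_blocks_for() is hypothetical, and the committed code keeps the explicit switches:

static void set_ip_blocks_for(struct amdgpu_device *adev,
			      const struct amdgpu_ip_block_version *phys,
			      unsigned int num_phys,
			      const struct amdgpu_ip_block_version *vd,
			      unsigned int num_vd)
{
	if (adev->enable_virtual_display) {
		adev->ip_blocks = vd;
		adev->num_ip_blocks = num_vd;
	} else {
		adev->ip_blocks = phys;
		adev->num_ip_blocks = num_phys;
	}
}

/* e.g. for Tonga:
 *	set_ip_blocks_for(adev, tonga_ip_blocks, ARRAY_SIZE(tonga_ip_blocks),
 *			  tonga_ip_blocks_vd, ARRAY_SIZE(tonga_ip_blocks_vd));
 */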
@@ -1154,13 +1529,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,
.read_bios_from_rom = &vi_read_bios_from_rom,
+ .detect_hw_virtualization = vi_detect_hw_virtualization,
.read_register = &vi_read_register,
.reset = &vi_asic_reset,
.set_vga_state = &vi_vga_set_state,
.get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
- .get_virtual_caps = &vi_get_virtual_caps,
};
static int vi_common_early_init(void *handle)
@@ -1248,8 +1623,17 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
- AMD_CG_SUPPORT_SDMA_LS;
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCE_MGCG;
+ /* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
+ if (adev->rev_id != 0x00) {
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_UVD |
+ AMD_PG_SUPPORT_VCE;
+ }
adev->external_rev_id = adev->rev_id + 0x1;
break;
case CHIP_STONEY:
@@ -1267,14 +1651,24 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
- AMD_CG_SUPPORT_SDMA_LS;
- adev->external_rev_id = adev->rev_id + 0x1;
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCE_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_PIPELINE |
+ AMD_PG_SUPPORT_UVD |
+ AMD_PG_SUPPORT_VCE;
+ adev->external_rev_id = adev->rev_id + 0x61;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
+ /* in the early init stage, the vbios code won't work */
+ if (adev->asic_funcs->detect_hw_virtualization)
+ amdgpu_asic_detect_hw_virtualization(adev);
+
if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true;
@@ -1418,6 +1812,63 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
+static int vi_common_set_clockgating_state_by_smu(void *handle,
+ enum amd_clockgating_state state)
+{
+ uint32_t msg_id, pp_state;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_MC,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_SDMA,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_HDP,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_DRM,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_ROM,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ return 0;
+}
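Each amd_set_clockgating_by_smu() call hands the SMU one packed message ID naming a group, a block within that group, which capabilities (CG and/or LS) the request covers, and the requested state. The packing below is a sketch of the PP_CG_MSG_ID() layout; the authoritative shift and mask values live in amd_shared.h, so treat the numbers here as assumptions:

/* Assumed layout: group in the top nibble, block in the middle bits,
 * support flags and state in the low byte. Verify against
 * amd_shared.h before relying on these values. */
#define PP_GROUP_SHIFT          28
#define PP_BLOCK_SHIFT          8
#define PP_STATE_SUPPORT_SHIFT  0
#define PP_STATE_SHIFT          0

#define PP_CG_MSG_ID(group, block, support, state)			\
	(((group) << PP_GROUP_SHIFT) | ((block) << PP_BLOCK_SHIFT) |	\
	 ((support) << PP_STATE_SUPPORT_SHIFT) | ((state) << PP_STATE_SHIFT))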
+
static int vi_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1443,6 +1894,10 @@ static int vi_common_set_clockgating_state(void *handle,
vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ vi_common_set_clockgating_state_by_smu(adev, state);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 062ee1676480..11746f22d0c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -369,4 +369,45 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
+#define VCE_CMD_IB_VM 0x00000102
+#define VCE_CMD_WAIT_GE 0x00000106
+#define VCE_CMD_UPDATE_PTB 0x00000107
+#define VCE_CMD_FLUSH_TLB 0x00000108
+
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x) ((x) << 0)
+#define RB_MAP_PKR0_MASK (0x3 << 0)
+#define RB_MAP_PKR1(x) ((x) << 2)
+#define RB_MAP_PKR1_MASK (0x3 << 2)
+#define RB_XSEL2(x) ((x) << 4)
+#define RB_XSEL2_MASK (0x3 << 4)
+#define RB_XSEL (1 << 6)
+#define RB_YSEL (1 << 7)
+#define PKR_MAP(x) ((x) << 8)
+#define PKR_MAP_MASK (0x3 << 8)
+#define PKR_XSEL(x) ((x) << 10)
+#define PKR_XSEL_MASK (0x3 << 10)
+#define PKR_YSEL(x) ((x) << 12)
+#define PKR_YSEL_MASK (0x3 << 12)
+#define SC_MAP(x) ((x) << 16)
+#define SC_MAP_MASK (0x3 << 16)
+#define SC_XSEL(x) ((x) << 18)
+#define SC_XSEL_MASK (0x3 << 18)
+#define SC_YSEL(x) ((x) << 20)
+#define SC_YSEL_MASK (0x3 << 20)
+#define SE_MAP(x) ((x) << 24)
+#define SE_MAP_MASK (0x3 << 24)
+#define SE_XSEL(x) ((x) << 26)
+#define SE_XSEL_MASK (0x3 << 26)
+#define SE_YSEL(x) ((x) << 28)
+#define SE_YSEL_MASK (0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x) ((x) << 0)
+#define SE_PAIR_MAP_MASK (0x3 << 0)
+#define SE_PAIR_XSEL(x) ((x) << 2)
+#define SE_PAIR_XSEL_MASK (0x3 << 2)
+#define SE_PAIR_YSEL(x) ((x) << 4)
+#define SE_PAIR_YSEL_MASK (0x3 << 4)
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index a7d3cb3fead0..453c5d66e5c3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -142,13 +142,15 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("mapping doorbell page:\n");
- pr_debug(" target user address == 0x%08llX\n",
- (unsigned long long) vma->vm_start);
- pr_debug(" physical address == 0x%08llX\n", address);
- pr_debug(" vm_flags == 0x%04lX\n", vma->vm_flags);
- pr_debug(" size == 0x%04lX\n",
- doorbell_process_allocation());
+ pr_debug("kfd: mapping doorbell page in %s\n"
+ " target user address == 0x%08llX\n"
+ " physical address == 0x%08llX\n"
+ " vm_flags == 0x%04lX\n"
+ " size == 0x%04lX\n",
+ __func__,
+ (unsigned long long) vma->vm_start, address, vma->vm_flags,
+ doorbell_process_allocation());
+
return io_remap_pfn_range(vma,
vma->vm_start,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 9beae87aadd5..d135cd002a95 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -47,6 +47,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
__func__, KFD_QUEUE_TYPE_HIQ, queue_size);
+ memset(&prop, 0, sizeof(prop));
+ memset(&nop, 0, sizeof(nop));
+
nop.opcode = IT_NOP;
nop.type = PM4_TYPE_3;
nop.u32all |= PM4_COUNT_ZERO;
@@ -121,7 +124,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
- if (init_queue(&kq->queue, prop) != 0)
+ if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
kq->queue->device = dev;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 80113c335966..4750cabe4252 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -619,7 +619,7 @@ int kfd_init_apertures(struct kfd_process *process);
/* Queue Context Management */
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
-int init_queue(struct queue **q, struct queue_properties properties);
+int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 4f3849ac8c07..ef7c8de7060e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -404,58 +404,47 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
{
struct kfd_process *p;
struct kfd_process_device *pdd;
- int idx, i;
BUG_ON(dev == NULL);
- idx = srcu_read_lock(&kfd_processes_srcu);
-
/*
* Look for the process that matches the pasid. If there is no such
* process, we either released it in amdkfd's own notifier, or there
* is a bug. Unfortunately, there is no way to tell...
*/
- hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
- if (p->pasid == pasid) {
-
- srcu_read_unlock(&kfd_processes_srcu, idx);
-
- pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ p = kfd_lookup_process_by_pasid(pasid);
+ if (!p)
+ return;
- mutex_lock(&p->mutex);
-
- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(dev->dbgmgr);
-
- pqm_uninit(&p->pqm);
+ pr_debug("Unbinding process %d from IOMMU\n", pasid);
- pdd = kfd_get_process_device_data(dev, p);
+ if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+ kfd_dbgmgr_destroy(dev->dbgmgr);
- if (!pdd) {
- mutex_unlock(&p->mutex);
- return;
- }
+ pqm_uninit(&p->pqm);
- if (pdd->reset_wavefronts) {
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
- }
+ pdd = kfd_get_process_device_data(dev, p);
- /*
- * Just mark pdd as unbound, because we still need it
- * to call amd_iommu_unbind_pasid() in when the
- * process exits.
- * We don't call amd_iommu_unbind_pasid() here
- * because the IOMMU called us.
- */
- pdd->bound = false;
+ if (!pdd) {
+ mutex_unlock(&p->mutex);
+ return;
+ }
- mutex_unlock(&p->mutex);
+ if (pdd->reset_wavefronts) {
+ dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ pdd->reset_wavefronts = false;
+ }
- return;
- }
+ /*
+ * Just mark pdd as unbound, because we still need it
+ * to call amd_iommu_unbind_pasid() in when the
+ * process exits.
+ * We don't call amd_iommu_unbind_pasid() here
+ * because the IOMMU called us.
+ */
+ pdd->bound = false;
- srcu_read_unlock(&kfd_processes_srcu, idx);
+ mutex_unlock(&p->mutex);
}
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7b69070f7ecc..e1fb40b84c72 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -129,7 +129,7 @@ static int create_cp_queue(struct process_queue_manager *pqm,
q_properties->vmid = 0;
q_properties->queue_id = qid;
- retval = init_queue(q, *q_properties);
+ retval = init_queue(q, q_properties);
if (retval != 0)
goto err_init_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 9a0c90b0702e..0ab197077f2d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -63,7 +63,7 @@ void print_queue(struct queue *q)
pr_debug("Queue Device Address: 0x%p\n", q->device);
}
-int init_queue(struct queue **q, struct queue_properties properties)
+int init_queue(struct queue **q, const struct queue_properties *properties)
{
struct queue *tmp;
@@ -73,7 +73,7 @@ int init_queue(struct queue **q, struct queue_properties properties)
if (!tmp)
return -ENOMEM;
- memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
+ memcpy(&tmp->properties, properties, sizeof(struct queue_properties));
*q = tmp;
return 0;
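Passing queue_properties by value copied the whole structure onto the stack at every init_queue() call; the const-pointer signature passes a single pointer and makes the read-only intent explicit. In miniature (the by_value/by_ptr names are hypothetical, for contrast only):

/* Illustration only: the by-value form copies sizeof(struct
 * queue_properties) bytes at every call site; the const-pointer form
 * copies once, via the memcpy() inside init_queue() above. */
int init_queue_by_value(struct queue **q, struct queue_properties props);
int init_queue_by_ptr(struct queue **q, const struct queue_properties *props);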
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 884c96f50c3d..1e5064749959 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1090,19 +1090,21 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
{
uint32_t hashout;
uint32_t buf[7];
+ uint64_t local_mem_size;
int i;
if (!gpu)
return 0;
+ local_mem_size = gpu->kfd2kgd->get_vmem_size(gpu->kgd);
+
buf[0] = gpu->pdev->devfn;
buf[1] = gpu->pdev->subsystem_vendor;
buf[2] = gpu->pdev->subsystem_device;
buf[3] = gpu->pdev->device;
buf[4] = gpu->pdev->bus->number;
- buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
- & 0xffffffff);
- buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+ buf[5] = lower_32_bits(local_mem_size);
+ buf[6] = upper_32_bits(local_mem_size);
for (i = 0, hashout = 0; i < 7; i++)
hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
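lower_32_bits()/upper_32_bits() are the standard helpers from <linux/kernel.h>; besides reading better than the open-coded mask and shift, the rewrite also fetches the vmem size once instead of twice. For reference, the kernel defines them as:

#define lower_32_bits(n) ((u32)(n))
/* the double shift keeps this well-defined even when n is only 32 bits wide */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))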
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index a74a0d2ff1ca..bec8125bceb0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -29,7 +29,12 @@
* Supported ASIC types
*/
enum amd_asic_type {
- CHIP_BONAIRE = 0,
+ CHIP_TAHITI = 0,
+ CHIP_PITCAIRN,
+ CHIP_VERDE,
+ CHIP_OLAND,
+ CHIP_HAINAN,
+ CHIP_BONAIRE,
CHIP_KAVERI,
CHIP_KABINI,
CHIP_HAWAII,
@@ -159,8 +164,14 @@ struct amd_ip_funcs {
bool (*is_idle)(void *handle);
/* poll for idle */
int (*wait_for_idle)(void *handle);
+ /* check soft reset the IP block */
+ bool (*check_soft_reset)(void *handle);
+ /* pre soft reset the IP block */
+ int (*pre_soft_reset)(void *handle);
/* soft reset the IP block */
int (*soft_reset)(void *handle);
+ /* post soft reset the IP block */
+ int (*post_soft_reset)(void *handle);
/* enable/disable cg for the IP block */
int (*set_clockgating_state)(void *handle,
enum amd_clockgating_state state);
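The new hooks bracket the existing soft_reset callback, letting each IP block report a hang, quiesce before the reset, and recover afterwards. A sketch of how a driver core might sequence the four phases; only the callbacks come from this header, the orchestrating loop is an assumption:

/* check -> pre -> reset -> post over the hooks added above. */
#include <stdbool.h>

struct amd_ip_funcs_min {
	bool (*check_soft_reset)(void *handle);
	int  (*pre_soft_reset)(void *handle);
	int  (*soft_reset)(void *handle);
	int  (*post_soft_reset)(void *handle);
};

int soft_reset_one(const struct amd_ip_funcs_min *f, void *handle)
{
	int r;

	if (!f->check_soft_reset || !f->check_soft_reset(handle))
		return 0;	/* block reports no hang: nothing to do */

	if (f->pre_soft_reset && (r = f->pre_soft_reset(handle)))
		return r;	/* quiesce the block before resetting */
	if (f->soft_reset && (r = f->soft_reset(handle)))
		return r;
	if (f->post_soft_reset)
		return f->post_soft_reset(handle);
	return 0;
}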
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
new file mode 100644
index 000000000000..66e39cdb5cb0
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const u32 si_SECT_CONTEXT_def_1[] =
+{
+ 0x00000000, // DB_RENDER_CONTROL
+ 0x00000000, // DB_COUNT_CONTROL
+ 0x00000000, // DB_DEPTH_VIEW
+ 0x00000000, // DB_RENDER_OVERRIDE
+ 0x00000000, // DB_RENDER_OVERRIDE2
+ 0x00000000, // DB_HTILE_DATA_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_BOUNDS_MIN
+ 0x00000000, // DB_DEPTH_BOUNDS_MAX
+ 0x00000000, // DB_STENCIL_CLEAR
+ 0x00000000, // DB_DEPTH_CLEAR
+ 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+ 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_INFO
+ 0x00000000, // DB_Z_INFO
+ 0x00000000, // DB_STENCIL_INFO
+ 0x00000000, // DB_Z_READ_BASE
+ 0x00000000, // DB_STENCIL_READ_BASE
+ 0x00000000, // DB_Z_WRITE_BASE
+ 0x00000000, // DB_STENCIL_WRITE_BASE
+ 0x00000000, // DB_DEPTH_SIZE
+ 0x00000000, // DB_DEPTH_SLICE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // TA_BC_BASE_ADDR
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // COHER_DEST_BASE_2
+ 0x00000000, // COHER_DEST_BASE_3
+ 0x00000000, // PA_SC_WINDOW_OFFSET
+ 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+ 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+ 0x0000ffff, // PA_SC_CLIPRECT_RULE
+ 0x00000000, // PA_SC_CLIPRECT_0_TL
+ 0x40004000, // PA_SC_CLIPRECT_0_BR
+ 0x00000000, // PA_SC_CLIPRECT_1_TL
+ 0x40004000, // PA_SC_CLIPRECT_1_BR
+ 0x00000000, // PA_SC_CLIPRECT_2_TL
+ 0x40004000, // PA_SC_CLIPRECT_2_BR
+ 0x00000000, // PA_SC_CLIPRECT_3_TL
+ 0x40004000, // PA_SC_CLIPRECT_3_BR
+ 0xaa99aaaa, // PA_SC_EDGERULE
+ 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+ 0xffffffff, // CB_TARGET_MASK
+ 0xffffffff, // CB_SHADER_MASK
+ 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+ 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+ 0x00000000, // COHER_DEST_BASE_0
+ 0x00000000, // COHER_DEST_BASE_1
+ 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+ 0x00000000, // PA_SC_VPORT_ZMIN_0
+ 0x3f800000, // PA_SC_VPORT_ZMAX_0
+ 0x00000000, // PA_SC_VPORT_ZMIN_1
+ 0x3f800000, // PA_SC_VPORT_ZMAX_1
+ 0x00000000, // PA_SC_VPORT_ZMIN_2
+ 0x3f800000, // PA_SC_VPORT_ZMAX_2
+ 0x00000000, // PA_SC_VPORT_ZMIN_3
+ 0x3f800000, // PA_SC_VPORT_ZMAX_3
+ 0x00000000, // PA_SC_VPORT_ZMIN_4
+ 0x3f800000, // PA_SC_VPORT_ZMAX_4
+ 0x00000000, // PA_SC_VPORT_ZMIN_5
+ 0x3f800000, // PA_SC_VPORT_ZMAX_5
+ 0x00000000, // PA_SC_VPORT_ZMIN_6
+ 0x3f800000, // PA_SC_VPORT_ZMAX_6
+ 0x00000000, // PA_SC_VPORT_ZMIN_7
+ 0x3f800000, // PA_SC_VPORT_ZMAX_7
+ 0x00000000, // PA_SC_VPORT_ZMIN_8
+ 0x3f800000, // PA_SC_VPORT_ZMAX_8
+ 0x00000000, // PA_SC_VPORT_ZMIN_9
+ 0x3f800000, // PA_SC_VPORT_ZMAX_9
+ 0x00000000, // PA_SC_VPORT_ZMIN_10
+ 0x3f800000, // PA_SC_VPORT_ZMAX_10
+ 0x00000000, // PA_SC_VPORT_ZMIN_11
+ 0x3f800000, // PA_SC_VPORT_ZMAX_11
+ 0x00000000, // PA_SC_VPORT_ZMIN_12
+ 0x3f800000, // PA_SC_VPORT_ZMAX_12
+ 0x00000000, // PA_SC_VPORT_ZMIN_13
+ 0x3f800000, // PA_SC_VPORT_ZMAX_13
+ 0x00000000, // PA_SC_VPORT_ZMIN_14
+ 0x3f800000, // PA_SC_VPORT_ZMAX_14
+ 0x00000000, // PA_SC_VPORT_ZMIN_15
+ 0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const u32 si_SECT_CONTEXT_def_2[] =
+{
+ 0x00000000, // CP_PERFMON_CNTX_CNTL
+ 0x00000000, // CP_RINGID
+ 0x00000000, // CP_VMID
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0xffffffff, // VGT_MAX_VTX_INDX
+ 0x00000000, // VGT_MIN_VTX_INDX
+ 0x00000000, // VGT_INDX_OFFSET
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+ 0, // HOLE
+ 0x00000000, // CB_BLEND_RED
+ 0x00000000, // CB_BLEND_GREEN
+ 0x00000000, // CB_BLEND_BLUE
+ 0x00000000, // CB_BLEND_ALPHA
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_STENCIL_CONTROL
+ 0x00000000, // DB_STENCILREFMASK
+ 0x00000000, // DB_STENCILREFMASK_BF
+ 0, // HOLE
+ 0x00000000, // PA_CL_VPORT_XSCALE
+ 0x00000000, // PA_CL_VPORT_XOFFSET
+ 0x00000000, // PA_CL_VPORT_YSCALE
+ 0x00000000, // PA_CL_VPORT_YOFFSET
+ 0x00000000, // PA_CL_VPORT_ZSCALE
+ 0x00000000, // PA_CL_VPORT_ZOFFSET
+ 0x00000000, // PA_CL_VPORT_XSCALE_1
+ 0x00000000, // PA_CL_VPORT_XOFFSET_1
+ 0x00000000, // PA_CL_VPORT_YSCALE_1
+ 0x00000000, // PA_CL_VPORT_YOFFSET_1
+ 0x00000000, // PA_CL_VPORT_ZSCALE_1
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_1
+ 0x00000000, // PA_CL_VPORT_XSCALE_2
+ 0x00000000, // PA_CL_VPORT_XOFFSET_2
+ 0x00000000, // PA_CL_VPORT_YSCALE_2
+ 0x00000000, // PA_CL_VPORT_YOFFSET_2
+ 0x00000000, // PA_CL_VPORT_ZSCALE_2
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_2
+ 0x00000000, // PA_CL_VPORT_XSCALE_3
+ 0x00000000, // PA_CL_VPORT_XOFFSET_3
+ 0x00000000, // PA_CL_VPORT_YSCALE_3
+ 0x00000000, // PA_CL_VPORT_YOFFSET_3
+ 0x00000000, // PA_CL_VPORT_ZSCALE_3
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_3
+ 0x00000000, // PA_CL_VPORT_XSCALE_4
+ 0x00000000, // PA_CL_VPORT_XOFFSET_4
+ 0x00000000, // PA_CL_VPORT_YSCALE_4
+ 0x00000000, // PA_CL_VPORT_YOFFSET_4
+ 0x00000000, // PA_CL_VPORT_ZSCALE_4
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_4
+ 0x00000000, // PA_CL_VPORT_XSCALE_5
+ 0x00000000, // PA_CL_VPORT_XOFFSET_5
+ 0x00000000, // PA_CL_VPORT_YSCALE_5
+ 0x00000000, // PA_CL_VPORT_YOFFSET_5
+ 0x00000000, // PA_CL_VPORT_ZSCALE_5
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_5
+ 0x00000000, // PA_CL_VPORT_XSCALE_6
+ 0x00000000, // PA_CL_VPORT_XOFFSET_6
+ 0x00000000, // PA_CL_VPORT_YSCALE_6
+ 0x00000000, // PA_CL_VPORT_YOFFSET_6
+ 0x00000000, // PA_CL_VPORT_ZSCALE_6
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_6
+ 0x00000000, // PA_CL_VPORT_XSCALE_7
+ 0x00000000, // PA_CL_VPORT_XOFFSET_7
+ 0x00000000, // PA_CL_VPORT_YSCALE_7
+ 0x00000000, // PA_CL_VPORT_YOFFSET_7
+ 0x00000000, // PA_CL_VPORT_ZSCALE_7
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_7
+ 0x00000000, // PA_CL_VPORT_XSCALE_8
+ 0x00000000, // PA_CL_VPORT_XOFFSET_8
+ 0x00000000, // PA_CL_VPORT_YSCALE_8
+ 0x00000000, // PA_CL_VPORT_YOFFSET_8
+ 0x00000000, // PA_CL_VPORT_ZSCALE_8
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_8
+ 0x00000000, // PA_CL_VPORT_XSCALE_9
+ 0x00000000, // PA_CL_VPORT_XOFFSET_9
+ 0x00000000, // PA_CL_VPORT_YSCALE_9
+ 0x00000000, // PA_CL_VPORT_YOFFSET_9
+ 0x00000000, // PA_CL_VPORT_ZSCALE_9
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_9
+ 0x00000000, // PA_CL_VPORT_XSCALE_10
+ 0x00000000, // PA_CL_VPORT_XOFFSET_10
+ 0x00000000, // PA_CL_VPORT_YSCALE_10
+ 0x00000000, // PA_CL_VPORT_YOFFSET_10
+ 0x00000000, // PA_CL_VPORT_ZSCALE_10
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_10
+ 0x00000000, // PA_CL_VPORT_XSCALE_11
+ 0x00000000, // PA_CL_VPORT_XOFFSET_11
+ 0x00000000, // PA_CL_VPORT_YSCALE_11
+ 0x00000000, // PA_CL_VPORT_YOFFSET_11
+ 0x00000000, // PA_CL_VPORT_ZSCALE_11
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_11
+ 0x00000000, // PA_CL_VPORT_XSCALE_12
+ 0x00000000, // PA_CL_VPORT_XOFFSET_12
+ 0x00000000, // PA_CL_VPORT_YSCALE_12
+ 0x00000000, // PA_CL_VPORT_YOFFSET_12
+ 0x00000000, // PA_CL_VPORT_ZSCALE_12
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_12
+ 0x00000000, // PA_CL_VPORT_XSCALE_13
+ 0x00000000, // PA_CL_VPORT_XOFFSET_13
+ 0x00000000, // PA_CL_VPORT_YSCALE_13
+ 0x00000000, // PA_CL_VPORT_YOFFSET_13
+ 0x00000000, // PA_CL_VPORT_ZSCALE_13
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_13
+ 0x00000000, // PA_CL_VPORT_XSCALE_14
+ 0x00000000, // PA_CL_VPORT_XOFFSET_14
+ 0x00000000, // PA_CL_VPORT_YSCALE_14
+ 0x00000000, // PA_CL_VPORT_YOFFSET_14
+ 0x00000000, // PA_CL_VPORT_ZSCALE_14
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_14
+ 0x00000000, // PA_CL_VPORT_XSCALE_15
+ 0x00000000, // PA_CL_VPORT_XOFFSET_15
+ 0x00000000, // PA_CL_VPORT_YSCALE_15
+ 0x00000000, // PA_CL_VPORT_YOFFSET_15
+ 0x00000000, // PA_CL_VPORT_ZSCALE_15
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_15
+ 0x00000000, // PA_CL_UCP_0_X
+ 0x00000000, // PA_CL_UCP_0_Y
+ 0x00000000, // PA_CL_UCP_0_Z
+ 0x00000000, // PA_CL_UCP_0_W
+ 0x00000000, // PA_CL_UCP_1_X
+ 0x00000000, // PA_CL_UCP_1_Y
+ 0x00000000, // PA_CL_UCP_1_Z
+ 0x00000000, // PA_CL_UCP_1_W
+ 0x00000000, // PA_CL_UCP_2_X
+ 0x00000000, // PA_CL_UCP_2_Y
+ 0x00000000, // PA_CL_UCP_2_Z
+ 0x00000000, // PA_CL_UCP_2_W
+ 0x00000000, // PA_CL_UCP_3_X
+ 0x00000000, // PA_CL_UCP_3_Y
+ 0x00000000, // PA_CL_UCP_3_Z
+ 0x00000000, // PA_CL_UCP_3_W
+ 0x00000000, // PA_CL_UCP_4_X
+ 0x00000000, // PA_CL_UCP_4_Y
+ 0x00000000, // PA_CL_UCP_4_Z
+ 0x00000000, // PA_CL_UCP_4_W
+ 0x00000000, // PA_CL_UCP_5_X
+ 0x00000000, // PA_CL_UCP_5_Y
+ 0x00000000, // PA_CL_UCP_5_Z
+ 0x00000000, // PA_CL_UCP_5_W
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_CNTL_0
+ 0x00000000, // SPI_PS_INPUT_CNTL_1
+ 0x00000000, // SPI_PS_INPUT_CNTL_2
+ 0x00000000, // SPI_PS_INPUT_CNTL_3
+ 0x00000000, // SPI_PS_INPUT_CNTL_4
+ 0x00000000, // SPI_PS_INPUT_CNTL_5
+ 0x00000000, // SPI_PS_INPUT_CNTL_6
+ 0x00000000, // SPI_PS_INPUT_CNTL_7
+ 0x00000000, // SPI_PS_INPUT_CNTL_8
+ 0x00000000, // SPI_PS_INPUT_CNTL_9
+ 0x00000000, // SPI_PS_INPUT_CNTL_10
+ 0x00000000, // SPI_PS_INPUT_CNTL_11
+ 0x00000000, // SPI_PS_INPUT_CNTL_12
+ 0x00000000, // SPI_PS_INPUT_CNTL_13
+ 0x00000000, // SPI_PS_INPUT_CNTL_14
+ 0x00000000, // SPI_PS_INPUT_CNTL_15
+ 0x00000000, // SPI_PS_INPUT_CNTL_16
+ 0x00000000, // SPI_PS_INPUT_CNTL_17
+ 0x00000000, // SPI_PS_INPUT_CNTL_18
+ 0x00000000, // SPI_PS_INPUT_CNTL_19
+ 0x00000000, // SPI_PS_INPUT_CNTL_20
+ 0x00000000, // SPI_PS_INPUT_CNTL_21
+ 0x00000000, // SPI_PS_INPUT_CNTL_22
+ 0x00000000, // SPI_PS_INPUT_CNTL_23
+ 0x00000000, // SPI_PS_INPUT_CNTL_24
+ 0x00000000, // SPI_PS_INPUT_CNTL_25
+ 0x00000000, // SPI_PS_INPUT_CNTL_26
+ 0x00000000, // SPI_PS_INPUT_CNTL_27
+ 0x00000000, // SPI_PS_INPUT_CNTL_28
+ 0x00000000, // SPI_PS_INPUT_CNTL_29
+ 0x00000000, // SPI_PS_INPUT_CNTL_30
+ 0x00000000, // SPI_PS_INPUT_CNTL_31
+ 0x00000000, // SPI_VS_OUT_CONFIG
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_ENA
+ 0x00000000, // SPI_PS_INPUT_ADDR
+ 0x00000000, // SPI_INTERP_CONTROL_0
+ 0x00000002, // SPI_PS_IN_CONTROL
+ 0, // HOLE
+ 0x00000000, // SPI_BARYC_CNTL
+ 0, // HOLE
+ 0x00000000, // SPI_TMPRING_SIZE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_WAVE_MGMT_1
+ 0x00000000, // SPI_WAVE_MGMT_2
+ 0x00000000, // SPI_SHADER_POS_FORMAT
+ 0x00000000, // SPI_SHADER_Z_FORMAT
+ 0x00000000, // SPI_SHADER_COL_FORMAT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_BLEND0_CONTROL
+ 0x00000000, // CB_BLEND1_CONTROL
+ 0x00000000, // CB_BLEND2_CONTROL
+ 0x00000000, // CB_BLEND3_CONTROL
+ 0x00000000, // CB_BLEND4_CONTROL
+ 0x00000000, // CB_BLEND5_CONTROL
+ 0x00000000, // CB_BLEND6_CONTROL
+ 0x00000000, // CB_BLEND7_CONTROL
+};
+static const u32 si_SECT_CONTEXT_def_3[] =
+{
+ 0x00000000, // PA_CL_POINT_X_RAD
+ 0x00000000, // PA_CL_POINT_Y_RAD
+ 0x00000000, // PA_CL_POINT_SIZE
+ 0x00000000, // PA_CL_POINT_CULL_RAD
+ 0x00000000, // VGT_DMA_BASE_HI
+ 0x00000000, // VGT_DMA_BASE
+};
+static const u32 si_SECT_CONTEXT_def_4[] =
+{
+ 0x00000000, // DB_DEPTH_CONTROL
+ 0x00000000, // DB_EQAA
+ 0x00000000, // CB_COLOR_CONTROL
+ 0x00000000, // DB_SHADER_CONTROL
+ 0x00090000, // PA_CL_CLIP_CNTL
+ 0x00000004, // PA_SU_SC_MODE_CNTL
+ 0x00000000, // PA_CL_VTE_CNTL
+ 0x00000000, // PA_CL_VS_OUT_CNTL
+ 0x00000000, // PA_CL_NANINF_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+ 0x00000000, // PA_SU_PRIM_FILTER_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SU_POINT_SIZE
+ 0x00000000, // PA_SU_POINT_MINMAX
+ 0x00000000, // PA_SU_LINE_CNTL
+ 0x00000000, // PA_SC_LINE_STIPPLE
+ 0x00000000, // VGT_OUTPUT_PATH_CNTL
+ 0x00000000, // VGT_HOS_CNTL
+ 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+ 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+ 0x00000000, // VGT_HOS_REUSE_DEPTH
+ 0x00000000, // VGT_GROUP_PRIM_TYPE
+ 0x00000000, // VGT_GROUP_FIRST_DECR
+ 0x00000000, // VGT_GROUP_DECR
+ 0x00000000, // VGT_GROUP_VECT_0_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_CNTL
+ 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+ 0x00000000, // VGT_GS_MODE
+ 0, // HOLE
+ 0x00000000, // PA_SC_MODE_CNTL_0
+ 0x00000000, // PA_SC_MODE_CNTL_1
+ 0x00000000, // VGT_ENHANCE
+ 0x00000100, // VGT_GS_PER_ES
+ 0x00000080, // VGT_ES_PER_GS
+ 0x00000002, // VGT_GS_PER_VS
+ 0x00000000, // VGT_GSVS_RING_OFFSET_1
+ 0x00000000, // VGT_GSVS_RING_OFFSET_2
+ 0x00000000, // VGT_GSVS_RING_OFFSET_3
+ 0x00000000, // VGT_GS_OUT_PRIM_TYPE
+ 0x00000000, // IA_ENHANCE
+};
+static const u32 si_SECT_CONTEXT_def_5[] =
+{
+ 0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const u32 si_SECT_CONTEXT_def_6[] =
+{
+ 0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const u32 si_SECT_CONTEXT_def_7[] =
+{
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_0
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_1
+ 0x000000ff, // IA_MULTI_VGT_PARAM
+ 0x00000000, // VGT_ESGS_RING_ITEMSIZE
+ 0x00000000, // VGT_GSVS_RING_ITEMSIZE
+ 0x00000000, // VGT_REUSE_OFF
+ 0x00000000, // VGT_VTX_CNT_EN
+ 0x00000000, // DB_HTILE_SURFACE
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE0
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE1
+ 0x00000000, // DB_PRELOAD_CONTROL
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0, // HOLE
+ 0x00000000, // VGT_GS_MAX_VERT_OUT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_SHADER_STAGES_EN
+ 0x00000000, // VGT_LS_HS_CONFIG
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+ 0x00000000, // VGT_TF_PARAM
+ 0x00000000, // DB_ALPHA_TO_MASK
+ 0, // HOLE
+ 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+ 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+ 0x00000000, // VGT_GS_INSTANCE_CNT
+ 0x00000000, // VGT_STRMOUT_CONFIG
+ 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_0
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_1
+ 0x00001000, // PA_SC_LINE_CNTL
+ 0x00000000, // PA_SC_AA_CONFIG
+ 0x00000005, // PA_SU_VTX_CNTL
+ 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+ 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+ 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+ 0x00000010, // VGT_OUT_DEALLOC_CNTL
+ 0x00000000, // CB_COLOR0_BASE
+ 0x00000000, // CB_COLOR0_PITCH
+ 0x00000000, // CB_COLOR0_SLICE
+ 0x00000000, // CB_COLOR0_VIEW
+ 0x00000000, // CB_COLOR0_INFO
+ 0x00000000, // CB_COLOR0_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_CMASK
+ 0x00000000, // CB_COLOR0_CMASK_SLICE
+ 0x00000000, // CB_COLOR0_FMASK
+ 0x00000000, // CB_COLOR0_FMASK_SLICE
+ 0x00000000, // CB_COLOR0_CLEAR_WORD0
+ 0x00000000, // CB_COLOR0_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_BASE
+ 0x00000000, // CB_COLOR1_PITCH
+ 0x00000000, // CB_COLOR1_SLICE
+ 0x00000000, // CB_COLOR1_VIEW
+ 0x00000000, // CB_COLOR1_INFO
+ 0x00000000, // CB_COLOR1_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_CMASK
+ 0x00000000, // CB_COLOR1_CMASK_SLICE
+ 0x00000000, // CB_COLOR1_FMASK
+ 0x00000000, // CB_COLOR1_FMASK_SLICE
+ 0x00000000, // CB_COLOR1_CLEAR_WORD0
+ 0x00000000, // CB_COLOR1_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_BASE
+ 0x00000000, // CB_COLOR2_PITCH
+ 0x00000000, // CB_COLOR2_SLICE
+ 0x00000000, // CB_COLOR2_VIEW
+ 0x00000000, // CB_COLOR2_INFO
+ 0x00000000, // CB_COLOR2_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_CMASK
+ 0x00000000, // CB_COLOR2_CMASK_SLICE
+ 0x00000000, // CB_COLOR2_FMASK
+ 0x00000000, // CB_COLOR2_FMASK_SLICE
+ 0x00000000, // CB_COLOR2_CLEAR_WORD0
+ 0x00000000, // CB_COLOR2_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_BASE
+ 0x00000000, // CB_COLOR3_PITCH
+ 0x00000000, // CB_COLOR3_SLICE
+ 0x00000000, // CB_COLOR3_VIEW
+ 0x00000000, // CB_COLOR3_INFO
+ 0x00000000, // CB_COLOR3_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_CMASK
+ 0x00000000, // CB_COLOR3_CMASK_SLICE
+ 0x00000000, // CB_COLOR3_FMASK
+ 0x00000000, // CB_COLOR3_FMASK_SLICE
+ 0x00000000, // CB_COLOR3_CLEAR_WORD0
+ 0x00000000, // CB_COLOR3_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_BASE
+ 0x00000000, // CB_COLOR4_PITCH
+ 0x00000000, // CB_COLOR4_SLICE
+ 0x00000000, // CB_COLOR4_VIEW
+ 0x00000000, // CB_COLOR4_INFO
+ 0x00000000, // CB_COLOR4_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_CMASK
+ 0x00000000, // CB_COLOR4_CMASK_SLICE
+ 0x00000000, // CB_COLOR4_FMASK
+ 0x00000000, // CB_COLOR4_FMASK_SLICE
+ 0x00000000, // CB_COLOR4_CLEAR_WORD0
+ 0x00000000, // CB_COLOR4_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_BASE
+ 0x00000000, // CB_COLOR5_PITCH
+ 0x00000000, // CB_COLOR5_SLICE
+ 0x00000000, // CB_COLOR5_VIEW
+ 0x00000000, // CB_COLOR5_INFO
+ 0x00000000, // CB_COLOR5_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_CMASK
+ 0x00000000, // CB_COLOR5_CMASK_SLICE
+ 0x00000000, // CB_COLOR5_FMASK
+ 0x00000000, // CB_COLOR5_FMASK_SLICE
+ 0x00000000, // CB_COLOR5_CLEAR_WORD0
+ 0x00000000, // CB_COLOR5_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_BASE
+ 0x00000000, // CB_COLOR6_PITCH
+ 0x00000000, // CB_COLOR6_SLICE
+ 0x00000000, // CB_COLOR6_VIEW
+ 0x00000000, // CB_COLOR6_INFO
+ 0x00000000, // CB_COLOR6_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_CMASK
+ 0x00000000, // CB_COLOR6_CMASK_SLICE
+ 0x00000000, // CB_COLOR6_FMASK
+ 0x00000000, // CB_COLOR6_FMASK_SLICE
+ 0x00000000, // CB_COLOR6_CLEAR_WORD0
+ 0x00000000, // CB_COLOR6_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_BASE
+ 0x00000000, // CB_COLOR7_PITCH
+ 0x00000000, // CB_COLOR7_SLICE
+ 0x00000000, // CB_COLOR7_VIEW
+ 0x00000000, // CB_COLOR7_INFO
+ 0x00000000, // CB_COLOR7_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_CMASK
+ 0x00000000, // CB_COLOR7_CMASK_SLICE
+ 0x00000000, // CB_COLOR7_FMASK
+ 0x00000000, // CB_COLOR7_FMASK_SLICE
+ 0x00000000, // CB_COLOR7_CLEAR_WORD0
+ 0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
+{
+ {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+ {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
+ {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+ {si_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+ {si_SECT_CONTEXT_def_5, 0x0000a2a1, 1 },
+ {si_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+ {si_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+ { NULL, 0, 0 }
+};
+static const struct cs_section_def si_cs_data[] = {
+ { si_SECT_CONTEXT_defs, SECT_CONTEXT },
+ { NULL, SECT_NONE }
+};
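Each entry in the NULL-terminated table above pairs a register-default array with its starting register offset and entry count (e.g. si_SECT_CONTEXT_def_1 covers 212 registers from 0x0000a000). A sketch of walking such a table; the struct layout is inferred from how the initializers are written, not taken from this patch:

typedef unsigned int u32;

struct cs_extent_def {
	const u32 *extent;	/* array of register defaults */
	u32 reg_index;		/* first register offset */
	u32 reg_count;		/* number of entries in extent */
};

u32 count_regs(const struct cs_extent_def *defs)
{
	u32 total = 0;

	/* NULL extent pointer marks the end of the table. */
	for (const struct cs_extent_def *e = defs; e->extent; e++)
		total += e->reg_count;	/* 212 + 272 + 6 + ... for SECT_CONTEXT */
	return total;
}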
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
new file mode 100644
index 000000000000..895c8e2353e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK 0x196c
+#define SI_DC_GPIO_HPD_A 0x196d
+#define SI_DC_GPIO_HPD_EN 0x196e
+#define SI_DC_GPIO_HPD_Y 0x196f
+
+#define SI_GRPH_CONTROL 0x1a01
+# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define SI_GRPH_DEPTH_8BPP 0
+# define SI_GRPH_DEPTH_16BPP 1
+# define SI_GRPH_DEPTH_32BPP 2
+# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define SI_ADDR_SURF_2_BANK 0
+# define SI_ADDR_SURF_4_BANK 1
+# define SI_ADDR_SURF_8_BANK 2
+# define SI_ADDR_SURF_16_BANK 3
+# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
+# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define SI_ADDR_SURF_BANK_WIDTH_1 0
+# define SI_ADDR_SURF_BANK_WIDTH_2 1
+# define SI_ADDR_SURF_BANK_WIDTH_4 2
+# define SI_ADDR_SURF_BANK_WIDTH_8 3
+# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define SI_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define SI_GRPH_FORMAT_ARGB1555 0
+# define SI_GRPH_FORMAT_ARGB565 1
+# define SI_GRPH_FORMAT_ARGB4444 2
+# define SI_GRPH_FORMAT_AI88 3
+# define SI_GRPH_FORMAT_MONO16 4
+# define SI_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define SI_GRPH_FORMAT_ARGB8888 0
+# define SI_GRPH_FORMAT_ARGB2101010 1
+# define SI_GRPH_FORMAT_32BPP_DIG 2
+# define SI_GRPH_FORMAT_8B_ARGB2101010 3
+# define SI_GRPH_FORMAT_BGRA1010102 4
+# define SI_GRPH_FORMAT_8B_BGRA1010102 5
+# define SI_GRPH_FORMAT_RGB111110 6
+# define SI_GRPH_FORMAT_BGR101111 7
+# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define SI_ADDR_SURF_BANK_HEIGHT_1 0
+# define SI_ADDR_SURF_BANK_HEIGHT_2 1
+# define SI_ADDR_SURF_BANK_HEIGHT_4 2
+# define SI_ADDR_SURF_BANK_HEIGHT_8 3
+# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define SI_ADDR_SURF_TILE_SPLIT_64B 0
+# define SI_ADDR_SURF_TILE_SPLIT_128B 1
+# define SI_ADDR_SURF_TILE_SPLIT_256B 2
+# define SI_ADDR_SURF_TILE_SPLIT_512B 3
+# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
+# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
+# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
+# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
+# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
+# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
+# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
+# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
+# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
+# define SI_ADDR_SURF_P2 0
+# define SI_ADDR_SURF_P4_8x16 4
+# define SI_ADDR_SURF_P4_16x16 5
+# define SI_ADDR_SURF_P4_16x32 6
+# define SI_ADDR_SURF_P4_32x32 7
+# define SI_ADDR_SURF_P8_16x16_8x16 8
+# define SI_ADDR_SURF_P8_16x32_8x16 9
+# define SI_ADDR_SURF_P8_32x32_8x16 10
+# define SI_ADDR_SURF_P8_16x32_16x16 11
+# define SI_ADDR_SURF_P8_32x32_16x16 12
+# define SI_ADDR_SURF_P8_32x32_16x32 13
+# define SI_ADDR_SURF_P8_32x64_32x32 14
+
+#endif
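The SI_GRPH_* helpers above each mask a field value and shift it to its bit position, so several fields can be ORed into one 32-bit register word. A small sketch composing a SI_GRPH_CONTROL value; the chosen field values are arbitrary examples, and the macros are restated so the snippet is self-contained:

#include <stdint.h>

#define SI_GRPH_DEPTH(x)             (((x) & 0x3) << 0)
#define SI_GRPH_DEPTH_32BPP          2
#define SI_GRPH_FORMAT(x)            (((x) & 0x7) << 8)
#define SI_GRPH_FORMAT_ARGB8888      0
#define SI_GRPH_ARRAY_MODE(x)        (((x) & 0x7) << 20)
#define SI_GRPH_ARRAY_LINEAR_ALIGNED 1

uint32_t grph_control_value(void)
{
	/* Each macro masks its field and shifts it into place, so the
	 * ORed result packs all fields into one register word. */
	return SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
	       SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
	       SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_LINEAR_ALIGNED);
}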
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
new file mode 100644
index 000000000000..c57eff159374
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
@@ -0,0 +1,2461 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
+#define SI_MAX_SH_GPRS 256
+#define SI_MAX_TEMP_GPRS 16
+#define SI_MAX_SH_THREADS 256
+#define SI_MAX_SH_STACK_ENTRIES 4096
+#define SI_MAX_FRC_EOV_CNT 16384
+#define SI_MAX_BACKENDS 8
+#define SI_MAX_BACKENDS_MASK 0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
+#define SI_MAX_SIMDS 12
+#define SI_MAX_SIMDS_MASK 0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
+#define SI_MAX_PIPES 8
+#define SI_MAX_PIPES_MASK 0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
+#define SI_MAX_LDS_NUM 0xFFFF
+#define SI_MAX_TCC 16
+#define SI_MAX_TCC_MASK 0xFFFF
+
+#define AMDGPU_NUM_OF_VMIDS 8
+
+/* SMC IND accessor regs */
+#define SMC_IND_INDEX_0 0x80
+#define SMC_IND_DATA_0 0x81
+
+#define SMC_IND_ACCESS_CNTL 0x8A
+# define AUTO_INCREMENT_IND_0 (1 << 0)
+#define SMC_MESSAGE_0 0x8B
+#define SMC_RESP_0 0x8C
+
+/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
+#define SMC_CG_IND_START 0xc0030000
+#define SMC_CG_IND_END 0xc0040000
+
+#define CG_CGTT_LOCAL_0 0x400
+#define CG_CGTT_LOCAL_1 0x401
+
+/* SMC IND registers */
+#define SMC_SYSCON_RESET_CNTL 0x80000000
+# define RST_REG (1 << 0)
+#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
+# define CK_DISABLE (1 << 0)
+# define CKEN (1 << 24)
+
+#define VGA_HDP_CONTROL 0xCA
+#define VGA_MEMORY_DISABLE (1 << 4)
+
+#define DCCG_DISP_SLOW_SELECT_REG 0x13F
+#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
+#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
+#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
+#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
+#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
+#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
+
+#define CG_SPLL_FUNC_CNTL 0x180
+#define SPLL_RESET (1 << 0)
+#define SPLL_SLEEP (1 << 1)
+#define SPLL_BYPASS_EN (1 << 3)
+#define SPLL_REF_DIV(x) ((x) << 4)
+#define SPLL_REF_DIV_MASK (0x3f << 4)
+#define SPLL_PDIV_A(x) ((x) << 20)
+#define SPLL_PDIV_A_MASK (0x7f << 20)
+#define SPLL_PDIV_A_SHIFT 20
+#define CG_SPLL_FUNC_CNTL_2 0x181
+#define SCLK_MUX_SEL(x) ((x) << 0)
+#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define SPLL_CTLREQ_CHG (1 << 23)
+#define SCLK_MUX_UPDATE (1 << 26)
+#define CG_SPLL_FUNC_CNTL_3 0x182
+#define SPLL_FB_DIV(x) ((x) << 0)
+#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
+#define SPLL_FB_DIV_SHIFT 0
+#define SPLL_DITHEN (1 << 28)
+#define CG_SPLL_FUNC_CNTL_4 0x183
+
+#define SPLL_STATUS 0x185
+#define SPLL_CHG_STATUS (1 << 1)
+#define SPLL_CNTL_MODE 0x186
+#define SPLL_SW_DIR_CONTROL (1 << 0)
+# define SPLL_REFCLK_SEL(x) ((x) << 26)
+# define SPLL_REFCLK_SEL_MASK (3 << 26)
+
+#define CG_SPLL_SPREAD_SPECTRUM 0x188
+#define SSEN (1 << 0)
+#define CLK_S(x) ((x) << 4)
+#define CLK_S_MASK (0xfff << 4)
+#define CLK_S_SHIFT 4
+#define CG_SPLL_SPREAD_SPECTRUM_2 0x189
+#define CLK_V(x) ((x) << 0)
+#define CLK_V_MASK (0x3ffffff << 0)
+#define CLK_V_SHIFT 0
+
+#define CG_SPLL_AUTOSCALE_CNTL 0x18b
+# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
+
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL 0x18d
+# define UPLL_RESET_MASK 0x00000001
+# define UPLL_SLEEP_MASK 0x00000002
+# define UPLL_BYPASS_EN_MASK 0x00000004
+# define UPLL_CTLREQ_MASK 0x00000008
+# define UPLL_VCO_MODE_MASK 0x00000600
+# define UPLL_REF_DIV_MASK 0x003F0000
+# define UPLL_CTLACK_MASK 0x40000000
+# define UPLL_CTLACK2_MASK 0x80000000
+#define CG_UPLL_FUNC_CNTL_2 0x18e
+# define UPLL_PDIV_A(x) ((x) << 0)
+# define UPLL_PDIV_A_MASK 0x0000007F
+# define UPLL_PDIV_B(x) ((x) << 8)
+# define UPLL_PDIV_B_MASK 0x00007F00
+# define VCLK_SRC_SEL(x) ((x) << 20)
+# define VCLK_SRC_SEL_MASK 0x01F00000
+# define DCLK_SRC_SEL(x) ((x) << 25)
+# define DCLK_SRC_SEL_MASK 0x3E000000
+#define CG_UPLL_FUNC_CNTL_3 0x18f
+# define UPLL_FB_DIV(x) ((x) << 0)
+# define UPLL_FB_DIV_MASK 0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4 0x191
+# define UPLL_SPARE_ISPARE9 0x00020000
+#define CG_UPLL_FUNC_CNTL_5 0x192
+# define RESET_ANTI_MUX_MASK 0x00000200
+#define CG_UPLL_SPREAD_SPECTRUM 0x194
+# define SSEN_MASK 0x00000001
+
+#define MPLL_BYPASSCLK_SEL 0x197
+# define MPLL_CLKOUT_SEL(x) ((x) << 8)
+# define MPLL_CLKOUT_SEL_MASK 0xFF00
+
+#define CG_CLKPIN_CNTL 0x198
+# define XTALIN_DIVIDE (1 << 1)
+# define BCLK_AS_XCLK (1 << 2)
+#define CG_CLKPIN_CNTL_2 0x199
+# define FORCE_BIF_REFCLK_EN (1 << 3)
+# define MUX_TCLK_TO_XCLK (1 << 8)
+
+#define THM_CLK_CNTL 0x19b
+# define CMON_CLK_SEL(x) ((x) << 0)
+# define CMON_CLK_SEL_MASK 0xFF
+# define TMON_CLK_SEL(x) ((x) << 8)
+# define TMON_CLK_SEL_MASK 0xFF00
+#define MISC_CLK_CNTL 0x19c
+# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
+# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
+# define ZCLK_SEL(x) ((x) << 8)
+# define ZCLK_SEL_MASK 0xFF00
+
+#define CG_THERMAL_CTRL 0x1c0
+#define DPM_EVENT_SRC(x) ((x) << 0)
+#define DPM_EVENT_SRC_MASK (7 << 0)
+#define DIG_THERM_DPM(x) ((x) << 14)
+#define DIG_THERM_DPM_MASK 0x003FC000
+#define DIG_THERM_DPM_SHIFT 14
+#define CG_THERMAL_STATUS 0x1c1
+#define FDO_PWM_DUTY(x) ((x) << 9)
+#define FDO_PWM_DUTY_MASK (0xff << 9)
+#define FDO_PWM_DUTY_SHIFT 9
+#define CG_THERMAL_INT 0x1c2
+#define DIG_THERM_INTH(x) ((x) << 8)
+#define DIG_THERM_INTH_MASK 0x0000FF00
+#define DIG_THERM_INTH_SHIFT 8
+#define DIG_THERM_INTL(x) ((x) << 16)
+#define DIG_THERM_INTL_MASK 0x00FF0000
+#define DIG_THERM_INTL_SHIFT 16
+#define THERM_INT_MASK_HIGH (1 << 24)
+#define THERM_INT_MASK_LOW (1 << 25)
+
+#define CG_MULT_THERMAL_CTRL 0x1c4
+#define TEMP_SEL(x) ((x) << 20)
+#define TEMP_SEL_MASK (0xff << 20)
+#define TEMP_SEL_SHIFT 20
+#define CG_MULT_THERMAL_STATUS 0x1c5
+#define ASIC_MAX_TEMP(x) ((x) << 0)
+#define ASIC_MAX_TEMP_MASK 0x000001ff
+#define ASIC_MAX_TEMP_SHIFT 0
+#define CTF_TEMP(x) ((x) << 9)
+#define CTF_TEMP_MASK 0x0003fe00
+#define CTF_TEMP_SHIFT 9
+
+#define CG_FDO_CTRL0 0x1d5
+#define FDO_STATIC_DUTY(x) ((x) << 0)
+#define FDO_STATIC_DUTY_MASK 0x000000FF
+#define FDO_STATIC_DUTY_SHIFT 0
+#define CG_FDO_CTRL1 0x1d6
+#define FMAX_DUTY100(x) ((x) << 0)
+#define FMAX_DUTY100_MASK 0x000000FF
+#define FMAX_DUTY100_SHIFT 0
+#define CG_FDO_CTRL2 0x1d7
+#define TMIN(x) ((x) << 0)
+#define TMIN_MASK 0x000000FF
+#define TMIN_SHIFT 0
+#define FDO_PWM_MODE(x) ((x) << 11)
+#define FDO_PWM_MODE_MASK (7 << 11)
+#define FDO_PWM_MODE_SHIFT 11
+#define TACH_PWM_RESP_RATE(x) ((x) << 25)
+#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
+#define TACH_PWM_RESP_RATE_SHIFT 25
+
+#define CG_TACH_CTRL 0x1dc
+# define EDGE_PER_REV(x) ((x) << 0)
+# define EDGE_PER_REV_MASK (0x7 << 0)
+# define EDGE_PER_REV_SHIFT 0
+# define TARGET_PERIOD(x) ((x) << 3)
+# define TARGET_PERIOD_MASK 0xfffffff8
+# define TARGET_PERIOD_SHIFT 3
+#define CG_TACH_STATUS 0x1dd
+# define TACH_PERIOD(x) ((x) << 0)
+# define TACH_PERIOD_MASK 0xffffffff
+# define TACH_PERIOD_SHIFT 0
+
+#define GENERAL_PWRMGT 0x1e0
+# define GLOBAL_PWRMGT_EN (1 << 0)
+# define STATIC_PM_EN (1 << 1)
+# define THERMAL_PROTECTION_DIS (1 << 2)
+# define THERMAL_PROTECTION_TYPE (1 << 3)
+# define SW_SMIO_INDEX(x) ((x) << 6)
+# define SW_SMIO_INDEX_MASK (1 << 6)
+# define SW_SMIO_INDEX_SHIFT 6
+# define VOLT_PWRMGT_EN (1 << 10)
+# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
+#define CG_TPC 0x1e1
+#define SCLK_PWRMGT_CNTL 0x1e2
+# define SCLK_PWRMGT_OFF (1 << 0)
+# define SCLK_LOW_D1 (1 << 1)
+# define FIR_RESET (1 << 4)
+# define FIR_FORCE_TREND_SEL (1 << 5)
+# define FIR_TREND_MODE (1 << 6)
+# define DYN_GFX_CLK_OFF_EN (1 << 7)
+# define GFX_CLK_FORCE_ON (1 << 8)
+# define GFX_CLK_REQUEST_OFF (1 << 9)
+# define GFX_CLK_FORCE_OFF (1 << 10)
+# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
+# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
+# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
+# define DYN_LIGHT_SLEEP_EN (1 << 14)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0x1e6
+# define CURRENT_STATE_INDEX_MASK (0xf << 4)
+# define CURRENT_STATE_INDEX_SHIFT 4
+
+#define CG_FTV 0x1ef
+
+#define CG_FFCT_0 0x1f0
+# define UTC_0(x) ((x) << 0)
+# define UTC_0_MASK (0x3ff << 0)
+# define DTC_0(x) ((x) << 10)
+# define DTC_0_MASK (0x3ff << 10)
+
+#define CG_BSP 0x1ff
+# define BSP(x) ((x) << 0)
+# define BSP_MASK (0xffff << 0)
+# define BSU(x) ((x) << 16)
+# define BSU_MASK (0xf << 16)
+#define CG_AT 0x200
+# define CG_R(x) ((x) << 0)
+# define CG_R_MASK (0xffff << 0)
+# define CG_L(x) ((x) << 16)
+# define CG_L_MASK (0xffff << 16)
+
+#define CG_GIT 0x201
+# define CG_GICST(x) ((x) << 0)
+# define CG_GICST_MASK (0xffff << 0)
+# define CG_GIPOT(x) ((x) << 16)
+# define CG_GIPOT_MASK (0xffff << 16)
+
+#define CG_SSP 0x203
+# define SST(x) ((x) << 0)
+# define SST_MASK (0xffff << 0)
+# define SSTU(x) ((x) << 16)
+# define SSTU_MASK (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL 0x20a
+# define DISP1_GAP(x) ((x) << 0)
+# define DISP1_GAP_MASK (3 << 0)
+# define DISP2_GAP(x) ((x) << 2)
+# define DISP2_GAP_MASK (3 << 2)
+# define VBI_TIMER_COUNT(x) ((x) << 4)
+# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
+# define VBI_TIMER_UNIT(x) ((x) << 20)
+# define VBI_TIMER_UNIT_MASK (7 << 20)
+# define DISP1_GAP_MCHG(x) ((x) << 24)
+# define DISP1_GAP_MCHG_MASK (3 << 24)
+# define DISP2_GAP_MCHG(x) ((x) << 26)
+# define DISP2_GAP_MCHG_MASK (3 << 26)
+
+#define CG_ULV_CONTROL 0x21e
+#define CG_ULV_PARAMETER 0x21f
+
+#define SMC_SCRATCH0 0x221
+
+#define CG_CAC_CTRL 0x22e
+# define CAC_WINDOW(x) ((x) << 0)
+# define CAC_WINDOW_MASK 0x00ffffff
+
+#define DMIF_ADDR_CONFIG 0x2F5
+
+#define DMIF_ADDR_CALC 0x300
+
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0328
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
+#define SRBM_STATUS 0x394
+#define GRBM_RQ_PENDING (1 << 5)
+#define VMC_BUSY (1 << 8)
+#define MCB_BUSY (1 << 9)
+#define MCB_NON_DISPLAY_BUSY (1 << 10)
+#define MCC_BUSY (1 << 11)
+#define MCD_BUSY (1 << 12)
+#define SEM_BUSY (1 << 14)
+#define IH_BUSY (1 << 17)
+
+#define SRBM_SOFT_RESET 0x398
+#define SOFT_RESET_BIF (1 << 1)
+#define SOFT_RESET_DC (1 << 5)
+#define SOFT_RESET_DMA1 (1 << 6)
+#define SOFT_RESET_GRBM (1 << 8)
+#define SOFT_RESET_HDP (1 << 9)
+#define SOFT_RESET_IH (1 << 10)
+#define SOFT_RESET_MC (1 << 11)
+#define SOFT_RESET_ROM (1 << 14)
+#define SOFT_RESET_SEM (1 << 15)
+#define SOFT_RESET_VMC (1 << 17)
+#define SOFT_RESET_DMA (1 << 20)
+#define SOFT_RESET_TST (1 << 21)
+#define SOFT_RESET_REGBB (1 << 22)
+#define SOFT_RESET_ORB (1 << 23)
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3A0
+#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3A1
+
+#define SRBM_READ_ERROR 0x3A6
+#define SRBM_INT_CNTL 0x3A8
+#define SRBM_INT_ACK 0x3AA
+
+#define SRBM_STATUS2 0x3B1
+#define DMA_BUSY (1 << 5)
+#define DMA1_BUSY (1 << 6)
+
+#define VM_L2_CNTL 0x500
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
+#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
+#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
+#define VM_L2_CNTL2 0x501
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
+#define INVALIDATE_PTE_AND_PDE_CACHES 0
+#define INVALIDATE_ONLY_PTE_CACHES 1
+#define INVALIDATE_ONLY_PDE_CACHES 2
+#define VM_L2_CNTL3 0x502
+#define BANK_SELECT(x) ((x) << 0)
+#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
+#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
+#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
+#define VM_L2_STATUS 0x503
+#define L2_BUSY (1 << 0)
+#define VM_CONTEXT0_CNTL 0x504
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
+#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
+#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
+#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
+#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
+#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
+#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
+#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
+#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
+#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
+#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
+#define VM_CONTEXT1_CNTL 0x505
+#define VM_CONTEXT0_CNTL2 0x50C
+#define VM_CONTEXT1_CNTL2 0x50D
+#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x50E
+#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x50F
+#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x510
+#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x511
+#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x512
+#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x513
+#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x514
+#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x515
+
+#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x53f
+#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x537
+#define PROTECTIONS_MASK (0xf << 0)
+#define PROTECTIONS_SHIFT 0
+ /* bit 0: range
+ * bit 1: pde0
+ * bit 2: valid
+ * bit 3: read
+ * bit 4: write
+ */
+#define MEMORY_CLIENT_ID_MASK (0xff << 12)
+#define MEMORY_CLIENT_ID_SHIFT 12
+#define MEMORY_CLIENT_RW_MASK (1 << 24)
+#define MEMORY_CLIENT_RW_SHIFT 24
+#define FAULT_VMID_MASK (0xf << 25)
+#define FAULT_VMID_SHIFT 25
+
+#define VM_INVALIDATE_REQUEST 0x51E
+#define VM_INVALIDATE_RESPONSE 0x51F
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x546
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x547
+
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x54F
+#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x550
+#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x551
+#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x552
+#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x553
+#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x554
+#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x555
+#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x556
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x557
+#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x558
+
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x55F
+#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x560
+
+#define VM_L2_CG 0x570
+#define MC_CG_ENABLE (1 << 18)
+#define MC_LS_ENABLE (1 << 19)
+
+#define MC_SHARED_CHMAP 0x801
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x0000f000
+#define MC_SHARED_CHREMAP 0x802
+
+#define MC_VM_FB_LOCATION 0x809
+#define MC_VM_AGP_TOP 0x80A
+#define MC_VM_AGP_BOT 0x80B
+#define MC_VM_AGP_BASE 0x80C
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x80D
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x80E
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x80F
+
+#define MC_VM_MX_L1_TLB_CNTL 0x819
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL 0x82B
+
+#define MC_HUB_MISC_HUB_CG 0x82E
+#define MC_HUB_MISC_VM_CG 0x82F
+
+#define MC_HUB_MISC_SIP_CG 0x830
+
+#define MC_XPB_CLK_GAT 0x91E
+
+#define MC_CITF_MISC_RD_CG 0x992
+#define MC_CITF_MISC_WR_CG 0x993
+#define MC_CITF_MISC_VM_CG 0x994
+
+#define MC_ARB_RAMCFG 0x9D8
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define CHANSIZE_OVERRIDE (1 << 11)
+#define NOOFGROUPS_SHIFT 12
+#define NOOFGROUPS_MASK 0x00001000
+
+#define MC_ARB_DRAM_TIMING 0x9DD
+#define MC_ARB_DRAM_TIMING2 0x9DE
+
+#define MC_ARB_BURST_TIME 0xA02
+#define STATE0(x) ((x) << 0)
+#define STATE0_MASK (0x1f << 0)
+#define STATE0_SHIFT 0
+#define STATE1(x) ((x) << 5)
+#define STATE1_MASK (0x1f << 5)
+#define STATE1_SHIFT 5
+#define STATE2(x) ((x) << 10)
+#define STATE2_MASK (0x1f << 10)
+#define STATE2_SHIFT 10
+#define STATE3(x) ((x) << 15)
+#define STATE3_MASK (0x1f << 15)
+#define STATE3_SHIFT 15
+
+#define MC_SEQ_TRAIN_WAKEUP_CNTL 0xA3A
+#define TRAIN_DONE_D0 (1 << 30)
+#define TRAIN_DONE_D1 (1 << 31)
+
+#define MC_SEQ_SUP_CNTL 0xA32
+#define RUN_MASK (1 << 0)
+#define MC_SEQ_SUP_PGM 0xA33
+#define MC_PMG_AUTO_CMD 0xA34
+
+#define MC_IO_PAD_CNTL_D0 0xA74
+#define MEM_FALL_OUT_CMD (1 << 8)
+
+#define MC_SEQ_RAS_TIMING 0xA28
+#define MC_SEQ_CAS_TIMING 0xA29
+#define MC_SEQ_MISC_TIMING 0xA2A
+#define MC_SEQ_MISC_TIMING2 0xA2B
+#define MC_SEQ_PMG_TIMING 0xA2C
+#define MC_SEQ_RD_CTL_D0 0xA2D
+#define MC_SEQ_RD_CTL_D1 0xA2E
+#define MC_SEQ_WR_CTL_D0 0xA2F
+#define MC_SEQ_WR_CTL_D1 0xA30
+
+#define MC_SEQ_MISC0 0xA80
+#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
+#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
+#define MC_SEQ_MISC0_VEN_ID_VALUE 3
+#define MC_SEQ_MISC0_REV_ID_SHIFT 12
+#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
+#define MC_SEQ_MISC0_REV_ID_VALUE 1
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+#define MC_SEQ_MISC1 0xA81
+#define MC_SEQ_RESERVE_M 0xA82
+#define MC_PMG_CMD_EMRS 0xA83
+
+#define MC_SEQ_IO_DEBUG_INDEX 0xA91
+#define MC_SEQ_IO_DEBUG_DATA 0xA92
+
+#define MC_SEQ_MISC5 0xA95
+#define MC_SEQ_MISC6 0xA96
+
+#define MC_SEQ_MISC7 0xA99
+
+#define MC_SEQ_RAS_TIMING_LP 0xA9B
+#define MC_SEQ_CAS_TIMING_LP 0xA9C
+#define MC_SEQ_MISC_TIMING_LP 0xA9D
+#define MC_SEQ_MISC_TIMING2_LP 0xA9E
+#define MC_SEQ_WR_CTL_D0_LP 0xA9F
+#define MC_SEQ_WR_CTL_D1_LP 0xAA0
+#define MC_SEQ_PMG_CMD_EMRS_LP 0xAA1
+#define MC_SEQ_PMG_CMD_MRS_LP 0xAA2
+
+#define MC_PMG_CMD_MRS 0xAAB
+
+#define MC_SEQ_RD_CTL_D0_LP 0xAC7
+#define MC_SEQ_RD_CTL_D1_LP 0xAC8
+
+#define MC_PMG_CMD_MRS1 0xAD1
+#define MC_SEQ_PMG_CMD_MRS1_LP 0xAD2
+#define MC_SEQ_PMG_TIMING_LP 0xAD3
+
+#define MC_SEQ_WR_CTL_2 0xAD5
+#define MC_SEQ_WR_CTL_2_LP 0xAD6
+#define MC_PMG_CMD_MRS2 0xAD7
+#define MC_SEQ_PMG_CMD_MRS2_LP 0xAD8
+
+#define MCLK_PWRMGT_CNTL 0xAE8
+# define DLL_SPEED(x) ((x) << 0)
+# define DLL_SPEED_MASK (0x1f << 0)
+# define DLL_READY (1 << 6)
+# define MC_INT_CNTL (1 << 7)
+# define MRDCK0_PDNB (1 << 8)
+# define MRDCK1_PDNB (1 << 9)
+# define MRDCK0_RESET (1 << 16)
+# define MRDCK1_RESET (1 << 17)
+# define DLL_READY_READ (1 << 24)
+#define DLL_CNTL 0xAE9
+# define MRDCK0_BYPASS (1 << 24)
+# define MRDCK1_BYPASS (1 << 25)
+
+#define MPLL_CNTL_MODE 0xAEC
+# define MPLL_MCLK_SEL (1 << 11)
+#define MPLL_FUNC_CNTL 0xAED
+#define BWCTRL(x) ((x) << 20)
+#define BWCTRL_MASK (0xff << 20)
+#define MPLL_FUNC_CNTL_1 0xAEE
+#define VCO_MODE(x) ((x) << 0)
+#define VCO_MODE_MASK (3 << 0)
+#define CLKFRAC(x) ((x) << 4)
+#define CLKFRAC_MASK (0xfff << 4)
+#define CLKF(x) ((x) << 16)
+#define CLKF_MASK (0xfff << 16)
+#define MPLL_FUNC_CNTL_2 0xAEF
+#define MPLL_AD_FUNC_CNTL 0xAF0
+#define YCLK_POST_DIV(x) ((x) << 0)
+#define YCLK_POST_DIV_MASK (7 << 0)
+#define MPLL_DQ_FUNC_CNTL 0xAF1
+#define YCLK_SEL(x) ((x) << 4)
+#define YCLK_SEL_MASK (1 << 4)
+
+#define MPLL_SS1 0xAF3
+#define CLKV(x) ((x) << 0)
+#define CLKV_MASK (0x3ffffff << 0)
+#define MPLL_SS2 0xAF4
+#define CLKS(x) ((x) << 0)
+#define CLKS_MASK (0xfff << 0)
+
+#define HDP_HOST_PATH_CNTL 0xB00
+#define CLOCK_GATING_DIS (1 << 23)
+#define HDP_NONSURFACE_BASE 0xB01
+#define HDP_NONSURFACE_INFO 0xB02
+#define HDP_NONSURFACE_SIZE 0xB03
+
+#define HDP_DEBUG0 0xBCC
+
+#define HDP_ADDR_CONFIG 0xBD2
+#define HDP_MISC_CNTL 0xBD3
+#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+#define HDP_MEM_POWER_LS 0xBD4
+#define HDP_LS_ENABLE (1 << 0)
+
+#define ATC_MISC_CG 0xCD4
+
+#define IH_RB_CNTL 0xF80
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0xF81
+#define IH_RB_RPTR 0xF82
+#define IH_RB_WPTR 0xF83
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0xF84
+#define IH_RB_WPTR_ADDR_LO 0xF85
+#define IH_CNTL 0xF86
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 1)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+# define MC_VMID(x) ((x) << 25)
+
+#define CONFIG_MEMSIZE 0x150A
+
+#define INTERRUPT_CNTL 0x151A
+# define IH_DUMMY_RD_OVERRIDE (1 << 0)
+# define IH_DUMMY_RD_EN (1 << 1)
+# define IH_REQ_NONSNOOP_EN (1 << 3)
+# define GEN_IH_INT_EN (1 << 8)
+#define INTERRUPT_CNTL2 0x151B
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x1520
+
+#define BIF_FB_EN 0x1524
+#define FB_READ_EN (1 << 0)
+#define FB_WRITE_EN (1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x1528
+
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX 0x1780
+# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
+# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA 0x1781
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
+# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
+# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0 = invalid
+ * x = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
+# define HBR_CAPABLE (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
+# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
+# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
+# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
+# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
+# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
+# define DESCRIPTION0(x) (((x) & 0xff) << 0)
+# define DESCRIPTION1(x) (((x) & 0xff) << 8)
+# define DESCRIPTION2(x) (((x) & 0xff) << 16)
+# define DESCRIPTION3(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
+# define DESCRIPTION4(x) (((x) & 0xff) << 0)
+# define DESCRIPTION5(x) (((x) & 0xff) << 8)
+# define DESCRIPTION6(x) (((x) & 0xff) << 16)
+# define DESCRIPTION7(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
+# define DESCRIPTION8(x) (((x) & 0xff) << 0)
+# define DESCRIPTION9(x) (((x) & 0xff) << 8)
+# define DESCRIPTION10(x) (((x) & 0xff) << 16)
+# define DESCRIPTION11(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
+# define DESCRIPTION12(x) (((x) & 0xff) << 0)
+# define DESCRIPTION13(x) (((x) & 0xff) << 8)
+# define DESCRIPTION14(x) (((x) & 0xff) << 16)
+# define DESCRIPTION15(x) (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
+# define DESCRIPTION16(x) (((x) & 0xff) << 0)
+# define DESCRIPTION17(x) (((x) & 0xff) << 8)
+
+#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
+# define AUDIO_ENABLED (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
+#define PORT_CONNECTIVITY_MASK (3 << 30)
+#define PORT_CONNECTIVITY_SHIFT 30
+
+#define DC_LB_MEMORY_SPLIT 0x1AC3
+#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
+
+#define PRIORITY_A_CNT 0x1AC6
+#define PRIORITY_MARK_MASK 0x7fff
+#define PRIORITY_OFF (1 << 16)
+#define PRIORITY_ALWAYS_ON (1 << 20)
+#define PRIORITY_B_CNT 0x1AC7
+
+#define DPG_PIPE_ARBITRATION_CONTROL3 0x1B32
+# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
+#define DPG_PIPE_LATENCY_CONTROL 0x1B33
+# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
+# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS 0x1AEE
+# define VLINE_OCCURRED (1 << 0)
+# define VLINE_ACK (1 << 4)
+# define VLINE_STAT (1 << 12)
+# define VLINE_INTERRUPT (1 << 16)
+# define VLINE_INTERRUPT_TYPE (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS 0x1AEF
+# define VBLANK_OCCURRED (1 << 0)
+# define VBLANK_ACK (1 << 4)
+# define VBLANK_STAT (1 << 12)
+# define VBLANK_INTERRUPT (1 << 16)
+# define VBLANK_INTERRUPT_TYPE (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK 0x1AD0
+# define VBLANK_INT_MASK (1 << 0)
+# define VLINE_INT_MASK (1 << 4)
+
+#define DISP_INTERRUPT_STATUS 0x183D
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D1_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD1_INTERRUPT (1 << 17)
+# define DC_HPD1_RX_INTERRUPT (1 << 18)
+# define DACA_AUTODETECT_INTERRUPT (1 << 22)
+# define DACB_AUTODETECT_INTERRUPT (1 << 23)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x183E
+# define LB_D2_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD2_INTERRUPT (1 << 17)
+# define DC_HPD2_RX_INTERRUPT (1 << 18)
+# define DISP_TIMER_INTERRUPT (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2 0x183F
+# define LB_D3_VLINE_INTERRUPT (1 << 2)
+# define LB_D3_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD3_INTERRUPT (1 << 17)
+# define DC_HPD3_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3 0x1840
+# define LB_D4_VLINE_INTERRUPT (1 << 2)
+# define LB_D4_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD4_INTERRUPT (1 << 17)
+# define DC_HPD4_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4 0x1853
+# define LB_D5_VLINE_INTERRUPT (1 << 2)
+# define LB_D5_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD5_INTERRUPT (1 << 17)
+# define DC_HPD5_RX_INTERRUPT (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x1854
+# define LB_D6_VLINE_INTERRUPT (1 << 2)
+# define LB_D6_VBLANK_INTERRUPT (1 << 3)
+# define DC_HPD6_INTERRUPT (1 << 17)
+# define DC_HPD6_RX_INTERRUPT (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x1A16
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x1A17
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
+#define DAC_AUTODETECT_INT_CONTROL 0x19F2
+
+#define DC_HPD1_INT_STATUS 0x1807
+#define DC_HPD2_INT_STATUS 0x180A
+#define DC_HPD3_INT_STATUS 0x180D
+#define DC_HPD4_INT_STATUS 0x1810
+#define DC_HPD5_INT_STATUS 0x1813
+#define DC_HPD6_INT_STATUS 0x1816
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HPD1_INT_CONTROL 0x1808
+#define DC_HPD2_INT_CONTROL 0x180B
+#define DC_HPD3_INT_CONTROL 0x180E
+#define DC_HPD4_INT_CONTROL 0x1811
+#define DC_HPD5_INT_CONTROL 0x1814
+#define DC_HPD6_INT_CONTROL 0x1817
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+#define DC_HPD1_CONTROL 0x1809
+#define DC_HPD2_CONTROL 0x180C
+#define DC_HPD3_CONTROL 0x180F
+#define DC_HPD4_CONTROL 0x1812
+#define DC_HPD5_CONTROL 0x1815
+#define DC_HPD6_CONTROL 0x1818
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+# define DC_HPDx_EN (1 << 28)
+
+#define DPG_PIPE_STUTTER_CONTROL 0x1B35
+# define STUTTER_ENABLE (1 << 0)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT 0x1BA6
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE 0x05ac
+# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE 0x05b0
+#define DCCG_AUDIO_DTO0_MODULE 0x05b4
+#define DCCG_AUDIO_DTO1_PHASE 0x05c0
+#define DCCG_AUDIO_DTO1_MODULE 0x05c4
+
+#define AFMT_AUDIO_SRC_CONTROL 0x1c4f
+#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
+#define GRBM_CNTL 0x2000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+
+#define GRBM_STATUS2 0x2002
+#define RLC_RQ_PENDING (1 << 0)
+#define RLC_BUSY (1 << 8)
+#define TC_BUSY (1 << 9)
+
+#define GRBM_STATUS 0x2004
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define RING2_RQ_PENDING (1 << 4)
+#define SRBM_RQ_PENDING (1 << 5)
+#define RING1_RQ_PENDING (1 << 6)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GDS_DMA_RQ_PENDING (1 << 9)
+#define GRBM_EE_BUSY (1 << 10)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define GDS_BUSY (1 << 15)
+#define VGT_BUSY (1 << 17)
+#define IA_BUSY_NO_DMA (1 << 18)
+#define IA_BUSY (1 << 19)
+#define SX_BUSY (1 << 20)
+#define SPI_BUSY (1 << 22)
+#define BCI_BUSY (1 << 23)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x2005
+#define GRBM_STATUS_SE1 0x2006
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_BCI_BUSY (1 << 22)
+#define SE_VGT_BUSY (1 << 23)
+#define SE_PA_BUSY (1 << 24)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+
+#define GRBM_SOFT_RESET 0x2008
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_RLC (1 << 2)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_GDS (1 << 4)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_BCI (1 << 7)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VGT (1 << 14)
+#define SOFT_RESET_IA (1 << 15)
+
+#define GRBM_GFX_INDEX 0x200B
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SH_INDEX(x) ((x) << 8)
+#define SE_INDEX(x) ((x) << 16)
+#define SH_BROADCAST_WRITES (1 << 29)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
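+/*
+ * Editor's sketch: GRBM_GFX_INDEX either targets a single unit, e.g.
+ *
+ *   SE_INDEX(se) | SH_INDEX(sh) | INSTANCE_BROADCAST_WRITES
+ *
+ * or broadcasts register writes to every unit with
+ *
+ *   SE_BROADCAST_WRITES | SH_BROADCAST_WRITES | INSTANCE_BROADCAST_WRITES
+ */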
+
+#define GRBM_INT_CNTL 0x2018
+# define RDERR_INT_ENABLE (1 << 0)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+#define CP_STRMOUT_CNTL 0x213F
+#define SCRATCH_REG0 0x2140
+#define SCRATCH_REG1 0x2141
+#define SCRATCH_REG2 0x2142
+#define SCRATCH_REG3 0x2143
+#define SCRATCH_REG4 0x2144
+#define SCRATCH_REG5 0x2145
+#define SCRATCH_REG6 0x2146
+#define SCRATCH_REG7 0x2147
+
+#define SCRATCH_UMSK 0x2150
+#define SCRATCH_ADDR 0x2151
+
+#define CP_SEM_WAIT_TIMER 0x216F
+
+#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x2172
+
+#define CP_ME_CNTL 0x21B6
+#define CP_CE_HALT (1 << 24)
+#define CP_PFP_HALT (1 << 26)
+#define CP_ME_HALT (1 << 28)
+
+#define CP_COHER_CNTL2 0x217A
+
+#define CP_RB2_RPTR 0x21BE
+#define CP_RB1_RPTR 0x21BF
+#define CP_RB0_RPTR 0x21C0
+#define CP_RB_WPTR_DELAY 0x21C1
+
+#define CP_QUEUE_THRESHOLDS 0x21D8
+#define ROQ_IB1_START(x) ((x) << 0)
+#define ROQ_IB2_START(x) ((x) << 8)
+#define CP_MEQ_THRESHOLDS 0x21D9
+#define MEQ1_START(x) ((x) << 0)
+#define MEQ2_START(x) ((x) << 8)
+
+#define CP_PERFMON_CNTL 0x21FF
+
+#define VGT_VTX_VECT_EJECT_REG 0x222C
+
+#define VGT_CACHE_INVALIDATION 0x2231
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_ESGS_RING_SIZE 0x2232
+#define VGT_GSVS_RING_SIZE 0x2233
+
+#define VGT_GS_VERTEX_REUSE 0x2235
+
+#define VGT_PRIMITIVE_TYPE 0x2256
+#define VGT_INDEX_TYPE 0x2257
+
+#define VGT_NUM_INDICES 0x225C
+#define VGT_NUM_INSTANCES 0x225D
+
+#define VGT_TF_RING_SIZE 0x2262
+
+#define VGT_HS_OFFCHIP_PARAM 0x226C
+
+#define VGT_TF_MEMORY_BASE 0x226E
+
+#define CC_GC_SHADER_ARRAY_CONFIG 0x226F
+#define INACTIVE_CUS_MASK 0xFFFF0000
+#define INACTIVE_CUS_SHIFT 16
+#define GC_USER_SHADER_ARRAY_CONFIG 0x2270
+
+#define PA_CL_ENHANCE 0x2285
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+
+#define PA_SU_LINE_STIPPLE_VALUE 0x2298
+
+#define PA_SC_LINE_STIPPLE_STATE 0x22C4
+
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x22C9
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+
+#define PA_SC_FIFO_SIZE 0x22F3
+#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
+
+#define PA_SC_ENHANCE 0x22FC
+
+#define SQ_CONFIG 0x2300
+
+#define SQC_CACHES 0x2302
+
+#define SQ_POWER_THROTTLE 0x2396
+#define MIN_POWER(x) ((x) << 0)
+#define MIN_POWER_MASK (0x3fff << 0)
+#define MIN_POWER_SHIFT 0
+#define MAX_POWER(x) ((x) << 16)
+#define MAX_POWER_MASK (0x3fff << 16)
+#define MAX_POWER_SHIFT 16
+#define SQ_POWER_THROTTLE2 0x2397
+#define MAX_POWER_DELTA(x) ((x) << 0)
+#define MAX_POWER_DELTA_MASK (0x3fff << 0)
+#define MAX_POWER_DELTA_SHIFT 0
+#define STI_SIZE(x) ((x) << 16)
+#define STI_SIZE_MASK (0x3ff << 16)
+#define STI_SIZE_SHIFT 16
+#define LTI_RATIO(x) ((x) << 27)
+#define LTI_RATIO_MASK (0xf << 27)
+#define LTI_RATIO_SHIFT 27
+
+#define SX_DEBUG_1 0x2418
+
+#define SPI_STATIC_THREAD_MGMT_1 0x2438
+#define SPI_STATIC_THREAD_MGMT_2 0x2439
+#define SPI_STATIC_THREAD_MGMT_3 0x243A
+#define SPI_PS_MAX_WAVE_ID 0x243B
+
+#define SPI_CONFIG_CNTL 0x2440
+
+#define SPI_CONFIG_CNTL_1 0x244F
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+
+#define CGTS_TCC_DISABLE 0x2452
+#define CGTS_USER_TCC_DISABLE 0x2453
+#define TCC_DISABLE_MASK 0xFFFF0000
+#define TCC_DISABLE_SHIFT 16
+#define CGTS_SM_CTRL_REG 0x2454
+#define OVERRIDE (1 << 21)
+#define LS_OVERRIDE (1 << 22)
+
+#define SPI_LB_CU_MASK 0x24D5
+
+#define TA_CNTL_AUX 0x2542
+
+#define CC_RB_BACKEND_DISABLE 0x263D
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x263E
+#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x00000007
+#define NUM_PIPES_SHIFT 0
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
+#define PIPE_INTERLEAVE_SIZE_SHIFT 4
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define NUM_SHADER_ENGINES_MASK 0x00003000
+#define NUM_SHADER_ENGINES_SHIFT 12
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
+#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
+#define NUM_GPUS(x) ((x) << 20)
+#define NUM_GPUS_MASK 0x00700000
+#define NUM_GPUS_SHIFT 20
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
+#define MULTI_GPU_TILE_SIZE_SHIFT 24
+#define ROW_SIZE(x) ((x) << 28)
+#define ROW_SIZE_MASK 0x30000000
+#define ROW_SIZE_SHIFT 28
+
+#define GB_TILE_MODE0 0x2644
+# define MICRO_TILE_MODE(x) ((x) << 0)
+# define ADDR_SURF_DISPLAY_MICRO_TILING 0
+# define ADDR_SURF_THIN_MICRO_TILING 1
+# define ADDR_SURF_DEPTH_MICRO_TILING 2
+# define ARRAY_MODE(x) ((x) << 2)
+# define ARRAY_LINEAR_GENERAL 0
+# define ARRAY_LINEAR_ALIGNED 1
+# define ARRAY_1D_TILED_THIN1 2
+# define ARRAY_2D_TILED_THIN1 4
+# define PIPE_CONFIG(x) ((x) << 6)
+# define ADDR_SURF_P2 0
+# define ADDR_SURF_P4_8x16 4
+# define ADDR_SURF_P4_16x16 5
+# define ADDR_SURF_P4_16x32 6
+# define ADDR_SURF_P4_32x32 7
+# define ADDR_SURF_P8_16x16_8x16 8
+# define ADDR_SURF_P8_16x32_8x16 9
+# define ADDR_SURF_P8_32x32_8x16 10
+# define ADDR_SURF_P8_16x32_16x16 11
+# define ADDR_SURF_P8_32x32_16x16 12
+# define ADDR_SURF_P8_32x32_16x32 13
+# define ADDR_SURF_P8_32x64_32x32 14
+# define TILE_SPLIT(x) ((x) << 11)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define BANK_WIDTH(x) ((x) << 14)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define BANK_HEIGHT(x) ((x) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
+# define MACRO_TILE_ASPECT(x) ((x) << 18)
+# define ADDR_SURF_MACRO_ASPECT_1 0
+# define ADDR_SURF_MACRO_ASPECT_2 1
+# define ADDR_SURF_MACRO_ASPECT_4 2
+# define ADDR_SURF_MACRO_ASPECT_8 3
+# define NUM_BANKS(x) ((x) << 20)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
+#define GB_TILE_MODE1 0x2645
+#define GB_TILE_MODE2 0x2646
+#define GB_TILE_MODE3 0x2647
+#define GB_TILE_MODE4 0x2648
+#define GB_TILE_MODE5 0x2649
+#define GB_TILE_MODE6 0x264a
+#define GB_TILE_MODE7 0x264b
+#define GB_TILE_MODE8 0x264c
+#define GB_TILE_MODE9 0x264d
+#define GB_TILE_MODE10 0x264e
+#define GB_TILE_MODE11 0x264f
+#define GB_TILE_MODE12 0x2650
+#define GB_TILE_MODE13 0x2651
+#define GB_TILE_MODE14 0x2652
+#define GB_TILE_MODE15 0x2653
+#define GB_TILE_MODE16 0x2654
+#define GB_TILE_MODE17 0x2655
+#define GB_TILE_MODE18 0x2656
+#define GB_TILE_MODE19 0x2657
+#define GB_TILE_MODE20 0x2658
+#define GB_TILE_MODE21 0x2659
+#define GB_TILE_MODE22 0x265a
+#define GB_TILE_MODE23 0x265b
+#define GB_TILE_MODE24 0x265c
+#define GB_TILE_MODE25 0x265d
+#define GB_TILE_MODE26 0x265e
+#define GB_TILE_MODE27 0x265f
+#define GB_TILE_MODE28 0x2660
+#define GB_TILE_MODE29 0x2661
+#define GB_TILE_MODE30 0x2662
+#define GB_TILE_MODE31 0x2663
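+
+/*
+ * Editor's illustration: a GB_TILE_MODEn entry for a 2D-tiled display
+ * surface might be composed from the fields above as
+ *
+ *   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ *   ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ *   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ *   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ *   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ *   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ *   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ *   NUM_BANKS(ADDR_SURF_16_BANK)
+ */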
+
+#define CB_PERFCOUNTER0_SELECT0 0x2688
+#define CB_PERFCOUNTER0_SELECT1 0x2689
+#define CB_PERFCOUNTER1_SELECT0 0x268A
+#define CB_PERFCOUNTER1_SELECT1 0x268B
+#define CB_PERFCOUNTER2_SELECT0 0x268C
+#define CB_PERFCOUNTER2_SELECT1 0x268D
+#define CB_PERFCOUNTER3_SELECT0 0x268E
+#define CB_PERFCOUNTER3_SELECT1 0x268F
+
+#define CB_CGTT_SCLK_CTRL 0x2698
+
+#define GC_USER_RB_BACKEND_DISABLE 0x26DF
+#define BACKEND_DISABLE_MASK 0x00FF0000
+#define BACKEND_DISABLE_SHIFT 16
+
+#define TCP_CHAN_STEER_LO 0x2B03
+#define TCP_CHAN_STEER_HI 0x2B94
+
+#define CP_RB0_BASE 0x3040
+#define CP_RB0_CNTL 0x3041
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define BUF_SWAP_32BIT (2 << 16)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+
+#define CP_RB0_RPTR_ADDR 0x3043
+#define CP_RB0_RPTR_ADDR_HI 0x3044
+#define CP_RB0_WPTR 0x3045
+
+#define CP_PFP_UCODE_ADDR 0x3054
+#define CP_PFP_UCODE_DATA 0x3055
+#define CP_ME_RAM_RADDR 0x3056
+#define CP_ME_RAM_WADDR 0x3057
+#define CP_ME_RAM_DATA 0x3058
+
+#define CP_CE_UCODE_ADDR 0x305A
+#define CP_CE_UCODE_DATA 0x305B
+
+#define CP_RB1_BASE 0x3060
+#define CP_RB1_CNTL 0x3061
+#define CP_RB1_RPTR_ADDR 0x3062
+#define CP_RB1_RPTR_ADDR_HI 0x3063
+#define CP_RB1_WPTR 0x3064
+#define CP_RB2_BASE 0x3065
+#define CP_RB2_CNTL 0x3066
+#define CP_RB2_RPTR_ADDR 0x3067
+#define CP_RB2_RPTR_ADDR_HI 0x3068
+#define CP_RB2_WPTR 0x3069
+#define CP_INT_CNTL_RING0 0x306A
+#define CP_INT_CNTL_RING1 0x306B
+#define CP_INT_CNTL_RING2 0x306C
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define CP_RINGID2_INT_ENABLE (1 << 29)
+# define CP_RINGID1_INT_ENABLE (1 << 30)
+# define CP_RINGID0_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS_RING0 0x306D
+#define CP_INT_STATUS_RING1 0x306E
+#define CP_INT_STATUS_RING2 0x306F
+# define WAIT_MEM_SEM_INT_STAT (1 << 21)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define CP_RINGID2_INT_STAT (1 << 29)
+# define CP_RINGID1_INT_STAT (1 << 30)
+# define CP_RINGID0_INT_STAT (1 << 31)
+
+#define CP_MEM_SLP_CNTL 0x3079
+# define CP_MEM_LS_EN (1 << 0)
+
+#define CP_DEBUG 0x307F
+
+#define RLC_CNTL 0x30C0
+# define RLC_ENABLE (1 << 0)
+#define RLC_RL_BASE 0x30C1
+#define RLC_RL_SIZE 0x30C2
+#define RLC_LB_CNTL 0x30C3
+# define LOAD_BALANCE_ENABLE (1 << 0)
+#define RLC_SAVE_AND_RESTORE_BASE 0x30C4
+#define RLC_LB_CNTR_MAX 0x30C5
+#define RLC_LB_CNTR_INIT 0x30C6
+
+#define RLC_CLEAR_STATE_RESTORE_BASE 0x30C8
+
+#define RLC_UCODE_ADDR 0x30CB
+#define RLC_UCODE_DATA 0x30CC
+
+#define RLC_GPU_CLOCK_COUNT_LSB 0x30CE
+#define RLC_GPU_CLOCK_COUNT_MSB 0x30CF
+#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x30D0
+#define RLC_MC_CNTL 0x30D1
+#define RLC_UCODE_CNTL 0x30D2
+#define RLC_STAT 0x30D3
+# define RLC_BUSY_STATUS (1 << 0)
+# define GFX_POWER_STATUS (1 << 1)
+# define GFX_CLOCK_STATUS (1 << 2)
+# define GFX_LS_STATUS (1 << 3)
+
+#define RLC_PG_CNTL 0x30D7
+# define GFX_PG_ENABLE (1 << 0)
+# define GFX_PG_SRC (1 << 1)
+
+#define RLC_CGTT_MGCG_OVERRIDE 0x3100
+#define RLC_CGCG_CGLS_CTRL 0x3101
+# define CGCG_EN (1 << 0)
+# define CGLS_EN (1 << 1)
+
+#define RLC_TTOP_D 0x3105
+# define RLC_PUD(x) ((x) << 0)
+# define RLC_PUD_MASK (0xff << 0)
+# define RLC_PDD(x) ((x) << 8)
+# define RLC_PDD_MASK (0xff << 8)
+# define RLC_TTPD(x) ((x) << 16)
+# define RLC_TTPD_MASK (0xff << 16)
+# define RLC_MSD(x) ((x) << 24)
+# define RLC_MSD_MASK (0xff << 24)
+
+#define RLC_LB_INIT_CU_MASK 0x3107
+
+#define RLC_PG_AO_CU_MASK 0x310B
+#define RLC_MAX_PG_CU 0x310C
+# define MAX_PU_CU(x) ((x) << 0)
+# define MAX_PU_CU_MASK (0xff << 0)
+#define RLC_AUTO_PG_CTRL 0x310D
+# define AUTO_PG_EN (1 << 0)
+# define GRBM_REG_SGIT(x) ((x) << 3)
+# define GRBM_REG_SGIT_MASK (0xffff << 3)
+# define PG_AFTER_GRBM_REG_ST(x) ((x) << 19)
+# define PG_AFTER_GRBM_REG_ST_MASK (0x1fff << 19)
+
+#define RLC_SERDES_WR_MASTER_MASK_0 0x3115
+#define RLC_SERDES_WR_MASTER_MASK_1 0x3116
+#define RLC_SERDES_WR_CTRL 0x3117
+
+#define RLC_SERDES_MASTER_BUSY_0 0x3119
+#define RLC_SERDES_MASTER_BUSY_1 0x311A
+
+#define RLC_GCPM_GENERAL_3 0x311E
+
+#define DB_RENDER_CONTROL 0xA000
+
+#define DB_DEPTH_INFO 0xA00F
+
+#define PA_SC_RASTER_CONFIG 0xA0D4
+# define RB_MAP_PKR0(x) ((x) << 0)
+# define RB_MAP_PKR0_MASK (0x3 << 0)
+# define RB_MAP_PKR1(x) ((x) << 2)
+# define RB_MAP_PKR1_MASK (0x3 << 2)
+# define RASTER_CONFIG_RB_MAP_0 0
+# define RASTER_CONFIG_RB_MAP_1 1
+# define RASTER_CONFIG_RB_MAP_2 2
+# define RASTER_CONFIG_RB_MAP_3 3
+# define RB_XSEL2(x) ((x) << 4)
+# define RB_XSEL2_MASK (0x3 << 4)
+# define RB_XSEL (1 << 6)
+# define RB_YSEL (1 << 7)
+# define PKR_MAP(x) ((x) << 8)
+# define PKR_MAP_MASK (0x3 << 8)
+# define RASTER_CONFIG_PKR_MAP_0 0
+# define RASTER_CONFIG_PKR_MAP_1 1
+# define RASTER_CONFIG_PKR_MAP_2 2
+# define RASTER_CONFIG_PKR_MAP_3 3
+# define PKR_XSEL(x) ((x) << 10)
+# define PKR_XSEL_MASK (0x3 << 10)
+# define PKR_YSEL(x) ((x) << 12)
+# define PKR_YSEL_MASK (0x3 << 12)
+# define SC_MAP(x) ((x) << 16)
+# define SC_MAP_MASK (0x3 << 16)
+# define SC_XSEL(x) ((x) << 18)
+# define SC_XSEL_MASK (0x3 << 18)
+# define SC_YSEL(x) ((x) << 20)
+# define SC_YSEL_MASK (0x3 << 20)
+# define SE_MAP(x) ((x) << 24)
+# define SE_MAP_MASK (0x3 << 24)
+# define RASTER_CONFIG_SE_MAP_0 0
+# define RASTER_CONFIG_SE_MAP_1 1
+# define RASTER_CONFIG_SE_MAP_2 2
+# define RASTER_CONFIG_SE_MAP_3 3
+# define SE_XSEL(x) ((x) << 26)
+# define SE_XSEL_MASK (0x3 << 26)
+# define SE_YSEL(x) ((x) << 28)
+# define SE_YSEL_MASK (0x3 << 28)
+
+#define VGT_EVENT_INITIATOR 0xA2A4
+# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
+# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
+# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
+# define CACHE_FLUSH_TS (4 << 0)
+# define CACHE_FLUSH (6 << 0)
+# define CS_PARTIAL_FLUSH (7 << 0)
+# define VGT_STREAMOUT_RESET (10 << 0)
+# define END_OF_PIPE_INCR_DE (11 << 0)
+# define END_OF_PIPE_IB_END (12 << 0)
+# define RST_PIX_CNT (13 << 0)
+# define VS_PARTIAL_FLUSH (15 << 0)
+# define PS_PARTIAL_FLUSH (16 << 0)
+# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
+# define ZPASS_DONE (21 << 0)
+# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
+# define PERFCOUNTER_START (23 << 0)
+# define PERFCOUNTER_STOP (24 << 0)
+# define PIPELINESTAT_START (25 << 0)
+# define PIPELINESTAT_STOP (26 << 0)
+# define PERFCOUNTER_SAMPLE (27 << 0)
+# define SAMPLE_PIPELINESTAT (30 << 0)
+# define SAMPLE_STREAMOUTSTATS (32 << 0)
+# define RESET_VTX_CNT (33 << 0)
+# define VGT_FLUSH (36 << 0)
+# define BOTTOM_OF_PIPE_TS (40 << 0)
+# define DB_CACHE_FLUSH_AND_INV (42 << 0)
+# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
+# define FLUSH_AND_INV_DB_META (44 << 0)
+# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
+# define FLUSH_AND_INV_CB_META (46 << 0)
+# define CS_DONE (47 << 0)
+# define PS_DONE (48 << 0)
+# define FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0)
+# define THREAD_TRACE_START (51 << 0)
+# define THREAD_TRACE_STOP (52 << 0)
+# define THREAD_TRACE_FLUSH (54 << 0)
+# define THREAD_TRACE_FINISH (55 << 0)
+
+/* PIF PHY0 registers idx/data 0x8/0xc */
+#define PB0_PIF_CNTL 0x10
+# define LS2_EXIT_TIME(x) ((x) << 17)
+# define LS2_EXIT_TIME_MASK (0x7 << 17)
+# define LS2_EXIT_TIME_SHIFT 17
+#define PB0_PIF_PAIRING 0x11
+# define MULTI_PIF (1 << 25)
+#define PB0_PIF_PWRDOWN_0 0x12
+# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
+# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_0_SHIFT 24
+#define PB0_PIF_PWRDOWN_1 0x13
+# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
+# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_1_SHIFT 24
+
+#define PB0_PIF_PWRDOWN_2 0x17
+# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
+# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_2_SHIFT 24
+#define PB0_PIF_PWRDOWN_3 0x18
+# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
+# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_3_SHIFT 24
+/* PIF PHY1 registers idx/data 0x10/0x14 */
+#define PB1_PIF_CNTL 0x10
+#define PB1_PIF_PAIRING 0x11
+#define PB1_PIF_PWRDOWN_0 0x12
+#define PB1_PIF_PWRDOWN_1 0x13
+
+#define PB1_PIF_PWRDOWN_2 0x17
+#define PB1_PIF_PWRDOWN_3 0x18
+/* PCIE registers idx/data 0x30/0x34 */
+#define PCIE_CNTL2 0x1c /* PCIE */
+# define SLV_MEM_LS_EN (1 << 16)
+# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
+# define MST_MEM_LS_EN (1 << 18)
+# define REPLAY_MEM_LS_EN (1 << 19)
+#define PCIE_LC_STATUS1 0x28 /* PCIE */
+# define LC_REVERSE_RCVR (1 << 0)
+# define LC_REVERSE_XMIT (1 << 1)
+# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
+# define LC_OPERATING_LINK_WIDTH_SHIFT 2
+# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
+# define LC_DETECTED_LINK_WIDTH_SHIFT 5
+
+#define PCIE_P_CNTL 0x40 /* PCIE */
+# define P_IGNORE_EDB_ERR (1 << 6)
+
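+/*
+ * Editor's note: this PCIE group is reached through the index/data pair at
+ * dword offsets 0xc/0xd (AMDGPU_PCIE_INDEX/AMDGPU_PCIE_DATA, defined near
+ * the end of this file) rather than by direct MMIO, e.g.
+ *
+ *   WREG32(AMDGPU_PCIE_INDEX, PCIE_CNTL2);
+ *   val = RREG32(AMDGPU_PCIE_DATA);
+ *
+ * (WREG32/RREG32 being the driver's usual MMIO accessors.)
+ */
+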
+/* PCIE PORT registers idx/data 0x38/0x3c */
+#define PCIE_LC_CNTL 0xa0
+# define LC_L0S_INACTIVITY(x) ((x) << 8)
+# define LC_L0S_INACTIVITY_MASK (0xf << 8)
+# define LC_L0S_INACTIVITY_SHIFT 8
+# define LC_L1_INACTIVITY(x) ((x) << 12)
+# define LC_L1_INACTIVITY_MASK (0xf << 12)
+# define LC_L1_INACTIVITY_SHIFT 12
+# define LC_PMI_TO_L1_DIS (1 << 16)
+# define LC_ASPM_TO_L1_DIS (1 << 24)
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
+# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
+# define LC_DYN_LANES_PWR_STATE_SHIFT 21
+#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
+# define LC_XMIT_N_FTS(x) ((x) << 0)
+# define LC_XMIT_N_FTS_MASK (0xff << 0)
+# define LC_XMIT_N_FTS_SHIFT 0
+# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
+# define LC_N_FTS_MASK (0xff << 24)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_GEN3_EN_STRAP (1 << 1)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
+# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
+# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
+# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+# define LC_CURRENT_DATA_RATE_SHIFT 13
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
+# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
+# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
+
+#define PCIE_LC_CNTL2 0xb1
+# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
+# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
+
+#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
+# define LC_GO_TO_RECOVERY (1 << 30)
+#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
+# define LC_REDO_EQ (1 << 5)
+# define LC_SET_QUIESCE (1 << 13)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG 0x3bd3
+#define UVD_UDEC_DB_ADDR_CONFIG 0x3bd4
+#define UVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
+#define UVD_RBC_RB_RPTR 0x3da4
+#define UVD_RBC_RB_WPTR 0x3da5
+#define UVD_STATUS 0x3daf
+
+#define UVD_CGC_CTRL 0x3dc2
+# define DCM (1 << 0)
+# define CG_DT(x) ((x) << 2)
+# define CG_DT_MASK (0xf << 2)
+# define CLK_OD(x) ((x) << 6)
+# define CLK_OD_MASK (0x1f << 6)
+
+/* UVD CTX indirect */
+#define UVD_CGC_MEM_CTRL 0xC0
+#define UVD_CGC_CTRL2 0xC1
+# define DYN_OR_EN (1 << 0)
+# define DYN_RR_EN (1 << 1)
+# define G_DIV_ID(x) ((x) << 2)
+# define G_DIV_ID_MASK (0x7 << 2)
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define RADEON_PACKET_TYPE3 3
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
+
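+/*
+ * Editor's note (illustrative): PACKET3(op, n) builds a type-3 header
+ * whose count field n is the number of payload dwords minus one;
+ * RADEON_PACKET_TYPE0, used by PACKET0(), is assumed to be defined
+ * elsewhere. A single config-register write is typically emitted as:
+ *
+ *   ring[i++] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ *   ring[i++] = reg - PACKET3_SET_CONFIG_REG_START;
+ *   ring[i++] = value;
+ */
+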
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_BASE_INDEX(x) ((x) << 0)
+#define GDS_PARTITION_BASE 2
+#define CE_PARTITION_BASE 3
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_ALLOC_GDS 0x1B
+#define PACKET3_WRITE_GDS_RAM 0x1C
+#define PACKET3_ATOMIC_GDS 0x1D
+#define PACKET3_ATOMIC 0x1E
+#define PACKET3_OCCLUSION_QUERY 0x1F
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_INDIRECT_BUFFER_CONST 0x31
+#define PACKET3_INDIRECT_BUFFER 0x3F
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_WRITE_DATA 0x37
+#define WRITE_DATA_DST_SEL(x) ((x) << 8)
+ /* 0 - register
+ * 1 - memory (sync - via GRBM)
+ * 2 - tc/l2
+ * 3 - gds
+ * 4 - reserved
+ * 5 - memory (async - direct)
+ */
+#define WR_ONE_ADDR (1 << 16)
+#define WR_CONFIRM (1 << 20)
+#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
+ /* 0 - me
+ * 1 - pfp
+ * 2 - ce
+ */
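+/*
+ * Editor's sketch of a typical WRITE_DATA use (one dword written to
+ * memory via the async path, with write confirmation):
+ *
+ *   ring[i++] = PACKET3(PACKET3_WRITE_DATA, 3);
+ *   ring[i++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ *   ring[i++] = lower_32_bits(addr);
+ *   ring[i++] = upper_32_bits(addr);
+ *   ring[i++] = data;
+ */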
+#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
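+/*
+ * Editor's sketch: waiting until a memory dword equals a reference value,
+ * polled by the ME, is typically encoded as:
+ *
+ *   ring[i++] = PACKET3(PACKET3_WAIT_REG_MEM, 5);
+ *   ring[i++] = WAIT_REG_MEM_FUNCTION(3) | WAIT_REG_MEM_MEM_SPACE(1);
+ *   ring[i++] = lower_32_bits(addr);
+ *   ring[i++] = upper_32_bits(addr);
+ *   ring[i++] = ref;
+ *   ring[i++] = mask;
+ *   ring[i++] = 4; /* poll interval */
+ */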
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_COPY_DATA 0x40
+#define PACKET3_CP_DMA 0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ * SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
+ /* 0 - DST_ADDR
+ * 1 - GDS
+ */
+# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
+ /* 0 - ME
+ * 1 - PFP
+ */
+# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
+ /* 0 - SRC_ADDR
+ * 1 - GDS
+ * 2 - DATA
+ */
+# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
+/* COMMAND */
+# define PACKET3_CP_DMA_DIS_WC (1 << 21)
+# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+ /* 0 - none
+ * 1 - 8 in 16
+ * 2 - 8 in 32
+ * 3 - 8 in 64
+ */
+# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
+ /* 0 - memory
+ * 1 - register
+ */
+# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
+# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
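+/*
+ * Editor's illustration of dword 3 for a synced memory-to-memory copy
+ * (SRC_SEL/DST_SEL both addresses, ME engine); the final COMMAND dword
+ * then carries the byte count in bits [20:0]:
+ *
+ *   PACKET3_CP_DMA_CP_SYNC | PACKET3_CP_DMA_SRC_SEL(0) |
+ *   PACKET3_CP_DMA_DST_SEL(0) | (upper_32_bits(src) & 0xff)
+ */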
+#define PACKET3_PFP_SYNC_ME 0x42
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_DEST_BASE_0_ENA (1 << 0)
+# define PACKET3_DEST_BASE_1_ENA (1 << 1)
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_DEST_BASE_2_ENA (1 << 19)
+# define PACKET3_DEST_BASE_3_ENA (1 << 21)
+# define PACKET3_TCL1_ACTION_ENA (1 << 22)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - EOP events
+ * 6 - EOS events
+ * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+ */
+#define INV_L2 (1 << 20)
+ /* INV TC L2 cache when EVENT_INDEX = 7 */
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
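+/*
+ * Editor's sketch of an EOP fence: flush/invalidate with timestamp, write
+ * a 64-bit sequence number and raise an interrupt once confirmed:
+ *
+ *   ring[i++] = PACKET3(PACKET3_EVENT_WRITE_EOP, 4);
+ *   ring[i++] = EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
+ *   ring[i++] = lower_32_bits(addr);
+ *   ring[i++] = (upper_32_bits(addr) & 0xff) | DATA_SEL(2) | INT_SEL(2);
+ *   ring[i++] = lower_32_bits(seq);
+ *   ring[i++] = upper_32_bits(seq);
+ */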
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_LOAD_CONFIG_REG 0x5F
+#define PACKET3_LOAD_CONTEXT_REG 0x60
+#define PACKET3_LOAD_SH_REG 0x61
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00002000
+#define PACKET3_SET_CONFIG_REG_END 0x00002c00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x0000a000
+#define PACKET3_SET_CONTEXT_REG_END 0x0000a400
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_SH_REG 0x76
+#define PACKET3_SET_SH_REG_START 0x00002c00
+#define PACKET3_SET_SH_REG_END 0x00003000
+#define PACKET3_SET_SH_REG_OFFSET 0x77
+#define PACKET3_ME_WRITE 0x7A
+#define PACKET3_SCRATCH_RAM_WRITE 0x7D
+#define PACKET3_SCRATCH_RAM_READ 0x7E
+#define PACKET3_CE_WRITE 0x7F
+#define PACKET3_LOAD_CONST_RAM 0x80
+#define PACKET3_WRITE_CONST_RAM 0x81
+#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
+#define PACKET3_DUMP_CONST_RAM 0x83
+#define PACKET3_INCREMENT_CE_COUNTER 0x84
+#define PACKET3_INCREMENT_DE_COUNTER 0x85
+#define PACKET3_WAIT_ON_CE_COUNTER 0x86
+#define PACKET3_WAIT_ON_DE_COUNTER 0x87
+#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
+#define PACKET3_SET_CE_DE_COUNTERS 0x89
+#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
+#define PACKET3_SWITCH_BUFFER 0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET 0x200 /* not a register */
+
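+/*
+ * Editor's note: each per-engine register below is addressed by adding an
+ * instance offset, e.g. DMA_RB_CNTL + DMA1_REGISTER_OFFSET selects the
+ * second engine's ring-buffer control.
+ */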
+#define DMA_RB_CNTL 0x3400
+# define DMA_RB_ENABLE (1 << 0)
+# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
+# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
+# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
+# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
+#define DMA_RB_BASE 0x3401
+#define DMA_RB_RPTR 0x3402
+#define DMA_RB_WPTR 0x3403
+
+#define DMA_RB_RPTR_ADDR_HI 0x3407
+#define DMA_RB_RPTR_ADDR_LO 0x3408
+
+#define DMA_IB_CNTL 0x3409
+# define DMA_IB_ENABLE (1 << 0)
+# define DMA_IB_SWAP_ENABLE (1 << 4)
+# define CMD_VMID_FORCE (1 << 31)
+#define DMA_IB_RPTR 0x340a
+#define DMA_CNTL 0x340b
+# define TRAP_ENABLE (1 << 0)
+# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
+# define SEM_WAIT_INT_ENABLE (1 << 2)
+# define DATA_SWAP_ENABLE (1 << 3)
+# define FENCE_SWAP_ENABLE (1 << 4)
+# define CTXEMPTY_INT_ENABLE (1 << 28)
+#define DMA_STATUS_REG 0x340d
+# define DMA_IDLE (1 << 0)
+#define DMA_TILING_CONFIG 0x342e
+
+#define DMA_POWER_CNTL 0x342f
+# define MEM_POWER_OVERRIDE (1 << 8)
+#define DMA_CLK_CTRL 0x3430
+
+#define DMA_PG 0x3435
+# define PG_CNTL_ENABLE (1 << 0)
+#define DMA_PGFSM_CONFIG 0x3436
+#define DMA_PGFSM_WRITE 0x3437
+
+#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((b) & 0x1) << 26) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
+ (((vmid) & 0xF) << 20) | \
+ (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
+ (1 << 26) | \
+ (1 << 21) | \
+ (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_POLL_REG_MEM 0xe
+#define DMA_PACKET_NOP 0xf
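+
+/*
+ * Editor's sketch: an async-DMA fence is assembled from the header macro
+ * and packet types above roughly as
+ *
+ *   ring[i++] = DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0);
+ *   ring[i++] = lower_32_bits(addr) & 0xfffffffc;
+ *   ring[i++] = upper_32_bits(addr) & 0xff;
+ *   ring[i++] = seq;
+ */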
+
+#define VCE_STATUS 0x20004
+#define VCE_VCPU_CNTL 0x20014
+#define VCE_CLK_EN (1 << 0)
+#define VCE_VCPU_CACHE_OFFSET0 0x20024
+#define VCE_VCPU_CACHE_SIZE0 0x20028
+#define VCE_VCPU_CACHE_OFFSET1 0x2002c
+#define VCE_VCPU_CACHE_SIZE1 0x20030
+#define VCE_VCPU_CACHE_OFFSET2 0x20034
+#define VCE_VCPU_CACHE_SIZE2 0x20038
+#define VCE_SOFT_RESET 0x20120
+#define VCE_ECPU_SOFT_RESET (1 << 0)
+#define VCE_FME_SOFT_RESET (1 << 2)
+#define VCE_RB_BASE_LO2 0x2016c
+#define VCE_RB_BASE_HI2 0x20170
+#define VCE_RB_SIZE2 0x20174
+#define VCE_RB_RPTR2 0x20178
+#define VCE_RB_WPTR2 0x2017c
+#define VCE_RB_BASE_LO 0x20180
+#define VCE_RB_BASE_HI 0x20184
+#define VCE_RB_SIZE 0x20188
+#define VCE_RB_RPTR 0x2018c
+#define VCE_RB_WPTR 0x20190
+#define VCE_CLOCK_GATING_A 0x202f8
+#define VCE_CLOCK_GATING_B 0x202fc
+#define VCE_UENC_CLOCK_GATING 0x205bc
+#define VCE_UENC_REG_CLOCK_GATING 0x205c0
+#define VCE_FW_REG_STATUS 0x20e10
+# define VCE_FW_REG_STATUS_BUSY (1 << 0)
+# define VCE_FW_REG_STATUS_PASS (1 << 3)
+# define VCE_FW_REG_STATUS_DONE (1 << 11)
+#define VCE_LMI_FW_START_KEYSEL 0x20e18
+#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20
+#define VCE_LMI_CTRL2 0x20e74
+#define VCE_LMI_CTRL 0x20e98
+#define VCE_LMI_VM_CTRL 0x20ea0
+#define VCE_LMI_SWAP_CNTL 0x20eb4
+#define VCE_LMI_SWAP_CNTL1 0x20eb8
+#define VCE_LMI_CACHE_CTRL 0x20ef4
+
+#define VCE_CMD_NO_OP 0x00000000
+#define VCE_CMD_END 0x00000001
+#define VCE_CMD_IB 0x00000002
+#define VCE_CMD_FENCE 0x00000003
+#define VCE_CMD_TRAP 0x00000004
+#define VCE_CMD_IB_AUTO 0x00000005
+#define VCE_CMD_SEMAPHORE 0x00000006
+
+/* DCE stuff */
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define SI_CRTC0_REGISTER_OFFSET 0 /* (0x6df0 - 0x6df0)/4 */
+#define SI_CRTC1_REGISTER_OFFSET 0x300 /* (0x79f0 - 0x6df0)/4 */
+#define SI_CRTC2_REGISTER_OFFSET 0x2600 /* (0x105f0 - 0x6df0)/4 */
+#define SI_CRTC3_REGISTER_OFFSET 0x2900 /* (0x111f0 - 0x6df0)/4 */
+#define SI_CRTC4_REGISTER_OFFSET 0x2c00 /* (0x11df0 - 0x6df0)/4 */
+#define SI_CRTC5_REGISTER_OFFSET 0x2f00 /* (0x129f0 - 0x6df0)/4 */
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+#define AMDGPU_MM_INDEX 0x0000
+#define AMDGPU_MM_DATA 0x0001
+
+#define VERDE_NUM_CRTC 6
+#define BLACKOUT_MODE_MASK 0x00000007
+#define VGA_RENDER_CONTROL 0xC0
+#define R_000300_VGA_RENDER_CONTROL 0xC0
+#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
+#define EVERGREEN_CRTC_STATUS 0x1BA3
+#define EVERGREEN_CRTC_V_BLANK (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION 0x1BA4
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END 0x1b8d
+#define EVERGREEN_CRTC_CONTROL 0x1b9c
+# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL 0x1b9d
+# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
+# define EVERGREEN_CRTC_V_BLANK (1 << 0)
+#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x1ba8
+#define EVERGREEN_CRTC_UPDATE_LOCK 0x1bb5
+#define EVERGREEN_MASTER_UPDATE_LOCK 0x1bbd
+#define EVERGREEN_MASTER_UPDATE_MODE 0x1bbe
+#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
+#define EVERGREEN_GRPH_UPDATE 0x1a11
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0xc4
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
+
+#define EVERGREEN_DATA_FORMAT 0x1ac0
+# define EVERGREEN_INTERLEAVE_EN (1 << 0)
+
+#define MC_SHARED_CHMAP__NOOFCHAN_MASK 0xf000
+#define MC_SHARED_CHMAP__NOOFCHAN__SHIFT 0xc
+
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
+#define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
+#define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
+#define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
+
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a45
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1845
+
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1847
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a47
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
+
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
+
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
+
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+
+#define DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK 0x1
+
+#define R600_D1GRPH_SWAP_CONTROL 0x1843
+#define R600_D1GRPH_SWAP_ENDIAN_NONE (0 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_16BIT (1 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_32BIT (2 << 0)
+#define R600_D1GRPH_SWAP_ENDIAN_64BIT (3 << 0)
+
+#define AVIVO_D1VGA_CONTROL 0x00cc
+# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1 << 0)
+# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1 << 8)
+# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
+# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
+# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
+# define AVIVO_DVGA_CONTROL_ROTATE (1 << 24)
+#define AVIVO_D2VGA_CONTROL 0x00ce
+
+#define R600_BUS_CNTL 0x1508
+# define R600_BIOS_ROM_DIS (1 << 1)
+
+#define R600_ROM_CNTL 0x580
+# define R600_SCK_OVERWRITE (1 << 1)
+# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
+
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+
+#define FMT_BIT_DEPTH_CONTROL 0x1bf2
+#define FMT_TRUNCATE_EN (1 << 0)
+#define FMT_TRUNCATE_DEPTH (1 << 4)
+#define FMT_SPATIAL_DITHER_EN (1 << 8)
+#define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
+#define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
+#define FMT_FRAME_RANDOM_ENABLE (1 << 13)
+#define FMT_RGB_RANDOM_ENABLE (1 << 14)
+#define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
+#define FMT_TEMPORAL_DITHER_EN (1 << 16)
+#define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
+#define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#define FMT_TEMPORAL_LEVEL (1 << 24)
+#define FMT_TEMPORAL_DITHER_RESET (1 << 25)
+#define FMT_25FRC_SEL(x) ((x) << 26)
+#define FMT_50FRC_SEL(x) ((x) << 28)
+#define FMT_75FRC_SEL(x) ((x) << 30)
+
+#define EVERGREEN_DC_LUT_CONTROL 0x1a80
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x1a81
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x1a82
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x1a83
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x1a84
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x1a85
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x1a86
+#define EVERGREEN_DC_LUT_30_COLOR 0x1a7c
+#define EVERGREEN_DC_LUT_RW_INDEX 0x1a79
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x1a7e
+#define EVERGREEN_DC_LUT_RW_MODE 0x1a78
+
+#define EVERGREEN_GRPH_ENABLE 0x1a00
+#define EVERGREEN_GRPH_CONTROL 0x1a01
+#define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+#define EVERGREEN_GRPH_DEPTH_8BPP 0
+#define EVERGREEN_GRPH_DEPTH_16BPP 1
+#define EVERGREEN_GRPH_DEPTH_32BPP 2
+#define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+#define EVERGREEN_ADDR_SURF_2_BANK 0
+#define EVERGREEN_ADDR_SURF_4_BANK 1
+#define EVERGREEN_ADDR_SURF_8_BANK 2
+#define EVERGREEN_ADDR_SURF_16_BANK 3
+#define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+#define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+#define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
+#define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+
+/* 8 BPP */
+#define EVERGREEN_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+#define EVERGREEN_GRPH_FORMAT_ARGB1555 0
+#define EVERGREEN_GRPH_FORMAT_ARGB565 1
+#define EVERGREEN_GRPH_FORMAT_ARGB4444 2
+#define EVERGREEN_GRPH_FORMAT_AI88 3
+#define EVERGREEN_GRPH_FORMAT_MONO16 4
+#define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+
+/* 32 BPP */
+#define EVERGREEN_GRPH_FORMAT_ARGB8888 0
+#define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
+#define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
+#define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
+#define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
+#define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
+#define EVERGREEN_GRPH_FORMAT_RGB111110 6
+#define EVERGREEN_GRPH_FORMAT_BGR101111 7
+#define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+#define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+#define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+#define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+#define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+#define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
+#define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+#define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
+#define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
+#define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
+#define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
+
+#define EVERGREEN_GRPH_SWAP_CONTROL 0x1a03
+#define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_ENDIAN_NONE 0
+# define EVERGREEN_GRPH_ENDIAN_8IN16 1
+# define EVERGREEN_GRPH_ENDIAN_8IN32 2
+# define EVERGREEN_GRPH_ENDIAN_8IN64 3
+
+#define EVERGREEN_D3VGA_CONTROL 0xf8
+#define EVERGREEN_D4VGA_CONTROL 0xf9
+#define EVERGREEN_D5VGA_CONTROL 0xfa
+#define EVERGREEN_D6VGA_CONTROL 0xfb
+
+#define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
+
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x1a02
+#define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
+
+#define EVERGREEN_GRPH_PITCH 0x1a06
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x1a09
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x1a0a
+#define EVERGREEN_GRPH_X_START 0x1a0b
+#define EVERGREEN_GRPH_Y_START 0x1a0c
+#define EVERGREEN_GRPH_X_END 0x1a0d
+#define EVERGREEN_GRPH_Y_END 0x1a0e
+#define EVERGREEN_GRPH_UPDATE 0x1a11
+#define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
+#define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL 0x1a12
+#define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+#define EVERGREEN_VIEWPORT_START 0x1b5c
+#define EVERGREEN_VIEWPORT_SIZE 0x1b5d
+#define EVERGREEN_DESKTOP_HEIGHT 0x1ac1
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL 0x1a66
+# define EVERGREEN_CURSOR_EN (1 << 0)
+# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
+# define EVERGREEN_CURSOR_MONO 0
+# define EVERGREEN_CURSOR_24_1 1
+# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
+# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
+# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
+# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
+# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
+# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
+# define EVERGREEN_CURSOR_URGENT_1_8 1
+# define EVERGREEN_CURSOR_URGENT_1_4 2
+# define EVERGREEN_CURSOR_URGENT_3_8 3
+# define EVERGREEN_CURSOR_URGENT_1_2 4
+#define EVERGREEN_CUR_SURFACE_ADDRESS 0x1a67
+# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
+#define EVERGREEN_CUR_SIZE 0x1a68
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x1a69
+#define EVERGREEN_CUR_POSITION 0x1a6a
+#define EVERGREEN_CUR_HOT_SPOT 0x1a6b
+#define EVERGREEN_CUR_COLOR1 0x1a6c
+#define EVERGREEN_CUR_COLOR2 0x1a6d
+#define EVERGREEN_CUR_UPDATE 0x1a6e
+# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
+# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
+# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
+# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+#define NI_INPUT_CSC_CONTROL 0x1a35
+# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_CSC_BYPASS 0
+# define NI_INPUT_CSC_PROG_COEFF 1
+# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
+# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL 0x1a3c
+# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
+# define NI_OUTPUT_CSC_BYPASS 0
+# define NI_OUTPUT_CSC_TV_RGB 1
+# define NI_OUTPUT_CSC_YCBCR_601 2
+# define NI_OUTPUT_CSC_YCBCR_709 3
+# define NI_OUTPUT_CSC_PROG_COEFF 4
+# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
+# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL 0x1a58
+# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_DEGAMMA_BYPASS 0
+# define NI_DEGAMMA_SRGB_24 1
+# define NI_DEGAMMA_XVYCC_222 2
+# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
+# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
+# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL 0x1a59
+# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
+# define NI_GAMUT_REMAP_BYPASS 0
+# define NI_GAMUT_REMAP_PROG_COEFF 1
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
+# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
+# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL 0x1aa0
+# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
+# define NI_REGAMMA_BYPASS 0
+# define NI_REGAMMA_SRGB_24 1
+# define NI_REGAMMA_XVYCC_222 2
+# define NI_REGAMMA_PROG_A 3
+# define NI_REGAMMA_PROG_B 4
+# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL 0x1a2d
+# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL 0x1a31
+# define NI_OVL_PRESCALE_BYPASS (1 << 4)
+
+#define NI_INPUT_GAMMA_CONTROL 0x1a10
+# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
+# define NI_INPUT_GAMMA_USE_LUT 0
+# define NI_INPUT_GAMMA_BYPASS 1
+# define NI_INPUT_GAMMA_SRGB_24 2
+# define NI_INPUT_GAMMA_XVYCC_222 3
+# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
+
+#define IH_RB_WPTR__RB_OVERFLOW_MASK 0x1
+#define IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK 0x80000000
+#define SRBM_STATUS__IH_BUSY_MASK 0x20000
+#define SRBM_SOFT_RESET__SOFT_RESET_IH_MASK 0x400
+
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10
+#define mmVM_CONTEXT1_CNTL__xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80
+#define mmVM_CONTEXT1_CNTL__xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x400
+#define mmVM_CONTEXT1_CNTL__xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x2000
+#define mmVM_CONTEXT1_CNTL__xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xd
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x10000
+#define mmVM_CONTEXT1_CNTL__xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x80000
+#define mmVM_CONTEXT1_CNTL__xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x13
+
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID_MASK 0x1e000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxVMID__SHIFT 0x19
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS_MASK 0xff
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxPROTECTIONS__SHIFT 0x0
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID_MASK 0xff000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_ID__SHIFT 0xc
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW_MASK 0x1000000
+#define mmVM_CONTEXT1_PROTECTION_FAULT_STATUS__xxMEMORY_CLIENT_RW__SHIFT 0x18
+
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE_MASK 0x7
+#define mmMC_SHARED_BLACKOUT_CNTL__xxBLACKOUT_MODE__SHIFT 0x0
+
+#define mmBIF_FB_EN__xxFB_READ_EN_MASK 0x1
+#define mmBIF_FB_EN__xxFB_READ_EN__SHIFT 0x0
+#define mmBIF_FB_EN__xxFB_WRITE_EN_MASK 0x2
+#define mmBIF_FB_EN__xxFB_WRITE_EN__SHIFT 0x1
+
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC_MASK 0x20000
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_VMC__SHIFT 0x11
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC_MASK 0x800
+#define mmSRBM_SOFT_RESET__xxSOFT_RESET_MC__SHIFT 0xb
+
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8
+#define VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x3
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40
+#define VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x6
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x200
+#define VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x1000
+#define VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xc
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x8000
+#define VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x40000
+#define VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x12
+
+#define MC_SEQ_MISC0__MT__MASK 0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
+#define MC_SEQ_MISC0__MT__DDR2 0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3 0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4 0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5 0x50000000
+#define MC_SEQ_MISC0__MT__HBM 0x60000000
+#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
+
+#define SRBM_STATUS__MCB_BUSY_MASK 0x200
+#define SRBM_STATUS__MCB_BUSY__SHIFT 0x9
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK 0x400
+#define SRBM_STATUS__MCB_NON_DISPLAY_BUSY__SHIFT 0xa
+#define SRBM_STATUS__MCC_BUSY_MASK 0x800
+#define SRBM_STATUS__MCC_BUSY__SHIFT 0xb
+#define SRBM_STATUS__MCD_BUSY_MASK 0x1000
+#define SRBM_STATUS__MCD_BUSY__SHIFT 0xc
+#define SRBM_STATUS__VMC_BUSY_MASK 0x100
+#define SRBM_STATUS__VMC_BUSY__SHIFT 0x8
+
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000
+#define CP_INT_CNTL_RING__TIME_STAMP_INT_ENABLE_MASK 0x4000000
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x800000
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x400000
+#define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
+#define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
+#define PACKET3_SEM_SEL_WAIT (0x7 << 29)
+
+#define CONFIG_CNTL 0x1509
+#define CC_DRM_ID_STRAPS 0x1559
+#define AMDGPU_PCIE_INDEX 0xc
+#define AMDGPU_PCIE_DATA 0xd
+
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0x3411
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0x3412
+#define DMA_MODE 0x342f
+#define DMA_RB_RPTR_ADDR_HI 0x3407
+#define DMA_RB_RPTR_ADDR_LO 0x3408
+#define DMA_BUSY_MASK 0x20
+#define DMA1_BUSY_MASK 0x40
+#define SDMA_MAX_INSTANCE 2
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+#define CC_DRM_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
+#define CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define PCIE_PORT_INDEX 0xe
+#define PCIE_PORT_DATA 0xf
+#define EVERGREEN_PIF_PHY0_INDEX 0x8
+#define EVERGREEN_PIF_PHY0_DATA 0xc
+#define EVERGREEN_PIF_PHY1_INDEX 0x10
+#define EVERGREEN_PIF_PHY1_DATA 0x14
+
+#define MC_VM_FB_OFFSET 0x81a
+
+#endif
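/*
 * Aside on the MC_SEQ_MISC0 MT values added above: bits 31:28 encode the
 * installed VRAM type, so a driver can mask and compare to tell GDDR5 from
 * HBM from plain DDR. A minimal decode sketch using only the definitions
 * above (the helper name is illustrative, not from this patch):
 */
static const char *si_decode_vram_type(uint32_t mc_seq_misc0)
{
	switch (mc_seq_misc0 & MC_SEQ_MISC0__MT__MASK) {
	case MC_SEQ_MISC0__MT__GDDR5: return "GDDR5";
	case MC_SEQ_MISC0__MT__HBM:   return "HBM";
	case MC_SEQ_MISC0__MT__DDR3:  return "DDR3";
	default:                      return "unknown";
	}
}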
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
index f3e53b118361..19802e96417e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h
@@ -34,6 +34,7 @@
#define mmUVD_UDEC_ADDR_CONFIG 0x3bd3
#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
+#define mmUVD_NO_OP 0x3bff
#define mmUVD_SEMA_CNTL 0x3d00
#define mmUVD_LMI_EXT40_ADDR 0x3d26
#define mmUVD_CTX_INDEX 0x3d28
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
index eb4cf53427da..cc972d237a7e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h
@@ -34,6 +34,7 @@
#define mmUVD_UDEC_ADDR_CONFIG 0x3bd3
#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
+#define mmUVD_NO_OP 0x3bff
#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69
#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index ec69869c55ff..378f4b6b43da 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -35,6 +35,7 @@
#define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
#define mmUVD_POWER_STATUS_U 0x3bfd
+#define mmUVD_NO_OP 0x3bff
#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69
#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 3493da5c8f0e..4a4d3797a6d3 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -494,6 +494,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
@@ -526,6 +527,7 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
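/*
 * Why ulClockParams: the union's structured views are byte-order sensitive,
 * so a big-endian host wants to byte-swap the raw 32-bit word once around
 * the SMC call instead of swapping individual fields. The new member makes
 * that a one-liner; a hedged sketch, with `args` a hypothetical local of
 * the structure above:
 *
 *	args.ulClockParams = le32_to_cpu(args.ulClockParams);
 */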
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index b86aba9d019f..df7c18b6a02a 100644..100755
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -119,6 +119,8 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_GFX_CU_INFO,
CGS_SYSTEM_INFO_GFX_SE_INFO,
+ CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
+ CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
@@ -159,6 +161,7 @@ struct cgs_clock_limits {
*/
struct cgs_firmware_info {
uint16_t version;
+ uint16_t fw_version;
uint16_t feature_version;
uint32_t image_size;
uint64_t mc_addr;
diff --git a/drivers/gpu/drm/amd/powerplay/Kconfig b/drivers/gpu/drm/amd/powerplay/Kconfig
deleted file mode 100644
index af380335b425..000000000000
--- a/drivers/gpu/drm/amd/powerplay/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config DRM_AMD_POWERPLAY
- bool "Enable AMD powerplay component"
- depends on DRM_AMDGPU
- default n
- help
- select this option will enable AMD powerplay component.
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index abbb658bdc1e..7174f7a68266 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -31,6 +31,7 @@
#include "eventmanager.h"
#include "pp_debug.h"
+
#define PP_CHECK(handle) \
do { \
if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \
@@ -162,12 +163,12 @@ static int pp_hw_fini(void *handle)
pp_handle = (struct pp_instance *)handle;
eventmgr = pp_handle->eventmgr;
- if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL)
+ if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL)
eventmgr->pp_eventmgr_fini(eventmgr);
smumgr = pp_handle->smu_mgr;
- if (smumgr != NULL || smumgr->smumgr_funcs != NULL ||
+ if (smumgr != NULL && smumgr->smumgr_funcs != NULL &&
smumgr->smumgr_funcs->smu_fini != NULL)
smumgr->smumgr_funcs->smu_fini(smumgr);
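/*
 * The || -> && fixes above matter because || inverts the guard: with a NULL
 * eventmgr the left test is false, so the right test dereferences NULL
 * before the call is ever reached. Reduced to its essentials, the safe
 * shape relies on && short-circuiting left to right:
 */
if (eventmgr && eventmgr->pp_eventmgr_fini)	/* deref only if non-NULL */
	eventmgr->pp_eventmgr_fini(eventmgr);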
@@ -190,11 +191,9 @@ static int pp_sw_reset(void *handle)
}
-static int pp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
struct pp_hwmgr *hwmgr;
- uint32_t msg_id, pp_state;
if (handle == NULL)
return -EINVAL;
@@ -208,76 +207,7 @@ static int pp_set_clockgating_state(void *handle,
return 0;
}
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
-
- /* Enable/disable GFX blocks clock gating through SMU */
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_3D,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_RLC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_MG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
- /* Enable/disable System blocks clock gating through SMU */
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_MC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_ROM,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_DRM,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_HDP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_SDMA,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
- return 0;
+ return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
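/*
 * The per-block gating loop removed above now lives with the caller: it
 * packs one request with PP_CG_MSG_ID() (exactly as the deleted code did)
 * and hands it to the exported amd_set_clockgating_by_smu(). A hedged
 * caller sketch for gating the GFX CG block:
 *
 *	msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, PP_BLOCK_GFX_CG,
 *			      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
 *			      PP_STATE_CG | PP_STATE_LS);
 *	ret = amd_set_clockgating_by_smu(pp_handle, msg_id);
 */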
static int pp_set_powergating_state(void *handle,
@@ -361,7 +291,7 @@ const struct amd_ip_funcs pp_ip_funcs = {
.is_idle = pp_is_idle,
.wait_for_idle = pp_wait_for_idle,
.soft_reset = pp_sw_reset,
- .set_clockgating_state = pp_set_clockgating_state,
+ .set_clockgating_state = NULL,
.set_powergating_state = pp_set_powergating_state,
};
@@ -537,7 +467,6 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
case AMD_PP_EVENT_READJUST_POWER_STATE:
- pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
default:
@@ -576,28 +505,6 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
}
}
-static void
-pp_debugfs_print_current_performance_level(void *handle,
- struct seq_file *m)
-{
- struct pp_hwmgr *hwmgr;
-
- if (handle == NULL)
- return;
-
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
- return;
-
- if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
- return;
- }
-
- hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
-}
-
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr;
@@ -764,15 +671,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
PP_CHECK_HW(hwmgr);
if (!hwmgr->hardcode_pp_table) {
- hwmgr->hardcode_pp_table =
- kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
if (!hwmgr->hardcode_pp_table)
return -ENOMEM;
-
- /* to avoid powerplay crash when hardcode pptable is empty */
- memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size);
}
memcpy(hwmgr->hardcode_pp_table, buf, size);
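/*
 * kmemdup(src, len, gfp) is the stock allocate-and-copy helper, so the
 * kzalloc() + memcpy() pair (and its "avoid crash when empty" workaround)
 * collapses to one call; the zeroing was redundant anyway because every
 * byte is overwritten. The general shape:
 *
 *	buf = kmemdup(src, len, GFP_KERNEL);	\/\* kmalloc + memcpy \*\/
 *	if (!buf)
 *		return -ENOMEM;
 */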
@@ -897,6 +801,25 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
+static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->read_sensor == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
+}
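/*
 * pp_dpm_read_sensor() is plumbed into pp_dpm_funcs below, so a client
 * reads a sensor through the amd_powerplay function table rather than
 * calling the static function directly. Hedged usage sketch (the idx
 * constants are the AMDGPU_PP_SENSOR_* values handled by the cz backend
 * later in this patch):
 *
 *	int32_t value;
 *	if (!pp_funcs->read_sensor(handle, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
 *		pr_info("GPU temp: %d\n", value);
 */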
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -909,7 +832,6 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.powergate_vce = pp_dpm_powergate_vce,
.powergate_uvd = pp_dpm_powergate_uvd,
.dispatch_tasks = pp_dpm_dispatch_tasks,
- .print_current_performance_level = pp_debugfs_print_current_performance_level,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
@@ -923,6 +845,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.set_sclk_od = pp_dpm_set_sclk_od,
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
+ .read_sensor = pp_dpm_read_sensor,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 635fc4b48184..8cee4e0f9fde 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = {
uninitialize_display_phy_access_tasks,
disable_gfx_voltage_island_power_gating_tasks,
disable_gfx_clock_gating_tasks,
+ uninitialize_thermal_controller_tasks,
set_boot_state_tasks,
adjust_power_state_tasks,
disable_dynamic_state_management_tasks,
@@ -262,6 +263,8 @@ static const pem_event_action * const display_config_change_event[] = {
unblock_adjust_power_state_tasks,
set_cpu_power_state,
notify_hw_power_source_tasks,
+ get_2d_performance_state_tasks,
+ set_performance_state_tasks,
/* updateDALConfigurationTasks,
variBrightDisplayConfigurationChangeTasks, */
adjust_power_state_tasks,
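/*
 * Context for the two insertions above: each *_event[] table is an ordered
 * list of task chains that the event manager walks front to back, so a
 * display-config change now recomputes and applies a performance state
 * before adjust_power_state_tasks runs. Conceptually (a sketch of the
 * dispatch, not the actual pem code):
 *
 *	for (i = 0; event_chain[i] != NULL; i++)
 *		run_action_chain(eventmgr, event_chain[i]);
 */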
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
index a46225c0fc01..489908887e9c 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
@@ -70,11 +70,12 @@ int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id)
int i;
table_entries = hwmgr->num_ps;
+
state = hwmgr->ps;
for (i = 0; i < table_entries; i++) {
if (state->id == *state_id) {
- hwmgr->request_ps = state;
+ memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
return 0;
}
state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
@@ -100,13 +101,14 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
if (requested == NULL)
return 0;
+ phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+
if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
equal = false;
if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
- phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
- hwmgr->current_ps = requested;
+ memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
}
return 0;
}
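/*
 * Both hunks above replace pointer aliasing with a copy: request_ps and
 * current_ps are hwmgr-owned buffers of ps_size bytes, so the adjust-rules
 * step scribbles on the copy rather than on the pptable-backed array that
 * `state` walks. In miniature:
 *
 *	hwmgr->request_ps = state;                        \/\* old: alias \*\/
 *	memcpy(hwmgr->request_ps, state, hwmgr->ps_size); \/\* new: own copy \*\/
 */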
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index f7ce4cb71346..5fff1d636ab7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -3,14 +3,12 @@
# It provides the hardware management services for the driver.
HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
- hardwaremanager.o pp_acpi.o cz_hwmgr.o \
- cz_clockpowergating.o \
- tonga_processpptables.o ppatomctrl.o \
- tonga_hwmgr.o pppcielanes.o tonga_thermal.o\
- fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
- fiji_clockpowergating.o fiji_thermal.o \
- polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
- polaris10_clockpowergating.o
+ hardwaremanager.o pp_acpi.o cz_hwmgr.o \
+ cz_clockpowergating.o pppcielanes.o\
+ process_pptables_v1_0.o ppatomctrl.o \
+ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
+ smu7_clockpowergating.o
+
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 8cc0df9b534a..960424913496 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -178,7 +178,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
int result;
cz_hwmgr->gfx_ramp_step = 256*25/100;
-
cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
@@ -186,33 +185,19 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
-
cz_hwmgr->clock_slow_down_freq = 25000;
-
cz_hwmgr->skip_clock_slow_down = 1;
-
cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
-
cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
-
cz_hwmgr->voting_rights_clients = 0x00C00033;
-
cz_hwmgr->static_screen_threshold = 8;
-
cz_hwmgr->ddi_power_gating_disabled = 0;
-
cz_hwmgr->bapm_enabled = 1;
-
cz_hwmgr->voltage_drop_threshold = 0;
-
cz_hwmgr->gfx_power_gating_threshold = 500;
-
cz_hwmgr->vce_slow_sclk_threshold = 20000;
-
cz_hwmgr->dce_slow_sclk_threshold = 30000;
-
cz_hwmgr->disable_driver_thermal_policy = 1;
-
cz_hwmgr->disable_nb_ps3_in_battery = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -221,9 +206,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_NonABMSupportInPPLib);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep);
-
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicM3Arbiter);
@@ -233,9 +215,7 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_DynamicPatchPowerState);
cz_hwmgr->thermal_auto_throttling_treshold = 0;
-
cz_hwmgr->tdr_clock = 0;
-
cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -450,19 +430,12 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
(uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
cz_hwmgr->boot_power_level.dsDividerIndex = 0;
-
cz_hwmgr->boot_power_level.ssDividerIndex = 0;
-
cz_hwmgr->boot_power_level.allowGnbSlow = 1;
-
cz_hwmgr->boot_power_level.forceNBPstate = 0;
-
cz_hwmgr->boot_power_level.hysteresis_up = 0;
-
cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
-
cz_hwmgr->boot_power_level.display_wm = 0;
-
cz_hwmgr->boot_power_level.vce_wm = 0;
return 0;
@@ -749,7 +722,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
clock = hwmgr->display_config.min_core_set_clock;
-;
if (clock == 0)
printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
@@ -832,7 +804,7 @@ static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetWatermarkFrequency,
- cz_hwmgr->sclk_dpm.soft_max_clk);
+ cz_hwmgr->sclk_dpm.soft_max_clk);
return 0;
}
@@ -858,9 +830,9 @@ static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr,
PP_DBG_LOG("enabling ALL SMU features.\n");
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_EnableAllSmuFeatures,
- dpm_features);
+ hwmgr->smumgr,
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features);
if (ret == 0)
cz_hwmgr->is_nb_dpm_enabled = true;
}
@@ -1246,7 +1218,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- if (hwmgr != NULL || hwmgr->backend != NULL) {
+ if (hwmgr != NULL && hwmgr->backend != NULL) {
kfree(hwmgr->backend);
kfree(hwmgr);
}
@@ -1402,10 +1374,12 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
PPSMC_MSG_SetUvdHardMin));
cz_enable_disable_uvd_dpm(hwmgr, true);
- } else
+ } else {
cz_enable_disable_uvd_dpm(hwmgr, true);
- } else
+ }
+ } else {
cz_enable_disable_uvd_dpm(hwmgr, false);
+ }
return 0;
}
@@ -1564,78 +1538,6 @@ int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
return sizeof(struct cz_power_state);
}
-static void
-cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
- struct phm_clock_voltage_dependency_table *table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
-
- struct phm_vce_clock_voltage_dependency_table *vce_table =
- hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-
- struct phm_uvd_clock_voltage_dependency_table *uvd_table =
- hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
- uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
- TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
- uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
- TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
- uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
- TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
-
- uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
- uint16_t vddnb, vddgfx;
- int result;
-
- if (sclk_index >= NUM_SCLK_LEVELS) {
- seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index);
- } else {
- sclk = table->entries[sclk_index].clk;
- seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100);
- }
-
- tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
- CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
- tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
- CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
- seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
-
- seq_printf(m, "\n uvd %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en");
- if (!cz_hwmgr->uvd_power_gated) {
- if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
- seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index);
- } else {
- vclk = uvd_table->entries[uvd_index].vclk;
- dclk = uvd_table->entries[uvd_index].dclk;
- seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100);
- }
- }
-
- seq_printf(m, "\n vce %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en");
- if (!cz_hwmgr->vce_power_gated) {
- if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
- seq_printf(m, "\n invalid vce dpm level %d\n", vce_index);
- } else {
- ecclk = vce_table->entries[vce_index].ecclk;
- seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100);
- }
- }
-
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
- if (0 == result) {
- activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
- activity_percent = activity_percent > 100 ? 100 : activity_percent;
- } else {
- activity_percent = 50;
- }
-
- seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent);
-}
-
static void cz_hw_print_display_cfg(
const struct cc6_settings *cc6_settings)
{
@@ -1690,13 +1592,10 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
if (separation_time !=
- hw_data->cc6_settings.cpu_pstate_separation_time
- || cc6_disable !=
- hw_data->cc6_settings.cpu_cc6_disable
- || pstate_disable !=
- hw_data->cc6_settings.cpu_pstate_disable
- || pstate_switch_disable !=
- hw_data->cc6_settings.nb_pstate_switch_disable) {
+ hw_data->cc6_settings.cpu_pstate_separation_time ||
+ cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
+ pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
+ pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
hw_data->cc6_settings.cc6_setting_changed = true;
@@ -1799,8 +1698,7 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
ps = cast_const_PhwCzPowerState(state);
level_index = index > ps->level - 1 ? ps->level - 1 : index;
-
- level->coreClock = ps->levels[level_index].engineClock;
+ level->coreClock = ps->levels[level_index].engineClock;
if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
for (i = 1; i < ps->level; i++) {
@@ -1887,6 +1785,125 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0;
}
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+ int actual_temp = 0;
+ uint32_t val = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+ uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+ if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+ actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ else
+ actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return actual_temp;
+}
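/*
 * Worked decode for cz_thermal_get_temperature(): CUR_TEMP counts in
 * 1/8-degree steps, and CUR_TEMP_RANGE_SEL selects a range offset by 49 C.
 * Assuming PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (millidegrees; an
 * assumption, check the header), a raw CUR_TEMP of 520 with range-select
 * set yields ((520 / 8) - 49) * 1000 = (65 - 49) * 1000 = 16000, i.e. 16 C.
 */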
+
+static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
+{
+ struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+ uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
+ TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+ uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+ uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+
+ uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
+ uint16_t vddnb, vddgfx;
+ int result;
+
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ if (sclk_index < NUM_SCLK_LEVELS) {
+ sclk = table->entries[sclk_index].clk;
+ *value = sclk;
+ return 0;
+ }
+ return -EINVAL;
+ case AMDGPU_PP_SENSOR_VDDNB:
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
+ CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+ vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
+ *value = vddnb;
+ return 0;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
+ CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+ vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+ *value = vddgfx;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_VCLK:
+ if (!cz_hwmgr->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ return -EINVAL;
+ } else {
+ vclk = uvd_table->entries[uvd_index].vclk;
+ *value = vclk;
+ return 0;
+ }
+ }
+ *value = 0;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_DCLK:
+ if (!cz_hwmgr->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ return -EINVAL;
+ } else {
+ dclk = uvd_table->entries[uvd_index].dclk;
+ *value = dclk;
+ return 0;
+ }
+ }
+ *value = 0;
+ return 0;
+ case AMDGPU_PP_SENSOR_VCE_ECCLK:
+ if (!cz_hwmgr->vce_power_gated) {
+ if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ return -EINVAL;
+ } else {
+ ecclk = vce_table->entries[vce_index].ecclk;
+ *value = ecclk;
+ return 0;
+ }
+ }
+ *value = 0;
+ return 0;
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
+ if (0 == result) {
+ activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
+ activity_percent = activity_percent > 100 ? 100 : activity_percent;
+ } else {
+ activity_percent = 50;
+ }
+ *value = activity_percent;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *value = cz_hwmgr->uvd_power_gated ? 0 : 1;
+ return 0;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *value = cz_hwmgr->vce_power_gated ? 0 : 1;
+ return 0;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ *value = cz_thermal_get_temperature(hwmgr);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init,
.backend_fini = cz_hwmgr_backend_fini,
@@ -1902,7 +1919,6 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.patch_boot_state = cz_dpm_patch_boot_state,
.get_pp_table_entry = cz_dpm_get_pp_table_entry,
.get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
- .print_current_perforce_level = cz_print_current_perforce_level,
.set_cpu_power_state = cz_set_cpu_power_state,
.store_cc6_data = cz_store_cc6_data,
.force_clock_level = cz_force_clock_level,
@@ -1912,6 +1928,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
.get_clock_by_type = cz_get_clock_by_type,
.get_max_high_clocks = cz_get_max_high_clocks,
+ .read_sensor = cz_read_sensor,
};
int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
deleted file mode 100644
index 5afe82068b29..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "fiji_clockpowergating.h"
-#include "fiji_ppsmc.h"
-#include "fiji_hwmgr.h"
-
-int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
- data->acp_power_gated = false;
-
- return 0;
-}
-
-int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->uvd_power_gated == bgate)
- return 0;
-
- data->uvd_power_gated = bgate;
-
- if (bgate) {
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
- fiji_update_uvd_dpm(hwmgr, true);
- } else {
- fiji_update_uvd_dpm(hwmgr, false);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- }
-
- return 0;
-}
-
-int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_set_power_state_input states;
- const struct pp_power_state *pcurrent;
- struct pp_power_state *requested;
-
- if (data->vce_power_gated == bgate)
- return 0;
-
- data->vce_power_gated = bgate;
-
- pcurrent = hwmgr->current_ps;
- requested = hwmgr->request_ps;
-
- states.pcurrent_state = &(pcurrent->hardware);
- states.pnew_state = &(requested->hardware);
-
- fiji_update_vce_dpm(hwmgr, &states);
- fiji_enable_disable_vce_dpm(hwmgr, !bgate);
-
- return 0;
-}
-
-int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->samu_power_gated == bgate)
- return 0;
-
- data->samu_power_gated = bgate;
-
- if (bgate)
- fiji_update_samu_dpm(hwmgr, true);
- else
- fiji_update_samu_dpm(hwmgr, false);
-
- return 0;
-}
-
-int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->acp_power_gated == bgate)
- return 0;
-
- data->acp_power_gated = bgate;
-
- if (bgate)
- fiji_update_acp_dpm(hwmgr, true);
- else
- fiji_update_acp_dpm(hwmgr, false);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
deleted file mode 100644
index 32d43e8fecb2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_DYN_DEFAULTS_H
-#define FIJI_DYN_DEFAULTS_H
-
-/** \file
-* Volcanic Islands Dynamic default parameters.
-*/
-
-enum FIJIdpm_TrendDetection
-{
- FIJIAdpm_TrendDetection_AUTO,
- FIJIAdpm_TrendDetection_UP,
- FIJIAdpm_TrendDetection_DOWN
-};
-typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection;
-
-/* We need to fill in the default values!!!!!!!!!!!!!!!!!!!!!!! */
-
-/* Bit vector representing same fields as hardware register. */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ????
- * HDP_busy
- * IH_busy
- * UVD_busy
- * VCE_busy
- * ACP_busy
- * SAMU_busy
- * SDMA enabled */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ????
- * SH_Gfx_busy
- * RB_Gfx_busy
- * VCE_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility.
- * FE_Gfx_busy
- * RB_Gfx_busy
- * ACP_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility.
- * FE_Gfx_busy
- * SH_Gfx_busy
- * UVD_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy
- * VCE_busy
- * ACP_busy
- * SAMU_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */
-
-
-/* thermal protection counter (units). */
-#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
-
-/* static screen threshold unit */
-#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0
-
-/* static screen threshold */
-#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8
-
-/* gfx idle clock stop threshold */
-#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
-
-/* Fixed reference divider to use when building baby stepping tables. */
-#define PPFIJI_REFERENCEDIVIDER_DFLT 4
-
-/* ULV voltage change delay time
- * Used to be delay_vreg in N.I. split for S.I.
- * Using N.I. delay_vreg value as default
- * ReferenceClock = 2700
- * VoltageResponseTime = 1000
- * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
- */
-#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687
-
-#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035
-#define PPFIJI_CGULVCONTROL_DFLT 0x00007450
-#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30%*/
-#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
deleted file mode 100644
index 120a9e2c3152..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ /dev/null
@@ -1,5599 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-
-#include "hwmgr.h"
-#include "fiji_smumgr.h"
-#include "atombios.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "cgs_common.h"
-#include "fiji_dyn_defaults.h"
-#include "fiji_powertune.h"
-#include "smu73.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "pppcielanes.h"
-#include "fiji_hwmgr.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
-#include "pp_debug.h"
-#include "pp_acpi.h"
-#include "amd_pcie_helpers.h"
-#include "cgs_linux.h"
-#include "ppinterrupt.h"
-
-#include "fiji_clockpowergating.h"
-#include "fiji_thermal.h"
-
-#define VOLTAGE_SCALE 4
-#define SMC_RAM_END 0x40000
-#define VDDC_VDDCI_DELTA 300
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-#define MC_CG_ARB_FREQ_F0 0x0a /* boot-up default */
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-/* From smc_reg.h */
-#define SMC_CG_IND_START 0xc0030000
-#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND */
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-#define VDDC_VDDCI_DELTA 300
-
-#define ixSWRST_COMMAND_1 0x1400103
-#define MC_SEQ_CNTL__CAC_EN_MASK 0x40000000
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
- DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
- DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
- DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
- DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
- DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
-};
-
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
- * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
- */
-static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and
- * [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
- * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
- */
-static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct fiji_power_state *cast_phw_fiji_power_state(
- struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL;);
-
- return (struct fiji_power_state *)hw_ps;
-}
-
-const struct fiji_power_state *cast_const_phw_fiji_power_state(
- const struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL;);
-
- return (const struct fiji_power_state *)hw_ps;
-}
-
-static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_ulv_parm *ulv = &data->ulv;
-
- ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT;
- data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7;
-
- data->static_screen_threshold_unit =
- PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT;
- data->static_screen_threshold =
- PPFIJI_STATICSCREENTHRESHOLD_DFLT;
-
- /* Unset ABM cap as it moved to DAL.
- * Add PHM_PlatformCaps_NonABMSupportInPPLib
- * for re-direct ABM related request to DAL
- */
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ABM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_NonABMSupportInPPLib);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicACTiming);
-
- fiji_initialize_power_tune_defaults(hwmgr);
-
- data->mclk_stutter_mode_threshold = 60000;
- data->pcie_gen_performance.max = PP_PCIEGen1;
- data->pcie_gen_performance.min = PP_PCIEGen3;
- data->pcie_gen_power_saving.max = PP_PCIEGen1;
- data->pcie_gen_power_saving.min = PP_PCIEGen3;
- data->pcie_lane_performance.max = 0;
- data->pcie_lane_performance.min = 16;
- data->pcie_lane_power_saving.max = 0;
- data->pcie_lane_power_saving.min = 16;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-}
-
-static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- uint16_t virtual_voltage_id, int32_t *sclk)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
- for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
- voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
- if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
- break;
- }
-
- PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
- "Can't find requested voltage id in vdd_dep_on_sclk table!",
- return -EINVAL;
- );
-
- *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
-
- return 0;
-}
-
-/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint16_t vv_id;
- uint16_t vddc = 0;
- uint16_t evv_default = 1150;
- uint16_t i, j;
- uint32_t sclk = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- int result;
-
- for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) {
- vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
- if (!fiji_get_sclk_for_voltage_evv(hwmgr,
- table_info->vddc_lookup_table, vv_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- for (j = 1; j < sclk_table->count; j++) {
- if (sclk_table->entries[j].clk == sclk &&
- sclk_table->entries[j].cks_enable == 0) {
- sclk += 5000;
- break;
- }
- }
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableDriverEVV))
- result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr,
- VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true);
- else
- result = -EINVAL;
-
- if (result)
- result = atomctrl_get_voltage_evv_on_sclk(hwmgr,
- VOLTAGE_TYPE_VDDC, sclk,vv_id, &vddc);
-
- /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
- PP_ASSERT_WITH_CODE((vddc < 2000),
- "Invalid VDDC value, greater than 2v!", result = -EINVAL;);
-
- if (result)
- /* 1.15V is the default safe value for Fiji */
- vddc = evv_default;
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddc != 0 && vddc != vv_id) {
- data->vddc_leakage.actual_voltage
- [data->vddc_leakage.count] = vddc;
- data->vddc_leakage.leakage_id
- [data->vddc_leakage.count] = vv_id;
- data->vddc_leakage.count++;
- }
- }
- }
- return 0;
-}
-
-/**
- * Change virtual leakage voltage to actual value.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pointer to changing voltage
- * @param pointer to leakage table
- */
-static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
- uint16_t *voltage, struct fiji_leakage_voltage *leakage_table)
-{
- uint32_t index;
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 */
- for (index = 0; index < leakage_table->count; index++) {
- /* if this voltage matches a leakage voltage ID */
- /* patch with actual leakage voltage */
- if (leakage_table->leakage_id[index] == *voltage) {
- *voltage = leakage_table->actual_voltage[index];
- break;
- }
- }
-
- if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
-}
-
-/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pointer to voltage lookup table
-* @param pointer to leakage table
-* @return always 0
-*/
-static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- struct fiji_leakage_voltage *leakage_table)
-{
- uint32_t i;
-
- for (i = 0; i < lookup_table->count; i++)
- fiji_patch_with_vdd_leakage(hwmgr,
- &lookup_table->entries[i].us_vdd, leakage_table);
-
- return 0;
-}
-
-static int fiji_patch_clock_voltage_limits_with_vddc_leakage(
- struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table,
- uint16_t *vddc)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
- hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
- table_info->max_clock_voltage_on_dc.vddc;
- return 0;
-}
-
-static int fiji_patch_voltage_dependency_tables_with_lookup_table(
- struct pp_hwmgr *hwmgr)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
- table_info->vdd_dep_on_mclk;
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- voltageId = sclk_table->entries[entryId].vddInd;
- sclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mclk_table->count; ++entryId) {
- voltageId = mclk_table->entries[entryId].vddInd;
- mclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mm_table->count; ++entryId) {
- voltageId = mm_table->entries[entryId].vddcInd;
- mm_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- return 0;
-
-}
-
-static int fiji_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage. */
- return 0;
-}
-
-static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage from mm table. */
- return 0;
-}
-
-static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
- table_size = lookup_table->count;
-
- PP_ASSERT_WITH_CODE(0 != lookup_table->count,
- "Lookup table is empty", return -EINVAL);
-
- /* Sorting voltages */
- for (i = 0; i < table_size - 1; i++) {
- for (j = i + 1; j > 0; j--) {
- if (lookup_table->entries[j].us_vdd <
- lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
- }
- }
- }
-
- return 0;
-}
-
-static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- int tmp_result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr,
- table_info->vddc_lookup_table, &(data->vddc_leakage));
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
- &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_calc_voltage_dependency_tables(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
- if(tmp_result)
- result = tmp_result;
-
- return result;
-}
-
-static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
- table_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
- "VDD dependency on SCLK table is missing. \
- This table is mandatory", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
- "VDD dependency on SCLK table has to have is missing. \
- This table is mandatory", return -EINVAL);
-
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
- "VDD dependency on MCLK table is missing. \
- This table is mandatory", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
- "VDD dependency on MCLK table has to have is missing. \
- This table is mandatory", return -EINVAL);
-
- data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
- data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->
- entries[allowed_sclk_vdd_table->count - 1].vddc;
-
- table_info->max_clock_voltage_on_ac.sclk =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.mclk =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.vddc =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
- table_info->max_clock_voltage_on_ac.vddci =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
- hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
- table_info->max_clock_voltage_on_ac.sclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
- table_info->max_clock_voltage_on_ac.mclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
- table_info->max_clock_voltage_on_ac.vddc;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
- table_info->max_clock_voltage_on_ac.vddci;
-
- return 0;
-}
-
-static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
- uint32_t speedCntl = 0;
-
- /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
- speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
- return((uint16_t)PHM_GET_FIELD(speedCntl,
- PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
-{
- uint32_t link_width;
-
- /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
- link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
-
- PP_ASSERT_WITH_CODE((7 >= link_width),
- "Invalid PCIe lane width!", return 0);
-
- return decode_pcie_lane_width(link_width);
-}
-
-/** Patch the Boot State to match VBIOS boot clocks and voltage.
-*
-* @param hwmgr Pointer to the hardware manager.
-* @param pPowerState The address of the PowerState instance being created.
-*
-*/
-static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps;
- ATOM_FIRMWARE_INFO_V2_2 *fw_info;
- uint16_t size;
- uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
- /* First retrieve the Boot clocks and VDDC from the firmware info table.
- * We assume here that fw_info is unchanged if this call fails.
- */
- fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
- hwmgr->device, index,
- &size, &frev, &crev);
- if (!fw_info)
- /* During a test, there is no firmware info table. */
- return 0;
-
- /* Patch the state. */
- data->vbios_boot_state.sclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultEngineClock);
- data->vbios_boot_state.mclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultMemoryClock);
- data->vbios_boot_state.mvdd_bootup_value =
- le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
- data->vbios_boot_state.vddc_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCVoltage);
- data->vbios_boot_state.vddci_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
- data->vbios_boot_state.pcie_gen_bootup_value =
- fiji_get_current_pcie_speed(hwmgr);
- data->vbios_boot_state.pcie_lane_bootup_value =
- (uint16_t)fiji_get_current_pcie_lane_number(hwmgr);
-
- /* set boot power state */
- ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
- ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
- ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
- ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
- return 0;
-}
-
-static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- return phm_hwmgr_backend_fini(hwmgr);
-}
-
-static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data;
- uint32_t i;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- bool stay_in_boot;
- int result;
-
- data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
-
- data->dll_default_on = false;
- data->sram_end = SMC_RAM_END;
-
- for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
- data->activity_target[i] = FIJI_AT_DFLT;
-
- data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-
- data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT;
- data->mclk_dpm0_activity_target = 0xa;
-
- data->sclk_dpm_key_disabled = 0;
- data->mclk_dpm_key_disabled = 0;
- data->pcie_dpm_key_disabled = 0;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UnTabledHardwareInterface);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep);
-
- data->gpio_debug = 0;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPatchPowerState);
-
- /* need to set voltage control types before EVV patching */
- data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE;
- data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
- data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
-
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
- data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl))
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
- data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
-
- if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
- data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
- data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI);
-
- if (table_info && table_info->cac_dtp_table->usClockStretchAmount)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
-
- fiji_init_dpm_defaults(hwmgr);
-
- /* Get leakage voltage based on leakage ID. */
- fiji_get_evv_voltages(hwmgr);
-
- /* Patch our voltage dependency table with actual leakage voltage
- * We need to perform leakage translation before it's used by other functions
- */
- fiji_complete_dependency_tables(hwmgr);
-
- /* Parse pptable data read from VBIOS */
- fiji_set_private_data_based_on_pptable(hwmgr);
-
- /* ULV Support */
- data->ulv.ulv_supported = true; /* ULV feature is enabled by default */
-
- /* Initialize Dynamic State Adjustment Rule Settings */
- result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
- if (!result) {
- data->uvd_enabled = false;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableSMU7ThermalManagement);
- data->vddc_phase_shed_control = false;
- }
-
- stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StayInBootState);
-
- if (0 == result) {
- struct cgs_system_info sys_info = {0};
-
- data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- FIJI_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
- if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp &&
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucFanControlMode) {
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
- table_info->cac_dtp_table->usOperatingTempMinLimit;
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
- table_info->cac_dtp_table->usOperatingTempMaxLimit;
- hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
- table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
- table_info->cac_dtp_table->usOperatingTempStep;
- hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
- table_info->cac_dtp_table->usTargetOperatingTemp;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport);
- }
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
- else
- data->pcie_gen_cap = (uint32_t)sys_info.value;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
- else
- data->pcie_lane_cap = (uint32_t)sys_info.value;
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- fiji_hwmgr_backend_fini(hwmgr);
- }
-
- return 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL_2);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL_3);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL_4);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_SPREAD_SPECTRUM);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_SPREAD_SPECTRUM_2);
-
- return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_get_memory_type(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
- data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
-
- return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
- return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
- data->acp_power_gated = false;
- data->pg_acp_init = true;
-
- return 0;
-}
-
-static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- data->low_sclk_interrupt_threshold = 0;
-
- return 0;
-}
-
-static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = fiji_read_clock_registers(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to read clock registers!", result = tmp_result);
-
- tmp_result = fiji_get_memory_type(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get memory type!", result = tmp_result);
-
- tmp_result = fiji_enable_acpi_power_management(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ACPI power management!", result = tmp_result);
-
- tmp_result = fiji_init_power_gate_state(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init power gate state!", result = tmp_result);
-
- tmp_result = tonga_get_mc_microcode_version(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get MC microcode version!", result = tmp_result);
-
- tmp_result = fiji_init_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init sclk threshold!", result = tmp_result);
-
- return result;
-}
-
-/**
-* Checks if we want to support voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-*/
-static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr)
-{
- const struct fiji_hwmgr *data =
- (const struct fiji_hwmgr *)(hwmgr->backend);
-
- return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/**
-* Enable voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
- /* enable voltage control */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
- return 0;
-}
-
-/**
-* Remove repeated voltage values and create table with unique values.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param vol_table the pointer to changing voltage table
-* @return 0 in success
-*/
-
-static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr,
- struct pp_atomctrl_voltage_table *vol_table)
-{
- uint32_t i, j;
- uint16_t vvalue;
- bool found = false;
- struct pp_atomctrl_voltage_table *table;
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "Voltage Table empty.", return -EINVAL);
- table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
- GFP_KERNEL);
-
- if (NULL == table)
- return -ENOMEM;
-
- table->mask_low = vol_table->mask_low;
- table->phase_delay = vol_table->phase_delay;
-
- for (i = 0; i < vol_table->count; i++) {
- vvalue = vol_table->entries[i].value;
- found = false;
-
- for (j = 0; j < table->count; j++) {
- if (vvalue == table->entries[j].value) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- table->entries[table->count].value = vvalue;
- table->entries[table->count].smio_low =
- vol_table->entries[i].smio_low;
- table->count++;
- }
- }
-
- memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
- kfree(table);
-
- return 0;
-}
-
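-/* Worked example: given vol_table entries with values
- * { 900, 900, 1000, 1000, 1100 }, the loop above keeps only the first
- * occurrence of each value, so the trimmed table becomes
- * { 900, 1000, 1100 } with count = 3, each entry carrying the smio_low
- * of the first entry that held that value.
- */
-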
-static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].mvdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = fiji_trim_voltage_table(hwmgr, vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim MVDD table.", return result);
-
- return 0;
-}
-
-static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].vddci;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = fiji_trim_voltage_table(hwmgr, vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim VDDCI table.", return result);
-
- return 0;
-}
-
-static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- int i = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != lookup_table->count),
- "Voltage Lookup Table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
-
- vol_table->count = lookup_table->count;
-
- for (i = 0; i < vol_table->count; i++) {
- vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-/* ---- Voltage Tables ----
- * If the voltage table is bigger than what will fit into the
- * state table on the SMC, keep only the highest entries.
- */
-static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr,
- uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table)
-{
- unsigned int i, diff;
-
- if (vol_table->count <= max_vol_steps)
- return;
-
- diff = vol_table->count - max_vol_steps;
-
- for (i = 0; i < max_vol_steps; i++)
- vol_table->entries[i] = vol_table->entries[i + diff];
-
- vol_table->count = max_vol_steps;
-
- return;
-}
-
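-/* Worked example: with count = 10 and max_vol_steps = 8, diff = 2, so
- * entries[0..7] are overwritten with entries[2..9]; the two lowest
- * voltage steps are dropped, count becomes 8, and the table now fits
- * the SMC state table while keeping the highest entries.
- */
-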
-/**
-* Create Voltage Tables.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- int result;
-
- if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
- &(data->mvdd_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve MVDD table.",
- return result);
- } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- result = fiji_get_svi2_mvdd_voltage_table(hwmgr,
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 MVDD table from dependancy table.",
- return result;);
- }
-
- if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
- &(data->vddci_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve VDDCI table.",
- return result);
- } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- result = fiji_get_svi2_vddci_voltage_table(hwmgr,
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.",
- return result);
- }
-
- if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- result = fiji_get_svi2_vdd_voltage_table(hwmgr,
- table_info->vddc_lookup_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from lookup table.",
- return result);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)),
- "Too many voltage values for VDDC. Trimming to fit state table.",
- fiji_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)),
- "Too many voltage values for VDDCI. Trimming to fit state table.",
- fiji_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)),
- "Too many voltage values for MVDD. Trimming to fit state table.",
- fiji_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)));
-
- return 0;
-}
-
-static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- /* Program additional LP registers
- * that are no longer programmed by VBIOS
- */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-
- return 0;
-}
-
-/**
-* Programs static screen detection parameters
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_program_static_screen_threshold_parameters(
- struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Set static screen threshold unit */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
- data->static_screen_threshold_unit);
- /* Set static screen threshold */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
- data->static_screen_threshold);
-
- return 0;
-}
-
-/**
-* Setup display gap for glitch free memory clock switching.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
- uint32_t displayGap =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL);
-
- displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP, DISPLAY_GAP_IGNORE);
-
- displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, displayGap);
-
- return 0;
-}
-
-/**
-* Programs activity state transition voting clients
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Clear reset for voting clients before enabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
- return 0;
-}
-
-static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
- /* Reset voting clients before disabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, 0);
-
- return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, DpmTable),
- &tmp, data->sram_end);
-
- if (0 == result)
- data->dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, SoftRegisters),
- &tmp, data->sram_end);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcRegisterTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->mc_reg_table_start = tmp;
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, FanTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, Version),
- &tmp, data->sram_end);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
-
-/* Copy one arb setting to another and then switch the active set.
- * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
- */
-static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
- uint32_t arb_src, uint32_t arb_dest)
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint32_t burst_time;
- uint32_t mc_cg_config;
-
- switch (arb_src) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
- break;
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
- break;
- default:
- return -EINVAL;
- }
-
- switch (arb_dest) {
- case MC_CG_ARB_FREQ_F0:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
- break;
- case MC_CG_ARB_FREQ_F1:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
- break;
- default:
- return -EINVAL;
- }
-
- mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
- mc_cg_config |= 0x0000000F;
- cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
-
- return 0;
-}
-
-/**
-* Call SMC to reset S0/S1 to S1 and reset SMIO to its initial value.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success.
-*/
-static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
-}
-
-/**
-* Initial switch from ARB F0->F1
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-* This function is to be called from the SetPowerState table.
-*/
-static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
-{
- return fiji_copy_and_switch_arb_sets(hwmgr,
- MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
-{
- uint32_t tmp;
-
- tmp = (cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
- 0x0000ff00) >> 8;
-
- if (tmp == MC_CG_ARB_FREQ_F0)
- return 0;
-
- return fiji_copy_and_switch_arb_sets(hwmgr,
- tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
- struct fiji_single_dpm_table *dpm_table, uint32_t count)
-{
- int i;
- PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER,
- "Fatal error, can not set up single DPM table entries "
- "to exceed max number!",);
-
- dpm_table->count = count;
- for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
- dpm_table->dpm_levels[i].enabled = false;
-
- return 0;
-}
-
-static void fiji_setup_pcie_table_entry(
- struct fiji_single_dpm_table *dpm_table,
- uint32_t index, uint32_t pcie_gen,
- uint32_t pcie_lanes)
-{
- dpm_table->dpm_levels[index].value = pcie_gen;
- dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint32_t i, max_entry;
-
- PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
- data->use_pcie_power_saving_levels), "No pcie performance levels!",
- return -EINVAL);
-
- if (data->use_pcie_performance_levels &&
- !data->use_pcie_power_saving_levels) {
- data->pcie_gen_power_saving = data->pcie_gen_performance;
- data->pcie_lane_power_saving = data->pcie_lane_performance;
- } else if (!data->use_pcie_performance_levels &&
- data->use_pcie_power_saving_levels) {
- data->pcie_gen_performance = data->pcie_gen_power_saving;
- data->pcie_lane_performance = data->pcie_lane_power_saving;
- }
-
- fiji_reset_single_dpm_table(hwmgr,
- &data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK);
-
- if (pcie_table != NULL) {
- /* max_entry is used to make sure we reserve one PCIE level
- * for the boot level (fix for A+A PSPP issue).
- * If the PCIE table from the PPTable has a ULV entry + 8 entries,
- * then ignore the last entry. */
- max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU73_MAX_LEVELS_LINK : pcie_table->count;
- for (i = 1; i < max_entry; i++) {
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- pcie_table->entries[i].gen_speed),
- get_pcie_lane_support(data->pcie_lane_cap,
- pcie_table->entries[i].lane_width));
- }
- data->dpm_table.pcie_speed_table.count = max_entry - 1;
- } else {
- /* Hardcode Pcie Table */
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- data->dpm_table.pcie_speed_table.count = 6;
- }
- /* Populate last level for boot PCIE level, but do not increment count. */
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
- data->dpm_table.pcie_speed_table.count,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- return 0;
-}
-
-/*
- * This function initializes all DPM state tables
- * for SMU7 based on the dependency table.
- * The dynamic state patching function will then trim these
- * state tables to the allowed range based
- * on the power policy or external client requests,
- * such as UVD requests, etc.
- */
-static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
- "SCLK dependency table has to have is missing. "
- "This table is mandatory",
- return -EINVAL);
-
- PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
- "MCLK dependency table has to have is missing. "
- "This table is mandatory",
- return -EINVAL);
-
- /* clear the state table to reset everything to default */
- fiji_reset_single_dpm_table(hwmgr,
- &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS);
- fiji_reset_single_dpm_table(hwmgr,
- &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY);
-
- /* Initialize Sclk DPM table based on allow Sclk values */
- data->dpm_table.sclk_table.count = 0;
- for (i = 0; i < dep_sclk_table->count; i++) {
- if (i == 0 || data->dpm_table.sclk_table.dpm_levels
- [data->dpm_table.sclk_table.count - 1].value !=
- dep_sclk_table->entries[i].clk) {
- data->dpm_table.sclk_table.dpm_levels
- [data->dpm_table.sclk_table.count].value =
- dep_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels
- [data->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
- data->dpm_table.sclk_table.count++;
- }
- }
-
- /* Initialize Mclk DPM table based on allow Mclk values */
- data->dpm_table.mclk_table.count = 0;
- for (i=0; i<dep_mclk_table->count; i++) {
- if ( i==0 || data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count - 1].value !=
- dep_mclk_table->entries[i].clk) {
- data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count].value =
- dep_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
- data->dpm_table.mclk_table.count++;
- }
- }
-
- /* setup PCIE gen speed levels */
- fiji_setup_default_pcie_table(hwmgr);
-
- /* save a copy of the default DPM table */
- memcpy(&(data->golden_dpm_table), &(data->dpm_table),
- sizeof(struct fiji_dpm_table));
-
- return 0;
-}
-
-/**
- * @brief fiji_get_voltage_index
- * Returns the index of the requested voltage record in the lookup table.
- * @param lookup_table - lookup list to search in
- * @param voltage - voltage to look for
- * @return index of the matching (or highest) entry
- */
-uint8_t fiji_get_voltage_index(
- struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
-{
- uint8_t count = (uint8_t) (lookup_table->count);
- uint8_t i;
-
- PP_ASSERT_WITH_CODE((NULL != lookup_table),
- "Lookup Table empty.", return 0);
- PP_ASSERT_WITH_CODE((0 != count),
- "Lookup Table empty.", return 0);
-
- for (i = 0; i < lookup_table->count; i++) {
- /* find first voltage equal or bigger than requested */
- if (lookup_table->entries[i].us_vdd >= voltage)
- return i;
- }
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
-
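-/* Worked example: for a lookup table with us_vdd = { 800, 900, 1000, 1100 }
- * (mV), a request for 950 returns index 2 (the first entry >= 950), while
- * a request for 1200 falls through the loop and returns count - 1 = 3,
- * i.e. the highest available voltage.
- */
-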
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- /* the table is already byte-swapped, so in order to use the values
- * from it we need to swap them back.
- * We are populating vddc CAC data to BapmVddc table
- * in split and merged mode
- */
- for (count = 0; count < lookup_table->count; count++) {
- index = fiji_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 -
- (lookup_table->entries[index].us_cac_low *
- VOLTAGE_SCALE)) / 25);
- table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 -
- (lookup_table->entries[index].us_cac_high *
- VOLTAGE_SCALE)) / 25);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result;
-
- result = fiji_populate_cac_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_Ulv *state)
-{
- int result = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
- }
- return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int32_t fiji_get_dpm_level_enable_mask_value(
- struct fiji_single_dpm_table* dpm_table)
-{
- int32_t i;
- int32_t mask = 0;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask = mask << 1;
- if (dpm_table->dpm_levels[i - 1].enabled)
- mask |= 0x1;
- else
- mask &= 0xFFFFFFFE;
- }
- return mask;
-}
-
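-/* Worked example: for a four-level table where levels 0, 1 and 3 are
- * enabled, the top-down walk above builds the mask as follows:
- *   i = 4 (level 3, enabled)  -> mask = 0x1
- *   i = 3 (level 2, disabled) -> mask = 0x2
- *   i = 2 (level 1, enabled)  -> mask = 0x5
- *   i = 1 (level 0, enabled)  -> mask = 0xB (0b1011)
- * so bit n of the result corresponds to DPM level n.
- */
-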
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t ref_clock;
- uint32_t ref_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
- ref_clock = atomctrl_get_reference_clock(hwmgr);
- ref_divider = 1 + dividers.uc_pll_ref_div;
-
- /* the low 14 bits are the fraction and the high 12 bits are the divider */
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup */
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- struct pp_atomctrl_internal_ss_info ssInfo;
-
- uint32_t vco_freq = clock * dividers.uc_pll_post_div;
- if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
- vco_freq, &ssInfo)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- *
- * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
- */
- uint32_t clk_s = ref_clock * 5 /
- (ref_divider * ssInfo.speed_spectrum_rate);
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
- fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
- CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
- }
- }
-
- sclk->SclkFrequency = clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
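-/* Worked example for the spread-spectrum math above (illustrative values;
- * assumes clocks in 10 kHz units and the spread rate in kHz): with
- * ref_clock = 10000 (100 MHz), ref_divider = 2 and
- * speed_spectrum_rate = 31, clk_s = 10000 * 5 / (2 * 31) = 806.
- * With speed_spectrum_percentage = 50 (0.5%) and fbdiv = 0x200000,
- * clk_v = 4 * 50 * 0x200000 / (806 * 10000) = 52.
- */
-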
-static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
-{
- uint32_t i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vddci_table =
- &(data->vddci_voltage_table);
-
- for (i = 0; i < vddci_table->count; i++) {
- if (vddci_table->entries[i].value >= vddci)
- return vddci_table->entries[i].value;
- }
-
- PP_ASSERT_WITH_CODE(false,
- "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i-1].value);
-}
-
-static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table* dep_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- *voltage = *mvdd = 0;
-
- /* the clock-voltage dependency table is empty */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find the first sclk greater than or equal to the request */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = fiji_find_closest_vddci(hwmgr,
- (dep_table->entries[i].vddc -
- (uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
- /* sclk is bigger than max sclk in the dependence table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = fiji_find_closest_vddci(hwmgr,
- (dep_table->entries[i - 1].vddc -
- (uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
- else if (dep_table->entries[i - 1].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
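-/* Note on the packed format produced above: the returned *voltage word
- * carries VDDC in the low bits (VDDC_SHIFT), VDDCI at VDDCI_SHIFT and the
- * phase count at PHASES_SHIFT, each voltage scaled by VOLTAGE_SCALE.
- * Illustrative example (assuming VOLTAGE_SCALE = 4): vddc = 1000 mV and
- * vddci = 950 mV pack as
- * (4000 << VDDC_SHIFT) | (3800 << VDDCI_SHIFT) | (1 << PHASES_SHIFT).
- */
-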
-static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
- uint32_t clock_insr)
-{
- uint8_t i;
- uint32_t temp;
- uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
-
- PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
- for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
- temp = clock >> i;
-
- if (temp >= min || i == 0)
- break;
- }
- return i;
-}
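-/* Worked example (assuming FIJI_MAX_DEEPSLEEP_DIVIDER_ID = 5 and
- * FIJI_MINIMUM_ENGINE_CLOCK = 2500, i.e. 25 MHz in 10 kHz units): for
- * clock = 60000 (600 MHz) and clock_insr = 0, min = 2500; i = 5 gives
- * 60000 >> 5 = 1875 < 2500, then i = 4 gives 60000 >> 4 = 3750 >= 2500,
- * so the function returns sleep divider ID 4.
- */
-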
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-
-static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU73_Discrete_GraphicsLevel *level)
-{
- int result;
- /* PP_Clocks minClocks; */
- uint32_t threshold, mvdd;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = fiji_calculate_sclk_params(hwmgr, clock, level);
-
- /* populate graphics levels */
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- &level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
-
- level->SclkFrequency = clock;
- level->ActivityLevel = sclk_al_threshold;
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
-
- threshold = clock * data->fast_watermark_threshold / 100;
-
-
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
-
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-
- return 0;
-}
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
- struct SMU73_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
- uint8_t hightest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = fiji_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)data->activity_target[i],
- &levels[i]);
- if (result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now.*/
- levels[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry)? i : max_entry);
- } else {
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (hightest_pcie_level_enabled + 1))) != 0 ))
- hightest_pcie_level_enabled++;
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0 ))
- lowest_pcie_level_enabled++;
-
- while ((count < hightest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0 ))
- count++;
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
- hightest_pcie_level_enabled ?
- (lowest_pcie_level_enabled + 1 + count) :
- hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to hightest_pcie_level_enabled */
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* the level count is sent to the SMC once at SMC table init and never changes */
- result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
-
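-/* Worked example for the PCIe level selection above: with
- * pcie_dpm_enable_mask = 0b0111, the first loop stops at
- * hightest_pcie_level_enabled = 2 (bit 3 is clear), the second finds
- * lowest_pcie_level_enabled = 0 (bit 0 is set), and count stays 0 since
- * bit 1 is set, giving mid_pcie_level_enabled = min(0 + 1 + 0, 2) = 1.
- * SCLK level 0 then uses PCIe level 0, level 1 uses PCIe level 1, and
- * all higher SCLK levels use PCIe level 2.
- */
-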
-/**
- * MCLK Frequency Ratio
- * SEQ_CG_RESP Bit[31:24] - 0x0
- * Bit[27:24] - DDR3 Frequency ratio
- * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
- * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
- * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
- * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
- * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
- * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
- * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
- * 400 < 0x7 <= 450MHz, 800 < 0xF
- */
-static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
-{
- if (mem_clock <= 10000) return 0x0;
- if (mem_clock <= 15000) return 0x1;
- if (mem_clock <= 20000) return 0x2;
- if (mem_clock <= 25000) return 0x3;
- if (mem_clock <= 30000) return 0x4;
- if (mem_clock <= 35000) return 0x5;
- if (mem_clock <= 40000) return 0x6;
- if (mem_clock <= 45000) return 0x7;
- if (mem_clock <= 50000) return 0x8;
- if (mem_clock <= 55000) return 0x9;
- if (mem_clock <= 60000) return 0xa;
- if (mem_clock <= 65000) return 0xb;
- if (mem_clock <= 70000) return 0xc;
- if (mem_clock <= 75000) return 0xd;
- if (mem_clock <= 80000) return 0xe;
- /* mem_clock > 800MHz */
- return 0xf;
-}
-
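-/* Usage example: mem_clock is in 10 kHz units, so a 600 MHz memory clock
- * is passed as 60000; it falls into the 550-600 MHz bucket and the
- * function returns 0xA, which fiji_calculate_mclk_params() below stores
- * in mclk->FreqRange.
- */
-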
-/**
-* Populates the SMC MCLK structure using the provided memory clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the memory clock to use to populate the structure
-* @param mclk the SMC MCLK structure to be populated
-*/
-static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
-{
- struct pp_atomctrl_memory_clock_param mem_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to get Memory PLL Dividers.",);
-
- /* Save the result data to the output memory level structure */
- mclk->MclkFrequency = clock;
- mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
- mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
-
- return result;
-}
-
-static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
-
- if (table_info->vdd_dep_on_mclk) {
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- &mem_level->MinVoltage, &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
-
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- /* enable stutter mode if all of the following conditions apply:
- * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
- * &(data->DisplayTiming.numExistingDisplays));
- */
- data->display_timing.num_existing_displays = 1;
-
- if ((data->mclk_stutter_mode_threshold) &&
- (clock <= data->mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled) &&
- (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
- SMU73_MAX_LEVELS_MEMORY;
- struct SMU73_Discrete_MemoryLevel *levels =
- data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = fiji_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (result)
- return result;
- }
-
- /* Only enable level 0 for now. */
- levels[0].EnabledForActivity = 1;
-
- /* In order to prevent MC activity in stutter mode from pushing DPM up,
- * the UVD change complements this by putting the MCLK in
- * a higher state by default, such that we are not affected by
- * the up threshold or MCLK DPM latency.
- */
- levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high */
- levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- /* the level count is sent to the SMC once at SMC table init and never changes */
- result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision whether MVDD should be high or low
-* @param smio_pat the SMIO pattern structure to be populated
-*/
-int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find the first MVDD entry whose clock covers the requested clock */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
-	} else {
-		return -EINVAL;
-	}
-
- return 0;
-}
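
For reference, a hypothetical call site for the helper above, mirroring its use in the ACPI-level code later in this file; smio_value is a made-up consumer, and clocks throughout this file are in 10 kHz units:

	SMIO_Pattern pattern;
	uint32_t smio_value;	/* hypothetical consumer of the result */

	if (!fiji_populate_mvdd_value(hwmgr,
			data->dpm_table.mclk_table.dpm_levels[0].value,
			&pattern))
		smio_value = pattern.Voltage;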
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_atomctrl_clock_dividers_vi dividers;
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (!data->sclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- table->ACPILevel.SclkFrequency =
- data->dpm_table.sclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- table->ACPILevel.SclkFrequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table",);
- } else {
- table->ACPILevel.SclkFrequency =
- data->vbios_boot_state.sclk_bootup_value;
- table->ACPILevel.MinVoltage =
- data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
- }
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- if (!data->mclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",);
- } else {
- table->MemoryACPILevel.MclkFrequency =
- data->vbios_boot_state.mclk_bootup_value;
- table->MemoryACPILevel.MinVoltage =
- data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
- }
-
- us_mvdd = 0;
- if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
-	else if (!fiji_populate_mvdd_value(hwmgr,
-			data->dpm_table.mclk_table.dpm_levels[0].value,
-			&vol_level))
-		us_mvdd = vol_level.Voltage;
-
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = false;
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
-	for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- table->VceLevel[count].MinVoltage |=
- ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"cannot find divider id for VCE engine clock",
-				return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->AcpLevelCount = (uint8_t)(mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
-		table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
-		table->AcpLevel[count].MinVoltage = 0;
-		table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-				VOLTAGE_SCALE) << VDDC_SHIFT;
- table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"cannot find divider id for ACP engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"cannot find divider id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burstTime;
-	uint32_t state, trrds, trrdl;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
- state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
- trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
- trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
- arb_regs->TRRDS = (uint8_t)trrds;
- arb_regs->TRRDL = (uint8_t)trrdl;
-
- return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = fiji_populate_memory_timing_parameters(hwmgr,
- data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result)
- break;
- }
- }
-
- if (!result)
- result = fiji_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU73_Discrete_MCArbDramTimingTable),
- data->sram_end);
- return result;
-}
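
The nested loops above fill a small matrix of DRAM timing entries, one per (SCLK level, MCLK level) pair, which the SMC indexes at run time when either clock switches:

	/*
	 * arb_regs.entries layout (sketch):
	 *
	 *                 mclk level 0    mclk level 1   ...
	 *  sclk level 0   entries[0][0]   entries[0][1]
	 *  sclk level 1   entries[1][0]   entries[1][1]
	 *  ...
	 */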
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"cannot find divider id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
-		PP_ASSERT_WITH_CODE((0 == result),
-				"cannot find divider id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
- }
- return result;
-}
-
-static int fiji_find_boot_level(struct fiji_single_dpm_table *table,
- uint32_t value, uint32_t *boot_level)
-{
- int result = -EINVAL;
- uint32_t i;
-
- for (i = 0; i < table->count; i++) {
- if (value == table->dpm_levels[i].value) {
- *boot_level = i;
- result = 0;
- }
- }
- return result;
-}
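
The helper above keeps scanning after a match, which is harmless as long as DPM level values are unique; an early return would be the more idiomatic form (a sketch with the same semantics for unique values):

	for (i = 0; i < table->count; i++) {
		if (value == table->dpm_levels[i].value) {
			*boot_level = i;
			return 0;	/* stop at the first match */
		}
	}
	return -EINVAL;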
-
-static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = fiji_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = fiji_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
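
Note that the result of the first fiji_find_boot_level() call above is overwritten by the second, and the function returns 0 regardless, so a failed SCLK or MCLK lookup silently leaves the corresponding boot level at 0. If that is not intended, propagating the error would look like this (sketch):

	result = fiji_find_boot_level(&(data->dpm_table.sclk_table),
			data->vbios_boot_state.sclk_bootup_value,
			(uint32_t *)&(table->GraphicsBootLevel));
	if (result)
		return result;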
-
-static int fiji_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
- for (level = 0; level < count; level++) {
-		if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- data->vbios_boot_state.sclk_bootup_value) {
- data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
-		if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- data->vbios_boot_state.mclk_bootup_value) {
- data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-	/* Read SMU_EFUSE to calculate RO and determine whether the part
-	 * is SS or FF; if RO >= 1660MHz, the part is FF.
-	 */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
-
- /* Populate Stretch amount */
- data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
- data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- staticEnable, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][0];
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
- GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].
- SclkFrequency) / 100);
- if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
- clock_freq_u16 &&
- fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
- clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (fiji_clock_stretch_amount_conversion
- [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].maxFreq);
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
- data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency for clock stretcher.
- */
- for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- data->smc_state_table.GraphicsLevel[j].SclkFrequency);
-			/* Check the allowed frequency against the sclk level[j].
-			 * The SCLK's endianness has already been converted,
-			 * and it is in 10 kHz units, as opposed to the data
-			 * table, which is in MHz units.
-			 */
- if (clock_freq >=
- (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
- cks_setting |= 0x2;
- if (clock_freq <
- (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
- cks_setting |= 0x1;
- }
- data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.
- ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
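
For reference, the ring-oscillator (RO) decode at the top of the function, factored into a helper; the linear mappings and the 1660 SS/FF cutoff are exactly those used above:

	/* Decode the 8-bit RO fuse; efuse2 selects which linear mapping applies. */
	static unsigned int fiji_ro_from_efuse(unsigned int efuse, unsigned int efuse2)
	{
		if (efuse2 == 1)
			return (2300 - 1350) * efuse / 255 + 1350;
		return (2500 - 1000) * efuse / 255 + 1000;
	}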
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
-	if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",);
- }
- /* Set Vddci Voltage Controller */
-	if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
-	if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-	} else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
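
Spelled out, the VRConfig word built above packs one regulator-source code per rail; a sketch for the all-SVI2 case, using the same constants as the function:

	uint32_t vr_config = 0;

	vr_config |= VR_MERGED_WITH_VDDC << VRCONF_VDDGFX_SHIFT; /* VDDGFX */
	vr_config |= VR_SVI2_PLANE_1;                            /* VDDC   */
	vr_config |= VR_SVI2_PLANE_2 << VRCONF_VDDCI_SHIFT;      /* VDDCI  */
	vr_config |= VR_SVI2_PLANE_2 << VRCONF_MVDD_SHIFT;       /* MVDD   */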
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table);
- const struct fiji_ulv_parm *ulv = &(data->ulv);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-
- result = fiji_setup_default_dpm_tables(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup default DPM tables!", return result);
-
-	if (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
- fiji_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = fiji_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
- }
-
- result = fiji_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = fiji_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = fiji_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = fiji_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = fiji_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = fiji_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result);
-
- result = fiji_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = fiji_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = fiji_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = fiji_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
-	result = fiji_populate_smc_initial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = fiji_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- FIJI_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- FIJI_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
-	table->PCIeBootLinkLevel = 0; /* 0: Gen1, 1: Gen2, 2: Gen3 */
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = fiji_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = FIJI_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = FIJI_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* For polarity, read GPIOPAD_A with the assigned GPIO pin,
-		 * since VBIOS will program this register to set the 'inactive
-		 * state'; the driver can then determine the 'active state' from
-		 * this and program the SMU with the correct polarity
-		 */
-		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
-				(1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1 : 0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all DPM data to SMC memory (DPM levels, level count, etc.) */
- result = fiji_copy_bytes_to_smc(hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
- data->sram_end);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
-
-	/* This is a read-modify-write on the first byte of the ARB table.
-	 * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
-	 * is the field 'current'.
-	 * This solution is ugly, but we never write the whole table, only
-	 * individual fields in it.
-	 * In reality this field should not be in that structure
-	 * but in a soft register.
-	 */
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, &tmp, data->sram_end);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return fiji_write_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, tmp, data->sram_end);
-}
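
Since SMC SRAM dwords come back in big-endian order, the structure's first byte (the 'current' field) presumably lands in bits 31:24 of tmp, which is why the mask above clears only the top byte before OR-ing in MC_CG_ARB_FREQ_F1.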
-
-static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
-{
-	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableVRHotGPIOInterrupt);
-
- return 0;
-}
-
-static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- SCLK_PWRMGT_OFF, 0);
- return 0;
-}
-
-static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
-
- return 0;
-}
-
-static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
-
- return 0;
-}
-
-static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to enable Master Deep Sleep switch failed!",
- return -1);
- } else {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t val, val0, val2;
- uint32_t i, cpl_cntl, cpl_threshold, mc_threshold;
-
- /* enable SCLK dpm */
-	if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
- "Failed to enable SCLK DPM during DPM Start Function!",
- return -1);
-
- /* enable MCLK dpm */
-	if (!data->mclk_dpm_key_disabled) {
- cpl_threshold = 0;
- mc_threshold = 0;
-
- /* Read per MCD tile (0 - 7) */
- for (i = 0; i < 8; i++) {
- PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i);
- val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000;
- if (0xf0000000 != val) {
-				/* count the number of MCQs that have channel(s) enabled */
-				cpl_threshold++;
-				/* only harvested (3) or full (4) channel configs are supported */
-				mc_threshold = val ? 3 : 4;
-			}
-		}
-		PP_ASSERT_WITH_CODE(0 != cpl_threshold,
-				"Number of MCQ is zero!", return -EINVAL);
-
- mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) <<
- LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) |
- LCAC_MC0_CNTL__MC0_ENABLE_MASK;
- cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) <<
- LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) |
- LCAC_CPL_CNTL__CPL_ENABLE_MASK;
- cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT));
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, mc_threshold);
- if (8 == cpl_threshold) {
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC2_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC3_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC4_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC5_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC6_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC7_CNTL, mc_threshold);
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, cpl_cntl);
-
- udelay(5);
-
- mc_threshold = mc_threshold |
- (1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT);
- cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, mc_threshold);
- if (8 == cpl_threshold) {
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC2_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC3_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC4_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC5_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC6_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC7_CNTL, mc_threshold);
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, cpl_cntl);
-
- /* Program CAC_EN per MCD (0-7) Tile */
- val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD);
- val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MC_RD_ENABLE_MASK);
-
- for (i = 0; i < 8; i++) {
- /* Enable MCD i Tile read & write */
- val2 = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) |
- (1 << i));
- cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2);
-			/* Enable CAC_ON for MCD i Tile */
- val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL);
- val2 |= MC_SEQ_CNTL__CAC_EN_MASK;
- cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2);
- }
- /* Set MC_CONFIG_MCD back to its default setting val0 */
- cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Enable)),
- "Failed to enable MCLK DPM during DPM Start Function!",
- return -1);
- }
- return 0;
-}
-
-static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/* enable general power management */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 1);
- /* enable sclk deep sleep */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 1);
- /* prepare for PCIE DPM */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + offsetof(SMU73_SoftRegisters,
- VoltageChangeTimeout), 0x1000);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- SWRST_COMMAND_1, RESETLC, 0x0);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -1);
-
- if (fiji_enable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!\n");
- return -1;
- }
-
- /* enable PCIE dpm */
-	if (!data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Enable)),
- "Failed to enable pcie DPM during DPM Start Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* disable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable) == 0),
- "Failed to disable SCLK DPM!",
- return -1);
-
- /* disable MCLK dpm */
- if (!data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
- "Failed to force MCLK DPM0!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable) == 0),
- "Failed to disable MCLK DPM!",
- return -1);
- }
-
- return 0;
-}
-
-static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* disable general power management */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 0);
- /* disable sclk deep sleep */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 0);
-
- /* disable PCIE dpm */
- if (!data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Disable) == 0),
- "Failed to disable pcie DPM during DPM Stop Function!",
- return -1);
- }
-
- if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
-		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
- return -1;
- }
-
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Disable) == 0),
- "Failed to disable voltage DPM during DPM Stop Function!",
- return -1);
-
- return 0;
-}
-
-static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
- uint32_t sources)
-{
- bool protection;
- enum DPM_EVENT_SRC src;
-
- switch (sources) {
- default:
-		printk(KERN_ERR "Unknown throttling event sources.\n");
- /* fall through */
- case 0:
- protection = false;
- /* src is unused */
- break;
- case (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << PHM_AutoThrottleSource_External):
- protection = true;
- src = DPM_EVENT_SRC_EXTERNAL;
- break;
- case (1 << PHM_AutoThrottleSource_External) |
- (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
- break;
- }
- /* Order matters - don't enable thermal protection for the wrong source. */
- if (protection) {
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
- DPM_EVENT_SRC, src);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS,
- !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController));
- } else
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS, 1);
-}
-
-static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!(data->active_auto_throttle_sources & (1 << source))) {
- data->active_auto_throttle_sources |= 1 << source;
- fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->active_auto_throttle_sources & (1 << source)) {
- data->active_auto_throttle_sources &= ~(1 << source);
- fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
-	tmp_result = (!fiji_is_dpm_running(hwmgr)) ? 0 : -1;
-	PP_ASSERT_WITH_CODE(tmp_result == 0,
-			"DPM is already running right now, no need to enable DPM!",
-			return 0);
-
- if (fiji_voltage_control(hwmgr)) {
- tmp_result = fiji_enable_voltage_control(hwmgr);
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "Failed to enable voltage control!",
- result = tmp_result);
- }
-
- if (fiji_voltage_control(hwmgr)) {
- tmp_result = fiji_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to construct voltage tables!",
- result = tmp_result);
- }
-
- tmp_result = fiji_initialize_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize MC reg table!", result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
-
- tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program static screen threshold parameters!",
- result = tmp_result);
-
- tmp_result = fiji_enable_display_gap(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable display gap!", result = tmp_result);
-
- tmp_result = fiji_program_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program voting clients!", result = tmp_result);
-
- tmp_result = fiji_process_firmware_header(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to process firmware header!", result = tmp_result);
-
- tmp_result = fiji_initial_switch_from_arbf0_to_f1(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize switch from ArbF0 to F1!",
- result = tmp_result);
-
- tmp_result = fiji_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize SMC table!", result = tmp_result);
-
- tmp_result = fiji_init_arb_table_index(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize ARB table index!", result = tmp_result);
-
- tmp_result = fiji_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate PM fuses!", result = tmp_result);
-
- tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
-
- tmp_result = tonga_notify_smc_display_change(hwmgr, false);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify no display!", result = tmp_result);
-
- tmp_result = fiji_enable_sclk_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SCLK control!", result = tmp_result);
-
- tmp_result = fiji_enable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ULV!", result = tmp_result);
-
- tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable deep sleep master switch!", result = tmp_result);
-
- tmp_result = fiji_start_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to start DPM!", result = tmp_result);
-
- tmp_result = fiji_enable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SMC CAC!", result = tmp_result);
-
- tmp_result = fiji_enable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable power containment!", result = tmp_result);
-
- tmp_result = fiji_power_control_set_level(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
-			"Failed to set power control level!", result = tmp_result);
-
- tmp_result = fiji_enable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable thermal auto throttle!", result = tmp_result);
-
- return result;
-}
-
-static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is not running right now, no need to disable DPM!",
- return 0);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
-
- tmp_result = fiji_disable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable power containment!", result = tmp_result);
-
- tmp_result = fiji_disable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable SMC CAC!", result = tmp_result);
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
-
- tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable thermal auto throttle!", result = tmp_result);
-
- tmp_result = fiji_stop_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to stop DPM!", result = tmp_result);
-
- tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable deep sleep master switch!", result = tmp_result);
-
- tmp_result = fiji_disable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable ULV!", result = tmp_result);
-
- tmp_result = fiji_clear_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to clear voting clients!", result = tmp_result);
-
- tmp_result = fiji_reset_to_default(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to reset to default!", result = tmp_result);
-
- tmp_result = fiji_force_switch_to_arbf0(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to force the switch to ArbF0!", result = tmp_result);
-
- return result;
-}
-
-static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t level, tmp;
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- (1 << level));
- }
- }
- return 0;
-}
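
Each "while (tmp >>= 1) level++;" loop above computes the index of the highest set bit in the enable mask; with kernel bit helpers the same thing reads as fls() (a sketch):

	#include <linux/bitops.h>

	static uint32_t fiji_get_highest_enabled_level(uint32_t mask)
	{
		/* fls() returns the 1-based position of the highest set bit. */
		return mask ? fls(mask) - 1 : 0;
	}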
-
-static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- phm_apply_dal_min_voltage_request(hwmgr);
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- }
- return 0;
-}
-
-static int fiji_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!fiji_is_dpm_running(hwmgr))
- return -EINVAL;
-
- if (!data->pcie_dpm_key_disabled) {
- smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
- }
-
- return fiji_upload_dpmlevel_enable_mask(hwmgr);
-}
-
-static uint32_t fiji_get_lowest_enabled_level(
- struct pp_hwmgr *hwmgr, uint32_t mask)
-{
- uint32_t level = 0;
-
-	while (0 == (mask & (1 << level)))
- level++;
-
- return level;
-}
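
Note that this loop never terminates when mask is 0, so callers must keep guarding with the enable-mask checks they already perform; the kernel bit-helper equivalent is __ffs() (a sketch):

	#include <linux/bitops.h>

	static uint32_t fiji_get_lowest_enabled_level_alt(uint32_t mask)
	{
		/* __ffs() is undefined for 0, matching the caller contract above. */
		return __ffs(mask);
	}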
-
-static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data =
- (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t level;
-
-	if (!data->sclk_dpm_key_disabled) {
-		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
-			level = fiji_get_lowest_enabled_level(hwmgr,
-					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SCLKDPM_SetEnabledMask,
-					(1 << level));
-		}
-	}
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = fiji_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- level = fiji_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- (1 << level));
- }
- }
-
- return 0;
-}
-
-static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
-{
- int ret = 0;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = fiji_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = fiji_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = fiji_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
- hwmgr->dpm_level = level;
-
- return ret;
-}
-
-static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
- return sizeof(struct fiji_power_state);
-}
-
-static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
- void *state, struct pp_power_state *power_state,
- void *pp_table, uint32_t classification_flag)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_power_state *fiji_power_state =
- (struct fiji_power_state *)(&(power_state->hardware));
- struct fiji_performance_level *performance_level;
- ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
- ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
- (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
- ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (ATOM_Tonga_SCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
- ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
- (ATOM_Tonga_MCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
-	/* The following fields are not initialized here: id, orderedList, allStatesList */
- power_state->classification.ui_label =
- (le16_to_cpu(state_entry->usClassification) &
- ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
- ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
- power_state->classification.flags = classification_flag;
- /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
- power_state->classification.temporary_state = false;
- power_state->classification.to_be_deleted = false;
-
- power_state->validation.disallowOnDC =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_DISALLOW_ON_DC));
-
- power_state->pcie.lanes = 0;
-
- power_state->display.disableFrameModulation = false;
- power_state->display.limitRefreshrate = false;
- power_state->display.enableVariBright =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_ENABLE_VARIBRIGHT));
-
- power_state->validation.supportedPowerLevels = 0;
- power_state->uvd_clocks.VCLK = 0;
- power_state->uvd_clocks.DCLK = 0;
- power_state->temperatures.min = 0;
- power_state->temperatures.max = 0;
-
- performance_level = &(fiji_power_state->performance_levels
- [fiji_power_state->performance_level_count++]);
-
- PP_ASSERT_WITH_CODE(
- (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS),
-		"Performance level count exceeds the SMC limit!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (fiji_power_state->performance_level_count <=
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
-		"Performance level count exceeds the driver limit!",
- return -1);
-
- /* Performance levels are arranged from low to high. */
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexLow].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
- [state_entry->ucEngineClockIndexLow].ulSclk;
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenLow);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-			state_entry->ucPCIELaneLow);
-
- performance_level = &(fiji_power_state->performance_levels
- [fiji_power_state->performance_level_count++]);
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexHigh].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
- [state_entry->ucEngineClockIndexHigh].ulSclk;
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenHigh);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
-
- return 0;
-}
-
-static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index, struct pp_power_state *state)
-{
- int result;
- struct fiji_power_state *ps;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- state->hardware.magic = PHM_VIslands_Magic;
-
- ps = (struct fiji_power_state *)(&state->hardware);
-
- result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
- fiji_get_pp_table_entry_callback_func);
-
-	/* This is the earliest time we have all the dependency tables and the
-	 * VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry retrieves
-	 * the VBIOS boot state. If there is only one VDDCI/MCLK level, check
-	 * that it matches the VBIOS boot state.
-	 */
- if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
- if (dep_mclk_table->entries[0].clk !=
- data->vbios_boot_state.mclk_bootup_value)
-			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot MCLK level\n");
- if (dep_mclk_table->entries[0].vddci !=
- data->vbios_boot_state.vddci_bootup_value)
-			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
-					"does not match VBIOS boot VDDCI level\n");
- }
-
- /* set DC compatible flag if this state supports DC */
- if (!state->validation.disallowOnDC)
- ps->dc_compatible = true;
-
- if (state->classification.flags & PP_StateClassificationFlag_ACPI)
- data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
-
- ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
- ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
-
- if (!result) {
- uint32_t i;
-
- switch (state->classification.ui_label) {
- case PP_StateUILabel_Performance:
- data->use_pcie_performance_levels = true;
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_performance.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_performance.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_performance.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.max =
- ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_performance.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- case PP_StateUILabel_Battery:
- data->use_pcie_power_saving_levels = true;
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_power_saving.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_power_saving.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_power_saving.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.max =
- ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_power_saving.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- default:
- break;
- }
- }
- return 0;
-}
-
-static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
- struct pp_power_state *request_ps,
- const struct pp_power_state *current_ps)
-{
- struct fiji_power_state *fiji_ps =
- cast_phw_fiji_power_state(&request_ps->hardware);
- uint32_t sclk;
- uint32_t mclk;
- struct PP_Clocks minimum_clocks = {0};
- bool disable_mclk_switching;
- bool disable_mclk_switching_for_frame_lock;
- struct cgs_display_info info = {0};
- const struct phm_clock_and_voltage_limits *max_limits;
- uint32_t i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int32_t count;
- int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
- data->battery_state = (PP_StateUILabel_Battery ==
- request_ps->classification.ui_label);
-
- PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",);
-
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
- &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
- &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
- /* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
- for (i = 0; i < fiji_ps->performance_level_count; i++) {
- if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk)
- fiji_ps->performance_levels[i].memory_clock = max_limits->mclk;
- if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk)
- fiji_ps->performance_levels[i].engine_clock = max_limits->sclk;
- }
- }
-
- fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
- fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
-	/* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
-
-	/* TODO: GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
- stable_pstate_sclk = (max_limits->sclk * 75) / 100;
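-		/* A stable p-state pins SCLK at 75% of the AC limit; the loop
-		 * below then snaps that target down to the nearest entry in
-		 * the SCLK dependency table.
-		 */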
-
- for (count = table_info->vdd_dep_on_sclk->count - 1;
- count >= 0; count--) {
- if (stable_pstate_sclk >=
- table_info->vdd_dep_on_sclk->entries[count].clk) {
- stable_pstate_sclk =
- table_info->vdd_dep_on_sclk->entries[count].clk;
- break;
- }
- }
-
- if (count < 0)
- stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
-
- stable_pstate_mclk = max_limits->mclk;
-
- minimum_clocks.engineClock = stable_pstate_sclk;
- minimum_clocks.memoryClock = stable_pstate_mclk;
- }
-
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- fiji_ps->performance_levels[1].engine_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- fiji_ps->performance_levels[1].memory_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
- disable_mclk_switching_for_frame_lock = phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
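-	/* An MCLK switch has to complete within a vblank window; with more
-	 * than one active display (or frame lock) no common window exists,
-	 * so memory reclocking is disabled.
-	 */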
- disable_mclk_switching = (1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock;
-
- sclk = fiji_ps->performance_levels[0].engine_clock;
- mclk = fiji_ps->performance_levels[0].memory_clock;
-
- if (disable_mclk_switching)
- mclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].memory_clock;
-
- if (sclk < minimum_clocks.engineClock)
- sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
- max_limits->sclk : minimum_clocks.engineClock;
-
- if (mclk < minimum_clocks.memoryClock)
- mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
- max_limits->mclk : minimum_clocks.memoryClock;
-
- fiji_ps->performance_levels[0].engine_clock = sclk;
- fiji_ps->performance_levels[0].memory_clock = mclk;
-
- fiji_ps->performance_levels[1].engine_clock =
- (fiji_ps->performance_levels[1].engine_clock >=
- fiji_ps->performance_levels[0].engine_clock) ?
- fiji_ps->performance_levels[1].engine_clock :
- fiji_ps->performance_levels[0].engine_clock;
-
- if (disable_mclk_switching) {
- if (mclk < fiji_ps->performance_levels[1].memory_clock)
- mclk = fiji_ps->performance_levels[1].memory_clock;
-
- fiji_ps->performance_levels[0].memory_clock = mclk;
- fiji_ps->performance_levels[1].memory_clock = mclk;
- } else {
- if (fiji_ps->performance_levels[1].memory_clock <
- fiji_ps->performance_levels[0].memory_clock)
- fiji_ps->performance_levels[1].memory_clock =
- fiji_ps->performance_levels[0].memory_clock;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- for (i = 0; i < fiji_ps->performance_level_count; i++) {
- fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
- fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
- fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-			fiji_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
- }
- }
-
- return 0;
-}
-
-static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- uint32_t sclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].engine_clock;
- struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- uint32_t mclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].memory_clock;
- uint32_t i;
- struct cgs_display_info info = {0};
-
- data->need_update_smu7_dpm_table = 0;
-
- for (i = 0; i < sclk_table->count; i++) {
- if (sclk == sclk_table->dpm_levels[i].value)
- break;
- }
-
-	if (i >= sclk_table->count) {
-		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-	} else {
-		if (data->display_timing.min_clock_in_sr !=
- hwmgr->display_config.min_core_set_clock_in_sr)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- for (i = 0; i < mclk_table->count; i++) {
- if (mclk == mclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= mclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
- return 0;
-}
-
-static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
- const struct fiji_power_state *fiji_ps)
-{
- uint32_t i;
- uint32_t sclk, max_sclk = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
- for (i = 0; i < fiji_ps->performance_level_count; i++) {
- sclk = fiji_ps->performance_levels[i].engine_clock;
- if (max_sclk < sclk)
- max_sclk = sclk;
- }
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
- return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
- dpm_table->pcie_speed_table.dpm_levels
- [dpm_table->pcie_speed_table.count - 1].value :
- dpm_table->pcie_speed_table.dpm_levels[i].value);
- }
-
- return 0;
-}
-
-static int fiji_request_link_speed_change_before_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_nps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- const struct fiji_power_state *fiji_cps =
- cast_const_phw_fiji_power_state(states->pcurrent_state);
-
- uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps);
- uint16_t current_link_speed;
-
- if (data->force_pcie_gen == PP_PCIEGenInvalid)
- current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps);
- else
- current_link_speed = data->force_pcie_gen;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->pspp_notify_required = false;
- if (target_link_speed > current_link_speed) {
-		switch (target_link_speed) {
- case PP_PCIEGen3:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
- break;
- data->force_pcie_gen = PP_PCIEGen2;
- if (current_link_speed == PP_PCIEGen2)
-			if (current_link_speed == PP_PCIEGen2)
-				break;
-			/* fall through */
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
- break;
- default:
- data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- data->pspp_notify_required = true;
- }
-
- return 0;
-}
-
-static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
- "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
- "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result = 0;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t sclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].engine_clock;
- uint32_t mclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].memory_clock;
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
- struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
- dpm_table->sclk_table.dpm_levels
- [dpm_table->sclk_table.count - 1].value = sclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
- /* Need to do calculation based on the golden DPM table
- * as the Heatmap GPU Clock axis is also based on the default values
- */
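-			/* e.g. a golden top level of 1000 MHz overdriven to
-			 * 1100 MHz gives clock_percent = 10, and each
-			 * intermediate level is raised by 10% of its golden
-			 * value.
-			 */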
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count - 1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->sclk_table.count < 2 ?
- 0 : dpm_table->sclk_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (sclk > golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value) {
- clock_percent =
- ((sclk - golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value) * 100) /
- golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value +
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent)/100;
-
- } else if (golden_dpm_table->sclk_table.dpm_levels
- [dpm_table->sclk_table.count-1].value > sclk) {
- clock_percent =
- ((golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count - 1].value - sclk) *
- 100) /
- golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value -
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
- dpm_table->mclk_table.dpm_levels
- [dpm_table->mclk_table.count - 1].value = mclk;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->mclk_table.count < 2 ?
- 0 : dpm_table->mclk_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (mclk > golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value) {
- clock_percent = ((mclk -
- golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value) * 100) /
- golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value +
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
-
- } else if (golden_dpm_table->mclk_table.dpm_levels
- [dpm_table->mclk_table.count-1].value > mclk) {
- clock_percent = ((golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value - mclk) * 100) /
- golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value -
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
- result = fiji_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- if (data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
- /*populate MCLK dpm table to SMU7 */
- result = fiji_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- return result;
-}
-
-static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
- struct fiji_single_dpm_table * dpm_table,
- uint32_t low_limit, uint32_t high_limit)
-{
- uint32_t i;
-
- for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit) ||
- (dpm_table->dpm_levels[i].value > high_limit))
- dpm_table->dpm_levels[i].enabled = false;
- else
- dpm_table->dpm_levels[i].enabled = true;
- }
- return 0;
-}
-
-static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
- const struct fiji_power_state *fiji_ps)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t high_limit_count;
-
- PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1),
- "power state did not have any performance level",
- return -1);
-
- high_limit_count = (1 == fiji_ps->performance_level_count) ? 0 : 1;
-
- fiji_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.sclk_table),
- fiji_ps->performance_levels[0].engine_clock,
- fiji_ps->performance_levels[high_limit_count].engine_clock);
-
- fiji_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.mclk_table),
- fiji_ps->performance_levels[0].memory_clock,
- fiji_ps->performance_levels[high_limit_count].memory_clock);
-
- return 0;
-}
-
-static int fiji_generate_dpm_level_enable_mask(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
-
- result = fiji_trim_dpm_states(hwmgr, fiji_ps);
- if (result)
- return result;
-
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
- data->last_mclk_dpm_enable_mask =
- data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-
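-	/* While UVD is active, drop the lowest MCLK level from the enable
-	 * mask, presumably so decode never runs at the minimum memory clock.
-	 */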
- if (data->uvd_enabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
- data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
- }
-
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
-}
-
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_VCEDPM_Enable :
- PPSMC_MSG_VCEDPM_Disable);
-}
-
-int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_SAMUDPM_Enable :
- PPSMC_MSG_SAMUDPM_Disable);
-}
-
-int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_ACPDPM_Enable :
- PPSMC_MSG_ACPDPM_Disable);
-}
-
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
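-		/* Align the byte offset down to a 4-byte boundary, since the
-		 * SMC indirect interface transfers whole dwords (e.g. offset
-		 * 0x1A6 becomes 0x1A4).
-		 */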
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
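-		/* UvdBootLevel occupies bits 31:24 of this dword; the
-		 * analogous VCE, ACP and SAMU helpers below patch bits 23:16,
-		 * 15:8 and 7:0 respectively, leaving the other boot-level
-		 * bytes intact.
-		 */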
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
- }
-
- return fiji_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_nps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- const struct fiji_power_state *fiji_cps =
- cast_const_phw_fiji_power_state(states->pcurrent_state);
-
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	if (fiji_nps->vce_clks.evclk > 0 &&
- (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) {
- data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
-
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_StablePState))
-			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_VCEDPM_SetEnabledMask,
-					(uint32_t)1 << data->smc_state_table.VceBootLevel);
-
-		fiji_enable_disable_vce_dpm(hwmgr, true);
-	} else if (fiji_nps->vce_clks.evclk == 0 &&
-			fiji_cps != NULL &&
-			fiji_cps->vce_clks.evclk > 0)
-		fiji_enable_disable_vce_dpm(hwmgr, false);
-
- return 0;
-}
-
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.SamuBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.SamuBootLevel));
- }
-
- return fiji_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.AcpBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, AcpBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFF00FF;
- mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_ACPDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.AcpBootLevel));
- }
-
- return fiji_enable_disable_acp_dpm(hwmgr, !bgate);
-}
-
-static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
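-		/* The SMC is big-endian, so swap the threshold to SMC byte
-		 * order before copying it into SMC address space.
-		 */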
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = fiji_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- data->sram_end);
- }
-
- return result;
-}
-
-static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
- return fiji_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
-		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
-
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
- "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
- "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- data->need_update_smu7_dpm_table = 0;
-
- return 0;
-}
-
-/* Look up the voltage based on DAL's requested level
- * and then send the requested VDDC voltage to the SMC.
- */
-static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
-{
- return;
-}
-
-int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Apply minimum voltage based on DAL's request level */
- fiji_apply_dal_minimum_voltage_request(hwmgr);
-
- if (0 == data->sclk_dpm_key_disabled) {
-		/* Check whether DPM is running; if we discover a hang because
-		 * of this, we should skip this message.
-		 */
-		if (!fiji_is_dpm_running(hwmgr))
-			printk(KERN_ERR "[ powerplay ] "
-					"Trying to set Enable Mask when DPM is disabled\n");
-
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Sclk Dpm enable Mask failed", return -1);
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
-		/* Check whether DPM is running; if we discover a hang because
-		 * of this, we should skip this message.
-		 */
-		if (!fiji_is_dpm_running(hwmgr))
-			printk(KERN_ERR "[ powerplay ] "
-					"Trying to set Enable Mask when DPM is disabled\n");
-
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Mclk Dpm enable Mask failed", return -1);
- }
- }
-
- return 0;
-}
-
-static int fiji_notify_link_speed_change_after_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps);
- uint8_t request;
-
- if (data->pspp_notify_required) {
- if (target_link_speed == PP_PCIEGen3)
- request = PCIE_PERF_REQ_GEN3;
- else if (target_link_speed == PP_PCIEGen2)
- request = PCIE_PERF_REQ_GEN2;
- else
- request = PCIE_PERF_REQ_GEN1;
-
-		if (request == PCIE_PERF_REQ_GEN1 &&
- fiji_get_current_pcie_speed(hwmgr) > 0)
- return 0;
-
- if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
-			if (PP_PCIEGen2 == target_link_speed)
-				printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
-			else
-				printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
- }
- }
-
- return 0;
-}
-
-static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr,
- const void *input)
-{
- int tmp_result, result = 0;
-
- tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to find DPM states clocks in DPM table!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- fiji_request_link_speed_change_before_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to request link speed change before state change!",
- result = tmp_result);
- }
-
- tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
- tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate and upload SCLK MCLK DPM levels!",
- result = tmp_result);
-
- tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to generate DPM level enabled mask!",
- result = tmp_result);
-
- tmp_result = fiji_update_vce_dpm(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update VCE DPM!",
- result = tmp_result);
-
- tmp_result = fiji_update_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update SCLK threshold!",
- result = tmp_result);
-
- tmp_result = fiji_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program memory timing parameters!",
- result = tmp_result);
-
- tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to unfreeze SCLK MCLK DPM!",
- result = tmp_result);
-
- tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to upload DPM level enabled mask!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- fiji_notify_link_speed_change_after_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify link speed change after state change!",
- result = tmp_result);
- }
-
- return result;
-}
-
-static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- if (low)
- return fiji_ps->performance_levels[0].engine_clock;
- else
- return fiji_ps->performance_levels
- [fiji_ps->performance_level_count-1].engine_clock;
-}
-
-static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- if (low)
- return fiji_ps->performance_levels[0].memory_clock;
- else
- return fiji_ps->performance_levels
- [fiji_ps->performance_level_count-1].memory_clock;
-}
-
-static void fiji_print_current_perforce_level(
- struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- uint32_t sclk, mclk, activity_percent = 0;
- uint32_t offset;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
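-	/* The SMC reports clocks in 10 kHz units, hence the division by 100
-	 * to print MHz.
-	 */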
- seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
- mclk / 100, sclk / 100);
-
- offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
- activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
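-	/* AverageGraphicsActivity looks like an 8.8 fixed-point percentage:
-	 * adding 0x80 (0.5) before shifting by 8 rounds to the nearest
-	 * integer.
-	 */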
- activity_percent += 0x80;
- activity_percent >>= 8;
-
- seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
- seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
- seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t num_active_displays = 0;
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
- uint32_t display_gap2;
- uint32_t pre_vbi_time_in_us;
- uint32_t frame_time_in_us;
- uint32_t ref_clock;
- uint32_t refresh_rate = 0;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
-
- info.mode_info = &mode_info;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_active_displays = info.display_count;
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
-			DISP_GAP, (num_active_displays > 0) ?
- DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- ref_clock = mode_info.ref_clock;
- refresh_rate = mode_info.refresh_rate;
-
- if (refresh_rate == 0)
- refresh_rate = 60;
-
- frame_time_in_us = 1000000 / refresh_rate;
-
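-	/* The 200 us term is a fixed margin taken off the frame time;
-	 * ref_clock appears to be in 10 kHz units, so us * (ref_clock / 100)
-	 * converts microseconds into reference-clock cycles.
-	 */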
- pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
- display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- offsetof(SMU73_SoftRegisters, VBlankTimeout),
- (frame_time_in_us - pre_vbi_time_in_us));
-
- if (num_active_displays == 1)
- tonga_notify_smc_display_change(hwmgr, true);
-
- return 0;
-}
-
-int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
- return fiji_program_display_gap(hwmgr);
-}
-
-static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr,
- uint16_t us_max_fan_pwm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
-}
-
-static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
- uint16_t us_max_fan_rpm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
-}
-
-int fiji_dpm_set_interrupt_state(void *private_data,
- unsigned src_id, unsigned type,
- int enabled)
-{
- uint32_t cg_thermal_int;
- struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- switch (type) {
- case AMD_THERMAL_IRQ_LOW_TO_HIGH:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
-
- case AMD_THERMAL_IRQ_HIGH_TO_LOW:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
- default:
- break;
- }
- return 0;
-}
-
-int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
- const void *thermal_interrupt_info)
-{
- int result;
- const struct pp_interrupt_registration_info *info =
- (const struct pp_interrupt_registration_info *)
- thermal_interrupt_info;
-
- if (info == NULL)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
- fiji_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
- fiji_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
- if (mode) {
- /* stop auto-manage */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
- fiji_fan_ctrl_set_static_mode(hwmgr, mode);
-	} else {
-		/* restart auto-manage */
-		fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-	}
-
- return 0;
-}
-
-static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr->fan_ctrl_is_in_default_mode)
- return hwmgr->fan_ctrl_default_mode;
- else
- return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- return -EINVAL;
-
- switch (type) {
- case PP_SCLK:
- if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
- break;
-
- case PP_MCLK:
- if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
- break;
-
- case PP_PCIE:
- {
- uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
-
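-		/* Reduce the mask to the index of its highest set bit, i.e.
-		 * force the highest PCIe level left enabled (e.g. a mask of
-		 * 0b0110 yields level 2).
-		 */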
- while (tmp >>= 1)
- level++;
-
- if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
-static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, char *buf)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
-
- switch (type) {
- case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < sclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMHz %s\n",
- i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < mclk_table->count; i++)
-			size += sprintf(buf + size, "%d: %uMHz %s\n",
- i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_PCIE:
- pcie_speed = fiji_get_current_pcie_speed(hwmgr);
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_speed != pcie_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
- (i == now) ? "*" : "");
- break;
- default:
- break;
- }
- return size;
-}
-
-static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
- const struct fiji_performance_level *pl2)
-{
- return ((pl1->memory_clock == pl2->memory_clock) &&
- (pl1->engine_clock == pl2->engine_clock) &&
- (pl1->pcie_gen == pl2->pcie_gen) &&
- (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
- const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
- const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
- int i;
-
- if (equal == NULL || psa == NULL || psb == NULL)
- return -EINVAL;
-
- /* If the two states don't even have the same number of performance levels they cannot be the same state. */
- if (psa->performance_level_count != psb->performance_level_count) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < psa->performance_level_count; i++) {
- if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
- /* If we have found even one performance level pair that is different the states are different. */
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
- *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
- *equal &= (psa->sclk_threshold == psb->sclk_threshold);
- *equal &= (psa->acp_clk == psb->acp_clk);
-
- return 0;
-}
-
-bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- bool is_update_required = false;
-	struct cgs_display_info info = {0, 0, NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- is_update_required = true;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
-		if (hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr)
- is_update_required = true;
- }
-
- return is_update_required;
-}
-
-static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct fiji_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- int value;
-
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return value;
-}
-
-static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
- value / 100 +
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return 0;
-}
-
-static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct fiji_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- int value;
-
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return value;
-}
-
-static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
- value / 100 +
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return 0;
-}
-
-static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
- .backend_init = &fiji_hwmgr_backend_init,
- .backend_fini = &fiji_hwmgr_backend_fini,
- .asic_setup = &fiji_setup_asic_task,
- .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
- .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
- .force_dpm_level = &fiji_dpm_force_dpm_level,
- .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
- .get_power_state_size = &fiji_get_power_state_size,
- .get_pp_table_entry = &fiji_get_pp_table_entry,
- .patch_boot_state = &fiji_patch_boot_state,
- .apply_state_adjust_rules = &fiji_apply_state_adjust_rules,
- .power_state_set = &fiji_set_power_state_tasks,
- .get_sclk = &fiji_dpm_get_sclk,
- .get_mclk = &fiji_dpm_get_mclk,
- .print_current_perforce_level = &fiji_print_current_perforce_level,
- .powergate_uvd = &fiji_phm_powergate_uvd,
- .powergate_vce = &fiji_phm_powergate_vce,
- .disable_clock_power_gating = &fiji_phm_disable_clock_power_gating,
- .notify_smc_display_config_after_ps_adjustment =
- &tonga_notify_smc_display_config_after_ps_adjustment,
- .display_config_changed = &fiji_display_configuration_changed_task,
- .set_max_fan_pwm_output = fiji_set_max_fan_pwm_output,
- .set_max_fan_rpm_output = fiji_set_max_fan_rpm_output,
- .get_temperature = fiji_thermal_get_temperature,
- .stop_thermal_controller = fiji_thermal_stop_thermal_controller,
- .get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent,
- .reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default,
- .get_fan_speed_rpm = fiji_fan_ctrl_get_fan_speed_rpm,
- .set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm,
- .uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller,
- .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
- .set_fan_control_mode = fiji_set_fan_control_mode,
- .get_fan_control_mode = fiji_get_fan_control_mode,
- .check_states_equal = fiji_check_states_equal,
- .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
- .force_clock_level = fiji_force_clock_level,
- .print_clock_levels = fiji_print_clock_levels,
- .get_sclk_od = fiji_get_sclk_od,
- .set_sclk_od = fiji_set_sclk_od,
- .get_mclk_od = fiji_get_mclk_od,
- .set_mclk_od = fiji_set_mclk_od,
-};
-
-int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
- hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
- hwmgr->pptable_func = &tonga_pptable_funcs;
- pp_fiji_thermal_initialize(hwmgr);
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
deleted file mode 100644
index bf67c2a92c68..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _FIJI_HWMGR_H_
-#define _FIJI_HWMGR_H_
-
-#include "hwmgr.h"
-#include "smu73.h"
-#include "smu73_discrete.h"
-#include "ppatomctrl.h"
-#include "fiji_ppsmc.h"
-#include "pp_endian.h"
-
-#define FIJI_MAX_HARDWARE_POWERLEVELS 2
-#define FIJI_AT_DFLT 30
-
-#define FIJI_VOLTAGE_CONTROL_NONE 0x0
-#define FIJI_VOLTAGE_CONTROL_BY_GPIO 0x1
-#define FIJI_VOLTAGE_CONTROL_BY_SVID2 0x2
-#define FIJI_VOLTAGE_CONTROL_MERGED 0x3
-
-#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
-#define DPMTABLE_UPDATE_SCLK 0x00000004
-#define DPMTABLE_UPDATE_MCLK 0x00000008
-
-struct fiji_performance_level {
- uint32_t memory_clock;
- uint32_t engine_clock;
- uint16_t pcie_gen;
- uint16_t pcie_lane;
-};
-
-struct fiji_uvd_clocks {
- uint32_t vclk;
- uint32_t dclk;
-};
-
-struct fiji_vce_clocks {
- uint32_t evclk;
- uint32_t ecclk;
-};
-
-struct fiji_power_state {
- uint32_t magic;
- struct fiji_uvd_clocks uvd_clks;
- struct fiji_vce_clocks vce_clks;
- uint32_t sam_clk;
- uint32_t acp_clk;
- uint16_t performance_level_count;
- bool dc_compatible;
- uint32_t sclk_threshold;
- struct fiji_performance_level performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct fiji_dpm_level {
- bool enabled;
- uint32_t value;
- uint32_t param1;
-};
-
-#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define FIJI_MINIMUM_ENGINE_CLOCK 2500
-
-struct fiji_single_dpm_table {
- uint32_t count;
- struct fiji_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct fiji_dpm_table {
- struct fiji_single_dpm_table sclk_table;
- struct fiji_single_dpm_table mclk_table;
- struct fiji_single_dpm_table pcie_speed_table;
- struct fiji_single_dpm_table vddc_table;
- struct fiji_single_dpm_table vddci_table;
- struct fiji_single_dpm_table mvdd_table;
-};
-
-struct fiji_clock_registers {
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t vDLL_CNTL;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_SS1;
- uint32_t vMPLL_SS2;
-};
-
-struct fiji_voltage_smio_registers {
- uint32_t vS0_VID_LOWER_SMIO_CNTL;
-};
-
-#define FIJI_MAX_LEAKAGE_COUNT 8
-struct fiji_leakage_voltage {
- uint16_t count;
- uint16_t leakage_id[FIJI_MAX_LEAKAGE_COUNT];
- uint16_t actual_voltage[FIJI_MAX_LEAKAGE_COUNT];
-};
-
-struct fiji_vbios_boot_state {
- uint16_t mvdd_bootup_value;
- uint16_t vddc_bootup_value;
- uint16_t vddci_bootup_value;
- uint32_t sclk_bootup_value;
- uint32_t mclk_bootup_value;
- uint16_t pcie_gen_bootup_value;
- uint16_t pcie_lane_bootup_value;
-};
-
-struct fiji_bacos {
- uint32_t best_match;
- uint32_t baco_flags;
- struct fiji_performance_level performance_level;
-};
-
-/* Ultra Low Voltage parameter structure */
-struct fiji_ulv_parm {
- bool ulv_supported;
- uint32_t cg_ulv_parameter;
- uint32_t ulv_volt_change_delay;
- struct fiji_performance_level ulv_power_level;
-};
-
-struct fiji_display_timing {
- uint32_t min_clock_in_sr;
- uint32_t num_existing_displays;
-};
-
-struct fiji_dpmlevel_enable_mask {
- uint32_t uvd_dpm_enable_mask;
- uint32_t vce_dpm_enable_mask;
- uint32_t acp_dpm_enable_mask;
- uint32_t samu_dpm_enable_mask;
- uint32_t sclk_dpm_enable_mask;
- uint32_t mclk_dpm_enable_mask;
- uint32_t pcie_dpm_enable_mask;
-};
-
-struct fiji_pcie_perf_range {
- uint16_t max;
- uint16_t min;
-};
-
-struct fiji_hwmgr {
- struct fiji_dpm_table dpm_table;
- struct fiji_dpm_table golden_dpm_table;
-
- uint32_t voting_rights_clients0;
- uint32_t voting_rights_clients1;
- uint32_t voting_rights_clients2;
- uint32_t voting_rights_clients3;
- uint32_t voting_rights_clients4;
- uint32_t voting_rights_clients5;
- uint32_t voting_rights_clients6;
- uint32_t voting_rights_clients7;
- uint32_t static_screen_threshold_unit;
- uint32_t static_screen_threshold;
- uint32_t voltage_control;
- uint32_t vddc_vddci_delta;
-
- uint32_t active_auto_throttle_sources;
-
- struct fiji_clock_registers clock_registers;
- struct fiji_voltage_smio_registers voltage_smio_registers;
-
- bool is_memory_gddr5;
- uint16_t acpi_vddc;
- bool pspp_notify_required;
- uint16_t force_pcie_gen;
- uint16_t acpi_pcie_gen;
- uint32_t pcie_gen_cap;
- uint32_t pcie_lane_cap;
- uint32_t pcie_spc_cap;
- struct fiji_leakage_voltage vddc_leakage;
- struct fiji_leakage_voltage Vddci_leakage;
-
- uint32_t mvdd_control;
- uint32_t vddc_mask_low;
- uint32_t mvdd_mask_low;
- uint16_t max_vddc_in_pptable;
- uint16_t min_vddc_in_pptable;
- uint16_t max_vddci_in_pptable;
- uint16_t min_vddci_in_pptable;
- uint32_t mclk_strobe_mode_threshold;
- uint32_t mclk_stutter_mode_threshold;
- uint32_t mclk_edc_enable_threshold;
- uint32_t mclk_edcwr_enable_threshold;
- bool is_uvd_enabled;
- struct fiji_vbios_boot_state vbios_boot_state;
-
- bool battery_state;
- bool is_tlu_enabled;
-
- /* ---- SMC SRAM Address of firmware header tables ---- */
- uint32_t sram_end;
- uint32_t dpm_table_start;
- uint32_t soft_regs_start;
- uint32_t mc_reg_table_start;
- uint32_t fan_table_start;
- uint32_t arb_table_start;
- struct SMU73_Discrete_DpmTable smc_state_table;
- struct SMU73_Discrete_Ulv ulv_setting;
-
- /* ---- Stuff originally coming from Evergreen ---- */
- uint32_t vddci_control;
- struct pp_atomctrl_voltage_table vddc_voltage_table;
- struct pp_atomctrl_voltage_table vddci_voltage_table;
- struct pp_atomctrl_voltage_table mvdd_voltage_table;
-
- uint32_t mgcg_cgtt_local2;
- uint32_t mgcg_cgtt_local3;
- uint32_t gpio_debug;
- uint32_t mc_micro_code_feature;
- uint32_t highest_mclk;
- uint16_t acpi_vddci;
- uint8_t mvdd_high_index;
- uint8_t mvdd_low_index;
- bool dll_default_on;
- bool performance_request_registered;
-
- /* ---- Low Power Features ---- */
- struct fiji_bacos bacos;
- struct fiji_ulv_parm ulv;
-
- /* ---- CAC Stuff ---- */
- uint32_t cac_table_start;
- bool cac_configuration_required;
- bool driver_calculate_cac_leakage;
- bool cac_enabled;
-
- /* ---- DPM2 Parameters ---- */
- uint32_t power_containment_features;
- bool enable_dte_feature;
- bool enable_tdc_limit_feature;
- bool enable_pkg_pwr_tracking_feature;
- bool disable_uvd_power_tune_feature;
- const struct fiji_pt_defaults *power_tune_defaults;
- struct SMU73_Discrete_PmFuses power_tune_table;
- uint32_t dte_tj_offset;
- uint32_t fast_watermark_threshold;
-
- /* ---- Phase Shedding ---- */
- bool vddc_phase_shed_control;
-
- /* ---- DI/DT ---- */
- struct fiji_display_timing display_timing;
-
- /* ---- Thermal Temperature Setting ---- */
- struct fiji_dpmlevel_enable_mask dpm_level_enable_mask;
- uint32_t need_update_smu7_dpm_table;
- uint32_t sclk_dpm_key_disabled;
- uint32_t mclk_dpm_key_disabled;
- uint32_t pcie_dpm_key_disabled;
- uint32_t min_engine_clocks;
- struct fiji_pcie_perf_range pcie_gen_performance;
- struct fiji_pcie_perf_range pcie_lane_performance;
- struct fiji_pcie_perf_range pcie_gen_power_saving;
- struct fiji_pcie_perf_range pcie_lane_power_saving;
- bool use_pcie_performance_levels;
- bool use_pcie_power_saving_levels;
- uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
- uint32_t mclk_activity_target;
- uint32_t mclk_dpm0_activity_target;
- uint32_t low_sclk_interrupt_threshold;
- uint32_t last_mclk_dpm_enable_mask;
- bool uvd_enabled;
-
- /* ---- Power Gating States ---- */
- bool uvd_power_gated;
- bool vce_power_gated;
- bool samu_power_gated;
- bool acp_power_gated;
- bool pg_acp_init;
- bool frtc_enabled;
- bool frtc_status_changed;
-};
-
-/* To convert to Q8.8 format for firmware */
-#define FIJI_Q88_FORMAT_CONVERSION_UNIT 256
-
-enum Fiji_I2CLineID {
- Fiji_I2CLineID_DDC1 = 0x90,
- Fiji_I2CLineID_DDC2 = 0x91,
- Fiji_I2CLineID_DDC3 = 0x92,
- Fiji_I2CLineID_DDC4 = 0x93,
- Fiji_I2CLineID_DDC5 = 0x94,
- Fiji_I2CLineID_DDC6 = 0x95,
- Fiji_I2CLineID_SCLSDA = 0x96,
- Fiji_I2CLineID_DDCVGA = 0x97
-};
-
-#define Fiji_I2C_DDC1DATA 0
-#define Fiji_I2C_DDC1CLK 1
-#define Fiji_I2C_DDC2DATA 2
-#define Fiji_I2C_DDC2CLK 3
-#define Fiji_I2C_DDC3DATA 4
-#define Fiji_I2C_DDC3CLK 5
-#define Fiji_I2C_SDA 40
-#define Fiji_I2C_SCL 41
-#define Fiji_I2C_DDC4DATA 65
-#define Fiji_I2C_DDC4CLK 66
-#define Fiji_I2C_DDC5DATA 0x48
-#define Fiji_I2C_DDC5CLK 0x49
-#define Fiji_I2C_DDC6DATA 0x4a
-#define Fiji_I2C_DDC6CLK 0x4b
-#define Fiji_I2C_DDCVGADATA 0x4c
-#define Fiji_I2C_DDCVGACLK 0x4d
-
-#define FIJI_UNUSED_GPIO_PIN 0x7F
-
-extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-
-#endif /* _FIJI_HWMGR_H_ */
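Note on the Q8.8 macro removed above: FIJI_Q88_FORMAT_CONVERSION_UNIT (256) is the scale factor for 8.8 fixed point, i.e. eight integer and eight fractional bits. A minimal sketch of the conversion, with a hypothetical helper name (not part of the driver):

#include <stdint.h>

/* Q8.8 stores x as x * 256: high byte integer part, low byte the
 * fraction in 1/256 steps. Hypothetical illustration only. */
static uint16_t to_q88(uint32_t integer_part, uint32_t frac_256ths)
{
	return (uint16_t)((integer_part << 8) | (frac_256ths & 0xff));
}
/* e.g. 1.5 -> to_q88(1, 128) == 0x0180 == 384 == 1.5 * 256 */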
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
deleted file mode 100644
index 44658451a8d2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "smumgr.h"
-#include "fiji_hwmgr.h"
-#include "fiji_powertune.h"
-#include "fiji_smumgr.h"
-#include "smu73_discrete.h"
-#include "pp_debug.h"
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-
-const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-	/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
- {1, 0xF, 0xFD,
- /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
- 0x19, 5, 45}
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t tmp = 0;
-
- if(table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- fiji_hwmgr->power_tune_defaults =
- &fiji_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0];
-
- /* Assume disabled */
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SQRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DBRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TDRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TCPRamping);
-
- fiji_hwmgr->dte_tj_offset = tmp;
-
- if (!tmp) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
-
- fiji_hwmgr->fast_watermark_threshold = 100;
-
- if (hwmgr->powercontainment_enabled) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
- tmp = 1;
- fiji_hwmgr->enable_dte_feature = tmp ? false : true;
- fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
- fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
- }
- }
-}
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- * This is the unit expected by SMC firmware
- */
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
-
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
-{
- switch (line) {
- case Fiji_I2CLineID_DDC1 :
- *scl = Fiji_I2C_DDC1CLK;
- *sda = Fiji_I2C_DDC1DATA;
- break;
- case Fiji_I2CLineID_DDC2 :
- *scl = Fiji_I2C_DDC2CLK;
- *sda = Fiji_I2C_DDC2DATA;
- break;
- case Fiji_I2CLineID_DDC3 :
- *scl = Fiji_I2C_DDC3CLK;
- *sda = Fiji_I2C_DDC3DATA;
- break;
- case Fiji_I2CLineID_DDC4 :
- *scl = Fiji_I2C_DDC4CLK;
- *sda = Fiji_I2C_DDC4DATA;
- break;
- case Fiji_I2CLineID_DDC5 :
- *scl = Fiji_I2C_DDC5CLK;
- *sda = Fiji_I2C_DDC5DATA;
- break;
- case Fiji_I2CLineID_DDC6 :
- *scl = Fiji_I2C_DDC6CLK;
- *sda = Fiji_I2C_DDC6DATA;
- break;
- case Fiji_I2CLineID_SCLSDA :
- *scl = Fiji_I2C_SCL;
- *sda = Fiji_I2C_SDA;
- break;
- case Fiji_I2CLineID_DDCVGA :
- *scl = Fiji_I2C_DDCVGACLK;
- *sda = Fiji_I2C_DDCVGADATA;
- break;
- default:
- *scl = 0;
- *sda = 0;
- break;
- }
-}
-
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
- SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table=
- &hwmgr->thermal_controller.advanceFanControlParameters;
- uint8_t uc_scl, uc_sda;
-
-	/* The number of TDP fraction bits was changed from 8 to 7 for Fiji,
-	 * as requested by the SMC team
-	 */
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",);
-
- dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
-
- /* The following are for new Fiji Multi-input fan/thermal control */
- dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid1 * 256);
- dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid2 * 256);
- dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrVddc * 256);
- dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrMvdd * 256);
- dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitPlx * 256);
-
- dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
- dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainLiquid));
- dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrVddc));
- dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
- dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainPlx));
- dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHbm));
-
- dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
- dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
- dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
- dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
-
- get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Liquid_I2C_LineSCL = uc_scl;
- dpm_table->Liquid_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Vr_I2C_LineSCL = uc_scl;
- dpm_table->Vr_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Plx_I2C_LineSCL = uc_scl;
- dpm_table->Plx_I2C_LineSDA = uc_sda;
-
- return 0;
-}
-
-static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
- data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- data->power_tune_table.SviLoadLineTrimVddC = 3;
- data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
-	/* The number of TDC fraction bits was changed from 8 to 7
-	 * for Fiji, as requested by the SMC team
-	 */
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
- uint32_t temp;
-
- if (fiji_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
-
-static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if( (hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity & (1 << 15)) ||
- 0 == hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity )
- hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity = hwmgr->thermal_controller.
- advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- data->power_tune_table.FuzzyFan_PwmSetDelta =
- PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
- advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- /* int i, min, max;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd;
- uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd;
-
- min = max = pHiVID[0];
- for (i = 0; i < 8; i++) {
- if (0 != pHiVID[i]) {
- if (min > pHiVID[i])
- min = pHiVID[i];
- if (max < pHiVID[i])
- max = pHiVID[i];
- }
-
- if (0 != pLoVID[i]) {
- if (min > pLoVID[i])
- min = pLoVID[i];
- if (max < pLoVID[i])
- max = pLoVID[i];
- }
- }
-
- PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed);
- data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max;
- data->power_tune_table.GnbLPMLMinVid = (uint8_t)min;
-*/
- return 0;
-}
-
-static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
- data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
- return 0;
-}
-
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- /* DW6 */
- if (fiji_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
- /* DW7 */
- if (fiji_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
- /* DW8 */
- if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (0 != fiji_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- /* DW13-DW14 */
- if(fiji_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- /* DW15-DW18 */
- if (fiji_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- /* DW19 */
- if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- /* DW20 */
- if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&data->power_tune_table,
- sizeof(struct SMU73_Discrete_PmFuses), data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC)) {
- int smc_result;
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_EnableCac));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable CAC in SMC.", result = -1);
-
- data->cac_enabled = (0 == smc_result) ? true : false;
- }
- return result;
-}
-
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC) && data->cac_enabled) {
- int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_DisableCac));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable CAC in SMC.", result = -1);
-
- data->cac_enabled = false;
- }
- return result;
-}
-
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if(data->power_containment_features &
- POWERCONTAINMENT_FEATURE_PkgPwrLimit)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PkgPwrSetLimit, n);
- return 0;
-}
-
-static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
-{
- return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
- PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
-}
-
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int smc_result;
- int result = 0;
-
- data->power_containment_features = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (data->enable_dte_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_EnableDTE));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable DTE in SMC.", result = -1;);
- if (0 == smc_result)
- data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
- }
-
- if (data->enable_tdc_limit_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_TDCLimitEnable));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable TDCLimit in SMC.", result = -1;);
- if (0 == smc_result)
- data->power_containment_features |=
- POWERCONTAINMENT_FEATURE_TDCLimit;
- }
-
- if (data->enable_pkg_pwr_tracking_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable PkgPwrTracking in SMC.", result = -1;);
- if (0 == smc_result) {
- struct phm_cac_tdp_table *cac_table =
- table_info->cac_dtp_table;
- uint32_t default_limit =
- (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
-
- data->power_containment_features |=
- POWERCONTAINMENT_FEATURE_PkgPwrLimit;
-
- if (fiji_set_power_limit(hwmgr, default_limit))
- printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
- }
- }
- }
- return result;
-}
-
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment) &&
- data->power_containment_features) {
- int smc_result;
-
- if (data->power_containment_features &
- POWERCONTAINMENT_FEATURE_TDCLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_TDCLimitDisable));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable TDCLimit in SMC.",
- result = smc_result);
- }
-
- if (data->power_containment_features &
- POWERCONTAINMENT_FEATURE_DTE) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_DisableDTE));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable DTE in SMC.",
- result = smc_result);
- }
-
- if (data->power_containment_features &
- POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable PkgPwrTracking in SMC.",
- result = smc_result);
- }
- data->power_containment_features = 0;
- }
-
- return result;
-}
-
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
- int adjust_percent, target_tdp;
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- /* adjustment percentage has already been validated */
- adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
- hwmgr->platform_descriptor.TDPAdjustment :
- (-1 * hwmgr->platform_descriptor.TDPAdjustment);
-	/* SMC requires target_tdp as a 7-bit fraction in the DPM table
-	 * but as an 8-bit fraction in messages
-	 */
- target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
- result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
- }
-
- return result;
-}
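The removed fiji_powertune.c mixes three fixed-point scales: TDP and TDC values use 7 fraction bits (x * 128, per the comments above), temperature limits use 8 fraction bits (x * 256), and fan gains are rescaled from an x * 100 unit to an x * 4096 unit. A compact sketch of the arithmetic, with hypothetical helper names:

#include <stdint.h>

/* Hypothetical helpers restating the scalings used above. */
static uint16_t tdp_to_smc(uint16_t watts)    { return watts * 128; }   /* 7 fraction bits */
static uint16_t temp_to_smc(uint16_t celsius) { return celsius * 256; } /* 8 fraction bits */
static uint16_t fan_gain_to_smc(uint16_t raw) { return raw * 4096 / 100; }
/* e.g. a 220 W TDP becomes 28160, a 95 C limit becomes 24320, and a
 * gain of 100 becomes 4096; each result is then byte-swapped for the
 * firmware via PP_HOST_TO_SMC_US(). */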
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
deleted file mode 100644
index fec772421733..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef FIJI_POWERTUNE_H
-#define FIJI_POWERTUNE_H
-
-enum fiji_pt_config_reg_type {
- FIJI_CONFIGREG_MMR = 0,
- FIJI_CONFIGREG_SMC_IND,
- FIJI_CONFIGREG_DIDT_IND,
- FIJI_CONFIGREG_CACHE,
- FIJI_CONFIGREG_MAX
-};
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
-
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
-#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
-#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
-#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
-
-struct fiji_pt_config_reg {
- uint32_t offset;
- uint32_t mask;
- uint32_t shift;
- uint32_t value;
- enum fiji_pt_config_reg_type type;
-};
-
-struct fiji_pt_defaults
-{
- uint8_t SviLoadLineEn;
- uint8_t SviLoadLineVddC;
- uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
- uint8_t TDC_MAWt;
- uint8_t TdcWaterfallCtl;
- uint8_t DTEAmbientTempBase;
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
-
-#endif /* FIJI_POWERTUNE_H */
-
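The MASK/__SHIFT pairs removed above follow the usual register-field convention: a field is read as (reg & MASK) >> SHIFT and written back as (reg & ~MASK) | (val << SHIFT). A minimal sketch with hypothetical helpers:

#include <stdint.h>

static uint32_t reg_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static uint32_t reg_set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			      uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}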
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
deleted file mode 100644
index 8621493b8574..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_THERMAL_H
-#define FIJI_THERMAL_H
-
-#include "hwmgr.h"
-
-#define FIJI_THERMAL_HIGH_ALERT_MASK 0x1
-#define FIJI_THERMAL_LOW_ALERT_MASK 0x2
-
-#define FIJI_THERMAL_MINIMUM_TEMP_READING -256
-#define FIJI_THERMAL_MAXIMUM_TEMP_READING 255
-
-#define FIJI_THERMAL_MINIMUM_ALERT_TEMP 0
-#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP 255
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 789f98ad2615..0723758ed065 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -24,8 +24,6 @@
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"
-#include "pp_acpi.h"
-#include "amd_acpi.h"
#include "pp_debug.h"
#define PHM_FUNC_CHECK(hw) \
@@ -34,38 +32,6 @@
return -EINVAL; \
} while (0)
-void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
-{
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
-
- if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
- acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
-}
-
bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
{
return hwmgr->block_hw_access;
@@ -306,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw
PHM_FUNC_CHECK(hwmgr);
if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
- return -EINVAL;
+ return false;
return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 27e07624ac28..e03dcb6ea9c1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -32,13 +32,22 @@
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
-
-#define VOLTAGE_SCALE 4
+#include "pp_acpi.h"
+#include "amd_acpi.h"
extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
+
+static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
+static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
+static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+
+uint8_t convert_to_vid(uint16_t vddc)
+{
+ return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
{
@@ -56,10 +65,12 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
hwmgr->device = pp_init->device;
hwmgr->chip_family = pp_init->chip_family;
hwmgr->chip_id = pp_init->chip_id;
- hwmgr->hw_revision = pp_init->rev_id;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
- hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
+ hwmgr->pp_table_version = PP_TABLE_V1;
+
+ hwmgr_init_default_caps(hwmgr);
+ hwmgr_set_user_specify_caps(hwmgr);
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CZ:
@@ -67,26 +78,38 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
+ case CHIP_TOPAZ:
+ topaz_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+ PP_VBI_TIME_SUPPORT_MASK |
+ PP_ENABLE_GFX_CG_THRU_SMU);
+ hwmgr->pp_table_version = PP_TABLE_V0;
+ break;
case CHIP_TONGA:
- tonga_hwmgr_init(hwmgr);
+ tonga_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+ PP_VBI_TIME_SUPPORT_MASK);
break;
case CHIP_FIJI:
- fiji_hwmgr_init(hwmgr);
+ fiji_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+ PP_VBI_TIME_SUPPORT_MASK |
+ PP_ENABLE_GFX_CG_THRU_SMU);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
- polaris10_hwmgr_init(hwmgr);
+ polaris_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
default:
return -EINVAL;
}
+ smu7_hwmgr_init(hwmgr);
break;
default:
return -EINVAL;
}
- phm_init_dynamic_caps(hwmgr);
-
return 0;
}
@@ -105,6 +128,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
kfree(hwmgr->set_temperature_range.function_list);
kfree(hwmgr->ps);
+ kfree(hwmgr->current_ps);
+ kfree(hwmgr->request_ps);
kfree(hwmgr);
return 0;
}
@@ -129,10 +154,17 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
sizeof(struct pp_power_state);
hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
-
if (hwmgr->ps == NULL)
return -ENOMEM;
+ hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->request_ps == NULL)
+ return -ENOMEM;
+
+ hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->current_ps == NULL)
+ return -ENOMEM;
+
state = hwmgr->ps;
for (i = 0; i < table_entries; i++) {
@@ -140,7 +172,8 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
if (state->classification.flags & PP_StateClassificationFlag_Boot) {
hwmgr->boot_ps = state;
- hwmgr->current_ps = hwmgr->request_ps = state;
+ memcpy(hwmgr->current_ps, state, size);
+ memcpy(hwmgr->request_ps, state, size);
}
state->id = i + 1; /* assigned unique num for every power state id */
@@ -150,6 +183,7 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
state = (struct pp_power_state *)((unsigned long)state + size);
}
+
return 0;
}
@@ -182,30 +216,6 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
return 0;
}
-int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t index, uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (hwmgr == NULL || hwmgr->device == NULL) {
- printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
- return -EINVAL;
- }
-
- for (i = 0; i < hwmgr->usec_timeout; i++) {
- cur_value = cgs_read_register(hwmgr->device, index);
- if ((cur_value & mask) != (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic*/
- if (i == hwmgr->usec_timeout)
- return -1;
- return 0;
-}
-
/**
* Returns once the part of the register indicated by the mask has
@@ -227,21 +237,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
}
-void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (hwmgr == NULL || hwmgr->device == NULL) {
- printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!");
- return;
- }
- cgs_write_register(hwmgr->device, indirect_port, index);
- phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
- value, mask);
-}
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
@@ -403,12 +399,9 @@ int phm_reset_single_dpm_table(void *table,
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
- PP_ASSERT_WITH_CODE(count <= max,
- "Fatal error, can not set up single DPM table entries to exceed max number!",
- );
+ dpm_table->count = count > max ? max : count;
- dpm_table->count = count;
- for (i = 0; i < max; i++)
+ for (i = 0; i < dpm_table->count; i++)
dpm_table->dpm_level[i].enabled = false;
return 0;
@@ -462,6 +455,27 @@ uint8_t phm_get_voltage_index(
return i - 1;
}
+uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
+ uint32_t voltage)
+{
+ uint8_t count = (uint8_t) (voltage_table->count);
+ uint8_t i = 0;
+
+ PP_ASSERT_WITH_CODE((NULL != voltage_table),
+ "Voltage Table empty.", return 0;);
+ PP_ASSERT_WITH_CODE((0 != count),
+ "Voltage Table empty.", return 0;);
+
+ for (i = 0; i < count; i++) {
+ /* find first voltage bigger than requested */
+ if (voltage_table->entries[i].value >= voltage)
+ return i;
+ }
+
+ /* voltage is bigger than max voltage in the table */
+ return i - 1;
+}
+
uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
uint32_t i;
@@ -549,7 +563,8 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
table_clk_vlt->entries[2].v = 810;
table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
table_clk_vlt->entries[3].v = 900;
- pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
+ if (pptable_info != NULL)
+ pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
}
@@ -615,3 +630,188 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
printk(KERN_ERR "DAL requested level can not"
" found a available voltage in VDDC DPM Table \n");
}
+
+void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
+ if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
+ acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicPatchPowerState);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableSMU7ThermalManagement);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicPowerManagement);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SMC);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicUVDState);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+ return;
+}
+
+int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
+{
+ if (amdgpu_sclk_deep_sleep_en)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep);
+ else
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep);
+
+ if (amdgpu_powercontainment)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ else
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+
+ hwmgr->feature_mask = amdgpu_pp_feature_mask;
+
+ return 0;
+}
+
+int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t id, uint16_t *voltage)
+{
+ uint32_t vol;
+ int ret = 0;
+
+ if (hwmgr->chip_id < CHIP_TONGA) {
+ ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+ } else if (hwmgr->chip_id < CHIP_POLARIS10) {
+ ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+ if (*voltage >= 2000 || *voltage == 0)
+ *voltage = 1150;
+ } else {
+ ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
+ *voltage = (uint16_t)(vol/100);
+ }
+ return ret;
+}
+
+int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+	/* power tune caps: assume disabled */
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ if (hwmgr->chip_id == CHIP_POLARIS11)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport);
+ return 0;
+}
+
+int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ return 0;
+}
+
+int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+
+ return 0;
+}
+
+int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
+ return 0;
+}
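On the convert_to_vid() helper introduced in this file: with VOLTAGE_SCALE == 4 and vddc in mV, (6200 - vddc * 4) / 25 is algebraically (1550 - vddc) / 6.25, which matches the SVI2 encoding V(mV) = 1550 - 6.25 * VID. That reading is an inference, not spelled out in the patch. A worked example:

#include <stdint.h>

#define VOLTAGE_SCALE 4	/* value taken from the defines elsewhere in the series */

static uint8_t convert_to_vid(uint16_t vddc)	/* mirrors the helper added above */
{
	return (uint8_t)((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}
/* e.g. vddc = 1150 mV -> (6200 - 4600) / 25 == 64,
 * and 1550 - 6.25 * 64 == 1150 mV, recovering the input. */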
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
deleted file mode 100644
index f78ffd935cee..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef POLARIS10_DYN_DEFAULTS_H
-#define POLARIS10_DYN_DEFAULTS_H
-
-
-enum Polaris10dpm_TrendDetection {
- Polaris10Adpm_TrendDetection_AUTO,
- Polaris10Adpm_TrendDetection_UP,
- Polaris10Adpm_TrendDetection_DOWN
-};
-typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;
-
-/* We need to fill in the default values */
-
-
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
-#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
-
-
-#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT 0x200
-#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT 0
-#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT 0x00C8
-#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
-#define PPPOLARIS10_REFERENCEDIVIDER_DFLT 4
-
-#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT 1687
-
-#define PPPOLARIS10_CGULVPARAMETER_DFLT 0x00040035
-#define PPPOLARIS10_CGULVCONTROL_DFLT 0x00007450
-#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
-#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT 10
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
deleted file mode 100644
index 769636a0c5b5..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ /dev/null
@@ -1,5290 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include <asm/div64.h>
-#include "linux/delay.h"
-#include "pp_acpi.h"
-#include "hwmgr.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_powertune.h"
-#include "polaris10_dyn_defaults.h"
-#include "polaris10_smumgr.h"
-#include "pp_debug.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "tonga_pptable.h"
-#include "pppcielanes.h"
-#include "amd_pcie_helpers.h"
-#include "hardwaremanager.h"
-#include "tonga_processpptables.h"
-#include "cgs_common.h"
-#include "smu74.h"
-#include "smu_ucode_xfer_vi.h"
-#include "smu74_discrete.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "oss/oss_3_0_d.h"
-#include "gca/gfx_8_0_d.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-#include "polaris10_thermal.h"
-#include "polaris10_clockpowergating.h"
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-#define MC_CG_SEQ_DRAMCONF_S0 0x05
-#define MC_CG_SEQ_DRAMCONF_S1 0x06
-#define MC_CG_SEQ_YCLK_SUSPEND 0x04
-#define MC_CG_SEQ_YCLK_RESUME 0x0a
-
-
-#define SMC_RAM_END 0x40000
-
-#define SMC_CG_IND_START 0xc0030000
-#define SMC_CG_IND_END 0xc0040000
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-#define VDDC_VDDCI_DELTA 200
-
-#define MEM_FREQ_LOW_LATENCY 25000
-#define MEM_FREQ_HIGH_LATENCY 80000
-
-#define MEM_LATENCY_HIGH 45
-#define MEM_LATENCY_LOW 35
-#define MEM_LATENCY_ERR 0xFFFF
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-
-#define PCIE_BUS_CLK 10000
-#define TCLK (PCIE_BUS_CLK / 10)
-
-
-static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min, VID max] */
-static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
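
A note on the conversion table just above: its row is selected by the "use for low frequency" flag and its column by the stretch-percentage index from the comment. The helper below is a hypothetical sketch (example_stretch_amount is not part of this file) showing that indexing, with a defensive fallback for out-of-range input.

static uint8_t example_stretch_amount(bool use_for_low_freq,
		unsigned int pct_index)
{
	/* Columns map to {0%, 5%, 10%, 7.14%, 14.28%, 20%} per the table comment */
	if (pct_index >= 6)
		return 0; /* out of range: fall back to "no stretching" */
	return polaris10_clock_stretch_amount_conversion
			[use_for_low_freq ? 1 : 0][pct_index];
}
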
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
- DPM_EVENT_SRC_ANALOG = 0,
- DPM_EVENT_SRC_EXTERNAL = 1,
- DPM_EVENT_SRC_DIGITAL = 2,
- DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
-};
-
-static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct polaris10_power_state *cast_phw_polaris10_power_state(
- struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (struct polaris10_power_state *)hw_ps;
-}
-
-const struct polaris10_power_state *cast_const_phw_polaris10_power_state(
- const struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (const struct polaris10_power_state *)hw_ps;
-}
-
-static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
-}
-
-/**
- * Find the MC microcode version and store it in the HwMgr struct
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int phm_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
-{
- cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-
- hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
-
- return 0;
-}
-
-uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
- uint32_t speedCntl = 0;
-
- /* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
- speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
- return((uint16_t)PHM_GET_FIELD(speedCntl,
- PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
-{
- uint32_t link_width;
-
- /* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
- link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
-
- PP_ASSERT_WITH_CODE((7 >= link_width),
- "Invalid PCIe lane width!", return 0);
-
- return decode_pcie_lane_width(link_width);
-}
-
-/**
-* Enable voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, 1 if the SMC message failed
-*/
-int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
-{
- PP_ASSERT_WITH_CODE(
- (hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
- "Failed to enable voltage DPM during DPM Start Function!",
- return 1;
- );
-
- return 0;
-}
-
-/**
-* Checks if we want to support voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-*/
-static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr)
-{
- const struct polaris10_hwmgr *data =
- (const struct polaris10_hwmgr *)(hwmgr->backend);
-
- return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/**
-* Enable voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
- /* enable voltage control */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
- return 0;
-}
-
-/**
-* Create Voltage Tables.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- int result;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
- &(data->mvdd_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve MVDD table.",
- return result);
- } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 MVDD table from dependancy table.",
- return result;);
- }
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
- &(data->vddci_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve VDDCI table.",
- return result);
- } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.",
- return result);
- }
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
- table_info->vddc_lookup_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from lookup table.",
- return result);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)),
- "Too many voltage values for VDDC. Trimming to fit state table.",
- phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC,
- &(data->vddc_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)),
- "Too many voltage values for VDDCI. Trimming to fit state table.",
- phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI,
- &(data->vddci_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)),
- "Too many voltage values for MVDD. Trimming to fit state table.",
- phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD,
- &(data->mvdd_voltage_table)));
-
- return 0;
-}
-
-/**
-* Programs static screen detection parameters
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_program_static_screen_threshold_parameters(
- struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* Set static screen threshold unit */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
- data->static_screen_threshold_unit);
- /* Set static screen threshold */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
- data->static_screen_threshold);
-
- return 0;
-}
-
-/**
-* Setup display gap for glitch free memory clock switching.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
- uint32_t display_gap =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL);
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP, DISPLAY_GAP_IGNORE);
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- return 0;
-}
-
-/**
-* Programs activity state transition voting clients
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* Clear reset for voting clients before enabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
- return 0;
-}
-
-static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
- /* Reset voting clients before disabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, 0);
-
- return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, DpmTable),
- &tmp, data->sram_end);
-
- if (0 == result)
- data->dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, SoftRegisters),
- &tmp, data->sram_end);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcRegisterTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->mc_reg_table_start = tmp;
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, FanTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, Version),
- &tmp, data->sram_end);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
-
-/* Copy one arb setting to another and then switch the active set.
- * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
- */
-static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
- uint32_t arb_src, uint32_t arb_dest)
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint32_t burst_time;
- uint32_t mc_cg_config;
-
- switch (arb_src) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
- break;
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
- break;
- default:
- return -EINVAL;
- }
-
- switch (arb_dest) {
- case MC_CG_ARB_FREQ_F0:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
- break;
- case MC_CG_ARB_FREQ_F1:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
- break;
- default:
- return -EINVAL;
- }
-
- mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
- mc_cg_config |= 0x0000000F;
- cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
-
- return 0;
-}
-
-static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
-}
-
-/**
-* Initial switch from ARB F0->F1
-*
-* This function is to be called from the SetPowerState table.
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
-{
- return polaris10_copy_and_switch_arb_sets(hwmgr,
- MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
-{
- uint32_t tmp;
-
- tmp = (cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
- 0x0000ff00) >> 8;
-
- if (tmp == MC_CG_ARB_FREQ_F0)
- return 0;
-
- return polaris10_copy_and_switch_arb_sets(hwmgr,
- tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint32_t i, max_entry;
-
- PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
- data->use_pcie_power_saving_levels), "No pcie performance levels!",
- return -EINVAL);
-
- if (data->use_pcie_performance_levels &&
- !data->use_pcie_power_saving_levels) {
- data->pcie_gen_power_saving = data->pcie_gen_performance;
- data->pcie_lane_power_saving = data->pcie_lane_performance;
- } else if (!data->use_pcie_performance_levels &&
- data->use_pcie_power_saving_levels) {
- data->pcie_gen_performance = data->pcie_gen_power_saving;
- data->pcie_lane_performance = data->pcie_lane_power_saving;
- }
-
- phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
- SMU74_MAX_LEVELS_LINK,
- MAX_REGULAR_DPM_NUMBER);
-
- if (pcie_table != NULL) {
- /* max_entry is used to make sure we reserve one PCIE level
- * for boot level (fix for A+A PSPP issue).
- * If the PCIE table from the PPTable has a ULV entry plus 8 entries,
- * then ignore the last entry. */
- max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU74_MAX_LEVELS_LINK : pcie_table->count;
- for (i = 1; i < max_entry; i++) {
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- pcie_table->entries[i].gen_speed),
- get_pcie_lane_support(data->pcie_lane_cap,
- pcie_table->entries[i].lane_width));
- }
- data->dpm_table.pcie_speed_table.count = max_entry - 1;
-
- /* Setup BIF_SCLK levels */
- for (i = 0; i < max_entry; i++)
- data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
- } else {
- /* Hardcode Pcie Table */
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- data->dpm_table.pcie_speed_table.count = 6;
- }
- /* Populate last level for boot PCIE level, but do not increment count. */
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
- data->dpm_table.pcie_speed_table.count,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- return 0;
-}
-
-/*
- * This function initializes all DPM state tables
- * for SMU7 based on the dependency table.
- * The dynamic state patching function will then trim these
- * state tables to the allowed range based
- * on the power policy or external client requests,
- * such as UVD requests, etc.
- */
-int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
- "SCLK dependency table has to have is missing."
- "This table is mandatory",
- return -EINVAL);
-
- PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
- "MCLK dependency table has to have is missing."
- "This table is mandatory",
- return -EINVAL);
-
- /* clear the state table to reset everything to default */
- phm_reset_single_dpm_table(
- &data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER);
- phm_reset_single_dpm_table(
- &data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER);
-
-
- /* Initialize Sclk DPM table based on allowed Sclk values */
- data->dpm_table.sclk_table.count = 0;
- for (i = 0; i < dep_sclk_table->count; i++) {
- if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
- dep_sclk_table->entries[i].clk) {
-
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
- dep_sclk_table->entries[i].clk;
-
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
- (i == 0);
- data->dpm_table.sclk_table.count++;
- }
- }
-
- /* Initialize Mclk DPM table based on allowed Mclk values */
- data->dpm_table.mclk_table.count = 0;
- for (i = 0; i < dep_mclk_table->count; i++) {
- if (i == 0 || data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count - 1].value !=
- dep_mclk_table->entries[i].clk) {
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
- dep_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
- (i == 0);
- data->dpm_table.mclk_table.count++;
- }
- }
-
- /* setup PCIE gen speed levels */
- polaris10_setup_default_pcie_table(hwmgr);
-
- /* save a copy of the default DPM table */
- memcpy(&(data->golden_dpm_table), &(data->dpm_table),
- sizeof(struct polaris10_dpm_table));
-
- return 0;
-}
-
-uint8_t convert_to_vid(uint16_t vddc)
-{
- return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
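
As a worked example of convert_to_vid() (assuming vddc is in mV: VOLTAGE_SCALE = 4 puts SMC voltages in 0.25 mV units, so this encodes 6.25 mV VID steps down from a 1550 mV ceiling):

/*
 * convert_to_vid(1150) == (6200 - 1150 * 4) / 25 == 1600 / 25 == 64
 * convert_to_vid(1000) == (6200 - 1000 * 4) / 25 == 2200 / 25 == 88
 *
 * 6200 * 0.25 mV == 1550 mV ceiling; 25 * 0.25 mV == 6.25 mV per VID step.
 */
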
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t count, level;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- count = data->mvdd_voltage_table.count;
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; level++) {
- table->SmioTable2.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[level].Smio =
- (uint8_t) level;
- table->Smio[level] |=
- data->mvdd_voltage_table.entries[level].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
- }
-
- return 0;
-}
-
-static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count, level;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- count = data->vddci_voltage_table.count;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; ++level) {
- table->SmioTable1.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
- table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
-
- table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
-
- return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- /* The table is already byte-swapped, so in order to use a value
- * from it we need to swap it back.
- * We are populating vddc CAC data to the BapmVddc table
- * in both split and merged mode.
- */
- for (count = 0; count < lookup_table->count; count++) {
- index = phm_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- polaris10_populate_smc_vddci_table(hwmgr, table);
- polaris10_populate_smc_mvdd_table(hwmgr, table);
- polaris10_populate_cac_table(hwmgr, table);
-
- return 0;
-}
-
-static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_Ulv *state)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
- return 0;
-}
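
The VddcOffsetVid conversion above reduces to a division by 6.25, i.e. a millivolt offset becomes a count of 6.25 mV VID steps (assuming us_ulv_voltage_offset is in mV):

/*
 * VddcOffsetVid = offset * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1
 *               = offset * 100 / 625
 *
 * e.g. a 50 mV ULV offset gives 50 * 100 / 625 == 8 VID steps.
 */
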
-
-static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr)
-{
- uint32_t reference_clock, tmp;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
-
- info.mode_info = &mode_info;
-
- tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
-
- if (tmp)
- return TCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- reference_clock = mode_info.ref_clock;
-
- tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
-
- if (0 != tmp)
- return reference_clock / 4;
-
- return reference_clock;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk_setting the SMC SCLK setting structure to be populated
-*/
-static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, SMU_SclkSetting *sclk_setting)
-{
- const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
- struct pp_atomctrl_clock_dividers_ai dividers;
-
- uint32_t ref_clock;
- uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
- uint8_t i;
- int result;
- uint64_t temp;
-
- sclk_setting->SclkFrequency = clock;
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
- if (result == 0) {
- sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
- sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
- sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
- sclk_setting->PllRange = dividers.ucSclkPllRange;
- sclk_setting->Sclk_slew_rate = 0x400;
- sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
- sclk_setting->Pcc_down_slew_rate = 0xffff;
- sclk_setting->SSc_En = dividers.ucSscEnable;
- sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
- sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
- sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
- return result;
- }
-
- ref_clock = polaris10_get_xclk(hwmgr);
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- if (clock > data->range_table[i].trans_lower_frequency
- && clock <= data->range_table[i].trans_upper_frequency) {
- sclk_setting->PllRange = i;
- break;
- }
- }
-
- sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw_frac = temp & 0xffff;
-
- pcc_target_percent = 10; /* Hardcode 10% for now. */
- pcc_target_freq = clock - (clock * pcc_target_percent / 100);
- sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-
- ss_target_percent = 2; /* Hardcode 2% for now. */
- sclk_setting->SSc_En = 0;
- if (ss_target_percent) {
- sclk_setting->SSc_En = 1;
- ss_target_freq = clock - (clock * ss_target_percent / 100);
- sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw1_frac = temp & 0xffff;
- }
-
- return 0;
-}
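
For the fallback path above, the divider is effectively a 16.16 fixed-point ratio of the target clock to the reference clock, pre-shifted by the range's post-divider. A numerical sketch with illustrative values (any consistent clock units work):

/*
 * With postdiv == 0, ref_clock == 2500 and clock == 101300:
 *
 *   Fcw_int  = 101300 / 2500 == 40
 *   Fcw_frac = ((101300 << 16) / 2500) & 0xffff
 *            = 2655518 & 0xffff == 34078   (~0.52 as a 16-bit fraction)
 *
 * so the programmed FCW is roughly 40.52 times the reference clock.
 */
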
-
-static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- *voltage = *mvdd = 0;
-
- /* the clock-voltage dependency table is empty */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find the first sclk not lower than the requested clock */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- (uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
- /* sclk is bigger than the max sclk in the dependency table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i - 1].vddc -
- (uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
- else if (dep_table->entries[i - 1].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
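
For reference, the *voltage word assembled above packs three fields with the shift macros; a sketch of the layout (field widths are not restated here, only the shifts the code actually uses):

/*
 * *voltage = (vddc  * VOLTAGE_SCALE) << VDDC_SHIFT
 *          | (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT
 *          |  1                      << PHASES_SHIFT;
 *
 * *mvdd is reported separately, already multiplied by VOLTAGE_SCALE.
 */
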
-
-static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] =
-{ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
- {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
- {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
-
-static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t i, ref_clk;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
- struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
-
- ref_clk = polaris10_get_xclk(hwmgr);
-
- if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
- table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
- return;
- }
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
-
- data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
- data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
-
- table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
- table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
-}
-
-/**
-* Populates a single SMC graphics level structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk_al_threshold the activity level threshold for this level
-* @param level the SMC graphics level structure to be populated
-*/
-
-static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU74_Discrete_GraphicsLevel *level)
-{
- int result, i, temp;
- /* PP_Clocks minClocks; */
- uint32_t mvdd;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMU_SclkSetting curr_sclk_setting = { 0 };
-
- result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
-
- /* populate graphics levels */
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- &level->MinVoltage, &mvdd);
-
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
- level->ActivityLevel = sclk_al_threshold;
-
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
-
- /*
- * TODO: get minimum clocks from DAL configuration
- * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
- */
- /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
-
- /* get level->DeepSleepDivId
- if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
- */
- PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0);
- for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
- temp = clock >> i;
-
- if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0)
- break;
- }
-
- level->DeepSleepDivId = i;
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- if (data->update_up_hyst)
- level->UpHyst = (uint8_t)data->up_hyst;
- if (data->update_down_hyst)
- level->DownHyst = (uint8_t)data->down_hyst;
-
- level->SclkSetting = curr_sclk_setting;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
- return 0;
-}
-
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
- SMU74_MAX_LEVELS_GRAPHICS;
- struct SMU74_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
- uint8_t hightest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- polaris10_get_sclk_range_table(hwmgr);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
-
- result = polaris10_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)data->activity_target[i],
- &(data->smc_state_table.GraphicsLevel[i]));
- if (result)
- return result;
-
- /* Making sure only DPM levels 0 and 1 have the Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SPLLShutdownSupport))
- data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
-
- data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
- data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- } else {
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (hightest_pcie_level_enabled + 1))) != 0))
- hightest_pcie_level_enabled++;
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0))
- lowest_pcie_level_enabled++;
-
- while ((count < hightest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
- count++;
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
- hightest_pcie_level_enabled ?
- (lowest_pcie_level_enabled + 1 + count) :
- hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to hightest_pcie_level_enabled */
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* The level count is sent to the SMC once, at SMC table init, and never changes. */
- result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
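
To make the enable-mask scan above concrete, a worked trace with an illustrative mask value:

/*
 * With pcie_dpm_enable_mask == 0x0E (binary 01110, PCIe levels 1..3 enabled):
 *
 *   hightest_pcie_level_enabled ends at 3 (bit 4 is clear),
 *   lowest_pcie_level_enabled   ends at 1 (bit 0 is clear),
 *   count                       ends at 0 (bit 2 is already set),
 *   mid_pcie_level_enabled      = min(1 + 1 + 0, 3) == 2,
 *
 * so SCLK level 0 is pinned to PCIe level 1, SCLK level 1 to PCIe
 * level 2, and all higher SCLK levels to PCIe level 3.
 */
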
-
-static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- struct cgs_display_info info = {0, 0, NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (table_info->vdd_dep_on_mclk) {
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- &mem_level->MinVoltage, &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->MclkFrequency = clock;
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- data->display_timing.num_existing_displays = info.display_count;
-
- if ((data->mclk_stutter_mode_threshold) &&
- (clock <= data->mclk_stutter_mode_threshold) &&
- (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
- SMU74_MAX_LEVELS_MEMORY;
- struct SMU74_Discrete_MemoryLevel *levels =
- data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = polaris10_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (i == dpm_table->mclk_table.count - 1) {
- levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
- levels[i].EnabledForActivity = 1;
- }
- if (result)
- return result;
- }
-
- /* To prevent MC activity in stutter mode from pushing DPM up,
- * the UVD change complements this by keeping the MCLK in
- * a higher state by default, so that we are not affected by
- * the up threshold or MCLK DPM latency.
- */
- levels[0].ActivityLevel = 0x1f;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
- /* The level count is sent to the SMC once, at SMC table init, and never changes. */
- result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
-* @param smio_pat the SMIO pattern structure to be populated with the MVDD value
-*/
-int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find the first MVDD entry whose clock is at least the requested clock */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- uint32_t sclk_frequency;
- const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
-
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- sclk_frequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table",
- );
-
-
- result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
- PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- table->ACPILevel.DeepSleepDivId = 0;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
-
-
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",
- );
-
- us_mvdd = 0;
- if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!polaris10_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
- table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- table->MemoryACPILevel.StutterEnable = false;
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = data->vbios_boot_state.vddci_bootup_value;
-
-
- table->VceLevel[count].MinVoltage |=
- (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = data->vbios_boot_state.vddci_bootup_value;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burst_time;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burst_time;
-
- return 0;
-}
-
-static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
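-	/* Build one ARB DRAM timing entry for every (sclk, mclk) pair in the
-	 * DPM tables; the full matrix is uploaded to SMC SRAM below.
-	 */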
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = polaris10_populate_memory_timing_parameters(hwmgr,
- data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result == 0)
- result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j);
- if (result != 0)
- return result;
- }
- }
-
- result = polaris10_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU74_Discrete_MCArbDramTimingTable),
- data->sram_end);
- return result;
-}
-
-static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
-				"cannot find divider ID for Vclk", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
-				"cannot find divider ID for Dclk", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
- }
-
- return result;
-}
-
-static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int polaris10_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- data->vbios_boot_state.sclk_bootup_value) {
- data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- data->vbios_boot_state.mclk_bootup_value) {
- data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint8_t i, stretch_amount, volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-	/* Read the SMU efuse to calculate RO and determine
-	 * whether the part is SS or FF. If RO >= 1660 MHz, the part is FF.
-	 */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (67 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
-
- if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
- } else {
- min = 1100;
- max = 2100;
- }
-
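-	/* Map the 8-bit fuse value linearly onto [min, max] to estimate the
-	 * RO (ring-oscillator) frequency in MHz.
-	 */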
-	ro = efuse * (max - min) / 255 + min;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- if (hwmgr->chip_id == CHIP_POLARIS10) {
-			volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
- (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
- volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
- (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
- } else {
-			volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
- (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
- volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
- (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
- }
-
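-		/* (x * 100 + 624) / 625 rounds up, i.e. ceil(x / 6.25); the
-		 * offset is assumed to be in 6.25 mV steps.
-		 */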
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
-
- data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
- /* Populate CKS Lookup Table */
- if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
- stretch_amount != 4 && stretch_amount != 5) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
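-	/* VRConfig holds one regulator-type selector per rail (VDDGFX, VDDC,
-	 * VDDCI, MVDD), each placed at its VRCONF_*_SHIFT; in merged mode
-	 * VDDGFX shares the VDDC plane.
-	 */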
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",
- );
- }
- /* Set Vddci Voltage Controller */
- if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
- offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
- int result = 0;
- struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
- AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
- AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
- uint32_t tmp, i;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return result;
-
- result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
-
- if (0 == result) {
- table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
- table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
- table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
- table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
- table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
- table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
- table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
- table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
- table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
- table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
- table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
- table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
- table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
- table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
- table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
- AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
- AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
- AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
- AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
- AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
- AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
- AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
-
- for (i = 0; i < NUM_VFT_COLUMNS; i++) {
- AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
- AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
- }
-
- result = polaris10_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
- &tmp, data->sram_end);
-
- polaris10_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_meanNsigma,
- sizeof(AVFS_meanNsigma_t),
- data->sram_end);
-
- result = polaris10_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
- &tmp, data->sram_end);
- polaris10_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_SclkOffset,
- sizeof(AVFS_Sclk_Offset_t),
- data->sram_end);
-
- data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
-		data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1);
- }
- return result;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
- const struct polaris10_ulv_parm *ulv = &(data->ulv);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
- pp_atomctrl_clock_dividers_vi dividers;
-
- result = polaris10_setup_default_dpm_tables(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup default DPM tables!", return result);
-
- if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control)
- polaris10_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = polaris10_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT);
- }
-
- result = polaris10_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = polaris10_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = polaris10_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = polaris10_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = polaris10_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = polaris10_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = polaris10_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = polaris10_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = polaris10_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
-	result = polaris10_populate_smc_initial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = polaris10_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- result = polaris10_populate_avfs_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
-	PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result);
- table->CurrSclkPllRange = 0xff;
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = polaris10_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- } else {
- table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* For polarity, read GPIOPAD_A with the assigned GPIO pin,
-		 * since VBIOS will have programmed this register to the
-		 * 'inactive state'; the driver can then derive the 'active
-		 * state' and program the SMU with the correct polarity.
-		 */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
- & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
- && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- /* Populate BIF_SCLK levels into SMC DPM table */
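-	/* Note: the loop runs pcie_speed_table.count + 1 times; entry 0 feeds
-	 * the ULV state, entries 1..count fill LinkLevel[0..count-1].
-	 */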
- for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
- PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
-
- if (i == 0)
- table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- else
- table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- }
-
- for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all DPM data (DPM levels, level counts, etc.) to SMC memory.
-	 * The three trailing SMU74_PIDController entries are excluded from
-	 * the upload, hence the size adjustment below.
-	 */
- result = polaris10_copy_bytes_to_smc(hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
- data->sram_end);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
- const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
-
-	/* This is a read-modify-write on the first byte of the ARB table.
-	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
-	 * is the field 'current'.
-	 * This solution is ugly, but we never write the whole table, only
-	 * individual fields in it.
-	 * In reality this field should not be in that structure
-	 * but in a soft register.
-	 */
- result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, &tmp, data->sram_end);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return polaris10_write_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, tmp, data->sram_end);
-}
-
-static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableVRHotGPIOInterrupt);
-
- return 0;
-}
-
-static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- SCLK_PWRMGT_OFF, 0);
- return 0;
-}
-
-static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
-
- return 0;
-}
-
-static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
-
- return 0;
-}
-
-static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to enable Master Deep Sleep switch failed!",
- return -1);
- } else {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t soft_register_value = 0;
- uint32_t handshake_disables_offset = data->soft_regs_start
- + offsetof(SMU74_SoftRegisters, HandshakeDisables);
-
- /* enable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
- "Failed to enable SCLK DPM during DPM Start Function!",
- return -1);
-
- /* enable MCLK dpm */
- if (0 == data->mclk_dpm_key_disabled) {
-		/* Disable UVD - SMU handshake for MCLK. */
- soft_register_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, handshake_disables_offset);
- soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- handshake_disables_offset, soft_register_value);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Enable)),
- "Failed to enable MCLK DPM during DPM Start Function!",
- return -1);
-
- PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
-
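-		/* Bring up the local CAC (LCAC) blocks for MC0/MC1/CPL with
-		 * hard-coded values from the vendor init sequence.
-		 */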
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
- udelay(10);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
- }
-
- return 0;
-}
-
-static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /*enable general power management */
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 1);
-
- /* enable sclk deep sleep */
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 1);
-
- /* prepare for PCIE DPM */
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + offsetof(SMU74_SoftRegisters,
- VoltageChangeTimeout), 0x1000);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- SWRST_COMMAND_1, RESETLC, 0x0);
-/*
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -1);
-*/
-
- if (polaris10_enable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
- return -1;
- }
-
- /* enable PCIE dpm */
- if (0 == data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Enable)),
- "Failed to enable pcie DPM during DPM Start Function!",
- return -1);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition)) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableACDCGPIOInterrupt)),
- "Failed to enable AC DC GPIO Interrupt!",
- );
- }
-
- return 0;
-}
-
-static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* disable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable) == 0),
- "Failed to disable SCLK DPM!",
- return -1);
-
- /* disable MCLK dpm */
- if (!data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable) == 0),
- "Failed to disable MCLK DPM!",
- return -1);
- }
-
- return 0;
-}
-
-static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* disable general power management */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 0);
- /* disable sclk deep sleep */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 0);
-
- /* disable PCIE dpm */
- if (!data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Disable) == 0),
- "Failed to disable pcie DPM during DPM Stop Function!",
- return -1);
- }
-
- if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
- return -1;
- }
-
- return 0;
-}
-
-static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
-{
- bool protection;
- enum DPM_EVENT_SRC src;
-
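-	/* 'sources' is a bitmask of PHM_AutoThrottleSource bits; only the
-	 * combinations below are recognized.
-	 */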
- switch (sources) {
- default:
- printk(KERN_ERR "Unknown throttling event sources.");
- /* fall through */
- case 0:
- protection = false;
- /* src is unused */
- break;
- case (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << PHM_AutoThrottleSource_External):
- protection = true;
- src = DPM_EVENT_SRC_EXTERNAL;
- break;
- case (1 << PHM_AutoThrottleSource_External) |
- (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
- break;
- }
- /* Order matters - don't enable thermal protection for the wrong source. */
- if (protection) {
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
- DPM_EVENT_SRC, src);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS,
- !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController));
- } else
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS, 1);
-}
-
-static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!(data->active_auto_throttle_sources & (1 << source))) {
- data->active_auto_throttle_sources |= 1 << source;
- polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (data->active_auto_throttle_sources & (1 << source)) {
- data->active_auto_throttle_sources &= ~(1 << source);
- polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- data->pcie_performance_request = true;
-
- return 0;
-}
-
-int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
-	int tmp_result, result = 0;
-
-	tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
-	PP_ASSERT_WITH_CODE(tmp_result == 0,
-			"DPM is already running right now, no need to enable DPM!",
-			return 0);
-
- if (polaris10_voltage_control(hwmgr)) {
- tmp_result = polaris10_enable_voltage_control(hwmgr);
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "Failed to enable voltage control!",
- result = tmp_result);
-
- tmp_result = polaris10_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to construct voltage tables!",
- result = tmp_result);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
-
- tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program static screen threshold parameters!",
- result = tmp_result);
-
- tmp_result = polaris10_enable_display_gap(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable display gap!", result = tmp_result);
-
- tmp_result = polaris10_program_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program voting clients!", result = tmp_result);
-
- tmp_result = polaris10_process_firmware_header(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to process firmware header!", result = tmp_result);
-
- tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize switch from ArbF0 to F1!",
- result = tmp_result);
-
- tmp_result = polaris10_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize SMC table!", result = tmp_result);
-
- tmp_result = polaris10_init_arb_table_index(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize ARB table index!", result = tmp_result);
-
- tmp_result = polaris10_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate PM fuses!", result = tmp_result);
-
- tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
-
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
-
- tmp_result = polaris10_enable_sclk_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SCLK control!", result = tmp_result);
-
- tmp_result = polaris10_enable_smc_voltage_controller(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable voltage control!", result = tmp_result);
-
- tmp_result = polaris10_enable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ULV!", result = tmp_result);
-
- tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable deep sleep master switch!", result = tmp_result);
-
- tmp_result = polaris10_enable_didt_config(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
-			"Failed to enable DIDT config!", result = tmp_result);
-
- tmp_result = polaris10_start_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to start DPM!", result = tmp_result);
-
- tmp_result = polaris10_enable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SMC CAC!", result = tmp_result);
-
- tmp_result = polaris10_enable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable power containment!", result = tmp_result);
-
- tmp_result = polaris10_power_control_set_level(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to power control set level!", result = tmp_result);
-
- tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable thermal auto throttle!", result = tmp_result);
-
- tmp_result = polaris10_pcie_performance_request(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "pcie performance request failed!", result = tmp_result);
-
- return result;
-}
-
-int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is not running right now, no need to disable DPM!",
- return 0);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
-
- tmp_result = polaris10_disable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable power containment!", result = tmp_result);
-
- tmp_result = polaris10_disable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable SMC CAC!", result = tmp_result);
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
-
- tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable thermal auto throttle!", result = tmp_result);
-
- tmp_result = polaris10_stop_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to stop DPM!", result = tmp_result);
-
- tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable deep sleep master switch!", result = tmp_result);
-
- tmp_result = polaris10_disable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable ULV!", result = tmp_result);
-
- tmp_result = polaris10_clear_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to clear voting clients!", result = tmp_result);
-
- tmp_result = polaris10_reset_to_default(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to reset to default!", result = tmp_result);
-
- tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to force to switch arbf0!", result = tmp_result);
-
- return result;
-}
-
-int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- return phm_hwmgr_backend_fini(hwmgr);
-}
-
-int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPatchPowerState);
-
- if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl);
-
- if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableSMU7ThermalManagement);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPowerManagement);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UnTabledHardwareInterface);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SMC);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_NonABMSupportInPPLib);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-
- /* power tune caps Assume disabled */
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SQRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DBRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TDRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TCPRamping);
-
- if (hwmgr->powercontainment_enabled)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
- else
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
- if (hwmgr->chip_id == CHIP_POLARIS11)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SPLLShutdownSupport);
- return 0;
-}
-
-static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- polaris10_initialize_power_tune_defaults(hwmgr);
-
- data->pcie_gen_performance.max = PP_PCIEGen1;
- data->pcie_gen_performance.min = PP_PCIEGen3;
- data->pcie_gen_power_saving.max = PP_PCIEGen1;
- data->pcie_gen_power_saving.min = PP_PCIEGen3;
- data->pcie_lane_performance.max = 0;
- data->pcie_lane_performance.min = 16;
- data->pcie_lane_power_saving.max = 0;
- data->pcie_lane_power_saving.min = 16;
-}
-
-/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint16_t vv_id;
- uint32_t vddc = 0;
- uint16_t i, j;
- uint32_t sclk = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- int result;
-
- for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) {
- vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
- if (!phm_get_sclk_for_voltage_evv(hwmgr,
- table_info->vddc_lookup_table, vv_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- for (j = 1; j < sclk_table->count; j++) {
- if (sclk_table->entries[j].clk == sclk &&
- sclk_table->entries[j].cks_enable == 0) {
- sclk += 5000;
- break;
- }
- }
- }
-
- if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
- VOLTAGE_TYPE_VDDC,
- sclk, vv_id, &vddc) != 0) {
-				printk(KERN_WARNING "failed to retrieve EVV voltage!\n");
- continue;
- }
-
-			/* Make sure VDDC is less than 2 V, or it could burn the
-			 * ASIC. The real voltage level is in units of 0.01 mV.
-			 */
- PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
-					"Invalid VDDC value", result = -EINVAL);
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddc != 0 && vddc != vv_id) {
- data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
- data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
- data->vddc_leakage.count++;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * Change virtual leakage voltage to actual value.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param voltage pointer to the voltage value to patch
- * @param leakage_table pointer to the leakage voltage table
- */
-static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
- uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table)
-{
- uint32_t index;
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 */
- for (index = 0; index < leakage_table->count; index++) {
- /* if this voltage matches a leakage voltage ID */
- /* patch with actual leakage voltage */
- if (leakage_table->leakage_id[index] == *voltage) {
- *voltage = leakage_table->actual_voltage[index];
- break;
- }
- }
-
- if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
-		printk(KERN_ERR "Voltage value looks like a leakage ID but was not patched\n");
-}
-
-/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param lookup_table pointer to the voltage lookup table
-* @param leakage_table pointer to the leakage voltage table
-* @return always 0
-*/
-static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- struct polaris10_leakage_voltage *leakage_table)
-{
- uint32_t i;
-
- for (i = 0; i < lookup_table->count; i++)
- polaris10_patch_with_vdd_leakage(hwmgr,
- &lookup_table->entries[i].us_vdd, leakage_table);
-
- return 0;
-}
-
-static int polaris10_patch_clock_voltage_limits_with_vddc_leakage(
- struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table,
- uint16_t *vddc)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
- hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
- table_info->max_clock_voltage_on_dc.vddc;
- return 0;
-}
-
-static int polaris10_patch_voltage_dependency_tables_with_lookup_table(
- struct pp_hwmgr *hwmgr)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
- table_info->vdd_dep_on_mclk;
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- voltageId = sclk_table->entries[entryId].vddInd;
- sclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mclk_table->count; ++entryId) {
- voltageId = mclk_table->entries[entryId].vddInd;
- mclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mm_table->count; ++entryId) {
- voltageId = mm_table->entries[entryId].vddcInd;
- mm_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- return 0;
-
-}
-
-static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage. */
- return 0;
-}
-
-static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage from mm table. */
- return 0;
-}
-
-static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
- table_size = lookup_table->count;
-
- PP_ASSERT_WITH_CODE(0 != lookup_table->count,
- "Lookup table is empty", return -EINVAL);
-
-	/* Sort voltages in ascending order (simple insertion sort). */
- for (i = 0; i < table_size - 1; i++) {
- for (j = i + 1; j > 0; j--) {
- if (lookup_table->entries[j].us_vdd <
- lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
- }
- }
- }
-
- return 0;
-}
-
-static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- int tmp_result;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr,
- table_info->vddc_lookup_table, &(data->vddc_leakage));
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
- &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
- if (tmp_result)
- result = tmp_result;
-
- return result;
-}
-
-static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
- table_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
- "VDD dependency on SCLK table is missing. \
- This table is mandatory", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-		"VDD dependency on SCLK table has to have at least 1 entry. \
- This table is mandatory", return -EINVAL);
-
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
- "VDD dependency on MCLK table is missing. \
- This table is mandatory", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
-		"VDD dependency on MCLK table has to have at least 1 entry. \
- This table is mandatory", return -EINVAL);
-
- table_info->max_clock_voltage_on_ac.sclk =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.mclk =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.vddc =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
- table_info->max_clock_voltage_on_ac.vddci =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
- hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
-	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
-
- return 0;
-}
-
-int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- uint32_t i;
-
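-	/* Assumed intent of the check below: on Polaris10 revision 0xC7, the
-	 * highest MCLK DPM entry must reference at least 1.0 V (us_vdd >=
-	 * 1000); if it does not, repoint it at the first real lookup entry
-	 * that qualifies.
-	 */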
- if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
- if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
- return 0;
-
- for (i = 0; i < lookup_table->count; i++) {
- if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
- dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
- return 0;
- }
- }
- }
- return 0;
-}
-
-int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
- uint32_t temp_reg;
- int result;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
-
- data->dll_default_on = false;
- data->sram_end = SMC_RAM_END;
- data->mclk_dpm0_activity_target = 0xa;
- data->disable_dpm_mask = 0xFF;
- data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
- data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
- data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[5] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT;
- data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT;
-
- data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7;
-
- data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-
- data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT;
-
- /* need to set voltage control types before EVV patching */
- data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE;
- data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
- data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
-
- data->enable_tdc_limit_feature = true;
- data->enable_pkg_pwr_tracking_feature = true;
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->mclk_stutter_mode_threshold = 40000;
-
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
- data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
- data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
- data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
- data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
- data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (table_info->cac_dtp_table->usClockStretchAmount != 0)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
-
- polaris10_set_features_platform_caps(hwmgr);
-
- polaris10_patch_voltage_workaround(hwmgr);
- polaris10_init_dpm_defaults(hwmgr);
-
- /* Get leakage voltage based on leakage ID. */
- result = polaris10_get_evv_voltages(hwmgr);
-
- if (result) {
-		printk(KERN_ERR "Failed to get EVV voltages. Aborting driver load!\n");
- return -1;
- }
-
- polaris10_complete_dependency_tables(hwmgr);
- polaris10_set_private_data_based_on_pptable(hwmgr);
-
-	/* Initialize Dynamic State Adjustment Rule Settings */
- result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
- if (0 == result) {
- struct cgs_system_info sys_info = {0};
-
- data->is_tlu_enabled = false;
-
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- POLARIS10_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
- temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
- switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
- case 0:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
- break;
- case 1:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
- break;
- case 2:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
- break;
- case 3:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
- break;
- case 4:
- temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
- break;
- default:
- PP_ASSERT_WITH_CODE(0,
- "Failed to set up PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
- );
- break;
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
- }
-
- if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
- hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
- hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
- (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
- (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
- (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
-
- table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
- (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
-
- table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
- table_info->cac_dtp_table->usOperatingTempStep = 1;
- table_info->cac_dtp_table->usOperatingTempHyst = 1;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
-
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
-
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
- table_info->cac_dtp_table->usOperatingTempMinLimit;
-
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
- table_info->cac_dtp_table->usOperatingTempMaxLimit;
-
- hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
- table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
-
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
- table_info->cac_dtp_table->usOperatingTempStep;
-
- hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
- table_info->cac_dtp_table->usTargetOperatingTemp;
- }
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
- else
- data->pcie_gen_cap = (uint32_t)sys_info.value;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
- else
- data->pcie_lane_cap = (uint32_t)sys_info.value;
-
- hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
- /* The true clock step depends on the frequency, typically 4.5 or 9 MHz; 5 MHz is used here. */
- hwmgr->platform_descriptor.clockStep.engineClock = 500;
- hwmgr->platform_descriptor.clockStep.memoryClock = 500;
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- polaris10_hwmgr_backend_fini(hwmgr);
- }
-
- return 0;
-}
-
-static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t level, tmp;
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
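- /* Walk the enable mask to find the index of its highest set bit,
-  * i.e. the highest enabled PCIe DPM level (a mask of 0b0110
-  * yields level 2).
-  */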
- level = 0;
- tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
-
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel, level);
- }
- }
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
-
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
-
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- return 0;
-}
-
-static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- phm_apply_dal_min_voltage_request(hwmgr);
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- }
-
- return 0;
-}
-
-static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!polaris10_is_dpm_running(hwmgr))
- return -EINVAL;
-
- if (!data->pcie_dpm_key_disabled) {
- smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
- }
-
- return polaris10_upload_dpm_level_enable_mask(hwmgr);
-}
-
-static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data =
- (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t level;
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = phm_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = phm_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- level = phm_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
- }
- }
-
- return 0;
-}
-
-static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
-{
- int ret = 0;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = polaris10_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = polaris10_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = polaris10_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
- hwmgr->dpm_level = level;
-
- return ret;
-}
-
-static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
- return sizeof(struct polaris10_power_state);
-}
-
-static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
- struct pp_power_state *request_ps,
- const struct pp_power_state *current_ps)
-{
-
- struct polaris10_power_state *polaris10_ps =
- cast_phw_polaris10_power_state(&request_ps->hardware);
- uint32_t sclk;
- uint32_t mclk;
- struct PP_Clocks minimum_clocks = {0};
- bool disable_mclk_switching;
- bool disable_mclk_switching_for_frame_lock;
- struct cgs_display_info info = {0};
- const struct phm_clock_and_voltage_limits *max_limits;
- uint32_t i;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int32_t count;
- int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
- data->battery_state = (PP_StateUILabel_Battery ==
- request_ps->classification.ui_label);
-
- PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",
- );
-
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
- &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
- &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
- /* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
- for (i = 0; i < polaris10_ps->performance_level_count; i++) {
- if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk)
- polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk;
- if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk)
- polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk;
- }
- }
-
- polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- /* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
-
- /* TODO: GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
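- /* For a stable p-state, target 75% of the AC SCLK limit, then snap
-  * it down to the nearest entry in the SCLK dependency table below.
-  */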
- stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
- for (count = table_info->vdd_dep_on_sclk->count - 1;
- count >= 0; count--) {
- if (stable_pstate_sclk >=
- table_info->vdd_dep_on_sclk->entries[count].clk) {
- stable_pstate_sclk =
- table_info->vdd_dep_on_sclk->entries[count].clk;
- break;
- }
- }
-
- if (count < 0)
- stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
-
- stable_pstate_mclk = max_limits->mclk;
-
- minimum_clocks.engineClock = stable_pstate_sclk;
- minimum_clocks.memoryClock = stable_pstate_mclk;
- }
-
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- polaris10_ps->performance_levels[1].engine_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- polaris10_ps->performance_levels[1].memory_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
- disable_mclk_switching_for_frame_lock = phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
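- /* MCLK switching can disturb active scanout, so it is disabled when
-  * more than one display is active or frame lock is in use.
-  */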
- disable_mclk_switching = (1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock;
-
- sclk = polaris10_ps->performance_levels[0].engine_clock;
- mclk = polaris10_ps->performance_levels[0].memory_clock;
-
- if (disable_mclk_switching)
- mclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].memory_clock;
-
- if (sclk < minimum_clocks.engineClock)
- sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
- max_limits->sclk : minimum_clocks.engineClock;
-
- if (mclk < minimum_clocks.memoryClock)
- mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
- max_limits->mclk : minimum_clocks.memoryClock;
-
- polaris10_ps->performance_levels[0].engine_clock = sclk;
- polaris10_ps->performance_levels[0].memory_clock = mclk;
-
- polaris10_ps->performance_levels[1].engine_clock =
- (polaris10_ps->performance_levels[1].engine_clock >=
- polaris10_ps->performance_levels[0].engine_clock) ?
- polaris10_ps->performance_levels[1].engine_clock :
- polaris10_ps->performance_levels[0].engine_clock;
-
- if (disable_mclk_switching) {
- if (mclk < polaris10_ps->performance_levels[1].memory_clock)
- mclk = polaris10_ps->performance_levels[1].memory_clock;
-
- polaris10_ps->performance_levels[0].memory_clock = mclk;
- polaris10_ps->performance_levels[1].memory_clock = mclk;
- } else {
- if (polaris10_ps->performance_levels[1].memory_clock <
- polaris10_ps->performance_levels[0].memory_clock)
- polaris10_ps->performance_levels[1].memory_clock =
- polaris10_ps->performance_levels[0].memory_clock;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- for (i = 0; i < polaris10_ps->performance_level_count; i++) {
- polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
- polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
- polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
- polaris10_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
- }
- }
- return 0;
-}
-
-static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct polaris10_power_state *polaris10_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
- if (low)
- return polaris10_ps->performance_levels[0].memory_clock;
- else
- return polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count-1].memory_clock;
-}
-
-static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct polaris10_power_state *polaris10_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
- if (low)
- return polaris10_ps->performance_levels[0].engine_clock;
- else
- return polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count-1].engine_clock;
-}
-
-static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps;
- ATOM_FIRMWARE_INFO_V2_2 *fw_info;
- uint16_t size;
- uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
- /* First retrieve the Boot clocks and VDDC from the firmware info table.
- * We assume here that fw_info is unchanged if this call fails.
- */
- fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
- hwmgr->device, index,
- &size, &frev, &crev);
- if (!fw_info)
- /* During a test, there is no firmware info table. */
- return 0;
-
- /* Patch the state. */
- data->vbios_boot_state.sclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultEngineClock);
- data->vbios_boot_state.mclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultMemoryClock);
- data->vbios_boot_state.mvdd_bootup_value =
- le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
- data->vbios_boot_state.vddc_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCVoltage);
- data->vbios_boot_state.vddci_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
- data->vbios_boot_state.pcie_gen_bootup_value =
- phm_get_current_pcie_speed(hwmgr);
-
- data->vbios_boot_state.pcie_lane_bootup_value =
- (uint16_t)phm_get_current_pcie_lane_number(hwmgr);
-
- /* set boot power state */
- ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
- ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
- ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
- ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
- return 0;
-}
-
-static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
- void *state, struct pp_power_state *power_state,
- void *pp_table, uint32_t classification_flag)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_power_state *polaris10_power_state =
- (struct polaris10_power_state *)(&(power_state->hardware));
- struct polaris10_performance_level *performance_level;
- ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
- ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
- (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
- PPTable_Generic_SubTable_Header *sclk_dep_table =
- (PPTable_Generic_SubTable_Header *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-
- ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
- (ATOM_Tonga_MCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
- /* The following fields are not initialized here: id, orderedList, allStatesList. */
- power_state->classification.ui_label =
- (le16_to_cpu(state_entry->usClassification) &
- ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
- ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
- power_state->classification.flags = classification_flag;
- /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
- power_state->classification.temporary_state = false;
- power_state->classification.to_be_deleted = false;
-
- power_state->validation.disallowOnDC =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_DISALLOW_ON_DC));
-
- power_state->pcie.lanes = 0;
-
- power_state->display.disableFrameModulation = false;
- power_state->display.limitRefreshrate = false;
- power_state->display.enableVariBright =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_ENABLE_VARIBRIGHT));
-
- power_state->validation.supportedPowerLevels = 0;
- power_state->uvd_clocks.VCLK = 0;
- power_state->uvd_clocks.DCLK = 0;
- power_state->temperatures.min = 0;
- power_state->temperatures.max = 0;
-
- performance_level = &(polaris10_power_state->performance_levels
- [polaris10_power_state->performance_level_count++]);
-
- PP_ASSERT_WITH_CODE(
- (polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS),
- "Performance levels exceeds SMC limit!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (polaris10_power_state->performance_level_count <=
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
- "Performance levels exceeds Driver limit!",
- return -1);
-
- /* Performance levels are arranged from low to high. */
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexLow].ulMclk;
- if (sclk_dep_table->ucRevId == 0)
- performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
- [state_entry->ucEngineClockIndexLow].ulSclk;
- else if (sclk_dep_table->ucRevId == 1)
- performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
- [state_entry->ucEngineClockIndexLow].ulSclk;
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenLow);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneLow);
-
- performance_level = &(polaris10_power_state->performance_levels
- [polaris10_power_state->performance_level_count++]);
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexHigh].ulMclk;
-
- if (sclk_dep_table->ucRevId == 0)
- performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
- [state_entry->ucEngineClockIndexHigh].ulSclk;
- else if (sclk_dep_table->ucRevId == 1)
- performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
- [state_entry->ucEngineClockIndexHigh].ulSclk;
-
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenHigh);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
-
- return 0;
-}
-
-static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index, struct pp_power_state *state)
-{
- int result;
- struct polaris10_power_state *ps;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- state->hardware.magic = PHM_VIslands_Magic;
-
- ps = (struct polaris10_power_state *)(&state->hardware);
-
- result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
- polaris10_get_pp_table_entry_callback_func);
-
- /* This is the earliest point at which we have both the dependency
- * tables and the VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry
- * retrieves the latter. If there is only one VDDCI/MCLK level, check
- * whether it matches the VBIOS boot state.
- */
- if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
- if (dep_mclk_table->entries[0].clk !=
- data->vbios_boot_state.mclk_bootup_value)
- printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot MCLK level");
- if (dep_mclk_table->entries[0].vddci !=
- data->vbios_boot_state.vddci_bootup_value)
- printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot VDDCI level");
- }
-
- /* set DC compatible flag if this state supports DC */
- if (!state->validation.disallowOnDC)
- ps->dc_compatible = true;
-
- if (state->classification.flags & PP_StateClassificationFlag_ACPI)
- data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
-
- ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
- ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
-
- if (!result) {
- uint32_t i;
-
- switch (state->classification.ui_label) {
- case PP_StateUILabel_Performance:
- data->use_pcie_performance_levels = true;
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_performance.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_performance.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_performance.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.max =
- ps->performance_levels[i].pcie_lane;
- if (data->pcie_lane_performance.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- case PP_StateUILabel_Battery:
- data->use_pcie_power_saving_levels = true;
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_power_saving.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_power_saving.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_power_saving.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.max =
- ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_power_saving.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- default:
- break;
- }
- }
- return 0;
-}
-
-static void
-polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- uint32_t sclk, mclk, activity_percent;
- uint32_t offset;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
- mclk / 100, sclk / 100);
-
- offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
- activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
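- /* The averaged activity appears to be an 8.8 fixed-point percentage;
-  * adding 0x80 rounds it to the nearest integer before the shift.
-  */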
- activity_percent += 0x80;
- activity_percent >>= 8;
-
- seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
- seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
- seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct polaris10_power_state *polaris10_ps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- uint32_t sclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].engine_clock;
- struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- uint32_t mclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].memory_clock;
- struct PP_Clocks min_clocks = {0};
- uint32_t i;
- struct cgs_display_info info = {0};
-
- data->need_update_smu7_dpm_table = 0;
-
- for (i = 0; i < sclk_table->count; i++) {
- if (sclk == sclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= sclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- } else {
- /* TODO: Check SCLK in DAL's minimum clocks
- * in case DeepSleep divider update is required.
- */
- if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
- (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
- data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK))
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- for (i = 0; i < mclk_table->count; i++) {
- if (mclk == mclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= mclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
- return 0;
-}
-
-static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
- const struct polaris10_power_state *polaris10_ps)
-{
- uint32_t i;
- uint32_t sclk, max_sclk = 0;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-
- for (i = 0; i < polaris10_ps->performance_level_count; i++) {
- sclk = polaris10_ps->performance_levels[i].engine_clock;
- if (max_sclk < sclk)
- max_sclk = sclk;
- }
-
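- /* The PCIe speed table parallels the SCLK table; clamp the index in
-  * case the PCIe table has fewer entries.
-  */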
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
- return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
- dpm_table->pcie_speed_table.dpm_levels
- [dpm_table->pcie_speed_table.count - 1].value :
- dpm_table->pcie_speed_table.dpm_levels[i].value);
- }
-
- return 0;
-}
-
-static int polaris10_request_link_speed_change_before_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_power_state *polaris10_nps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- const struct polaris10_power_state *polaris10_cps =
- cast_const_phw_polaris10_power_state(states->pcurrent_state);
-
- uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps);
- uint16_t current_link_speed;
-
- if (data->force_pcie_gen == PP_PCIEGenInvalid)
- current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps);
- else
- current_link_speed = data->force_pcie_gen;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->pspp_notify_required = false;
-
- if (target_link_speed > current_link_speed) {
- switch (target_link_speed) {
- case PP_PCIEGen3:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
- break;
- data->force_pcie_gen = PP_PCIEGen2;
- if (current_link_speed == PP_PCIEGen2)
- break;
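- /* fall through - retry with a Gen2 request */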
- case PP_PCIEGen2:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
- break;
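- /* fall through - fall back to the current link speed */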
- default:
- data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- data->pspp_notify_required = true;
- }
-
- return 0;
-}
-
-static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
- "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
- "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int polaris10_populate_and_upload_sclk_mclk_dpm_levels(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result = 0;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct polaris10_power_state *polaris10_ps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t sclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].engine_clock;
- uint32_t mclk = polaris10_ps->performance_levels
- [polaris10_ps->performance_level_count - 1].memory_clock;
- struct polaris10_dpm_table *dpm_table = &data->dpm_table;
-
- struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
- dpm_table->sclk_table.dpm_levels
- [dpm_table->sclk_table.count - 1].value = sclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
- /* Need to do calculation based on the golden DPM table
- * as the Heatmap GPU Clock axis is also based on the default values
- */
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count - 1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
-
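- /* Scale each intermediate level by the same percentage the top
-  * level was moved relative to the golden (default) table, so the
-  * shape of the default curve is preserved.
-  */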
- for (i = dpm_count; i > 1; i--) {
- if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
- clock_percent =
- ((sclk
- - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
- ) * 100)
- / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value +
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent)/100;
-
- } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
- clock_percent =
- ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
- - sclk) * 100)
- / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value -
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else {
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value;
- }
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
- dpm_table->mclk_table.dpm_levels
- [dpm_table->mclk_table.count - 1].value = mclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
- clock_percent = ((mclk -
- golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
- / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value +
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
-
- } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
- clock_percent = (
- (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
- * 100)
- / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value -
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else {
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value;
- }
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = polaris10_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
- /* Populate the MCLK DPM table and upload it to the SMU7. */
- result = polaris10_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- return result;
-}
-
-static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
- struct polaris10_single_dpm_table *dpm_table,
- uint32_t low_limit, uint32_t high_limit)
-{
- uint32_t i;
-
- for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit)
- || (dpm_table->dpm_levels[i].value > high_limit))
- dpm_table->dpm_levels[i].enabled = false;
- else
- dpm_table->dpm_levels[i].enabled = true;
- }
-
- return 0;
-}
-
-static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
- const struct polaris10_power_state *polaris10_ps)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t high_limit_count;
-
- PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1),
- "power state did not have any performance level",
- return -1);
-
- high_limit_count = (1 == polaris10_ps->performance_level_count) ? 0 : 1;
-
- polaris10_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.sclk_table),
- polaris10_ps->performance_levels[0].engine_clock,
- polaris10_ps->performance_levels[high_limit_count].engine_clock);
-
- polaris10_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.mclk_table),
- polaris10_ps->performance_levels[0].memory_clock,
- polaris10_ps->performance_levels[high_limit_count].memory_clock);
-
- return 0;
-}
-
-static int polaris10_generate_dpm_level_enable_mask(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_power_state *polaris10_ps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
-
- result = polaris10_trim_dpm_states(hwmgr, polaris10_ps);
- if (result)
- return result;
-
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_UVDDPM_Enable :
- PPSMC_MSG_UVDDPM_Disable);
-}
-
-int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_VCEDPM_Enable :
- PPSMC_MSG_VCEDPM_Disable);
-}
-
-int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- PPSMC_MSG_SAMUDPM_Enable :
- PPSMC_MSG_SAMUDPM_Disable);
-}
-
-int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
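- /* Align the byte offset down to a 32-bit boundary so the
-  * read-modify-write below operates on the dword holding UvdBootLevel.
-  */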
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
- }
-
- return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- else
- data->smc_state_table.VceBootLevel = 0;
-
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << data->smc_state_table.VceBootLevel);
- }
-
- polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
-
- return 0;
-}
-
-int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
- if (!bgate) {
- data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.SamuBootLevel));
- }
-
- return polaris10_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
-static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
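- /* Swap to the SMC's expected byte order before copying it over. */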
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = polaris10_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- data->sram_end);
- }
-
- return result;
-}
-
-static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return polaris10_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-
- PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
- "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
- PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_UnfreezeLevel),
- "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- data->need_update_smu7_dpm_table = 0;
-
- return 0;
-}
-
-static int polaris10_notify_link_speed_change_after_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_power_state *polaris10_ps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps);
- uint8_t request;
-
- if (data->pspp_notify_required) {
- if (target_link_speed == PP_PCIEGen3)
- request = PCIE_PERF_REQ_GEN3;
- else if (target_link_speed == PP_PCIEGen2)
- request = PCIE_PERF_REQ_GEN2;
- else
- request = PCIE_PERF_REQ_GEN1;
-
- if (request == PCIE_PERF_REQ_GEN1 &&
- phm_get_current_pcie_speed(hwmgr) > 0)
- return 0;
-
- if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
- if (PP_PCIEGen2 == target_link_speed)
- printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
- else
- printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
- }
- }
-
- return 0;
-}
-
-static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
- return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
-}
-
-static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
- int tmp_result, result = 0;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to find DPM states clocks in DPM table!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- polaris10_request_link_speed_change_before_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to request link speed change before state change!",
- result = tmp_result);
- }
-
- tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
- tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate and upload SCLK MCLK DPM levels!",
- result = tmp_result);
-
- tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to generate DPM level enabled mask!",
- result = tmp_result);
-
- tmp_result = polaris10_update_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update SCLK threshold!",
- result = tmp_result);
-
- tmp_result = polaris10_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program memory timing parameters!",
- result = tmp_result);
-
- tmp_result = polaris10_notify_smc_display(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify smc display settings!",
- result = tmp_result);
-
- tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to unfreeze SCLK MCLK DPM!",
- result = tmp_result);
-
- tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to upload DPM level enabled mask!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- polaris10_notify_link_speed_change_after_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify link speed change after state change!",
- result = tmp_result);
- }
- data->apply_optimized_settings = false;
- return result;
-}
-
-static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
-}
-
-int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
-{
- PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
-
- return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
-}
-
-int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
-{
- uint32_t num_active_displays = 0;
- struct cgs_display_info info = {0};
- info.mode_info = NULL;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- num_active_displays = info.display_count;
-
- if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
- polaris10_notify_smc_display_change(hwmgr, false);
-
- return 0;
-}
-
-/**
-* Programs the display gap
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always OK
-*/
-int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t num_active_displays = 0;
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
- uint32_t display_gap2;
- uint32_t pre_vbi_time_in_us;
- uint32_t frame_time_in_us;
- uint32_t ref_clock;
- uint32_t refresh_rate = 0;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
-
- info.mode_info = &mode_info;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_active_displays = info.display_count;
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- ref_clock = mode_info.ref_clock;
- refresh_rate = mode_info.refresh_rate;
-
- if (0 == refresh_rate)
- refresh_rate = 60;
-
- frame_time_in_us = 1000000 / refresh_rate;
-
- pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
- data->frame_time_x2 = frame_time_in_us * 2 / 100;
-
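- /* ref_clock is presumably in 10 kHz units, so ref_clock / 100 gives
-  * ticks per microsecond; display_gap2 is the pre-VBI time expressed
-  * in reference-clock ticks.
-  */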
- display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
- return 0;
-}
-
-int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
- return polaris10_program_display_gap(hwmgr);
-}
-
-/**
-* Set maximum target operating fan output RPM
-*
-* @param hwmgr: the address of the powerplay hardware manager.
- * @param us_max_fan_rpm: max operating fan RPM value.
-* @return The response that came from the SMC.
-*/
-static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
-}
-
-int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
- const void *thermal_interrupt_info)
-{
- return 0;
-}
-
-bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- bool is_update_required = false;
- struct cgs_display_info info = {0, 0, NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- is_update_required = true;
- /* TODO: need to get the deep sleep clock from DAL
- if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
- if (min_clocks.engineClockInSR != data->display_timing.minClockInSR &&
- (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
- data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK))
- is_update_required = true;
-*/
- return is_update_required;
-}
-
-static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1,
- const struct polaris10_performance_level *pl2)
-{
- return ((pl1->memory_clock == pl2->memory_clock) &&
- (pl1->engine_clock == pl2->engine_clock) &&
- (pl1->pcie_gen == pl2->pcie_gen) &&
- (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
- const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
- const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
- int i;
-
- if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
- return -EINVAL;
-
- /* If the two states don't even have the same number of performance levels they cannot be the same state. */
- if (psa->performance_level_count != psb->performance_level_count) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < psa->performance_level_count; i++) {
- if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
- /* If we have found even one performance level pair that is different the states are different. */
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
- *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
- *equal &= (psa->sclk_threshold == psb->sclk_threshold);
-
- return 0;
-}
-
-int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- uint32_t vbios_version;
-
- /* Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/
-
- phm_get_mc_microcode_version(hwmgr);
- vbios_version = hwmgr->microcode_version_info.MC & 0xf;
- /* Full version of MC ucode has already been loaded. */
- if (vbios_version == 0) {
- data->need_long_memory_training = false;
- return 0;
- }
-
- data->need_long_memory_training = false;
-
-/*
- * PPMCME_FirmwareDescriptorEntry *pfd = NULL;
- pfd = &tonga_mcmeFirmware;
- if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
- polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,
- pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
- pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
-*/
- return 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
- & CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
- & CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
- & CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;
-
- return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
- data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
-
- return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
- return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
-
- return 0;
-}
-
-static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- data->low_sclk_interrupt_threshold = 0;
-
- return 0;
-}
-
-int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- polaris10_upload_mc_firmware(hwmgr);
-
- tmp_result = polaris10_read_clock_registers(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to read clock registers!", result = tmp_result);
-
- tmp_result = polaris10_get_memory_type(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get memory type!", result = tmp_result);
-
- tmp_result = polaris10_enable_acpi_power_management(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ACPI power management!", result = tmp_result);
-
- tmp_result = polaris10_init_power_gate_state(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init power gate state!", result = tmp_result);
-
- tmp_result = phm_get_mc_microcode_version(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get MC microcode version!", result = tmp_result);
-
- tmp_result = polaris10_init_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init sclk threshold!", result = tmp_result);
-
- return result;
-}
-
-static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- return -EINVAL;
-
- switch (type) {
- case PP_SCLK:
- if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
- break;
- case PP_MCLK:
- if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
- break;
- case PP_PCIE:
- {
- uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
-
- while (tmp >>= 1)
- level++;
-
- if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
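
The PP_PCIE branch above collapses the enabled-mask into a single forced level by taking the index of its highest set bit. A minimal standalone sketch of that conversion (illustrative only, not a kernel helper):

	#include <linux/types.h>

	/* e.g. mask 0x6 (levels 1 and 2 enabled) yields 2; 0 yields 0 */
	static uint32_t highest_enabled_level(uint32_t mask)
	{
		uint32_t level = 0;

		while (mask >>= 1)
			level++;
		return level;
	}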
-
-static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
- uint32_t speedCntl = 0;
-
- /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
- speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
- return((uint16_t)PHM_GET_FIELD(speedCntl,
- PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, char *buf)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
-
- switch (type) {
- case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_PCIE:
- pcie_speed = polaris10_get_current_pcie_speed(hwmgr);
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_speed != pcie_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
- (i == now) ? "*" : "");
- break;
- default:
- break;
- }
- return size;
-}
-
-static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
- if (mode) {
- /* stop auto-manage */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
- polaris10_fan_ctrl_set_static_mode(hwmgr, mode);
- } else
- /* restart auto-manage */
- polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-
- return 0;
-}
-
-static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr->fan_ctrl_is_in_default_mode)
- return hwmgr->fan_ctrl_default_mode;
- else
- return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct polaris10_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- int value;
-
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return value;
-}
-
-static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- struct pp_power_state *ps;
- struct polaris10_power_state *polaris10_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
- polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
- value / 100 +
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return 0;
-}
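
get_sclk_od() reports the overdrive of the highest sclk level as a percentage over the golden table, and set_sclk_od() applies the inverse mapping. A worked sketch with hypothetical numbers (table clocks are in 10 kHz units, so value / 100 is MHz):

	#include <linux/types.h>

	static void sclk_od_example(void)
	{
		uint32_t golden = 113000; /* hypothetical golden top level, 1130 MHz */
		uint32_t od = 10;         /* request +10% overdrive */
		uint32_t clk = golden * od / 100 + golden;    /* 124300, i.e. 1243 MHz */
		uint32_t pct = (clk - golden) * 100 / golden; /* 10, as get_sclk_od() would report */

		(void)clk;
		(void)pct;
	}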
-
-static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct polaris10_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- int value;
-
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return value;
-}
-
-static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct polaris10_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- struct pp_power_state *ps;
- struct polaris10_power_state *polaris10_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
-
- polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
- value / 100 +
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return 0;
-}
-static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
- .backend_init = &polaris10_hwmgr_backend_init,
- .backend_fini = &polaris10_hwmgr_backend_fini,
- .asic_setup = &polaris10_setup_asic_task,
- .dynamic_state_management_enable = &polaris10_enable_dpm_tasks,
- .apply_state_adjust_rules = polaris10_apply_state_adjust_rules,
- .force_dpm_level = &polaris10_force_dpm_level,
- .power_state_set = polaris10_set_power_state_tasks,
- .get_power_state_size = polaris10_get_power_state_size,
- .get_mclk = polaris10_dpm_get_mclk,
- .get_sclk = polaris10_dpm_get_sclk,
- .patch_boot_state = polaris10_dpm_patch_boot_state,
- .get_pp_table_entry = polaris10_get_pp_table_entry,
- .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
- .print_current_perforce_level = polaris10_print_current_perforce_level,
- .powerdown_uvd = polaris10_phm_powerdown_uvd,
- .powergate_uvd = polaris10_phm_powergate_uvd,
- .powergate_vce = polaris10_phm_powergate_vce,
- .disable_clock_power_gating = polaris10_phm_disable_clock_power_gating,
- .update_clock_gatings = polaris10_phm_update_clock_gatings,
- .notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment,
- .display_config_changed = polaris10_display_configuration_changed_task,
- .set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output,
- .set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output,
- .get_temperature = polaris10_thermal_get_temperature,
- .stop_thermal_controller = polaris10_thermal_stop_thermal_controller,
- .get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent,
- .reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default,
- .get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm,
- .set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm,
- .uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller,
- .register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt,
- .check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration,
- .check_states_equal = polaris10_check_states_equal,
- .set_fan_control_mode = polaris10_set_fan_control_mode,
- .get_fan_control_mode = polaris10_get_fan_control_mode,
- .force_clock_level = polaris10_force_clock_level,
- .print_clock_levels = polaris10_print_clock_levels,
- .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
- .get_sclk_od = polaris10_get_sclk_od,
- .set_sclk_od = polaris10_set_sclk_od,
- .get_mclk_od = polaris10_get_mclk_od,
- .set_mclk_od = polaris10_set_mclk_od,
-};
-
-int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
- hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
- hwmgr->pptable_func = &tonga_pptable_funcs;
- pp_polaris10_thermal_initialize(hwmgr);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
deleted file mode 100644
index b206632d4650..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
+++ /dev/null
@@ -1,716 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <asm/div64.h>
-#include "polaris10_thermal.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_smumgr.h"
-#include "polaris10_ppsmc.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
- struct phm_fan_speed_info *fan_speed_info)
-{
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- fan_speed_info->supports_percent_read = true;
- fan_speed_info->supports_percent_write = true;
- fan_speed_info->min_percent = 0;
- fan_speed_info->max_percent = 100;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
- hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
- fan_speed_info->supports_rpm_read = true;
- fan_speed_info->supports_rpm_write = true;
- fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
- fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
- } else {
- fan_speed_info->min_rpm = 0;
- fan_speed_info->max_rpm = 0;
- }
-
- return 0;
-}
-
-int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
- uint32_t *speed)
-{
- uint32_t duty100;
- uint32_t duty;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
- duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_STATUS, FDO_PWM_DUTY);
-
- if (duty100 == 0)
- return -EINVAL;
-
-
- tmp64 = (uint64_t)duty * 100;
- do_div(tmp64, duty100);
- *speed = (uint32_t)tmp64;
-
- if (*speed > 100)
- *speed = 100;
-
- return 0;
-}
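
The percent readback is a 64-bit multiply followed by do_div(), which divides in place and avoids a native 64-bit division on 32-bit builds. A hedged standalone equivalent:

	#include <linux/types.h>
	#include <asm/div64.h>

	/* With duty100 = 255 and duty = 128 this returns 50 (12800 / 255 = 50.19,
	 * truncated), matching the clamp-to-100 behaviour above. */
	static uint32_t duty_to_percent(uint32_t duty, uint32_t duty100)
	{
		uint64_t tmp = (uint64_t)duty * 100;

		if (duty100 == 0)
			return 0;
		do_div(tmp, duty100);
		return tmp > 100 ? 100 : (uint32_t)tmp;
	}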
-
-int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
- uint32_t tach_period;
- uint32_t crystal_clock_freq;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution == 0))
- return 0;
-
- tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_TACH_STATUS, TACH_PERIOD);
-
- if (tach_period == 0)
- return -EINVAL;
-
- crystal_clock_freq = tonga_get_xclk(hwmgr);
-
- *speed = 60 * crystal_clock_freq * 10000 / tach_period;
-
- return 0;
-}
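
TACH_PERIOD counts reference-clock ticks per tachometer period, so with the reference clock expressed in 10 kHz units, 60 * xclk * 10000 is ticks per minute and the quotient is RPM. A worked sketch; the 27 MHz reference value is an assumption, not taken from this file:

	#include <linux/types.h>

	/* Assumes tonga_get_xclk() reports the reference clock in 10 kHz units,
	 * e.g. 2700 for 27 MHz: 60 * 2700 * 10000 / 540000 = 3000 RPM. */
	static uint32_t tach_period_to_rpm(uint32_t xclk_10khz, uint32_t tach_period)
	{
		return 60 * xclk_10khz * 10000 / tach_period;
	}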
-
-/**
-* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param mode the fan control mode: 0 = default, 1 = by percent, 5 = by RPM.
-* @exception Should always succeed.
-*/
-int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
-
- if (hwmgr->fan_ctrl_is_in_default_mode) {
- hwmgr->fan_ctrl_default_mode =
- PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
- hwmgr->tmin =
- PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, TMIN);
- hwmgr->fan_ctrl_is_in_default_mode = false;
- }
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, TMIN, 0);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE, mode);
-
- return 0;
-}
-
-/**
-* Reset Fan Speed Control to default mode.
-* @param hwmgr the address of the powerplay hardware manager.
-* @exception Should always succeed.
-*/
-int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
-{
- if (!hwmgr->fan_ctrl_is_in_default_mode) {
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, TMIN, hwmgr->tmin);
- hwmgr->fan_ctrl_is_in_default_mode = true;
- }
-
- return 0;
-}
-
-int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM))
- hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM);
- else
- hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanPWM);
-
- } else {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
- }
-
- if (!result && hwmgr->thermal_controller.
- advanceFanControlParameters.ucTargetTemperature)
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanTemperatureTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucTargetTemperature);
-
- return result;
-}
-
-
-int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
-}
-
-/**
-* Set Fan Speed in percent.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed is the percentage value (0% - 100%) to be set.
-* @exception Fails if the 100% setting appears to be 0.
-*/
-int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
- uint32_t speed)
-{
- uint32_t duty100;
- uint32_t duty;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- if (speed > 100)
- speed = 100;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (uint64_t)speed * duty100;
- do_div(tmp64, 100);
- duty = (uint32_t)tmp64;
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
-
- return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reset Fan Speed to default.
-* @param hwmgr the address of the powerplay hardware manager.
-* @exception Always succeeds.
-*/
-int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
- result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
- if (!result)
- result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
- } else
- result = polaris10_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
-}
-
-/**
-* Set Fan Speed in RPM.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed is the RPM value (min - max) to be set.
-* @exception Fails if the speed does not lie between min and max.
-*/
-int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
- uint32_t tach_period;
- uint32_t crystal_clock_freq;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution == 0) ||
- (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
- (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
- return 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
-
- crystal_clock_freq = tonga_get_xclk(hwmgr);
-
- tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_TACH_STATUS, TACH_PERIOD, tach_period);
-
- return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
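
The write path is the inverse of the read path above, but divides by 8 * speed where the read path divides only by tach_period; the extra factor of 8 presumably folds in tach edges per revolution, though that is an inference from the code rather than something it states. A sketch:

	#include <linux/types.h>

	/* With xclk = 2700 (27 MHz in 10 kHz units, an assumption) and a target
	 * of 3000 RPM: 1,620,000,000 / 24,000 = 67500 ticks. */
	static uint32_t rpm_to_tach_period(uint32_t xclk_10khz, uint32_t rpm)
	{
		return 60 * xclk_10khz * 10000 / (8 * rpm);
	}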
-
-/**
-* Reads the remote temperature from the thermal controller.
-*
-* @param hwmgr The address of the hardware manager.
-*/
-int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
- int temp;
-
- temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_STATUS, CTF_TEMP);
-
- /* Bit 9 means the reading is lower than the lowest usable value. */
- if (temp & 0x200)
- temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
- else
- temp = temp & 0x1ff;
-
- temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
- return temp;
-}
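
CTF_TEMP is a 9-bit magnitude in bits [8:0] with bit 9 flagging an out-of-range reading, and the result is scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES. A hedged decode, assuming that constant is 1000 (degrees C to millidegrees):

	/* Raw 0x05A decodes to 90 degC and returns 90000; a raw value with
	 * bit 9 set is clamped to POLARIS10_THERMAL_MAXIMUM_TEMP_READING (255),
	 * mirroring the function above. */
	static int decode_ctf_temp(int raw)
	{
		int temp = (raw & 0x200) ? 255 : (raw & 0x1ff);

		return temp * 1000; /* assumed PP_TEMPERATURE_UNITS_PER_CENTIGRADES */
	}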
-
-/**
-* Set the requested temperature range for high and low alert signals
-*
-* @param hwmgr The address of the hardware manager.
-* @param range Temperature range to be programmed for high and low alert signals
-* @exception PP_Result_BadInput if the input data is not valid.
-*/
-static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- uint32_t low_temp, uint32_t high_temp)
-{
- uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
- if (low < low_temp)
- low = low_temp;
- if (high > high_temp)
- high = high_temp;
-
- if (low > high)
- return -EINVAL;
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, DIG_THERM_INTH,
- (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, DIG_THERM_INTL,
- (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_CTRL, DIG_THERM_DPM,
- (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-
- return 0;
-}
-
-/**
-* Programs thermal controller one-time setting registers
-*
-* @param hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_TACH_CTRL, EDGE_PER_REV,
- hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution - 1);
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
-
- return 0;
-}
-
-/**
-* Enable high and low thermal alerts.
-*
-* @param hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
-{
- uint32_t alert;
-
- alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, THERM_INT_MASK);
- alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, THERM_INT_MASK, alert);
-
- /* send message to SMU to enable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
-}
-
-/**
-* Disable high and low thermal alerts.
-* @param hwmgr The address of the hardware manager.
-*/
-static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
-{
- uint32_t alert;
-
- alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, THERM_INT_MASK);
- alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_THERMAL_INT, THERM_INT_MASK, alert);
-
- /* send message to SMU to disable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
-}
-
-/**
-* Uninitialize the thermal controller.
-* Currently just disables alerts.
-* @param hwmgr The address of the hardware manager.
-*/
-int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
-{
- int result = polaris10_thermal_disable_alert(hwmgr);
-
- if (!hwmgr->thermal_controller.fanInfo.bNoFan)
- polaris10_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (data->fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = tonga_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- data->sram_end);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
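
The slope fields encode the PWM-versus-temperature ramp in fixed point: 16 * duty100 * pwm_diff / t_diff, scaled down by 100, with the +50 rounding that final division to nearest. A worked sketch with hypothetical table values (temperatures in 0.01 degC units, PWM limits in 0.01 % units):

	#include <linux/types.h>

	/* duty100 = 255, pwm_diff = 3000 (30 %), t_diff = 3000 (30 degC):
	 * (50 + 16 * 255 * 3000 / 3000) / 100 = (50 + 4080) / 100 = 41. */
	static uint16_t fan_table_slope(uint32_t duty100, uint32_t pwm_diff,
			uint32_t t_diff)
	{
		return (uint16_t)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);
	}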
-
-/**
-* Start the fan control on the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
-/* If the fan-table setup has failed, we could have disabled
- * PHM_PlatformCaps_MicrocodeFanControl even after this function
- * was included in the table.
- * Make sure that we still think controlling the fan is OK.
- */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
- polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
- polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
- }
-
- return 0;
-}
-
-/**
-* Set temperature range for high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
-
- if (range == NULL)
- return -EINVAL;
-
- return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from initialize thermal controller routine
-*/
-int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return polaris10_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from enable alert routine
-*/
-int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return polaris10_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from disable alert routine
-*/
-static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return polaris10_thermal_disable_alert(hwmgr);
-}
-
-static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return 0;
-
- ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
-
- ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
- 0 : -1;
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
-}
-
-static const struct phm_master_table_item
-polaris10_thermal_start_thermal_controller_master_list[] = {
- {NULL, tf_polaris10_thermal_initialize},
- {NULL, tf_polaris10_thermal_set_temperature_range},
- {NULL, tf_polaris10_thermal_enable_alert},
- {NULL, tf_polaris10_thermal_avfs_enable},
-/* We should restrict performance levels to low before we halt the SMC.
- * On the other hand, we are still in the boot state when we do this,
- * so it would be pointless.
- * If this assumption changes we have to revisit this table.
- */
- {NULL, tf_polaris10_thermal_setup_fan_table},
- {NULL, tf_polaris10_thermal_start_smc_fan_control},
- {NULL, NULL}
-};
-
-static const struct phm_master_table_header
-polaris10_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- polaris10_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item
-polaris10_thermal_set_temperature_range_master_list[] = {
- {NULL, tf_polaris10_thermal_disable_alert},
- {NULL, tf_polaris10_thermal_set_temperature_range},
- {NULL, tf_polaris10_thermal_enable_alert},
- {NULL, NULL}
-};
-
-static const struct phm_master_table_header
-polaris10_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- polaris10_thermal_set_temperature_range_master_list
-};
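
Both master lists are NULL-terminated arrays of task functions that phm_construct_table() turns into runtime tables, run in order, with each task receiving the previous failure code. A sketch of how such a list is consumed; the field name and loop shape are assumptions, not the kernel's exact implementation:

	static int run_master_list(struct pp_hwmgr *hwmgr,
			const struct phm_master_table_item *item,
			void *input, void *output)
	{
		int result = 0;

		for (; item->tableFunction != NULL; item++) {
			result = item->tableFunction(hwmgr, input, output,
						     NULL, result);
			if (result)
				break;
		}
		return result;
	}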
-
-int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
-{
- if (!hwmgr->thermal_controller.fanInfo.bNoFan)
- polaris10_fan_ctrl_set_default_mode(hwmgr);
- return 0;
-}
-
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr,
- &polaris10_thermal_set_temperature_range_master,
- &(hwmgr->set_temperature_range));
-
- if (!result) {
- result = phm_construct_table(hwmgr,
- &polaris10_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (result)
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- }
-
- if (!result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
deleted file mode 100644
index 62f8cbc2d590..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _POLARIS10_THERMAL_H_
-#define _POLARIS10_THERMAL_H_
-
-#include "hwmgr.h"
-
-#define POLARIS10_THERMAL_HIGH_ALERT_MASK 0x1
-#define POLARIS10_THERMAL_LOW_ALERT_MASK 0x2
-
-#define POLARIS10_THERMAL_MINIMUM_TEMP_READING -256
-#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING 255
-
-#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP 0
-#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP 255
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 26f3e30d0fef..0894527d932f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "ppatomctrl.h"
#include "atombios.h"
@@ -1321,7 +1320,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
if (0 != result)
return result;
- *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
+ *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
+ (&get_voltage_info_param_space))->ulVoltageLevel);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
index f127198aafc4..1e870f58dd12 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
@@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
typedef struct _ATOM_Tonga_State_Array {
UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */
- ATOM_Tonga_State states[1]; /* Dynamically allocate entries. */
+ ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
} ATOM_Tonga_State_Array;
typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index cfb647f76cbe..4477c55a58e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -22,15 +22,14 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
-#include "tonga_processpptables.h"
+#include "process_pptables_v1_0.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "pp_debug.h"
#include "hwmgr.h"
#include "cgs_common.h"
-#include "tonga_pptable.h"
+#include "pptable_v1_0.h"
/**
* Private Function used during initialization.
@@ -154,12 +153,14 @@ const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
static int get_vddc_lookup_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table **lookup_table,
- const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
- uint32_t max_levels
+ const ATOM_Tonga_Voltage_Lookup_Table *vddc_lookup_pp_tables,
+ uint32_t max_levels
)
{
uint32_t table_size, i;
phm_ppt_v1_voltage_lookup_table *table;
+ phm_ppt_v1_voltage_lookup_record *record;
+ ATOM_Tonga_Voltage_Lookup_Record *atom_record;
PP_ASSERT_WITH_CODE((0 != vddc_lookup_pp_tables->ucNumEntries),
"Invalid CAC Leakage PowerPlay Table!", return 1);
@@ -177,15 +178,17 @@ static int get_vddc_lookup_table(
table->count = vddc_lookup_pp_tables->ucNumEntries;
for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
- table->entries[i].us_calculated = 0;
- table->entries[i].us_vdd =
- vddc_lookup_pp_tables->entries[i].usVdd;
- table->entries[i].us_cac_low =
- vddc_lookup_pp_tables->entries[i].usCACLow;
- table->entries[i].us_cac_mid =
- vddc_lookup_pp_tables->entries[i].usCACMid;
- table->entries[i].us_cac_high =
- vddc_lookup_pp_tables->entries[i].usCACHigh;
+ record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_voltage_lookup_record,
+ entries, table, i);
+ atom_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_Voltage_Lookup_Record,
+ entries, vddc_lookup_pp_tables, i);
+ record->us_calculated = 0;
+ record->us_vdd = atom_record->usVdd;
+ record->us_cac_low = atom_record->usCACLow;
+ record->us_cac_mid = atom_record->usCACMid;
+ record->us_cac_high = atom_record->usCACHigh;
}
*lookup_table = table;
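
The pattern in this hunk, repeated through the rest of the file, replaces direct entries[i] indexing on structures whose trailing array is declared with a single element by GET_FLEXIBLE_ARRAY_MEMBER_ADDR, which computes the element address by byte offset instead of indexing past the declared bound. A plausible shape for that accessor (the real definition lives elsewhere in the series and may differ):

	#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n)	\
		((type *)((char *)&(ptr)->member + sizeof(type) * (n)))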
@@ -314,11 +317,12 @@ static int init_dpm_2_parameters(
static int get_valid_clk(
struct pp_hwmgr *hwmgr,
struct phm_clock_array **clk_table,
- const phm_ppt_v1_clock_voltage_dependency_table * clk_volt_pp_table
+ phm_ppt_v1_clock_voltage_dependency_table const *clk_volt_pp_table
)
{
uint32_t table_size, i;
struct phm_clock_array *table;
+ phm_ppt_v1_clock_voltage_dependency_record *dep_record;
PP_ASSERT_WITH_CODE((0 != clk_volt_pp_table->count),
"Invalid PowerPlay Table!", return -1);
@@ -335,9 +339,12 @@ static int get_valid_clk(
table->count = (uint32_t)clk_volt_pp_table->count;
- for (i = 0; i < table->count; i++)
- table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk;
-
+ for (i = 0; i < table->count; i++) {
+ dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, clk_volt_pp_table, i);
+ table->values[i] = (uint32_t)dep_record->clk;
+ }
*clk_table = table;
return 0;
@@ -346,7 +353,7 @@ static int get_valid_clk(
static int get_hard_limits(
struct pp_hwmgr *hwmgr,
struct phm_clock_and_voltage_limits *limits,
- const ATOM_Tonga_Hard_Limit_Table * limitable
+ ATOM_Tonga_Hard_Limit_Table const *limitable
)
{
PP_ASSERT_WITH_CODE((0 != limitable->ucNumEntries), "Invalid PowerPlay Table!", return -1);
@@ -364,11 +371,13 @@ static int get_hard_limits(
static int get_mclk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_mclk_dep_table,
- const ATOM_Tonga_MCLK_Dependency_Table * mclk_dep_table
+ ATOM_Tonga_MCLK_Dependency_Table const *mclk_dep_table
)
{
uint32_t table_size, i;
phm_ppt_v1_clock_voltage_dependency_table *mclk_table;
+ phm_ppt_v1_clock_voltage_dependency_record *mclk_table_record;
+ ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
PP_ASSERT_WITH_CODE((0 != mclk_dep_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@@ -386,16 +395,17 @@ static int get_mclk_voltage_dependency_table(
mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
- mclk_table->entries[i].vddInd =
- mclk_dep_table->entries[i].ucVddcInd;
- mclk_table->entries[i].vdd_offset =
- mclk_dep_table->entries[i].usVddgfxOffset;
- mclk_table->entries[i].vddci =
- mclk_dep_table->entries[i].usVddci;
- mclk_table->entries[i].mvdd =
- mclk_dep_table->entries[i].usMvdd;
- mclk_table->entries[i].clk =
- mclk_dep_table->entries[i].ulMclk;
+ mclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, mclk_table, i);
+ mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MCLK_Dependency_Record,
+ entries, mclk_dep_table, i);
+ mclk_table_record->vddInd = mclk_dep_record->ucVddcInd;
+ mclk_table_record->vdd_offset = mclk_dep_record->usVddgfxOffset;
+ mclk_table_record->vddci = mclk_dep_record->usVddci;
+ mclk_table_record->mvdd = mclk_dep_record->usMvdd;
+ mclk_table_record->clk = mclk_dep_record->ulMclk;
}
*pp_tonga_mclk_dep_table = mclk_table;
@@ -406,15 +416,17 @@ static int get_mclk_voltage_dependency_table(
static int get_sclk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
- const PPTable_Generic_SubTable_Header *sclk_dep_table
+ PPTable_Generic_SubTable_Header const *sclk_dep_table
)
{
uint32_t table_size, i;
phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
+ phm_ppt_v1_clock_voltage_dependency_record *sclk_table_record;
if (sclk_dep_table->ucRevId < 1) {
const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
(ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
+ ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@@ -432,20 +444,23 @@ static int get_sclk_voltage_dependency_table(
sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
for (i = 0; i < tonga_table->ucNumEntries; i++) {
- sclk_table->entries[i].vddInd =
- tonga_table->entries[i].ucVddInd;
- sclk_table->entries[i].vdd_offset =
- tonga_table->entries[i].usVddcOffset;
- sclk_table->entries[i].clk =
- tonga_table->entries[i].ulSclk;
- sclk_table->entries[i].cks_enable =
- (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
- sclk_table->entries[i].cks_voffset =
- (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+ sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_SCLK_Dependency_Record,
+ entries, tonga_table, i);
+ sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, sclk_table, i);
+ sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+ sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+ sclk_table_record->clk = sclk_dep_record->ulSclk;
+ sclk_table_record->cks_enable =
+ (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
}
} else {
const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
(ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+ ATOM_Polaris_SCLK_Dependency_Record *sclk_dep_record;
PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@@ -463,17 +478,19 @@ static int get_sclk_voltage_dependency_table(
sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
for (i = 0; i < polaris_table->ucNumEntries; i++) {
- sclk_table->entries[i].vddInd =
- polaris_table->entries[i].ucVddInd;
- sclk_table->entries[i].vdd_offset =
- polaris_table->entries[i].usVddcOffset;
- sclk_table->entries[i].clk =
- polaris_table->entries[i].ulSclk;
- sclk_table->entries[i].cks_enable =
- (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
- sclk_table->entries[i].cks_voffset =
- (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
- sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+ sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Polaris_SCLK_Dependency_Record,
+ entries, polaris_table, i);
+ sclk_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_clock_voltage_dependency_record,
+ entries, sclk_table, i);
+ sclk_table_record->vddInd = sclk_dep_record->ucVddInd;
+ sclk_table_record->vdd_offset = sclk_dep_record->usVddcOffset;
+ sclk_table_record->clk = sclk_dep_record->ulSclk;
+ sclk_table_record->cks_enable =
+ (((sclk_dep_record->ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table_record->cks_voffset = (sclk_dep_record->ucCKSVOffsetandDisable & 0x7F);
+ sclk_table_record->sclk_offset = sclk_dep_record->ulSclkOffset;
}
}
*pp_tonga_sclk_dep_table = sclk_table;
@@ -484,16 +501,19 @@ static int get_sclk_voltage_dependency_table(
static int get_pcie_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
- const PPTable_Generic_SubTable_Header * pTable
+ PPTable_Generic_SubTable_Header const *ptable
)
{
uint32_t table_size, i, pcie_count;
phm_ppt_v1_pcie_table *pcie_table;
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_pcie_record *pcie_record;
+
+ if (ptable->ucRevId < 1) {
+ const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)ptable;
+ ATOM_Tonga_PCIE_Record *atom_pcie_record;
- if (pTable->ucRevId < 1) {
- const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
"Invalid PowerPlay Table!", return -1);
@@ -519,18 +539,23 @@ static int get_pcie_table(
Disregarding the excess entries... \n");
pcie_table->count = pcie_count;
-
for (i = 0; i < pcie_count; i++) {
- pcie_table->entries[i].gen_speed =
- atom_pcie_table->entries[i].ucPCIEGenSpeed;
- pcie_table->entries[i].lane_width =
- atom_pcie_table->entries[i].usPCIELaneWidth;
+ pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_pcie_record,
+ entries, pcie_table, i);
+ atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_PCIE_Record,
+ entries, atom_pcie_table, i);
+ pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+ pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
}
*pp_tonga_pcie_table = pcie_table;
} else {
/* Polaris10/Polaris11 and newer. */
- const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable;
+ const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)ptable;
+ ATOM_Polaris10_PCIE_Record *atom_pcie_record;
+
PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
"Invalid PowerPlay Table!", return -1);
@@ -558,12 +583,15 @@ static int get_pcie_table(
pcie_table->count = pcie_count;
for (i = 0; i < pcie_count; i++) {
- pcie_table->entries[i].gen_speed =
- atom_pcie_table->entries[i].ucPCIEGenSpeed;
- pcie_table->entries[i].lane_width =
- atom_pcie_table->entries[i].usPCIELaneWidth;
- pcie_table->entries[i].pcie_sclk =
- atom_pcie_table->entries[i].ulPCIE_Sclk;
+ pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_pcie_record,
+ entries, pcie_table, i);
+ atom_pcie_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Polaris10_PCIE_Record,
+ entries, atom_pcie_table, i);
+ pcie_record->gen_speed = atom_pcie_record->ucPCIEGenSpeed;
+ pcie_record->lane_width = atom_pcie_record->usPCIELaneWidth;
+ pcie_record->pcie_sclk = atom_pcie_record->ulPCIE_Sclk;
}
*pp_tonga_pcie_table = pcie_table;
@@ -685,6 +713,7 @@ static int get_mm_clock_voltage_table(
uint32_t table_size, i;
const ATOM_Tonga_MM_Dependency_Record *mm_dependency_record;
phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table;
+ phm_ppt_v1_mm_clock_voltage_dependency_record *mm_table_record;
PP_ASSERT_WITH_CODE((0 != mm_dependency_table->ucNumEntries),
"Invalid PowerPlay Table!", return -1);
@@ -701,14 +730,19 @@ static int get_mm_clock_voltage_table(
mm_table->count = mm_dependency_table->ucNumEntries;
for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
- mm_dependency_record = &mm_dependency_table->entries[i];
- mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd;
- mm_table->entries[i].vddgfx_offset = mm_dependency_record->usVddgfxOffset;
- mm_table->entries[i].aclk = mm_dependency_record->ulAClk;
- mm_table->entries[i].samclock = mm_dependency_record->ulSAMUClk;
- mm_table->entries[i].eclk = mm_dependency_record->ulEClk;
- mm_table->entries[i].vclk = mm_dependency_record->ulVClk;
- mm_table->entries[i].dclk = mm_dependency_record->ulDClk;
+ mm_dependency_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MM_Dependency_Record,
+ entries, mm_dependency_table, i);
+ mm_table_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ phm_ppt_v1_mm_clock_voltage_dependency_record,
+ entries, mm_table, i);
+ mm_table_record->vddcInd = mm_dependency_record->ucVddcInd;
+ mm_table_record->vddgfx_offset = mm_dependency_record->usVddgfxOffset;
+ mm_table_record->aclk = mm_dependency_record->ulAClk;
+ mm_table_record->samclock = mm_dependency_record->ulSAMUClk;
+ mm_table_record->eclk = mm_dependency_record->ulEClk;
+ mm_table_record->vclk = mm_dependency_record->ulVClk;
+ mm_table_record->dclk = mm_dependency_record->ulDClk;
}
*tonga_mm_table = mm_table;
@@ -1015,7 +1049,7 @@ static int check_powerplay_tables(
return 0;
}
-int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
+int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
{
int result = 0;
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table;
@@ -1066,7 +1100,7 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
return result;
}
-int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
+int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1110,14 +1144,14 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
return 0;
}
-const struct pp_table_func tonga_pptable_funcs = {
- .pptable_init = tonga_pp_tables_initialize,
- .pptable_fini = tonga_pp_tables_uninitialize,
+const struct pp_table_func pptable_v1_0_funcs = {
+ .pptable_init = pp_tables_v1_0_initialize,
+ .pptable_fini = pp_tables_v1_0_uninitialize,
};
-int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
+int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr)
{
- const ATOM_Tonga_State_Array * state_arrays;
+ ATOM_Tonga_State_Array const *state_arrays;
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
PP_ASSERT_WITH_CODE((NULL != pp_table),
@@ -1164,6 +1198,74 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
return result;
}
+static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr)
+{
+ const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+ const ATOM_Tonga_VCE_State_Table *vce_state_table;
+
+
+ if (pp_table == NULL)
+ return 0;
+
+ vce_state_table = (void *)pp_table +
+ le16_to_cpu(pp_table->usVCEStateTableOffset);
+
+ return vce_state_table->ucNumEntries;
+}
+
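
Every subtable here is located by a little-endian byte offset from the start of the PowerPlay blob. An illustrative helper for that arithmetic; the byte-pointer cast sidesteps arithmetic on void *, a GCC extension the kernel otherwise permits:

	#include <linux/types.h>

	static const void *pp_subtable(const ATOM_Tonga_POWERPLAYTABLE *pp_table,
			uint16_t le_offset)
	{
		return (const u8 *)pp_table + le16_to_cpu(le_offset);
	}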
+static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i,
+ struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag)
+{
+ const ATOM_Tonga_VCE_State_Record *vce_state_record;
+ ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record;
+ ATOM_Tonga_MCLK_Dependency_Record *mclk_dep_record;
+ ATOM_Tonga_MM_Dependency_Record *mm_dep_record;
+ const ATOM_Tonga_POWERPLAYTABLE *pptable = get_powerplay_table(hwmgr);
+ const ATOM_Tonga_VCE_State_Table *vce_state_table = (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pptable)
+ + le16_to_cpu(pptable->usVCEStateTableOffset));
+ const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = (ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long)pptable)
+ + le16_to_cpu(pptable->usSclkDependencyTableOffset));
+ const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long)pptable)
+ + le16_to_cpu(pptable->usMclkDependencyTableOffset));
+ const ATOM_Tonga_MM_Dependency_Table *mm_dep_table = (ATOM_Tonga_MM_Dependency_Table *)(((unsigned long)pptable)
+ + le16_to_cpu(pptable->usMMDependencyTableOffset));
+
+ PP_ASSERT_WITH_CODE((i < vce_state_table->ucNumEntries),
+ "Requested state entry ID is out of range!",
+ return -EINVAL);
+
+ vce_state_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_VCE_State_Record,
+ entries, vce_state_table, i);
+ sclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_SCLK_Dependency_Record,
+ entries, sclk_dep_table,
+ vce_state_record->ucSCLKIndex);
+ mm_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MM_Dependency_Record,
+ entries, mm_dep_table,
+ vce_state_record->ucVCEClockIndex);
+ *flag = vce_state_record->ucFlag;
+
+ vce_state->evclk = mm_dep_record->ulEClk;
+ vce_state->ecclk = mm_dep_record->ulEClk;
+ vce_state->sclk = sclk_dep_record->ulSclk;
+
+ if (vce_state_record->ucMCLKIndex >= mclk_dep_table->ucNumEntries)
+ mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MCLK_Dependency_Record,
+ entries, mclk_dep_table,
+ mclk_dep_table->ucNumEntries - 1);
+ else
+ mclk_dep_record = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_MCLK_Dependency_Record,
+ entries, mclk_dep_table,
+ vce_state_record->ucMCLKIndex);
+
+ vce_state->mclk = mclk_dep_record->ulMclk;
+ return 0;
+}
+
/**
* Create a Power State out of an entry in the PowerPlay table.
* This function is called by the hardware back-end.
@@ -1172,15 +1274,17 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr,
* @param power_state The address of the PowerState instance being created.
* @return -1 if the entry cannot be retrieved.
*/
-int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
+int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr,
uint32_t entry_index, struct pp_power_state *power_state,
int (*call_back_func)(struct pp_hwmgr *, void *,
struct pp_power_state *, void *, uint32_t))
{
int result = 0;
- const ATOM_Tonga_State_Array * state_arrays;
+ const ATOM_Tonga_State_Array *state_arrays;
const ATOM_Tonga_State *state_entry;
const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
+ int i, j;
+ uint32_t flags = 0;
PP_ASSERT_WITH_CODE((NULL != pp_table), "Missing PowerPlay Table!", return -1;);
power_state->classification.bios_index = entry_index;
@@ -1197,7 +1301,9 @@ int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries),
"Invalid PowerPlay Table State Array Entry.", return -1);
- state_entry = &(state_arrays->states[entry_index]);
+ state_entry = GET_FLEXIBLE_ARRAY_MEMBER_ADDR(
+ ATOM_Tonga_State, entries,
+ state_arrays, entry_index);
result = call_back_func(hwmgr, (void *)state_entry, power_state,
(void *)pp_table,
@@ -1210,5 +1316,13 @@ int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr,
PP_StateClassificationFlag_Boot))
result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware));
+ hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr);
+
+ if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) {
+ for (j = 0; j < i; j++)
+ ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags);
+ }
+
return result;
}
+
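Note: the GET_FLEXIBLE_ARRAY_MEMBER_ADDR helper used above addresses entries of the byte-packed ATOM tables by offset rather than by direct array indexing, which is what the replaced states[entry_index] access relied on. A minimal sketch of such a helper follows; the driver's actual definition may differ:

/* Sketch only: compute the address of the n-th record in a packed
 * flexible-array member. Assumes fixed-size records of 'type'. */
#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n)	\
	((type *)((char *)&(ptr)->member + sizeof(type) * (n)))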
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
index d24b8887f466..b9710abdff01 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
@@ -20,14 +20,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef TONGA_PROCESSPPTABLES_H
-#define TONGA_PROCESSPPTABLES_H
+#ifndef _PROCESSPPTABLES_V1_0_H
+#define _PROCESSPPTABLES_V1_0_H
#include "hwmgr.h"
-extern const struct pp_table_func tonga_pptable_funcs;
-extern int tonga_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
-extern int tonga_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
+extern const struct pp_table_func pptable_v1_0_funcs;
+extern int get_number_of_powerplay_table_entries_v1_0(struct pp_hwmgr *hwmgr);
+extern int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t entry_index,
struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *,
struct pp_power_state *, void *, uint32_t));
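Note: with the tonga_ prefixes dropped, these handlers are intended to serve every ASIC that carries a v1.0 PowerPlay table. A hedged sketch of how a caller might select them by table version; the v0 symbol name here is an assumption, not taken from this patch:

/* Sketch (assumed caller): pick table handlers by PPTable version. */
static const struct pp_table_func *pick_pptable_funcs(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->pp_table_version == PP_TABLE_V1)
		return &pptable_v1_0_funcs;	/* renamed above */
	return &pptable_funcs;		/* legacy v0 handlers, assumed name */
}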
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 6c321b0d8a1e..ccf7ebeaf892 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -1523,7 +1523,7 @@ int get_number_of_vce_state_table_entries(
int get_vce_state_table_entry(struct pp_hwmgr *hwmgr,
unsigned long i,
- struct PP_VCEState *vce_state,
+ struct pp_vce_state *vce_state,
void **clock_info,
unsigned long *flag)
{
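Note: the PP_VCEState -> pp_vce_state rename aligns the v0 parser with the lower-case struct consumed by the new v1.0 VCE-state code above. The accessors in this series imply at least the following fields; this is a sketch and the real struct may carry more members:

/* Fields implied by the evclk/ecclk/sclk/mclk assignments above. */
struct pp_vce_state {
	uint32_t evclk;	/* VCE encode clock */
	uint32_t ecclk;	/* VCE core clock */
	uint32_t sclk;	/* engine clock for this state */
	uint32_t mclk;	/* memory clock for this state */
};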
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index b5edb5105986..6eb6db199250 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -21,9 +21,53 @@
*
*/
-#include "polaris10_clockpowergating.h"
+#include "smu7_hwmgr.h"
+#include "smu7_clockpowergating.h"
+#include "smu7_common.h"
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ PPSMC_MSG_UVDDPM_Enable :
+ PPSMC_MSG_UVDDPM_Disable);
+}
+
+static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ PPSMC_MSG_VCEDPM_Enable :
+ PPSMC_MSG_VCEDPM_Disable);
+}
+
+static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ PPSMC_MSG_SAMUDPM_Enable :
+ PPSMC_MSG_SAMUDPM_Disable);
+}
+
+static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ if (!bgate)
+ smum_update_smc_table(hwmgr, SMU_UVD_TABLE);
+ return smu7_enable_disable_uvd_dpm(hwmgr, !bgate);
+}
+
+static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ if (!bgate)
+ smum_update_smc_table(hwmgr, SMU_VCE_TABLE);
+ return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
+}
+
+static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ if (!bgate)
+ smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
+ return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
+}
+
+int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -31,7 +75,7 @@ int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
+int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -47,7 +91,7 @@ int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
+int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -55,7 +99,7 @@ int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
+int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr->smumgr,
@@ -63,7 +107,7 @@ int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
+int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -72,7 +116,7 @@ int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
+int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
@@ -81,27 +125,24 @@ int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
return 0;
}
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
+int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = false;
data->vce_power_gated = false;
data->samu_power_gated = false;
- polaris10_phm_powerup_uvd(hwmgr);
- polaris10_phm_powerup_vce(hwmgr);
- polaris10_phm_powerup_samu(hwmgr);
+ smu7_powerup_uvd(hwmgr);
+ smu7_powerup_vce(hwmgr);
+ smu7_powerup_samu(hwmgr);
return 0;
}
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (data->uvd_power_gated == bgate)
- return 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = bgate;
@@ -109,11 +150,11 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
- polaris10_update_uvd_dpm(hwmgr, true);
- polaris10_phm_powerdown_uvd(hwmgr);
+ smu7_update_uvd_dpm(hwmgr, true);
+ smu7_powerdown_uvd(hwmgr);
} else {
- polaris10_phm_powerup_uvd(hwmgr);
- polaris10_update_uvd_dpm(hwmgr, false);
+ smu7_powerup_uvd(hwmgr);
+ smu7_update_uvd_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
@@ -122,9 +163,9 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
return 0;
}
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->vce_power_gated == bgate)
return 0;
@@ -135,11 +176,11 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
- polaris10_update_vce_dpm(hwmgr, true);
- polaris10_phm_powerdown_vce(hwmgr);
+ smu7_update_vce_dpm(hwmgr, true);
+ smu7_powerdown_vce(hwmgr);
} else {
- polaris10_phm_powerup_vce(hwmgr);
- polaris10_update_vce_dpm(hwmgr, false);
+ smu7_powerup_vce(hwmgr);
+ smu7_update_vce_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
@@ -147,9 +188,9 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
return 0;
}
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
+int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->samu_power_gated == bgate)
return 0;
@@ -157,22 +198,25 @@ int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
data->samu_power_gated = bgate;
if (bgate) {
- polaris10_update_samu_dpm(hwmgr, true);
- polaris10_phm_powerdown_samu(hwmgr);
+ smu7_update_samu_dpm(hwmgr, true);
+ smu7_powerdown_samu(hwmgr);
} else {
- polaris10_phm_powerup_samu(hwmgr);
- polaris10_update_samu_dpm(hwmgr, false);
+ smu7_powerup_samu(hwmgr);
+ smu7_update_samu_dpm(hwmgr, false);
}
return 0;
}
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
+int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id)
{
PPSMC_Msg msg;
uint32_t value;
+ if (!(hwmgr->feature_mask & PP_ENABLE_GFX_CG_THRU_SMU))
+ return 0;
+
switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
case PP_GROUP_GFX:
switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
@@ -185,7 +229,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
@@ -195,7 +239,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -208,7 +252,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -219,7 +263,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -232,7 +276,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -245,7 +289,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -259,12 +303,12 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
default:
- return -1;
+ return -EINVAL;
}
break;
@@ -279,7 +323,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@@ -289,7 +333,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -302,7 +346,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -313,7 +357,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -326,7 +370,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
@@ -336,7 +380,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -349,7 +393,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -360,7 +404,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -373,7 +417,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -384,7 +428,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
@@ -397,18 +441,18 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
if (smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr, msg, value))
- return -1;
+ return -EINVAL;
}
break;
default:
- return -1;
+ return -EINVAL;
}
break;
default:
- return -1;
+ return -EINVAL;
}
@@ -419,7 +463,7 @@ int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
* Powerplay will only control the static per CU Power Gating.
* Dynamic per CU Power Gating will be done in gfx.
*/
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
{
struct cgs_system_info sys_info = {0};
uint32_t active_cus;
@@ -432,8 +476,8 @@ int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable
if (result)
return -EINVAL;
- else
- active_cus = sys_info.value;
+
+ active_cus = sys_info.value;
if (enable)
return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
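Note: smu7_update_clock_gatings() treats *msg_id as a packed request; the group, block, and state bits are extracted with the PP_*_MASK/SHIFT macros seen in the hunks above. A decoding sketch using only names visible in this file (concrete bit values live in the driver headers):

/* Sketch: how a clock-gating msg_id decomposes. */
static void decode_cg_msg_id(uint32_t msg_id)
{
	uint32_t group = (msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT;	/* e.g. PP_GROUP_GFX */
	uint32_t block = (msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT;	/* IP block within the group */
	bool ls_requested = (msg_id & PP_STATE_MASK) & PP_STATE_LS;	/* light-sleep state asked for */
	bool ls_supported = msg_id & PP_STATE_SUPPORT_LS;		/* block supports light sleep */

	(void)group; (void)block; (void)ls_requested; (void)ls_supported;
}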
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index 33af5f511ab8..d52a28c343e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,15 +21,20 @@
*
*/
-#ifndef _FIJI_CLOCK_POWER_GATING_H_
-#define _FIJI_CLOCK_POWER_GATING_H_
+#ifndef _SMU7_CLOCK_POWER_GATING_H_
+#define _SMU7_CLOCK_POWER_GATING_H_
-#include "fiji_hwmgr.h"
+#include "smu7_hwmgr.h"
#include "pp_asicblocks.h"
-extern int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-extern int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
+int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
+int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
+int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
+int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
+ const uint32_t *msg_id);
+int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+
+#endif
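Note: the bgate flag encodes direction, and the call ordering inside smu7_powergate_uvd() above matters: gating clock-gates first, freezes DPM, then powers down; ungating does the reverse. A usage sketch:

smu7_powergate_uvd(hwmgr, true);	/* UVD going idle: CG gate -> DPM off -> power down */
/* ... no decode work while gated ... */
smu7_powergate_uvd(hwmgr, false);	/* UVD needed again: power up -> DPM on -> CG ungate */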
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
new file mode 100644
index 000000000000..f967613191cf
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_DYN_DEFAULTS_H
+#define _SMU7_DYN_DEFAULTS_H
+
+
+/* Default values for SMU7 dynamic state setup. */
+
+
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
+#define SMU7_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
+
+
+#define SMU7_THERMALPROTECTCOUNTER_DFLT 0x200
+#define SMU7_STATICSCREENTHRESHOLDUNIT_DFLT 0
+#define SMU7_STATICSCREENTHRESHOLD_DFLT 0x00C8
+#define SMU7_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
+#define SMU7_REFERENCEDIVIDER_DFLT 4
+
+#define SMU7_ULVVOLTAGECHANGEDELAY_DFLT 1687
+
+#define SMU7_CGULVPARAMETER_DFLT 0x00040035
+#define SMU7_CGULVCONTROL_DFLT 0x00007450
+#define SMU7_TARGETACTIVITY_DFLT 50
+#define SMU7_MCLK_TARGETACTIVITY_DFLT 10
+
+#endif
+
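Note: these defaults are presumably copied into the smu7_hwmgr backend at init time and later written to hardware, e.g. by smu7_program_voting_clients() in the new smu7_hwmgr.c below. A sketch of the assumed seeding; the field names follow the backend accesses in that file:

/* Sketch (assumed init path, not from this patch). */
data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
/* clients2 through clients7 follow the same pattern */
data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;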
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
new file mode 100644
index 000000000000..13f2b705ea49
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -0,0 +1,4393 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <asm/div64.h>
+#include "linux/delay.h"
+#include "pp_acpi.h"
+#include "pp_debug.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pptable_v1_0.h"
+#include "pppcielanes.h"
+#include "amd_pcie_helpers.h"
+#include "hardwaremanager.h"
+#include "process_pptables_v1_0.h"
+#include "cgs_common.h"
+
+#include "smu7_common.h"
+
+#include "hwmgr.h"
+#include "smu7_hwmgr.h"
+#include "smu7_powertune.h"
+#include "smu7_dyn_defaults.h"
+#include "smu7_thermal.h"
+#include "smu7_clockpowergating.h"
+#include "processpptables.h"
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0 0x05
+#define MC_CG_SEQ_DRAMCONF_S1 0x06
+#define MC_CG_SEQ_YCLK_SUSPEND 0x04
+#define MC_CG_SEQ_YCLK_RESUME 0x0a
+
+#define SMC_CG_IND_START 0xc0030000
+#define SMC_CG_IND_END 0xc0040000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+
+#define MEM_FREQ_LOW_LATENCY 25000
+#define MEM_FREQ_HIGH_LATENCY 80000
+
+#define MEM_LATENCY_HIGH 45
+#define MEM_LATENCY_LOW 35
+#define MEM_LATENCY_ERR 0xFFFF
+
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+
+/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
+enum DPM_EVENT_SRC {
+ DPM_EVENT_SRC_ANALOG = 0,
+ DPM_EVENT_SRC_EXTERNAL = 1,
+ DPM_EVENT_SRC_DIGITAL = 2,
+ DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
+static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
+
+struct smu7_power_state *cast_phw_smu7_power_state(
+ struct pp_hw_power_state *hw_ps)
+{
+ PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
+ "Invalid Powerstate Type!",
+ return NULL);
+
+ return (struct smu7_power_state *)hw_ps;
+}
+
+const struct smu7_power_state *cast_const_phw_smu7_power_state(
+ const struct pp_hw_power_state *hw_ps)
+{
+ PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
+ "Invalid Powerstate Type!",
+ return NULL);
+
+ return (const struct smu7_power_state *)hw_ps;
+}
+
+/**
+ * Find the MC microcode version and store it in the HwMgr struct
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
+{
+ cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
+
+ hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+ return 0;
+}
+
+uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+ uint32_t speedCntl = 0;
+
+ /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+ speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
+ ixPCIE_LC_SPEED_CNTL);
+ return((uint16_t)PHM_GET_FIELD(speedCntl,
+ PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
+}
+
+int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+{
+ uint32_t link_width;
+
+ /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+ link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+ PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
+
+ PP_ASSERT_WITH_CODE((7 >= link_width),
+ "Invalid PCIe lane width!", return 0);
+
+ return decode_pcie_lane_width(link_width);
+}
+
+/**
+* Enable the SMC voltage controller
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
+
+ return 0;
+}
+
+/**
+* Checks if we want to support voltage control
+*
+* @param hwmgr the address of the powerplay hardware manager.
+*/
+static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
+{
+ const struct smu7_hwmgr *data =
+ (const struct smu7_hwmgr *)(hwmgr->backend);
+
+ return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
+}
+
+/**
+* Enable voltage control
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
+{
+ /* enable voltage control */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
+
+ return 0;
+}
+
+static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
+ struct phm_clock_voltage_dependency_table *voltage_dependency_table
+ )
+{
+ uint32_t i;
+
+ PP_ASSERT_WITH_CODE((NULL != voltage_table),
+ "Voltage Dependency Table empty.", return -EINVAL;);
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+ voltage_table->count = voltage_dependency_table->count;
+
+ for (i = 0; i < voltage_dependency_table->count; i++) {
+ voltage_table->entries[i].value =
+ voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
+
+/**
+* Create Voltage Tables.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ int result = 0;
+ uint32_t tmp;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
+ &(data->mvdd_voltage_table));
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve MVDD table.",
+ return result);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
+ table_info->vdd_dep_on_mclk);
+ else if (hwmgr->pp_table_version == PP_TABLE_V0)
+ result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
+ hwmgr->dyn_state.mvdd_dependency_on_mclk);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve SVI2 MVDD table from dependency table.",
+ return result;);
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
+ &(data->vddci_voltage_table));
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve VDDCI table.",
+ return result);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
+ table_info->vdd_dep_on_mclk);
+ else if (hwmgr->pp_table_version == PP_TABLE_V0)
+ result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
+ hwmgr->dyn_state.vddci_dependency_on_mclk);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve SVI2 VDDCI table from dependency table.",
+ return result);
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ /* VDDGFX has only SVI2 voltage control */
+ result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
+ table_info->vddgfx_lookup_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
+ }
+
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
+ &data->vddc_voltage_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve VDDC table.", return result;);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+
+ if (hwmgr->pp_table_version == PP_TABLE_V0)
+ result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
+ hwmgr->dyn_state.vddc_dependency_on_mclk);
+ else if (hwmgr->pp_table_version == PP_TABLE_V1)
+ result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
+ table_info->vddc_lookup_table);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
+ }
+
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
+ PP_ASSERT_WITH_CODE(
+ (data->vddc_voltage_table.count <= tmp),
+ "Too many voltage values for VDDC. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(tmp,
+ &(data->vddc_voltage_table)));
+
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ PP_ASSERT_WITH_CODE(
+ (data->vddgfx_voltage_table.count <= tmp),
+ "Too many voltage values for VDDGFX. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(tmp,
+ &(data->vddgfx_voltage_table)));
+
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
+ PP_ASSERT_WITH_CODE(
+ (data->vddci_voltage_table.count <= tmp),
+ "Too many voltage values for VDDCI. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(tmp,
+ &(data->vddci_voltage_table)));
+
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
+ PP_ASSERT_WITH_CODE(
+ (data->mvdd_voltage_table.count <= tmp),
+ "Too many voltage values for MVDD. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(tmp,
+ &(data->mvdd_voltage_table)));
+
+ return 0;
+}
+
+/**
+* Programs static screen detection parameters
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_program_static_screen_threshold_parameters(
+ struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* Set static screen threshold unit */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
+ data->static_screen_threshold_unit);
+ /* Set static screen threshold */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
+ data->static_screen_threshold);
+
+ return 0;
+}
+
+/**
+* Set up the display gap for glitch-free memory clock switching.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
+{
+ uint32_t display_gap =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_DISPLAY_GAP_CNTL);
+
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+ DISP_GAP, DISPLAY_GAP_IGNORE);
+
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+ DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+ return 0;
+}
+
+/**
+* Programs activity state transition voting clients
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* Clear reset for voting clients before enabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
+
+ return 0;
+}
+
+static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ /* Reset voting clients before disabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, 0);
+
+ return 0;
+}
+
+/* Copy one arb setting to another and then switch the active set.
+ * arb_src and arb_dest are one of the MC_CG_ARB_FREQ_Fx constants.
+ */
+static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
+ uint32_t arb_src, uint32_t arb_dest)
+{
+ uint32_t mc_arb_dram_timing;
+ uint32_t mc_arb_dram_timing2;
+ uint32_t burst_time;
+ uint32_t mc_cg_config;
+
+ switch (arb_src) {
+ case MC_CG_ARB_FREQ_F0:
+ mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
+ mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (arb_dest) {
+ case MC_CG_ARB_FREQ_F0:
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+ PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+ PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
+ mc_cg_config |= 0x0000000F;
+ cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
+ PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
+
+ return 0;
+}
+
+static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
+/**
+* Initial switch from ARB F0->F1
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+* This function is to be called from the SetPowerState table.
+*/
+static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
+{
+ return smu7_copy_and_switch_arb_sets(hwmgr,
+ MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+ uint32_t tmp;
+
+ tmp = (cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+ 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return smu7_copy_and_switch_arb_sets(hwmgr,
+ tmp, MC_CG_ARB_FREQ_F0);
+}
+
+static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = NULL;
+
+ uint32_t i, max_entry;
+ uint32_t tmp;
+
+ PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
+ data->use_pcie_power_saving_levels), "No pcie performance levels!",
+ return -EINVAL);
+
+ if (table_info != NULL)
+ pcie_table = table_info->pcie_table;
+
+ if (data->use_pcie_performance_levels &&
+ !data->use_pcie_power_saving_levels) {
+ data->pcie_gen_power_saving = data->pcie_gen_performance;
+ data->pcie_lane_power_saving = data->pcie_lane_performance;
+ } else if (!data->use_pcie_performance_levels &&
+ data->use_pcie_power_saving_levels) {
+ data->pcie_gen_performance = data->pcie_gen_power_saving;
+ data->pcie_lane_performance = data->pcie_lane_power_saving;
+ }
+ tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
+ phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
+ tmp,
+ MAX_REGULAR_DPM_NUMBER);
+
+ if (pcie_table != NULL) {
+ /* max_entry is used to make sure we reserve one PCIE level
+ * for boot level (fix for A+A PSPP issue).
+ * If the PCIE table from the PPTable has a ULV entry plus 8 entries,
+ * then ignore the last entry. */
+ max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
+ for (i = 1; i < max_entry; i++) {
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ pcie_table->entries[i].gen_speed),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ pcie_table->entries[i].lane_width));
+ }
+ data->dpm_table.pcie_speed_table.count = max_entry - 1;
+ smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
+ } else {
+ /* Hardcode Pcie Table */
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+
+ data->dpm_table.pcie_speed_table.count = 6;
+ }
+ /* Populate last level for boot PCIE level, but do not increment count. */
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+ data->dpm_table.pcie_speed_table.count,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+
+ return 0;
+}
+
+static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
+
+ phm_reset_single_dpm_table(
+ &data->dpm_table.sclk_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_GRAPHICS),
+ MAX_REGULAR_DPM_NUMBER);
+ phm_reset_single_dpm_table(
+ &data->dpm_table.mclk_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
+
+ phm_reset_single_dpm_table(
+ &data->dpm_table.vddc_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_VDDC),
+ MAX_REGULAR_DPM_NUMBER);
+ phm_reset_single_dpm_table(
+ &data->dpm_table.vddci_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
+
+ phm_reset_single_dpm_table(
+ &data->dpm_table.mvdd_table,
+ smum_get_mac_definition(hwmgr->smumgr,
+ SMU_MAX_LEVELS_MVDD),
+ MAX_REGULAR_DPM_NUMBER);
+ return 0;
+}
+/*
+ * This function initializes all DPM state tables
+ * for SMU7 based on the dependency table.
+ * The dynamic state patching function will then trim
+ * these state tables to the allowed range based
+ * on the power policy or external client requests,
+ * such as UVD requests.
+ */
+
+static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+ struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
+ hwmgr->dyn_state.vddc_dependency_on_mclk;
+ struct phm_cac_leakage_table *std_voltage_table =
+ hwmgr->dyn_state.cac_leakage_table;
+ uint32_t i;
+
+ PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
+ "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
+ "SCLK dependency table count is 0. This table is mandatory", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+ "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
+ "MCLK dependency table count is 0. This table is mandatory", return -EINVAL);
+
+
+ /* Initialize Sclk DPM table based on allow Sclk values*/
+ data->dpm_table.sclk_table.count = 0;
+
+ for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
+ allowed_vdd_sclk_table->entries[i].clk) {
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+ allowed_vdd_sclk_table->entries[i].clk;
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+ data->dpm_table.sclk_table.count++;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
+ "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
+ /* Initialize Mclk DPM table based on allow Mclk values */
+ data->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
+ allowed_vdd_mclk_table->entries[i].clk) {
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+ allowed_vdd_mclk_table->entries[i].clk;
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+ data->dpm_table.mclk_table.count++;
+ }
+ }
+
+ /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
+ for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
+ data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+ data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
+ /* param1 is for corresponding std voltage */
+ data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
+ }
+
+ data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
+ allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+ if (NULL != allowed_vdd_mclk_table) {
+ /* Initialize Vddci DPM table based on allow Mclk values */
+ for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+ data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+ data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
+ }
+ data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
+ }
+
+ allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
+
+ if (NULL != allowed_vdd_mclk_table) {
+ /*
+ * Initialize MVDD DPM table based on allow Mclk
+ * values
+ */
+ for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
+ data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
+ data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
+ }
+ data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
+ }
+
+ return 0;
+}
+
+static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i;
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+
+ if (table_info == NULL)
+ return -EINVAL;
+
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+
+ PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
+ "SCLK dependency table is missing.",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
+ "SCLK dependency table count is 0.",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
+ "MCLK dependency table is missing.",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
+ "MCLK dependency table count is 0",
+ return -EINVAL);
+
+ /* Initialize Sclk DPM table based on allow Sclk values */
+ data->dpm_table.sclk_table.count = 0;
+ for (i = 0; i < dep_sclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
+ dep_sclk_table->entries[i].clk) {
+
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+ dep_sclk_table->entries[i].clk;
+
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
+ (i == 0) ? true : false;
+ data->dpm_table.sclk_table.count++;
+ }
+ }
+
+ /* Initialize Mclk DPM table based on allow Mclk values */
+ data->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < dep_mclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.mclk_table.dpm_levels
+ [data->dpm_table.mclk_table.count - 1].value !=
+ dep_mclk_table->entries[i].clk) {
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+ dep_mclk_table->entries[i].clk;
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
+ (i == 0) ? true : false;
+ data->dpm_table.mclk_table.count++;
+ }
+ }
+
+ return 0;
+}
+
+int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ smu7_reset_dpm_tables(hwmgr);
+
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ smu7_setup_dpm_tables_v1(hwmgr);
+ else if (hwmgr->pp_table_version == PP_TABLE_V0)
+ smu7_setup_dpm_tables_v0(hwmgr);
+
+ smu7_setup_default_pcie_table(hwmgr);
+
+ /* save a copy of the default DPM table */
+ memcpy(&(data->golden_dpm_table), &(data->dpm_table),
+ sizeof(struct smu7_dpm_table));
+ return 0;
+}
+
+uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reference_clock, tmp;
+ struct cgs_display_info info = {0};
+ struct cgs_mode_info mode_info;
+
+ info.mode_info = &mode_info;
+
+ tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
+
+ if (tmp)
+ return TCLK;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ reference_clock = mode_info.ref_clock;
+
+ tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
+
+ if (0 != tmp)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
+
+static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
+{
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot))
+ return smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_EnableVRHotGPIOInterrupt);
+
+ return 0;
+}
+
+static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ SCLK_PWRMGT_OFF, 0);
+ return 0;
+}
+
+static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
+
+ return 0;
+}
+
+static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+ return 0;
+}
+
+static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to enable Master Deep Sleep switch failed!",
+ return -EINVAL);
+ } else {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -EINVAL);
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t soft_register_value = 0;
+ uint32_t handshake_disables_offset = data->soft_regs_start
+ + smum_get_offsetof(hwmgr->smumgr,
+ SMU_SoftRegisters, HandshakeDisables);
+
+ soft_register_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, handshake_disables_offset);
+ soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
+ SMU_UVD_MCLK_HANDSHAKE_DISABLE);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ handshake_disables_offset, soft_register_value);
+ return 0;
+}
+
+static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* enable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
+ "Failed to enable SCLK DPM during DPM Start Function!",
+ return -EINVAL);
+
+ /* enable MCLK dpm */
+ if (0 == data->mclk_dpm_key_disabled) {
+ if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
+ smu7_disable_handshake_uvd(hwmgr);
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Enable)),
+ "Failed to enable MCLK DPM during DPM Start Function!",
+ return -EINVAL);
+
+ PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+ udelay(10);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+ }
+
+ return 0;
+}
+
+static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /*enable general power management */
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 1);
+
+ /* enable sclk deep sleep */
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 1);
+
+ /* prepare for PCIE DPM */
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
+ VoltageChangeTimeout), 0x1000);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+ SWRST_COMMAND_1, RESETLC, 0x0);
+
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_Voltage_Cntl_Enable)),
+ "Failed to enable voltage DPM during DPM Start Function!",
+ return -EINVAL);
+
+
+ if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
+ printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
+ return -EINVAL;
+ }
+
+ /* enable PCIE dpm */
+ if (0 == data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Enable)),
+ "Failed to enable pcie DPM during DPM Start Function!",
+ return -EINVAL);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition)) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_EnableACDCGPIOInterrupt)),
+ "Failed to enable AC DC GPIO Interrupt!",
+ );
+ }
+
+ return 0;
+}
+
+static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* disable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable SCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+ }
+
+ /* disable MCLK dpm */
+ if (!data->mclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable MCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
+ }
+
+ return 0;
+}
+
+static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ /* disable general power management */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 0);
+ /* disable sclk deep sleep */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 0);
+
+ /* disable PCIE dpm */
+ if (!data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Disable) == 0),
+ "Failed to disable pcie DPM during DPM Stop Function!",
+ return -EINVAL);
+ }
+
+ smu7_disable_sclk_mclk_dpm(hwmgr);
+
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable voltage DPM when DPM is disabled",
+ return 0);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
+
+ return 0;
+}
+
+static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
+{
+ bool protection;
+ enum DPM_EVENT_SRC src;
+
+ switch (sources) {
+ default:
+ printk(KERN_ERR "Unknown throttling event sources.");
+ /* fall through */
+ case 0:
+ protection = false;
+ /* src is unused */
+ break;
+ case (1 << PHM_AutoThrottleSource_Thermal):
+ protection = true;
+ src = DPM_EVENT_SRC_DIGITAL;
+ break;
+ case (1 << PHM_AutoThrottleSource_External):
+ protection = true;
+ src = DPM_EVENT_SRC_EXTERNAL;
+ break;
+ case (1 << PHM_AutoThrottleSource_External) |
+ (1 << PHM_AutoThrottleSource_Thermal):
+ protection = true;
+ src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
+ break;
+ }
+ /* Order matters - don't enable thermal protection for the wrong source. */
+ if (protection) {
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
+ DPM_EVENT_SRC, src);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ THERMAL_PROTECTION_DIS,
+ !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController));
+ } else
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ THERMAL_PROTECTION_DIS, 1);
+}
+
+static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (!(data->active_auto_throttle_sources & (1 << source))) {
+ data->active_auto_throttle_sources |= 1 << source;
+ smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->active_auto_throttle_sources & (1 << source)) {
+ data->active_auto_throttle_sources &= ~(1 << source);
+ smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ data->pcie_performance_request = true;
+
+ return 0;
+}
+
+int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result = 0;
+ int result = 0;
+
+ tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is already running",
+ );
+
+ if (smu7_voltage_control(hwmgr)) {
+ tmp_result = smu7_enable_voltage_control(hwmgr);
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "Failed to enable voltage control!",
+ result = tmp_result);
+
+ tmp_result = smu7_construct_voltage_tables(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to contruct voltage tables!",
+ result = tmp_result);
+ }
+ smum_initialize_mc_reg_table(hwmgr);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
+
+ tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to program static screen threshold parameters!",
+ result = tmp_result);
+
+ tmp_result = smu7_enable_display_gap(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable display gap!", result = tmp_result);
+
+ tmp_result = smu7_program_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to program voting clients!", result = tmp_result);
+
+ tmp_result = smum_process_firmware_header(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to process firmware header!", result = tmp_result);
+
+ tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to initialize switch from ArbF0 to F1!",
+ result = tmp_result);
+
+ result = smu7_setup_default_dpm_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to setup default DPM tables!", return result);
+
+ tmp_result = smum_init_smc_table(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to initialize SMC table!", result = tmp_result);
+
+ tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
+
+ tmp_result = smu7_enable_sclk_control(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable SCLK control!", result = tmp_result);
+
+ tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable voltage control!", result = tmp_result);
+
+ tmp_result = smu7_enable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable ULV!", result = tmp_result);
+
+ tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = smu7_enable_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to enable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = smu7_start_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to start DPM!", result = tmp_result);
+
+ tmp_result = smu7_enable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable SMC CAC!", result = tmp_result);
+
+ tmp_result = smu7_enable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable power containment!", result = tmp_result);
+
+ tmp_result = smu7_power_control_set_level(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to power control set level!", result = tmp_result);
+
+ tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = smu7_pcie_performance_request(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "pcie performance request failed!", result = tmp_result);
+
+ return 0;
+}
+
+int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is not running right now, no need to disable DPM!",
+ return 0);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+ tmp_result = smu7_disable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable power containment!", result = tmp_result);
+
+ tmp_result = smu7_disable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable SMC CAC!", result = tmp_result);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+ tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable thermal auto throttle!", result = tmp_result);
+
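+ /* If the SMC feature status reports AVFS enabled, ask the SMC to disable it before stopping DPM. */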
+ if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
+ "Failed to disable AVFS!",
+ return -EINVAL);
+ }
+
+ tmp_result = smu7_stop_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop DPM!", result = tmp_result);
+
+ tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = smu7_disable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable ULV!", result = tmp_result);
+
+ tmp_result = smu7_clear_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to clear voting clients!", result = tmp_result);
+
+ tmp_result = smu7_reset_to_default(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to reset to default!", result = tmp_result);
+
+ tmp_result = smu7_force_switch_to_arbf0(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to force to switch arbf0!", result = tmp_result);
+
+ return result;
+}
+
+int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ data->dll_default_on = false;
+ data->mclk_dpm0_activity_target = 0xa;
+ data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
+ data->vddc_vddgfx_delta = 300;
+ data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
+ data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
+ data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
+ data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+ data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
+ data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+ data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+ data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+ data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+ data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+
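+ /* A DPM domain is treated as key-disabled when its feature-mask bit is clear. */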
+ data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
+ data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
+ data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
+ /* need to set voltage control types before EVV patching */
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
+ data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
+ data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
+ data->enable_tdc_limit_feature = true;
+ data->enable_pkg_pwr_tracking_feature = true;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+ data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
+
+ data->fast_watermark_threshold = 100;
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDGFX)) {
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
+ data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+ }
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableMVDDControl)) {
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+ data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
+ else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+ data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDGFX);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDCI)) {
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+ data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
+ else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+ data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+ }
+
+ if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableMVDDControl);
+
+ if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDCI);
+
+ if ((hwmgr->pp_table_version != PP_TABLE_V0)
+ && (table_info->cac_dtp_table->usClockStretchAmount != 0))
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+
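+ /* Note: max/min look inverted here; they appear to be seed values that are widened later when PCIe link limits are evaluated. */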
+ data->pcie_gen_performance.max = PP_PCIEGen1;
+ data->pcie_gen_performance.min = PP_PCIEGen3;
+ data->pcie_gen_power_saving.max = PP_PCIEGen1;
+ data->pcie_gen_power_saving.min = PP_PCIEGen3;
+ data->pcie_lane_performance.max = 0;
+ data->pcie_lane_performance.min = 16;
+ data->pcie_lane_power_saving.max = 0;
+ data->pcie_lane_power_saving.min = 16;
+}
+
+/**
+* Get Leakage VDDC based on leakage ID.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t vv_id;
+ uint16_t vddc = 0;
+ uint16_t vddgfx = 0;
+ uint16_t i, j;
+ uint32_t sclk = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
+
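+ /* For each virtual (leakage) voltage ID, find a matching SCLK and query the VBIOS for the real EVV voltage; 0 or anything at or above 2 V is rejected as invalid. */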
+ for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
+ vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ if ((hwmgr->pp_table_version == PP_TABLE_V1)
+ && !phm_get_sclk_for_voltage_evv(hwmgr,
+ table_info->vddgfx_lookup_table, vv_id, &sclk)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ sclk_table = table_info->vdd_dep_on_sclk;
+
+ for (j = 1; j < sclk_table->count; j++) {
+ if (sclk_table->entries[j].clk == sclk &&
+ sclk_table->entries[j].cks_enable == 0) {
+ sclk += 5000;
+ break;
+ }
+ }
+ }
+ if (0 == atomctrl_get_voltage_evv_on_sclk
+ (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
+ vv_id, &vddgfx)) {
+ /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
+ PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
+
+ /* the voltage should not be zero nor equal to leakage ID */
+ if (vddgfx != 0 && vddgfx != vv_id) {
+ data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
+ data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
+ data->vddcgfx_leakage.count++;
+ }
+ } else {
+ printk("Error retrieving EVV voltage value!\n");
+ }
+ }
+ } else {
+ if ((hwmgr->pp_table_version == PP_TABLE_V0)
+ || !phm_get_sclk_for_voltage_evv(hwmgr,
+ table_info->vddc_lookup_table, vv_id, &sclk)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ if (table_info == NULL)
+ return -EINVAL;
+ sclk_table = table_info->vdd_dep_on_sclk;
+
+ for (j = 1; j < sclk_table->count; j++) {
+ if (sclk_table->entries[j].clk == sclk &&
+ sclk_table->entries[j].cks_enable == 0) {
+ sclk += 5000;
+ break;
+ }
+ }
+ }
+
+ if (phm_get_voltage_evv_on_sclk(hwmgr,
+ VOLTAGE_TYPE_VDDC,
+ sclk, vv_id, &vddc) == 0) {
+ if (vddc >= 2000 || vddc == 0)
+ return -EINVAL;
+ } else {
+ printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
+ continue;
+ }
+
+ /* the voltage should not be zero nor equal to leakage ID */
+ if (vddc != 0 && vddc != vv_id) {
+ data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
+ data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
+ data->vddc_leakage.count++;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param voltage pointer to the voltage to be patched
+ * @param leakage_table pointer to the leakage table
+ */
+static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+ uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
+{
+ uint32_t index;
+
+ /* search for leakage voltage ID 0xff01 ~ 0xff08 */
+ for (index = 0; index < leakage_table->count; index++) {
+ /* if this voltage matches a leakage voltage ID */
+ /* patch with actual leakage voltage */
+ if (leakage_table->leakage_id[index] == *voltage) {
+ *voltage = leakage_table->actual_voltage[index];
+ break;
+ }
+ }
+
+ if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+ printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
+}
+
+/**
+* Patch voltage lookup table by EVV leakages.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @param lookup_table pointer to the voltage lookup table
+* @param leakage_table pointer to the leakage table
+* @return always 0
+*/
+static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_voltage_lookup_table *lookup_table,
+ struct smu7_leakage_voltage *leakage_table)
+{
+ uint32_t i;
+
+ for (i = 0; i < lookup_table->count; i++)
+ smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
+ &lookup_table->entries[i].us_vdd, leakage_table);
+
+ return 0;
+}
+
+static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
+ struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
+ uint16_t *vddc)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
+ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
+ table_info->max_clock_voltage_on_dc.vddc;
+ return 0;
+}
+
+static int smu7_patch_voltage_dependency_tables_with_lookup_table(
+ struct pp_hwmgr *hwmgr)
+{
+ uint8_t entry_id;
+ uint8_t voltage_id;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
+ table_info->vdd_dep_on_mclk;
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+ voltage_id = sclk_table->entries[entry_id].vddInd;
+ sclk_table->entries[entry_id].vddgfx =
+ table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
+ }
+ } else {
+ for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+ voltage_id = sclk_table->entries[entry_id].vddInd;
+ sclk_table->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ }
+ }
+
+ for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
+ voltage_id = mclk_table->entries[entry_id].vddInd;
+ mclk_table->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ }
+
+ for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
+ voltage_id = mm_table->entries[entry_id].vddcInd;
+ mm_table->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ }
+
+ return 0;
+}
+
+static int phm_add_voltage(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_voltage_lookup_table *look_up_table,
+ phm_ppt_v1_voltage_lookup_record *record)
+{
+ uint32_t i;
+
+ PP_ASSERT_WITH_CODE((NULL != look_up_table),
+ "Lookup Table is missing.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((0 != look_up_table->count),
+ "Lookup Table is empty.", return -EINVAL);
+
+ i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ PP_ASSERT_WITH_CODE((i >= look_up_table->count),
+ "Lookup Table is full.", return -EINVAL);
+
+ /* This is to avoid entering duplicate calculated records. */
+ for (i = 0; i < look_up_table->count; i++) {
+ if (look_up_table->entries[i].us_vdd == record->us_vdd) {
+ if (look_up_table->entries[i].us_calculated == 1)
+ return 0;
+ break;
+ }
+ }
+
+ look_up_table->entries[i].us_calculated = 1;
+ look_up_table->entries[i].us_vdd = record->us_vdd;
+ look_up_table->entries[i].us_cac_low = record->us_cac_low;
+ look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
+ look_up_table->entries[i].us_cac_high = record->us_cac_high;
+ /* Only increment the count when we're appending, not replacing duplicate entry. */
+ if (i == look_up_table->count)
+ look_up_table->count++;
+
+ return 0;
+}
+
+static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+ uint8_t entry_id;
+ struct phm_ppt_v1_voltage_lookup_record v_record;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
+ phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
+
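+ /* vdd_offset is a two's-complement 16-bit value: when bit 15 is set, the offset is negative, hence the subtraction below. */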
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
+ if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
+ v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
+ sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
+ else
+ v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
+ sclk_table->entries[entry_id].vdd_offset;
+
+ sclk_table->entries[entry_id].vddc =
+ v_record.us_cac_low = v_record.us_cac_mid =
+ v_record.us_cac_high = v_record.us_vdd;
+
+ phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
+ }
+
+ for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
+ if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
+ v_record.us_vdd = mclk_table->entries[entry_id].vddc +
+ mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
+ else
+ v_record.us_vdd = mclk_table->entries[entry_id].vddc +
+ mclk_table->entries[entry_id].vdd_offset;
+
+ mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
+ v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
+ phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
+ }
+ }
+ return 0;
+}
+
+static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
+{
+ uint8_t entry_id;
+ struct phm_ppt_v1_voltage_lookup_record v_record;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
+ if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
+ v_record.us_vdd = mm_table->entries[entry_id].vddc +
+ mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
+ else
+ v_record.us_vdd = mm_table->entries[entry_id].vddc +
+ mm_table->entries[entry_id].vddgfx_offset;
+
+ /* Add the calculated VDDGFX to the VDDGFX lookup table */
+ mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
+ v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
+ phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
+ }
+ }
+ return 0;
+}
+
+static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table)
+{
+ uint32_t table_size, i, j;
+ struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
+ table_size = lookup_table->count;
+
+ PP_ASSERT_WITH_CODE(0 != lookup_table->count,
+ "Lookup table is empty", return -EINVAL);
+
+ /* Insertion sort: keep entries ordered by ascending us_vdd. */
+ for (i = 0; i < table_size - 1; i++) {
+ for (j = i + 1; j > 0; j--) {
+ if (lookup_table->entries[j].us_vdd <
+ lookup_table->entries[j - 1].us_vdd) {
+ tmp_voltage_lookup_record = lookup_table->entries[j - 1];
+ lookup_table->entries[j - 1] = lookup_table->entries[j];
+ lookup_table->entries[j] = tmp_voltage_lookup_record;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ int tmp_result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+ table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
+ if (tmp_result != 0)
+ result = tmp_result;
+
+ smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
+ &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
+ } else {
+
+ tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
+ table_info->vddc_lookup_table, &(data->vddc_leakage));
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
+ &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
+ if (tmp_result)
+ result = tmp_result;
+ }
+
+ tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
+ if (tmp_result)
+ result = tmp_result;
+
+ return result;
+}
+
+static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
+ table_info->vdd_dep_on_sclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
+ table_info->vdd_dep_on_mclk;
+
+ PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
+ "VDD dependency on SCLK table is missing.",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
+ "VDD dependency on SCLK table has to have is missing.",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
+ "VDD dependency on MCLK table is missing",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
+ "VDD dependency on MCLK table has to have is missing.",
+ return -EINVAL);
+
+ table_info->max_clock_voltage_on_ac.sclk =
+ allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
+ table_info->max_clock_voltage_on_ac.mclk =
+ allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
+ table_info->max_clock_voltage_on_ac.vddc =
+ allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
+ table_info->max_clock_voltage_on_ac.vddci =
+ allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
+
+ hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
+
+ return 0;
+}
+
+int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table;
+ uint32_t i;
+ uint32_t hw_revision, sub_vendor_id, sub_sys_id;
+ struct cgs_system_info sys_info = {0};
+
+ if (table_info != NULL) {
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+ lookup_table = table_info->vddc_lookup_table;
+ } else
+ return 0;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ hw_revision = (uint32_t)sys_info.value;
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ sub_sys_id = (uint32_t)sys_info.value;
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ sub_vendor_id = (uint32_t)sys_info.value;
+
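+ /* Board-specific workaround: on certain Polaris10 boards (matched by PCIe revision and subsystem IDs), point the top MCLK level at a VDDC entry of at least 1000 mV. */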
+ if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
+ ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
+ (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
+ (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
+ if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+ return 0;
+
+ for (i = 0; i < lookup_table->count; i++) {
+ if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+ dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
+{
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+ uint32_t temp_reg;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
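+ /* Map the VDDC PCC GPIO pin, if one is assigned, onto the matching CNB_PWRMGT_CNTL field via its bit shift. */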
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+ temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
+ switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
+ case 0:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
+ break;
+ case 1:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
+ break;
+ case 2:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
+ break;
+ case 3:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
+ break;
+ case 4:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
+ break;
+ default:
+ PP_ASSERT_WITH_CODE(0,
+ "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
+ );
+ break;
+ }
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
+ }
+
+ if (table_info == NULL)
+ return 0;
+
+ if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
+ hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
+ (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
+ (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
+ (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
+
+ table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
+ (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
+
+ table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+ table_info->cac_dtp_table->usOperatingTempStep = 1;
+ table_info->cac_dtp_table->usOperatingTempHyst = 1;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
+
+ hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
+ table_info->cac_dtp_table->usOperatingTempMinLimit;
+
+ hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
+ table_info->cac_dtp_table->usOperatingTempMaxLimit;
+
+ hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
+ table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+
+ hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
+ table_info->cac_dtp_table->usOperatingTempStep;
+
+ hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
+ table_info->cac_dtp_table->usTargetOperatingTemp;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ODFuzzyFanControlSupport);
+ }
+
+ return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param voltage pointer to the voltage to be patched
+ * @param leakage_table pointer to the leakage table
+ */
+static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+ uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
+{
+ uint32_t index;
+
+ /* search for leakage voltage ID 0xff01 ~ 0xff08 */
+ for (index = 0; index < leakage_table->count; index++) {
+ /* if this voltage matches a leakage voltage ID */
+ /* patch with actual leakage voltage */
+ if (leakage_table->leakage_id[index] == *voltage) {
+ *voltage = leakage_table->actual_voltage[index];
+ break;
+ }
+ }
+
+ if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+ printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
+}
+
+static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddci_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_vce_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_uvd_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
+ struct phm_phase_shedding_limits_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_samu_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_acp_clock_voltage_dependency_table *tab)
+{
+ uint16_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab)
+ for (i = 0; i < tab->count; i++)
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
+ &data->vddc_leakage);
+
+ return 0;
+}
+
+static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
+ struct phm_clock_and_voltage_limits *tab)
+{
+ uint32_t vddc, vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab) {
+ vddc = tab->vddc;
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
+ &data->vddc_leakage);
+ tab->vddc = vddc;
+ vddci = tab->vddci;
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
+ &data->vddci_leakage);
+ tab->vddci = vddci;
+ }
+
+ return 0;
+}
+
+static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
+{
+ uint32_t i;
+ uint32_t vddc;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (tab) {
+ for (i = 0; i < tab->count; i++) {
+ vddc = (uint32_t)(tab->entries[i].Vddc);
+ smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
+ tab->entries[i].Vddc = (uint16_t)vddc;
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
+{
+ int tmp;
+
+ tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
+ if (tmp)
+ return -EINVAL;
+
+ tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
+ if (tmp)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+ struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+ struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+ PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
+ "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
+ "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
+ "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
+ PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
+ "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
+
+ data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
+ data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+ hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
+ allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+ if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
+ data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
+ data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+ }
+
+ if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
+ hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
+
+ return 0;
+}
+
+int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data;
+ int result;
+
+ data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
+ smu7_patch_voltage_workaround(hwmgr);
+ smu7_init_dpm_defaults(hwmgr);
+
+ /* Get leakage voltage based on leakage ID. */
+ result = smu7_get_evv_voltages(hwmgr);
+
+ if (result) {
+ printk(KERN_ERR "Get EVV voltage failed. Aborting driver load!\n");
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
+ return -EINVAL;
+ }
+
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ smu7_complete_dependency_tables(hwmgr);
+ smu7_set_private_data_based_on_pptable_v1(hwmgr);
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ smu7_patch_dependency_tables_with_leakage(hwmgr);
+ smu7_set_private_data_based_on_pptable_v0(hwmgr);
+ }
+
+ /* Initialize Dynamic State Adjustment Rule Settings */
+ result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
+
+ if (0 == result) {
+ struct cgs_system_info sys_info = {0};
+
+ data->is_tlu_enabled = false;
+
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ SMU7_MAX_HARDWARE_POWERLEVELS;
+ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (result)
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+ else
+ data->pcie_gen_cap = (uint32_t)sys_info.value;
+ if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ data->pcie_spc_cap = 20;
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (result)
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
+ else
+ data->pcie_lane_cap = (uint32_t)sys_info.value;
+
+ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+ /* The true clock step depends on the frequency and is typically 4.5 or 9 MHz. Use 5 MHz (500 in 10 kHz units) here. */
+ hwmgr->platform_descriptor.clockStep.engineClock = 500;
+ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+ smu7_thermal_parameter_init(hwmgr);
+ } else {
+ /* Ignore return value in here, we are cleaning up a mess. */
+ phm_hwmgr_backend_fini(hwmgr);
+ }
+
+ return 0;
+}
+
+static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t level, tmp;
+
+ if (!data->pcie_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
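+ /* Walk the mask to find the index of the highest set bit, i.e. the highest enabled DPM level. */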
+ level = 0;
+ tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ while (tmp >>= 1)
+ level++;
+
+ if (level)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel, level);
+ }
+ }
+
+ if (!data->sclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ level = 0;
+ tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ level++;
+
+ if (level)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ if (!data->mclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ level = 0;
+ tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ level++;
+
+ if (level)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ phm_apply_dal_min_voltage_request(hwmgr);
+ /* TODO: handle table v0 for Iceland and CI */
+
+ if (!data->sclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ }
+
+ if (!data->mclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+
+ return 0;
+}
+
+static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (!smum_is_dpm_running(hwmgr))
+ return -EINVAL;
+
+ if (!data->pcie_dpm_key_disabled) {
+ smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_UnForceLevel);
+ }
+
+ return smu7_upload_dpm_level_enable_mask(hwmgr);
+}
+
+static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data =
+ (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t level;
+
+ if (!data->sclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ level = phm_get_lowest_enabled_level(hwmgr,
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ if (!data->mclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ level = phm_get_lowest_enabled_level(hwmgr,
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ if (!data->pcie_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ level = phm_get_lowest_enabled_level(hwmgr,
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ (level));
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu7_force_dpm_highest(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu7_force_dpm_lowest(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = smu7_unforce_dpm_levels(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ hwmgr->dpm_level = level;
+
+ return ret;
+}
+
+static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+ return sizeof(struct smu7_power_state);
+}
+
+static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *request_ps,
+ const struct pp_power_state *current_ps)
+{
+ struct smu7_power_state *smu7_ps =
+ cast_phw_smu7_power_state(&request_ps->hardware);
+ uint32_t sclk;
+ uint32_t mclk;
+ struct PP_Clocks minimum_clocks = {0};
+ bool disable_mclk_switching;
+ bool disable_mclk_switching_for_frame_lock;
+ struct cgs_display_info info = {0};
+ const struct phm_clock_and_voltage_limits *max_limits;
+ uint32_t i;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int32_t count;
+ int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+
+ data->battery_state = (PP_StateUILabel_Battery ==
+ request_ps->classification.ui_label);
+
+ PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
+ "VI should always have 2 performance levels",
+ );
+
+ max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+ &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
+ &(hwmgr->dyn_state.max_clock_voltage_on_dc);
+
+ /* Cap clock DPM tables at DC MAX if it is in DC. */
+ if (PP_PowerSource_DC == hwmgr->power_source) {
+ for (i = 0; i < smu7_ps->performance_level_count; i++) {
+ if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
+ smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
+ if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
+ smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
+ }
+ }
+
+ smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
+ smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
+
+ minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
+ minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState)) {
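+ /* Stable P-state: pin SCLK to the highest DPM entry at or below 75% of the AC sclk limit, and MCLK to the AC limit. */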
+ max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
+ stable_pstate_sclk = (max_limits->sclk * 75) / 100;
+
+ for (count = table_info->vdd_dep_on_sclk->count - 1;
+ count >= 0; count--) {
+ if (stable_pstate_sclk >=
+ table_info->vdd_dep_on_sclk->entries[count].clk) {
+ stable_pstate_sclk =
+ table_info->vdd_dep_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+
+ if (count < 0)
+ stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+
+ stable_pstate_mclk = max_limits->mclk;
+
+ minimum_clocks.engineClock = stable_pstate_sclk;
+ minimum_clocks.memoryClock = stable_pstate_mclk;
+ }
+
+ if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
+ minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
+
+ if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
+ minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
+
+ smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
+
+ if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
+ PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
+ hwmgr->platform_descriptor.overdriveLimit.engineClock),
+ "Overdrive sclk exceeds limit",
+ hwmgr->gfx_arbiter.sclk_over_drive =
+ hwmgr->platform_descriptor.overdriveLimit.engineClock);
+
+ if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
+ smu7_ps->performance_levels[1].engine_clock =
+ hwmgr->gfx_arbiter.sclk_over_drive;
+ }
+
+ if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
+ PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock),
+ "Overdrive mclk exceeds limit",
+ hwmgr->gfx_arbiter.mclk_over_drive =
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+
+ if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
+ smu7_ps->performance_levels[1].memory_clock =
+ hwmgr->gfx_arbiter.mclk_over_drive;
+ }
+
+ disable_mclk_switching_for_frame_lock = phm_cap_enabled(
+ hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
+ disable_mclk_switching = (1 < info.display_count) ||
+ disable_mclk_switching_for_frame_lock;
+
+ sclk = smu7_ps->performance_levels[0].engine_clock;
+ mclk = smu7_ps->performance_levels[0].memory_clock;
+
+ if (disable_mclk_switching)
+ mclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].memory_clock;
+
+ if (sclk < minimum_clocks.engineClock)
+ sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
+ max_limits->sclk : minimum_clocks.engineClock;
+
+ if (mclk < minimum_clocks.memoryClock)
+ mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
+ max_limits->mclk : minimum_clocks.memoryClock;
+
+ smu7_ps->performance_levels[0].engine_clock = sclk;
+ smu7_ps->performance_levels[0].memory_clock = mclk;
+
+ smu7_ps->performance_levels[1].engine_clock =
+ (smu7_ps->performance_levels[1].engine_clock >=
+ smu7_ps->performance_levels[0].engine_clock) ?
+ smu7_ps->performance_levels[1].engine_clock :
+ smu7_ps->performance_levels[0].engine_clock;
+
+ if (disable_mclk_switching) {
+ if (mclk < smu7_ps->performance_levels[1].memory_clock)
+ mclk = smu7_ps->performance_levels[1].memory_clock;
+
+ smu7_ps->performance_levels[0].memory_clock = mclk;
+ smu7_ps->performance_levels[1].memory_clock = mclk;
+ } else {
+ if (smu7_ps->performance_levels[1].memory_clock <
+ smu7_ps->performance_levels[0].memory_clock)
+ smu7_ps->performance_levels[1].memory_clock =
+ smu7_ps->performance_levels[0].memory_clock;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState)) {
+ for (i = 0; i < smu7_ps->performance_level_count; i++) {
+ smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
+ smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
+ smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
+ smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
+ }
+ }
+ return 0;
+}
+
+static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct pp_power_state *ps;
+ struct smu7_power_state *smu7_ps;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+ if (low)
+ return smu7_ps->performance_levels[0].memory_clock;
+ else
+ return smu7_ps->performance_levels
+ [smu7_ps->performance_level_count-1].memory_clock;
+}
+
+static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct pp_power_state *ps;
+ struct smu7_power_state *smu7_ps;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+ if (low)
+ return smu7_ps->performance_levels[0].engine_clock;
+ else
+ return smu7_ps->performance_levels
+ [smu7_ps->performance_level_count-1].engine_clock;
+}
+
+static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+ struct pp_hw_power_state *hw_ps)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
+ ATOM_FIRMWARE_INFO_V2_2 *fw_info;
+ uint16_t size;
+ uint8_t frev, crev;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+
+ /* First retrieve the Boot clocks and VDDC from the firmware info table.
+ * We assume here that fw_info is unchanged if this call fails.
+ */
+ fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
+ hwmgr->device, index,
+ &size, &frev, &crev);
+ if (!fw_info)
+ /* During a test, there is no firmware info table. */
+ return 0;
+
+ /* Patch the state. */
+ data->vbios_boot_state.sclk_bootup_value =
+ le32_to_cpu(fw_info->ulDefaultEngineClock);
+ data->vbios_boot_state.mclk_bootup_value =
+ le32_to_cpu(fw_info->ulDefaultMemoryClock);
+ data->vbios_boot_state.mvdd_bootup_value =
+ le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
+ data->vbios_boot_state.vddc_bootup_value =
+ le16_to_cpu(fw_info->usBootUpVDDCVoltage);
+ data->vbios_boot_state.vddci_bootup_value =
+ le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
+ data->vbios_boot_state.pcie_gen_bootup_value =
+ smu7_get_current_pcie_speed(hwmgr);
+
+ data->vbios_boot_state.pcie_lane_bootup_value =
+ (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
+
+ /* set boot power state */
+ ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
+ ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
+ ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
+ ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
+
+ return 0;
+}
+
+static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ unsigned long ret = 0;
+
+ if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ result = pp_tables_get_num_of_entries(hwmgr, &ret);
+ return result ? 0 : ret;
+ } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
+ return result;
+ }
+ return 0;
+}
+
+static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
+ void *state, struct pp_power_state *power_state,
+ void *pp_table, uint32_t classification_flag)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_power_state *smu7_power_state =
+ (struct smu7_power_state *)(&(power_state->hardware));
+ struct smu7_performance_level *performance_level;
+ ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
+ ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
+ (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
+ PPTable_Generic_SubTable_Header *sclk_dep_table =
+ (PPTable_Generic_SubTable_Header *)
+ (((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
+ ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
+ (ATOM_Tonga_MCLK_Dependency_Table *)
+ (((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
+
+ /* The following fields are not initialized here: id orderedList allStatesList */
+ power_state->classification.ui_label =
+ (le16_to_cpu(state_entry->usClassification) &
+ ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
+ ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
+ power_state->classification.flags = classification_flag;
+ /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
+
+ power_state->classification.temporary_state = false;
+ power_state->classification.to_be_deleted = false;
+
+ power_state->validation.disallowOnDC =
+ (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+ ATOM_Tonga_DISALLOW_ON_DC));
+
+ power_state->pcie.lanes = 0;
+
+ power_state->display.disableFrameModulation = false;
+ power_state->display.limitRefreshrate = false;
+ power_state->display.enableVariBright =
+ (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+ ATOM_Tonga_ENABLE_VARIBRIGHT));
+
+ power_state->validation.supportedPowerLevels = 0;
+ power_state->uvd_clocks.VCLK = 0;
+ power_state->uvd_clocks.DCLK = 0;
+ power_state->temperatures.min = 0;
+ power_state->temperatures.max = 0;
+
+ performance_level = &(smu7_power_state->performance_levels
+ [smu7_power_state->performance_level_count++]);
+
+ PP_ASSERT_WITH_CODE(
+ (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+ "Performance levels exceeds SMC limit!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(
+ (smu7_power_state->performance_level_count <=
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+ "Performance levels exceeds Driver limit!",
+ return -EINVAL);
+
+ /* Performance levels are arranged from low to high. */
+ performance_level->memory_clock = mclk_dep_table->entries
+ [state_entry->ucMemoryClockIndexLow].ulMclk;
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexLow].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexLow].ulSclk;
+ performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+ state_entry->ucPCIEGenLow);
+ performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+ state_entry->ucPCIELaneLow);
+
+ performance_level = &(smu7_power_state->performance_levels
+ [smu7_power_state->performance_level_count++]);
+ performance_level->memory_clock = mclk_dep_table->entries
+ [state_entry->ucMemoryClockIndexHigh].ulMclk;
+
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexHigh].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexHigh].ulSclk;
+
+ performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+ state_entry->ucPCIEGenHigh);
+ performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+ state_entry->ucPCIELaneHigh);
+
+ return 0;
+}
+
+static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
+ unsigned long entry_index, struct pp_power_state *state)
+{
+ int result;
+ struct smu7_power_state *ps;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+ table_info->vdd_dep_on_mclk;
+
+ state->hardware.magic = PHM_VIslands_Magic;
+
+ ps = (struct smu7_power_state *)(&state->hardware);
+
+ result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
+ smu7_get_pp_table_entry_callback_func_v1);
+
+ /* This is the earliest point at which we have both the dependency
+ * table and the VBIOS boot state, since get_powerplay_table_entry_v1_0
+ * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK
+ * level, check that it matches the VBIOS boot state.
+ */
+ if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+ if (dep_mclk_table->entries[0].clk !=
+ data->vbios_boot_state.mclk_bootup_value)
+ printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+ "does not match VBIOS boot MCLK level");
+ if (dep_mclk_table->entries[0].vddci !=
+ data->vbios_boot_state.vddci_bootup_value)
+ printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+ "does not match VBIOS boot VDDCI level");
+ }
+
+ /* set DC compatible flag if this state supports DC */
+ if (!state->validation.disallowOnDC)
+ ps->dc_compatible = true;
+
+ if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+ data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+ ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+ ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+ if (!result) {
+ uint32_t i;
+
+ switch (state->classification.ui_label) {
+ case PP_StateUILabel_Performance:
+ data->use_pcie_performance_levels = true;
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_performance.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_performance.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_performance.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.max =
+ ps->performance_levels[i].pcie_lane;
+ if (data->pcie_lane_performance.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ case PP_StateUILabel_Battery:
+ data->use_pcie_power_saving_levels = true;
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_power_saving.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_power_saving.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_power_saving.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.max =
+ ps->performance_levels[i].pcie_lane;
+
+ if (data->pcie_lane_power_saving.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
+ struct pp_hw_power_state *power_state,
+ unsigned int index, const void *clock_info)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
+ const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
+ struct smu7_performance_level *performance_level;
+ uint32_t engine_clock, memory_clock;
+ uint16_t pcie_gen_from_bios;
+
+ engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
+ memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
+
+ if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
+ data->highest_mclk = memory_clock;
+
+ performance_level = &(ps->performance_levels
+ [ps->performance_level_count++]);
+
+ PP_ASSERT_WITH_CODE(
+ (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+ "Performance levels exceeds SMC limit!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(
+ (ps->performance_level_count <=
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+ "Performance levels exceeds Driver limit!",
+ return -EINVAL);
+
+ /* Performance levels are arranged from low to high. */
+ performance_level->memory_clock = memory_clock;
+ performance_level->engine_clock = engine_clock;
+
+ pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
+
+ performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
+ performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
+
+ return 0;
+}
+
+static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
+ unsigned long entry_index, struct pp_power_state *state)
+{
+ int result;
+ struct smu7_power_state *ps;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_clock_voltage_dependency_table *dep_mclk_table =
+ hwmgr->dyn_state.vddci_dependency_on_mclk;
+
+ memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
+
+ state->hardware.magic = PHM_VIslands_Magic;
+
+ ps = (struct smu7_power_state *)(&state->hardware);
+
+ result = pp_tables_get_entry(hwmgr, entry_index, state,
+ smu7_get_pp_table_entry_callback_func_v0);
+
+ /*
+ * This is the earliest point at which we have both the dependency
+ * table and the VBIOS boot state, since pp_tables_get_entry
+ * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK
+ * level, check that it matches the VBIOS boot state.
+ */
+ if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+ if (dep_mclk_table->entries[0].clk !=
+ data->vbios_boot_state.mclk_bootup_value)
+ printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+ "does not match VBIOS boot MCLK level");
+ if (dep_mclk_table->entries[0].v !=
+ data->vbios_boot_state.vddci_bootup_value)
+ printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+ "does not match VBIOS boot VDDCI level");
+ }
+
+ /* set DC compatible flag if this state supports DC */
+ if (!state->validation.disallowOnDC)
+ ps->dc_compatible = true;
+
+ if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+ data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+ ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+ ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+ if (!result) {
+ uint32_t i;
+
+ switch (state->classification.ui_label) {
+ case PP_StateUILabel_Performance:
+ data->use_pcie_performance_levels = true;
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_performance.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_performance.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_performance.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.max =
+ ps->performance_levels[i].pcie_lane;
+
+ if (data->pcie_lane_performance.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ case PP_StateUILabel_Battery:
+ data->use_pcie_power_saving_levels = true;
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_power_saving.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_power_saving.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_power_saving.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.max =
+ ps->performance_levels[i].pcie_lane;
+
+ if (data->pcie_lane_power_saving.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+ unsigned long entry_index, struct pp_power_state *state)
+{
+ if (hwmgr->pp_table_version == PP_TABLE_V0)
+ return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
+ else if (hwmgr->pp_table_version == PP_TABLE_V1)
+ return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
+
+ return 0;
+}
+
+static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
+{
+ uint32_t sclk, mclk, activity_percent;
+ uint32_t offset;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *value = sclk;
+ return 0;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *value = mclk;
+ return 0;
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ SMU_SoftRegisters,
+ AverageGraphicsActivity);
+
+ activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
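+ /* The SMC reports activity as an 8.8 fixed-point percentage; add
+ * 0x80 to round to the nearest integer before shifting down.
+ */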
+ activity_percent += 0x80;
+ activity_percent >>= 8;
+ *value = activity_percent > 100 ? 100 : activity_percent;
+ return 0;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ *value = smu7_thermal_get_temperature(hwmgr);
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *value = data->uvd_power_gated ? 0 : 1;
+ return 0;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *value = data->vce_power_gated ? 0 : 1;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ uint32_t sclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].engine_clock;
+ struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ uint32_t mclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].memory_clock;
+ struct PP_Clocks min_clocks = {0};
+ uint32_t i;
+ struct cgs_display_info info = {0};
+
+ data->need_update_smu7_dpm_table = 0;
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= sclk_table->count) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ } else {
+ /* TODO: Check SCLK in DAL's minimum clocks
+ * in case DeepSleep divider update is required.
+ */
+ if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
+ (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
+ data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+ data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+ }
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (mclk == mclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= mclk_table->count)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+
+ return 0;
+}
+
+static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
+ const struct smu7_power_state *smu7_ps)
+{
+ uint32_t i;
+ uint32_t sclk, max_sclk = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+
+ for (i = 0; i < smu7_ps->performance_level_count; i++) {
+ sclk = smu7_ps->performance_levels[i].engine_clock;
+ if (max_sclk < sclk)
+ max_sclk = sclk;
+ }
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
+ return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
+ dpm_table->pcie_speed_table.dpm_levels
+ [dpm_table->pcie_speed_table.count - 1].value :
+ dpm_table->pcie_speed_table.dpm_levels[i].value);
+ }
+
+ return 0;
+}
+
+static int smu7_request_link_speed_change_before_state_change(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct smu7_power_state *smu7_nps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+ const struct smu7_power_state *polaris10_cps =
+ cast_const_phw_smu7_power_state(states->pcurrent_state);
+
+ uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
+ uint16_t current_link_speed;
+
+ if (data->force_pcie_gen == PP_PCIEGenInvalid)
+ current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
+ else
+ current_link_speed = data->force_pcie_gen;
+
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+ data->pspp_notify_required = false;
+
+ if (target_link_speed > current_link_speed) {
+ switch (target_link_speed) {
+ case PP_PCIEGen3:
+ if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
+ break;
+ data->force_pcie_gen = PP_PCIEGen2;
+ if (current_link_speed == PP_PCIEGen2)
+ break;
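+ /* fall through - the Gen3 request failed, retry at Gen2 */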
+ case PP_PCIEGen2:
+ if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
+ break;
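+ /* fall through */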
+ default:
+ data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
+ break;
+ }
+ } else {
+ if (target_link_speed < current_link_speed)
+ data->pspp_notify_required = true;
+ }
+
+ return 0;
+}
+
+static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (0 == data->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((0 == data->sclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to freeze SCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_FreezeLevel),
+ "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
+ return -EINVAL);
+ }
+
+ if ((0 == data->mclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table &
+ DPMTABLE_OD_UPDATE_MCLK)) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to freeze MCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_FreezeLevel),
+ "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
+ return -EINVAL);
+ }
+
+ return 0;
+}
+
+static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ int result = 0;
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t sclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].engine_clock;
+ uint32_t mclk = smu7_ps->performance_levels
+ [smu7_ps->performance_level_count - 1].memory_clock;
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+
+ struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+ uint32_t dpm_count, clock_percent;
+ uint32_t i;
+
+ if (0 == data->need_update_smu7_dpm_table)
+ return 0;
+
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+ dpm_table->sclk_table.dpm_levels
+ [dpm_table->sclk_table.count - 1].value = sclk;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ /* Need to do calculation based on the golden DPM table
+ * as the Heatmap GPU Clock axis is also based on the default values
+ */
+ PP_ASSERT_WITH_CODE(
+ (golden_dpm_table->sclk_table.dpm_levels
+ [golden_dpm_table->sclk_table.count - 1].value != 0),
+ "Divide by 0!",
+ return -EINVAL);
+ dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
+
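+ /* Rescale the intermediate levels by the percentage the top SCLK
+ * moved from its golden value; the two lowest levels and the
+ * already-updated top level are left untouched.
+ */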
+ for (i = dpm_count; i > 1; i--) {
+ if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
+ clock_percent =
+ ((sclk
+ - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
+ ) * 100)
+ / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+ dpm_table->sclk_table.dpm_levels[i].value =
+ golden_dpm_table->sclk_table.dpm_levels[i].value +
+ (golden_dpm_table->sclk_table.dpm_levels[i].value *
+ clock_percent)/100;
+
+ } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
+ clock_percent =
+ ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
+ - sclk) * 100)
+ / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+ dpm_table->sclk_table.dpm_levels[i].value =
+ golden_dpm_table->sclk_table.dpm_levels[i].value -
+ (golden_dpm_table->sclk_table.dpm_levels[i].value *
+ clock_percent) / 100;
+ } else
+ dpm_table->sclk_table.dpm_levels[i].value =
+ golden_dpm_table->sclk_table.dpm_levels[i].value;
+ }
+ }
+ }
+
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+ dpm_table->mclk_table.dpm_levels
+ [dpm_table->mclk_table.count - 1].value = mclk;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+
+ PP_ASSERT_WITH_CODE(
+ (golden_dpm_table->mclk_table.dpm_levels
+ [golden_dpm_table->mclk_table.count-1].value != 0),
+ "Divide by 0!",
+ return -EINVAL);
+ dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
+ for (i = dpm_count; i > 1; i--) {
+ if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
+ clock_percent = ((mclk -
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
+ / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+ dpm_table->mclk_table.dpm_levels[i].value =
+ golden_dpm_table->mclk_table.dpm_levels[i].value +
+ (golden_dpm_table->mclk_table.dpm_levels[i].value *
+ clock_percent) / 100;
+
+ } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
+ clock_percent = (
+ (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
+ * 100)
+ / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+ dpm_table->mclk_table.dpm_levels[i].value =
+ golden_dpm_table->mclk_table.dpm_levels[i].value -
+ (golden_dpm_table->mclk_table.dpm_levels[i].value *
+ clock_percent) / 100;
+ } else
+ dpm_table->mclk_table.dpm_levels[i].value =
+ golden_dpm_table->mclk_table.dpm_levels[i].value;
+ }
+ }
+ }
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
+ result = smum_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
+ return result);
+ }
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
+ /*populate MCLK dpm table to SMU7 */
+ result = smum_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
+ return result);
+ }
+
+ return result;
+}
+
+static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
+ struct smu7_single_dpm_table *dpm_table,
+ uint32_t low_limit, uint32_t high_limit)
+{
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->count; i++) {
+ if ((dpm_table->dpm_levels[i].value < low_limit)
+ || (dpm_table->dpm_levels[i].value > high_limit))
+ dpm_table->dpm_levels[i].enabled = false;
+ else
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ return 0;
+}
+
+static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
+ const struct smu7_power_state *smu7_ps)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t high_limit_count;
+
+ PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
+ "power state did not have any performance level",
+ return -EINVAL);
+
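+ /* With a single performance level, both trim limits come from level 0. */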
+ high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
+
+ smu7_trim_single_dpm_states(hwmgr,
+ &(data->dpm_table.sclk_table),
+ smu7_ps->performance_levels[0].engine_clock,
+ smu7_ps->performance_levels[high_limit_count].engine_clock);
+
+ smu7_trim_single_dpm_states(hwmgr,
+ &(data->dpm_table.mclk_table),
+ smu7_ps->performance_levels[0].memory_clock,
+ smu7_ps->performance_levels[high_limit_count].memory_clock);
+
+ return 0;
+}
+
+static int smu7_generate_dpm_level_enable_mask(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ int result;
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+
+ result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+ if (result)
+ return result;
+
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
+
+ return 0;
+}
+
+static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (0 == data->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((0 == data->sclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to Unfreeze SCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
+ return -EINVAL);
+ }
+
+ if ((0 == data->mclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to Unfreeze MCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+ "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
+ return -EINVAL);
+ }
+
+ data->need_update_smu7_dpm_table = 0;
+
+ return 0;
+}
+
+static int smu7_notify_link_speed_change_after_state_change(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct smu7_power_state *smu7_ps =
+ cast_const_phw_smu7_power_state(states->pnew_state);
+ uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
+ uint8_t request;
+
+ if (data->pspp_notify_required) {
+ if (target_link_speed == PP_PCIEGen3)
+ request = PCIE_PERF_REQ_GEN3;
+ else if (target_link_speed == PP_PCIEGen2)
+ request = PCIE_PERF_REQ_GEN2;
+ else
+ request = PCIE_PERF_REQ_GEN1;
+
+ if (request == PCIE_PERF_REQ_GEN1 &&
+ smu7_get_current_pcie_speed(hwmgr) > 0)
+ return 0;
+
+ if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
+ if (PP_PCIEGen2 == target_link_speed)
+ printk("PSPP request to switch to Gen2 from Gen3 Failed!");
+ else
+ printk("PSPP request to switch to Gen1 from Gen2 Failed!");
+ }
+ }
+
+ return 0;
+}
+
+static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+}
+
+static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ int tmp_result, result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to find DPM states clocks in DPM table!",
+ result = tmp_result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PCIEPerformanceRequest)) {
+ tmp_result =
+ smu7_request_link_speed_change_before_state_change(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to request link speed change before state change!",
+ result = tmp_result);
+ }
+
+ tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
+
+ tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to populate and upload SCLK MCLK DPM levels!",
+ result = tmp_result);
+
+ tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to generate DPM level enabled mask!",
+ result = tmp_result);
+
+ tmp_result = smum_update_sclk_threshold(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to update SCLK threshold!",
+ result = tmp_result);
+
+ tmp_result = smu7_notify_smc_display(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to notify smc display settings!",
+ result = tmp_result);
+
+ tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to unfreeze SCLK MCLK DPM!",
+ result = tmp_result);
+
+ tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to upload DPM level enabled mask!",
+ result = tmp_result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PCIEPerformanceRequest)) {
+ tmp_result =
+ smu7_notify_link_speed_change_after_state_change(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to notify link speed change after state change!",
+ result = tmp_result);
+ }
+ data->apply_optimized_settings = false;
+ return result;
+}
+
+static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
+{
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
+
+ if (phm_is_hw_access_blocked(hwmgr))
+ return 0;
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+}
+
+int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+{
+ PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
+
+ return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -EINVAL;
+}
+
+int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+{
+ uint32_t num_active_displays = 0;
+ struct cgs_display_info info = {0};
+
+ info.mode_info = NULL;
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ num_active_displays = info.display_count;
+
+ if (num_active_displays > 1 && !hwmgr->display_config.multi_monitor_in_sync)
+ smu7_notify_smc_display_change(hwmgr, false);
+
+ return 0;
+}
+
+/**
+* Programs the display gap
+*
+* @param hwmgr: the address of the powerplay hardware manager.
+* @return always 0
+*/
+int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t num_active_displays = 0;
+ uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
+ uint32_t display_gap2;
+ uint32_t pre_vbi_time_in_us;
+ uint32_t frame_time_in_us;
+ uint32_t ref_clock;
+ uint32_t refresh_rate = 0;
+ struct cgs_display_info info = {0};
+ struct cgs_mode_info mode_info;
+
+ info.mode_info = &mode_info;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ num_active_displays = info.display_count;
+
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+ ref_clock = mode_info.ref_clock;
+ refresh_rate = mode_info.refresh_rate;
+
+ if (0 == refresh_rate)
+ refresh_rate = 60;
+
+ frame_time_in_us = 1000000 / refresh_rate;
+
+ pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
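+ /* Twice the frame time, in 100 us units; smu7_notify_smc_display()
+ * later hands this to the SMC via PPSMC_MSG_SetVBITimeout.
+ */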
+ data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
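+ /* Clock values in this driver are in 10 kHz units, so ref_clock / 100
+ * gives ticks per microsecond; display_gap2 is the pre-VBI time
+ * expressed in reference-clock ticks.
+ */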
+ display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ SMU_SoftRegisters,
+ PreVBlankGap), 0x64);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ SMU_SoftRegisters,
+ VBlankTimeout),
+ (frame_time_in_us - pre_vbi_time_in_us));
+
+ return 0;
+}
+
+int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+ return smu7_program_display_gap(hwmgr);
+}
+
+/**
+* Set maximum target operating fan output RPM
+*
+* @param hwmgr: the address of the powerplay hardware manager.
+* @param usMaxFanRpm: max operating fan RPM value.
+* @return The response that came from the SMC.
+*/
+static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
+{
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
+
+ if (phm_is_hw_access_blocked(hwmgr))
+ return 0;
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+}
+
+int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+ const void *thermal_interrupt_info)
+{
+ return 0;
+}
+
+bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ bool is_update_required = false;
+ struct cgs_display_info info = {0, 0, NULL};
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ is_update_required = true;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
+ (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
+ hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+ is_update_required = true;
+ }
+ return is_update_required;
+}
+
+static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
+ const struct smu7_performance_level *pl2)
+{
+ return ((pl1->memory_clock == pl2->memory_clock) &&
+ (pl1->engine_clock == pl2->engine_clock) &&
+ (pl1->pcie_gen == pl2->pcie_gen) &&
+ (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+ const struct smu7_power_state *psa;
+ const struct smu7_power_state *psb;
+ int i;
+
+ if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
+ return -EINVAL;
+
+ psa = cast_const_phw_smu7_power_state(pstate1);
+ psb = cast_const_phw_smu7_power_state(pstate2);
+ /* If the two states don't even have the same number of performance levels they cannot be the same state. */
+ if (psa->performance_level_count != psb->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < psa->performance_level_count; i++) {
+ if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+ /* If we have found even one performance level pair that is different the states are different. */
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, use the UVD/VCE clocks and the SCLK threshold to break the tie. */
+ *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+ *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+ *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+
+ return 0;
+}
+
+int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t vbios_version;
+ uint32_t tmp;
+
+ /* Read MC indirect register offset 0x9F bits [3:0] to see
+ * if VBIOS has already loaded a full version of MC ucode
+ * or not.
+ */
+
+ smu7_get_mc_microcode_version(hwmgr);
+ vbios_version = hwmgr->microcode_version_info.MC & 0xf;
+
+ data->need_long_memory_training = false;
+
+ cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
+ ixMC_IO_DEBUG_UP_13);
+ tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
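+ /* Bit 23 of ixMC_IO_DEBUG_UP_13 selects which memory-latency pair applies. */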
+ if (tmp & (1 << 23)) {
+ data->mem_latency_high = MEM_LATENCY_HIGH;
+ data->mem_latency_low = MEM_LATENCY_LOW;
+ } else {
+ data->mem_latency_high = 330;
+ data->mem_latency_low = 330;
+ }
+
+ return 0;
+}
+
+static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ data->clock_registers.vCG_SPLL_FUNC_CNTL =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
+ data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
+ data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
+ data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
+ data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
+ data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
+ data->clock_registers.vDLL_CNTL =
+ cgs_read_register(hwmgr->device, mmDLL_CNTL);
+ data->clock_registers.vMCLK_PWRMGT_CNTL =
+ cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
+ data->clock_registers.vMPLL_AD_FUNC_CNTL =
+ cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
+ data->clock_registers.vMPLL_DQ_FUNC_CNTL =
+ cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
+ data->clock_registers.vMPLL_FUNC_CNTL =
+ cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
+ data->clock_registers.vMPLL_FUNC_CNTL_1 =
+ cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
+ data->clock_registers.vMPLL_FUNC_CNTL_2 =
+ cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
+ data->clock_registers.vMPLL_SS1 =
+ cgs_read_register(hwmgr->device, mmMPLL_SS1);
+ data->clock_registers.vMPLL_SS2 =
+ cgs_read_register(hwmgr->device, mmMPLL_SS2);
+ return 0;
+}
+
+/**
+ * Find out if memory is GDDR5.
+ *
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t temp;
+
+ temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
+
+ data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
+ ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
+ MC_SEQ_MISC0_GDDR5_SHIFT));
+
+ return 0;
+}
+
+/**
+ * Enables Dynamic Power Management by SMC
+ *
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, STATIC_PM_EN, 1);
+
+ return 0;
+}
+
+/**
+ * Initialize PowerGating States for different engines
+ *
+ * @param hwmgr: the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+ data->samu_power_gated = false;
+
+ return 0;
+}
+
+static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ data->low_sclk_interrupt_threshold = 0;
+ return 0;
+}
+
+int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ smu7_upload_mc_firmware(hwmgr);
+
+ tmp_result = smu7_read_clock_registers(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to read clock registers!", result = tmp_result);
+
+ tmp_result = smu7_get_memory_type(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to get memory type!", result = tmp_result);
+
+ tmp_result = smu7_enable_acpi_power_management(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable ACPI power management!", result = tmp_result);
+
+ tmp_result = smu7_init_power_gate_state(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to init power gate state!", result = tmp_result);
+
+ tmp_result = smu7_get_mc_microcode_version(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to get MC microcode version!", result = tmp_result);
+
+ tmp_result = smu7_init_sclk_threshold(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to init sclk threshold!", result = tmp_result);
+
+ return result;
+}
+
+static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ break;
+ case PP_MCLK:
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ break;
+ case PP_PCIE:
+ {
+ uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ uint32_t level = 0;
+
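+ /* Collapse the mask to the index of its highest set bit; PCIe DPM is forced to a single level, not a mask. */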
+ while (tmp >>= 1)
+ level++;
+
+ if (!data->pcie_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ level);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+ int i, now, size = 0;
+ uint32_t clock, pcie_speed;
+
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (clock > sclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (clock > mclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_PCIE:
+ pcie_speed = smu7_get_current_pcie_speed(hwmgr);
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_speed != pcie_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
+static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+ if (mode) {
+ /* stop auto-manage */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+ smu7_fan_ctrl_set_static_mode(hwmgr, mode);
+ } else {
+ /* restart auto-manage */
+ smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+ }
+
+ return 0;
+}
+
+static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->fan_ctrl_is_in_default_mode)
+ return hwmgr->fan_ctrl_default_mode;
+ else
+ return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, FDO_PWM_MODE);
+}
+
+static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct smu7_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
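+ /* Overdrive as a percentage of the golden (default) top SCLK level. */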
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct smu7_power_state *smu7_ps;
+
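+ /* Engine clock overdrive is capped at +20%. */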
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+ smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct smu7_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct smu7_power_state *smu7_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
+
+ smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
+ struct phm_clock_voltage_dependency_table *sclk_table;
+ int i;
+
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
+ return -EINVAL;
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < dep_sclk_table->count; i++) {
+ clocks->clock[i] = dep_sclk_table->entries[i].clk;
+ clocks->count++;
+ }
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+ for (i = 0; i < sclk_table->count; i++) {
+ clocks->clock[i] = sclk_table->entries[i].clk;
+ clocks->count++;
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
+ return data->mem_latency_high;
+ else if (clk >= MEM_FREQ_HIGH_LATENCY)
+ return data->mem_latency_low;
+ else
+ return MEM_LATENCY_ERR;
+}
+
+static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
+ int i;
+ struct phm_clock_voltage_dependency_table *mclk_table;
+
+ if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ if (table_info == NULL)
+ return -EINVAL;
+ dep_mclk_table = table_info->vdd_dep_on_mclk;
+ for (i = 0; i < dep_mclk_table->count; i++) {
+ clocks->clock[i] = dep_mclk_table->entries[i].clk;
+ clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+ dep_mclk_table->entries[i].clk);
+ clocks->count++;
+ }
+ } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+ for (i = 0; i < mclk_table->count; i++) {
+ clocks->clock[i] = mclk_table->entries[i].clk;
+ clocks->count++;
+ }
+ }
+ return 0;
+}
+
+static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ switch (type) {
+ case amd_pp_sys_clock:
+ smu7_get_sclks(hwmgr, clocks);
+ break;
+ case amd_pp_mem_clock:
+ smu7_get_mclks(hwmgr, clocks);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
+ .backend_init = &smu7_hwmgr_backend_init,
+ .backend_fini = &phm_hwmgr_backend_fini,
+ .asic_setup = &smu7_setup_asic_task,
+ .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
+ .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
+ .force_dpm_level = &smu7_force_dpm_level,
+ .power_state_set = smu7_set_power_state_tasks,
+ .get_power_state_size = smu7_get_power_state_size,
+ .get_mclk = smu7_dpm_get_mclk,
+ .get_sclk = smu7_dpm_get_sclk,
+ .patch_boot_state = smu7_dpm_patch_boot_state,
+ .get_pp_table_entry = smu7_get_pp_table_entry,
+ .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
+ .powerdown_uvd = smu7_powerdown_uvd,
+ .powergate_uvd = smu7_powergate_uvd,
+ .powergate_vce = smu7_powergate_vce,
+ .disable_clock_power_gating = smu7_disable_clock_power_gating,
+ .update_clock_gatings = smu7_update_clock_gatings,
+ .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
+ .display_config_changed = smu7_display_configuration_changed_task,
+ .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
+ .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
+ .get_temperature = smu7_thermal_get_temperature,
+ .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
+ .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
+ .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
+ .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
+ .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
+ .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
+ .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
+ .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
+ .register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
+ .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
+ .check_states_equal = smu7_check_states_equal,
+ .set_fan_control_mode = smu7_set_fan_control_mode,
+ .get_fan_control_mode = smu7_get_fan_control_mode,
+ .force_clock_level = smu7_force_clock_level,
+ .print_clock_levels = smu7_print_clock_levels,
+ .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+ .get_sclk_od = smu7_get_sclk_od,
+ .set_sclk_od = smu7_set_sclk_od,
+ .get_mclk_od = smu7_get_mclk_od,
+ .set_mclk_od = smu7_set_mclk_od,
+ .get_clock_by_type = smu7_get_clock_by_type,
+ .read_sensor = smu7_read_sensor,
+ .dynamic_state_management_disable = smu7_disable_dpm_tasks,
+};
+
+uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr)
+{
+ uint8_t i;
+ uint32_t temp;
+ uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
+
+ PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
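+ /* Pick the largest divider (clock >> i) that keeps the divided clock at or above the deep-sleep minimum. */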
+ for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ temp = clock >> i;
+
+ if (temp >= min || i == 0)
+ break;
+ }
+ return i;
+}
+
+int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+
+ hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
+ if (hwmgr->pp_table_version == PP_TABLE_V0)
+ hwmgr->pptable_func = &pptable_funcs;
+ else if (hwmgr->pp_table_version == PP_TABLE_V1)
+ hwmgr->pptable_func = &pptable_v1_0_funcs;
+
+ pp_smu7_thermal_initialize(hwmgr);
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index 33c33947e827..27e7f76ad8a6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -21,81 +21,100 @@
*
*/
-#ifndef POLARIS10_HWMGR_H
-#define POLARIS10_HWMGR_H
+#ifndef _SMU7_HWMGR_H
+#define _SMU7_HWMGR_H
#include "hwmgr.h"
-#include "smu74.h"
-#include "smu74_discrete.h"
#include "ppatomctrl.h"
-#include "polaris10_ppsmc.h"
-#include "polaris10_powertune.h"
-#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2
+#define SMU7_MAX_HARDWARE_POWERLEVELS 2
-#define POLARIS10_VOLTAGE_CONTROL_NONE 0x0
-#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO 0x1
-#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2 0x2
-#define POLARIS10_VOLTAGE_CONTROL_MERGED 0x3
+#define SMU7_VOLTAGE_CONTROL_NONE 0x0
+#define SMU7_VOLTAGE_CONTROL_BY_GPIO 0x1
+#define SMU7_VOLTAGE_CONTROL_BY_SVID2 0x2
+#define SMU7_VOLTAGE_CONTROL_MERGED 0x3
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
#define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008
-struct polaris10_performance_level {
+enum gpu_pt_config_reg_type {
+ GPU_CONFIGREG_MMR = 0,
+ GPU_CONFIGREG_SMC_IND,
+ GPU_CONFIGREG_DIDT_IND,
+ GPU_CONFIGREG_GC_CAC_IND,
+ GPU_CONFIGREG_CACHE,
+ GPU_CONFIGREG_MAX
+};
+
+struct gpu_pt_config_reg {
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t value;
+ enum gpu_pt_config_reg_type type;
+};
+
+struct smu7_performance_level {
uint32_t memory_clock;
uint32_t engine_clock;
uint16_t pcie_gen;
uint16_t pcie_lane;
};
-struct polaris10_uvd_clocks {
+struct smu7_thermal_temperature_setting {
+ long temperature_low;
+ long temperature_high;
+ long temperature_shutdown;
+};
+
+struct smu7_uvd_clocks {
uint32_t vclk;
uint32_t dclk;
};
-struct polaris10_vce_clocks {
+struct smu7_vce_clocks {
uint32_t evclk;
uint32_t ecclk;
};
-struct polaris10_power_state {
+struct smu7_power_state {
uint32_t magic;
- struct polaris10_uvd_clocks uvd_clks;
- struct polaris10_vce_clocks vce_clks;
+ struct smu7_uvd_clocks uvd_clks;
+ struct smu7_vce_clocks vce_clks;
uint32_t sam_clk;
uint16_t performance_level_count;
bool dc_compatible;
uint32_t sclk_threshold;
- struct polaris10_performance_level performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS];
+ struct smu7_performance_level performance_levels[SMU7_MAX_HARDWARE_POWERLEVELS];
};
-struct polaris10_dpm_level {
+struct smu7_dpm_level {
bool enabled;
uint32_t value;
uint32_t param1;
};
-#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define SMU7_MAX_DEEPSLEEP_DIVIDER_ID 5
#define MAX_REGULAR_DPM_NUMBER 8
-#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500
+#define SMU7_MINIMUM_ENGINE_CLOCK 2500
-struct polaris10_single_dpm_table {
+struct smu7_single_dpm_table {
uint32_t count;
- struct polaris10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+ struct smu7_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
};
-struct polaris10_dpm_table {
- struct polaris10_single_dpm_table sclk_table;
- struct polaris10_single_dpm_table mclk_table;
- struct polaris10_single_dpm_table pcie_speed_table;
- struct polaris10_single_dpm_table vddc_table;
- struct polaris10_single_dpm_table vddci_table;
- struct polaris10_single_dpm_table mvdd_table;
+struct smu7_dpm_table {
+ struct smu7_single_dpm_table sclk_table;
+ struct smu7_single_dpm_table mclk_table;
+ struct smu7_single_dpm_table pcie_speed_table;
+ struct smu7_single_dpm_table vddc_table;
+ struct smu7_single_dpm_table vddci_table;
+ struct smu7_single_dpm_table mvdd_table;
};
-struct polaris10_clock_registers {
+struct smu7_clock_registers {
uint32_t vCG_SPLL_FUNC_CNTL;
uint32_t vCG_SPLL_FUNC_CNTL_2;
uint32_t vCG_SPLL_FUNC_CNTL_3;
@@ -116,42 +135,35 @@ struct polaris10_clock_registers {
#define DISABLE_MC_LOADMICROCODE 1
#define DISABLE_MC_CFGPROGRAMMING 2
-struct polaris10_voltage_smio_registers {
+struct smu7_voltage_smio_registers {
uint32_t vS0_VID_LOWER_SMIO_CNTL;
};
-#define POLARIS10_MAX_LEAKAGE_COUNT 8
+#define SMU7_MAX_LEAKAGE_COUNT 8
-struct polaris10_leakage_voltage {
+struct smu7_leakage_voltage {
uint16_t count;
- uint16_t leakage_id[POLARIS10_MAX_LEAKAGE_COUNT];
- uint16_t actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT];
+ uint16_t leakage_id[SMU7_MAX_LEAKAGE_COUNT];
+ uint16_t actual_voltage[SMU7_MAX_LEAKAGE_COUNT];
};
-struct polaris10_vbios_boot_state {
+struct smu7_vbios_boot_state {
uint16_t mvdd_bootup_value;
uint16_t vddc_bootup_value;
uint16_t vddci_bootup_value;
+ uint16_t vddgfx_bootup_value;
uint32_t sclk_bootup_value;
uint32_t mclk_bootup_value;
uint16_t pcie_gen_bootup_value;
uint16_t pcie_lane_bootup_value;
};
-/* Ultra Low Voltage parameter structure */
-struct polaris10_ulv_parm {
- bool ulv_supported;
- uint32_t cg_ulv_parameter;
- uint32_t ulv_volt_change_delay;
- struct polaris10_performance_level ulv_power_level;
-};
-
-struct polaris10_display_timing {
+struct smu7_display_timing {
uint32_t min_clock_in_sr;
uint32_t num_existing_displays;
};
-struct polaris10_dpmlevel_enable_mask {
+struct smu7_dpmlevel_enable_mask {
uint32_t uvd_dpm_enable_mask;
uint32_t vce_dpm_enable_mask;
uint32_t acp_dpm_enable_mask;
@@ -161,22 +173,15 @@ struct polaris10_dpmlevel_enable_mask {
uint32_t pcie_dpm_enable_mask;
};
-struct polaris10_pcie_perf_range {
+struct smu7_pcie_perf_range {
uint16_t max;
uint16_t min;
};
-struct polaris10_range_table {
- uint32_t trans_lower_frequency; /* in 10khz */
- uint32_t trans_upper_frequency;
-};
-struct polaris10_hwmgr {
- struct polaris10_dpm_table dpm_table;
- struct polaris10_dpm_table golden_dpm_table;
- SMU74_Discrete_DpmTable smc_state_table;
- struct SMU74_Discrete_Ulv ulv_setting;
+struct smu7_hwmgr {
+ struct smu7_dpm_table dpm_table;
+ struct smu7_dpm_table golden_dpm_table;
- struct polaris10_range_table range_table[NUM_SCLK_RANGE];
uint32_t voting_rights_clients0;
uint32_t voting_rights_clients1;
uint32_t voting_rights_clients2;
@@ -188,12 +193,11 @@ struct polaris10_hwmgr {
uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold;
uint32_t voltage_control;
- uint32_t vddc_vddci_delta;
-
+ uint32_t vdd_gfx_control;
+ uint32_t vddc_vddgfx_delta;
uint32_t active_auto_throttle_sources;
- struct polaris10_clock_registers clock_registers;
- struct polaris10_voltage_smio_registers voltage_smio_registers;
+ struct smu7_clock_registers clock_registers;
bool is_memory_gddr5;
uint16_t acpi_vddc;
@@ -203,8 +207,9 @@ struct polaris10_hwmgr {
uint32_t pcie_gen_cap;
uint32_t pcie_lane_cap;
uint32_t pcie_spc_cap;
- struct polaris10_leakage_voltage vddc_leakage;
- struct polaris10_leakage_voltage Vddci_leakage;
+ struct smu7_leakage_voltage vddc_leakage;
+ struct smu7_leakage_voltage vddci_leakage;
+ struct smu7_leakage_voltage vddcgfx_leakage;
uint32_t mvdd_control;
uint32_t vddc_mask_low;
@@ -213,30 +218,23 @@ struct polaris10_hwmgr {
uint16_t min_vddc_in_pptable;
uint16_t max_vddci_in_pptable;
uint16_t min_vddci_in_pptable;
- uint32_t mclk_strobe_mode_threshold;
- uint32_t mclk_stutter_mode_threshold;
- uint32_t mclk_edc_enable_threshold;
- uint32_t mclk_edcwr_enable_threshold;
bool is_uvd_enabled;
- struct polaris10_vbios_boot_state vbios_boot_state;
+ struct smu7_vbios_boot_state vbios_boot_state;
bool pcie_performance_request;
bool battery_state;
bool is_tlu_enabled;
+ bool disable_handshake;
+ bool smc_voltage_control_enabled;
+ bool vbi_time_out_support;
- /* ---- SMC SRAM Address of firmware header tables ---- */
- uint32_t sram_end;
- uint32_t dpm_table_start;
- uint32_t soft_regs_start;
- uint32_t mc_reg_table_start;
- uint32_t fan_table_start;
- uint32_t arb_table_start;
-
+ uint32_t soft_regs_start;
/* ---- Stuff originally coming from Evergreen ---- */
uint32_t vddci_control;
struct pp_atomctrl_voltage_table vddc_voltage_table;
struct pp_atomctrl_voltage_table vddci_voltage_table;
struct pp_atomctrl_voltage_table mvdd_voltage_table;
+ struct pp_atomctrl_voltage_table vddgfx_voltage_table;
uint32_t mgcg_cgtt_local2;
uint32_t mgcg_cgtt_local3;
@@ -250,7 +248,7 @@ struct polaris10_hwmgr {
bool performance_request_registered;
/* ---- Low Power Features ---- */
- struct polaris10_ulv_parm ulv;
+ bool ulv_supported;
/* ---- CAC Stuff ---- */
uint32_t cac_table_start;
@@ -264,8 +262,8 @@ struct polaris10_hwmgr {
bool enable_tdc_limit_feature;
bool enable_pkg_pwr_tracking_feature;
bool disable_uvd_power_tune_feature;
- const struct polaris10_pt_defaults *power_tune_defaults;
- struct SMU74_Discrete_PmFuses power_tune_table;
+
+
uint32_t dte_tj_offset;
uint32_t fast_watermark_threshold;
@@ -273,23 +271,22 @@ struct polaris10_hwmgr {
bool vddc_phase_shed_control;
/* ---- DI/DT ---- */
- struct polaris10_display_timing display_timing;
- uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
+ struct smu7_display_timing display_timing;
/* ---- Thermal Temperature Setting ---- */
- struct polaris10_dpmlevel_enable_mask dpm_level_enable_mask;
+ struct smu7_thermal_temperature_setting thermal_temp_setting;
+ struct smu7_dpmlevel_enable_mask dpm_level_enable_mask;
uint32_t need_update_smu7_dpm_table;
uint32_t sclk_dpm_key_disabled;
uint32_t mclk_dpm_key_disabled;
uint32_t pcie_dpm_key_disabled;
uint32_t min_engine_clocks;
- struct polaris10_pcie_perf_range pcie_gen_performance;
- struct polaris10_pcie_perf_range pcie_lane_performance;
- struct polaris10_pcie_perf_range pcie_gen_power_saving;
- struct polaris10_pcie_perf_range pcie_lane_power_saving;
+ struct smu7_pcie_perf_range pcie_gen_performance;
+ struct smu7_pcie_perf_range pcie_lane_performance;
+ struct smu7_pcie_perf_range pcie_gen_power_saving;
+ struct smu7_pcie_perf_range pcie_lane_power_saving;
bool use_pcie_performance_levels;
bool use_pcie_power_saving_levels;
- uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
uint32_t mclk_activity_target;
uint32_t mclk_dpm0_activity_target;
uint32_t low_sclk_interrupt_threshold;
@@ -309,49 +306,48 @@ struct polaris10_hwmgr {
uint32_t up_hyst;
uint32_t disable_dpm_mask;
bool apply_optimized_settings;
+
uint32_t avfs_vdroop_override_setting;
bool apply_avfs_cks_off_voltage;
uint32_t frame_time_x2;
+ uint16_t mem_latency_high;
+ uint16_t mem_latency_low;
};
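smu7_hwmgr is the per-device private state behind the generic pp_hwmgr handle; in this driver family such state conventionally hangs off hwmgr->backend. A sketch of the usual accessor, assuming the backend pointer is set up during hwmgr initialization:

/* Conventional powerplay pattern (sketch): recover the SMU7 private
 * state from the generic handle; assumes hwmgr->backend was pointed at
 * a smu7_hwmgr when the hwmgr was initialized. */
static struct smu7_hwmgr *to_smu7_hwmgr(struct pp_hwmgr *hwmgr)
{
        return (struct smu7_hwmgr *)(hwmgr->backend);
}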
/* To convert to Q8.8 format for firmware */
-#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256
-
-enum Polaris10_I2CLineID {
- Polaris10_I2CLineID_DDC1 = 0x90,
- Polaris10_I2CLineID_DDC2 = 0x91,
- Polaris10_I2CLineID_DDC3 = 0x92,
- Polaris10_I2CLineID_DDC4 = 0x93,
- Polaris10_I2CLineID_DDC5 = 0x94,
- Polaris10_I2CLineID_DDC6 = 0x95,
- Polaris10_I2CLineID_SCLSDA = 0x96,
- Polaris10_I2CLineID_DDCVGA = 0x97
+#define SMU7_Q88_FORMAT_CONVERSION_UNIT 256
+
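Q8.8 scales a ratio by 256 so the firmware can carry eight fractional bits in a 16-bit field: 1.5 becomes 384 (0x0180) and 0.25 becomes 64 (0x0040). A worked helper with explicit rounding (illustrative only):

/* Illustrative Q8.8 conversion with round-to-nearest:
 * to_q88(3, 2) == 384 (1.5), to_q88(1, 4) == 64 (0.25). */
static uint16_t to_q88(uint32_t numerator, uint32_t denominator)
{
        return (uint16_t)((numerator * SMU7_Q88_FORMAT_CONVERSION_UNIT +
                           denominator / 2) / denominator);
}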
+enum SMU7_I2CLineID {
+ SMU7_I2CLineID_DDC1 = 0x90,
+ SMU7_I2CLineID_DDC2 = 0x91,
+ SMU7_I2CLineID_DDC3 = 0x92,
+ SMU7_I2CLineID_DDC4 = 0x93,
+ SMU7_I2CLineID_DDC5 = 0x94,
+ SMU7_I2CLineID_DDC6 = 0x95,
+ SMU7_I2CLineID_SCLSDA = 0x96,
+ SMU7_I2CLineID_DDCVGA = 0x97
};
-#define POLARIS10_I2C_DDC1DATA 0
-#define POLARIS10_I2C_DDC1CLK 1
-#define POLARIS10_I2C_DDC2DATA 2
-#define POLARIS10_I2C_DDC2CLK 3
-#define POLARIS10_I2C_DDC3DATA 4
-#define POLARIS10_I2C_DDC3CLK 5
-#define POLARIS10_I2C_SDA 40
-#define POLARIS10_I2C_SCL 41
-#define POLARIS10_I2C_DDC4DATA 65
-#define POLARIS10_I2C_DDC4CLK 66
-#define POLARIS10_I2C_DDC5DATA 0x48
-#define POLARIS10_I2C_DDC5CLK 0x49
-#define POLARIS10_I2C_DDC6DATA 0x4a
-#define POLARIS10_I2C_DDC6CLK 0x4b
-#define POLARIS10_I2C_DDCVGADATA 0x4c
-#define POLARIS10_I2C_DDCVGACLK 0x4d
-
-#define POLARIS10_UNUSED_GPIO_PIN 0x7F
-
-int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
-
-int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+#define SMU7_I2C_DDC1DATA 0
+#define SMU7_I2C_DDC1CLK 1
+#define SMU7_I2C_DDC2DATA 2
+#define SMU7_I2C_DDC2CLK 3
+#define SMU7_I2C_DDC3DATA 4
+#define SMU7_I2C_DDC3CLK 5
+#define SMU7_I2C_SDA 40
+#define SMU7_I2C_SCL 41
+#define SMU7_I2C_DDC4DATA 65
+#define SMU7_I2C_DDC4CLK 66
+#define SMU7_I2C_DDC5DATA 0x48
+#define SMU7_I2C_DDC5CLK 0x49
+#define SMU7_I2C_DDC6DATA 0x4a
+#define SMU7_I2C_DDC6CLK 0x4b
+#define SMU7_I2C_DDCVGADATA 0x4c
+#define SMU7_I2C_DDCVGACLK 0x4d
+
+#define SMU7_UNUSED_GPIO_PIN 0x7F
+uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
+uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr);
#endif
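The sleep-divider helper declared above maps an engine clock to a divider ID such that clock >> id still meets the deep-sleep floor clock_insr. One plausible reading of that contract, capped at SMU7_MAX_DEEPSLEEP_DIVIDER_ID, is sketched below; this is an assumption about the declaration's behavior, not the patch's implementation (the real code may additionally clamp the floor to SMU7_MINIMUM_ENGINE_CLOCK):

/* Sketch of the declared contract: choose the deepest divider i such
 * that (clock >> i) >= clock_insr, returning 0 (no division) when no
 * deeper divider satisfies the floor. */
uint8_t example_sleep_divider_id(uint32_t clock, uint32_t clock_insr)
{
        uint8_t i;

        for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
                if ((clock >> i) >= clock_insr)
                        break;
        }
        return i;
}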
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index b9cb240a135d..41b634ffa5b0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -20,546 +20,364 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
#include "hwmgr.h"
#include "smumgr.h"
-#include "polaris10_hwmgr.h"
-#include "polaris10_powertune.h"
-#include "polaris10_smumgr.h"
-#include "smu74_discrete.h"
+#include "smu7_hwmgr.h"
+#include "smu7_powertune.h"
#include "pp_debug.h"
-#include "gca/gfx_8_0_d.h"
-#include "gca/gfx_8_0_sh_mask.h"
-#include "oss/oss_3_0_sh_mask.h"
+#include "smu7_common.h"
#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
-struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = {
+static const struct gpu_pt_config_reg GCCACConfig_Polaris10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
*/
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND },
-
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
-
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND },
{ 0xFFFFFFFF }
};
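These tables are sentinel-terminated: the trailing { 0xFFFFFFFF } entry marks the end, so no explicit length is stored. A hypothetical consumer walks until the sentinel; read_reg()/write_reg() are stand-ins for whichever per-space accessors cfg->type selects, and apply_config_reg() is the earlier sketch:

/* Hypothetical consumer: program a sentinel-terminated config table.
 * read_reg()/write_reg() are stand-ins for the per-space accessors. */
static void program_config_table(const struct gpu_pt_config_reg *cfg)
{
        for (; cfg->offset != 0xFFFFFFFF; cfg++)
                write_reg(cfg->type, cfg->offset,
                          apply_config_reg(read_reg(cfg->type, cfg->offset),
                                           cfg));
}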
-struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = {
+static const struct gpu_pt_config_reg GCCACConfig_Polaris11[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
*/
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND },
-
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
-
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND },
- { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, GPU_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, GPU_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, GPU_CONFIGREG_GC_CAC_IND },
{ 0xFFFFFFFF }
};
-struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = {
+static const struct gpu_pt_config_reg DIDTConfig_Polaris10[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
*/
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ 0xFFFFFFFF }
};
-struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = {
+static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = {
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
* Offset Mask Shift Value Type
* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
*/
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
-
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
- { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ 0xFFFFFFFF }
};
-static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
- { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
- { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
- { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
-};
-
-void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- polaris10_hwmgr->power_tune_defaults =
- &polaris10_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
-
-}
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
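
For reference, the removed helper maps a 0-100 percent fan-gain setting onto the 4096-step fixed-point scale the SMC expects, via raw * 4096 / 100. A minimal standalone sketch of the same conversion (names and sample values are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Map a 0-100 percent setting onto a 0-4096 fixed-point scale,
 * exactly as raw * 4096 / 100 does in the removed helper. */
static uint16_t scale_percent_to_4096(uint16_t raw_percent)
{
	uint32_t tmp = (uint32_t)raw_percent * 4096 / 100;
	return (uint16_t)tmp;
}

int main(void)
{
	/* 100% -> 4096, 50% -> 2048, 1% -> 40 (integer truncation) */
	printf("%u %u %u\n", (unsigned)scale_percent_to_4096(100),
	       (unsigned)scale_percent_to_4096(50),
	       (unsigned)scale_percent_to_4096(1));
	return 0;
}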
-
-int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
- SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table=
- &hwmgr->thermal_controller.advanceFanControlParameters;
- int i, j, k;
- const uint16_t *pdef1;
- const uint16_t *pdef2;
-
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",
- );
-
- dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
-
- pdef1 = defaults->BAPMTI_R;
- pdef2 = defaults->BAPMTI_RC;
-
- for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU74_DTE_SOURCES; j++) {
- for (k = 0; k < SMU74_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
- dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
- pdef1++;
- pdef2++;
- }
- }
- }
-
- return 0;
-}
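
The populate routine above packs host values into the SMC's fixed-point conventions: TDP in 1/128-watt units (usTDP * 128) and temperature limits in 1/256-degree units (temp * 256), byte-swapped for the SMC with PP_HOST_TO_SMC_US, before flattening the BAPMTI_R/RC defaults into the 3-D table. A minimal sketch of just the unit conversions, with illustrative inputs and the endian swap omitted:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t usTDP = 120;       /* watts (illustrative) */
	uint16_t target_temp = 85;  /* degrees C (illustrative) */

	/* SMC expects TDP in 1/128 W and temperatures in 1/256 C. */
	uint16_t tdp_smc  = (uint16_t)(usTDP * 128);       /* 15360 */
	uint16_t temp_smc = (uint16_t)(target_temp * 256); /* 21760 */

	printf("DefaultTdp=%u (0x%04x), TemperatureLimitEdge=%u (0x%04x)\n",
	       (unsigned)tdp_smc, (unsigned)tdp_smc,
	       (unsigned)temp_smc, (unsigned)temp_smc);
	return 0;
}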
-
-static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-
- data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- data->power_tune_table.SviLoadLineTrimVddC = 3;
- data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
-
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
- uint32_t temp;
-
- if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
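
The routine above reads one 32-bit dword out of SMC SRAM and splits it into byte-wide fields, taking bits 23:16 as LPMLTemperatureMin, bits 15:8 as LPMLTemperatureMax, and bits 7:0 as Reserved. A standalone sketch of the same unpacking (the SRAM value is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dword = 0x00402a07;  /* illustrative SRAM contents */

	/* Same byte extraction the removed populate_dw8 performs. */
	uint8_t temp_min = (uint8_t)((dword >> 16) & 0xff); /* 0x40 */
	uint8_t temp_max = (uint8_t)((dword >> 8) & 0xff);  /* 0x2a */
	uint8_t reserved = (uint8_t)(dword & 0xff);         /* 0x07 */

	printf("min=%u max=%u reserved=%u\n", temp_min, temp_max, reserved);
	return 0;
}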
-
-static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
- || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
{
uint32_t en = enable ? 1 : 0;
@@ -608,29 +426,29 @@ static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
return result;
}
-static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
- struct polaris10_pt_config_reg *cac_config_regs)
+static int smu7_program_pt_config_registers(struct pp_hwmgr *hwmgr,
+ const struct gpu_pt_config_reg *cac_config_regs)
{
- struct polaris10_pt_config_reg *config_regs = cac_config_regs;
+ const struct gpu_pt_config_reg *config_regs = cac_config_regs;
uint32_t cache = 0;
uint32_t data = 0;
PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
while (config_regs->offset != 0xFFFFFFFF) {
- if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
+ if (config_regs->type == GPU_CONFIGREG_CACHE)
cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
else {
switch (config_regs->type) {
- case POLARIS10_CONFIGREG_SMC_IND:
+ case GPU_CONFIGREG_SMC_IND:
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
break;
- case POLARIS10_CONFIGREG_DIDT_IND:
+ case GPU_CONFIGREG_DIDT_IND:
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
break;
- case POLARIS10_CONFIGREG_GC_CAC_IND:
+ case GPU_CONFIGREG_GC_CAC_IND:
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
break;
@@ -644,15 +462,15 @@ static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
data |= cache;
switch (config_regs->type) {
- case POLARIS10_CONFIGREG_SMC_IND:
+ case GPU_CONFIGREG_SMC_IND:
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
break;
- case POLARIS10_CONFIGREG_DIDT_IND:
+ case GPU_CONFIGREG_DIDT_IND:
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
break;
- case POLARIS10_CONFIGREG_GC_CAC_IND:
+ case GPU_CONFIGREG_GC_CAC_IND:
cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
break;
@@ -669,7 +487,7 @@ static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
return 0;
}
-int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
+int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
uint32_t num_se = 0;
@@ -699,20 +517,20 @@ int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
if (hwmgr->chip_id == CHIP_POLARIS10) {
- result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
+ result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
} else if (hwmgr->chip_id == CHIP_POLARIS11) {
- result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+ result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
}
}
cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
- result = polaris10_enable_didt(hwmgr, true);
+ result = smu7_enable_didt(hwmgr, true);
PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
/* TO DO Post DIDT enable clock gating */
@@ -721,7 +539,7 @@ int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
return 0;
}
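
The hunks above show smu7_enable_didt_config steering register writes at one shader engine at a time through GRBM_GFX_INDEX, programming that SE's GC_CAC and DIDT tables, then restoring the saved broadcast value so later writes reach all SEs again. A standalone sketch of the select/program/restore pattern; the index encoding, SE count, and broadcast value are assumptions for illustration, not the hardware's actual bit layout:

#include <stdint.h>
#include <stdio.h>

static uint32_t grbm_gfx_index;  /* stub for the real index register */

static void program_one_se(uint32_t se)
{
	printf("programming DIDT tables for SE %u\n", se);
}

int main(void)
{
	uint32_t num_se = 4;             /* illustrative SE count */
	uint32_t broadcast = 0xe0000000; /* illustrative saved broadcast value */

	for (uint32_t se = 0; se < num_se; se++) {
		/* Steer subsequent register writes at one shader engine. */
		grbm_gfx_index = (se << 16) | 0x40000000; /* illustrative encoding */
		program_one_se(se);
	}
	/* Restore broadcast so later writes hit all SEs again. */
	grbm_gfx_index = broadcast;
	return 0;
}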
-int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
+int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
@@ -731,7 +549,7 @@ int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
/* TO DO Pre DIDT disable clock gating */
- result = polaris10_enable_didt(hwmgr, false);
+ result = smu7_enable_didt(hwmgr, false);
PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result);
/* TO DO Post DIDT enable clock gating */
}
@@ -739,95 +557,9 @@ int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
return 0;
}
-
-static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
- data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
- return 0;
-}
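
Note the order of operations in the removed code: usHighCACLeakage / 100 * 256 truncates in the division before scaling, so any sub-unit component of the leakage field is lost; multiplying first would keep it. A small standalone comparison (the units and value are assumptions for illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t leakage = 150;  /* e.g. 1.50% stored in hundredths (assumed) */

	/* As written: divide first, truncating to a whole unit. */
	uint16_t as_written = (uint16_t)(leakage / 100 * 256);           /* 256 */

	/* Multiply first to preserve the fractional part. */
	uint16_t reordered = (uint16_t)((uint32_t)leakage * 256 / 100);  /* 384 */

	printf("%u vs %u\n", (unsigned)as_written, (unsigned)reordered);
	return 0;
}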
-
-int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- if (polaris10_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
-
- if (polaris10_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
-
- if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- if (0 != polaris10_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- if (polaris10_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- if (polaris10_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&data->power_tune_table,
- (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -843,9 +575,9 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
return result;
}
-int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
+int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -860,9 +592,9 @@ int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
return result;
}
-int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
+int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
@@ -871,21 +603,27 @@ int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
return 0;
}
-static int polaris10_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
+static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
{
return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
-int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
+int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
int smc_result;
int result = 0;
+ struct phm_cac_tdp_table *cac_table;
data->power_containment_features = 0;
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ cac_table = table_info->cac_dtp_table;
+ else
+ cac_table = hwmgr->dyn_state.cac_dtp_table;
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
@@ -905,15 +643,13 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
- struct phm_cac_tdp_table *cac_table =
- table_info->cac_dtp_table;
uint32_t default_limit =
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
- if (polaris10_set_power_limit(hwmgr, default_limit))
+ if (smu7_set_power_limit(hwmgr, default_limit))
printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
}
}
@@ -921,9 +657,9 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
-int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
+int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -963,14 +699,19 @@ int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
-int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
+int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+ struct phm_cac_tdp_table *cac_table;
+
int adjust_percent, target_tdp;
int result = 0;
+ if (hwmgr->pp_table_version == PP_TABLE_V1)
+ cac_table = table_info->cac_dtp_table;
+ else
+ cac_table = hwmgr->dyn_state.cac_dtp_table;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
/* adjustment percentage has already been validated */
@@ -981,7 +722,7 @@ int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
* but message to be 8 bit fraction for messages
*/
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
- result = polaris10_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
+ result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
}
return result;
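
The comment above spells out the convention: the power-play table stores TDP in whole watts, while the OverDriveSetTargetTdp message wants an 8.8 fixed-point value, so the code scales usTDP by 256 and applies the already-validated adjustment percentage. A worked sketch of the arithmetic (inputs are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int usTDP = 120;          /* watts (illustrative) */
	int adjust_percent = 10;  /* +10% overdrive (illustrative) */

	/* usTDP * 256 is 8.8 fixed point; scale by (100 + pct) / 100. */
	int target_tdp = ((100 + adjust_percent) * (usTDP * 256)) / 100;

	printf("target = %d (= %d.%02d W in 8.8)\n", target_tdp,
	       target_tdp >> 8, (target_tdp & 0xff) * 100 / 256);  /* 132.00 W */
	return 0;
}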
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
index bc78e28f010d..22f86b6bf1be 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
@@ -20,17 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef POLARIS10_POWERTUNE_H
-#define POLARIS10_POWERTUNE_H
-
-enum polaris10_pt_config_reg_type {
- POLARIS10_CONFIGREG_MMR = 0,
- POLARIS10_CONFIGREG_SMC_IND,
- POLARIS10_CONFIGREG_DIDT_IND,
- POLARIS10_CONFIGREG_GC_CAC_IND,
- POLARIS10_CONFIGREG_CACHE,
- POLARIS10_CONFIGREG_MAX
-};
+#ifndef _SMU7_POWERTUNE_H
+#define _SMU7_POWERTUNE_H
#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000
#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12
@@ -52,43 +43,20 @@ enum polaris10_pt_config_reg_type {
#define ixGC_CAC_CNTL 0x0000
#define ixDIDT_SQ_STALL_CTRL 0x0004
-#define ixDIDT_SQ_TUNING_CTRL 0x0005
+#define ixDIDT_SQ_TUNING_CTRL 0x0005
#define ixDIDT_TD_STALL_CTRL 0x0044
#define ixDIDT_TD_TUNING_CTRL 0x0045
#define ixDIDT_TCP_STALL_CTRL 0x0064
#define ixDIDT_TCP_TUNING_CTRL 0x0065
-struct polaris10_pt_config_reg {
- uint32_t offset;
- uint32_t mask;
- uint32_t shift;
- uint32_t value;
- enum polaris10_pt_config_reg_type type;
-};
-
-struct polaris10_pt_defaults {
- uint8_t SviLoadLineEn;
- uint8_t SviLoadLineVddC;
- uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
- uint8_t TDC_MAWt;
- uint8_t TdcWaterfallCtl;
- uint8_t DTEAmbientTempBase;
-
- uint32_t DisplayCac;
- uint32_t BAPM_TEMP_GRADIENT;
- uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
- uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
-};
-void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
-int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
-int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
-int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
-int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
-int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
-int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
-int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
-int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
-#endif /* POLARIS10_POWERTUNE_H */
+int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr);
+int smu7_enable_power_containment(struct pp_hwmgr *hwmgr);
+int smu7_disable_power_containment(struct pp_hwmgr *hwmgr);
+int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
+int smu7_power_control_set_level(struct pp_hwmgr *hwmgr);
+int smu7_enable_didt_config(struct pp_hwmgr *hwmgr);
+int smu7_disable_didt_config(struct pp_hwmgr *hwmgr);
+#endif /* _SMU7_POWERTUNE_H */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 92976b68d6fd..29d0319b22e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,20 +20,17 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
#include <asm/div64.h>
-#include "fiji_thermal.h"
-#include "fiji_hwmgr.h"
-#include "fiji_smumgr.h"
-#include "fiji_ppsmc.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+#include "smu7_thermal.h"
+#include "smu7_hwmgr.h"
+#include "smu7_common.h"
+
+int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
struct phm_fan_speed_info *fan_speed_info)
{
-
if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ return -ENODEV;
fan_speed_info->supports_percent_read = true;
fan_speed_info->supports_percent_write = true;
@@ -55,7 +52,7 @@ int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
return 0;
}
-int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
uint32_t duty100;
@@ -63,7 +60,7 @@ int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint64_t tmp64;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ return -ENODEV;
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
@@ -84,7 +81,7 @@ int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
return 0;
}
-int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
uint32_t tach_period;
uint32_t crystal_clock_freq;
@@ -92,7 +89,7 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0))
- return 0;
+ return -ENODEV;
tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD);
@@ -100,9 +97,9 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (tach_period == 0)
return -EINVAL;
- crystal_clock_freq = tonga_get_xclk(hwmgr);
+ crystal_clock_freq = smu7_get_xclk(hwmgr);
- *speed = 60 * crystal_clock_freq * 10000/ tach_period;
+ *speed = 60 * crystal_clock_freq * 10000 / tach_period;
return 0;
}
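
The read path above converts a tachometer period count into RPM as 60 * xclk * 10000 / tach_period, now returning -ENODEV when there is no fan and -EINVAL when the period reads zero. A standalone sketch of the conversion; the factor of 10000 suggests the crystal clock is reported in 10 kHz units, though that unit is an assumption here:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xclk = 2700;          /* crystal clock, assumed 10 kHz units */
	uint32_t tach_period = 135000; /* illustrative counter reading */

	if (tach_period == 0)
		return -1;  /* the driver returns -EINVAL here */

	/* 60 seconds * clock-in-Hz / period-in-clocks = revolutions/minute */
	uint32_t rpm = 60u * xclk * 10000u / tach_period;

	printf("fan speed = %u RPM\n", rpm);  /* 12000 */
	return 0;
}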
@@ -113,7 +110,7 @@ int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
* mode the fan control mode: 0 default, 1 by percent, 5 by RPM
* @exception Should always succeed.
*/
-int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
if (hwmgr->fan_ctrl_is_in_default_mode) {
@@ -139,7 +136,7 @@ int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
* @param hwmgr the address of the powerplay hardware manager.
* @exception Should always succeed.
*/
-int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->fan_ctrl_is_in_default_mode) {
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -152,7 +149,7 @@ int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
return 0;
}
-int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
@@ -187,7 +184,7 @@ int fiji_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
}
-int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
}
@@ -198,7 +195,7 @@ int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
* @param speed is the percentage value (0% - 100%) to be set.
* @exception Fails if the 100% setting appears to be 0.
*/
-int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
uint32_t duty100;
@@ -213,7 +210,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
- fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
+ smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL1, FMAX_DUTY100);
@@ -228,7 +225,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
- return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
/**
@@ -236,7 +233,7 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
* @param hwmgr the address of the powerplay hardware manager.
* @exception Always succeeds.
*/
-int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
int result;
@@ -245,11 +242,11 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
- result = fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (!result)
- result = fiji_fan_ctrl_start_smc_fan_control(hwmgr);
+ result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
} else
- result = fiji_fan_ctrl_set_default_mode(hwmgr);
+ result = smu7_fan_ctrl_set_default_mode(hwmgr);
return result;
}
@@ -260,7 +257,7 @@ int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
* @param speed is the percentage value (min - max) to be set.
* @exception Fails if the speed does not lie between min and max.
*/
-int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
uint32_t tach_period;
uint32_t crystal_clock_freq;
@@ -272,14 +269,18 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0;
- crystal_clock_freq = tonga_get_xclk(hwmgr);
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+ crystal_clock_freq = smu7_get_xclk(hwmgr);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD, tach_period);
- return fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
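
The write path inverts the read-path formula, solving for the tach period from the requested RPM, with an additional factor of 8 in the divisor that the read path does not apply (not explained in this hunk); the patch also now stops SMC fan control before forcing a static period. A sketch of the inverse conversion under the same clock-unit assumption as above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xclk = 2700;  /* crystal clock, assumed 10 kHz units */
	uint32_t rpm = 1500;   /* requested fan speed (illustrative) */

	/* Invert rpm = 60 * Hz / period, with the driver's extra /8 factor. */
	uint32_t tach_period = 60u * xclk * 10000u / (8u * rpm);

	printf("TACH_PERIOD = %u\n", tach_period);  /* 135000 */
	return 0;
}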
/**
@@ -287,7 +288,7 @@ int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
*
* @param hwmgr The address of the hardware manager.
*/
-int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
int temp;
@@ -296,7 +297,7 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
/* Bit 9 means the reading is lower than the lowest usable value. */
if (temp & 0x200)
- temp = FIJI_THERMAL_MAXIMUM_TEMP_READING;
+ temp = SMU7_THERMAL_MAXIMUM_TEMP_READING;
else
temp = temp & 0x1ff;
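
The decode above keeps the low nine bits of the raw thermal status as the temperature and treats bit 9 as an under-range flag, substituting the defined maximum reading in that case. A standalone sketch of the same decode (the constant is an illustrative stand-in for SMU7_THERMAL_MAXIMUM_TEMP_READING):

#include <stdint.h>
#include <stdio.h>

#define MAX_TEMP_READING 255  /* illustrative stand-in for the driver constant */

static int decode_temp(int raw)
{
	/* Bit 9 set means the reading is below the lowest usable value,
	 * so the driver substitutes the defined maximum reading. */
	if (raw & 0x200)
		return MAX_TEMP_READING;
	return raw & 0x1ff;  /* 9-bit temperature field */
}

int main(void)
{
	printf("%d %d\n", decode_temp(0x05a), decode_temp(0x27f));  /* 90 255 */
	return 0;
}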
@@ -312,12 +313,12 @@ int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr)
* @param range Temperature range to be programmed for high and low alert signals
* @exception PP_Result_BadInput if the input data is not valid.
*/
-static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+static int smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
uint32_t low_temp, uint32_t high_temp)
{
- uint32_t low = FIJI_THERMAL_MINIMUM_ALERT_TEMP *
+ uint32_t low = SMU7_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t high = FIJI_THERMAL_MAXIMUM_ALERT_TEMP *
+ uint32_t high = SMU7_THERMAL_MAXIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
if (low < low_temp)
@@ -346,7 +347,7 @@ static int fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
*
* @param hwmgr The address of the hardware manager.
*/
-static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
+static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
{
if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -365,13 +366,13 @@ static int fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
*
* @param hwmgr The address of the hardware manager.
*/
-static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK);
- alert &= ~(FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
+ alert &= ~(SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert);
@@ -383,13 +384,13 @@ static int fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr)
* Disable thermal alerts on the RV770 thermal controller.
* @param hwmgr The address of the hardware manager.
*/
-static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
+int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK);
- alert |= (FIJI_THERMAL_HIGH_ALERT_MASK | FIJI_THERMAL_LOW_ALERT_MASK);
+ alert |= (SMU7_THERMAL_HIGH_ALERT_MASK | SMU7_THERMAL_LOW_ALERT_MASK);
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_THERMAL_INT, THERM_INT_MASK, alert);
@@ -402,129 +403,17 @@ static int fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr)
* Currently just disables alerts.
* @param hwmgr The address of the hardware manager.
*/
-int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
+int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
- int result = fiji_thermal_disable_alert(hwmgr);
+ int result = smu7_thermal_disable_alert(hwmgr);
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- fiji_fan_ctrl_set_default_mode(hwmgr);
+ if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+ smu7_fan_ctrl_set_default_mode(hwmgr);
return result;
}
/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (data->fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = tonga_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = fiji_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- data->sram_end);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
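
The slope arithmetic in the function above is pure fixed point: temperatures are carried in hundredths of a degree, PWM values in hundredths of a percent, and the leading 50 rounds the final division by 100 to the nearest integer. A minimal standalone sketch of the same computation, using hypothetical fan-curve values rather than data from any real fan table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs: duty100 as read from CG_FDO_CTRL1.FMAX_DUTY100,
	 * temperatures in 0.01 degC units, PWM points in 0.01 % units.
	 */
	uint32_t duty100 = 255;
	uint32_t t_min = 4500, t_med = 6500;		/* 45.00 C -> 65.00 C */
	uint32_t pwm_min = 3000, pwm_med = 6000;	/* 30.00 % -> 60.00 % */

	uint32_t t_diff = t_med - t_min;
	uint32_t pwm_diff = pwm_med - pwm_min;

	/* Same round-to-nearest as above: "+ 50" before the final "/ 100". */
	uint16_t slope = (uint16_t)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);

	printf("slope1 = %u\n", slope);	/* (50 + 16*255*3000/2000)/100 = 61 */
	return 0;
}
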
-
-/**
* Start the fan control on the SMC.
* @param hwmgr the address of the powerplay hardware manager.
* @param pInput the pointer to input data
@@ -533,7 +422,7 @@ int tf_fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return always 0
*/
-int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
/* If the fantable setup has failed we could have disabled
@@ -543,8 +432,8 @@ int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
*/
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
- fiji_fan_ctrl_start_smc_fan_control(hwmgr);
- fiji_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+ smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
return 0;
@@ -559,7 +448,7 @@ int tf_fiji_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from set temperature range routine
*/
-int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
@@ -567,7 +456,7 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
if (range == NULL)
return -EINVAL;
- return fiji_thermal_set_temperature_range(hwmgr, range->min, range->max);
+ return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
}
/**
@@ -579,10 +468,10 @@ int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from initialize thermal controller routine
*/
-int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
- return fiji_thermal_initialize(hwmgr);
+ return smu7_thermal_initialize(hwmgr);
}
/**
@@ -594,10 +483,10 @@ int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from enable alert routine
*/
-int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
- return fiji_thermal_enable_alert(hwmgr);
+ return smu7_thermal_enable_alert(hwmgr);
}
/**
@@ -609,53 +498,54 @@ int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from disable alert routine
*/
-static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr,
+static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
void *input, void *output, void *storage, int result)
{
- return fiji_thermal_disable_alert(hwmgr);
+ return smu7_thermal_disable_alert(hwmgr);
}
static const struct phm_master_table_item
-fiji_thermal_start_thermal_controller_master_list[] = {
- {NULL, tf_fiji_thermal_initialize},
- {NULL, tf_fiji_thermal_set_temperature_range},
- {NULL, tf_fiji_thermal_enable_alert},
+phm_thermal_start_thermal_controller_master_list[] = {
+ {NULL, tf_smu7_thermal_initialize},
+ {NULL, tf_smu7_thermal_set_temperature_range},
+ {NULL, tf_smu7_thermal_enable_alert},
+ {NULL, smum_thermal_avfs_enable},
/* We should restrict performance levels to low before we halt the SMC.
 * On the other hand, we are still in the boot state when we do this,
 * so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- {NULL, tf_fiji_thermal_setup_fan_table},
- {NULL, tf_fiji_thermal_start_smc_fan_control},
+ {NULL, smum_thermal_setup_fan_table},
+ {NULL, tf_smu7_thermal_start_smc_fan_control},
{NULL, NULL}
};
static const struct phm_master_table_header
-fiji_thermal_start_thermal_controller_master = {
+phm_thermal_start_thermal_controller_master = {
0,
PHM_MasterTableFlag_None,
- fiji_thermal_start_thermal_controller_master_list
+ phm_thermal_start_thermal_controller_master_list
};
static const struct phm_master_table_item
-fiji_thermal_set_temperature_range_master_list[] = {
- {NULL, tf_fiji_thermal_disable_alert},
- {NULL, tf_fiji_thermal_set_temperature_range},
- {NULL, tf_fiji_thermal_enable_alert},
+phm_thermal_set_temperature_range_master_list[] = {
+ {NULL, tf_smu7_thermal_disable_alert},
+ {NULL, tf_smu7_thermal_set_temperature_range},
+ {NULL, tf_smu7_thermal_enable_alert},
{NULL, NULL}
};
static const struct phm_master_table_header
-fiji_thermal_set_temperature_range_master = {
+phm_thermal_set_temperature_range_master = {
0,
PHM_MasterTableFlag_None,
- fiji_thermal_set_temperature_range_master_list
+ phm_thermal_set_temperature_range_master_list
};
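
The two lists above follow the powerplay master-table convention: an array of {isFunctionNeeded, tableFunction} pairs terminated by a {NULL, NULL} sentinel, wrapped in a header that the table runner consumes. A hedged sketch of how such a list is walked, with simplified stand-in types rather than the real phm_* structures:

#include <stdio.h>

struct table_item {
	void *is_needed;			/* NULL in the lists above */
	int (*table_function)(void *ctx);
};

static int step_ok(void *ctx)  { (void)ctx; return 0; }
static int step_err(void *ctx) { (void)ctx; return -1; }

static int run_master_list(const struct table_item *item, void *ctx)
{
	int ret = 0;

	/* walk the entries in order until the {NULL, NULL} sentinel */
	for (; item->table_function; item++) {
		ret = item->table_function(ctx);
		if (ret)
			break;			/* stop at the first failing step */
	}
	return ret;
}

int main(void)
{
	const struct table_item list[] = {
		{ NULL, step_ok },
		{ NULL, step_err },		/* the run stops here */
		{ NULL, step_ok },		/* never reached */
		{ NULL, NULL },
	};

	printf("run returned %d\n", run_master_list(list, NULL));	/* -1 */
	return 0;
}
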
-int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
+int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
if (!hwmgr->thermal_controller.fanInfo.bNoFan)
- fiji_fan_ctrl_set_default_mode(hwmgr);
+ smu7_fan_ctrl_set_default_mode(hwmgr);
return 0;
}
@@ -664,17 +554,17 @@ int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
* @param hwmgr The address of the hardware manager.
* @exception Any error code from the low-level communication.
*/
-int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr)
+int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
{
int result;
result = phm_construct_table(hwmgr,
- &fiji_thermal_set_temperature_range_master,
+ &phm_thermal_set_temperature_range_master,
&(hwmgr->set_temperature_range));
if (!result) {
result = phm_construct_table(hwmgr,
- &fiji_thermal_start_thermal_controller_master,
+ &phm_thermal_start_thermal_controller_master,
&(hwmgr->start_thermal_controller));
if (result)
phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
new file mode 100644
index 000000000000..6face973be43
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_THERMAL_H_
+#define _SMU7_THERMAL_H_
+
+#include "hwmgr.h"
+
+#define SMU7_THERMAL_HIGH_ALERT_MASK 0x1
+#define SMU7_THERMAL_LOW_ALERT_MASK 0x2
+
+#define SMU7_THERMAL_MINIMUM_TEMP_READING -256
+#define SMU7_THERMAL_MAXIMUM_TEMP_READING 255
+
+#define SMU7_THERMAL_MINIMUM_ALERT_TEMP 0
+#define SMU7_THERMAL_MAXIMUM_ALERT_TEMP 255
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+extern int smu7_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
+extern int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
+extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
+extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
deleted file mode 100644
index e58d038a997b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "tonga_clockpowergating.h"
-#include "tonga_ppsmc.h"
-#include "tonga_hwmgr.h"
-
-int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
- if (phm_cf_want_uvd_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_UVDPowerOFF);
- return 0;
-}
-
-int tonga_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
-{
- if (phm_cf_want_uvd_power_gating(hwmgr)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDynamicPowerGating)) {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDPowerON, 1);
- } else {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDPowerON, 0);
- }
- }
-
- return 0;
-}
-
-int tonga_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
-{
- if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_VCEPowerOFF);
- return 0;
-}
-
-int tonga_phm_powerup_vce(struct pp_hwmgr *hwmgr)
-{
- if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_VCEPowerON);
- return 0;
-}
-
-int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
-{
- int ret = 0;
-
- switch (block) {
- case PHM_AsicBlock_UVD_MVC:
- case PHM_AsicBlock_UVD:
- case PHM_AsicBlock_UVD_HD:
- case PHM_AsicBlock_UVD_SD:
- if (gating == PHM_ClockGateSetting_StaticOff)
- ret = tonga_phm_powerdown_uvd(hwmgr);
- else
- ret = tonga_phm_powerup_uvd(hwmgr);
- break;
- case PHM_AsicBlock_GFX:
- default:
- break;
- }
-
- return ret;
-}
-
-int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
-
- tonga_phm_powerup_uvd(hwmgr);
- tonga_phm_powerup_vce(hwmgr);
-
- return 0;
-}
-
-int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (data->uvd_power_gated == bgate)
- return 0;
-
- data->uvd_power_gated = bgate;
-
- if (bgate) {
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- tonga_update_uvd_dpm(hwmgr, true);
- tonga_phm_powerdown_uvd(hwmgr);
- } else {
- tonga_phm_powerup_uvd(hwmgr);
- cgs_set_powergating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
-
- tonga_update_uvd_dpm(hwmgr, false);
- }
-
- return 0;
-}
-
-int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct phm_set_power_state_input states;
- const struct pp_power_state *pcurrent;
- struct pp_power_state *requested;
-
- pcurrent = hwmgr->current_ps;
- requested = hwmgr->request_ps;
-
- states.pcurrent_state = &(pcurrent->hardware);
- states.pnew_state = &(requested->hardware);
-
- if (phm_cf_want_vce_power_gating(hwmgr)) {
- if (data->vce_power_gated != bgate) {
- if (bgate) {
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- tonga_enable_disable_vce_dpm(hwmgr, false);
- data->vce_power_gated = true;
- } else {
- tonga_phm_powerup_vce(hwmgr);
- data->vce_power_gated = false;
- cgs_set_powergating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
-
- tonga_update_vce_dpm(hwmgr, &states);
- tonga_enable_disable_vce_dpm(hwmgr, true);
- return 0;
- }
- }
- } else {
- tonga_update_vce_dpm(hwmgr, &states);
- tonga_enable_disable_vce_dpm(hwmgr, true);
- return 0;
- }
-
- if (!data->vce_power_gated)
- tonga_update_vce_dpm(hwmgr, &states);
-
- return 0;
-}
-
-int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
- const uint32_t *msg_id)
-{
- PPSMC_Msg msg;
- uint32_t value;
-
- switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
- case PP_GROUP_GFX:
- switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
- case PP_BLOCK_GFX_CG:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_GFX_CGCG_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_GFX_CGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- case PP_BLOCK_GFX_MG:
- /* For GFX MGCG there are three different domains:
- * CPF, RLC, and all others. CPF MGCG is not used on Tonga,
- * and Tonga does not support GFX MGLS.
- */
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = (CG_RLC_MGCG_MASK | CG_GFX_OTHERS_MGCG_MASK);
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- default:
- return -1;
- }
- break;
-
- case PP_GROUP_SYS:
- switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
- case PP_BLOCK_SYS_BIF:
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_BIF_MGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- case PP_BLOCK_SYS_MC:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_MC_MGCG_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
-
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_MC_MGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
-
- }
- break;
-
- case PP_BLOCK_SYS_HDP:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_HDP_MGCG_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
-
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
-
- value = CG_SYS_HDP_MGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- case PP_BLOCK_SYS_SDMA:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_SDMA_MGCG_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
-
- if (PP_STATE_SUPPORT_LS & *msg_id) {
- msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
-
- value = CG_SYS_SDMA_MGLS_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- case PP_BLOCK_SYS_ROM:
- if (PP_STATE_SUPPORT_CG & *msg_id) {
- msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG)
- ? PPSMC_MSG_EnableClockGatingFeature
- : PPSMC_MSG_DisableClockGatingFeature;
- value = CG_SYS_ROM_MASK;
-
- if (0 != smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg, value))
- return -1;
- }
- break;
-
- default:
- return -1;
-
- }
- break;
-
- default:
- return -1;
-
- }
-
- return 0;
-}
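
The switch above decodes a packed msg_id: a group field, a block field, support flags (PP_STATE_SUPPORT_CG/LS), and the requested state (PP_STATE_CG/LS). The sketch below composes one such msg_id; the macro values here are illustrative placeholders, the real ones come from the powerplay headers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the powerplay bitfield macros. */
#define PP_GROUP_SHIFT		30
#define PP_BLOCK_SHIFT		23
#define PP_GROUP_SYS		2
#define PP_BLOCK_SYS_MC		0x2
#define PP_STATE_SUPPORT_CG	0x4
#define PP_STATE_CG		0x1

int main(void)
{
	/* Group and block select the switch arms; the state bits say that
	 * MC MGCG is supported and should be enabled, so the decoder would
	 * send PPSMC_MSG_EnableClockGatingFeature with CG_SYS_MC_MGCG_MASK.
	 */
	uint32_t msg_id = ((uint32_t)PP_GROUP_SYS << PP_GROUP_SHIFT) |
			  ((uint32_t)PP_BLOCK_SYS_MC << PP_BLOCK_SHIFT) |
			  PP_STATE_SUPPORT_CG | PP_STATE_CG;

	printf("msg_id = 0x%08x\n", msg_id);
	return 0;
}
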
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
deleted file mode 100644
index 080d69d77f04..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_DYN_DEFAULTS_H
-#define TONGA_DYN_DEFAULTS_H
-
-
-/** \file
- * Volcanic Islands Dynamic default parameters.
- */
-
-enum TONGAdpm_TrendDetection {
- TONGAdpm_TrendDetection_AUTO,
- TONGAdpm_TrendDetection_UP,
- TONGAdpm_TrendDetection_DOWN
-};
-typedef enum TONGAdpm_TrendDetection TONGAdpm_TrendDetection;
-
-/* Bit vector representing same fields as hardware register. */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy */
-/* HDP_busy */
-/* IH_busy */
-/* DRM_busy */
-/* DRMDMA_busy */
-/* UVD_busy */
-/* VCE_busy */
-/* ACP_busy */
-/* SAMU_busy */
-/* AVP_busy */
-/* SDMA enabled */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
-/* SH_Gfx_busy */
-/* RB_Gfx_busy */
-/* VCE_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
-/* FE_Gfx_busy */
-/* RB_Gfx_busy */
-/* ACP_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility. */
-/* FE_Gfx_busy */
-/* SH_Gfx_busy */
-/* UVD_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy */
-/* VCE_busy */
-/* ACP_busy */
-/* SAMU_busy */
-
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP, DRMDMA */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP, DRMDMA */
-#define PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP, DRMDMA */
-
-
-/* thermal protection counter (units).*/
-#define PPTONGA_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
-
-/* static screen threshold unit */
-#define PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT 0
-
-/* static screen threshold */
-#define PPTONGA_STATICSCREENTHRESHOLD_DFLT 0x00C8
-
-/* gfx idle clock stop threshold */
-#define PPTONGA_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
-
-/* Fixed reference divider to use when building baby stepping tables. */
-#define PPTONGA_REFERENCEDIVIDER_DFLT 4
-
-/*
- * ULV voltage change delay time
- * Used to be delay_vreg in N.I. split for S.I.
- * Using N.I. delay_vreg value as default
- * ReferenceClock = 2700
- * VoltageResponseTime = 1000
- * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
- */
-
-#define PPTONGA_ULVVOLTAGECHANGEDELAY_DFLT 1687
-
-#define PPTONGA_CGULVPARAMETER_DFLT 0x00040035
-#define PPTONGA_CGULVCONTROL_DFLT 0x00007450
-#define PPTONGA_TARGETACTIVITY_DFLT 30 /*30% */
-#define PPTONGA_MCLK_TARGETACTIVITY_DFLT 10 /*10% */
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
deleted file mode 100644
index c7dc111221c2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ /dev/null
@@ -1,6276 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-#include "pp_acpi.h"
-#include "hwmgr.h"
-#include <atombios.h>
-#include "tonga_hwmgr.h"
-#include "pptable.h"
-#include "processpptables.h"
-#include "tonga_processpptables.h"
-#include "tonga_pptable.h"
-#include "pp_debug.h"
-#include "tonga_ppsmc.h"
-#include "cgs_common.h"
-#include "pppcielanes.h"
-#include "tonga_dyn_defaults.h"
-#include "smumgr.h"
-#include "tonga_smumgr.h"
-#include "tonga_clockpowergating.h"
-#include "tonga_thermal.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-#include "cgs_linux.h"
-#include "eventmgr.h"
-#include "amd_pcie_helpers.h"
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-#define MC_CG_SEQ_DRAMCONF_S0 0x05
-#define MC_CG_SEQ_DRAMCONF_S1 0x06
-#define MC_CG_SEQ_YCLK_SUSPEND 0x04
-#define MC_CG_SEQ_YCLK_RESUME 0x0a
-
-#define PCIE_BUS_CLK 10000
-#define TCLK (PCIE_BUS_CLK / 10)
-
-#define SMC_RAM_END 0x40000
-#define SMC_CG_IND_START 0xc0030000
-#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-#define VDDC_VDDCI_DELTA 200
-#define VDDC_VDDGFX_DELTA 300
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-typedef uint32_t PECI_RegistryValue;
-
-/* Two rows: clock stretched in multiples of 2.5% or not; each row is [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-static const uint16_t PP_ClockStretcherLookupTable[2][4] = {
- {600, 1050, 3, 0},
- {600, 1050, 6, 1} };
-
-/* [FF, SS] type, four voltage ranges, each as [Floor Freq, Boundary Freq, VID min, VID max] */
-static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
- { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-static const uint8_t PP_ClockStretchAmountConversion[2][6] = {
- {0, 1, 3, 2, 4, 5},
- {0, 2, 4, 5, 6, 5} };
-
-/* Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
- DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
- DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
- DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
- DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
- DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
-};
-typedef enum DPM_EVENT_SRC DPM_EVENT_SRC;
-
-static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct tonga_power_state *cast_phw_tonga_power_state(
- struct pp_hw_power_state *hw_ps)
-{
- if (hw_ps == NULL)
- return NULL;
-
- PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (struct tonga_power_state *)hw_ps;
-}
-
-const struct tonga_power_state *cast_const_phw_tonga_power_state(
- const struct pp_hw_power_state *hw_ps)
-{
- if (hw_ps == NULL)
- return NULL;
-
- PP_ASSERT_WITH_CODE((PhwTonga_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (const struct tonga_power_state *)hw_ps;
-}
-
-int tonga_add_voltage(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *look_up_table,
- phm_ppt_v1_voltage_lookup_record *record)
-{
- uint32_t i;
- PP_ASSERT_WITH_CODE((NULL != look_up_table),
- "Lookup Table empty.", return -1;);
- PP_ASSERT_WITH_CODE((0 != look_up_table->count),
- "Lookup Table empty.", return -1;);
- PP_ASSERT_WITH_CODE((SMU72_MAX_LEVELS_VDDGFX >= look_up_table->count),
- "Lookup Table is full.", return -1;);
-
- /* This is to avoid entering duplicate calculated records. */
- for (i = 0; i < look_up_table->count; i++) {
- if (look_up_table->entries[i].us_vdd == record->us_vdd) {
- if (look_up_table->entries[i].us_calculated == 1)
- return 0;
- else
- break;
- }
- }
-
- look_up_table->entries[i].us_calculated = 1;
- look_up_table->entries[i].us_vdd = record->us_vdd;
- look_up_table->entries[i].us_cac_low = record->us_cac_low;
- look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
- look_up_table->entries[i].us_cac_high = record->us_cac_high;
- /* Only increment the count when we're appending, not replacing duplicate entry. */
- if (i == look_up_table->count)
- look_up_table->count++;
-
- return 0;
-}
-
-int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
-{
- PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
-
- return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
-}
-
-uint8_t tonga_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
- uint32_t voltage)
-{
- uint8_t count = (uint8_t) (voltage_table->count);
- uint8_t i = 0;
-
- PP_ASSERT_WITH_CODE((NULL != voltage_table),
- "Voltage Table empty.", return 0;);
- PP_ASSERT_WITH_CODE((0 != count),
- "Voltage Table empty.", return 0;);
-
- for (i = 0; i < count; i++) {
- /* find the first voltage equal to or bigger than requested */
- if (voltage_table->entries[i].value >= voltage)
- return i;
- }
-
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
-
-/**
- * @brief PhwTonga_GetVoltageOrder
- * Returns the index of the requested voltage record in the lookup table
- * @param look_up_table - lookup list to search in
- * @param voltage - voltage to look for
- * @return index of the first entry at or above the requested voltage,
- * or the last index if the request exceeds every entry
- */
-uint8_t tonga_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
- uint16_t voltage)
-{
- uint8_t count = (uint8_t) (look_up_table->count);
- uint8_t i;
-
- PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
- PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
-
- for (i = 0; i < count; i++) {
- /* find first voltage equal or bigger than requested */
- if (look_up_table->entries[i].us_vdd >= voltage)
- return i;
- }
-
- /* voltage is bigger than max voltage in the table */
- return i-1;
-}
-
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- /*
- * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
- * because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM,
- * whereas voltage control is a fundamental change that will not be disabled
- */
-
- return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
-}
-
-/**
- * Re-generate the DPM level mask value
- * @param dpm_table the single DPM table to collect the enable bits from
- */
-static uint32_t tonga_get_dpm_level_enable_mask_value(
- struct tonga_single_dpm_table * dpm_table)
-{
- uint32_t i;
- uint32_t mask_value = 0;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask_value = mask_value << 1;
-
- if (dpm_table->dpm_levels[i-1].enabled)
- mask_value |= 0x1;
- else
- mask_value &= 0xFFFFFFFE;
- }
- return mask_value;
-}
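
The loop above walks the levels from the top down, shifting the mask left once per level and OR-ing in a bit for each enabled level, so bit (i - 1) of the result corresponds to dpm_levels[i - 1]. A standalone rerun of the same loop with a hypothetical 3-level table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int enabled[3] = { 1, 0, 1 };	/* hypothetical level states */
	uint32_t i, mask = 0;

	for (i = 3; i > 0; i--) {
		mask <<= 1;
		if (enabled[i - 1])
			mask |= 0x1;
	}
	printf("mask = 0x%x\n", mask);	/* prints mask = 0x5, i.e. 0b101 */
	return 0;
}
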
-
-/**
- * Retrieve DPM default values from registry (if available)
- *
- * @param hwmgr the address of the powerplay hardware manager.
- */
-void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- phw_tonga_ulv_parm *ulv = &(data->ulv);
- uint32_t tmp;
-
- ulv->ch_ulv_parameter = PPTONGA_CGULVPARAMETER_DFLT;
- data->voting_rights_clients0 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = PPTONGA_VOTINGRIGHTSCLIENTS_DFLT7;
-
- data->static_screen_threshold_unit = PPTONGA_STATICSCREENTHRESHOLDUNIT_DFLT;
- data->static_screen_threshold = PPTONGA_STATICSCREENTHRESHOLD_DFLT;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ABM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_NonABMSupportInPPLib);
-
- tmp = 0;
- if (tmp == 0)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicACTiming);
-
- tmp = 0;
- if (0 != tmp)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMemoryTransition);
-
- data->mclk_strobe_mode_threshold = 40000;
- data->mclk_stutter_mode_threshold = 30000;
- data->mclk_edc_enable_threshold = 40000;
- data->mclk_edc_wr_enable_threshold = 40000;
-
- tmp = 0;
- if (tmp != 0)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMCLS);
-
- data->pcie_gen_performance.max = PP_PCIEGen1;
- data->pcie_gen_performance.min = PP_PCIEGen3;
- data->pcie_gen_power_saving.max = PP_PCIEGen1;
- data->pcie_gen_power_saving.min = PP_PCIEGen3;
-
- data->pcie_lane_performance.max = 0;
- data->pcie_lane_performance.min = 16;
- data->pcie_lane_power_saving.max = 0;
- data->pcie_lane_power_saving.min = 16;
-
- tmp = 0;
-
- if (tmp)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-
-}
-
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = tonga_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- data->sram_end
- );
- }
-
- return result;
-}
-
-/**
- * Find SCLK value that is associated with specified virtual_voltage_Id.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param virtual_voltage_Id voltageId to look for.
- * @param sclk output value.
- * @return 0 on success, -1 if the association is not found
- */
-static int tonga_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- uint16_t virtual_voltage_id, uint32_t *sclk)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -1);
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
- for (entryId = 0; entryId < pptable_info->vdd_dep_on_sclk->count; entryId++) {
- voltageId = pptable_info->vdd_dep_on_sclk->entries[entryId].vddInd;
- if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
- break;
- }
-
- PP_ASSERT_WITH_CODE(entryId < pptable_info->vdd_dep_on_sclk->count,
- "Can't find requested voltage id in vdd_dep_on_sclk table!",
- return -1;
- );
-
- *sclk = pptable_info->vdd_dep_on_sclk->entries[entryId].clk;
-
- return 0;
-}
-
-/**
- * Get Leakage VDDC based on leakage ID.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return -1 if a retrieved voltage is out of range (2 V or more), otherwise 0
- */
-int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
- uint16_t virtual_voltage_id;
- uint16_t vddc = 0;
- uint16_t vddgfx = 0;
- uint16_t i, j;
- uint32_t sclk = 0;
-
- /* retrieve voltage for leakage ID (0xff01 + i) */
- for (i = 0; i < TONGA_MAX_LEAKAGE_COUNT; i++) {
- virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
-
- /* in split mode we should have only vddgfx EVV leakages */
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
- pptable_info->vddgfx_lookup_table, virtual_voltage_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- for (j = 1; j < sclk_table->count; j++) {
- if (sclk_table->entries[j].clk == sclk &&
- sclk_table->entries[j].cks_enable == 0) {
- sclk += 5000;
- break;
- }
- }
- }
- if (0 == atomctrl_get_voltage_evv_on_sclk
- (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
- virtual_voltage_id, &vddgfx)) {
- /* need to make sure vddgfx is less than 2 V or else it could burn the ASIC. */
- PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
- data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
- data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
- data->vddcgfx_leakage.count++;
- }
- } else {
- printk("Error retrieving EVV voltage value!\n");
- }
- }
- } else {
- /* in merged mode we have only vddc EVV leakages */
- if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
- pptable_info->vddc_lookup_table,
- virtual_voltage_id, &sclk)) {
- if (0 == atomctrl_get_voltage_evv_on_sclk
- (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
- virtual_voltage_id, &vddc)) {
- /* need to make sure vddc is less than 2 V or else it could burn the ASIC. */
- PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1);
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddc != 0 && vddc != virtual_voltage_id) {
- data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
- data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
- data->vddc_leakage.count++;
- }
- } else {
- printk("Error retrieving EVV voltage value!\n");
- }
- }
- }
- }
-
- return 0;
-}
-
-int tonga_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* enable SCLK dpm */
- if (0 == data->sclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Enable)),
- "Failed to enable SCLK DPM during DPM Start Function!",
- return -1);
- }
-
- /* enable MCLK dpm */
- if (0 == data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Enable)),
- "Failed to enable MCLK DPM during DPM Start Function!",
- return -1);
-
- PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, 0x100005);/*Read */
-
- udelay(10);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, 0x500005);/* write */
-
- }
-
- return 0;
-}
-
-int tonga_start_dpm(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* enable general power management */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
- /* enable sclk deep sleep */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);
-
- /* prepare for PCIE DPM */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
- offsetof(SMU72_SoftRegisters, VoltageChangeTimeout), 0x1000);
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -1);
-
- if (0 != tonga_enable_sclk_mclk_dpm(hwmgr)) {
- PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
- }
-
- /* enable PCIE dpm */
- if (0 == data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Enable)),
- "Failed to enable pcie DPM during DPM Start Function!",
- return -1
- );
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition)) {
- smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableACDCGPIOInterrupt);
- }
-
- return 0;
-}
-
-int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* disable SCLK dpm */
- if (0 == data->sclk_dpm_key_disabled) {
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
- PP_ASSERT_WITH_CODE(
- !tonga_is_dpm_running(hwmgr),
- "Trying to Disable SCLK DPM when DPM is disabled",
- return -1
- );
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable)),
- "Failed to disable SCLK DPM during DPM stop Function!",
- return -1);
- }
-
- /* disable MCLK dpm */
- if (0 == data->mclk_dpm_key_disabled) {
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(
- !tonga_is_dpm_running(hwmgr),
- "Trying to Disable MCLK DPM when DPM is disabled",
- return -1
- );
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable)),
- "Failed to Disable MCLK DPM during DPM stop Function!",
- return -1);
- }
-
- return 0;
-}
-
-int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 0);
- /* disable sclk deep sleep*/
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 0);
-
- /* disable PCIE dpm */
- if (0 == data->pcie_dpm_key_disabled) {
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
- PP_ASSERT_WITH_CODE(
- !tonga_is_dpm_running(hwmgr),
- "Trying to Disable PCIE DPM when DPM is disabled",
- return -1
- );
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Disable)),
- "Failed to disable pcie DPM during DPM stop Function!",
- return -1);
- }
-
- if (0 != tonga_disable_sclk_mclk_dpm(hwmgr))
- PP_ASSERT_WITH_CODE(0, "Failed to disable Sclk DPM and Mclk DPM!", return -1);
-
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
- PP_ASSERT_WITH_CODE(
- !tonga_is_dpm_running(hwmgr),
- "Trying to Disable Voltage CNTL when DPM is disabled",
- return -1
- );
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Disable)),
- "Failed to disable voltage DPM during DPM stop Function!",
- return -1);
-
- return 0;
-}
-
-int tonga_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);
-
- return 0;
-}
-
-/**
- * Send a message to the SMC and return a parameter
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: pointer to the received parameter
- * @return The response that came from the SMC.
- */
-PPSMC_Result tonga_send_msg_to_smc_return_parameter(
- struct pp_hwmgr *hwmgr,
- PPSMC_Msg msg,
- uint32_t *parameter)
-{
- int result;
-
- result = smum_send_msg_to_smc(hwmgr->smumgr, msg);
-
- if ((0 == result) && parameter) {
- *parameter = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- }
-
- return result;
-}
-
-/**
- * Force the SCLK DPM enabled-level mask to level n
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param n : DPM level
- * @return 0 on success, 1 if the SMC message failed
- */
-int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t level_mask = 1 << n;
-
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to force SCLK when DPM is disabled",
- return -1;);
- if (0 == data->sclk_dpm_key_disabled)
- return (0 == smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)(PPSMC_MSG_SCLKDPM_SetEnabledMask),
- level_mask) ? 0 : 1);
-
- return 0;
-}
-
-/**
- * Force the MCLK DPM enabled-level mask to level n
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param n : DPM level
- * @return 0 on success, 1 if the SMC message failed
- */
-int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t level_mask = 1 << n;
-
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to Force MCLK when DPM is disabled",
- return -1;);
- if (0 == data->mclk_dpm_key_disabled)
- return (0 == smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)(PPSMC_MSG_MCLKDPM_SetEnabledMask),
- level_mask) ? 0 : 1);
-
- return 0;
-}
-
-/**
- * Force the PCIE DPM level to n
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param n : DPM level
- * @return 0 on success, 1 if the SMC message failed
- */
-int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to Force PCIE level when DPM is disabled",
- return -1;);
- if (0 == data->pcie_dpm_key_disabled)
- return (0 == smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)(PPSMC_MSG_PCIeDPM_ForceLevel),
- n) ? 0 : 1);
-
- return 0;
-}
-
-/**
- * Set the initial state by calling SMC to switch to this state directly
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, 1 otherwise
- */
-int tonga_set_boot_state(struct pp_hwmgr *hwmgr)
-{
- /*
- * SMC only stores one state that SW will ask to switch to,
- * so we switch to the just-uploaded one
- */
- return (0 == tonga_disable_sclk_mclk_dpm(hwmgr)) ? 0 : 1;
-}
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, 1 if any header field could not be read
- */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, DpmTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->dpm_table_start = tmp;
- }
-
- error |= (0 != result);
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, SoftRegisters),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->soft_regs_start = tmp;
- tonga_smu->ulSoftRegsStart = tmp;
- }
-
- error |= (0 != result);
-
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcRegisterTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->mc_reg_table_start = tmp;
- }
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, FanTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->fan_table_start = tmp;
- }
-
- error |= (0 != result);
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->arb_table_start = tmp;
- }
-
- error |= (0 != result);
-
-
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, Version),
- &tmp, data->sram_end);
-
- if (0 == result) {
- hwmgr->microcode_version_info.SMC = tmp;
- }
-
- error |= (0 != result);
-
- return error ? 1 : 0;
-}
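
Each read above fetches a 32-bit table pointer at a fixed offsetof() within the SMU72 firmware header, accumulating any failure into a single error flag. A tiny self-contained illustration of that offsetof() pattern, with a hypothetical header layout rather than the real SMU72_Firmware_Header:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical header layout for illustration only. */
struct fw_header {
	uint32_t version;
	uint32_t dpm_table;
	uint32_t soft_registers;
};

int main(void)
{
	/* Each field is read from SRAM at header base + its offset. */
	printf("DpmTable pointer read at header + %zu\n",
	       offsetof(struct fw_header, dpm_table));	/* prints 4 */
	return 0;
}
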
-
-/**
- * Read clock related registers.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
- data->clock_registers.vDLL_CNTL =
- cgs_read_register(hwmgr->device, mmDLL_CNTL);
- data->clock_registers.vMCLK_PWRMGT_CNTL =
- cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
- data->clock_registers.vMPLL_AD_FUNC_CNTL =
- cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
- data->clock_registers.vMPLL_DQ_FUNC_CNTL =
- cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
- data->clock_registers.vMPLL_FUNC_CNTL =
- cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
- data->clock_registers.vMPLL_FUNC_CNTL_1 =
- cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
- data->clock_registers.vMPLL_FUNC_CNTL_2 =
- cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
- data->clock_registers.vMPLL_SS1 =
- cgs_read_register(hwmgr->device, mmMPLL_SS1);
- data->clock_registers.vMPLL_SS2 =
- cgs_read_register(hwmgr->device, mmMPLL_SS2);
-
- return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_get_memory_type(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
- data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
-
- return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
- return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
- data->acp_power_gated = false;
- data->pg_acp_init = true;
-
- return 0;
-}
-
-/**
- * Checks if DPM is enabled
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 1 if DPM is running, 0 otherwise
- */
-int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
-{
- /*
- * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
- * because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM,
- * whereas voltage control is a fundamental change that will not be disabled
- */
- return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
-}
-
-/**
- * Checks if DPM is stopped
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 1 if DPM is running but dpm_table_start is invalid, 0 otherwise
- */
-int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (tonga_is_dpm_running(hwmgr)) {
- /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
- if (!data->dpm_table_start) {
- return 1;
- }
- }
-
- return 0;
-}
-
-/**
- * Remove repeated voltage values and create table with unique values.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param voltage_table the voltage table to trim in place
- * @return 0 on success, -1 or -ENOMEM on failure
- */
-
-static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
- pp_atomctrl_voltage_table *voltage_table)
-{
- uint32_t table_size, i, j;
- uint16_t vvalue;
- bool bVoltageFound = false;
- pp_atomctrl_voltage_table *table;
-
- PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
- table_size = sizeof(pp_atomctrl_voltage_table);
- table = kzalloc(table_size, GFP_KERNEL);
-
- if (NULL == table)
- return -ENOMEM;
-
- memset(table, 0x00, table_size);
- table->mask_low = voltage_table->mask_low;
- table->phase_delay = voltage_table->phase_delay;
-
- for (i = 0; i < voltage_table->count; i++) {
- vvalue = voltage_table->entries[i].value;
- bVoltageFound = false;
-
- for (j = 0; j < table->count; j++) {
- if (vvalue == table->entries[j].value) {
- bVoltageFound = true;
- break;
- }
- }
-
- if (!bVoltageFound) {
- table->entries[table->count].value = vvalue;
- table->entries[table->count].smio_low =
- voltage_table->entries[i].smio_low;
- table->count++;
- }
- }
-
- memcpy(voltage_table, table, sizeof(pp_atomctrl_voltage_table)); /* copy the deduplicated entries back to the caller's table */
-
- kfree(table);
-
- return 0;
-}
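
The function deduplicates by scanning a scratch table for each incoming value and appending only unseen ones, preserving first-seen order, then copies the result back over the caller's table. The same core logic in isolation, using hypothetical millivolt values and a simplified entry type:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t in[4] = { 900, 900, 950, 900 };	/* hypothetical mV values */
	uint16_t out[4];
	unsigned int i, j, count = 0;

	for (i = 0; i < 4; i++) {
		bool found = false;

		for (j = 0; j < count; j++)
			if (in[i] == out[j])
				found = true;
		if (!found)
			out[count++] = in[i];	/* append first-seen values only */
	}
	for (i = 0; i < count; i++)
		printf("%u\n", out[i]);		/* prints 900 then 950 */
	return 0;
}
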
-
-static int tonga_get_svi2_vdd_ci_voltage_table(
- struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *voltage_dependency_table)
-{
- uint32_t i;
- int result;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- pp_atomctrl_voltage_table *vddci_voltage_table = &(data->vddci_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != voltage_dependency_table->count),
- "Voltage Dependency Table empty.", return -1;);
-
- vddci_voltage_table->mask_low = 0;
- vddci_voltage_table->phase_delay = 0;
- vddci_voltage_table->count = voltage_dependency_table->count;
-
- for (i = 0; i < voltage_dependency_table->count; i++) {
- vddci_voltage_table->entries[i].value =
- voltage_dependency_table->entries[i].vddci;
- vddci_voltage_table->entries[i].smio_low = 0;
- }
-
- result = tonga_trim_voltage_table(hwmgr, vddci_voltage_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim VDDCI table.", return result;);
-
- return 0;
-}
-
-
-
-static int tonga_get_svi2_vdd_voltage_table(
- struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *look_up_table,
- pp_atomctrl_voltage_table *voltage_table)
-{
- uint8_t i = 0;
-
- PP_ASSERT_WITH_CODE((0 != look_up_table->count),
- "Voltage Lookup Table empty.", return -1;);
-
- voltage_table->mask_low = 0;
- voltage_table->phase_delay = 0;
-
- voltage_table->count = look_up_table->count;
-
- for (i = 0; i < voltage_table->count; i++) {
- voltage_table->entries[i].value = look_up_table->entries[i].us_vdd;
- voltage_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-/*
- * ------------------------------ Voltage Tables ------------------------------
- * If the voltage table is bigger than what fits into the SMC state table, keep only the higher entries.
- */
-
-static void tonga_trim_voltage_table_to_fit_state_table(
- struct pp_hwmgr *hwmgr,
- uint32_t max_voltage_steps,
- pp_atomctrl_voltage_table *voltage_table)
-{
- unsigned int i, diff;
-
- if (voltage_table->count <= max_voltage_steps) {
- return;
- }
-
- diff = voltage_table->count - max_voltage_steps;
-
- for (i = 0; i < max_voltage_steps; i++) {
- voltage_table->entries[i] = voltage_table->entries[i + diff];
- }
-
- voltage_table->count = max_voltage_steps;
-
- return;
-}
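
Since the tables are kept in ascending order (as the lookup helpers earlier assume), dropping the first diff entries keeps the max_voltage_steps highest voltages. A standalone rerun of the trim with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned int v[5] = { 800, 850, 900, 950, 1000 };	/* mV, ascending */
	unsigned int max_steps = 3, count = 5, i;
	unsigned int diff = count - max_steps;

	/* shift the tail of the table down over the lowest entries */
	for (i = 0; i < max_steps; i++)
		v[i] = v[i + diff];
	count = max_steps;

	for (i = 0; i < count; i++)
		printf("%u\n", v[i]);	/* prints 900, 950, 1000 */
	return 0;
}
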
-
-/**
- * Create Voltage Tables.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result;
-
- /* MVDD has only GPIO voltage control */
- if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve MVDD table.", return result;);
- }
-
- if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
- /* GPIO voltage */
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve VDDCI table.", return result;);
- } else if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
- /* SVI2 voltage */
- result = tonga_get_svi2_vdd_ci_voltage_table(hwmgr,
- pptable_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
- }
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- /* VDDGFX has only SVI2 voltage control */
- result = tonga_get_svi2_vdd_voltage_table(hwmgr,
- pptable_info->vddgfx_lookup_table, &(data->vddgfx_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
- }
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- /* VDDC has only SVI2 voltage control */
- result = tonga_get_svi2_vdd_voltage_table(hwmgr,
- pptable_info->vddc_lookup_table, &(data->vddc_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from lookup table.", return result;);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->vddc_voltage_table.count <= (SMU72_MAX_LEVELS_VDDC)),
- "Too many voltage values for VDDC. Trimming to fit state table.",
- tonga_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU72_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
- );
-
- PP_ASSERT_WITH_CODE(
- (data->vddgfx_voltage_table.count <= (SMU72_MAX_LEVELS_VDDGFX)),
- "Too many voltage values for VDDGFX. Trimming to fit state table.",
- tonga_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU72_MAX_LEVELS_VDDGFX, &(data->vddgfx_voltage_table));
- );
-
- PP_ASSERT_WITH_CODE(
- (data->vddci_voltage_table.count <= (SMU72_MAX_LEVELS_VDDCI)),
- "Too many voltage values for VDDCI. Trimming to fit state table.",
- tonga_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU72_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
- );
-
- PP_ASSERT_WITH_CODE(
- (data->mvdd_voltage_table.count <= (SMU72_MAX_LEVELS_MVDD)),
- "Too many voltage values for MVDD. Trimming to fit state table.",
- tonga_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU72_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
- );
-
- return 0;
-}
-
-/**
- * Vddc table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- table->VddcLevelCount = data->vddc_voltage_table.count;
- for (count = 0; count < table->VddcLevelCount; count++) {
- table->VddcTable[count] =
- PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
- }
- return 0;
-}
-
-/**
- * VddGfx table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
- for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
- table->VddGfxTable[count] =
- PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
- }
- return 0;
-}
-
-/**
- * Vddci table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- table->VddciLevelCount = data->vddci_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
- table->SmioTable1.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
- table->SmioTable1.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->vddci_voltage_table.entries[count].smio_low;
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
- return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- table->MvddLevelCount = data->mvdd_voltage_table.count;
- for (count = 0; count < table->MvddLevelCount; count++) {
- table->SmioTable2.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->mvdd_voltage_table.entries[count].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
- }
-
- return 0;
-}
-
-/**
- * Convert a voltage value in mV to the VID number required by the SMU firmware.
- */
-static uint8_t convert_to_vid(uint16_t vddc)
-{
- return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
-}
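-/*
- * Worked example, assuming VOLTAGE_SCALE == 4 (voltages handled in units
- * of 0.25 mV): for vddc == 1150 mV, VID = (6200 - 1150 * 4) / 25 = 64.
- * VID 0 corresponds to 1.55 V and each VID step subtracts 6.25 mV.
- */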
-
-
-/**
- * Preparation of vddc and vddgfx CAC tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
- struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table = pptable_info->vddc_lookup_table;
-
- /* The table is already byte-swapped for the SMC, so swap the level counts back before using them. */
- uint32_t vddcLevelCount = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
- uint32_t vddgfxLevelCount = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
-
- for (count = 0; count < vddcLevelCount; count++) {
- /* We are populating vddc CAC data to the BapmVddc table in both split and merged modes */
- index = tonga_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
-
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
- for (count = 0; count < vddgfxLevelCount; count++) {
- index = tonga_get_voltage_index(vddgfx_lookup_table,
- data->vddgfx_voltage_table.entries[count].value);
- table->BapmVddGfxVidLoSidd[count] =
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low);
- table->BapmVddGfxVidHiSidd[count] =
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid);
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
- }
- } else {
- for (count = 0; count < vddcLevelCount; count++) {
- index = tonga_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddGfxVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddGfxVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
- }
-
- return 0;
-}
-
-
-/**
- * Preparation of voltage tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-
-int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result;
-
- result = tonga_populate_smc_vddc_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate VDDC voltage table to SMC", return -1);
-
- result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate VDDCI voltage table to SMC", return -1);
-
- result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate VDDGFX voltage table to SMC", return -1);
-
- result = tonga_populate_smc_mvdd_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate MVDD voltage table to SMC", return -1);
-
- result = tonga_populate_cac_tables(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate CAC voltage tables to SMC", return -1);
-
- return 0;
-}
-
-/**
- * Populates the SMC VRConfig field in DPM table.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- /* Split mode */
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= config;
- } else {
- printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should be both on SVI2 control in splitted mode! \n");
- }
- } else {
- /* Merged mode */
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- printk(KERN_ERR "[ powerplay ] VDDC should be on SVI2 control in merged mode! \n");
- }
- }
-
- /* Set Vddci Voltage Controller */
- if (TONGA_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- } else if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- }
-
- /* Set Mvdd Voltage Controller */
- if (TONGA_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i = 0;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* the clock-voltage dependency table is empty */
- if (allowed_clock_voltage_table->count == 0)
- return -1;
-
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
- /* find first sclk bigger than request */
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i].vddgfx);
-
- voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i].vddc);
-
- if (allowed_clock_voltage_table->entries[i].vddci) {
- voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i].vddci);
- } else {
- voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i].vddc - data->vddc_vddci_delta);
- }
-
- if (allowed_clock_voltage_table->entries[i].mvdd) {
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
- }
-
- voltage->Phases = 1;
- return 0;
- }
- }
-
- /* sclk is bigger than the max sclk in the dependency table */
- voltage->VddGfx = tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddgfx);
- voltage->Vddc = tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddc);
-
- if (allowed_clock_voltage_table->entries[i-1].vddci) {
- voltage->Vddci = tonga_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i-1].vddci);
- }
- if (allowed_clock_voltage_table->entries[i-1].mvdd) {
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
- }
-
- return 0;
-}
-
-/**
- * Call SMC to reset S0/S1 to S1 and reset SMIO to its initial value.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, 1 on failure
- */
-int tonga_reset_to_default(struct pp_hwmgr *hwmgr)
-{
- return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults) == 0) ? 0 : 1;
-}
-
-int tonga_populate_memory_timing_parameters(
- struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint32_t memory_clock,
- struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
- )
-{
- uint32_t dramTiming;
- uint32_t dramTiming2;
- uint32_t burstTime;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- engine_clock, memory_clock);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
- return 0;
-}
-
-/**
- * Setup parameters for the MC ARB.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- * This function is to be called from the SetPowerState table.
- */
-int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- int result = 0;
- SMU72_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
-
- memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = tonga_populate_memory_timing_parameters
- (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
-
- if (0 != result)
- return result;
- }
- }
-
- if (0 == result) {
- result = tonga_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU72_Discrete_MCArbDramTimingTable),
- data->sram_end
- );
- }
-
- return result;
-}
-
-static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct tonga_dpm_table *dpm_table = &data->dpm_table;
- uint32_t i;
-
- /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount =
- (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity =
- 1;
- table->LinkLevel[i].SPC =
- (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold =
- PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold =
- PP_HOST_TO_SMC_UL(30);
- }
-
- data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- tonga_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- table->UvdLevelCount = (uint8_t) (mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage.Vddc =
- tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->UvdLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->UvdLevel[count].MinVoltage.Vddci =
- tonga_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - data->vddc_vddci_delta);
- table->UvdLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- /* CONVERT_FROM_HOST_TO_SMC_UL((uint32_t)table->UvdLevel[count].MinVoltage); */
- }
-
- return result;
-
-}
-
-static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- table->VceLevelCount = (uint8_t) (mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency =
- mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage.Vddc =
- tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->VceLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->VceLevel[count].MinVoltage.Vddci =
- tonga_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - data->vddc_vddci_delta);
- table->VceLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock", return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- table->AcpLevelCount = (uint8_t) (mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage.Vddc =
- tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->AcpLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->AcpLevel[count].MinVoltage.Vddci =
- tonga_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - data->vddc_vddci_delta);
- table->AcpLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t) (mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage.Vddc =
- tonga_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->SamuLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) ?
- tonga_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->SamuLevel[count].MinVoltage.Vddci =
- tonga_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - data->vddc_vddci_delta);
- table->SamuLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve the divider value from the VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- }
-
- return result;
-}
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param hwmgr the address of the hardware manager
- * @param memory_clock the memory clock to use to populate the structure
- * @param mclk the SMC memory level structure to be populated
- * @param strobe_mode whether strobe mode is enabled for this memory level
- * @param dllStateOn whether the memory DLL is to remain powered on
- */
-static int tonga_calculate_mclk_params(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *mclk,
- bool strobe_mode,
- bool dllStateOn
- )
-{
- const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
- uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
- uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
- uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
- uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
- uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
- uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
- uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
-
- pp_atomctrl_memory_clock_param mpll_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_si(hwmgr,
- memory_clock, &mpll_param, strobe_mode);
- PP_ASSERT_WITH_CODE(0 == result,
- "Error retrieving Memory Clock Parameters from VBIOS.", return result);
-
- /* MPLL_FUNC_CNTL setup*/
- mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
-
- /* MPLL_FUNC_CNTL_1 setup*/
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
-
- /* MPLL_AD_FUNC_CNTL setup*/
- mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
- MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
-
- if (data->is_memory_GDDR5) {
- /* MPLL_DQ_FUNC_CNTL setup*/
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
- /*
- ************************************
- Fref = Reference Frequency
- NF = Feedback divider ratio
- NR = Reference divider ratio
- Fnom = Nominal VCO output frequency = Fref * NF / NR
- Fs = Spreading Rate
- D = Percentage down-spread / 2
- Fint = Reference input frequency to PFD = Fref / NR
- NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
- CLKS = NS - 1 = ISS_STEP_NUM[11:0]
- NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
- CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
- *************************************
- */
- pp_atomctrl_internal_ss_info ss_info;
- uint32_t freq_nom;
- uint32_t tmp;
- uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
-
- /* QDR mode (GDDR5) multiplies the post-divided clock by 4; otherwise (DDR3) by 2 */
- if (1 == mpll_param.qdr)
- freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
- else
- freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
-
- /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
- tmp = (freq_nom / reference_clock);
- tmp = tmp * tmp;
-
- if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
- /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
- /* ss.Info.speed_spectrum_rate -- in unit of khz */
- /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
- /* = reference_clock * 5 / speed_spectrum_rate */
- uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
- /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
- /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
- uint32_t clkv =
- (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
- ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
- mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
- }
- }
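- /*
- * Worked example of the CLKS computation above (illustrative values
- * only): with reference_clock == 2700 and a speed_spectrum_rate of
- * 400, clks = 2700 * 5 / 400 = 33; clkv then scales the down-spread
- * step size by tmp = (freq_nom / reference_clock)^2 as derived above.
- */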
-
- /* MCLK_PWRMGT_CNTL setup */
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
-
-
- /* Save the result data to the output memory level structure */
- mclk->MclkFrequency = memory_clock;
- mclk->MpllFuncCntl = mpll_func_cntl;
- mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
- mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
- mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
- mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
- mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
- mclk->DllCntl = dll_cntl;
- mclk->MpllSs1 = mpll_ss1;
- mclk->MpllSs2 = mpll_ss2;
-
- return 0;
-}
-
-static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
- bool strobe_mode)
-{
- uint8_t mc_para_index;
-
- if (strobe_mode) {
- if (memory_clock < 12500) {
- mc_para_index = 0x00;
- } else if (memory_clock > 47500) {
- mc_para_index = 0x0f;
- } else {
- mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
- }
- } else {
- if (memory_clock < 65000) {
- mc_para_index = 0x00;
- } else if (memory_clock > 135000) {
- mc_para_index = 0x0f;
- } else {
- mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
- }
- }
-
- return mc_para_index;
-}
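-/*
- * Worked examples of the ratio encoding above (memory clock in units of
- * 10 kHz, per the comments elsewhere in this file): in strobe mode,
- * memory_clock == 30000 (300 MHz) gives (30000 - 10000) / 2500 = 8; in
- * normal mode, memory_clock == 100000 (1 GHz) gives
- * (100000 - 60000) / 5000 = 8.
- */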
-
-static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
-{
- uint8_t mc_para_index;
-
- if (memory_clock < 10000) {
- mc_para_index = 0;
- } else if (memory_clock >= 80000) {
- mc_para_index = 0x0f;
- } else {
- mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
- }
-
- return mc_para_index;
-}
-
-static int tonga_populate_single_memory_level(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *memory_level
- )
-{
- uint32_t minMvdd = 0;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- bool dllStateOn;
- struct cgs_display_info info = {0};
-
-
- if (NULL != pptable_info->vdd_dep_on_mclk) {
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &minMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
- }
-
- if (data->mvdd_control == TONGA_VOLTAGE_CONTROL_NONE) {
- memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
- } else {
- memory_level->MinMvdd = minMvdd;
- }
- memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
- memory_level->VoltageDownHyst = 0;
-
- /* Indicates maximum activity level for this performance level.*/
- memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- memory_level->StutterEnable = 0;
- memory_level->StrobeEnable = 0;
- memory_level->EdcReadEnable = 0;
- memory_level->EdcWriteEnable = 0;
- memory_level->RttEnable = 0;
-
- /* default set to low watermark. Highest level will be set to high later.*/
- memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- data->display_timing.num_existing_displays = info.display_count;
-
- if ((data->mclk_stutter_mode_threshold != 0) &&
- (memory_clock <= data->mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled)
- && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
- && (data->display_timing.num_existing_displays <= 2)
- && (data->display_timing.num_existing_displays != 0))
- memory_level->StutterEnable = 1;
-
- /* decide strobe mode*/
- memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
- (memory_clock <= data->mclk_strobe_mode_threshold);
-
- /* decide EDC mode and memory clock ratio*/
- if (data->is_memory_GDDR5) {
- memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
- memory_level->StrobeEnable);
-
- if ((data->mclk_edc_enable_threshold != 0) &&
- (memory_clock > data->mclk_edc_enable_threshold)) {
- memory_level->EdcReadEnable = 1;
- }
-
- if ((data->mclk_edc_wr_enable_threshold != 0) &&
- (memory_clock > data->mclk_edc_wr_enable_threshold)) {
- memory_level->EdcWriteEnable = 1;
- }
-
- if (memory_level->StrobeEnable) {
- if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
- ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
- dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- } else {
- dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
- }
-
- } else {
- dllStateOn = data->dll_defaule_on;
- }
- } else {
- memory_level->StrobeRatio =
- tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
- dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- }
-
- result = tonga_calculate_mclk_params(hwmgr,
- memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);
-
- if (0 == result) {
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
- /* MCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
- /* Indicates maximum activity level for this performance level.*/
- CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
- }
-
- return result;
-}
-
-/**
- * Populates the SMC MVDD structure using the provided memory clock.
- *
- * @param hwmgr the address of the hardware manager
- * @param mclk the MCLK value used to decide whether MVDD should be high or low.
- * @param smio_pattern the SMIO_Pattern structure to be populated
- */
-int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMIO_Pattern *smio_pattern)
-{
- const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (TONGA_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find the first MVDD entry whose clock is not below the request */
- for (i = 0; i < pptable_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= pptable_info->vdd_dep_on_mclk->entries[i].clk) {
- /* Always round to higher voltage. */
- smio_pattern->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
-
- PP_ASSERT_WITH_CODE(i < pptable_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.", return -1);
-
- } else {
- return -1;
- }
-
- return 0;
-}
-
-
-static int tonga_populate_smv_acpi_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- SMIO_Pattern voltage_level;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
- /* The ACPI state should not do DPM on DC (or ever).*/
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- table->ACPILevel.MinVoltage = data->smc_state_table.GraphicsLevel[0].MinVoltage;
-
- /* run the ACPI state at the SCLK reference clock */
- table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* divider ID for required SCLK*/
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
- CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
-
- /* For various features to be enabled/disabled while this level is active.*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- /* SCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
- table->MemoryACPILevel.MinVoltage = data->smc_state_table.MemoryLevel[0].MinVoltage;
-
- /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
-
- if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- /* Force reset on DLL*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
- /* Disable DLL in ACPIState*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
- /* Enable DLL bypass signal*/
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK0_BYPASS, 0);
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK1_BYPASS, 0);
-
- table->MemoryACPILevel.DllCntl =
- PP_HOST_TO_SMC_UL(dll_cntl);
- table->MemoryACPILevel.MclkPwrmgtCntl =
- PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
- table->MemoryACPILevel.MpllAdFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
- table->MemoryACPILevel.MpllDqFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl_1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
- table->MemoryACPILevel.MpllFuncCntl_2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
- table->MemoryACPILevel.MpllSs1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
- table->MemoryACPILevel.MpllSs2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- /* Indicates maximum activity level for this performance level.*/
- table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = 0;
- table->MemoryACPILevel.StrobeEnable = 0;
- table->MemoryACPILevel.EdcReadEnable = 0;
- table->MemoryACPILevel.EdcWriteEnable = 0;
- table->MemoryACPILevel.RttEnable = 0;
-
- return result;
-}
-
-static int tonga_find_boot_level(struct tonga_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
-{
- int result = -EINVAL; /* stays -EINVAL if the value is not found */
- uint32_t i;
-
- for (i = 0; i < table->count; i++) {
- if (value == table->dpm_levels[i].value) {
- *boot_level = i;
- result = 0;
- }
- }
- return result;
-}
-
-static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0; /* 0 == DPM[0] (low), etc. */
- table->MemoryBootLevel = 0; /* 0 == DPM[0] (low), etc. */
-
- /* find boot level from dpm table*/
- result = tonga_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(data->smc_state_table.GraphicsBootLevel));
-
- if (0 != result) {
- data->smc_state_table.GraphicsBootLevel = 0;
- printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
- in dependency table. Using Graphics DPM level 0!");
- result = 0;
- }
-
- result = tonga_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(data->smc_state_table.MemoryBootLevel));
-
- if (0 != result) {
- data->smc_state_table.MemoryBootLevel = 0;
- printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \
- in dependency table. Using Memory DPM level 0!");
- result = 0;
- }
-
- table->BootVoltage.Vddc =
- tonga_get_voltage_id(&(data->vddc_voltage_table),
- data->vbios_boot_state.vddc_bootup_value);
- table->BootVoltage.VddGfx =
- tonga_get_voltage_id(&(data->vddgfx_voltage_table),
- data->vbios_boot_state.vddgfx_bootup_value);
- table->BootVoltage.Vddci =
- tonga_get_voltage_id(&(data->vddci_voltage_table),
- data->vbios_boot_state.vddci_bootup_value);
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return result;
-}
-
-
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
-{
- const tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t reference_clock;
- uint32_t reference_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
- reference_clock = atomctrl_get_reference_clock(hwmgr);
-
- reference_divider = 1 + dividers.uc_pll_ref_div;
-
- /* the low 14 bits are the fraction and the high 12 bits the divider */
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
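- /*
- * Illustration of the fixed-point layout described above (assuming the
- * 12.14 split in the comment): a feedback divider of 41.5 would be
- * encoded as (41 << 14) | (1 << 13) = 0xA6000.
- */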
-
- /* SPLL_FUNC_CNTL setup*/
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- pp_atomctrl_internal_ss_info ss_info;
-
- uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
- if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- */
- /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
- uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 =
- PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock,
- uint32_t min_engine_clock_in_sr)
-{
- uint32_t i, temp;
- uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);
-
- PP_ASSERT_WITH_CODE((engine_clock >= min),
- "Engine clock can't satisfy stutter requirement!", return 0);
-
- for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
- temp = engine_clock >> i;
-
- if (temp >= min || i == 0)
- break;
- }
- return (uint8_t)i;
-}
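-/*
- * Worked example, assuming TONGA_MAX_DEEPSLEEP_DIVIDER_ID == 5 and
- * min == 2500: for engine_clock == 60000, 60000 >> 5 = 1875 < 2500 but
- * 60000 >> 4 = 3750 >= 2500, so the divider ID returned is 4.
- */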
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk_activity_level_threshold the activity threshold for this level
- * @param graphic_level the SMC graphics level structure to be populated
- */
-static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, uint16_t sclk_activity_level_threshold,
- SMU72_Discrete_GraphicsLevel *graphic_level)
-{
- int result;
- uint32_t threshold;
- uint32_t mvdd;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
- PP_ASSERT_WITH_CODE((0 == result),
- "Error calculating SCLK parameters.", return result);
-
- /* populate graphics levels*/
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_sclk, engine_clock,
- &graphic_level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for VDDC \
- engine clock dependency table", return result);
-
- /* SCLK frequency in units of 10KHz*/
- graphic_level->SclkFrequency = engine_clock;
-
- /* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- /* this level can be used if activity is high enough.*/
- graphic_level->EnabledForActivity = 0;
- /* this level can be used for throttling.*/
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 0;
- graphic_level->VoltageDownHyst = 0;
- graphic_level->PowerThrottle = 0;
-
- threshold = engine_clock * data->fast_watemark_threshold / 100;
-/*
- * Get the DAL clock; to be done in the future:
- * PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
- * data->display_timing.min_clock_insr = minClocks.engineClockInSR;
- */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep))
- graphic_level->DeepSleepDivId =
- tonga_get_sleep_divider_id_from_clock(engine_clock,
- data->display_timing.min_clock_insr);
-
- /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (0 == result) {
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
- }
-
- return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct tonga_dpm_table *dpm_table = &data->dpm_table;
- phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
- uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t level_array_adress = data->dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
- uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
- SMU72_MAX_LEVELS_GRAPHICS; /* 64 -> long; 32 -> int*/
- SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
- uint32_t i, maxEntry;
- uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
- PECI_RegistryValue reg_value;
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = tonga_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)data->activity_target[i],
- &(data->smc_state_table.GraphicsLevel[i]));
-
- if (0 != result)
- return result;
-
- /* Make sure only DPM levels 0 and 1 have a Deep Sleep Div ID populated. */
- if (i > 1)
- data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
-
- /*
- * reg_value appears to be a placeholder for a registry-supplied
- * UpHyst override; it is always zero here, so the default UpHyst
- * is kept for DPM levels 0 and 1.
- */
- if (0 == i) {
- reg_value = 0;
- if (reg_value != 0)
- data->smc_state_table.GraphicsLevel[0].UpHyst = (uint8_t)reg_value;
- }
-
- if (1 == i) {
- reg_value = 0;
- if (reg_value != 0)
- data->smc_state_table.GraphicsLevel[1].UpHyst = (uint8_t)reg_value;
- }
- }
-
- /* Only enable level 0 for now. */
- data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- if (dpm_table->sclk_table.count > 1)
- data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- tonga_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
- "There must be 1 or more PCIE levels defined in PPTable.", return -1);
- maxEntry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
- (uint8_t) ((i < maxEntry) ? i : maxEntry);
- }
- } else {
- if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
- printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0!");
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(highest_pcie_level_enabled+1))) != 0)) {
- highest_pcie_level_enabled++;
- }
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<lowest_pcie_level_enabled)) == 0)) {
- lowest_pcie_level_enabled++;
- }
-
- while ((count < highest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
- count++;
- }
- mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
- (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
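-
- /*
- * Worked example of the mask walk above: with
- * pcie_dpm_enable_mask == 0x7 (levels 0..2 enabled),
- * highest_pcie_level_enabled ends at 2, lowest_pcie_level_enabled
- * stays 0, count stays 0, and mid_pcie_level_enabled becomes
- * min(0 + 1 + 0, 2) = 1.
- */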
-
-
- /* set pcieDpmLevel to highest_pcie_level_enabled*/
- for (i = 2; i < dpm_table->sclk_table.count; i++) {
- data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
- }
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled*/
- data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled*/
- data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* The level count is sent to the SMC once, at SMC table init, and never changes. */
- result = tonga_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
- if (0 != result)
- return result;
-
- return 0;
-}
-
-/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-
-static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct tonga_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
- uint32_t level_array_size = sizeof(SMU72_Discrete_MemoryLevel) * SMU72_MAX_LEVELS_MEMORY;
- SMU72_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero", return -1);
- result = tonga_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
- &(data->smc_state_table.MemoryLevel[i]));
- if (0 != result) {
- return result;
- }
- }
-
- /* Only enable level 0 for now.*/
- data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
- /*
- * In order to prevent MC activity in stutter mode from pushing DPM up,
- * the UVD change complements this by putting the MCLK in a higher state
- * by default, so that we are not affected by the up threshold or MCLK DPM latency.
- */
- data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
- data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high*/
- data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
- /* The level count is sent to the SMC once, at SMC table init, and never changes. */
- result = tonga_copy_bytes_to_smc(hwmgr->smumgr,
- level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
- if (0 != result) {
- return result;
- }
-
- return 0;
-}
-
-struct TONGA_DLL_SPEED_SETTING {
- uint16_t Min; /* Minimum Data Rate*/
- uint16_t Max; /* Maximum Data Rate*/
- uint32_t dll_speed; /* The desired DLL_SPEED setting*/
-};
-
-static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
-
-
-static int tonga_reset_single_dpm_table(
- struct pp_hwmgr *hwmgr,
- struct tonga_single_dpm_table *dpm_table,
- uint32_t count)
-{
- uint32_t i;
- if (!(count <= MAX_REGULAR_DPM_NUMBER))
-		printk(KERN_ERR "[ powerplay ] Fatal error: cannot set up single DPM table, entry count exceeds the maximum!\n");
-
- dpm_table->count = count;
- for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
- dpm_table->dpm_levels[i].enabled = false;
- }
-
- return 0;
-}
-
-static void tonga_setup_pcie_table_entry(
- struct tonga_single_dpm_table *dpm_table,
- uint32_t index, uint32_t pcie_gen,
- uint32_t pcie_lanes)
-{
- dpm_table->dpm_levels[index].value = pcie_gen;
- dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
- uint32_t i, maxEntry;
-
- if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
- data->pcie_gen_power_saving = data->pcie_gen_performance;
- data->pcie_lane_power_saving = data->pcie_lane_performance;
- } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
- data->pcie_gen_performance = data->pcie_gen_power_saving;
- data->pcie_lane_performance = data->pcie_lane_power_saving;
- }
-
- tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU72_MAX_LEVELS_LINK);
-
- if (pcie_table != NULL) {
- /*
- * maxEntry is used to make sure we reserve one PCIE level for boot level (fix for A+A PSPP issue).
-	 * If the PCIE table from the PPTable has a ULV entry plus 8 entries, then ignore the last entry.
- */
- maxEntry = (SMU72_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU72_MAX_LEVELS_LINK : pcie_table->count;
- for (i = 1; i < maxEntry; i++) {
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i-1,
- get_pcie_gen_support(data->pcie_gen_cap, pcie_table->entries[i].gen_speed),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- }
- data->dpm_table.pcie_speed_table.count = maxEntry - 1;
- } else {
- /* Hardcode Pcie Table */
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- data->dpm_table.pcie_speed_table.count = 6;
- }
- /* Populate last level for boot PCIE level, but do not increment count. */
- tonga_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
- data->dpm_table.pcie_speed_table.count,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
-
- return 0;
-
-}
-
-/*
- * This function initializes all DPM state tables for SMU7 based on the dependency table.
- * The dynamic state patching function will then trim these state tables to the allowed
- * range based on the power policy or external client requests, such as UVD requests.
- */
-static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_sclk_table =
- pptable_info->vdd_dep_on_sclk;
- phm_ppt_v1_clock_voltage_dependency_table *allowed_vdd_mclk_table =
- pptable_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory", return -1);
- PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
-		"SCLK dependency table must have at least one entry. This table is mandatory", return -1);
-
- PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory", return -1);
- PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
-		"MCLK dependency table must have at least one entry. This table is mandatory", return -1);
-
- /* clear the state table to reset everything to default */
- memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
- tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU72_MAX_LEVELS_GRAPHICS);
- tonga_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU72_MAX_LEVELS_MEMORY);
- /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.VddcTable, SMU72_MAX_LEVELS_VDDC); */
- /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_gfx_table, SMU72_MAX_LEVELS_VDDGFX);*/
- /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.vdd_ci_table, SMU72_MAX_LEVELS_VDDCI);*/
- /* tonga_reset_single_dpm_table(hwmgr, &tonga_hwmgr->dpm_table.mvdd_table, SMU72_MAX_LEVELS_MVDD);*/
-
- PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory", return -1);
-	/* Initialize SCLK DPM table based on allowed SCLK values */
- data->dpm_table.sclk_table.count = 0;
-
- for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
- if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
- allowed_vdd_sclk_table->entries[i].clk) {
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
- allowed_vdd_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
- data->dpm_table.sclk_table.count++;
- }
- }
-
- PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory", return -1);
-	/* Initialize MCLK DPM table based on allowed MCLK values */
- data->dpm_table.mclk_table.count = 0;
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
- allowed_vdd_mclk_table->entries[i].clk) {
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
- allowed_vdd_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
- data->dpm_table.mclk_table.count++;
- }
- }
-
- /* setup PCIE gen speed levels*/
- tonga_setup_default_pcie_tables(hwmgr);
-
- /* save a copy of the default DPM table*/
- memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct tonga_dpm_table));
-
- return 0;
-}
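-
-/*
- * Worked example for the duplicate-skipping loops above (illustrative
- * clock values): an SCLK dependency table of { 30000, 30000, 60000,
- * 90000 } produces dpm_levels of { 30000, 60000, 90000 } with
- * count = 3, since an entry is only appended when its clock differs
- * from the previous level's value.
- */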
-
-int tonga_populate_smc_initial_state(struct pp_hwmgr *hwmgr,
- const struct tonga_power_state *bootState)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t) (pptable_info->vdd_dep_on_sclk->count);
- for (level = 0; level < count; level++) {
- if (pptable_info->vdd_dep_on_sclk->entries[level].clk >=
- bootState->performance_levels[0].engine_clock) {
- data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t) (pptable_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (pptable_info->vdd_dep_on_mclk->entries[level].clk >=
- bootState->performance_levels[0].memory_clock) {
- data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-/**
- * Initializes the SMC table and uploads it
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success; otherwise the error code of the first failing step.
- */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMU72_Discrete_DpmTable *table = &(data->smc_state_table);
- const phw_tonga_ulv_parm *ulv = &(data->ulv);
- uint8_t i;
- PECI_RegistryValue reg_value;
- pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-
- result = tonga_setup_default_dpm_tables(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup default DPM tables!", return result;);
- memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));
- if (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control) {
- tonga_populate_smc_voltage_tables(hwmgr, table);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition)) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc)) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
- }
-
- if (data->is_memory_GDDR5) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
- }
-
- i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
-
- if (i == 1 || i == 0) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_12CHANNEL;
- }
-
- if (ulv->ulv_supported && pptable_info->us_ulv_voltage_offset) {
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result;);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
- }
-
- result = tonga_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result;);
-
- result = tonga_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result;);
-
- result = tonga_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result;);
-
- result = tonga_populate_smv_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result;);
-
- result = tonga_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result;);
-
- result = tonga_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result;);
-
- result = tonga_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result;);
-
-	/*
-	 * Since only the initial state is completely set up at this point
-	 * (the other states are just copies of the boot state), we only
-	 * need to populate the ARB settings for the initial state.
-	 */
- result = tonga_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result;);
-
- result = tonga_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result;);
-
- result = tonga_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result;);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = tonga_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!", return result;);
- }
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- pptable_info->cac_dtp_table->usTargetOperatingTemp *
- TONGA_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (pptable_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- TONGA_Q88_FORMAT_CONVERSION_UNIT;
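-	/*
-	 * Worked example (assuming TONGA_Q88_FORMAT_CONVERSION_UNIT is 256,
-	 * i.e. Q8.8 fixed point): a 90 degC target operating temperature
-	 * gives TemperatureLimitHigh = 90 * 256 = 23040 and
-	 * TemperatureLimitLow = 89 * 256 = 22784.
-	 */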
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
-
-	/*
-	 * Cail reads the current link status and reports it as the cap (we
-	 * cannot change this due to some previous issues we had).
-	 * The SMC drops the link status to the lowest level after PowerPlay
-	 * enables DPM. After PnP or toggling CF the driver is reloaded, but
-	 * this time Cail reads the current link status, which the SMC set
-	 * low, and reports it as the cap to PowerPlay.
-	 * To avoid this, we set PCIeBootLinkLevel to the highest DPM level.
-	 */
- PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -1);
-
- table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
-
- table->PCIeGenInterval = 1;
-
- result = tonga_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- reg_value = 0;
- if ((0 == reg_value) &&
- (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
- &gpio_pin_assignment))) {
- table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = TONGA_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- /* ACDC Switch GPIO */
- reg_value = 0;
- if ((0 == reg_value) &&
- (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin_assignment))) {
- table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = TONGA_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
-
- reg_value = 0;
- if (1 == reg_value) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
- }
-
- reg_value = 0;
- if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
- THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-
- table->ThermOutPolarity =
- (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1:0;
-
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal)){
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- }
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++) {
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
- /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU72_Discrete_DpmTable)-3 * sizeof(SMU72_PIDController),
- data->sram_end);
-
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result;);
-
- return result;
-}
-
-/* Look up the voltage based on DAL's requested level, then send the requested VDDC voltage to the SMC. */
-static void tonga_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
-{
- return;
-}
-
-int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
- PPSMC_Result result;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* Apply minimum voltage based on DAL's request level */
- tonga_apply_dal_minimum_voltage_request(hwmgr);
-
- if (0 == data->sclk_dpm_key_disabled) {
-		/* Check whether DPM is running; if we discover a hang because of this, we should skip this message. */
- if (tonga_is_dpm_running(hwmgr))
- printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
-
- if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Sclk Dpm enable Mask failed", return -1);
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
-		/* Check whether DPM is running; if we discover a hang because of this, we should skip this message. */
- if (tonga_is_dpm_running(hwmgr))
- printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
-
- if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Mclk Dpm enable Mask failed", return -1);
- }
- }
-
- return 0;
-}
-
-
-int tonga_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
- uint32_t level, tmp;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (0 == data->pcie_dpm_key_disabled) {
- /* PCIE */
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
- level = 0;
- tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
-				level++;
-
- if (0 != level) {
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
- "force highest pcie dpm state failed!", return -1);
- }
- }
- }
-
- if (0 == data->sclk_dpm_key_disabled) {
- /* SCLK */
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
- level = 0;
- tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
-				level++;
-
- if (0 != level) {
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
- "force highest sclk dpm state failed!", return -1);
- if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
-					printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index: Curr_Sclk_Index does not match the forced level\n");
-
- }
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
- /* MCLK */
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
- level = 0;
- tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
-				level++;
-
- if (0 != level) {
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
- "force highest mclk dpm state failed!", return -1);
- if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
-					printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index: Curr_Mclk_Index does not match the forced level\n");
- }
- }
- }
-
- return 0;
-}
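-
-/*
- * Illustrative note on the scans above: "while (tmp >>= 1) level++;"
- * computes the index of the most-significant set bit of the enable
- * mask, e.g. a mask of 0x16 (levels 1, 2 and 4 enabled) yields
- * level = 4.  For a non-zero mask this is equivalent to the kernel
- * helper fls(mask) - 1.
- */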
-
-/**
- * Find the MC microcode version and store it in the HwMgr struct
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
-{
- cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-
- hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
-
- return 0;
-}
-
-/**
- * Initialize Dynamic State Adjustment Rule Settings
- *
- * @param hwmgr the address of the powerplay hardware manager.
- */
-int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
-{
- uint32_t table_size;
- struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- hwmgr->dyn_state.mclk_sclk_ratio = 4;
- hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */
- hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */
-
- /* initialize vddc_dep_on_dal_pwrl table */
- table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
- table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
-
- if (NULL == table_clk_vlt) {
- printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
- return -ENOMEM;
- } else {
- table_clk_vlt->count = 4;
- table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
- table_clk_vlt->entries[0].v = 0;
- table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
- table_clk_vlt->entries[1].v = 720;
- table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
- table_clk_vlt->entries[2].v = 810;
- table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
- table_clk_vlt->entries[3].v = 900;
- pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
- }
-
- return 0;
-}
-
-static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
- pptable_info->vdd_dep_on_sclk;
- phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
- pptable_info->vdd_dep_on_mclk;
-
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
-		"VDD dependency on SCLK table is missing. This table is mandatory", return -1);
-	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
-		"VDD dependency on SCLK table must have at least one entry. This table is mandatory", return -1);
-
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
-		"VDD dependency on MCLK table is missing. This table is mandatory", return -1);
-	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
-		"VDD dependency on MCLK table must have at least one entry. This table is mandatory", return -1);
-
- data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
- data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
-
- pptable_info->max_clock_voltage_on_ac.sclk =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
- pptable_info->max_clock_voltage_on_ac.mclk =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
- pptable_info->max_clock_voltage_on_ac.vddc =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
- pptable_info->max_clock_voltage_on_ac.vddci =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
- hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
- pptable_info->max_clock_voltage_on_ac.sclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
- pptable_info->max_clock_voltage_on_ac.mclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
- pptable_info->max_clock_voltage_on_ac.vddc;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
- pptable_info->max_clock_voltage_on_ac.vddci;
-
- return 0;
-}
-
-int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- int result = 1;
-
- PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
- "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
- return result);
-
- if (0 == data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
- hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel)),
- "unforce pcie level failed!",
- return -1);
- }
-
- result = tonga_upload_dpm_level_enable_mask(hwmgr);
-
- return result;
-}
-
-static uint32_t tonga_get_lowest_enable_level(
- struct pp_hwmgr *hwmgr, uint32_t level_mask)
-{
- uint32_t level = 0;
-
- while (0 == (level_mask & (1 << level)))
- level++;
-
- return level;
-}
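-
-/*
- * Illustrative note: the loop above returns the index of the
- * least-significant set bit, e.g. a mask of 0x18 (levels 3 and 4
- * enabled) yields level 3, matching the kernel helper
- * __ffs(level_mask) for a non-zero mask.  The loop never terminates
- * for a mask of zero, which is why callers check the mask first.
- */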
-
-static int tonga_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
- uint32_t level;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- if (0 == data->pcie_dpm_key_disabled) {
- /* PCIE */
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
- level = tonga_get_lowest_enable_level(hwmgr,
- data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_pcie(hwmgr, level)),
- "force lowest pcie dpm state failed!", return -1);
- }
- }
-
- if (0 == data->sclk_dpm_key_disabled) {
- /* SCLK */
- if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = tonga_get_lowest_enable_level(hwmgr,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state(hwmgr, level)),
- "force sclk dpm state failed!", return -1);
-
- if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX) != level)
-				printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index: Curr_Sclk_Index does not match the forced level\n");
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
- /* MCLK */
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
- level = tonga_get_lowest_enable_level(hwmgr,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == tonga_dpm_force_state_mclk(hwmgr, level)),
- "force lowest mclk dpm state failed!", return -1);
- if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX) != level)
-				printk(KERN_ERR "[ powerplay ] Target_and_current_Profile_Index: Curr_Mclk_Index does not match the forced level\n");
- }
- }
-
- return 0;
-}
-
-static int tonga_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr *hwmgr)
-{
- uint8_t entryId;
- uint8_t voltageId;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
- phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- voltageId = sclk_table->entries[entryId].vddInd;
- sclk_table->entries[entryId].vddgfx =
- pptable_info->vddgfx_lookup_table->entries[voltageId].us_vdd;
- }
- } else {
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- voltageId = sclk_table->entries[entryId].vddInd;
- sclk_table->entries[entryId].vddc =
- pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
- }
-
- for (entryId = 0; entryId < mclk_table->count; ++entryId) {
- voltageId = mclk_table->entries[entryId].vddInd;
- mclk_table->entries[entryId].vddc =
- pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mm_table->count; ++entryId) {
- voltageId = mm_table->entries[entryId].vddcInd;
- mm_table->entries[entryId].vddc =
- pptable_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- return 0;
-
-}
-
-static int tonga_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- uint8_t entryId;
- phm_ppt_v1_voltage_lookup_record v_record;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
- phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
-
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- if (sclk_table->entries[entryId].vdd_offset & (1 << 15))
- v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
- sclk_table->entries[entryId].vdd_offset - 0xFFFF;
- else
- v_record.us_vdd = sclk_table->entries[entryId].vddgfx +
- sclk_table->entries[entryId].vdd_offset;
-
- sclk_table->entries[entryId].vddc =
- v_record.us_cac_low = v_record.us_cac_mid =
- v_record.us_cac_high = v_record.us_vdd;
-
- tonga_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
- }
-
- for (entryId = 0; entryId < mclk_table->count; ++entryId) {
- if (mclk_table->entries[entryId].vdd_offset & (1 << 15))
- v_record.us_vdd = mclk_table->entries[entryId].vddc +
- mclk_table->entries[entryId].vdd_offset - 0xFFFF;
- else
- v_record.us_vdd = mclk_table->entries[entryId].vddc +
- mclk_table->entries[entryId].vdd_offset;
-
- mclk_table->entries[entryId].vddgfx = v_record.us_cac_low =
- v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
- tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
- }
- }
-
- return 0;
-
-}
-
-static int tonga_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t entryId;
- phm_ppt_v1_voltage_lookup_record v_record;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
-
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- for (entryId = 0; entryId < mm_table->count; entryId++) {
- if (mm_table->entries[entryId].vddgfx_offset & (1 << 15))
- v_record.us_vdd = mm_table->entries[entryId].vddc +
- mm_table->entries[entryId].vddgfx_offset - 0xFFFF;
- else
- v_record.us_vdd = mm_table->entries[entryId].vddc +
- mm_table->entries[entryId].vddgfx_offset;
-
- /* Add the calculated VDDGFX to the VDDGFX lookup table */
- mm_table->entries[entryId].vddgfx = v_record.us_cac_low =
- v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
- tonga_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
- }
- }
- return 0;
-}
-
-
-/**
- * Change virtual leakage voltage to actual value.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pointer to changing voltage
- * @param pointer to leakage table
- */
-static void tonga_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
- uint16_t *voltage, phw_tonga_leakage_voltage *pLeakageTable)
-{
- uint32_t leakage_index;
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 */
- for (leakage_index = 0; leakage_index < pLeakageTable->count; leakage_index++) {
- /* if this voltage matches a leakage voltage ID */
- /* patch with actual leakage voltage */
- if (pLeakageTable->leakage_id[leakage_index] == *voltage) {
- *voltage = pLeakageTable->actual_voltage[leakage_index];
- break;
- }
- }
-
- if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- printk(KERN_ERR "[ powerplay ] Voltage value looks like a Leakage ID but it's not patched \n");
-}
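-
-/*
- * Worked example (illustrative table contents): with
- * leakage_id[0] = 0xff01 and actual_voltage[0] = 1150, an incoming
- * *voltage of 0xff01 is patched to an actual value of 1150.  A value
- * that still compares above ATOM_VIRTUAL_VOLTAGE_ID0 after the scan
- * had no match in the table, hence the error message.
- */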
-
-/**
- * Patch voltage lookup table by EVV leakages.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pointer to voltage lookup table
- * @param pointer to leakage table
- * @return always 0
- */
-static int tonga_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- phw_tonga_leakage_voltage *pLeakageTable)
-{
- uint32_t i;
-
- for (i = 0; i < lookup_table->count; i++) {
- tonga_patch_with_vdd_leakage(hwmgr,
- &lookup_table->entries[i].us_vdd, pLeakageTable);
- }
-
- return 0;
-}
-
-static int tonga_patch_clock_voltage_limits_with_vddc_leakage(struct pp_hwmgr *hwmgr,
- phw_tonga_leakage_voltage *pLeakageTable, uint16_t *Vddc)
-{
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddc, pLeakageTable);
- hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
- pptable_info->max_clock_voltage_on_dc.vddc;
-
- return 0;
-}
-
-static int tonga_patch_clock_voltage_limits_with_vddgfx_leakage(
- struct pp_hwmgr *hwmgr, phw_tonga_leakage_voltage *pLeakageTable,
- uint16_t *Vddgfx)
-{
- tonga_patch_with_vdd_leakage(hwmgr, (uint16_t *)Vddgfx, pLeakageTable);
- return 0;
-}
-
-int tonga_sort_lookup_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- uint32_t table_size, i, j;
- phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
- table_size = lookup_table->count;
-
- PP_ASSERT_WITH_CODE(0 != lookup_table->count,
- "Lookup table is empty", return -1);
-
- /* Sorting voltages */
- for (i = 0; i < table_size - 1; i++) {
- for (j = i + 1; j > 0; j--) {
- if (lookup_table->entries[j].us_vdd < lookup_table->entries[j-1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j-1];
- lookup_table->entries[j-1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
- }
- }
- }
-
- return 0;
-}
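-
-/*
- * Illustrative note: the nested loops above are an insertion sort on
- * us_vdd, ascending.  E.g. entries of { 900, 800, 850 } are reordered
- * to { 800, 850, 900 }: pass i = 0 sinks 800 below 900, pass i = 1
- * sinks 850 between them.
- */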
-
-static int tonga_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- int tmp_result;
- tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (data->vdd_gfx_control == TONGA_VOLTAGE_CONTROL_BY_SVID2) {
- tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
- pptable_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_patch_clock_voltage_limits_with_vddgfx_leakage(hwmgr,
- &(data->vddcgfx_leakage), &pptable_info->max_clock_voltage_on_dc.vddgfx);
- if (tmp_result != 0)
- result = tmp_result;
- } else {
- tmp_result = tonga_patch_lookup_table_with_leakage(hwmgr,
- pptable_info->vddc_lookup_table, &(data->vddc_leakage));
- if (tmp_result != 0)
- result = tmp_result;
-
-		tmp_result = tonga_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
- &(data->vddc_leakage), &pptable_info->max_clock_voltage_on_dc.vddc);
- if (tmp_result != 0)
- result = tmp_result;
- }
-
- tmp_result = tonga_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_calc_voltage_dependency_tables(hwmgr);
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_calc_mm_voltage_dependency_table(hwmgr);
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddgfx_lookup_table);
- if (tmp_result != 0)
- result = tmp_result;
-
- tmp_result = tonga_sort_lookup_table(hwmgr, pptable_info->vddc_lookup_table);
- if (tmp_result != 0)
- result = tmp_result;
-
- return result;
-}
-
-int tonga_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- data->low_sclk_interrupt_threshold = 0;
-
- return 0;
-}
-
-int tonga_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = tonga_read_clock_registers(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to read clock registers!", result = tmp_result);
-
- tmp_result = tonga_get_memory_type(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get memory type!", result = tmp_result);
-
- tmp_result = tonga_enable_acpi_power_management(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ACPI power management!", result = tmp_result);
-
- tmp_result = tonga_init_power_gate_state(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init power gate state!", result = tmp_result);
-
- tmp_result = tonga_get_mc_microcode_version(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get MC microcode version!", result = tmp_result);
-
- tmp_result = tonga_init_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init sclk threshold!", result = tmp_result);
-
- return result;
-}
-
-/**
- * Enable voltage control
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
- /* enable voltage control */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
- return 0;
-}
-
-/**
- * Checks if we want to support voltage control
- *
- * @param hwmgr the address of the powerplay hardware manager.
- */
-bool cf_tonga_voltage_control(const struct pp_hwmgr *hwmgr)
-{
- const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-	return (TONGA_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/*---------------------------MC----------------------------*/
-
-uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
- return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
-{
- bool result = true;
-
- switch (inReg) {
- case mmMC_SEQ_RAS_TIMING:
- *outReg = mmMC_SEQ_RAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_DLL_STBY:
- *outReg = mmMC_SEQ_DLL_STBY_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD0:
- *outReg = mmMC_SEQ_G5PDX_CMD0_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD1:
- *outReg = mmMC_SEQ_G5PDX_CMD1_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CTRL:
- *outReg = mmMC_SEQ_G5PDX_CTRL_LP;
- break;
-
- case mmMC_SEQ_CAS_TIMING:
- *outReg = mmMC_SEQ_CAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING:
- *outReg = mmMC_SEQ_MISC_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING2:
- *outReg = mmMC_SEQ_MISC_TIMING2_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CMD:
- *outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CTL:
- *outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D0:
- *outReg = mmMC_SEQ_RD_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D1:
- *outReg = mmMC_SEQ_RD_CTL_D1_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D0:
- *outReg = mmMC_SEQ_WR_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D1:
- *outReg = mmMC_SEQ_WR_CTL_D1_LP;
- break;
-
- case mmMC_PMG_CMD_EMRS:
- *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS:
- *outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS1:
- *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
- break;
-
- case mmMC_SEQ_PMG_TIMING:
- *outReg = mmMC_SEQ_PMG_TIMING_LP;
- break;
-
- case mmMC_PMG_CMD_MRS2:
- *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_2:
- *outReg = mmMC_SEQ_WR_CTL_2_LP;
- break;
-
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-int tonga_set_s0_mc_reg_index(phw_tonga_mc_reg_table *table)
-{
- uint32_t i;
- uint16_t address;
-
- for (i = 0; i < table->last; i++) {
- table->mc_reg_address[i].s0 =
- tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
- ? address : table->mc_reg_address[i].s1;
- }
- return 0;
-}
-
-int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_tonga_mc_reg_table *ni_table)
-{
- uint8_t i, j;
-
- PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
- "Invalid VramInfo table.", return -1);
-
- for (i = 0; i < table->last; i++) {
- ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
- }
- ni_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- ni_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++) {
- ni_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- }
-
- ni_table->num_entries = table->num_entries;
-
- return 0;
-}
-
-/**
- * VBIOS omits some information to reduce size; we need to recover it here.
- * 1. When we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written to mmMC_PMG_CMD_EMRS/_LP[15:0],
- *    and bits [15:0] (MRS) need to update mmMC_PMG_CMD_MRS/_LP[15:0].
- * 2. When we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be written to mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3. These data need to be set for each clock range.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param table the address of MCRegTable
- * @return always 0
- */
-int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_tonga_mc_reg_table *table)
-{
- uint8_t i, j, k;
- uint32_t temp_reg;
- const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- for (i = 0, j = table->last; i < table->last; i++) {
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- switch (table->mc_reg_address[i].s1) {
- /*
-		 * mmMC_SEQ_MISC1: bits [31:16] (EMRS1) need to be written to mmMC_PMG_CMD_EMRS/_LP[15:0];
-		 * bits [15:0] (MRS) need to update mmMC_PMG_CMD_MRS/_LP[15:0].
- */
- case mmMC_SEQ_MISC1:
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) |
- ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- }
- j++;
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
-
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
- if (!data->is_memory_GDDR5) {
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
-
- if (!data->is_memory_GDDR5) {
- table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- }
-
- break;
-
- case mmMC_SEQ_RESERVE_M:
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- break;
-
- default:
- break;
- }
-
- }
-
- table->last = j;
-
- return 0;
-}
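-
-/*
- * Worked example for the mmMC_SEQ_MISC1 case above (illustrative
- * values): with mc_data[i] = 0xAAAABBBB and mmMC_PMG_CMD_EMRS reading
- * 0xCCCCDDDD, the generated EMRS entry is
- * 0xCCCC0000 | (0xAAAA0000 >> 16) = 0xCCCCAAAA, while the MRS entry
- * keeps the MRS register's high half and takes 0xBBBB from
- * bits [15:0] of mc_data[i].
- */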
-
-int tonga_set_valid_flag(phw_tonga_mc_reg_table *table)
-{
- uint8_t i, j;
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] !=
- table->mc_reg_table_entry[j].mc_data[i]) {
- table->validflag |= (1<<i);
- break;
- }
- }
- }
-
- return 0;
-}
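-
-/*
- * Illustrative note: bit i of validflag ends up set when register
- * column i differs between any two adjacent clock-range entries, so
- * only registers whose values actually change with memory clock are
- * later packed and uploaded to the SMC.
- */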
-
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- pp_atomctrl_mc_reg_table *table;
- phw_tonga_mc_reg_table *ni_table = &data->tonga_mc_reg_table;
- uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
-
- table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
- if (NULL == table)
- return -ENOMEM;
-
- /* Program additional LP registers that are no longer programmed by VBIOS */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
- result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
- if (0 == result)
- result = tonga_copy_vbios_smc_reg_table(table, ni_table);
-
- if (0 == result) {
- tonga_set_s0_mc_reg_index(ni_table);
- result = tonga_set_mc_special_registers(hwmgr, ni_table);
- }
-
- if (0 == result)
- tonga_set_valid_flag(ni_table);
-
- kfree(table);
- return result;
-}
-
-/*
- * Copy one ARB setting to another and then switch the active set.
- * arbFreqSrc and arbFreqDest are each one of the MC_CG_ARB_FREQ_Fx constants.
- */
-int tonga_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
- uint32_t arbFreqSrc, uint32_t arbFreqDest)
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint32_t burst_time;
- uint32_t mc_cg_config;
-
- switch (arbFreqSrc) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
- break;
-
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
- break;
-
- default:
- return -1;
- }
-
- switch (arbFreqDest) {
- case MC_CG_ARB_FREQ_F0:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
- break;
-
- case MC_CG_ARB_FREQ_F1:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
- break;
-
- default:
- return -1;
- }
-
- mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
- mc_cg_config |= 0x0000000F;
- cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
-
- return 0;
-}
-
-/**
- * Initial switch from ARB F0->F1.
- *
- * This function is to be called from the SetPowerState table.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
-{
- return tonga_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-/**
- * Initialize the ARB DRAM timing table's index field.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
- const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
-
- /*
- * This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure is the field 'current'.
-	 * This solution is ugly, but we never write the whole table, only individual fields in it.
- * In reality this field should not be in that structure but in a soft register.
- */
- result = tonga_read_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, &tmp, data->sram_end);
-
- if (0 != result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return tonga_write_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, tmp, data->sram_end);
-}
-
-int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table)
-{
- const struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- uint32_t i, j;
-
- for (i = 0, j = 0; j < data->tonga_mc_reg_table.last; j++) {
- if (data->tonga_mc_reg_table.validflag & 1<<j) {
- PP_ASSERT_WITH_CODE(i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
-				"Index of the mc_reg_table->address[] array is out of bounds", return -1);
- mc_reg_table->address[i].s0 =
- PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 =
- PP_HOST_TO_SMC_US(data->tonga_mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
-
- mc_reg_table->last = (uint8_t)i;
-
- return 0;
-}
-
-/* Convert register values from driver format to SMC format */
-void tonga_convert_mc_registers(
-	const phw_tonga_mc_reg_entry *pEntry,
- SMU72_Discrete_MCRegisterSet *pData,
- uint32_t numEntries, uint32_t validflag)
-{
- uint32_t i, j;
-
- for (i = 0, j = 0; j < numEntries; j++) {
- if (validflag & 1<<j) {
- pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
- i++;
- }
- }
-}
-
-/* find the entry in the memory range table, then populate the value to SMC's tonga_mc_reg_table */
-int tonga_convert_mc_reg_table_entry_to_smc(
- struct pp_hwmgr *hwmgr,
- const uint32_t memory_clock,
- SMU72_Discrete_MCRegisterSet *mc_reg_table_data
- )
-{
- const tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t i = 0;
-
- for (i = 0; i < data->tonga_mc_reg_table.num_entries; i++) {
- if (memory_clock <=
- data->tonga_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
- break;
- }
- }
-
- if ((i == data->tonga_mc_reg_table.num_entries) && (i > 0))
- --i;
-
- tonga_convert_mc_registers(&data->tonga_mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, data->tonga_mc_reg_table.last, data->tonga_mc_reg_table.validflag);
-
- return 0;
-}
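-
-/*
- * Worked example (illustrative entries): with mclk_max values of
- * { 40000, 60000, 80000 }, a memory_clock of 65000 selects entry 2,
- * while 90000 runs off the end of the table and is clamped back to
- * the last entry (index 2).
- */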
-
-int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_MCRegisters *mc_reg_table)
-{
- int result = 0;
- tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- int res;
- uint32_t i;
-
- for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
- res = tonga_convert_mc_reg_table_entry_to_smc(
- hwmgr,
- data->dpm_table.mclk_table.dpm_levels[i].value,
- &mc_reg_table->data[i]
- );
-
- if (0 != res)
- result = res;
- }
-
- return result;
-}
-
-int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- memset(&data->mc_reg_table, 0x00, sizeof(SMU72_Discrete_MCRegisters));
- result = tonga_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize MCRegTable for the MC register addresses!", return result;);
-
- result = tonga_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize MCRegTable for driver state!", return result;);
-
- return tonga_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
- (uint8_t *)&data->mc_reg_table, sizeof(SMU72_Discrete_MCRegisters), data->sram_end);
-}
-
-/**
- * Programs static screen detection parameters
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* Set static screen threshold unit*/
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
- data->static_screen_threshold_unit);
- /* Set static screen threshold*/
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
- data->static_screen_threshold);
-
- return 0;
-}
-
-/**
- * Setup display gap for glitch free memory clock switching.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-
- display_gap = PHM_SET_FIELD(display_gap,
- CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
-
- display_gap = PHM_SET_FIELD(display_gap,
- CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- return 0;
-}
-
-/**
- * Programs activity state transition voting clients
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int tonga_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
- tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
-
- /* Clear reset for voting clients before enabling DPM */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
- return 0;
-}
-
-
-int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = tonga_check_for_dpm_stopped(hwmgr);
-
- if (cf_tonga_voltage_control(hwmgr)) {
- tmp_result = tonga_enable_voltage_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable voltage control!", result = tmp_result);
-
- tmp_result = tonga_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to contruct voltage tables!", result = tmp_result);
- }
-
- tmp_result = tonga_initialize_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize MC reg table!", result = tmp_result);
-
- tmp_result = tonga_program_static_screen_threshold_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program static screen threshold parameters!", result = tmp_result);
-
- tmp_result = tonga_enable_display_gap(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable display gap!", result = tmp_result);
-
- tmp_result = tonga_program_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program voting clients!", result = tmp_result);
-
- tmp_result = tonga_process_firmware_header(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to process firmware header!", result = tmp_result);
-
- tmp_result = tonga_initial_switch_from_arb_f0_to_f1(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize switch from ArbF0 to F1!", result = tmp_result);
-
- tmp_result = tonga_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize SMC table!", result = tmp_result);
-
- tmp_result = tonga_init_arb_table_index(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize ARB table index!", result = tmp_result);
-
- tmp_result = tonga_populate_initial_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate initialize MC Reg table!", result = tmp_result);
-
- tmp_result = tonga_notify_smc_display_change(hwmgr, false);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify no display!", result = tmp_result);
-
- /* enable SCLK control */
- tmp_result = tonga_enable_sclk_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SCLK control!", result = tmp_result);
-
- /* enable DPM */
- tmp_result = tonga_start_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to start DPM!", result = tmp_result);
-
- return result;
-}
-
-int tonga_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = tonga_check_for_dpm_running(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "SMC is still running!", return 0);
-
- tmp_result = tonga_stop_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to stop DPM!", result = tmp_result);
-
- tmp_result = tonga_reset_to_default(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to reset to default!", result = tmp_result);
-
- return result;
-}
-
-int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = tonga_set_boot_state(hwmgr);
- if (0 != result)
- printk(KERN_ERR "[ powerplay ] Failed to reset asic via set boot state! \n");
-
- return result;
-}
-
-int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- return phm_hwmgr_backend_fini(hwmgr);
-}
-
-/**
- * Initializes the Volcanic Islands Hardware Manager
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success; otherwise an appropriate error code.
- */
-int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- SMU72_Discrete_DpmTable *table = NULL;
- tonga_hwmgr *data;
- pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phw_tonga_ulv_parm *ulv;
- struct cgs_system_info sys_info = {0};
-
- PP_ASSERT_WITH_CODE((NULL != hwmgr),
- "Invalid Parameter!", return -1;);
-
- data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
-
- data->dll_defaule_on = false;
- data->sram_end = SMC_RAM_END;
-
- data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[1] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[2] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[3] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[4] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[5] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[6] = PPTONGA_TARGETACTIVITY_DFLT;
- data->activity_target[7] = PPTONGA_TARGETACTIVITY_DFLT;
-
- data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
- data->vddc_vddgfx_delta = VDDC_VDDGFX_DELTA;
- data->mclk_activity_target = PPTONGA_MCLK_TARGETACTIVITY_DFLT;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableVoltageIsland);
-
- data->sclk_dpm_key_disabled = 0;
- data->mclk_dpm_key_disabled = 0;
- data->pcie_dpm_key_disabled = 0;
- data->pcc_monitor_enabled = 0;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UnTabledHardwareInterface);
-
- data->gpio_debug = 0;
- data->engine_clock_data = 0;
- data->memory_clock_data = 0;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPatchPowerState);
-
- /* need to set voltage control types before EVV patching*/
- data->voltage_control = TONGA_VOLTAGE_CONTROL_NONE;
- data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
- data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
- data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
- data->force_pcie_gen = PP_PCIEGenInvalid;
-
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
- data->voltage_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDGFX)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
- data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
- }
- }
-
- if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDGFX);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) {
- data->mvdd_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
- }
- }
-
- if (TONGA_VOLTAGE_CONTROL_NONE == data->mvdd_control) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
- data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
- data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (TONGA_VOLTAGE_CONTROL_NONE == data->vdd_ci_control)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- if (pptable_info->cac_dtp_table->usClockStretchAmount != 0)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
-
- /* Initializes DPM default values*/
- tonga_initialize_dpm_defaults(hwmgr);
-
- /* Get leakage voltage based on leakage ID.*/
- PP_ASSERT_WITH_CODE((0 == tonga_get_evv_voltage(hwmgr)),
- "Get EVV Voltage Failed. Abort Driver loading!", return -1);
-
- tonga_complete_dependency_tables(hwmgr);
-
- /* Parse pptable data read from VBIOS*/
- tonga_set_private_var_based_on_pptale(hwmgr);
-
- /* ULV Support*/
- ulv = &(data->ulv);
- ulv->ulv_supported = false;
-
- /* Initialize Dynamic State Adjustment Rule Settings */
- result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
- if (result)
- printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
- data->uvd_enabled = false;
-
- table = &(data->smc_state_table);
-
- /*
- * If ucGPIO_ID == VDDC_PCC_GPIO_PINID in the GPIO_LUTable, the Peak Current
- * Control feature is enabled and we should program the PCC HW register.
- */
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
- uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
-
- switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
- case 0:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
- break;
- case 1:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
- break;
- case 2:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
- break;
- case 3:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
- break;
- case 4:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
- break;
- default:
- printk(KERN_ERR "[ powerplay ] Failed to setup PCC HW register! \
- Wrong GPIO assigned for VDDC_PCC_GPIO_PINID! \n");
- break;
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCNB_PWRMGT_CNTL, temp_reg);
- }
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableSMU7ThermalManagement);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SMU7);
-
- data->vddc_phase_shed_control = false;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (!result) {
- if (sys_info.value & AMD_PG_SUPPORT_UVD)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
- if (sys_info.value & AMD_PG_SUPPORT_VCE)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
- }
-
- if (0 == result) {
- data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- TONGA_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
- else
- data->pcie_gen_cap = (uint32_t)sys_info.value;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
- else
- data->pcie_lane_cap = (uint32_t)sys_info.value;
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- tonga_hwmgr_backend_fini(hwmgr);
- }
-
- return result;
-}
-
-static int tonga_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
-{
- int ret = 0;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = tonga_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = tonga_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = tonga_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
- hwmgr->dpm_level = level;
- return ret;
-}
-
-static int tonga_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
- struct pp_power_state *prequest_ps,
- const struct pp_power_state *pcurrent_ps)
-{
- struct tonga_power_state *tonga_ps =
- cast_phw_tonga_power_state(&prequest_ps->hardware);
-
- uint32_t sclk;
- uint32_t mclk;
- struct PP_Clocks minimum_clocks = {0};
- bool disable_mclk_switching;
- bool disable_mclk_switching_for_frame_lock;
- struct cgs_display_info info = {0};
- const struct phm_clock_and_voltage_limits *max_limits;
- uint32_t i;
- tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- int32_t count;
- int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
- data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
-
- PP_ASSERT_WITH_CODE(tonga_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",
- );
-
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
- &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
- &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
- if (PP_PowerSource_DC == hwmgr->power_source) {
- for (i = 0; i < tonga_ps->performance_level_count; i++) {
- if (tonga_ps->performance_levels[i].memory_clock > max_limits->mclk)
- tonga_ps->performance_levels[i].memory_clock = max_limits->mclk;
- if (tonga_ps->performance_levels[i].engine_clock > max_limits->sclk)
- tonga_ps->performance_levels[i].engine_clock = max_limits->sclk;
- }
- }
-
- tonga_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
- tonga_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;
-
- tonga_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- /* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
-
- /* TODO: GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
-
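- /* Stable P-state: pin SCLK to 75% of the AC limit (snapped down to a dependency-table entry) and MCLK to the AC maximum. */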
- max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
- stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
- for (count = pptable_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
- if (stable_pstate_sclk >= pptable_info->vdd_dep_on_sclk->entries[count].clk) {
- stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[count].clk;
- break;
- }
- }
-
- if (count < 0)
- stable_pstate_sclk = pptable_info->vdd_dep_on_sclk->entries[0].clk;
-
- stable_pstate_mclk = max_limits->mclk;
-
- minimum_clocks.engineClock = stable_pstate_sclk;
- minimum_clocks.memoryClock = stable_pstate_mclk;
- }
-
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- tonga_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- tonga_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- tonga_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
- disable_mclk_switching_for_frame_lock = phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
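- /* MCLK switching can glitch with more than one active display or with frame lock enabled, so keep MCLK at the high level in those cases. */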
- disable_mclk_switching = (1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock;
-
- sclk = tonga_ps->performance_levels[0].engine_clock;
- mclk = tonga_ps->performance_levels[0].memory_clock;
-
- if (disable_mclk_switching)
- mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock;
-
- if (sclk < minimum_clocks.engineClock)
- sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;
-
- if (mclk < minimum_clocks.memoryClock)
- mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;
-
- tonga_ps->performance_levels[0].engine_clock = sclk;
- tonga_ps->performance_levels[0].memory_clock = mclk;
-
- tonga_ps->performance_levels[1].engine_clock =
- (tonga_ps->performance_levels[1].engine_clock >= tonga_ps->performance_levels[0].engine_clock) ?
- tonga_ps->performance_levels[1].engine_clock :
- tonga_ps->performance_levels[0].engine_clock;
-
- if (disable_mclk_switching) {
- if (mclk < tonga_ps->performance_levels[1].memory_clock)
- mclk = tonga_ps->performance_levels[1].memory_clock;
-
- tonga_ps->performance_levels[0].memory_clock = mclk;
- tonga_ps->performance_levels[1].memory_clock = mclk;
- } else {
- if (tonga_ps->performance_levels[1].memory_clock < tonga_ps->performance_levels[0].memory_clock)
- tonga_ps->performance_levels[1].memory_clock = tonga_ps->performance_levels[0].memory_clock;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
- for (i=0; i < tonga_ps->performance_level_count; i++) {
- tonga_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
- tonga_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
- tonga_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
- tonga_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
- }
- }
-
- return 0;
-}
-
-int tonga_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
- return sizeof(struct tonga_power_state);
-}
-
-static int tonga_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct tonga_power_state *tonga_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
- if (low)
- return tonga_ps->performance_levels[0].memory_clock;
- else
- return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
-}
-
-static int tonga_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct tonga_power_state *tonga_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
- if (low)
- return tonga_ps->performance_levels[0].engine_clock;
- else
- return tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
-}
-
-static uint16_t tonga_get_current_pcie_speed(
- struct pp_hwmgr *hwmgr)
-{
- uint32_t speed_cntl = 0;
-
- speed_cntl = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
- return((uint16_t)PHM_GET_FIELD(speed_cntl,
- PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int tonga_get_current_pcie_lane_number(
- struct pp_hwmgr *hwmgr)
-{
- uint32_t link_width;
-
- link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__PCIE,
- PCIE_LC_LINK_WIDTH_CNTL,
- LC_LINK_WIDTH_RD);
-
- PP_ASSERT_WITH_CODE((7 >= link_width),
- "Invalid PCIe lane width!", return 0);
-
- return decode_pcie_lane_width(link_width);
-}
-
-static int tonga_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_power_state *ps = (struct tonga_power_state *)hw_ps;
- ATOM_FIRMWARE_INFO_V2_2 *fw_info;
- uint16_t size;
- uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
- /* First retrieve the Boot clocks and VDDC from the firmware info table.
- * We assume here that fw_info is unchanged if this call fails.
- */
- fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
- hwmgr->device, index,
- &size, &frev, &crev);
- if (!fw_info)
- /* During a test, there is no firmware info table. */
- return 0;
-
- /* Patch the state. */
- data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock);
- data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock);
- data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
- data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage);
- data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
- data->vbios_boot_state.pcie_gen_bootup_value = tonga_get_current_pcie_speed(hwmgr);
- data->vbios_boot_state.pcie_lane_bootup_value =
- (uint16_t)tonga_get_current_pcie_lane_number(hwmgr);
-
- /* set boot power state */
- ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
- ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
- ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
- ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
- return 0;
-}
-
-static int tonga_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
- void *state, struct pp_power_state *power_state,
- void *pp_table, uint32_t classification_flag)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- struct tonga_power_state *tonga_ps =
- (struct tonga_power_state *)(&(power_state->hardware));
-
- struct tonga_performance_level *performance_level;
-
- ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
-
- ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
- (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-
- ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (ATOM_Tonga_SCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
-
- ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
- (ATOM_Tonga_MCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
- /* The following fields are not initialized here: id orderedList allStatesList */
- power_state->classification.ui_label =
- (le16_to_cpu(state_entry->usClassification) &
- ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
- ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
- power_state->classification.flags = classification_flag;
- /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
- power_state->classification.temporary_state = false;
- power_state->classification.to_be_deleted = false;
-
- power_state->validation.disallowOnDC =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_DISALLOW_ON_DC));
-
- power_state->pcie.lanes = 0;
-
- power_state->display.disableFrameModulation = false;
- power_state->display.limitRefreshrate = false;
- power_state->display.enableVariBright =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & ATOM_Tonga_ENABLE_VARIBRIGHT));
-
- power_state->validation.supportedPowerLevels = 0;
- power_state->uvd_clocks.VCLK = 0;
- power_state->uvd_clocks.DCLK = 0;
- power_state->temperatures.min = 0;
- power_state->temperatures.max = 0;
-
- performance_level = &(tonga_ps->performance_levels
- [tonga_ps->performance_level_count++]);
-
- PP_ASSERT_WITH_CODE(
- (tonga_ps->performance_level_count < SMU72_MAX_LEVELS_GRAPHICS),
- "Performance levels exceeds SMC limit!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (tonga_ps->performance_level_count <=
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
- "Performance levels exceeds Driver limit!",
- return -1);
-
- /* Performance levels are arranged from low to high. */
- performance_level->memory_clock =
- le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexLow].ulMclk);
-
- performance_level->engine_clock =
- le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexLow].ulSclk);
-
- performance_level->pcie_gen = get_pcie_gen_support(
- data->pcie_gen_cap,
- state_entry->ucPCIEGenLow);
-
- performance_level->pcie_lane = get_pcie_lane_support(
- data->pcie_lane_cap,
- state_entry->ucPCIELaneLow);
-
- performance_level =
- &(tonga_ps->performance_levels[tonga_ps->performance_level_count++]);
-
- performance_level->memory_clock =
- le32_to_cpu(mclk_dep_table->entries[state_entry->ucMemoryClockIndexHigh].ulMclk);
-
- performance_level->engine_clock =
- le32_to_cpu(sclk_dep_table->entries[state_entry->ucEngineClockIndexHigh].ulSclk);
-
- performance_level->pcie_gen = get_pcie_gen_support(
- data->pcie_gen_cap,
- state_entry->ucPCIEGenHigh);
-
- performance_level->pcie_lane = get_pcie_lane_support(
- data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
-
- return 0;
-}
-
-static int tonga_get_pp_table_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index, struct pp_power_state *ps)
-{
- int result;
- struct tonga_power_state *tonga_ps;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- ps->hardware.magic = PhwTonga_Magic;
-
- tonga_ps = cast_phw_tonga_power_state(&(ps->hardware));
-
- result = tonga_get_powerplay_table_entry(hwmgr, entry_index, ps,
- tonga_get_pp_table_entry_callback_func);
-
- /* This is the earliest time we have all the dependency tables and the VBIOS
- * boot state, since PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
- * state. If there is only one VDDCI/MCLK level, check that it matches the
- * VBIOS boot state.
- */
- if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
- if (dep_mclk_table->entries[0].clk !=
- data->vbios_boot_state.mclk_bootup_value)
- printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot MCLK level");
- if (dep_mclk_table->entries[0].vddci !=
- data->vbios_boot_state.vddci_bootup_value)
- printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot VDDCI level");
- }
-
- /* set DC compatible flag if this state supports DC */
- if (!ps->validation.disallowOnDC)
- tonga_ps->dc_compatible = true;
-
- if (ps->classification.flags & PP_StateClassificationFlag_ACPI)
- data->acpi_pcie_gen = tonga_ps->performance_levels[0].pcie_gen;
- else if (ps->classification.flags & PP_StateClassificationFlag_Boot) {
- if (data->bacos.best_match == 0xffff) {
- /* For V.I. use boot state as base BACO state */
- data->bacos.best_match = PP_StateClassificationFlag_Boot;
- data->bacos.performance_level = tonga_ps->performance_levels[0];
- }
- }
-
- tonga_ps->uvd_clocks.VCLK = ps->uvd_clocks.VCLK;
- tonga_ps->uvd_clocks.DCLK = ps->uvd_clocks.DCLK;
-
- if (!result) {
- uint32_t i;
-
- switch (ps->classification.ui_label) {
- case PP_StateUILabel_Performance:
- data->use_pcie_performance_levels = true;
-
- for (i = 0; i < tonga_ps->performance_level_count; i++) {
- if (data->pcie_gen_performance.max <
- tonga_ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.max =
- tonga_ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_performance.min >
- tonga_ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.min =
- tonga_ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_performance.max <
- tonga_ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.max =
- tonga_ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_performance.min >
- tonga_ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.min =
- tonga_ps->performance_levels[i].pcie_lane;
- }
- break;
- case PP_StateUILabel_Battery:
- data->use_pcie_power_saving_levels = true;
-
- for (i = 0; i < tonga_ps->performance_level_count; i++) {
- if (data->pcie_gen_power_saving.max <
- tonga_ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.max =
- tonga_ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_power_saving.min >
- tonga_ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.min =
- tonga_ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_power_saving.max <
- tonga_ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.max =
- tonga_ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_power_saving.min >
- tonga_ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.min =
- tonga_ps->performance_levels[i].pcie_lane;
- }
- break;
- default:
- break;
- }
- }
- return 0;
-}
-
-static void
-tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- uint32_t sclk, mclk, activity_percent;
- uint32_t offset;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency));
-
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency));
-
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
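- /* the SMC reports clock frequencies in 10 kHz units; divide by 100 for MHz */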
- seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100);
-
- offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
- activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
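- /* AverageGraphicsActivity is an 8.8 fixed-point percentage; add 0x80 to round before shifting down */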
- activity_percent += 0x80;
- activity_percent >>= 8;
-
- seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
- seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
- seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
-
-static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
- uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
- struct tonga_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
- uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
- struct PP_Clocks min_clocks = {0};
- uint32_t i;
- struct cgs_display_info info = {0};
-
- data->need_update_smu7_dpm_table = 0;
-
- for (i = 0; i < psclk_table->count; i++) {
- if (sclk == psclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= psclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- } else {
- /* TODO: check SCLK against DAL's minimum clocks in case a DeepSleep divider update is required. */
- if (data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- for (i=0; i < pmclk_table->count; i++) {
- if (mclk == pmclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= pmclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
- return 0;
-}
-
-static uint16_t tonga_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_ps)
-{
- uint32_t i;
- uint32_t sclk, max_sclk = 0;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_dpm_table *pdpm_table = &data->dpm_table;
-
- for (i = 0; i < hw_ps->performance_level_count; i++) {
- sclk = hw_ps->performance_levels[i].engine_clock;
- if (max_sclk < sclk)
- max_sclk = sclk;
- }
-
- for (i = 0; i < pdpm_table->sclk_table.count; i++) {
- if (pdpm_table->sclk_table.dpm_levels[i].value == max_sclk)
- return (uint16_t) ((i >= pdpm_table->pcie_speed_table.count) ?
- pdpm_table->pcie_speed_table.dpm_levels[pdpm_table->pcie_speed_table.count-1].value :
- pdpm_table->pcie_speed_table.dpm_levels[i].value);
- }
-
- return 0;
-}
-
-static int tonga_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
- const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);
-
- uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_nps);
- uint16_t current_link_speed;
-
- if (data->force_pcie_gen == PP_PCIEGenInvalid)
- current_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_cps);
- else
- current_link_speed = data->force_pcie_gen;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->pspp_notify_required = false;
- if (target_link_speed > current_link_speed) {
- switch (target_link_speed) {
- case PP_PCIEGen3:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
- break;
- data->force_pcie_gen = PP_PCIEGen2;
- if (current_link_speed == PP_PCIEGen2)
- break;
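- /* SMC rejected Gen3; fall through and request Gen2 */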
- case PP_PCIEGen2:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
- break;
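- /* fall through */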
- default:
- data->force_pcie_gen = tonga_get_current_pcie_speed(hwmgr);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- data->pspp_notify_required = true;
- }
-
- return 0;
-}
-
-static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
- "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
- "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
-{
- int result = 0;
-
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t sclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].engine_clock;
- uint32_t mclk = tonga_ps->performance_levels[tonga_ps->performance_level_count-1].memory_clock;
- struct tonga_dpm_table *pdpm_table = &data->dpm_table;
-
- struct tonga_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
- pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
- /* Need to do calculation based on the golden DPM table
- * as the Heatmap GPU Clock axis is also based on the default values
- */
- PP_ASSERT_WITH_CODE(
- (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) {
- clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) /
- pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
-
- pdpm_table->sclk_table.dpm_levels[i].value =
- pgolden_dpm_table->sclk_table.dpm_levels[i].value +
- (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
-
- } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) {
- clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) /
- pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value;
-
- pdpm_table->sclk_table.dpm_levels[i].value =
- pgolden_dpm_table->sclk_table.dpm_levels[i].value -
- (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100;
- } else
- pdpm_table->sclk_table.dpm_levels[i].value =
- pgolden_dpm_table->sclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
- pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
- PP_ASSERT_WITH_CODE(
- (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = pdpm_table->mclk_table.count < 2 ? 0 : pdpm_table->mclk_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) {
- clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) /
- pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
-
- pdpm_table->mclk_table.dpm_levels[i].value =
- pgolden_dpm_table->mclk_table.dpm_levels[i].value +
- (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
-
- } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) {
- clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) /
- pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value;
-
- pdpm_table->mclk_table.dpm_levels[i].value =
- pgolden_dpm_table->mclk_table.dpm_levels[i].value -
- (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100;
- } else
- pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
- result = tonga_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
- /*populate MCLK dpm table to SMU7 */
- result = tonga_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- return result;
-}
-
-static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
- struct tonga_single_dpm_table *pdpm_table,
- uint32_t low_limit, uint32_t high_limit)
-{
- uint32_t i;
-
- for (i = 0; i < pdpm_table->count; i++) {
- if ((pdpm_table->dpm_levels[i].value < low_limit) ||
- (pdpm_table->dpm_levels[i].value > high_limit))
- pdpm_table->dpm_levels[i].enabled = false;
- else
- pdpm_table->dpm_levels[i].enabled = true;
- }
- return 0;
-}
-
-static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t high_limit_count;
-
- PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1),
- "power state did not have any performance level",
- return -1);
-
- high_limit_count = (1 == hw_state->performance_level_count) ? 0 : 1;
-
- tonga_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.sclk_table),
- hw_state->performance_levels[0].engine_clock,
- hw_state->performance_levels[high_limit_count].engine_clock);
-
- tonga_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.mclk_table),
- hw_state->performance_levels[0].memory_clock,
- hw_state->performance_levels[high_limit_count].memory_clock);
-
- return 0;
-}
-
-static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
-{
- int result;
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
-
- result = tonga_trim_dpm_states(hwmgr, tonga_ps);
- if (0 != result)
- return result;
-
- data->dpm_level_enable_mask.sclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
- data->dpm_level_enable_mask.mclk_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
- data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
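- /* with UVD active, mask off the lowest MCLK DPM level */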
- if (data->uvd_enabled)
- data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
-
- data->dpm_level_enable_mask.pcie_dpm_enable_mask = tonga_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- (PPSMC_Msg)PPSMC_MSG_VCEDPM_Enable :
- (PPSMC_Msg)PPSMC_MSG_VCEDPM_Disable);
-}
-
-int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
-}
-
-int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *ptable_information = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.UvdBootLevel = (uint8_t) (ptable_information->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
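- /* round the byte offset down to a dword boundary for the indirect SMC access */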
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
- }
-
- return tonga_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- const struct tonga_power_state *tonga_nps = cast_const_phw_tonga_power_state(states->pnew_state);
- const struct tonga_power_state *tonga_cps = cast_const_phw_tonga_power_state(states->pcurrent_state);
-
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (tonga_nps->vce_clocks.EVCLK > 0 && (tonga_cps == NULL || tonga_cps->vce_clocks.EVCLK == 0)) {
- data->smc_state_table.VceBootLevel = (uint8_t) (pptable_info->mm_dep_table->count - 1);
-
- mm_boot_level_offset = data->dpm_table_start + offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
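- /* round the byte offset down to a dword boundary for the indirect SMC access */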
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.VceBootLevel));
-
- tonga_enable_disable_vce_dpm(hwmgr, true);
- } else if (tonga_nps->vce_clocks.EVCLK == 0 && tonga_cps != NULL && tonga_cps->vce_clocks.EVCLK > 0)
- tonga_enable_disable_vce_dpm(hwmgr, false);
-
- return 0;
-}
-
-static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- uint32_t address;
- int32_t result;
-
- if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
- return 0;
-
- memset(&data->mc_reg_table, 0, sizeof(SMU72_Discrete_MCRegisters));
-
- result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table));
- if (result != 0)
- return result;
-
- address = data->mc_reg_table_start + (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
-
- return tonga_copy_bytes_to_smc(hwmgr->smumgr, address,
- (uint8_t *)&data->mc_reg_table.data[0],
- sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
- data->sram_end);
-}
-
-static int tonga_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
- return tonga_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
-
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
- "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
- PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_UnfreezeLevel),
- "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- data->need_update_smu7_dpm_table = 0;
-
- return 0;
-}
-
-static int tonga_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- const struct tonga_power_state *tonga_ps = cast_const_phw_tonga_power_state(states->pnew_state);
- uint16_t target_link_speed = tonga_get_maximum_link_speed(hwmgr, tonga_ps);
- uint8_t request;
-
- if (data->pspp_notify_required ||
- data->pcie_performance_request) {
- if (target_link_speed == PP_PCIEGen3)
- request = PCIE_PERF_REQ_GEN3;
- else if (target_link_speed == PP_PCIEGen2)
- request = PCIE_PERF_REQ_GEN2;
- else
- request = PCIE_PERF_REQ_GEN1;
-
- if (request == PCIE_PERF_REQ_GEN1 && tonga_get_current_pcie_speed(hwmgr) > 0) {
- data->pcie_performance_request = false;
- return 0;
- }
-
- if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) {
- if (PP_PCIEGen2 == target_link_speed)
- printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 failed!\n");
- else
- printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 failed!\n");
- }
- }
-
- data->pcie_performance_request = false;
- return 0;
-}
-
-static int tonga_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
-{
- int tmp_result, result = 0;
-
- tmp_result = tonga_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result = tonga_request_link_speed_change_before_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result);
- }
-
- tmp_result = tonga_freeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
- tmp_result = tonga_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result);
-
- tmp_result = tonga_generate_dpm_level_enable_mask(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result);
-
- tmp_result = tonga_update_vce_dpm(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result);
-
- tmp_result = tonga_update_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result);
-
- tmp_result = tonga_update_and_upload_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result);
-
- tmp_result = tonga_program_memory_timing_parameters_conditionally(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result);
-
- tmp_result = tonga_unfreeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result);
-
- tmp_result = tonga_upload_dpm_level_enable_mask(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result = tonga_notify_link_speed_change_after_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result);
- }
-
- return result;
-}
-
-/**
-* Set maximum target operating fan output PWM
-*
-* @param hwmgr: the address of the powerplay hardware manager.
-* @param us_max_fan_pwm: max operating fan PWM in percent.
-* @return The response that came from the SMC.
-*/
-static int tonga_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
-{
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1);
-}
-
-int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
-{
- uint32_t num_active_displays = 0;
- struct cgs_display_info info = {0};
- info.mode_info = NULL;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- num_active_displays = info.display_count;
-
- if (num_active_displays > 1) /* TODO: && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE) */
- tonga_notify_smc_display_change(hwmgr, false);
- else
- tonga_notify_smc_display_change(hwmgr, true);
-
- return 0;
-}
-
-/**
-* Programs the display gap
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-int tonga_program_display_gap(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- uint32_t num_active_displays = 0;
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
- uint32_t display_gap2;
- uint32_t pre_vbi_time_in_us;
- uint32_t frame_time_in_us;
- uint32_t ref_clock;
- uint32_t refresh_rate = 0;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
-
- info.mode_info = &mode_info;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_active_displays = info.display_count;
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- ref_clock = mode_info.ref_clock;
- refresh_rate = mode_info.refresh_rate;
-
- if (0 == refresh_rate)
- refresh_rate = 60;
-
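- /*
- * frame_time_in_us is one frame period; pre_vbi_time_in_us runs from frame
- * start to 200 us before vblank. display_gap2 is that interval converted to
- * reference-clock ticks (ref_clock is in 10 kHz units, so ref_clock / 100 is
- * ticks per microsecond).
- */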
- frame_time_in_us = 1000000 / refresh_rate;
-
- pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
- display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, PreVBlankGap), 0x64);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU72_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
- if (num_active_displays == 1)
- tonga_notify_smc_display_change(hwmgr, true);
-
- return 0;
-}
-
-int tonga_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
- tonga_program_display_gap(hwmgr);
-
- /* to do PhwTonga_CacUpdateDisplayConfiguration(pHwMgr); */
- return 0;
-}
-
-/**
-* Set maximum target operating fan output RPM
-*
-* @param hwmgr: the address of the powerplay hardware manager.
-* @param us_max_fan_rpm: max operating fan RPM value.
-* @return The response that came from the SMC.
-*/
-static int tonga_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
-{
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm) ? 0 : -1);
-}
-
-uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr)
-{
- uint32_t reference_clock;
- uint32_t tc;
- uint32_t divide;
-
- ATOM_FIRMWARE_INFO *fw_info;
- uint16_t size;
- uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
- tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
-
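- /* if TCLK is muxed onto XCLK, the reference clock is the fixed TCLK */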
- if (tc)
- return TCLK;
-
- fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index,
- &size, &frev, &crev);
-
- if (!fw_info)
- return 0;
-
- reference_clock = le16_to_cpu(fw_info->usReferenceClock);
-
- divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
-
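- /* a nonzero XTALIN_DIVIDE means the crystal input is divided by 4 */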
- if (0 != divide)
- return reference_clock / 4;
-
- return reference_clock;
-}
-
-int tonga_dpm_set_interrupt_state(void *private_data,
- unsigned src_id, unsigned type,
- int enabled)
-{
- uint32_t cg_thermal_int;
- struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- switch (type) {
- case AMD_THERMAL_IRQ_LOW_TO_HIGH:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
-
- case AMD_THERMAL_IRQ_HIGH_TO_LOW:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
- default:
- break;
- }
- return 0;
-}
-
-int tonga_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
- const void *thermal_interrupt_info)
-{
- int result;
- const struct pp_interrupt_registration_info *info =
- (const struct pp_interrupt_registration_info *)thermal_interrupt_info;
-
- if (info == NULL)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
- tonga_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
- tonga_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- return 0;
-}
-
-bool tonga_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- bool is_update_required = false;
- struct cgs_display_info info = {0,0,NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- is_update_required = true;
-/* TODO: need to get the deep sleep clock from DAL
- if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
- if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
- is_update_required = true;
-*/
- return is_update_required;
-}
-
-static inline bool tonga_are_power_levels_equal(const struct tonga_performance_level *pl1,
- const struct tonga_performance_level *pl2)
-{
- return ((pl1->memory_clock == pl2->memory_clock) &&
- (pl1->engine_clock == pl2->engine_clock) &&
- (pl1->pcie_gen == pl2->pcie_gen) &&
- (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-int tonga_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
-{
- const struct tonga_power_state *psa = cast_const_phw_tonga_power_state(pstate1);
- const struct tonga_power_state *psb = cast_const_phw_tonga_power_state(pstate2);
- int i;
-
- if (equal == NULL || psa == NULL || psb == NULL)
- return -EINVAL;
-
- /* If the two states don't even have the same number of performance levels they cannot be the same state. */
- if (psa->performance_level_count != psb->performance_level_count) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < psa->performance_level_count; i++) {
- if (!tonga_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
- /* If we have found even one performance level pair that is different the states are different. */
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK));
- *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK));
- *equal &= (psa->sclk_threshold == psb->sclk_threshold);
- *equal &= (psa->acp_clk == psb->acp_clk);
-
- return 0;
-}
-
-static int tonga_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
- if (mode) {
- /* stop auto-manage */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
- tonga_fan_ctrl_set_static_mode(hwmgr, mode);
- } else {
- /* restart auto-manage */
- tonga_fan_ctrl_reset_fan_speed_to_default(hwmgr);
- }
-
- return 0;
-}
-
-static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr->fan_ctrl_is_in_default_mode)
- return hwmgr->fan_ctrl_default_mode;
- else
- return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- return -EINVAL;
-
- switch (type) {
- case PP_SCLK:
- if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
- break;
- case PP_MCLK:
- if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
- break;
- case PP_PCIE:
- {
- uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
-
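- /* find the index of the highest set bit, i.e. the highest allowed PCIe DPM level */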
- while (tmp >>= 1)
- level++;
-
- if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
-static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, char *buf)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct tonga_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
-
- switch (type) {
- case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
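- /* dpm level values are stored in 10 kHz units; value / 100 prints MHz */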
- for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMHz %s\n",
- i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMHz %s\n",
- i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_PCIE:
- pcie_speed = tonga_get_current_pcie_speed(hwmgr);
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_speed != pcie_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
- (i == now) ? "*" : "");
- break;
- default:
- break;
- }
- return size;
-}
-
-static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct tonga_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- int value;
-
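- /* overdrive percentage: how far the current top sclk level exceeds the golden (default) top level */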
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return value;
-}
-
-static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- struct pp_power_state *ps;
- struct tonga_power_state *tonga_ps;
-
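- /* the overdrive request is a percentage, capped at +20% */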
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
- tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
- value / 100 +
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return 0;
-}
-
-static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct tonga_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- int value;
-
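- /* same percentage formula as tonga_get_sclk_od, applied to the top memory clock level */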
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return value;
-}
-
-static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- struct tonga_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- struct pp_power_state *ps;
- struct tonga_power_state *tonga_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
-
- tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
- value / 100 +
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return 0;
-}
-
-static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
- .backend_init = &tonga_hwmgr_backend_init,
- .backend_fini = &tonga_hwmgr_backend_fini,
- .asic_setup = &tonga_setup_asic_task,
- .dynamic_state_management_enable = &tonga_enable_dpm_tasks,
- .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
- .apply_state_adjust_rules = tonga_apply_state_adjust_rules,
- .force_dpm_level = &tonga_force_dpm_level,
- .power_state_set = tonga_set_power_state_tasks,
- .get_power_state_size = tonga_get_power_state_size,
- .get_mclk = tonga_dpm_get_mclk,
- .get_sclk = tonga_dpm_get_sclk,
- .patch_boot_state = tonga_dpm_patch_boot_state,
- .get_pp_table_entry = tonga_get_pp_table_entry,
- .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
- .print_current_perforce_level = tonga_print_current_perforce_level,
- .powerdown_uvd = tonga_phm_powerdown_uvd,
- .powergate_uvd = tonga_phm_powergate_uvd,
- .powergate_vce = tonga_phm_powergate_vce,
- .disable_clock_power_gating = tonga_phm_disable_clock_power_gating,
- .update_clock_gatings = tonga_phm_update_clock_gatings,
- .notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment,
- .display_config_changed = tonga_display_configuration_changed_task,
- .set_max_fan_pwm_output = tonga_set_max_fan_pwm_output,
- .set_max_fan_rpm_output = tonga_set_max_fan_rpm_output,
- .get_temperature = tonga_thermal_get_temperature,
- .stop_thermal_controller = tonga_thermal_stop_thermal_controller,
- .get_fan_speed_info = tonga_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_percent = tonga_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = tonga_fan_ctrl_set_fan_speed_percent,
- .reset_fan_speed_to_default = tonga_fan_ctrl_reset_fan_speed_to_default,
- .get_fan_speed_rpm = tonga_fan_ctrl_get_fan_speed_rpm,
- .set_fan_speed_rpm = tonga_fan_ctrl_set_fan_speed_rpm,
- .uninitialize_thermal_controller = tonga_thermal_ctrl_uninitialize_thermal_controller,
- .register_internal_thermal_interrupt = tonga_register_internal_thermal_interrupt,
- .check_smc_update_required_for_display_configuration = tonga_check_smc_update_required_for_display_configuration,
- .check_states_equal = tonga_check_states_equal,
- .set_fan_control_mode = tonga_set_fan_control_mode,
- .get_fan_control_mode = tonga_get_fan_control_mode,
- .force_clock_level = tonga_force_clock_level,
- .print_clock_levels = tonga_print_clock_levels,
- .get_sclk_od = tonga_get_sclk_od,
- .set_sclk_od = tonga_set_sclk_od,
- .get_mclk_od = tonga_get_mclk_od,
- .set_mclk_od = tonga_set_mclk_od,
-};
-
-int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
- hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
- hwmgr->pptable_func = &tonga_pptable_funcs;
- pp_tonga_thermal_initialize(hwmgr);
- return 0;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
deleted file mode 100644
index 3961884bfa9b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef TONGA_HWMGR_H
-#define TONGA_HWMGR_H
-
-#include "hwmgr.h"
-#include "smu72_discrete.h"
-#include "ppatomctrl.h"
-#include "ppinterrupt.h"
-#include "tonga_powertune.h"
-#include "pp_endian.h"
-
-#define TONGA_MAX_HARDWARE_POWERLEVELS 2
-#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
-
-struct tonga_performance_level {
- uint32_t memory_clock;
- uint32_t engine_clock;
- uint16_t pcie_gen;
- uint16_t pcie_lane;
-};
-
-struct _phw_tonga_bacos {
- uint32_t best_match;
- uint32_t baco_flags;
- struct tonga_performance_level performance_level;
-};
-typedef struct _phw_tonga_bacos phw_tonga_bacos;
-
-struct _phw_tonga_uvd_clocks {
- uint32_t VCLK;
- uint32_t DCLK;
-};
-
-typedef struct _phw_tonga_uvd_clocks phw_tonga_uvd_clocks;
-
-struct _phw_tonga_vce_clocks {
- uint32_t EVCLK;
- uint32_t ECCLK;
-};
-
-typedef struct _phw_tonga_vce_clocks phw_tonga_vce_clocks;
-
-struct tonga_power_state {
- uint32_t magic;
- phw_tonga_uvd_clocks uvd_clocks;
- phw_tonga_vce_clocks vce_clocks;
- uint32_t sam_clk;
- uint32_t acp_clk;
- uint16_t performance_level_count;
- bool dc_compatible;
- uint32_t sclk_threshold;
- struct tonga_performance_level performance_levels[TONGA_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct _phw_tonga_dpm_level {
- bool enabled;
- uint32_t value;
- uint32_t param1;
-};
-typedef struct _phw_tonga_dpm_level phw_tonga_dpm_level;
-
-#define TONGA_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define TONGA_MINIMUM_ENGINE_CLOCK 2500
-
-struct tonga_single_dpm_table {
- uint32_t count;
- phw_tonga_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct tonga_dpm_table {
- struct tonga_single_dpm_table sclk_table;
- struct tonga_single_dpm_table mclk_table;
- struct tonga_single_dpm_table pcie_speed_table;
- struct tonga_single_dpm_table vddc_table;
- struct tonga_single_dpm_table vdd_gfx_table;
- struct tonga_single_dpm_table vdd_ci_table;
- struct tonga_single_dpm_table mvdd_table;
-};
-typedef struct tonga_dpm_table phw_tonga_dpm_table;
-
-
-struct _phw_tonga_clock_registers {
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t vDLL_CNTL;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_SS1;
- uint32_t vMPLL_SS2;
-};
-typedef struct _phw_tonga_clock_registers phw_tonga_clock_registers;
-
-struct _phw_tonga_voltage_smio_registers {
- uint32_t vs0_vid_lower_smio_cntl;
-};
-typedef struct _phw_tonga_voltage_smio_registers phw_tonga_voltage_smio_registers;
-
-
-struct _phw_tonga_mc_reg_entry {
- uint32_t mclk_max;
- uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-typedef struct _phw_tonga_mc_reg_entry phw_tonga_mc_reg_entry;
-
-struct _phw_tonga_mc_reg_table {
- uint8_t last; /* number of registers*/
- uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
- uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
- phw_tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-typedef struct _phw_tonga_mc_reg_table phw_tonga_mc_reg_table;
-
-#define DISABLE_MC_LOADMICROCODE 1
-#define DISABLE_MC_CFGPROGRAMMING 2
-
-/* Ultra Low Voltage parameter structure */
-struct _phw_tonga_ulv_parm {
- bool ulv_supported;
- uint32_t ch_ulv_parameter;
- uint32_t ulv_volt_change_delay;
- struct tonga_performance_level ulv_power_level;
-};
-typedef struct _phw_tonga_ulv_parm phw_tonga_ulv_parm;
-
-#define TONGA_MAX_LEAKAGE_COUNT 8
-
-struct _phw_tonga_leakage_voltage {
- uint16_t count;
- uint16_t leakage_id[TONGA_MAX_LEAKAGE_COUNT];
- uint16_t actual_voltage[TONGA_MAX_LEAKAGE_COUNT];
-};
-typedef struct _phw_tonga_leakage_voltage phw_tonga_leakage_voltage;
-
-struct _phw_tonga_display_timing {
- uint32_t min_clock_insr;
- uint32_t num_existing_displays;
-};
-typedef struct _phw_tonga_display_timing phw_tonga_display_timing;
-
-struct _phw_tonga_dpmlevel_enable_mask {
- uint32_t uvd_dpm_enable_mask;
- uint32_t vce_dpm_enable_mask;
- uint32_t acp_dpm_enable_mask;
- uint32_t samu_dpm_enable_mask;
- uint32_t sclk_dpm_enable_mask;
- uint32_t mclk_dpm_enable_mask;
- uint32_t pcie_dpm_enable_mask;
-};
-typedef struct _phw_tonga_dpmlevel_enable_mask phw_tonga_dpmlevel_enable_mask;
-
-struct _phw_tonga_pcie_perf_range {
- uint16_t max;
- uint16_t min;
-};
-typedef struct _phw_tonga_pcie_perf_range phw_tonga_pcie_perf_range;
-
-struct _phw_tonga_vbios_boot_state {
- uint16_t mvdd_bootup_value;
- uint16_t vddc_bootup_value;
- uint16_t vddci_bootup_value;
- uint16_t vddgfx_bootup_value;
- uint32_t sclk_bootup_value;
- uint32_t mclk_bootup_value;
- uint16_t pcie_gen_bootup_value;
- uint16_t pcie_lane_bootup_value;
-};
-typedef struct _phw_tonga_vbios_boot_state phw_tonga_vbios_boot_state;
-
-#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
-#define DPMTABLE_UPDATE_SCLK 0x00000004
-#define DPMTABLE_UPDATE_MCLK 0x00000008
-
-/* We need to review which fields are needed. */
-/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. */
-struct tonga_hwmgr {
- struct tonga_dpm_table dpm_table;
- struct tonga_dpm_table golden_dpm_table;
-
- uint32_t voting_rights_clients0;
- uint32_t voting_rights_clients1;
- uint32_t voting_rights_clients2;
- uint32_t voting_rights_clients3;
- uint32_t voting_rights_clients4;
- uint32_t voting_rights_clients5;
- uint32_t voting_rights_clients6;
- uint32_t voting_rights_clients7;
- uint32_t static_screen_threshold_unit;
- uint32_t static_screen_threshold;
- uint32_t voltage_control;
- uint32_t vdd_gfx_control;
-
- uint32_t vddc_vddci_delta;
- uint32_t vddc_vddgfx_delta;
-
- struct pp_interrupt_registration_info internal_high_thermal_interrupt_info;
- struct pp_interrupt_registration_info internal_low_thermal_interrupt_info;
- struct pp_interrupt_registration_info smc_to_host_interrupt_info;
- uint32_t active_auto_throttle_sources;
-
- struct pp_interrupt_registration_info external_throttle_interrupt;
- irq_handler_func_t external_throttle_callback;
- void *external_throttle_context;
-
- struct pp_interrupt_registration_info ctf_interrupt_info;
- irq_handler_func_t ctf_callback;
- void *ctf_context;
-
- phw_tonga_clock_registers clock_registers;
- phw_tonga_voltage_smio_registers voltage_smio_registers;
-
- bool is_memory_GDDR5;
- uint16_t acpi_vddc;
- bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
- uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
- uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
- uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
- uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
- uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
- phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
- phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
- phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */
-
- uint32_t mvdd_control;
- uint32_t vddc_mask_low;
- uint32_t mvdd_mask_low;
- uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/
- uint16_t min_vddc_in_pp_table;
- uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */
- uint16_t min_vddci_in_pp_table;
- uint32_t mclk_strobe_mode_threshold;
- uint32_t mclk_stutter_mode_threshold;
- uint32_t mclk_edc_enable_threshold;
- uint32_t mclk_edc_wr_enable_threshold;
- bool is_uvd_enabled;
- bool is_xdma_enabled;
- phw_tonga_vbios_boot_state vbios_boot_state;
-
- bool battery_state;
- bool is_tlu_enabled;
- bool pcie_performance_request;
-
- /* -------------- SMC SRAM Address of firmware header tables ----------------*/
- uint32_t sram_end; /* The first address after the SMC SRAM. */
- uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */
- uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */
- uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */
- uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */
- uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */
- SMU72_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */
- SMU72_Discrete_MCRegisters mc_reg_table;
- SMU72_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. */
- /* -------------- Stuff originally coming from Evergreen --------------------*/
- phw_tonga_mc_reg_table tonga_mc_reg_table;
- uint32_t vdd_ci_control;
- pp_atomctrl_voltage_table vddc_voltage_table;
- pp_atomctrl_voltage_table vddci_voltage_table;
- pp_atomctrl_voltage_table vddgfx_voltage_table;
- pp_atomctrl_voltage_table mvdd_voltage_table;
-
- uint32_t mgcg_cgtt_local2;
- uint32_t mgcg_cgtt_local3;
- uint32_t gpio_debug;
- uint32_t mc_micro_code_feature;
- uint32_t highest_mclk;
- uint16_t acpi_vdd_ci;
- uint8_t mvdd_high_index;
- uint8_t mvdd_low_index;
- bool dll_defaule_on;
- bool performance_request_registered;
-
- /* ----------------- Low Power Features ---------------------*/
- phw_tonga_bacos bacos;
- phw_tonga_ulv_parm ulv;
- /* ----------------- CAC Stuff ---------------------*/
- uint32_t cac_table_start;
- bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */
- bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */
- bool cac_enabled;
- /* ----------------- DPM2 Parameters ---------------------*/
- uint32_t power_containment_features;
- bool enable_bapm_feature;
- bool enable_tdc_limit_feature;
- bool enable_pkg_pwr_tracking_feature;
- bool disable_uvd_power_tune_feature;
- phw_tonga_pt_defaults *power_tune_defaults;
- SMU72_Discrete_PmFuses power_tune_table;
- uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */
- uint32_t fast_watemark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */
-
- /* ----------------- Phase Shedding ---------------------*/
- bool vddc_phase_shed_control;
- /* --------------------- DI/DT --------------------------*/
- phw_tonga_display_timing display_timing;
- /* --------- ReadRegistry data for memory and engine clock margins ---- */
- uint32_t engine_clock_data;
- uint32_t memory_clock_data;
- /* -------- Thermal Temperature Setting --------------*/
- phw_tonga_dpmlevel_enable_mask dpm_level_enable_mask;
- uint32_t need_update_smu7_dpm_table;
- uint32_t sclk_dpm_key_disabled;
- uint32_t mclk_dpm_key_disabled;
- uint32_t pcie_dpm_key_disabled;
- uint32_t min_engine_clocks; /* used to store the previous dal min sclock */
- phw_tonga_pcie_perf_range pcie_gen_performance;
- phw_tonga_pcie_perf_range pcie_lane_performance;
- phw_tonga_pcie_perf_range pcie_gen_power_saving;
- phw_tonga_pcie_perf_range pcie_lane_power_saving;
- bool use_pcie_performance_levels;
- bool use_pcie_power_saving_levels;
- uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS]; /* percentage value from 0-100, default 50 */
- uint32_t mclk_activity_target;
- uint32_t low_sclk_interrupt_threshold;
- uint32_t last_mclk_dpm_enable_mask;
- bool uvd_enabled;
- uint32_t pcc_monitor_enabled;
-
- /* --------- Power Gating States ------------*/
- bool uvd_power_gated; /* 1: gated, 0:not gated */
- bool vce_power_gated; /* 1: gated, 0:not gated */
- bool samu_power_gated; /* 1: gated, 0:not gated */
- bool acp_power_gated; /* 1: gated, 0:not gated */
- bool pg_acp_init;
-};
-
-typedef struct tonga_hwmgr tonga_hwmgr;
-
-#define TONGA_DPM2_NEAR_TDP_DEC 10
-#define TONGA_DPM2_ABOVE_SAFE_INC 5
-#define TONGA_DPM2_BELOW_SAFE_INC 20
-
-#define TONGA_DPM2_LTA_WINDOW_SIZE 7 /* Log2 of the LTA window size (l2numWin_TDP). E.g. if the LTA window size is 128, this value should be Log2(128) = 7. */
-
-#define TONGA_DPM2_LTS_TRUNCATE 0
-
-#define TONGA_DPM2_TDP_SAFE_LIMIT_PERCENT 80 /* Maximum 100 */
-
-#define TONGA_DPM2_MAXPS_PERCENT_H 90 /* Maximum 0xFF */
-#define TONGA_DPM2_MAXPS_PERCENT_M 90 /* Maximum 0xFF */
-
-#define TONGA_DPM2_PWREFFICIENCYRATIO_MARGIN 50
-
-#define TONGA_DPM2_SQ_RAMP_MAX_POWER 0x3FFF
-#define TONGA_DPM2_SQ_RAMP_MIN_POWER 0x12
-#define TONGA_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15
-#define TONGA_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E
-#define TONGA_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF
-
-#define TONGA_VOLTAGE_CONTROL_NONE 0x0
-#define TONGA_VOLTAGE_CONTROL_BY_GPIO 0x1
-#define TONGA_VOLTAGE_CONTROL_BY_SVID2 0x2
-#define TONGA_VOLTAGE_CONTROL_MERGED 0x3
-
-#define TONGA_Q88_FORMAT_CONVERSION_UNIT 256 /*To convert to Q8.8 format for firmware */
-
-#define TONGA_UNUSED_GPIO_PIN 0x7F
-
-int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
-int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
-int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int tonga_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
-int tonga_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
deleted file mode 100644
index 47ef1ca2d78b..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
+++ /dev/null
@@ -1,590 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <asm/div64.h>
-#include "tonga_thermal.h"
-#include "tonga_hwmgr.h"
-#include "tonga_smumgr.h"
-#include "tonga_ppsmc.h"
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-/**
-* Get Fan Speed Control Parameters.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param fan_speed_info the address of the structure where the result is placed.
-* @exception Always succeeds.
-*/
-int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info)
-{
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- fan_speed_info->supports_percent_read = true;
- fan_speed_info->supports_percent_write = true;
- fan_speed_info->min_percent = 0;
- fan_speed_info->max_percent = 100;
-
- if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
- fan_speed_info->supports_rpm_read = true;
- fan_speed_info->supports_rpm_write = true;
- fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
- fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
- } else {
- fan_speed_info->min_rpm = 0;
- fan_speed_info->max_rpm = 0;
- }
-
- return 0;
-}
-
-/**
-* Get Fan Speed in percent.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed the address where the fan speed percentage is placed.
-* @exception Fails if the 100% duty setting reads back as 0.
-*/
-int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
- uint32_t duty100;
- uint32_t duty;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
- duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY);
-
- if (0 == duty100)
- return -EINVAL;
-
- tmp64 = (uint64_t)duty * 100;
- do_div(tmp64, duty100);
- *speed = (uint32_t)tmp64;
-
- if (*speed > 100)
- *speed = 100;
-
- return 0;
-}
-
-/**
-* Get Fan Speed in RPM.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed the address where the result is placed.
-* @exception Returns not-supported if no fan is found or if pulses per revolution are not set.
-*/
-int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
-{
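- /* RPM readback is not implemented for Tonga; report success without a reading */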
- return 0;
-}
-
-/**
-* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param mode the fan control mode: 0 default, 1 by percent, 5 by RPM.
-* @exception Should always succeed.
-*/
-int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
- if (hwmgr->fan_ctrl_is_in_default_mode) {
- hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE);
- hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN);
- hwmgr->fan_ctrl_is_in_default_mode = false;
- }
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode);
-
- return 0;
-}
-
-/**
-* Reset Fan Speed Control to default mode.
-* @param hwmgr the address of the powerplay hardware manager.
-* @exception Should always succeed.
-*/
-int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
-{
- if (!hwmgr->fan_ctrl_is_in_default_mode) {
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin);
- hwmgr->fan_ctrl_is_in_default_mode = true;
- }
-
- return 0;
-}
-
-int tonga_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
-/*
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM))
- hwmgr->set_max_fan_rpm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM);
- else
- hwmgr->set_max_fan_pwm_output(hwmgr, hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM);
-*/
- } else {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL;
- }
-/* TODO: for some device IDs (0x692b) sending this message returns an invalid command.
- if (result == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature != 0)
- result = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanTemperatureTarget, \
- hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature) ? 0 : -EINVAL);
-*/
- return result;
-}
-
-
-int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
-{
- return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL;
-}
-
-/**
-* Set Fan Speed in percent.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed the percentage value (0% - 100%) to be set.
-* @exception Fails if the 100% duty setting reads back as 0.
-*/
-int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
- uint32_t duty100;
- uint32_t duty;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return -EINVAL;
-
- if (speed > 100)
- speed = 100;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
- tonga_fan_ctrl_stop_smc_fan_control(hwmgr);
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (0 == duty100)
- return -EINVAL;
-
- tmp64 = (uint64_t)speed * duty100;
- do_div(tmp64, 100);
- duty = (uint32_t)tmp64;
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
-
- return tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
-}
-
-/**
-* Reset Fan Speed to default.
-* @param hwmgr the address of the powerplay hardware manager.
-* @exception Always succeeds.
-*/
-int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
- result = tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
- if (0 == result)
- result = tonga_fan_ctrl_start_smc_fan_control(hwmgr);
- } else
- result = tonga_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
-}
-
-/**
-* Set Fan Speed in RPM.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param speed the RPM value (between min and max) to be set.
-* @exception Fails if the speed does not lie between min and max.
-*/
-int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
-{
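- /* RPM-based fan speed control is not implemented for Tonga; silently succeed */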
- return 0;
-}
-
-/**
-* Reads the remote temperature from the Tonga thermal controller.
-*
-* @param hwmgr The address of the hardware manager.
-*/
-int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr)
-{
- int temp;
-
- temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP);
-
-/* Bit 9 means the reading is lower than the lowest usable value. */
- if (0 != (0x200 & temp))
- temp = TONGA_THERMAL_MAXIMUM_TEMP_READING;
- else
- temp = (temp & 0x1ff);
-
- temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
- return temp;
-}
-
-/**
-* Set the requested temperature range for high and low alert signals.
-*
-* @param hwmgr The address of the hardware manager.
-* @param low_temp The requested low alert temperature.
-* @param high_temp The requested high alert temperature.
-* @exception PP_Result_BadInput if the input data is not valid.
-*/
-static int tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp)
-{
- uint32_t low = TONGA_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t high = TONGA_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
-
- if (low < low_temp)
- low = low_temp;
- if (high > high_temp)
- high = high_temp;
-
- if (low > high)
- return -EINVAL;
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
-
- return 0;
-}
-
-/**
-* Programs thermal controller one-time setting registers
-*
-* @param hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_TACH_CTRL, EDGE_PER_REV,
- hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1);
-
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
-
- return 0;
-}
-
-/**
-* Enable thermal alerts on the Tonga thermal controller.
-*
-* @param hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr)
-{
- uint32_t alert;
-
- alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
- alert &= ~(TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
-
- /* send message to SMU to enable internal thermal interrupts */
- return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1;
-}
-
-/**
-* Disable thermal alerts on the Tonga thermal controller.
-* @param hwmgr The address of the hardware manager.
-*/
-static int tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr)
-{
- uint32_t alert;
-
- alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK);
- alert |= (TONGA_THERMAL_HIGH_ALERT_MASK | TONGA_THERMAL_LOW_ALERT_MASK);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert);
-
- /* send message to SMU to disable internal thermal interrupts */
- return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1;
-}
-
-/**
-* Uninitialize the thermal controller.
-* Currently just disables alerts.
-* @param hwmgr The address of the hardware manager.
-*/
-int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
-{
- int result = tonga_thermal_disable_alert(hwmgr);
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- tonga_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return always 0; on setup failure the MicrocodeFanControl capability is cleared instead.
-*/
-int tf_tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
- return 0;
-
- if (0 == data->fan_table_start) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (0 == duty100) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
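- /* usPWMMin is in hundredths of a percent, so divide by 10000 to scale duty100 */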
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
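- /* fan curve slopes (dPWM/dT); the +50 rounds the final /100 division to nearest */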
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
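- /* temperatures are in 0.01 degC units; adding 50 rounds the /100 conversion to whole degrees */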
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = tonga_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- fan_table.FanControl_GL_Flag = 1;
-
- res = tonga_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end);
-/* TODO: for some device IDs (0x692b) sending this message returns an invalid command.
- if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0)
- res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \
- hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1);
-
- if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0)
- res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \
- hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1);
-
- if (0 != res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
-*/
- return 0;
-}
-
-/**
-* Start the fan control on the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return always 0.
-*/
-int tf_tonga_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
-/* If the fan table setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table.
- * Make sure that we still think controlling the fan is OK.
- */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) {
- tonga_fan_ctrl_start_smc_fan_control(hwmgr);
- tonga_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
- }
-
- return 0;
-}
-
-/**
-* Set temperature range for high and low alerts.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return result from the set temperature range routine
-*/
-int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
-
- if (range == NULL)
- return -EINVAL;
-
- return tonga_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return result from the initialize thermal controller routine
-*/
-int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- return tonga_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return result from the enable alert routine
-*/
-int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- return tonga_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param input the pointer to input data
-* @param output the pointer to output data
-* @param storage the pointer to temporary storage
-* @param result the last failure code
-* @return result from the disable alert routine
-*/
-static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result)
-{
- return tonga_thermal_disable_alert(hwmgr);
-}
-
-static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
- { NULL, tf_tonga_thermal_initialize },
- { NULL, tf_tonga_thermal_set_temperature_range },
- { NULL, tf_tonga_thermal_enable_alert },
-/* We should restrict performance levels to low before we halt the SMC.
- * On the other hand we are still in boot state when we do this so it would be pointless.
- * If this assumption changes we have to revisit this table.
- */
- { NULL, tf_tonga_thermal_setup_fan_table},
- { NULL, tf_tonga_thermal_start_smc_fan_control},
- { NULL, NULL }
-};
-
-static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- tonga_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
- { NULL, tf_tonga_thermal_disable_alert},
- { NULL, tf_tonga_thermal_set_temperature_range},
- { NULL, tf_tonga_thermal_enable_alert},
- { NULL, NULL }
-};
-
-static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- tonga_thermal_set_temperature_range_master_list
-};
-
-int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
-{
- if (!hwmgr->thermal_controller.fanInfo.bNoFan)
- tonga_fan_ctrl_set_default_mode(hwmgr);
- return 0;
-}
-
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr, &tonga_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range));
-
- if (0 == result) {
- result = phm_construct_table(hwmgr,
- &tonga_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (0 != result)
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- }
-
- if (0 == result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
deleted file mode 100644
index aa335f267e25..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef TONGA_THERMAL_H
-#define TONGA_THERMAL_H
-
-#include "hwmgr.h"
-
-#define TONGA_THERMAL_HIGH_ALERT_MASK 0x1
-#define TONGA_THERMAL_LOW_ALERT_MASK 0x2
-
-#define TONGA_THERMAL_MINIMUM_TEMP_READING -256
-#define TONGA_THERMAL_MAXIMUM_TEMP_READING 255
-
-#define TONGA_THERMAL_MINIMUM_ALERT_TEMP 0
-#define TONGA_THERMAL_MAXIMUM_ALERT_TEMP 255
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_tonga_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_tonga_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_tonga_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int tonga_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int tonga_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int tonga_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int tonga_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int tonga_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_tonga_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int tonga_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int tonga_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int tonga_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int tonga_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index b764c8c05ec8..3fb5e57a378b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -29,6 +29,19 @@
#include "amd_shared.h"
#include "cgs_common.h"
+enum amd_pp_sensors {
+ AMDGPU_PP_SENSOR_GFX_SCLK = 0,
+ AMDGPU_PP_SENSOR_VDDNB,
+ AMDGPU_PP_SENSOR_VDDGFX,
+ AMDGPU_PP_SENSOR_UVD_VCLK,
+ AMDGPU_PP_SENSOR_UVD_DCLK,
+ AMDGPU_PP_SENSOR_VCE_ECCLK,
+ AMDGPU_PP_SENSOR_GPU_LOAD,
+ AMDGPU_PP_SENSOR_GFX_MCLK,
+ AMDGPU_PP_SENSOR_GPU_TEMP,
+ AMDGPU_PP_SENSOR_VCE_POWER,
+ AMDGPU_PP_SENSOR_UVD_POWER,
+};
enum amd_pp_event {
AMD_PP_EVENT_INITIALIZE = 0,
@@ -131,9 +144,8 @@ struct amd_pp_init {
struct cgs_device *device;
uint32_t chip_family;
uint32_t chip_id;
- uint32_t rev_id;
- bool powercontainment_enabled;
};
+
enum amd_pp_display_config_type{
AMD_PP_DisplayConfigType_None = 0,
AMD_PP_DisplayConfigType_DP54 ,
@@ -261,6 +273,7 @@ enum amd_pp_clock_type {
struct amd_pp_clocks {
uint32_t count;
uint32_t clock[MAX_NUM_CLOCKS];
+ uint32_t latency[MAX_NUM_CLOCKS];
};
@@ -332,8 +345,6 @@ struct amd_powerplay_funcs {
int (*powergate_uvd)(void *handle, bool gate);
int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
void *input, void *output);
- void (*print_current_performance_level)(void *handle,
- struct seq_file *m);
int (*set_fan_control_mode)(void *handle, uint32_t mode);
int (*get_fan_control_mode)(void *handle);
int (*set_fan_speed_percent)(void *handle, uint32_t percent);
@@ -347,6 +358,7 @@ struct amd_powerplay_funcs {
int (*set_sclk_od)(void *handle, uint32_t value);
int (*get_mclk_od)(void *handle);
int (*set_mclk_od)(void *handle, uint32_t value);
+ int (*read_sensor)(void *handle, int idx, int32_t *value);
};
struct amd_powerplay {
@@ -378,4 +390,6 @@ int amd_powerplay_get_clock_by_type(void *handle,
int amd_powerplay_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *output);
+int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id);
+
#endif /* _AMD_POWERPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 962cb5385951..d4495839c64c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -341,7 +341,6 @@ extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
-extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index bf0d2accf7bf..4f0fedd1e9d3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -31,15 +31,20 @@
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
+#include "power_state.h"
struct pp_instance;
struct pp_hwmgr;
-struct pp_hw_power_state;
-struct pp_power_state;
-struct PP_VCEState;
struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;
+extern int amdgpu_powercontainment;
+extern int amdgpu_sclk_deep_sleep_en;
+extern unsigned amdgpu_pp_feature_mask;
+
+#define VOLTAGE_SCALE 4
+
+uint8_t convert_to_vid(uint16_t vddc);
enum DISPLAY_GAP {
DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */
@@ -49,7 +54,6 @@ enum DISPLAY_GAP {
};
typedef enum DISPLAY_GAP DISPLAY_GAP;
-
struct vi_dpm_level {
bool enabled;
uint32_t value;
@@ -71,6 +75,19 @@ enum PP_Result {
#define PCIE_PERF_REQ_GEN2 3
#define PCIE_PERF_REQ_GEN3 4
+enum PP_FEATURE_MASK {
+ PP_SCLK_DPM_MASK = 0x1,
+ PP_MCLK_DPM_MASK = 0x2,
+ PP_PCIE_DPM_MASK = 0x4,
+ PP_SCLK_DEEP_SLEEP_MASK = 0x8,
+ PP_POWER_CONTAINMENT_MASK = 0x10,
+ PP_UVD_HANDSHAKE_MASK = 0x20,
+ PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
+ PP_VBI_TIME_SUPPORT_MASK = 0x80,
+ PP_ULV_MASK = 0x100,
+ PP_ENABLE_GFX_CG_THRU_SMU = 0x200
+};
+
enum PHM_BackEnd_Magic {
PHM_Dummy_Magic = 0xAA5555AA,
PHM_RV770_Magic = 0xDCBAABCD,
@@ -294,8 +311,6 @@ struct pp_hwmgr_func {
int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
int (*power_state_set)(struct pp_hwmgr *hwmgr,
const void *state);
- void (*print_current_perforce_level)(struct pp_hwmgr *hwmgr,
- struct seq_file *m);
int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr);
int (*display_config_changed)(struct pp_hwmgr *hwmgr);
@@ -342,6 +357,7 @@ struct pp_hwmgr_func {
int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+ int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value);
};
struct pp_table_func {
@@ -351,7 +367,7 @@ struct pp_table_func {
int (*pptable_get_vce_state_table_entry)(
struct pp_hwmgr *hwmgr,
unsigned long i,
- struct PP_VCEState *vce_state,
+ struct pp_vce_state *vce_state,
void **clock_info,
unsigned long *flag);
};
@@ -570,22 +586,43 @@ struct phm_microcode_version_info {
uint32_t NB;
};
+#define PP_MAX_VCE_LEVELS 6
+
+enum PP_VCE_LEVEL {
+ PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
+ PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
+ PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
+ PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
+ PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
+ PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
+};
+
+
+enum PP_TABLE_VERSION {
+ PP_TABLE_V0 = 0,
+ PP_TABLE_V1,
+ PP_TABLE_V2,
+ PP_TABLE_MAX
+};
+
/**
* The main hardware manager structure.
*/
struct pp_hwmgr {
uint32_t chip_family;
uint32_t chip_id;
- uint32_t hw_revision;
- uint32_t sub_sys_id;
- uint32_t sub_vendor_id;
+ uint32_t pp_table_version;
void *device;
struct pp_smumgr *smumgr;
const void *soft_pp_table;
uint32_t soft_pp_table_size;
void *hardcode_pp_table;
bool need_pp_table_upload;
+
+ struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS];
+ uint32_t num_vce_state_tables;
+
enum amd_dpm_forced_level dpm_level;
bool block_hw_access;
struct phm_gfx_arbiter gfx_arbiter;
@@ -614,7 +651,6 @@ struct pp_hwmgr {
uint32_t num_ps;
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
- bool powercontainment_enabled;
uint32_t fan_ctrl_default_mode;
uint32_t tmin;
struct phm_microcode_version_info microcode_version_info;
@@ -624,6 +660,7 @@ struct pp_hwmgr {
struct pp_power_state *boot_ps;
struct pp_power_state *uvd_ps;
struct amd_pp_display_configuration display_config;
+ uint32_t feature_mask;
};
@@ -637,16 +674,7 @@ extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr);
extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
uint32_t value, uint32_t mask);
-extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
- uint32_t index, uint32_t value, uint32_t mask);
-extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port, uint32_t index);
-
-extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value);
extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
@@ -654,12 +682,7 @@ extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t value,
uint32_t mask);
-extern void phm_wait_for_indirect_register_unequal(
- struct pp_hwmgr *hwmgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask);
+
extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr);
@@ -673,6 +696,8 @@ extern void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, st
extern int phm_reset_single_dpm_table(void *table, uint32_t count, int max);
extern void phm_setup_pcie_table_entry(void *table, uint32_t index, uint32_t pcie_gen, uint32_t pcie_lanes);
extern int32_t phm_get_dpm_level_enable_mask_value(void *table);
+extern uint8_t phm_get_voltage_id(struct pp_atomctrl_voltage_table *voltage_table,
+ uint32_t voltage);
extern uint8_t phm_get_voltage_index(struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage);
extern uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci);
extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level);
@@ -683,6 +708,10 @@ extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
+extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t id, uint16_t *voltage);
+
#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
@@ -697,44 +726,6 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
PHM_FIELD_SHIFT(reg, field))
-#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask) \
- phm_wait_on_register(hwmgr, index, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask) \
- phm_wait_for_register_unequal(hwmgr, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
- phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask)
-
-/* Operations on named registers. */
-
-#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask) \
- PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
- PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
- PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
-#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-
/* Operations on named fields. */
#define PHM_READ_FIELD(device, reg, field) \
@@ -762,60 +753,16 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
reg, field, fieldval))
-#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval) \
- PHM_WAIT_REGISTER(hwmgr, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+ phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
-#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
- PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
-#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \
- << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
-#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
- PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \
+#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
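(The surviving macro chain still folds a named-field wait into a single phm_wait_on_indirect_register() call via token pasting of mm##port##_INDEX and ix##reg. Roughly, for illustration, using the SMC firmware-flags register from the SMU headers:

	PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
				INTERRUPTS_ENABLED, 1);
	/* expands to approximately: */
	phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX, ixFIRMWARE_FLAGS,
			1 << FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT,
			FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK);
)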
-/* Operations on arrays of registers & fields. */
-
-#define PHM_READ_ARRAY_REGISTER(device, reg, offset) \
- cgs_read_register(device, mm##reg + (offset))
-
-#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value) \
- cgs_write_register(device, mm##reg + (offset), value)
-
-#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask) \
- PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask) \
- PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask)
-
-#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \
- PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field)
-
-#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \
- PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset, \
- PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), \
- reg, field, fieldvalue))
-
-#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \
- PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), \
- (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
-
-#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue) \
- PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), \
- (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \
- PHM_FIELD_MASK(reg, field))
#endif /* _HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
index f497e7d98e6d..0de443612312 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
@@ -23,8 +23,7 @@
#ifndef _POLARIS10_PWRVIRUS_H
#define _POLARIS10_PWRVIRUS_H
-#define mmSMC_IND_INDEX_11 0x01AC
-#define mmSMC_IND_DATA_11 0x01AD
+
#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a
#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b
#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
index a3f0ce4d5835..9ceaed9ac52a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h
@@ -158,7 +158,7 @@ struct pp_power_state {
/*Structure to hold a VCE state entry*/
-struct PP_VCEState {
+struct pp_vce_state {
uint32_t evclk;
uint32_t ecclk;
uint32_t sclk;
@@ -171,30 +171,28 @@ enum PP_MMProfilingState {
PP_MMProfilingState_Stopped
};
-struct PP_Clock_Engine_Request {
- unsigned long clientType;
- unsigned long ctxid;
+struct pp_clock_engine_request {
+ unsigned long client_type;
+ unsigned long ctx_id;
uint64_t context_handle;
unsigned long sclk;
- unsigned long sclkHardMin;
+ unsigned long sclk_hard_min;
unsigned long mclk;
unsigned long iclk;
unsigned long evclk;
unsigned long ecclk;
- unsigned long ecclkHardMin;
+ unsigned long ecclk_hard_min;
unsigned long vclk;
unsigned long dclk;
- unsigned long samclk;
- unsigned long acpclk;
- unsigned long sclkOverdrive;
- unsigned long mclkOverdrive;
+ unsigned long sclk_over_drive;
+ unsigned long mclk_over_drive;
unsigned long sclk_threshold;
unsigned long flag;
unsigned long vclk_ceiling;
unsigned long dclk_ceiling;
unsigned long num_cus;
- unsigned long pmflag;
- enum PP_MMProfilingState MMProfilingState;
+ unsigned long pm_flag;
+ enum PP_MMProfilingState mm_profiling_state;
};
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index d7d83b7c7f95..bfdbec10cdd5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -43,5 +43,8 @@
} while (0)
+#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n) \
+ (type *)((char *)&(ptr)->member + (sizeof(type) * (n)))
+
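(The new macro computes the address of the n-th element of a trailing array member without trusting the declared bound, which matters for BIOS-style tables that declare a one-element array but actually carry 'count' entries. A usage sketch with hypothetical names:

	struct example_table {
		uint8_t count;
		struct example_entry entries[1];	/* really 'count' long */
	};

	struct example_entry *e =
		GET_FLEXIBLE_ARRAY_MEMBER_ADDR(struct example_entry,
					       entries, table, n);
)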
#endif /* PP_DEBUG_H */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
new file mode 100644
index 000000000000..71c9b2d28640
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu71.h
@@ -0,0 +1,510 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_H
+#define SMU71_H
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__VARIANT__ICELAND 1
+#define SMU__DGPU_ONLY 1
+#define SMU__DYNAMIC_MCARB_SETTINGS 1
+
+enum SID_OPTION {
+ SID_OPTION_HI,
+ SID_OPTION_LO,
+ SID_OPTION_COUNT
+};
+
+typedef struct {
+ uint32_t high;
+ uint32_t low;
+} data_64_t;
+
+typedef struct {
+ data_64_t high;
+ data_64_t low;
+} data_128_t;
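(Wide values cross the driver/SMC boundary as explicit high/low dword pairs, which keeps the packed layout unambiguous on both ends; splitting a native 64-bit value is plain shifting, as in this sketch:

	uint64_t addr = 0x123456789abcdef0ULL;	/* arbitrary example */
	data_64_t d;

	d.high = (uint32_t)(addr >> 32);
	d.low  = (uint32_t)(addr & 0xffffffff);
)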
+
+#define SMU7_CONTEXT_ID_SMC 1
+#define SMU7_CONTEXT_ID_VBIOS 2
+
+#define SMU71_MAX_LEVELS_VDDC 8
+#define SMU71_MAX_LEVELS_VDDCI 4
+#define SMU71_MAX_LEVELS_MVDD 4
+#define SMU71_MAX_LEVELS_VDDNB 8
+
+#define SMU71_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE
+#define SMU71_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS
+#define SMU71_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS
+#define SMU71_MAX_ENTRIES_SMIO 32
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
+
+#define GPIO_CLAMP_MODE_VRHOT 1
+#define GPIO_CLAMP_MODE_THERM 2
+#define GPIO_CLAMP_MODE_DC 4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+
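(SCRATCH_B packs each client's target and current DPM indices into 3-bit fields of one register, so decoding is the usual mask-and-shift; a sketch, where the register value itself would come from the SMC:

	uint32_t scratch_b = 0;	/* as read back from the SMC */
	uint32_t curr_pcie = (scratch_b & SCRATCH_B_CURR_PCIE_INDEX_MASK) >>
			     SCRATCH_B_CURR_PCIE_INDEX_SHIFT;
)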
+
+#if defined SMU__DGPU_ONLY
+#define SMU71_DTE_ITERATIONS 5
+#define SMU71_DTE_SOURCES 3
+#define SMU71_DTE_SINKS 1
+#define SMU71_NUM_CPU_TES 0
+#define SMU71_NUM_GPU_TES 1
+#define SMU71_NUM_NON_TES 2
+
+#endif
+
+#if defined SMU__FUSION_ONLY
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+#endif
+
+struct SMU71_PIDController
+{
+ uint32_t Ki;
+ int32_t LFWindupUpperLim;
+ int32_t LFWindupLowerLim;
+ uint32_t StatePrecision;
+ uint32_t LfPrecision;
+ uint32_t LfOffset;
+ uint32_t MaxState;
+ uint32_t MaxLfFraction;
+ uint32_t StateShift;
+};
+
+typedef struct SMU71_PIDController SMU71_PIDController;
+
+struct SMU7_LocalDpmScoreboard
+{
+ uint32_t PercentageBusy;
+
+ int32_t PIDError;
+ int32_t PIDIntegral;
+ int32_t PIDOutput;
+
+ uint32_t SigmaDeltaAccum;
+ uint32_t SigmaDeltaOutput;
+ uint32_t SigmaDeltaLevel;
+
+ uint32_t UtilizationSetpoint;
+
+ uint8_t TdpClampMode;
+ uint8_t TdcClampMode;
+ uint8_t ThermClampMode;
+ uint8_t VoltageBusy;
+
+ int8_t CurrLevel;
+ int8_t TargLevel;
+ uint8_t LevelChangeInProgress;
+ uint8_t UpHyst;
+
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+
+ uint32_t MinimumPerfSclk;
+
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t GfxClkSlow;
+ uint8_t GpioClampMode;
+
+ uint8_t FpsFilterWeight;
+ uint8_t EnabledLevelsChange;
+ uint8_t DteClampMode;
+ uint8_t FpsClampMode;
+
+ uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS];
+
+ void (*TargetStateCalculator)(uint8_t);
+ void (*SavedTargetStateCalculator)(uint8_t);
+
+ uint16_t AutoDpmInterval;
+ uint16_t AutoDpmRange;
+
+ uint8_t FpsEnabled;
+ uint8_t MaxPerfLevel;
+ uint8_t AllowLowClkInterruptToHost;
+ uint8_t FpsRunning;
+
+ uint32_t MaxAllowedFrequency;
+};
+
+typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
+
+#define SMU7_MAX_VOLTAGE_CLIENTS 12
+
+struct SMU7_VoltageScoreboard
+{
+ uint16_t CurrentVoltage;
+ uint16_t HighestVoltage;
+ uint16_t MaxVid;
+ uint8_t HighestVidOffset;
+ uint8_t CurrentVidOffset;
+#if defined (SMU__DGPU_ONLY)
+ uint8_t CurrentPhases;
+ uint8_t HighestPhases;
+#else
+ uint8_t AvsOffset;
+ uint8_t AvsOffsetApplied;
+#endif
+ uint8_t ControllerBusy;
+ uint8_t CurrentVid;
+ uint16_t RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
+#if defined (SMU__DGPU_ONLY)
+ uint8_t RequestedPhases[SMU7_MAX_VOLTAGE_CLIENTS];
+#endif
+ uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
+ uint8_t TargetIndex;
+ uint8_t Delay;
+ uint8_t ControllerEnable;
+ uint8_t ControllerRunning;
+ uint16_t CurrentStdVoltageHiSidd;
+ uint16_t CurrentStdVoltageLoSidd;
+#if defined (SMU__DGPU_ONLY)
+ uint16_t RequestedVddci;
+ uint16_t CurrentVddci;
+ uint16_t HighestVddci;
+ uint8_t CurrentVddciVid;
+ uint8_t TargetVddciIndex;
+#endif
+};
+
+typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+struct SMU7_PCIeLinkSpeedScoreboard
+{
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+
+ uint8_t CurrentLinkSpeed;
+ uint8_t EnabledLevelsChange;
+ uint16_t AutoDpmInterval;
+
+ uint16_t AutoDpmRange;
+ uint16_t AutoDpmCount;
+
+ uint8_t DpmMode;
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t CurrentLinkLevel;
+
+};
+
+typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
+
+// -------------------------------------------------------- CAC table ------------------------------------------------------
+#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+
+#define SMU7_SCALE_I 7
+#define SMU7_SCALE_R 12
+
+struct SMU7_PowerScoreboard
+{
+ uint16_t MinVoltage;
+ uint16_t MaxVoltage;
+
+ uint32_t AvgGpuPower;
+
+ uint16_t VddcLeakagePower[SID_OPTION_COUNT];
+ uint16_t VddcSclkConstantPower[SID_OPTION_COUNT];
+ uint16_t VddcSclkDynamicPower[SID_OPTION_COUNT];
+ uint16_t VddcNonSclkDynamicPower[SID_OPTION_COUNT];
+ uint16_t VddcTotalPower[SID_OPTION_COUNT];
+ uint16_t VddcTotalCurrent[SID_OPTION_COUNT];
+ uint16_t VddcLoadVoltage[SID_OPTION_COUNT];
+ uint16_t VddcNoLoadVoltage[SID_OPTION_COUNT];
+
+ uint16_t DisplayPhyPower;
+ uint16_t PciePhyPower;
+
+ uint16_t VddciTotalPower;
+ uint16_t Vddr1TotalPower;
+
+ uint32_t RocPower;
+
+ uint32_t last_power;
+ uint32_t enableWinAvg;
+
+ uint32_t lkg_acc;
+ uint16_t VoltLkgeScaler;
+ uint16_t TempLkgeScaler;
+
+ uint32_t uvd_cac_dclk;
+ uint32_t uvd_cac_vclk;
+ uint32_t vce_cac_eclk;
+ uint32_t samu_cac_samclk;
+ uint32_t display_cac_dispclk;
+ uint32_t acp_cac_aclk;
+ uint32_t unb_cac;
+
+ uint32_t WinTime;
+
+ uint16_t GpuPwr_MAWt;
+ uint16_t FilteredVddcTotalPower;
+
+ uint8_t CalculationRepeats;
+ uint8_t WaterfallUp;
+ uint8_t WaterfallDown;
+ uint8_t WaterfallLimit;
+};
+
+typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
+
+// --------------------------------------------------------------------------------------------------
+
+struct SMU7_ThermalScoreboard
+{
+ int16_t GpuLimit;
+ int16_t GpuHyst;
+ uint16_t CurrGnbTemp;
+ uint16_t FilteredGnbTemp;
+ uint8_t ControllerEnable;
+ uint8_t ControllerRunning;
+ uint8_t WaterfallUp;
+ uint8_t WaterfallDown;
+ uint8_t WaterfallLimit;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard;
+
+// For FeatureEnables:
+#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
+#define SMU7_UVD_DPM_CONFIG_MASK 0x10
+#define SMU7_VCE_DPM_CONFIG_MASK 0x20
+#define SMU7_ACP_DPM_CONFIG_MASK 0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
+
+// All 'soft registers' should be uint32_t.
+struct SMU71_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerPeriod;
+ uint32_t FeatureEnables;
+#if defined (SMU__DGPU_ONLY)
+ uint32_t PreVBlankGap;
+ uint32_t VBlankTimeout;
+ uint32_t TrainTimeGap;
+ uint32_t MvddSwitchTime;
+ uint32_t LongestAcpiTrainTime;
+ uint32_t AcpiDelay;
+ uint32_t G5TrainTime;
+ uint32_t DelayMpllPwron;
+ uint32_t VoltageChangeTimeout;
+#endif
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsActivity;
+ uint32_t AverageMemoryActivity;
+ uint32_t AverageGioActivity;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterCount;
+ uint32_t UlvTime;
+ uint32_t UcodeLoadStatus;
+ uint8_t DPMFreezeAndForced;
+ uint8_t Activity_Weight;
+ uint8_t Reserved8[2];
+ uint32_t Reserved;
+};
+
+typedef struct SMU71_SoftRegisters SMU71_SoftRegisters;
+
+struct SMU71_Firmware_Header
+{
+ uint32_t Digest[5];
+ uint32_t Version;
+ uint32_t HeaderSize;
+ uint32_t Flags;
+ uint32_t EntryPoint;
+ uint32_t CodeSize;
+ uint32_t ImageSize;
+
+ uint32_t Rtos;
+ uint32_t SoftRegisters;
+ uint32_t DpmTable;
+ uint32_t FanTable;
+ uint32_t CacConfigTable;
+ uint32_t CacStatusTable;
+
+ uint32_t mcRegisterTable;
+
+ uint32_t mcArbDramTimingTable;
+
+ uint32_t PmFuseTable;
+ uint32_t Globals;
+ uint32_t UvdDpmTable;
+ uint32_t AcpDpmTable;
+ uint32_t VceDpmTable;
+ uint32_t SamuDpmTable;
+ uint32_t UlvSettings;
+ uint32_t Reserved[37];
+ uint32_t Signature;
+};
+
+typedef struct SMU71_Firmware_Header SMU71_Firmware_Header;
+
+struct SMU7_HystController_Data
+{
+ uint8_t waterfall_up;
+ uint8_t waterfall_down;
+ uint8_t pstate;
+ uint8_t clamp_mode;
+};
+
+typedef struct SMU7_HystController_Data SMU7_HystController_Data;
+
+#define SMU71_FIRMWARE_HEADER_LOCATION 0x20000
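(Tables are located by reading the header at this fixed SRAM offset and adding its stored offsets; a sketch, with read_smc_sram_dword() standing in for whatever SRAM accessor the SMU manager provides:

	uint32_t soft_regs_start;

	/* offsetof() into the packed header gives the dword to fetch */
	read_smc_sram_dword(SMU71_FIRMWARE_HEADER_LOCATION +
			    offsetof(SMU71_Firmware_Header, SoftRegisters),
			    &soft_regs_start);
)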
+
+enum DisplayConfig {
+ PowerDown = 1,
+ DP54x4,
+ DP54x2,
+ DP54x1,
+ DP27x4,
+ DP27x2,
+ DP27x1,
+ HDMI297,
+ HDMI162,
+ LVDS,
+ DP324x4,
+ DP324x2,
+ DP324x1
+};
+
+//#define SX_BLOCK_COUNT 8
+//#define MC_BLOCK_COUNT 1
+//#define CPL_BLOCK_COUNT 27
+
+#if defined SMU__VARIANT__ICELAND
+ #define SX_BLOCK_COUNT 8
+ #define MC_BLOCK_COUNT 1
+ #define CPL_BLOCK_COUNT 29
+#endif
+
+struct SMU7_Local_Cac {
+ uint8_t BlockId;
+ uint8_t SignalId;
+ uint8_t Threshold;
+ uint8_t Padding;
+};
+
+typedef struct SMU7_Local_Cac SMU7_Local_Cac;
+
+struct SMU7_Local_Cac_Table {
+ SMU7_Local_Cac SxLocalCac[SX_BLOCK_COUNT];
+ SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
+ SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
+};
+
+typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
new file mode 100644
index 000000000000..c0e3936d5c2e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU71_DISCRETE_H
+#define SMU71_DISCRETE_H
+
+#include "smu71.h"
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(push, 1)
+#endif
+
+#define VDDC_ON_SVI2 0x1
+#define VDDCI_ON_SVI2 0x2
+#define MVDD_ON_SVI2 0x4
+
+struct SMU71_Discrete_VoltageLevel
+{
+ uint16_t Voltage;
+ uint16_t StdVoltageHiSidd;
+ uint16_t StdVoltageLoSidd;
+ uint8_t Smio;
+ uint8_t padding;
+};
+
+typedef struct SMU71_Discrete_VoltageLevel SMU71_Discrete_VoltageLevel;
+
+struct SMU71_Discrete_GraphicsLevel
+{
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+
+ uint32_t SclkFrequency;
+
+ uint8_t pcieDpmLevel;
+ uint8_t DeepSleepDivId;
+ uint16_t ActivityLevel;
+
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpHyst;
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t PowerThrottle;
+};
+
+typedef struct SMU71_Discrete_GraphicsLevel SMU71_Discrete_GraphicsLevel;
+
+struct SMU71_Discrete_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding;
+ uint32_t CgSpllFuncCntl;
+ uint32_t CgSpllFuncCntl2;
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+};
+
+typedef struct SMU71_Discrete_ACPILevel SMU71_Discrete_ACPILevel;
+
+struct SMU71_Discrete_Ulv
+{
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint16_t VddcOffset;
+ uint8_t VddcOffsetVid;
+ uint8_t VddcPhase;
+ uint32_t Reserved;
+};
+
+typedef struct SMU71_Discrete_Ulv SMU71_Discrete_Ulv;
+
+struct SMU71_Discrete_MemoryLevel
+{
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t MinVddci;
+ uint32_t MinMvdd;
+
+ uint32_t MclkFrequency;
+
+ uint8_t EdcReadEnable;
+ uint8_t EdcWriteEnable;
+ uint8_t RttEnable;
+ uint8_t StutterEnable;
+
+ uint8_t StrobeEnable;
+ uint8_t StrobeRatio;
+ uint8_t EnabledForThrottle;
+ uint8_t EnabledForActivity;
+
+ uint8_t UpHyst;
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t padding;
+
+ uint16_t ActivityLevel;
+ uint8_t DisplayWatermark;
+ uint8_t padding1;
+
+ uint32_t MpllFuncCntl;
+ uint32_t MpllFuncCntl_1;
+ uint32_t MpllFuncCntl_2;
+ uint32_t MpllAdFuncCntl;
+ uint32_t MpllDqFuncCntl;
+ uint32_t MclkPwrmgtCntl;
+ uint32_t DllCntl;
+ uint32_t MpllSs1;
+ uint32_t MpllSs2;
+};
+
+typedef struct SMU71_Discrete_MemoryLevel SMU71_Discrete_MemoryLevel;
+
+struct SMU71_Discrete_LinkLevel
+{
+ uint8_t PcieGenSpeed; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3
+ uint8_t PcieLaneCount; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
+ uint8_t EnabledForActivity;
+ uint8_t SPC;
+ uint32_t DownThreshold;
+ uint32_t UpThreshold;
+ uint32_t Reserved;
+};
+
+typedef struct SMU71_Discrete_LinkLevel SMU71_Discrete_LinkLevel;
+
+
+#ifdef SMU__DYNAMIC_MCARB_SETTINGS
+// MC ARB DRAM Timing registers.
+struct SMU71_Discrete_MCArbDramTimingTableEntry
+{
+ uint32_t McArbDramTiming;
+ uint32_t McArbDramTiming2;
+ uint8_t McArbBurstTime;
+ uint8_t padding[3];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTableEntry SMU71_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU71_Discrete_MCArbDramTimingTable
+{
+ SMU71_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU71_Discrete_MCArbDramTimingTable SMU71_Discrete_MCArbDramTimingTable;
+#endif
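(The arbiter table is indexed by both DPM levels, giving every sclk/mclk combination its own DRAM timing set; lookup is a plain 2-D index, sketched here:

	SMU71_Discrete_MCArbDramTimingTable arb_table;
	uint32_t sclk_level = 0, mclk_level = 0;	/* chosen DPM levels */
	SMU71_Discrete_MCArbDramTimingTableEntry *entry =
		&arb_table.entries[sclk_level][mclk_level];
)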
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU71_Discrete_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddc;
+ uint8_t MinVddcPhases;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+ uint8_t padding[3];
+};
+
+typedef struct SMU71_Discrete_UvdLevel SMU71_Discrete_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU71_Discrete_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t MinPhases;
+ uint8_t Divider;
+};
+
+typedef struct SMU71_Discrete_ExtClkLevel SMU71_Discrete_ExtClkLevel;
+
+// Everything that we need to keep track of about the current state.
+// Use this instead of copies of the GraphicsLevel and MemoryLevel structures to keep track of state parameters
+// that need to be checked later.
+// We don't need to cache everything about a state, just a few parameters.
+struct SMU71_Discrete_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t MclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint16_t MvddVoltage;
+ uint16_t padding16;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ uint8_t McRegIndex;
+ uint8_t SeqIndex;
+ uint8_t SclkDid;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+ uint8_t PCIeGen;
+
+};
+
+typedef struct SMU71_Discrete_StateInfo SMU71_Discrete_StateInfo;
+
+
+struct SMU71_Discrete_DpmTable
+{
+ // Multi-DPM controller settings
+ SMU71_PIDController GraphicsPIDController;
+ SMU71_PIDController MemoryPIDController;
+ SMU71_PIDController LinkPIDController;
+
+ uint32_t SystemFlags;
+
+ // SMIO masks for voltage and phase controls
+ uint32_t SmioMaskVddcVid;
+ uint32_t SmioMaskVddcPhase;
+ uint32_t SmioMaskVddciVid;
+ uint32_t SmioMaskMvddVid;
+
+ uint32_t VddcLevelCount;
+ uint32_t VddciLevelCount;
+ uint32_t MvddLevelCount;
+
+ SMU71_Discrete_VoltageLevel VddcLevel [SMU71_MAX_LEVELS_VDDC];
+ SMU71_Discrete_VoltageLevel VddciLevel [SMU71_MAX_LEVELS_VDDCI];
+ SMU71_Discrete_VoltageLevel MvddLevel [SMU71_MAX_LEVELS_MVDD];
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t MemoryDpmLevelCount;
+ uint8_t LinkLevelCount;
+ uint8_t MasterDeepSleepControl;
+
+ uint32_t Reserved[5];
+
+ // State table entries for each DPM state
+ SMU71_Discrete_GraphicsLevel GraphicsLevel [SMU71_MAX_LEVELS_GRAPHICS];
+ SMU71_Discrete_MemoryLevel MemoryACPILevel;
+ SMU71_Discrete_MemoryLevel MemoryLevel [SMU71_MAX_LEVELS_MEMORY];
+ SMU71_Discrete_LinkLevel LinkLevel [SMU71_MAX_LEVELS_LINK];
+ SMU71_Discrete_ACPILevel ACPILevel;
+
+ uint32_t SclkStepSize;
+ uint32_t Smio [SMU71_MAX_ENTRIES_SMIO];
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsVoltageChangeEnable;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsInterval;
+
+ uint8_t VoltageInterval;
+ uint8_t ThermalInterval;
+ uint16_t TemperatureLimitHigh;
+
+ uint16_t TemperatureLimitLow;
+ uint8_t MemoryBootLevel;
+ uint8_t MemoryVoltageChangeEnable;
+
+ uint8_t MemoryInterval;
+ uint8_t MemoryThermThrottleEnable;
+ uint8_t MergedVddci;
+ uint8_t padding2;
+
+ uint16_t VoltageResponseTime;
+ uint16_t PhaseResponseTime;
+
+ uint8_t PCIeBootLinkLevel;
+ uint8_t PCIeGenInterval;
+ uint8_t DTEInterval;
+ uint8_t DTEMode;
+
+ uint8_t SVI2Enable;
+ uint8_t VRHotGpio;
+ uint8_t AcDcGpio;
+ uint8_t ThermGpio;
+
+ uint32_t DisplayCac;
+
+ uint16_t MaxPwr;
+ uint16_t NomPwr;
+
+ uint16_t FpsHighThreshold;
+ uint16_t FpsLowThreshold;
+
+ uint16_t BAPMTI_R [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+ uint16_t BAPMTI_RC [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS];
+
+ uint8_t DTEAmbientTempBase;
+ uint8_t DTETjOffset;
+ uint8_t GpuTjMax;
+ uint8_t GpuTjHyst;
+
+ uint16_t BootVddc;
+ uint16_t BootVddci;
+
+ uint16_t BootMVdd;
+ uint16_t padding;
+
+ uint32_t BAPM_TEMP_GRADIENT;
+
+ uint32_t LowSclkInterruptThreshold;
+ uint32_t VddGfxReChkWait;
+
+ uint16_t PPM_PkgPwrLimit;
+ uint16_t PPM_TemperatureLimit;
+
+ uint16_t DefaultTdp;
+ uint16_t TargetTdp;
+};
+
+typedef struct SMU71_Discrete_DpmTable SMU71_Discrete_DpmTable;
+
+// --------------------------------------------------- AC Timing Parameters ------------------------------------------------
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU71_MAX_LEVELS_MEMORY
+
+struct SMU71_Discrete_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMU71_Discrete_MCRegisterAddress SMU71_Discrete_MCRegisterAddress;
+
+struct SMU71_Discrete_MCRegisterSet
+{
+ uint32_t value[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU71_Discrete_MCRegisterSet SMU71_Discrete_MCRegisterSet;
+
+struct SMU71_Discrete_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMU71_Discrete_MCRegisterAddress address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+ SMU71_Discrete_MCRegisterSet data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU71_Discrete_MCRegisters SMU71_Discrete_MCRegisters;
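(address[] carries one register address pair per slot and data[] one value set per memory DPM level, so programming level 'lvl' means writing data[lvl].value[i] through address[i] for the first 'last' slots. A sketch using the cgs_write_register() accessor seen in the PHM macros; treating 's1' as the offset written at runtime is an assumption:

	uint8_t i;

	for (i = 0; i < mc_regs.last; i++)
		cgs_write_register(device, mc_regs.address[i].s1,
				   mc_regs.data[lvl].value[i]);
)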
+
+
+// --------------------------------------------------- Fan Table -----------------------------------------------------------
+struct SMU71_Discrete_FanTable
+{
+ uint16_t FdoMode;
+ int16_t TempMin;
+ int16_t TempMed;
+ int16_t TempMax;
+ int16_t Slope1;
+ int16_t Slope2;
+ int16_t FdoMin;
+ int16_t HystUp;
+ int16_t HystDown;
+ int16_t HystSlope;
+ int16_t TempRespLim;
+ int16_t TempCurr;
+ int16_t SlopeCurr;
+ int16_t PwmCurr;
+ uint32_t RefreshPeriod;
+ int16_t FdoMax;
+ uint8_t TempSrc;
+ int8_t Padding;
+};
+
+typedef struct SMU71_Discrete_FanTable SMU71_Discrete_FanTable;
+
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
+
+struct SMU71_MclkDpmScoreboard
+{
+
+ uint32_t PercentageBusy;
+
+ int32_t PIDError;
+ int32_t PIDIntegral;
+ int32_t PIDOutput;
+
+ uint32_t SigmaDeltaAccum;
+ uint32_t SigmaDeltaOutput;
+ uint32_t SigmaDeltaLevel;
+
+ uint32_t UtilizationSetpoint;
+
+ uint8_t TdpClampMode;
+ uint8_t TdcClampMode;
+ uint8_t ThermClampMode;
+ uint8_t VoltageBusy;
+
+ int8_t CurrLevel;
+ int8_t TargLevel;
+ uint8_t LevelChangeInProgress;
+ uint8_t UpHyst;
+
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+
+ uint32_t MinimumPerfMclk;
+
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t MclkSwitchInProgress;
+ uint8_t MclkSwitchCritical;
+
+ uint8_t TargetMclkIndex;
+ uint8_t TargetMvddIndex;
+ uint8_t MclkSwitchResult;
+
+ uint8_t EnabledLevelsChange;
+
+ uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_MEMORY];
+ uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_MEMORY];
+
+ void (*TargetStateCalculator)(uint8_t);
+ void (*SavedTargetStateCalculator)(uint8_t);
+
+ uint16_t AutoDpmInterval;
+ uint16_t AutoDpmRange;
+
+ uint16_t MclkSwitchingTime;
+ uint8_t padding[2];
+};
+
+typedef struct SMU71_MclkDpmScoreboard SMU71_MclkDpmScoreboard;
+
+struct SMU71_UlvScoreboard
+{
+ uint8_t EnterUlv;
+ uint8_t ExitUlv;
+ uint8_t UlvActive;
+ uint8_t WaitingForUlv;
+ uint8_t UlvEnable;
+ uint8_t UlvRunning;
+ uint8_t UlvMasterEnable;
+ uint8_t padding;
+ uint32_t UlvAbortedCount;
+ uint32_t UlvTimeStamp;
+};
+
+typedef struct SMU71_UlvScoreboard SMU71_UlvScoreboard;
+
+struct SMU71_VddGfxScoreboard
+{
+ uint8_t VddGfxEnable;
+ uint8_t VddGfxActive;
+ uint8_t padding[2];
+
+ uint32_t VddGfxEnteredCount;
+ uint32_t VddGfxAbortedCount;
+};
+
+typedef struct SMU71_VddGfxScoreboard SMU71_VddGfxScoreboard;
+
+struct SMU71_AcpiScoreboard {
+ uint32_t SavedInterruptMask[2];
+ uint8_t LastACPIRequest;
+ uint8_t CgBifResp;
+ uint8_t RequestType;
+ uint8_t Padding;
+ SMU71_Discrete_ACPILevel D0Level;
+};
+
+typedef struct SMU71_AcpiScoreboard SMU71_AcpiScoreboard;
+
+
+struct SMU71_Discrete_PmFuses {
+ // dw0-dw1
+ uint8_t BapmVddCVidHiSidd[8];
+
+ // dw2-dw3
+ uint8_t BapmVddCVidLoSidd[8];
+
+ // dw4-dw5
+ uint8_t VddCVid[8];
+
+ // dw6
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t SviLoadLineTrimVddC;
+ uint8_t SviLoadLineOffsetVddC;
+
+ // dw7
+ uint16_t TDC_VDDC_PkgLimit;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+
+ // dw8
+ uint8_t TdcWaterfallCtl;
+ uint8_t LPMLTemperatureMin;
+ uint8_t LPMLTemperatureMax;
+ uint8_t Reserved;
+
+ // dw9-dw12
+ uint8_t LPMLTemperatureScaler[16];
+
+ // dw13-dw14
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t Reserved6;
+
+ // dw15-dw18
+ uint8_t GnbLPML[16];
+
+ // dw19
+ uint8_t GnbLPMLMaxVid;
+ uint8_t GnbLPMLMinVid;
+ uint8_t Reserved1[2];
+
+ // dw20
+ uint16_t BapmVddCBaseLeakageHiSidd;
+ uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU71_Discrete_PmFuses SMU71_Discrete_PmFuses;
+
+struct SMU71_Discrete_Log_Header_Table {
+ uint32_t version;
+ uint32_t asic_id;
+ uint16_t flags;
+ uint16_t entry_size;
+ uint32_t total_size;
+ uint32_t num_of_entries;
+ uint8_t type;
+ uint8_t mode;
+ uint8_t filler_0[2];
+ uint32_t filler_1[2];
+};
+
+typedef struct SMU71_Discrete_Log_Header_Table SMU71_Discrete_Log_Header_Table;
+
+struct SMU71_Discrete_Log_Cntl {
+ uint8_t Enabled;
+ uint8_t Type;
+ uint8_t padding[2];
+ uint32_t BufferSize;
+ uint32_t SamplesLogged;
+ uint32_t SampleSize;
+ uint32_t AddrL;
+ uint32_t AddrH;
+};
+
+typedef struct SMU71_Discrete_Log_Cntl SMU71_Discrete_Log_Cntl;
+
+#if defined SMU__DGPU_ONLY
+ #define CAC_ACC_NW_NUM_OF_SIGNALS 83
+#endif
+
+
+struct SMU71_Discrete_Cac_Collection_Table {
+ uint32_t temperature;
+ uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
+ uint32_t filler[4];
+};
+
+typedef struct SMU71_Discrete_Cac_Collection_Table SMU71_Discrete_Cac_Collection_Table;
+
+struct SMU71_Discrete_Cac_Verification_Table {
+ uint32_t VddcTotalPower;
+ uint32_t VddcLeakagePower;
+ uint32_t VddcConstantPower;
+ uint32_t VddcGfxDynamicPower;
+ uint32_t VddcUvdDynamicPower;
+ uint32_t VddcVceDynamicPower;
+ uint32_t VddcAcpDynamicPower;
+ uint32_t VddcPcieDynamicPower;
+ uint32_t VddcDceDynamicPower;
+ uint32_t VddcCurrent;
+ uint32_t VddcVoltage;
+ uint32_t VddciTotalPower;
+ uint32_t VddciLeakagePower;
+ uint32_t VddciConstantPower;
+ uint32_t VddciDynamicPower;
+ uint32_t Vddr1TotalPower;
+ uint32_t Vddr1LeakagePower;
+ uint32_t Vddr1ConstantPower;
+ uint32_t Vddr1DynamicPower;
+ uint32_t spare[8];
+ uint32_t temperature;
+};
+
+typedef struct SMU71_Discrete_Cac_Verification_Table SMU71_Discrete_Cac_Verification_Table;
+
+#if !defined(SMC_MICROCODE)
+#pragma pack(pop)
+#endif
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
new file mode 100644
index 000000000000..65eb630bfea3
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _PP_COMMON_H
+#define _PP_COMMON_H
+
+#include "smu7_ppsmc.h"
+#include "cgs_common.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+
+#include "smu74.h"
+#include "smu74_discrete.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
new file mode 100644
index 000000000000..bce00096d80d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef DGPU_VI_PP_SMC_H
+#define DGPU_VI_PP_SMC_H
+
+
+#pragma pack(push, 1)
+
+#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
+
+#define PPSMC_SWSTATE_FLAG_DC 0x01
+#define PPSMC_SWSTATE_FLAG_UVD 0x02
+#define PPSMC_SWSTATE_FLAG_VCE 0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
+#define PPSMC_SYSTEMFLAG_GDDR5 0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
+
+
+#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
+#define PPSMC_DPM2FLAGS_OCP 0x04
+
+
+#define PPSMC_DISPLAY_WATERMARK_LOW 0
+#define PPSMC_DISPLAY_WATERMARK_HIGH 1
+
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
+#define PPSMC_STATEFLAG_POWERBOOST 0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
+#define PPSMC_STATEFLAG_POWERSHIFT 0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
+
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+ FAN_CONTROL_FUZZY,
+ FAN_CONTROL_TABLE
+};
+
+
+#define PPSMC_Result_OK ((uint16_t)0x01)
+#define PPSMC_Result_NoMore ((uint16_t)0x02)
+
+#define PPSMC_Result_NotNow ((uint16_t)0x03)
+#define PPSMC_Result_Failed ((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
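(Failure codes keep the high bit set, so PPSMC_isERROR() is a single-bit test. A sketch of the typical call-site shape, with smum_send_msg_to_smc() standing in for the real message-send path, which may report the result differently:

	PPSMC_Result res = smum_send_msg_to_smc(smumgr, PPSMC_MSG_Halt);

	if (PPSMC_isERROR(res))
		pr_err("SMC message 0x%x failed: 0x%x\n", PPSMC_MSG_Halt, res);
)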
+
+
+#define PPSMC_MSG_Halt ((uint16_t)0x10)
+#define PPSMC_MSG_Resume ((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
+#define PPSMC_CACHistoryStart ((uint16_t)0x57)
+#define PPSMC_CACHistoryStop ((uint16_t)0x58)
+#define PPSMC_TDPClampingActive ((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
+#define PPSMC_StartFanControl ((uint16_t)0x5B)
+#define PPSMC_StopFanControl ((uint16_t)0x5C)
+#define PPSMC_NoDisplay ((uint16_t)0x5D)
+#define PPSMC_HasDisplay ((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
+#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
+#define PPSMC_OCPActive ((uint16_t)0x6C)
+#define PPSMC_OCPInactive ((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
+
+#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
+#define PPSMC_FlushDataCache ((uint16_t)0x80)
+#define PPSMC_FlushInstrCache ((uint16_t)0x81)
+
+#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
+
+#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
+
+#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
+
+#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
+#define PPSM_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
+#define PPSM_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
+#define PPSM_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test ((uint16_t) 0x100)
+#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101)
+#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102)
+#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103)
+#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
+#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105)
+#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106)
+#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107)
+#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108)
+#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109)
+#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a)
+#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b)
+#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f)
+#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110)
+#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111)
+#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112)
+#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113)
+#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114)
+#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117)
+#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118)
+#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119)
+#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a)
+#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b)
+#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c)
+#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
+#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e)
+#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f)
+#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120)
+#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121)
+#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122)
+#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123)
+#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124)
+#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125)
+#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126)
+#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127)
+#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128)
+
+#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129)
+#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A)
+#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B)
+#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C)
+#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
+#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134)
+#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
+#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
+#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b)
+#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c)
+#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
+#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e)
+#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f)
+#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
+#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142)
+#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143)
+#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
+#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b)
+#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c)
+#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d)
+
+#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
+#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152)
+#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153)
+#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
+#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c)
+#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d)
+#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e)
+#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
+#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160)
+#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
+#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163)
+#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164)
+#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165)
+#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
+#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168)
+#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
+#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b)
+#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c)
+#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d)
+#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e)
+#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f)
+#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170)
+#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171)
+#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172)
+#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173)
+#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174)
+#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175)
+#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176)
+#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177)
+#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178)
+#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179)
+#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a)
+#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b)
+#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c)
+#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d)
+#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e)
+#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f)
+#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182)
+#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184)
+#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
+#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D)
+#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E)
+#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
+#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194)
+#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195)
+#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207)
+#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196)
+#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198)
+#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199)
+#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
+#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B)
+#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
+
+#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
+#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202)
+#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203)
+#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204)
+#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
+#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206)
+#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209)
+#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A)
+
+#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240)
+#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241)
+#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242)
+#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243)
+#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244)
+#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245)
+#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246)
+
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
+#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
+#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259)
+#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A)
+#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B)
+#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C)
+#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D)
+#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260)
+#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261)
+#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262)
+#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263)
+#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264)
+#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
+#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266)
+#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267)
+#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268)
+#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269)
+#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A)
+#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B)
+
+#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C)
+#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275)
+#define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277)
+#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400)
+#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401)
+#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402)
+#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
+#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
+
+#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
+#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
+#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
+
+#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
+#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
+
+#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+
+#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
+#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
+#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
+#define PPSMC_MSG_GetData ((uint16_t) 0x801)
+#define PPSMC_MSG_SetData ((uint16_t) 0x802)
+
+typedef uint16_t PPSMC_Msg;
+
+#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
+#define PPSMC_EVENT_STATUS_DC 0x00000004
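+
+/* Editor's note -- illustration, not part of the original patch: these
+ * message IDs are written to the SMU's message mailbox, typically via a
+ * smumgr helper such as
+ *	smum_send_msg_to_smc_with_parameter(smumgr,
+ *			PPSMC_MSG_SCLKDPM_SetEnabledMask, mask);
+ * where the parameter register carries the requested DPM enable bitmask.
+ */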
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 3c235f0177cd..2139072065cc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -28,6 +28,7 @@
struct pp_smumgr;
struct pp_instance;
+struct pp_hwmgr;
#define smu_lower_32_bits(n) ((uint32_t)(n))
#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
@@ -53,6 +54,45 @@ enum AVFS_BTC_STATUS {
AVFS_BTC_SMUMSG_ERROR
};
+enum SMU_TABLE {
+ SMU_UVD_TABLE = 0,
+ SMU_VCE_TABLE,
+ SMU_SAMU_TABLE,
+ SMU_BIF_TABLE,
+};
+
+enum SMU_TYPE {
+ SMU_SoftRegisters = 0,
+ SMU_Discrete_DpmTable,
+};
+
+enum SMU_MEMBER {
+ HandshakeDisables = 0,
+ VoltageChangeTimeout,
+ AverageGraphicsActivity,
+ PreVBlankGap,
+ VBlankTimeout,
+ UcodeLoadStatus,
+ UvdBootLevel,
+ VceBootLevel,
+ SamuBootLevel,
+ LowSclkInterruptThreshold,
+};
+
+enum SMU_MAC_DEFINITION {
+ SMU_MAX_LEVELS_GRAPHICS = 0,
+ SMU_MAX_LEVELS_MEMORY,
+ SMU_MAX_LEVELS_LINK,
+ SMU_MAX_ENTRIES_SMIO,
+ SMU_MAX_LEVELS_VDDC,
+ SMU_MAX_LEVELS_VDDGFX,
+ SMU_MAX_LEVELS_VDDCI,
+ SMU_MAX_LEVELS_MVDD,
+ SMU_UVD_MCLK_HANDSHAKE_DISABLE,
+};
+
struct pp_smumgr_func {
int (*smu_init)(struct pp_smumgr *smumgr);
int (*smu_fini)(struct pp_smumgr *smumgr);
@@ -69,12 +109,23 @@ struct pp_smumgr_func {
int (*download_pptable_settings)(struct pp_smumgr *smumgr,
void **table);
int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
+ int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
+ int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
+ int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
+ int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
+ int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
+ int (*init_smc_table)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
+ int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
+ uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
+ uint32_t (*get_mac_definition)(uint32_t value);
+ bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
};
struct pp_smumgr {
uint32_t chip_family;
uint32_t chip_id;
- uint32_t hw_revision;
void *device;
void *backend;
uint32_t usec_timeout;
@@ -122,6 +173,30 @@ extern int smu_allocate_memory(void *device, uint32_t size,
extern int smu_free_memory(void *device, void *handle);
+extern int cz_smum_init(struct pp_smumgr *smumgr);
+extern int iceland_smum_init(struct pp_smumgr *smumgr);
+extern int tonga_smum_init(struct pp_smumgr *smumgr);
+extern int fiji_smum_init(struct pp_smumgr *smumgr);
+extern int polaris10_smum_init(struct pp_smumgr *smumgr);
+
+extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+
+extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
+extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result);
+extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result);
+extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
+ uint32_t type, uint32_t member);
+extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
+
+extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
+
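+/* Editor's note -- illustration, not part of the original patch: the
+ * SMU_TYPE/SMU_MEMBER enums above let generic hwmgr code ask each ASIC
+ * backend where a firmware field lives, e.g.
+ *	offset = smum_get_offsetof(smumgr, SMU_SoftRegisters,
+ *			UcodeLoadStatus);
+ * with the backend's get_offsetof() callback supplying the answer.
+ */
+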
#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index f10fb64ef981..51ff08301651 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -2,7 +2,9 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
+ polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
+ smu7_smumgr.o iceland_smc.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 87c023e518ab..5a44485526d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -89,13 +89,8 @@ static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
if (result != 0)
return result;
- result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
-
- if (result != 0)
- return result;
-
- return 0;
}
static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
@@ -106,12 +101,12 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
if (0 != (3 & smc_address)) {
printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n");
- return -1;
+ return -EINVAL;
}
if (limit <= (smc_address + 3)) {
printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n");
- return -1;
+ return -EINVAL;
}
cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
@@ -129,9 +124,10 @@ static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
return -EINVAL;
result = cz_set_smc_sram_address(smumgr, smc_address, limit);
- cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+ if (!result)
+ cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
- return 0;
+ return result;
}
static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
@@ -148,7 +144,6 @@ static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
{
struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
- int result = 0;
uint32_t smc_address;
if (!smumgr->reload_fw) {
@@ -177,11 +172,9 @@ static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_power_profiling_index);
- result = cz_send_msg_to_smc_with_parameter(smumgr,
+ return cz_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_initialize_index);
-
- return result;
}
static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
@@ -195,9 +188,6 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
if (smumgr == NULL || smumgr->device == NULL)
return -EINVAL;
- return cgs_read_register(smumgr->device,
- mmSMU_MP1_SRBM2P_ARG_0);
-
cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
for (i = 0; i < smumgr->usec_timeout; i++) {
@@ -275,7 +265,10 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
if (smumgr->chip_id == CHIP_STONEY)
fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
- cz_request_smu_load_fw(smumgr);
+ ret = cz_request_smu_load_fw(smumgr);
+ if (ret)
+ printk(KERN_ERR "[ powerplay ] SMU firmware load failed\n");
+
cz_check_fw_load_finish(smumgr, fw_to_check);
ret = cz_load_mec_firmware(smumgr);
@@ -566,10 +559,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- if (smumgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- else
+ if (smumgr->chip_id != CHIP_STONEY)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
cz_smu_populate_single_ucode_load_task(smumgr,
@@ -580,10 +570,7 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
- CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- else
+ if (smumgr->chip_id != CHIP_STONEY)
cz_smu_populate_single_ucode_load_task(smumgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
cz_smu_populate_single_ucode_load_task(smumgr,
@@ -610,19 +597,12 @@ static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
cz_smu->toc_entry_used_count = 0;
-
cz_smu_initialize_toc_empty_job_list(smumgr);
-
cz_smu_construct_toc_for_rlc_aram_save(smumgr);
-
cz_smu_construct_toc_for_vddgfx_enter(smumgr);
-
cz_smu_construct_toc_for_vddgfx_exit(smumgr);
-
cz_smu_construct_toc_for_power_profiling(smumgr);
-
cz_smu_construct_toc_for_bootup(smumgr);
-
cz_smu_construct_toc_for_clock_table(smumgr);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
new file mode 100644
index 000000000000..76310ac7ef0d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -0,0 +1,2374 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "fiji_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "fiji_smumgr.h"
+#include "pppcielanes.h"
+#include "smu7_ppsmc.h"
+#include "smu73.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "smu7_smumgr.h"
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define VDDC_VDDCI_DELTA 300
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+/* Two rows: whether the clock stretch is a multiple of 2.5% or not;
+ * columns are [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ].
+ */
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/* Indexed by [FF, SS] type and four voltage ranges; each entry is
+ * [Floor Freq, Boundary Freq, VID min, VID max].
+ */
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
+ * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
+ */
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
+ {1, 0xF, 0xFD,
+ /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
+ 0x19, 5, 45}
+};
+
+/* Look up the SMC voltage level (and MVDD) for a given clock from a
+ * clock-voltage dependency table.
+ */
+static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ *voltage = *mvdd = 0;
+
+
+ /* the clock-voltage dependency table is empty */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+ /* find the first sclk at or above the requested clock */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+ /* the requested sclk exceeds the table maximum; use the last entry */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i-1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i - 1].vddc -
+ VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+ else if (dep_table->entries[i - 1].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
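+
+/* Editor's illustration (not part of the original patch): with
+ * VOLTAGE_SCALE == 4, a matching entry of vddc == 1000 contributes
+ * (1000 * 4) << VDDC_SHIFT to *voltage, vddci is packed the same way at
+ * VDDCI_SHIFT, and a single power phase is flagged via 1 << PHASES_SHIFT;
+ * clocks above the table's maximum fall through to the last (i - 1) entry.
+ */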
+
+
+/* PPGen generates the gain setting in x * 100 units; this function
+ * converts it to the x * 4096 (0x1000) units expected by the SMC
+ * firmware.
+ */
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
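+
+/* Editor's illustration (not part of the original patch): a PPGen gain of
+ * 1.00 arrives as raw_setting == 100 and scales to 100 * 4096 / 100 == 4096
+ * (0x1000), the fixed-point unit the SMC firmware expects.
+ */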
+
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+ switch (line) {
+ case SMU7_I2CLineID_DDC1:
+ *scl = SMU7_I2C_DDC1CLK;
+ *sda = SMU7_I2C_DDC1DATA;
+ break;
+ case SMU7_I2CLineID_DDC2:
+ *scl = SMU7_I2C_DDC2CLK;
+ *sda = SMU7_I2C_DDC2DATA;
+ break;
+ case SMU7_I2CLineID_DDC3:
+ *scl = SMU7_I2C_DDC3CLK;
+ *sda = SMU7_I2C_DDC3DATA;
+ break;
+ case SMU7_I2CLineID_DDC4:
+ *scl = SMU7_I2C_DDC4CLK;
+ *sda = SMU7_I2C_DDC4DATA;
+ break;
+ case SMU7_I2CLineID_DDC5:
+ *scl = SMU7_I2C_DDC5CLK;
+ *sda = SMU7_I2C_DDC5DATA;
+ break;
+ case SMU7_I2CLineID_DDC6:
+ *scl = SMU7_I2C_DDC6CLK;
+ *sda = SMU7_I2C_DDC6DATA;
+ break;
+ case SMU7_I2CLineID_SCLSDA:
+ *scl = SMU7_I2C_SCL;
+ *sda = SMU7_I2C_SDA;
+ break;
+ case SMU7_I2CLineID_DDCVGA:
+ *scl = SMU7_I2C_DDCVGACLK;
+ *sda = SMU7_I2C_DDCVGADATA;
+ break;
+ default:
+ *scl = 0;
+ *sda = 0;
+ break;
+ }
+}
+
+static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &fiji_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
+
+}
+
+static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ uint8_t uc_scl, uc_sda;
+
+ /* The number of TDP fraction bits changed from 8 to 7 for Fiji,
+ * as requested by the SMC team (hence the multiply by 128).
+ */
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
+
+ /* The following are for new Fiji Multi-input fan/thermal control */
+ dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid1 * 256);
+ dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid2 * 256);
+ dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrVddc * 256);
+ dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrMvdd * 256);
+ dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitPlx * 256);
+
+ dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+ dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainLiquid));
+ dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrVddc));
+ dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
+ dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainPlx));
+ dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHbm));
+
+ dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
+ dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
+ dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
+ dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
+
+ get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Liquid_I2C_LineSCL = uc_scl;
+ dpm_table->Liquid_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Vr_I2C_LineSCL = uc_scl;
+ dpm_table->Vr_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Plx_I2C_LineSCL = uc_scl;
+ dpm_table->Plx_I2C_LineSDA = uc_sda;
+
+ return 0;
+}
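+
+/* Editor's illustration (not part of the original patch): with 7 fraction
+ * bits, usTDP == 180 (W) is stored as 180 * 128 == 23040, while the
+ * temperature limits use 8.8 fixed point, e.g. 90 degC is 90 * 256 == 23040.
+ */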
+
+
+static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+
+static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ /* The number of TDC fraction bits changed from 8 to 7 for Fiji,
+ * as requested by the SMC team (hence the multiply by 128).
+ */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ fuse_table_offset +
+ offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ 0 == hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
+
+static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ uint32_t pm_fuse_table_offset;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (fiji_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (fiji_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != fiji_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (fiji_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (fiji_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW19 */
+ if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Min and Max Vid Failed!",
+ return -EINVAL);
+
+ /* DW20 */
+ if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ /* The table is already byte-swapped, so to use a value from it we
+ * would need to swap it back.
+ * We populate the vddc CAC data into the BapmVddc table in both
+ * split and merged mode.
+ */
+
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = fiji_populate_cac_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_Ulv *state)
+{
+ int result = 0;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = 1;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+ }
+ return result;
+}
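+
+/* Editor's illustration (not part of the original patch): assuming the ULV
+ * offset is given in mV, one VID step is 6.25 mV, so a 50 mV offset maps to
+ * 50 * 100 / 625 == 8 VID steps (the
+ * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 ratio above).
+ */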
+
+static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ return fiji_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk the SMC SCLK structure to be populated
+*/
+static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t ref_clock;
+ uint32_t ref_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+ ref_clock = atomctrl_get_reference_clock(hwmgr);
+ ref_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* the low 14 bits are the fraction and the high 12 bits the divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup */
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation */
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ struct pp_atomctrl_internal_ss_info ssInfo;
+
+ uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+ if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+ vco_freq, &ssInfo)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ *
+ * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
+ */
+ uint32_t clk_s = ref_clock * 5 /
+ (ref_divider * ssInfo.speed_spectrum_rate);
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
+ fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+ CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+/**
+* Populates a single SMC SCLK structure using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk the SMC SCLK structure to be populated
+*/
+
+static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU73_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t threshold, mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ result = fiji_calculate_sclk_params(hwmgr, clock, level);
+ if (result)
+ return result;
+
+ /* populate graphics levels */
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ (uint32_t *)(&level->MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+
+ level->SclkFrequency = clock;
+ level->ActivityLevel = sclk_al_threshold;
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+
+ threshold = clock * data->fast_watermark_threshold / 100;
+
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+
+ /* Default to slow; the highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_HIGH later.
+ */
+ level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+
+ return 0;
+}
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param hwmgr the address of the hardware manager
+*/
+int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+ SMU73_MAX_LEVELS_GRAPHICS;
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t highest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = fiji_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &levels[i]);
+ if (result)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now.*/
+ levels[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (highest_pcie_level_enabled + 1))) != 0))
+ highest_pcie_level_enabled++;
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* the level count is sent to the SMC once at SMC table init and never changes */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
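+
+/* Editor's illustration (not part of the original patch): for a PCIe DPM
+ * enable mask of 0b111, the loops above yield lowest == 0, highest == 2 and
+ * mid == 1, so sclk level 0 rides PCIe level 0, level 1 rides level 1, and
+ * every higher sclk level is pinned to PCIe level 2.
+ */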
+
+
+/**
+ * MCLK Frequency Ratio
+ * SEQ_CG_RESP Bit[31:24] - 0x0
+ * Bit[27:24] - DDR3 frequency ratio
+ * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
+ * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
+ * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
+ * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
+ * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
+ * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
+ * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
+ * 400 < 0x7 <= 450MHz, 800 < 0xF
+ */
+static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
+{
+ if (mem_clock <= 10000)
+ return 0x0;
+ if (mem_clock <= 15000)
+ return 0x1;
+ if (mem_clock <= 20000)
+ return 0x2;
+ if (mem_clock <= 25000)
+ return 0x3;
+ if (mem_clock <= 30000)
+ return 0x4;
+ if (mem_clock <= 35000)
+ return 0x5;
+ if (mem_clock <= 40000)
+ return 0x6;
+ if (mem_clock <= 45000)
+ return 0x7;
+ if (mem_clock <= 50000)
+ return 0x8;
+ if (mem_clock <= 55000)
+ return 0x9;
+ if (mem_clock <= 60000)
+ return 0xa;
+ if (mem_clock <= 65000)
+ return 0xb;
+ if (mem_clock <= 70000)
+ return 0xc;
+ if (mem_clock <= 75000)
+ return 0xd;
+ if (mem_clock <= 80000)
+ return 0xe;
+ /* mem_clock > 800MHz */
+ return 0xf;
+}
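+
+/* Editor's illustration (not part of the original patch): clocks here are
+ * in 10 kHz units, so a 475 MHz MCLK arrives as mem_clock == 47500, falls
+ * in the 450-500 MHz band and yields the ratio code 0x8.
+ */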
+
+/**
+* Populates the SMC MCLK structure using the provided memory clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the memory clock to use to populate the structure
+* @param mclk the SMC MCLK structure to be populated
+*/
+static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
+{
+ struct pp_atomctrl_memory_clock_param mem_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to get Memory PLL Dividers.",
+ );
+
+ /* Save the result data to the output memory level structure */
+ mclk->MclkFrequency = clock;
+ mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
+ mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
+
+ return result;
+}
+
+static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ uint32_t mclk_stutter_mode_threshold = 60000;
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ /* Enable stutter mode if all of the following conditions apply.
+ * The active display count would normally come from
+ * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
+ * &(data->DisplayTiming.numExistingDisplays));
+ */
+ data->display_timing.num_existing_displays = 1;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param hwmgr the address of the hardware manager
+*/
+int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
+ SMU73_MAX_LEVELS_MEMORY;
+ struct SMU73_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = fiji_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now. */
+ levels[0].EnabledForActivity = 1;
+
+ /* Prevent MC activity in stutter mode from pushing DPM up;
+ * the UVD change complements this by putting the MCLK in a
+ * higher state by default, so that we are not affected by the
+ * up threshold or MCLK DPM latency.
+ */
+ levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high */
+ levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* the level count is sent to the SMC once at SMC table init and never changes */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param hwmgr the address of the hardware manager
+* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
+* @param voltage the SMC VOLTAGE structure to be populated
+*/
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first mvdd entry whose clock is at or above the request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (!data->sclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ table->ACPILevel.SclkFrequency =
+ data->dpm_table.sclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ table->ACPILevel.SclkFrequency,
+ (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value " \
+ "in Clock Dependency Table",
+ );
+ } else {
+ table->ACPILevel.SclkFrequency =
+ data->vbios_boot_state.sclk_bootup_value;
+ table->ACPILevel.MinVoltage =
+ data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
+ }
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ if (!data->mclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency =
+ data->dpm_table.mclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
+ );
+ } else {
+ table->MemoryACPILevel.MclkFrequency =
+ data->vbios_boot_state.mclk_bootup_value;
+ table->MemoryACPILevel.MinVoltage =
+ data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
+ }
+
+ us_mvdd = 0;
+ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!fiji_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = false;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
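+	/* Each VCE level packs VDDC, a VDDCI derived as VDDC minus the fixed
+	 * delta, and a single power phase into its MinVoltage word.
+	 */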
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->VceLevel[count].MinVoltage |=
+ ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+				"cannot find divider id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t)(mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
+		table->AcpLevel[count].MinVoltage = 0;
+		table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+			"cannot find divider id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+			"cannot find divider id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burstTime;
+ ULONG state, trrds, trrdl;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+
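+	/* Extract the state 0 burst-time fields from the packed register. */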
+ state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
+ trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
+ trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+ arb_regs->TRRDS = (uint8_t)trrds;
+ arb_regs->TRRDL = (uint8_t)trrdl;
+
+ return 0;
+}
+
+static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
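+	/* Build one MC arbiter timing entry for every (sclk, mclk) DPM level
+	 * pair; the whole matrix is then uploaded to the SMC in one copy.
+	 */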
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = fiji_populate_memory_timing_parameters(hwmgr,
+ data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result)
+ break;
+ }
+ }
+
+ if (!result)
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU73_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+			"cannot find divider id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+			"cannot find divider id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+	}
+ return result;
+}
+
+static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int fiji_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+	/* Read the SMU efuse to calculate RO and determine
+	 * whether the part is SS or FF: if RO >= 1660MHz, the part is FF.
+	 */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
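+	/* Linearly map the 8-bit fuse value onto the RO range selected by
+	 * the efuse2 flag.
+	 */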
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
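+	/* Disable hardware stretching, then latch staticEnable with a
+	 * masterReset pulse.
+	 */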
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
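+	/* Stretch amounts 1, 2 and 5 select row 0 of the lookup table,
+	 * 3 and 4 select row 1; anything else is unsupported.
+	 */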
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1];
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
+ if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (fiji_clock_stretch_amount_conversion
+ [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
+ /* Loop through each SCLK and check the frequency
+ * to see if it lies within the frequency for clock stretcher.
+ */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+			/* Check the allowed frequency against sclk level[j].
+			 * The sclk's endianness has already been converted,
+			 * and it is in 10 kHz units,
+			 * whereas the data table is in MHz units.
+			 */
+ if (clock_freq >=
+ (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq <
+ (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+/**
+* Populates the SMC VRConfig field in the DPM table.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
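+	/* VDDGFX is merged with VDDC, so report the merged configuration in
+	 * the VDDGFX field.
+	 */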
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+	 * This solution is ugly, but we never write the whole table, only
+	 * individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
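+	/* The SMC is big-endian, so the table's first byte is the top byte
+	 * of this dword.
+	 */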
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success, an error code otherwise.
+*/
+int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+
+ fiji_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ fiji_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = fiji_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = fiji_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = fiji_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = fiji_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = fiji_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = fiji_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = fiji_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result);
+
+ result = fiji_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = fiji_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = fiji_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = fiji_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+	result = fiji_populate_smc_initial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = fiji_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = fiji_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+		/* For polarity, read GPIOPAD_A for the assigned GPIO pin:
+		 * the VBIOS programs this register to the 'inactive state',
+		 * so the driver can determine the 'active state' from it and
+		 * program the SMU with the correct polarity.
+		 */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.). */
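+	/* The three trailing SMU73_PIDController blocks are excluded from the
+	 * upload, presumably because the SMC owns their contents.
+	 */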
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = fiji_init_arb_table_index(hwmgr->smumgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = fiji_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+ return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0.
+*/
+int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
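+	/* Fan temperatures come in hundredths of a degree; adding 50 rounds
+	 * the conversion to whole degrees.
+	 */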
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
+
+int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+ return fiji_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ result = fiji_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+ return result;
+}
+
+uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU73_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU73_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU73_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
+		}
+		break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+	printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+uint32_t fiji_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU73_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU73_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU73_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU73_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU73_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU73_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU73_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU73_MAX_LEVELS_MVDD;
+ }
+
+	printk(KERN_WARNING "can't get the mac definition of %x\n", value);
+ return 0;
+}
+
+static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
+ UvdBootLevel);
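+	/* Align the byte offset down to its containing dword for the
+	 * read-modify-write below.
+	 */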
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ fiji_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ fiji_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ fiji_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success, -1 if a required header field could not be read.
+*/
+int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
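+	/* A failure to read the MC register table offset is not folded into
+	 * 'error', so this table appears to be optional.
+	 */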
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ /* Program additional LP registers
+ * that are no longer programmed by VBIOS
+ */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+
+ return 0;
+}
+
+bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return 1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
new file mode 100644
index 000000000000..d30d150f9ca6
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef FIJI_SMC_H
+#define FIJI_SMC_H
+
+#include "smumgr.h"
+#include "smu73.h"
+
+struct fiji_pt_defaults {
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t DTEAmbientTempBase;
+};
+
+int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
+int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
+uint32_t fiji_get_mac_definition(uint32_t value);
+int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
+int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 8e52a2e82db5..02fe1df855a9 100644..100755
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -38,6 +38,7 @@
#include "bif/bif_5_0_sh_mask.h"
#include "pp_debug.h"
#include "fiji_pwrvirus.h"
+#include "fiji_smc.h"
#define AVFS_EN_MSB 1568
#define AVFS_EN_LSB 1568
@@ -57,509 +58,6 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
{ 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }
};
-static enum cgs_ucode_id fiji_convert_fw_type_to_cgs(uint32_t fw_type)
-{
- enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
- switch (fw_type) {
- case UCODE_ID_SMU:
- result = CGS_UCODE_ID_SMU;
- break;
- case UCODE_ID_SDMA0:
- result = CGS_UCODE_ID_SDMA0;
- break;
- case UCODE_ID_SDMA1:
- result = CGS_UCODE_ID_SDMA1;
- break;
- case UCODE_ID_CP_CE:
- result = CGS_UCODE_ID_CP_CE;
- break;
- case UCODE_ID_CP_PFP:
- result = CGS_UCODE_ID_CP_PFP;
- break;
- case UCODE_ID_CP_ME:
- result = CGS_UCODE_ID_CP_ME;
- break;
- case UCODE_ID_CP_MEC:
- result = CGS_UCODE_ID_CP_MEC;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = CGS_UCODE_ID_CP_MEC_JT1;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = CGS_UCODE_ID_CP_MEC_JT2;
- break;
- case UCODE_ID_RLC_G:
- result = CGS_UCODE_ID_RLC_G;
- break;
- default:
- break;
- }
-
- return result;
-}
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-*/
-static int fiji_set_smc_sram_address(struct pp_smumgr *smumgr,
- uint32_t smc_addr, uint32_t limit)
-{
- PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)),
- "SMC address must be 4 byte aligned.", return -EINVAL;);
- PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)),
- "SMC address is beyond the SMC RAM area.", return -EINVAL;);
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smc_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-
- return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param smumgr the address of the powerplay SMU manager.
-* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
-* @param src the byte array to copy the bytes from.
-* @param byteCount the number of bytes to copy.
-*/
-int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr,
- uint32_t smcStartAddress, const uint8_t *src,
- uint32_t byteCount, uint32_t limit)
-{
- int result;
- uint32_t data, originalData;
- uint32_t addr, extraShift;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
- "SMC address must be 4 byte aligned.", return -EINVAL;);
- PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
- "SMC address is beyond the SMC RAM area.", return -EINVAL;);
-
- addr = smcStartAddress;
-
- while (byteCount >= 4) {
- /* Bytes are written into the SMC addres space with the MSB first. */
- data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
-
- result = fiji_set_smc_sram_address(smumgr, addr, limit);
- if (result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-
- src += 4;
- byteCount -= 4;
- addr += 4;
- }
-
- if (byteCount) {
- /* Now write the odd bytes left.
- * Do a read modify write cycle.
- */
- data = 0;
-
- result = fiji_set_smc_sram_address(smumgr, addr, limit);
- if (result)
- return result;
-
- originalData = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
- extraShift = 8 * (4 - byteCount);
-
- while (byteCount > 0) {
- /* Bytes are written into the SMC addres
- * space with the MSB first.
- */
- data = (0x100 * data) + *src++;
- byteCount--;
- }
- data <<= extraShift;
- data |= (originalData & ~((~0UL) << extraShift));
-
- result = fiji_set_smc_sram_address(smumgr, addr, limit);
- if (!result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
- }
- return 0;
-}
-
-int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
-{
- static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
-
- fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
-
- return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param smumgr the address of the powerplay hardware manager.
-*/
-bool fiji_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
- CGS_IND_REG__SMC,
- SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device,
- CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-int fiji_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
- if (!fiji_is_smc_ram_running(smumgr))
- return -1;
-
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
- printk(KERN_ERR "Failed to send Previous Message.");
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- }
-
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- return 0;
-}
-
-/**
- * Send a message to the SMC with parameter
- * @param smumgr: the address of the powerplay hardware manager.
- * @param msg: the message to send.
- * @param parameter: the parameter to send
- * @return The response that came from the SMC.
- */
-int fiji_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
- uint16_t msg, uint32_t parameter)
-{
- if (!fiji_is_smc_ram_running(smumgr))
- return -1;
-
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
- printk(KERN_ERR "Failed to send Previous Message.");
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- }
-
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- return 0;
-}
-
-
-/**
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-int fiji_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) {
- printk(KERN_ERR "Failed to send Previous Message.");
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- }
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-/**
-* Uploads the SMU firmware from .hex file
-*
-* @param smumgr the address of the powerplay SMU manager.
-* @return 0 or -1.
-*/
-
-static int fiji_upload_smu_firmware_image(struct pp_smumgr *smumgr)
-{
- const uint8_t *src;
- uint32_t byte_count;
- uint32_t *data;
- struct cgs_firmware_info info = {0};
-
- cgs_get_firmware_info(smumgr->device,
- fiji_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-
- if (info.image_size & 3) {
- printk(KERN_ERR "SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (info.image_size > FIJI_SMC_SIZE) {
- printk(KERN_ERR "SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-
- byte_count = info.image_size;
- src = (const uint8_t *)info.kptr;
-
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
-
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- return 0;
-}
-
-/**
-* Read a 32bit value from the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value and output parameter for the data read from the SMC SRAM.
-*/
-int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
- uint32_t *value, uint32_t limit)
-{
- int result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
-
- if (result)
- return result;
-
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
- return 0;
-}
-
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value to write to the SMC SRAM.
-*/
-int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
- uint32_t value, uint32_t limit)
-{
- int result;
-
- result = fiji_set_smc_sram_address(smumgr, smc_addr, limit);
-
- if (result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
- return 0;
-}
-
-static uint32_t fiji_get_mask_for_firmware_type(uint32_t fw_type)
-{
- uint32_t result = 0;
-
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- result = UCODE_ID_SDMA0_MASK;
- break;
- case UCODE_ID_SDMA1:
- result = UCODE_ID_SDMA1_MASK;
- break;
- case UCODE_ID_CP_CE:
- result = UCODE_ID_CP_CE_MASK;
- break;
- case UCODE_ID_CP_PFP:
- result = UCODE_ID_CP_PFP_MASK;
- break;
- case UCODE_ID_CP_ME:
- result = UCODE_ID_CP_ME_MASK;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT2_MASK;
- break;
- case UCODE_ID_RLC_G:
- result = UCODE_ID_RLC_G_MASK;
- break;
- default:
- printk(KERN_ERR "UCode type is out of range!");
- result = 0;
- }
-
- return result;
-}
-
-/* Populate one firmware image to the data structure */
-static int fiji_populate_single_firmware_entry(struct pp_smumgr *smumgr,
- uint32_t fw_type, struct SMU_Entry *entry)
-{
- int result;
- struct cgs_firmware_info info = {0};
-
- result = cgs_get_firmware_info(
- smumgr->device,
- fiji_convert_fw_type_to_cgs(fw_type),
- &info);
-
- if (!result) {
- entry->version = 0;
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
- entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = info.image_size;
- entry->num_register_entries = 0;
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
- }
-
- return result;
-}
-
-static int fiji_request_smu_load_fw(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
- uint32_t fw_to_load;
- struct SMU_DRAMData_TOC *toc;
-
- if (priv->soft_regs_start)
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
- priv->soft_regs_start +
- offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
- 0x0);
-
- toc = (struct SMU_DRAMData_TOC *)priv->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
- PP_ASSERT_WITH_CODE(
- 0 == fiji_populate_single_firmware_entry(smumgr,
- UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n" , return -1 );
-
- fiji_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI,
- priv->header_buffer.mc_addr_high);
- fiji_send_msg_to_smc_with_parameter(smumgr,PPSMC_MSG_DRV_DRAM_ADDR_LO,
- priv->header_buffer.mc_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK
- + UCODE_ID_SDMA0_MASK
- + UCODE_ID_SDMA1_MASK
- + UCODE_ID_CP_CE_MASK
- + UCODE_ID_CP_ME_MASK
- + UCODE_ID_CP_PFP_MASK
- + UCODE_ID_CP_MEC_MASK
- + UCODE_ID_CP_MEC_JT1_MASK
- + UCODE_ID_CP_MEC_JT2_MASK;
-
- if (fiji_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_LoadUcodes, fw_to_load))
- printk(KERN_ERR "Fail to Request SMU Load uCode");
-
- return 0;
-}
-
-
-/* Check if the FW has been loaded, SMU will not return
- * if loading has not finished.
- */
-static int fiji_check_fw_load_finish(struct pp_smumgr *smumgr,
- uint32_t fw_type)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
- uint32_t mask = fiji_get_mask_for_firmware_type(fw_type);
-
- /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
- if (smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX,
- priv->soft_regs_start +
- offsetof(SMU73_SoftRegisters, UcodeLoadStatus),
- mask, mask)) {
- printk(KERN_ERR "check firmware loading failed\n");
- return -EINVAL;
- }
- return 0;
-}
-
-
-static int fiji_reload_firmware(struct pp_smumgr *smumgr)
-{
- return smumgr->smumgr_funcs->start_smu(smumgr);
-}
-
-static bool fiji_is_hw_virtualization_enabled(struct pp_smumgr *smumgr)
-{
- uint32_t value;
-
- value = cgs_read_register(smumgr->device, mmBIF_IOV_FUNC_IDENTIFIER);
- if (value & BIF_IOV_FUNC_IDENTIFIER__IOV_ENABLE_MASK) {
- /* driver reads on SR-IOV enabled PF: 0x80000000
- * driver reads on SR-IOV enabled VF: 0x80000001
- * driver reads on SR-IOV disabled: 0x00000000
- */
- return true;
- }
- return false;
-}
-
-static int fiji_request_smu_specific_fw_load(struct pp_smumgr *smumgr, uint32_t fw_type)
-{
- if (fiji_is_hw_virtualization_enabled(smumgr)) {
- uint32_t masks = fiji_get_mask_for_firmware_type(fw_type);
- if (fiji_send_msg_to_smc_with_parameter_without_waiting(smumgr,
- PPSMC_MSG_LoadUcodes, masks))
- printk(KERN_ERR "Fail to Request SMU Load uCode");
- }
- /* For non-virtualization cases,
- * SMU loads all FWs at once in fiji_request_smu_load_fw.
- */
- return 0;
-}
-
static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
{
int result = 0;
@@ -571,7 +69,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = fiji_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result)
return result;
@@ -610,8 +108,8 @@ static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
- SMU_STATUS, SMU_PASS)) {
+ if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMU_STATUS, SMU_PASS) != 1) {
PP_ASSERT_WITH_CODE(false,
"SMU Firmware start failed!", return -1);
}
@@ -639,12 +137,12 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = fiji_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result)
return result;
/* Set smc instruct start point at 0x0 */
- fiji_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(smumgr);
/* Enable clock */
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -698,15 +196,15 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
if (priv->avfs.AvfsBtcParam) {
- if (!fiji_send_msg_to_smc_with_parameter(smumgr,
+ if (!smum_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
- if (!fiji_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
+ if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
result = 0;
} else {
printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt"
" to Enable AVFS Failed!");
- fiji_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
+ smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
result = -1;
}
} else {
@@ -736,7 +234,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
- PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
PmFuseTable), &table_start, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
@@ -748,13 +246,13 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
inversion_voltage_addr = table_start +
offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
- result = fiji_copy_bytes_to_smc(smumgr, charz_freq_addr,
+ result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr,
(uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
PP_ASSERT_WITH_CODE(0 == result,
"[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
"be populated.", return -1;);
- result = fiji_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
+ result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
(uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
"charz_freq could not be populated.", return -1;);
@@ -769,7 +267,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
uint32_t level_addr, vr_config_addr;
uint32_t level_size = sizeof(avfs_graphics_level);
- PP_ASSERT_WITH_CODE(0 == fiji_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, DpmTable),
&table_start, 0x40000),
@@ -784,7 +282,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_addr = table_start +
offsetof(SMU73_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, vr_config_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
(uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
"vr_config value over to SMC",
@@ -792,7 +290,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == fiji_copy_bytes_to_smc(smumgr, level_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
(uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1;);
@@ -839,13 +337,13 @@ int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
break;
case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
- PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
- PPSMC_MSG_VftTableIsValid),
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
+ 0x666),
"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
"correctly to VftTableIsValid Msg",
return -1;);
priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
- PP_ASSERT_WITH_CODE(0 == fiji_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
PPSMC_MSG_EnableAvfs),
"[AVFS][fiji_avfs_event_mgr] SMU did not respond "
"correctly to EnableAvfs Message Msg",
@@ -898,7 +396,7 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
/* Only start SMC if SMC RAM is not running */
- if (!fiji_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(smumgr)) {
fiji_avfs_event_mgr(smumgr, false);
/* Check if SMU is running in protected mode */
@@ -929,12 +427,12 @@ static int fiji_start_smu(struct pp_smumgr *smumgr)
/* Setup SoftRegsStart here for register lookup in case
* DummyBackEnd is used and ProcessFirmwareHeader is not executed
*/
- fiji_read_smc_sram_dword(smumgr,
+ smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, SoftRegisters),
- &(priv->soft_regs_start), 0x40000);
+ &(priv->smu7_data.soft_regs_start), 0x40000);
- result = fiji_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(smumgr);
return result;
}
@@ -963,28 +461,10 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
static int fiji_smu_init(struct pp_smumgr *smumgr)
{
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
- uint64_t mc_addr;
-
- priv->header_buffer.data_size =
- ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- smu_allocate_memory(smumgr->device,
- priv->header_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &priv->header_buffer.kaddr,
- &priv->header_buffer.handle);
-
- priv->header = priv->header_buffer.kaddr;
- priv->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- priv->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != priv->header),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)priv->header_buffer.handle);
- return -1);
+ int i;
+
+ if (smu7_init(smumgr))
+ return -EINVAL;
priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
if (fiji_is_hw_avfs_present(smumgr))
@@ -999,37 +479,35 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
else
priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
- priv->acpi_optimization = 1;
+ for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
+ priv->activity_target[i] = 30;
return 0;
}
-static int fiji_smu_fini(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
- smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
-
- if (smumgr->backend) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
-
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- return 0;
-}
static const struct pp_smumgr_func fiji_smu_funcs = {
.smu_init = &fiji_smu_init,
- .smu_fini = &fiji_smu_fini,
+ .smu_fini = &smu7_smu_fini,
.start_smu = &fiji_start_smu,
- .check_fw_load_finish = &fiji_check_fw_load_finish,
- .request_smu_load_fw = &fiji_reload_firmware,
- .request_smu_load_specific_fw = &fiji_request_smu_specific_fw_load,
- .send_msg_to_smc = &fiji_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &fiji_send_msg_to_smc_with_parameter,
+ .check_fw_load_finish = &smu7_check_fw_load_finish,
+ .request_smu_load_fw = &smu7_reload_firmware,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = &smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
+ .update_smc_table = fiji_update_smc_table,
+ .get_offsetof = fiji_get_offsetof,
+ .process_firmware_header = fiji_process_firmware_header,
+ .init_smc_table = fiji_init_smc_table,
+ .update_sclk_threshold = fiji_update_sclk_threshold,
+ .thermal_setup_fan_table = fiji_thermal_setup_fan_table,
+ .populate_all_graphic_levels = fiji_populate_all_graphic_levels,
+ .populate_all_memory_levels = fiji_populate_all_memory_levels,
+ .get_mac_definition = fiji_get_mac_definition,
+ .initialize_mc_reg_table = fiji_initialize_mc_reg_table,
+ .is_dpm_running = fiji_is_dpm_running,
};
int fiji_smum_init(struct pp_smumgr *smumgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index b4eb483215b1..adcbdfb209be 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -23,37 +23,31 @@
#ifndef _FIJI_SMUMANAGER_H_
#define _FIJI_SMUMANAGER_H_
+#include "smu73_discrete.h"
+#include <pp_endian.h>
+#include "smu7_smumgr.h"
+
+
struct fiji_smu_avfs {
enum AVFS_BTC_STATUS AvfsBtcStatus;
uint32_t AvfsBtcParam;
};
-struct fiji_buffer_entry {
- uint32_t data_size;
- uint32_t mc_addr_low;
- uint32_t mc_addr_high;
- void *kaddr;
- unsigned long handle;
-};
struct fiji_smumgr {
- uint8_t *header;
- uint8_t *mec_image;
- uint32_t soft_regs_start;
+ struct smu7_smumgr smu7_data;
+
struct fiji_smu_avfs avfs;
- uint32_t acpi_optimization;
+ struct SMU73_Discrete_DpmTable smc_state_table;
+ struct SMU73_Discrete_Ulv ulv_setting;
+ struct SMU73_Discrete_PmFuses power_tune_table;
+ const struct fiji_pt_defaults *power_tune_defaults;
+ uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
- struct fiji_buffer_entry header_buffer;
};
-int fiji_smum_init(struct pp_smumgr *smumgr);
-int fiji_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
- uint32_t *value, uint32_t limit);
-int fiji_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
- uint32_t value, uint32_t limit);
-int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smcStartAddress,
- const uint8_t *src, uint32_t byteCount, uint32_t limit);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
new file mode 100644
index 000000000000..8c889caba420
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -0,0 +1,2576 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "iceland_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "smu7_ppsmc.h"
+
+#include "smu71_discrete.h"
+
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "processpptables.h"
+
+#include "iceland_smumgr.h"
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
+#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
+#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
+#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
+
+static const struct iceland_pt_defaults defaults_iceland = {
+ /*
+ * SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
+ * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+/* 35W - XT, XTL */
+static const struct iceland_pt_defaults defaults_icelandxt = {
+ /*
+ * SviLoadLineEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+/* 25W - PRO, LE */
+static const struct iceland_pt_defaults defaults_icelandpro = {
+ /*
+ * SviLoadLineEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ switch (dev_id) {
+ case DEVICE_ID_VI_ICELAND_M_6900:
+ case DEVICE_ID_VI_ICELAND_M_6903:
+ smu_data->power_tune_defaults = &defaults_icelandxt;
+ break;
+
+ case DEVICE_ID_VI_ICELAND_M_6901:
+ case DEVICE_ID_VI_ICELAND_M_6902:
+ smu_data->power_tune_defaults = &defaults_icelandpro;
+ break;
+ default:
+ smu_data->power_tune_defaults = &defaults_iceland;
+ pr_warning("Unknown V.I. Device ID.\n");
+ break;
+ }
+ return;
+}
+
+static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
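+
+/*
+ * Worked example for the Q8.8 packing above (usTDC value assumed for
+ * illustration): with usTDC = 120, tdc_limit = 120 * 256 = 30720
+ * (0x7800), i.e. 120.0 in unsigned Q8.8 fixed point, which
+ * CONVERT_FROM_HOST_TO_SMC_US() then swaps into SMC byte order.
+ */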
+
+static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ fuse_table_offset +
+ offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 8; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
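+
+/*
+ * Worked example for the scaling above (leakage value assumed for
+ * illustration): usHighCACLeakage = 1250 evaluates as 1250 / 100 = 12,
+ * then 12 * 256 = 3072; dividing before multiplying truncates the
+ * fractional percent (the .5 here) before the Q8.8 scale is applied.
+ */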
+
+static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+ "The CAC Leakage table does not exist!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+ "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+ "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+ for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+ }
+ } else {
+ PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+ }
+
+ return 0;
+}
+
+static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t *vid = smu_data->power_tune_table.VddCVid;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+ "There should never be more than 8 entries for VddcVid!!!",
+ return -EINVAL);
+
+ for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+ vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+ }
+
+ return 0;
+}
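+
+/*
+ * Sketch of the mapping performed by convert_to_vid(), assuming the
+ * usual SVI2 encoding ((6200 - vddc * VOLTAGE_SCALE) / 25): for
+ * vddc = 1100 (mV), VID = (6200 - 4400) / 25 = 72, i.e. the VID counts
+ * 6.25 mV steps down from 1.55 V.
+ */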
+
+
+
+static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW0 - DW3 */
+ if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate bapm vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW4 - DW5 */
+ if (iceland_populate_vddc_vid(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (iceland_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (iceland_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != iceland_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW16 */
+ if (iceland_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW17 */
+ if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Min and Max Vid Failed!",
+ return -EINVAL);
+
+ /* DW18 */
+ if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, uint32_t *vol)
+{
+ uint32_t i = 0;
+
+ /* clock - voltage dependency table is empty table */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find the first sclk greater than or equal to the request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *vol = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than the max sclk in the dependency table */
+ *vol = allowed_clock_voltage_table->entries[i - 1].v;
+
+ return 0;
+}
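+
+/*
+ * Worked example for the lookup above (table values assumed for
+ * illustration): with entries {clk, v} = {30000, 900}, {60000, 1100}
+ * and clock = 45000, the first entry with clk >= 45000 is the second,
+ * so *vol = 1100; a request above 60000 falls off the end of the loop
+ * and also yields the last entry's voltage.
+ */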
+
+static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+ uint16_t *lo)
+{
+ uint16_t v_index;
+ bool vol_found = false;
+ *hi = tab->value * VOLTAGE_SCALE;
+ *lo = tab->value * VOLTAGE_SCALE;
+
+ /* SCLK/VDDC Dependency Table has to exist. */
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+ "The SCLK/VDDC Dependency Table does not exist.\n",
+ return -EINVAL);
+
+ if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+ pr_warning("CAC Leakage Table does not exist, using vddc.\n");
+ return 0;
+ }
+
+ /*
+ * Since voltage in the sclk/vddc dependency table is not
+ * necessarily in ascending order because of ELB voltage
+ * patching, loop through entire list to find exact voltage.
+ */
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+ } else {
+ pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ /*
+ * If voltage is not found in the first pass, loop again to
+ * find the best match, equal or higher value.
+ */
+ if (!vol_found) {
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+ } else {
+ pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ if (!vol_found)
+ pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab,
+ SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+ int result;
+
+ result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+ &smc_voltage_tab->StdVoltageHiSidd,
+ &smc_voltage_tab->StdVoltageLoSidd);
+ if (0 != result) {
+ smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+ smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
+
+ return 0;
+}
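+
+/*
+ * Worked example for the VOLTAGE_SCALE conversion above (voltage
+ * assumed for illustration): tab->value = 1150 (mV) becomes
+ * 1150 * 4 = 4600, i.e. the SMC consumes voltages in 0.25 mV units;
+ * PP_HOST_TO_SMC_US() then swaps the value into SMC byte order.
+ */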
+
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddc_voltage_table.entries[count]),
+ &(table->VddcLevel[count]));
+ PP_ASSERT_WITH_CODE(0 == result, "cannot populate SMC VDDC voltage table", return -EINVAL);
+
+ /* GPIO voltage control */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+ table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ table->VddcLevel[count].Smio = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddci_voltage_table.entries[count]),
+ &(table->VddciLevel[count]));
+ PP_ASSERT_WITH_CODE(result == 0, "cannot populate SMC VDDCI voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->mvdd_voltage_table.entries[count]),
+ &table->MvddLevel[count]);
+ PP_ASSERT_WITH_CODE(result == 0, "cannot populate SMC MVDD voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+ table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+ return 0;
+}
+
+
+static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = iceland_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDC voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDCI voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate MVDD voltage table to SMC", return -EINVAL);
+
+ return 0;
+}
+
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU71_Discrete_Ulv *state)
+{
+ uint32_t voltage_response_time, ulv_voltage;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+ PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+ if (ulv_voltage == 0) {
+ data->ulv_supported = false;
+ return 0;
+ }
+
+ if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffset = 0;
+ else
+ /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
+ state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+ } else {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffsetVid = 0;
+ else /* used in SVI2 Mode */
+ state->VddcOffsetVid = (uint8_t)(
+ (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+ * VOLTAGE_VID_OFFSET_SCALE2
+ / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
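+
+/*
+ * Worked example for the SVI2 branch above (voltages assumed for
+ * illustration): with entries[0].v = 1000 (mV) and ulv_voltage = 900,
+ * the delta is 100 mV and VddcOffsetVid = 100 * 100 / 625 = 16,
+ * i.e. sixteen 6.25 mV VID steps.
+ */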
+
+static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_Ulv *ulv_level)
+{
+ return iceland_populate_ulv_level(hwmgr, ulv_level);
+}
+
+static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk the SMC SCLK structure to be populated
+ */
+static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider*/
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
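+
+ /*
+ * Worked example for the FBDIV decode above (register value assumed
+ * for illustration): ul_fb_divider = 0x62000 splits into an integer
+ * part of 0x62000 >> 14 = 24 and a fraction of 0x62000 & 0x3FFF =
+ * 0x2000 (8192 / 16384 = 0.5), i.e. an effective feedback divider
+ * of 24.5.
+ */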
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
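+
+ /*
+ * Worked example for the CLKS/CLKV math above (all numbers assumed
+ * for illustration): with reference_clock = 2500, reference_divider
+ * = 2, speed_spectrum_rate = 40, speed_spectrum_percentage = 50
+ * (0.5%) and fbdiv = 401408, clkS = 2500 * 5 / (2 * 40) = 156 and
+ * clkV = 4 * 50 * 401408 / (156 * 10000) = 51.
+ */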
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+ const struct phm_phase_shedding_limits_table *pl,
+ uint32_t sclk, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ /* use the minimum phase shedding */
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (sclk < pl->entries[i].Sclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Populates a single SMC SCLK structure using the provided engine clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk_activity_level_threshold the activity threshold for this level
+ * @param graphic_level the SMC graphics level structure to be populated
+ */
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
+ &graphic_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for VDDC \
+ engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ graphic_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control)
+ iceland_populate_phase_value_based_on_sclk(hwmgr,
+ hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ engine_clock,
+ &graphic_level->MinVddcPhases);
+
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 100;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (0 == result) {
+ graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+/**
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @param hwmgr the address of the hardware manager
+ */
+int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
+ SMU71_MAX_LEVELS_GRAPHICS;
+
+ SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = iceland_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (highest_pcie_level_enabled + 1))) != 0) {
+ highest_pcie_level_enabled++;
+ }
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+ count++;
+ }
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
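+
+ /*
+ * Worked example for the mask scan above: with
+ * pcie_dpm_enable_mask = 0x6 (levels 1 and 2 enabled),
+ * highest_pcie_level_enabled ends at 2, lowest_pcie_level_enabled
+ * at 1, count stays 0 (bit 2 is already set), and
+ * mid_pcie_level_enabled resolves to min(1 + 1 + 0, 2) = 2.
+ */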
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+ }
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param memory_clock the memory clock to use to populate the structure
+ * @param mclk the SMC MCLK structure to be populated
+ * @param strobe_mode whether strobe mode should be used for the dividers
+ * @param dllStateOn whether the memory DLL is powered on at this level
+ */
+static int iceland_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+ /* ss.Info.speed_spectrum_rate -- in unit of khz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+ /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
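+
+ /*
+ * Worked example for the CLKS/CLKV derivation above (all numbers
+ * assumed for illustration): with reference_clock = 2500,
+ * speed_spectrum_rate = 40, speed_spectrum_percentage = 50 (0.5%)
+ * and freq_nom = 300000, tmp = (300000 / 2500)^2 = 14400,
+ * clks = 2500 * 5 / 40 = 312 and
+ * clkv = ((131 * 50 * 40 / 100) * 14400) / 300000 = 125.
+ */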
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+
+ /* Save the result data to the output memory level structure */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
+static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 47500) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ }
+ } else {
+ if (memory_clock < 65000) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 135000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+ }
+
+ return mc_para_index;
+}
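+
+/*
+ * Worked example for the ratio mapping above (clocks are in 10 kHz
+ * units): in strobe mode, memory_clock = 30000 (300 MHz) maps to
+ * (30000 - 10000) / 2500 = 8; outside strobe mode, 100000 (1 GHz)
+ * maps to (100000 - 60000) / 5000 = 8.
+ */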
+
+static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000) {
+ mc_para_index = 0;
+ } else if (memory_clock >= 80000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+ }
+
+ return mc_para_index;
+}
+
+static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+ uint32_t memory_clock, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (memory_clock < pl->entries[i].Mclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *memory_level
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+ result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+ }
+
+ if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
+ memory_level->MinVddci = memory_level->MinVddc;
+ } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+ result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddci_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinVddci);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control) {
+ iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ memory_clock, &memory_level->MinVddcPhases);
+ }
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+ /* stutter mode is not supported on Iceland */
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ else
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ } else
+ dll_state_on = data->dll_default_on;
+ } else {
+ memory_level->StrobeRatio =
+ iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = iceland_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (0 == result) {
+ memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+ memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+/**
+ * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+ *
+ * @param hwmgr the address of the hardware manager
+ */
+
+int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_address = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+ SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero", return -EINVAL);
+ result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (0 != result) {
+ return result;
+ }
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ /*
+ * In order to prevent MC activity in stutter mode from pushing DPM up,
+ * the UVD change complements this by putting the MCLK in a higher state
+ * by default, such that we are not affected by the up threshold or MCLK DPM latency.
+ */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+ SMU71_Discrete_VoltageLevel *voltage)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first MVDD entry whose clock is at least the requested clock */
+ for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+ if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+ "MVDD Voltage is outside the supported range.", return -EINVAL);
+
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t vddc_phase_shed_control = 0;
+
+ SMU71_Discrete_VoltageLevel voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (data->acpi_vddc)
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+ /* use the reference clock as the ACPI-state SCLK */
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
+ CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10 kHz */
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+ else {
+ if (data->acpi_vddci != 0)
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+ }
+
+ if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
+static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
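+ /*
+ * Read back the DRAM timing values the VBIOS call above just
+ * programmed, so they can be cached in this (engine clock, memory
+ * clock) ARB table entry.
+ */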
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+/**
+ * Set up the parameters for the MC ARB.
+ * This function is to be called from the SetPowerState table.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, otherwise an error code.
+ */
+static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ int result = 0;
+ SMU71_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = iceland_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (0 != result) {
+ break;
+ }
+ }
+ }
+
+ if (0 == result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU71_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value "
+ "in dependency table. Using Graphics DPM level 0!\n");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+ printk(KERN_ERR "[ powerplay ] VBIOS did not find boot memory clock value "
+ "in dependency table. Using Memory DPM level 0!\n");
+ result = 0;
+ }
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->BootVddci = table->BootVddc;
+ else
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ return result;
+}
+
+static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
+ SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend;
+
+ uint32_t i, j;
+
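+ /*
+ * Copy out only the MC register addresses whose validflag bit is set;
+ * bit j of validflag corresponds to mc_reg_address[j].
+ */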
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index into mc_reg_table->address[] is out of bounds", return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+/* Convert register values from driver format to SMC format. */
+static void iceland_convert_mc_registers(
+ const struct iceland_mc_reg_entry *entry,
+ SMU71_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static int iceland_convert_mc_reg_table_entry_to_smc(
+ struct pp_smumgr *smumgr,
+ const uint32_t memory_clock,
+ SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ uint32_t i = 0;
+
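+ /* Find the first entry whose mclk_max covers the requested memory clock. */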
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
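+ /* If no entry covers this clock, fall back to the highest (last) entry. */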
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
+static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = iceland_convert_mc_reg_table_entry_to_smc(
+ hwmgr->smumgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
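+ /* Re-upload is only needed after an overdrive change to the MCLK table. */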
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
+ address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
+ result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for driver state!", return result;);
+
+ return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t count, level;
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+ >= data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+ >= data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+ struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+ const uint16_t *def1, *def2;
+ int i, j, k;
+
+
+ /*
+ * The number of TDP fraction bits was changed from 8 to 7 for
+ * Iceland, as requested by the SMC team.
+ */
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+
+ dpm_table->DTETjOffset = 0;
+
+ dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ /* The following are for new Iceland Multi-input fan/thermal control */
+ if (NULL != ppm) {
+ dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+ dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+ } else {
+ dpm_table->PPM_PkgPwrLimit = 0;
+ dpm_table->PPM_TemperatureLimit = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+ def1 = defaults->bapmti_r;
+ def2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU71_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU71_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *tab)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ tab->SVI2Enable |= VDDC_ON_SVI2;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ tab->SVI2Enable |= VDDCI_ON_SVI2;
+ else
+ tab->MergedVddci = 1;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
+ tab->SVI2Enable |= MVDD_ON_SVI2;
+
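+ /*
+ * Sanity check: VDDC must be SVI2-controlled, and running all three
+ * voltage domains on SVI2 at once is not a supported configuration.
+ */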
+ PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
+ (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+
+ return 0;
+}
+
+/**
+ * Initialize the SMC table and upload it to the SMC.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, otherwise an error code.
+ */
+int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+
+
+ iceland_initialize_power_tune_defaults(hwmgr);
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
+ iceland_populate_smc_voltage_tables(hwmgr, table);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+
+ if (data->ulv_supported) {
+ result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = iceland_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result;);
+
+ result = iceland_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result;);
+
+ result = iceland_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result;);
+
+ result = iceland_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result;);
+
+ result = iceland_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result;);
+
+ result = iceland_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result;);
+
+ result = iceland_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result;);
+
+ /*
+ * Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state), we only need
+ * to populate the ARB settings for the initial state.
+ */
+ result = iceland_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to write ARB settings for the initial state.", return result;);
+
+ result = iceland_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result;);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ result = iceland_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result;);
+
+ result = iceland_populate_smc_initial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+ result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+
+ table->TemperatureLimitHigh =
+ (data->thermal_temp_setting.temperature_high *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ table->TemperatureLimitLow =
+ (data->thermal_temp_setting.temperature_low *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+
+ result = iceland_populate_smc_svi2_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate SVI2 setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+ /* Upload all DPM data (DPM levels, level count, etc.) to SMC memory. */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result;);
+
+ /* Upload the ULV settings to SMC memory. */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ smu_data->smu7_data.ulv_setting_starts,
+ (uint8_t *)&(smu_data->ulv_setting),
+ sizeof(SMU71_Discrete_Ulv),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload ULV settings to SMC memory!", return result;);
+
+ result = iceland_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate the initial MC Reg table!", return result);
+
+ result = iceland_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ return 0;
+}
+
+/**
+ * Set up the fan table to control the fan using the SMC.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, otherwise an error code.
+ */
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+ SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (0 == smu7_data->fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
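+ /*
+ * usPWMMin is in hundredths of a percent; scale it to the fan
+ * controller's 0..duty100 range (64-bit intermediate avoids overflow).
+ */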
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
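+ /*
+ * Slopes for the two linear fan-curve segments; adding 50 before the
+ * divide by 100 rounds the result to the nearest integer.
+ */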
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
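+ /* Thermal parameters are in 0.01 degC; "+ 50" rounds to the nearest degree. */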
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
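+ /*
+ * Convert the fan control cycle delay into reference-clock ticks
+ * (the divisor of 1600 matches the other SMU7-family fan tables).
+ */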
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ /* fan_table.FanControl_GL_Flag = 1; */
+
+ res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+ return res;
+}
+
+
+static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return iceland_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = iceland_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+ result = iceland_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU71_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+ }
+ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ printk("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+uint32_t iceland_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU71_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU71_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU71_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU71_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU71_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU71_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU71_MAX_LEVELS_MVDD;
+ }
+
+ printk("can't get the mac definition of %x\n", value);
+ return 0;
+}
+
+/**
+ * Get the location of various tables inside the FW image.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 if all table locations were read successfully, 1 otherwise.
+ */
+int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->dpm_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ data->soft_regs_start = tmp;
+ smu7_data->soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->mc_reg_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->fan_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->arb_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ hwmgr->microcode_version_info.SMC = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, UlvSettings),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->ulv_setting_starts = tmp;
+ }
+
+ error |= (0 != result);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+ ? address : table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct iceland_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++) {
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+ }
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+/**
+ * The VBIOS omits some information to reduce size; recover it here.
+ * 1. When we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written
+ *    to mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) need to be
+ *    written to mmMC_PMG_CMD_MRS/_LP[15:0].
+ * 2. When we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be
+ *    written to mmMC_PMG_CMD_MRS1/_LP[15:0].
+ * 3. These data need to be set for each clock range.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param table the address of the MC register table.
+ * @return 0 on success, -EINVAL if the VramInfo table is invalid.
+ */
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
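+ /*
+ * j starts at table->last: the EMRS/MRS entries synthesized below are
+ * appended after the registers copied from the VBIOS table.
+ */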
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j;
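+ /*
+ * Mark a register as valid (worth uploading per memory-clock level)
+ * only if its value differs between at least two adjacent entries.
+ */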
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = iceland_get_memory_module_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (0 == result)
+ result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (0 == result) {
+ iceland_set_s0_mc_reg_index(ni_table);
+ result = iceland_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (0 == result)
+ iceland_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
+bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON));
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
index 8bc38cb17b7f..13c8dbbccaf2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
@@ -20,17 +20,21 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#ifndef _ICELAND_SMC_H
+#define _ICELAND_SMC_H
-#ifndef _TONGA_CLOCK_POWER_GATING_H_
-#define _TONGA_CLOCK_POWER_GATING_H_
+#include "smumgr.h"
-#include "tonga_hwmgr.h"
-#include "pp_asicblocks.h"
-extern int tonga_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern int tonga_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int tonga_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int tonga_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-extern int tonga_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-extern int tonga_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
-#endif /* _TONGA_CLOCK_POWER_GATING_H_ */
+int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
+uint32_t iceland_get_mac_definition(uint32_t value);
+int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
+int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
new file mode 100644
index 000000000000..eeafefc4acba
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+
+#include "smumgr.h"
+#include "iceland_smumgr.h"
+#include "pp_debug.h"
+#include "smu_ucode_xfer_vi.h"
+#include "ppsmc.h"
+#include "smu/smu_7_1_1_d.h"
+#include "smu/smu_7_1_1_sh_mask.h"
+#include "cgs_common.h"
+#include "iceland_smc.h"
+
+#define ICELAND_SMC_SIZE 0x20000
+
+static int iceland_start_smc(struct pp_smumgr *smumgr)
+{
+ SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+ return 0;
+}
+
+static void iceland_reset_smc(struct pp_smumgr *smumgr)
+{
+ SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL,
+ rst_reg, 1);
+}
+
+
+static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
+{
+ SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0,
+ ck_disable, 1);
+}
+
+static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+{
+ SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0,
+ ck_disable, 0);
+}
+
+static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+{
+ /* set the SMC instruction start point to 0x0 */
+ smu7_program_jump_on_start(smumgr);
+
+ /* enable smc clock */
+ iceland_start_smc_clock(smumgr);
+
+ /* de-assert reset */
+ iceland_start_smc(smumgr);
+
+ SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+ INTERRUPTS_ENABLED, 1);
+
+ return 0;
+}
+
+
+static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
+ uint32_t length, const uint8_t *src,
+ uint32_t limit, uint32_t start_addr)
+{
+ uint32_t byte_count = length;
+ uint32_t data;
+
+ PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+ cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+
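+ /*
+ * Pack each group of four bytes MSB-first: the SMC's auto-increment
+ * data port expects big-endian 32-bit words.
+ */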
+ while (byte_count >= 4) {
+ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+ src += 4;
+ byte_count -= 4;
+ }
+
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC firmware size must be divisible by 4.", return -EINVAL);
+
+ return 0;
+}
+
+
+static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
+{
+ uint32_t val;
+ struct cgs_firmware_info info = {0};
+
+ if (smumgr == NULL || smumgr->device == NULL)
+ return -EINVAL;
+
+ /* load SMC firmware */
+ cgs_get_firmware_info(smumgr->device,
+ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+
+ if (info.image_size & 3) {
+ pr_err("[ powerplay ] SMC ucode is not 4-byte aligned\n");
+ return -EINVAL;
+ }
+
+ if (info.image_size > ICELAND_SMC_SIZE) {
+ pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n");
+ return -EINVAL;
+ }
+
+ /* wait for smc boot up */
+ SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ RCU_UC_EVENTS, boot_seq_done, 0);
+
+ /* clear firmware interrupt enable flag */
+ val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ ixSMC_SYSCON_MISC_CNTL);
+ cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ ixSMC_SYSCON_MISC_CNTL, val | 1);
+
+ /* stop smc clock */
+ iceland_stop_smc_clock(smumgr);
+
+ /* reset smc */
+ iceland_reset_smc(smumgr);
+ iceland_upload_smc_firmware_data(smumgr, info.image_size,
+ (uint8_t *)info.kptr, ICELAND_SMC_SIZE,
+ info.ucode_start_address);
+
+ return 0;
+}
+
+static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
+ uint32_t firmwareType)
+{
+ return 0;
+}
+
+static int iceland_start_smu(struct pp_smumgr *smumgr)
+{
+ int result;
+
+ result = iceland_smu_upload_firmware_image(smumgr);
+ if (result)
+ return result;
+ result = iceland_smu_start_smc(smumgr);
+ if (result)
+ return result;
+
+ if (!smu7_is_smc_ram_running(smumgr)) {
+ printk("SMU not running, uploading firmware again\n");
+ result = iceland_smu_upload_firmware_image(smumgr);
+ if (result)
+ return result;
+
+ result = iceland_smu_start_smc(smumgr);
+ if (result)
+ return result;
+ }
+
+ result = smu7_request_smu_load_fw(smumgr);
+
+ return result;
+}
+
+/**
+ * Initialize the Iceland SMU manager backend.
+ *
+ * @param smumgr the address of the powerplay SMU manager.
+ * @return 0 on success, -EINVAL if common SMU7 initialization fails.
+ */
+static int iceland_smu_init(struct pp_smumgr *smumgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ if (smu7_init(smumgr))
+ return -EINVAL;
+
+ for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
+ smu_data->activity_target[i] = 30;
+
+ return 0;
+}
+
+static const struct pp_smumgr_func iceland_smu_funcs = {
+ .smu_init = &iceland_smu_init,
+ .smu_fini = &smu7_smu_fini,
+ .start_smu = &iceland_start_smu,
+ .check_fw_load_finish = &smu7_check_fw_load_finish,
+ .request_smu_load_fw = &smu7_reload_firmware,
+ .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
+ .send_msg_to_smc = &smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .get_offsetof = iceland_get_offsetof,
+ .process_firmware_header = iceland_process_firmware_header,
+ .init_smc_table = iceland_init_smc_table,
+ .update_sclk_threshold = iceland_update_sclk_threshold,
+ .thermal_setup_fan_table = iceland_thermal_setup_fan_table,
+ .populate_all_graphic_levels = iceland_populate_all_graphic_levels,
+ .populate_all_memory_levels = iceland_populate_all_memory_levels,
+ .get_mac_definition = iceland_get_mac_definition,
+ .initialize_mc_reg_table = iceland_initialize_mc_reg_table,
+ .is_dpm_running = iceland_is_dpm_running,
+};
+
+int iceland_smum_init(struct pp_smumgr *smumgr)
+{
+ struct iceland_smumgr *iceland_smu = NULL;
+
+ iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL);
+
+ if (iceland_smu == NULL)
+ return -ENOMEM;
+
+ smumgr->backend = iceland_smu;
+ smumgr->smumgr_funcs = &iceland_smu_funcs;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
new file mode 100644
index 000000000000..8eae01b37c40
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Huang Rui <ray.huang@amd.com>
+ *
+ */
+
+#ifndef _ICELAND_SMUMGR_H_
+#define _ICELAND_SMUMGR_H_
+
+
+#include "smu7_smumgr.h"
+#include "pp_endian.h"
+#include "smu71_discrete.h"
+
+struct iceland_pt_defaults {
+ uint8_t svi_load_line_en;
+ uint8_t svi_load_line_vddc;
+ uint8_t tdc_vddc_throttle_release_limit_perc;
+ uint8_t tdc_mawt;
+ uint8_t tdc_waterfall_ctl;
+ uint8_t dte_ambient_temp_base;
+ uint32_t display_cac;
+ uint32_t bamp_temp_gradient;
+ uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+ uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
+};
+
+struct iceland_mc_reg_entry {
+ uint32_t mclk_max;
+ uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct iceland_mc_reg_table {
+ uint8_t last; /* number of registers*/
+ uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/
+ uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. bit0->address[0], bit1->address[1], etc.*/
+ struct iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct iceland_smumgr {
+ struct smu7_smumgr smu7_data;
+ struct SMU71_Discrete_DpmTable smc_state_table;
+ struct SMU71_Discrete_PmFuses power_tune_table;
+ struct SMU71_Discrete_Ulv ulv_setting;
+ const struct iceland_pt_defaults *power_tune_defaults;
+ SMU71_Discrete_MCRegisters mc_regs;
+ struct iceland_mc_reg_table mc_reg_table;
+ uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS];
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
new file mode 100644
index 000000000000..4ccc0b72324d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -0,0 +1,2287 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "polaris10_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "polaris10_smumgr.h"
+#include "pppcielanes.h"
+
+#include "smu_ucode_xfer_vi.h"
+#include "smu74_discrete.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "polaris10_pwrvirus.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VDDC_VDDCI_DELTA 200
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+ { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
+ {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
+
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ *voltage = *mvdd = 0;
+
+ /* the clock-voltage dependency table is empty */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+ /* find the first sclk not lower than the requested clock */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than the max sclk in the dependency table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i-1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i - 1].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+ else if (dep_table->entries[i - 1].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
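+
+/* Illustrative note (a sketch of the packing above, not a definitive
+ * layout): the returned MinVoltage word carries three fields -- VDDC at
+ * VDDC_SHIFT, VDDCI at VDDCI_SHIFT and the phase count at PHASES_SHIFT,
+ * each voltage pre-multiplied by VOLTAGE_SCALE -- so a consumer
+ * recovers e.g. VDDC as ((voltage >> VDDC_SHIFT) & field_mask) /
+ * VOLTAGE_SCALE, where field_mask depends on the field widths.
+ */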
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
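+
+/* Worked example (illustrative): scale_fan_gain_settings() maps a
+ * percentage onto what appears to be a 4.12 fixed-point fraction:
+ * 100 -> 100 * 4096 / 100 = 4096 (1.0), 25 -> 1024 (0.25).
+ */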
+
+static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ int i, j, k;
+ const uint16_t *pdef1;
+ const uint16_t *pdef2;
+
+ table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+ table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+ pdef1 = defaults->BAPMTI_R;
+ pdef2 = defaults->BAPMTI_RC;
+
+ for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU74_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU74_DTE_SINKS; k++) {
+ table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+ table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ fuse_table_offset +
+ offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+/* TODO: move to hwmgr */
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd, lo_sidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
+static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+ if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ if (0 != polaris10_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Min and Max Vid Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count, level;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ count = data->mvdd_voltage_table.count;
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; level++) {
+ table->SmioTable2.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[level].Smio =
+ (uint8_t) level;
+ table->Smio[level] |=
+ data->mvdd_voltage_table.entries[level].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count, level;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ count = data->vddci_voltage_table.count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; ++level) {
+ table->SmioTable1.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+ table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+ return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ /* The table is already byte-swapped, so to use a value from it
+ * we need to swap it back.
+ * We populate the vddc CAC data into the BapmVddc table
+ * in both split and merged mode.
+ */
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+
+static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ polaris10_populate_smc_vddci_table(hwmgr, table);
+ polaris10_populate_smc_mvdd_table(hwmgr, table);
+ polaris10_populate_cac_table(hwmgr, table);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_Ulv *state)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
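+
+/* Worked example (illustrative, assuming the ULV offset is in mV): with
+ * VOLTAGE_VID_OFFSET_SCALE1 = 625 and VOLTAGE_VID_OFFSET_SCALE2 = 100,
+ * the conversion above is offset * 100 / 625, i.e. one VID step per
+ * 6.25 mV, so an offset of 50 yields a VddcOffsetVid of 8.
+ */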
+
+static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+
+/* TODO: move to hwmgr */
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+
+static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ uint32_t i, ref_clk;
+
+ struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+ ref_clk = smu7_get_xclk(hwmgr);
+
+ if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+ table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+ return;
+ }
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+ smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+ table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+ table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+}
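+
+/* Worked example (illustrative, assuming POSTDIV_DIV_BY_16 encodes a
+ * right shift of 4): the fallback path above derives each boundary as
+ * freq = (ref_clk * fcw) >> postdiv, so with ref_clk = 100 in the units
+ * smu7_get_xclk() returns and fcw_trans_upper = 160, the upper boundary
+ * is (100 * 160) >> 4 = 1000 in the same units.
+ */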
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk_setting the SMC SCLK structure to be populated
+*/
+static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_clock_dividers_ai dividers;
+ uint32_t ref_clock;
+ uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+ uint8_t i;
+ int result;
+ uint64_t temp;
+
+ sclk_setting->SclkFrequency = clock;
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
+ if (result == 0) {
+ sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+ sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+ sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+ sclk_setting->PllRange = dividers.ucSclkPllRange;
+ sclk_setting->Sclk_slew_rate = 0x400;
+ sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+ sclk_setting->Pcc_down_slew_rate = 0xffff;
+ sclk_setting->SSc_En = dividers.ucSscEnable;
+ sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+ sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+ sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+ return result;
+ }
+
+ ref_clock = smu7_get_xclk(hwmgr);
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ if (clock > smu_data->range_table[i].trans_lower_frequency
+ && clock <= smu_data->range_table[i].trans_upper_frequency) {
+ sclk_setting->PllRange = i;
+ break;
+ }
+ }
+
+ sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw_frac = temp & 0xffff;
+
+ pcc_target_percent = 10; /* Hardcode 10% for now. */
+ pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+ sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+ ss_target_percent = 2; /* Hardcode 2% for now. */
+ sclk_setting->SSc_En = 0;
+ if (ss_target_percent) {
+ sclk_setting->SSc_En = 1;
+ ss_target_freq = clock - (clock * ss_target_percent / 100);
+ sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw1_frac = temp & 0xffff;
+ }
+
+ return 0;
+}
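+
+/* Worked example (illustrative): the manual path above builds a 16.16
+ * fixed-point frequency control word. With ref_clock = 300,
+ * clock = 1000 and a postdiv shift of 2: Fcw_int = (1000 << 2) / 300
+ * = 13 and Fcw_frac = (((1000 << 2) << 16) / 300) & 0xffff = 0x5555,
+ * i.e. a ratio of roughly 13.333.
+ */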
+
+/**
+* Populates a single SMC graphics level using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk_al_threshold the activity level threshold for this level
+* @param level the SMC graphics level structure to be populated
+*/
+
+static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU74_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMU_SclkSetting curr_sclk_setting = { 0 };
+
+ result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+ /* populate graphics levels */
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ &level->MinVoltage, &mvdd);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+ level->ActivityLevel = sclk_al_threshold;
+
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+ */
+ if (data->update_up_hyst)
+ level->UpHyst = (uint8_t)data->up_hyst;
+ if (data->update_down_hyst)
+ level->DownHyst = (uint8_t)data->down_hyst;
+
+ level->SclkSetting = curr_sclk_setting;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+ return 0;
+}
+
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param hwmgr the address of the hardware manager
+*/
+int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+ SMU74_MAX_LEVELS_GRAPHICS;
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t hightest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+ result = polaris10_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result)
+ return result;
+
+ /* Make sure only DPM levels 0-1 have a Deep Sleep Div ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport))
+ smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (hightest_pcie_level_enabled + 1))) != 0))
+ hightest_pcie_level_enabled++;
+
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < hightest_pcie_level_enabled) &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ hightest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to hightest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* level count will send to smc once at init smc table and never change */
+ result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
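+
+/* Worked example (illustrative): with a pcie_dpm_enable_mask of 0b111
+ * the scans above give lowest = 0, hightest = 2 and mid = 1, so
+ * levels[0] gets PCIe level 0, levels[1] gets level 1 and every higher
+ * sclk level gets level 2. Note the scans assume the enable mask is
+ * contiguous and starts at bit 0.
+ */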
+
+
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ struct cgs_display_info info = {0, 0, NULL};
+ uint32_t mclk_stutter_mode_threshold = 40000;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ &mem_level->MinVoltage, &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->MclkFrequency = clock;
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+/**
+* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
+*
+* @param hwmgr the address of the hardware manager
+*/
+int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+ SMU74_MAX_LEVELS_MEMORY;
+ struct SMU74_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = polaris10_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (i == dpm_table->mclk_table.count - 1) {
+ levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+ levels[i].EnabledForActivity = 1;
+ }
+ if (result)
+ return result;
+ }
+
+ /* To prevent MC activity in stutter mode from pushing DPM up,
+ * the UVD change complements this by putting the MCLK in a higher
+ * state by default, such that we are not affected by the up
+ * threshold or MCLK DPM latency.
+ */
+ levels[0].ActivityLevel = 0x1f;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ /* level count will send to smc once at init smc table and never change */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param hwmgr the address of the hardware manager
+* @param mclk the MCLK value used to decide whether MVDD should be high or low
+* @param smio_pat the SMIO pattern structure to be populated
+*/
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first mvdd entry whose clock is not lower than the request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint32_t sclk_frequency;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ sclk_frequency,
+ &table->ACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value "
+ "in Clock Dependency Table",
+ );
+
+ result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
+ PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ table->ACPILevel.DeepSleepDivId = 0;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ &table->MemoryACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value "
+ "in Clock Dependency Table",
+ );
+
+ us_mvdd = 0;
+ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!polaris10_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+ table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ table->MemoryACPILevel.StutterEnable = false;
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->VceLevel[count].MinVoltage |=
+ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burst_time;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burst_time;
+
+ return 0;
+}
+
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
+ for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+ result = polaris10_populate_memory_timing_parameters(hwmgr,
+ hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+ hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result == 0)
+ result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
+ if (result != 0)
+ return result;
+ }
+ }
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU74_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+ }
+
+ return result;
+}
+
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ hw_data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ hw_data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ /* Read the SMU efuse to calculate RO and determine
+ * whether the part is SS or FF; if RO >= 1660MHz, the part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (67 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ min = 1000;
+ max = 2300;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
+
+ ro = efuse * (max - min) / 255 + min;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) /
+ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) /
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+ } else {
+ volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) /
+ (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+ volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) /
+ (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+ }
+
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
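+
+/* Worked example (illustrative): for Polaris10 the 8-bit efuse value is
+ * mapped linearly onto [1000, 2300], so an efuse byte of 128 gives
+ * ro = 128 * (2300 - 1000) / 255 + 1000 = 1652, which is below the
+ * 1660 threshold quoted above, so such a part would be SS.
+ */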
+
+/**
+* Populates the SMC VRConfig field in DPM table.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
+ offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
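+
+/* Illustrative note (an assumption about the VRCONF_* definitions): the
+ * VRConfig word packs one small field per rail -- VDDGFX at
+ * VRCONF_VDDGFX_SHIFT, VDDCI at VRCONF_VDDCI_SHIFT, MVDD at
+ * VRCONF_MVDD_SHIFT and VDDC in the low bits -- so the ORs above land
+ * in disjoint bit ranges.
+ */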
+
+
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ int result = 0;
+ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+ uint32_t tmp, i;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return result;
+
+ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+ if (0 == result) {
+ table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+ table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+ table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+ table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+ table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+ table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+ table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+ table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+ table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
+ table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
+ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+ AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+ AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+ AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+ AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+ AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+ AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+ AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
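+		/* Pack the per-level fuse data: cks_voffset is scaled by 100/625,
+		 * i.e. into what is presumably the SMC's 6.25mV step, and
+		 * sclk_offset is divided by 100 (assumed 10kHz to MHz) before
+		 * being byte-swapped for the SMC.
+		 */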
+ for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+ AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+ AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+ }
+
+ result = smu7_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+ &tmp, SMC_RAM_END);
+
+ smu7_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_meanNsigma,
+ sizeof(AVFS_meanNsigma_t),
+ SMC_RAM_END);
+
+ result = smu7_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+ &tmp, SMC_RAM_END);
+ smu7_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_SclkOffset,
+ sizeof(AVFS_Sclk_Offset_t),
+ SMC_RAM_END);
+
+ data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+		data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1);
+ }
+ return result;
+}
+
+
+/**
+* Initialize the ARB DRAM timing table's index field.
+*
+* @param smumgr the address of the powerplay SMU manager.
+* @return 0 on success, or the error code from the SMC SRAM access.
+*/
+static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
+	 * is the field 'current'.
+	 * This solution is ugly, but we never write the whole table, only
+ * individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &polaris10_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success, or the error code from the failing step.
+*/
+int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ pp_atomctrl_clock_dividers_vi dividers;
+
+ polaris10_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+ polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (hw_data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = polaris10_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+ }
+
+ result = polaris10_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = polaris10_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = polaris10_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = polaris10_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = polaris10_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = polaris10_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = polaris10_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = polaris10_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = polaris10_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = polaris10_populate_smc_initailial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ result = polaris10_populate_avfs_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
+ table->CurrSclkPllRange = 0xff;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = polaris10_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+		/* For polarity, read GPIOPAD_A at the assigned GPIO pin:
+		 * the VBIOS programs this register to the 'inactive state',
+		 * so the driver can derive the 'active state' from it and
+		 * program the SMU with the correct polarity.
+		 */
+		table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+					& (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1 : 0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+ && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ /* Populate BIF_SCLK levels into SMC DPM table */
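+	/* Entry 0 feeds the ULV state; entries 1..count map onto
+	 * LinkLevel[0..count-1], hence the loop running count + 1 times.
+	 */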
+ for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
+ PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+ if (i == 0)
+ table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ else
+ table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ }
+
+ for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
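+	/* The SMC stores multi-byte fields in big-endian order, so convert
+	 * them from host order before the upload.
+	 */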
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all DPM data (DPM levels, level counts, etc.) to SMC memory. */
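+	/* The copy starts at SystemFlags and excludes 3 * sizeof(SMU74_PIDController)
+	 * bytes; the PID controller blocks are assumed to precede SystemFlags in
+	 * the table layout and are left untouched.
+	 */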
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = polaris10_init_arb_table_index(hwmgr->smumgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = polaris10_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+ return 0;
+}
+
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+ return polaris10_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return 0;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
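+	/* The droop-settings message status is not checked; only the
+	 * EnableAvfs reply below decides this function's return value.
+	 */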
+ ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
+ 0 : -1;
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
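+	/* usPWMMin is presumably in hundredths of a percent, so the
+	 * minimum fan duty is usPWMMin * duty100 / 10000.
+	 */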
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
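+	/* Fan-curve slopes between the Min/Med and Med/High points; the
+	 * 16x factor is assumed to be the SMC's fixed-point scaling and
+	 * the +50 rounds the final divide by 100 to nearest.
+	 */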
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
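+	/* Trip temperatures are presumably kept in centidegrees by the
+	 * thermal controller; the +50 rounds them to whole degrees.
+	 */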
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
+
+static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
+ UvdBootLevel);
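+	/* The indirect SMC interface transfers whole dwords, so round the
+	 * byte offset down to its containing 32-bit word and patch only the
+	 * byte lane that holds UvdBootLevel (the top byte here).
+	 */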
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+
+static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ int max_entry, i;
+
+ max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+ SMU74_MAX_LEVELS_LINK :
+ pcie_table->count;
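+	/* Clamp to the SMC's link-level capacity so an oversized pcie_table
+	 * cannot overflow bif_sclk_table.
+	 */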
+ /* Setup BIF_SCLK levels */
+ for (i = 0; i < max_entry; i++)
+ smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+ return 0;
+}
+
+int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ polaris10_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ polaris10_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ polaris10_update_samu_smc_table(hwmgr);
+ break;
+	case SMU_BIF_TABLE:
+		polaris10_update_bif_smc_table(hwmgr);
+		break;
+	default:
+		break;
+ }
+ return 0;
+}
+
+int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to update SCLK threshold!", return result);
+
+ result = polaris10_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU74_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU74_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU74_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
+		}
+		break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ printk("cant't get the offset of type %x member %x \n", type, member);
+ return 0;
+}
+
+uint32_t polaris10_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU74_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU74_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU74_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU74_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU74_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU74_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU74_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU74_MAX_LEVELS_MVDD;
+ case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+ return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+ }
+
+ printk("cant't get the mac of %x \n", value);
+ return 0;
+}
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success, -1 if any required table location could not be read.
+*/
+int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.soft_regs_start = tmp;
+
+ error |= (0 != result);
+
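+	/* Unlike the other header fields, a failed mcRegisterTable read is
+	 * not folded into the error flag; the table is presumably optional.
+	 */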
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return 1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
new file mode 100644
index 000000000000..5ade3cea8bb7
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef POLARIS10_SMC_H
+#define POLARIS10_SMC_H
+
+#include "smumgr.h"
+
+
+int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
+int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
+int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
+uint32_t polaris10_get_mac_definition(uint32_t value);
+int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
+bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5dba7c509710..5c3598ab7dae 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -38,16 +38,11 @@
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "cgs_common.h"
+#include "polaris10_smc.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
-#define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_SCALE 4
-
-/* Microcode file is stored in this buffer */
-#define BUFFER_SIZE 80000
-#define MAX_STRING_SIZE 15
-#define BUFFER_SIZETWO 131072 /* 128 *1024 */
-
-#define SMC_RAM_END 0x40000
+#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
/* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */
@@ -62,572 +57,9 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
{ 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
};
-static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
- {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smcAddress the address in the SMC RAM to access.
-*/
-static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
-{
- PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
- PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
- return 0;
-}
-
-/**
-* Copy bytes from SMC RAM space into driver memory.
-*
-* @param smumgr the address of the powerplay SMU manager.
-* @param smc_start_address the start address in the SMC RAM to copy bytes from
-* @param src the byte array to copy the bytes to.
-* @param byte_count the number of bytes to copy.
-*/
-int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
-{
- uint32_t data;
- uint32_t addr;
- uint8_t *dest_byte;
- uint8_t i, data_byte[4] = {0};
- uint32_t *pdata = (uint32_t *)&data_byte;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1;);
- PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
-
- addr = smc_start_address;
-
- while (byte_count >= 4) {
- polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
-
- *dest = PP_SMC_TO_HOST_UL(data);
-
- dest += 1;
- byte_count -= 4;
- addr += 4;
- }
-
- if (byte_count) {
- polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
- *pdata = PP_SMC_TO_HOST_UL(data);
- /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
- dest_byte = (uint8_t *)dest;
- for (i = 0; i < byte_count; i++)
- dest_byte[i] = data_byte[i];
- }
-
- return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param pSmuMgr the address of the powerplay SMU manager.
-* @param smc_start_address the start address in the SMC RAM to copy bytes to.
-* @param src the byte array to copy the bytes from.
-* @param byte_count the number of bytes to copy.
-*/
-int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
- const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
- int result;
- uint32_t data = 0;
- uint32_t original_data;
- uint32_t addr = 0;
- uint32_t extra_shift;
-
- PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1);
- PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
-
- addr = smc_start_address;
-
- while (byte_count >= 4) {
- /* Bytes are written into the SMC addres space with the MSB first. */
- data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
-
- result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
- if (0 != result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
-
- data = 0;
-
- result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
- if (0 != result)
- return result;
-
-
- original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
-
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- /* Bytes are written into the SMC addres space with the MSB first. */
- data = (0x100 * data) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
-
- data |= (original_data & ~((~0UL) << extra_shift));
-
- result = polaris10_set_smc_sram_address(smumgr, addr, limit);
-
- if (0 != result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
- }
-
- return 0;
-}
-
-
-static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr)
-{
- static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
-
- polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param smumgr the address of the powerplay hardware manager.
-*/
-bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
-{
- uint32_t efuse;
-
- efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
- efuse &= 0x00000001;
- if (efuse)
- return true;
-
- return false;
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
- int ret;
-
- if (!polaris10_is_smc_ram_running(smumgr))
- return -1;
-
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
-
- if (ret != 1)
- printk("\n failed to send pre message %x ret is %d \n", msg, ret);
-
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
-
- if (ret != 1)
- printk("\n failed to send message %x ret is %d \n", msg, ret);
-
- return 0;
-}
-
-
-/**
-* Send a message to the SMC, and do not wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return Always return 0.
-*/
-int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
-{
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-/**
-* Send a message to the SMC with parameter
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
- if (!polaris10_is_smc_ram_running(smumgr)) {
- return -1;
- }
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return polaris10_send_msg_to_smc(smumgr, msg);
-}
-
-
-/**
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
-{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return polaris10_send_msg_to_smc_without_waiting(smumgr, msg);
-}
-
-int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
-{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
-
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
- printk("Failed to send Message.\n");
-
- return 0;
-}
-
-/**
-* Wait until the SMC is doing nithing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr)
-{
- /* If the SMC is not even on it qualifies as inactive. */
- if (!polaris10_is_smc_ram_running(smumgr))
- return -1;
-
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
- return 0;
-}
-
-
-/**
-* Upload the SMC firmware to the SMC microcontroller.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param pFirmware the data structure containing the various sections of the firmware.
-*/
-static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
-{
- uint32_t byte_count = length;
-
- PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1);
+static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
+ 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
-
- for (; byte_count >= 4; byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
-
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
- PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -1);
-
- return 0;
-}
-
-static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type)
-{
- enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
- switch (fw_type) {
- case UCODE_ID_SMU:
- result = CGS_UCODE_ID_SMU;
- break;
- case UCODE_ID_SMU_SK:
- result = CGS_UCODE_ID_SMU_SK;
- break;
- case UCODE_ID_SDMA0:
- result = CGS_UCODE_ID_SDMA0;
- break;
- case UCODE_ID_SDMA1:
- result = CGS_UCODE_ID_SDMA1;
- break;
- case UCODE_ID_CP_CE:
- result = CGS_UCODE_ID_CP_CE;
- break;
- case UCODE_ID_CP_PFP:
- result = CGS_UCODE_ID_CP_PFP;
- break;
- case UCODE_ID_CP_ME:
- result = CGS_UCODE_ID_CP_ME;
- break;
- case UCODE_ID_CP_MEC:
- result = CGS_UCODE_ID_CP_MEC;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = CGS_UCODE_ID_CP_MEC_JT1;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = CGS_UCODE_ID_CP_MEC_JT2;
- break;
- case UCODE_ID_RLC_G:
- result = CGS_UCODE_ID_RLC_G;
- break;
- default:
- break;
- }
-
- return result;
-}
-
-static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr)
-{
- int result = 0;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- struct cgs_firmware_info info = {0};
-
- if (smu_data->security_hard_key == 1)
- cgs_get_firmware_info(smumgr->device,
- polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
- else
- cgs_get_firmware_info(smumgr->device,
- polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
-
- /* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/
- result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE);
-
- return result;
-}
-
-/**
-* Read a 32bit value from the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smcAddress the address in the SMC RAM to access.
-* @param value and output parameter for the data read from the SMC SRAM.
-*/
-int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
-{
- int result;
-
- result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
-
- if (result)
- return result;
-
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
- return 0;
-}
-
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value to write to the SMC SRAM.
-*/
-int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
-{
- int result;
-
- result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
-
- if (result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
-
- return 0;
-}
-
-
-int polaris10_smu_fini(struct pp_smumgr *smumgr)
-{
- if (smumgr->backend) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- return 0;
-}
-
-/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
-static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type)
-{
- uint32_t result = 0;
-
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- result = UCODE_ID_SDMA0_MASK;
- break;
- case UCODE_ID_SDMA1:
- result = UCODE_ID_SDMA1_MASK;
- break;
- case UCODE_ID_CP_CE:
- result = UCODE_ID_CP_CE_MASK;
- break;
- case UCODE_ID_CP_PFP:
- result = UCODE_ID_CP_PFP_MASK;
- break;
- case UCODE_ID_CP_ME:
- result = UCODE_ID_CP_ME_MASK;
- break;
- case UCODE_ID_CP_MEC_JT1:
- case UCODE_ID_CP_MEC_JT2:
- result = UCODE_ID_CP_MEC_MASK;
- break;
- case UCODE_ID_RLC_G:
- result = UCODE_ID_RLC_G_MASK;
- break;
- default:
- printk("UCode type is out of range! \n");
- result = 0;
- }
-
- return result;
-}
-
-/* Populate one firmware image to the data structure */
-
-static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- int result = 0;
- struct cgs_firmware_info info = {0};
-
- result = cgs_get_firmware_info(smumgr->device,
- polaris10_convert_fw_type_to_cgs(fw_type),
- &info);
-
- if (!result) {
- entry->version = info.version;
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
- entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = info.image_size;
- entry->num_register_entries = 0;
- }
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
-
- return 0;
-}
-
-static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t fw_to_load;
-
- int result = 0;
- struct SMU_DRAMData_TOC *toc;
-
- if (!smumgr->reload_fw) {
- printk(KERN_INFO "[ powerplay ] skip reloading...\n");
- return 0;
- }
-
- if (smu_data->soft_regs_start)
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
- smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
- 0x0);
-
- polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
- polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)smu_data->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
- PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
-
- polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
- polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK
- + UCODE_ID_SDMA0_MASK
- + UCODE_ID_SDMA1_MASK
- + UCODE_ID_CP_CE_MASK
- + UCODE_ID_CP_ME_MASK
- + UCODE_ID_CP_PFP_MASK
- + UCODE_ID_CP_MEC_MASK;
-
- if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
- printk(KERN_ERR "Fail to Request SMU Load uCode");
-
- return result;
-}
-
-/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
-static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type);
- uint32_t ret;
- /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
- ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
- smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
- fw_mask, fw_mask);
-
- return ret;
-}
-
-static int polaris10_reload_firmware(struct pp_smumgr *smumgr)
-{
- return smumgr->smumgr_funcs->start_smu(smumgr);
-}
static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
{
@@ -669,7 +101,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@@ -697,7 +129,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_size = sizeof(avfs_graphics_level_polaris10);
u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
- PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
&dpm_table_start, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@@ -708,14 +140,14 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
return -1);
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
(uint8_t *)(&avfs_graphics_level_polaris10),
graphics_level_size, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@@ -723,7 +155,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
return -1);
@@ -732,7 +164,7 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
- PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1);
@@ -793,7 +225,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = polaris10_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result != 0)
return result;
@@ -812,7 +244,7 @@ static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
/* Call Test SMU message with 0x20000 offset to trigger SMU start */
- polaris10_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(smumgr);
/* Wait done bit to be set */
/* Check pass/failed indicator */
@@ -853,12 +285,12 @@ static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
- result = polaris10_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
- polaris10_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(smumgr);
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
@@ -881,10 +313,10 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
bool SMU_VFT_INTACT;
/* Only start SMC if SMC RAM is not running */
- if (!polaris10_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(smumgr)) {
SMU_VFT_INTACT = false;
smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
- smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+ smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
/* Check if SMU is running in protected mode */
if (smu_data->protected_mode == 0) {
@@ -894,7 +326,7 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
/* If failed, try with different security Key. */
if (result != 0) {
- smu_data->security_hard_key ^= 1;
+ smu_data->smu7_data.security_hard_key ^= 1;
result = polaris10_start_smu_in_protection_mode(smumgr);
}
}
@@ -906,89 +338,69 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr)
} else
SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
- smu_data->post_initial_boot = true;
polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
- polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
- &(smu_data->soft_regs_start), 0x40000);
+ smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+ &(smu_data->smu7_data.soft_regs_start), 0x40000);
- result = polaris10_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(smumgr);
return result;
}
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+ uint32_t efuse;
+
+ efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+ efuse &= 0x00000001;
+ if (efuse)
+ return true;
+
+ return false;
+}
+
static int polaris10_smu_init(struct pp_smumgr *smumgr)
{
- struct polaris10_smumgr *smu_data;
- uint8_t *internal_buf;
- uint64_t mc_addr = 0;
- /* Allocate memory for backend private data */
- smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- smu_data->header_buffer.data_size =
- ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- smu_data->smu_buffer.data_size = 200*4096;
- smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
-/* Allocate FW image data structure and header buffer and
- * send the header buffer address to SMU */
- smu_allocate_memory(smumgr->device,
- smu_data->header_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &smu_data->header_buffer.kaddr,
- &smu_data->header_buffer.handle);
-
- smu_data->header = smu_data->header_buffer.kaddr;
- smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != smu_data->header),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)smu_data->header_buffer.handle);
- return -1);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ int i;
-/* Allocate buffer for SMU internal buffer and send the address to SMU.
- * Iceland SMU does not need internal buffer.*/
- smu_allocate_memory(smumgr->device,
- smu_data->smu_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &smu_data->smu_buffer.kaddr,
- &smu_data->smu_buffer.handle);
-
- internal_buf = smu_data->smu_buffer.kaddr;
- smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != internal_buf),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)smu_data->smu_buffer.handle);
- return -1;);
+ if (smu7_init(smumgr))
+ return -EINVAL;
if (polaris10_is_hw_avfs_present(smumgr))
smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
else
smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+ for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
+ smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+
return 0;
}
-static const struct pp_smumgr_func ellsemere_smu_funcs = {
+static const struct pp_smumgr_func polaris10_smu_funcs = {
.smu_init = polaris10_smu_init,
- .smu_fini = polaris10_smu_fini,
+ .smu_fini = smu7_smu_fini,
.start_smu = polaris10_start_smu,
- .check_fw_load_finish = polaris10_check_fw_load_finish,
- .request_smu_load_fw = polaris10_reload_firmware,
+ .check_fw_load_finish = smu7_check_fw_load_finish,
+ .request_smu_load_fw = smu7_reload_firmware,
.request_smu_load_specific_fw = NULL,
- .send_msg_to_smc = polaris10_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter,
+ .send_msg_to_smc = smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
+ .update_smc_table = polaris10_update_smc_table,
+ .get_offsetof = polaris10_get_offsetof,
+ .process_firmware_header = polaris10_process_firmware_header,
+ .init_smc_table = polaris10_init_smc_table,
+ .update_sclk_threshold = polaris10_update_sclk_threshold,
+ .thermal_avfs_enable = polaris10_thermal_avfs_enable,
+ .thermal_setup_fan_table = polaris10_thermal_setup_fan_table,
+ .populate_all_graphic_levels = polaris10_populate_all_graphic_levels,
+ .populate_all_memory_levels = polaris10_populate_all_memory_levels,
+ .get_mac_definition = polaris10_get_mac_definition,
+ .is_dpm_running = polaris10_is_dpm_running,
};
int polaris10_smum_init(struct pp_smumgr *smumgr)
@@ -998,10 +410,10 @@ int polaris10_smum_init(struct pp_smumgr *smumgr)
polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
if (polaris10_smu == NULL)
- return -1;
+ return -EINVAL;
smumgr->backend = polaris10_smu;
- smumgr->smumgr_funcs = &ellsemere_smu_funcs;
+ smumgr->smumgr_funcs = &polaris10_smu_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
index e5377aec057f..49ebf1d5a53c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -24,45 +24,52 @@
#ifndef _POLARIS10_SMUMANAGER_H
#define _POLARIS10_SMUMANAGER_H
-#include <polaris10_ppsmc.h>
+
#include <pp_endian.h>
+#include "smu74.h"
+#include "smu74_discrete.h"
+#include "smu7_smumgr.h"
+
+#define SMC_RAM_END 0x40000
struct polaris10_avfs {
enum AVFS_BTC_STATUS avfs_btc_status;
uint32_t avfs_btc_param;
};
-struct polaris10_buffer_entry {
- uint32_t data_size;
- uint32_t mc_addr_low;
- uint32_t mc_addr_high;
- void *kaddr;
- unsigned long handle;
+struct polaris10_pt_defaults {
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t DTEAmbientTempBase;
+
+ uint32_t DisplayCac;
+ uint32_t BAPM_TEMP_GRADIENT;
+ uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+ uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+};
+
+
+
+struct polaris10_range_table {
+ uint32_t trans_lower_frequency; /* in 10khz */
+ uint32_t trans_upper_frequency;
};
struct polaris10_smumgr {
- uint8_t *header;
- uint8_t *mec_image;
- struct polaris10_buffer_entry smu_buffer;
- struct polaris10_buffer_entry header_buffer;
- uint32_t soft_regs_start;
- uint8_t *read_rrm_straps;
- uint32_t read_drm_straps_mc_address_high;
- uint32_t read_drm_straps_mc_address_low;
- uint32_t acpi_optimization;
- bool post_initial_boot;
+ struct smu7_smumgr smu7_data;
uint8_t protected_mode;
- uint8_t security_hard_key;
struct polaris10_avfs avfs;
+ SMU74_Discrete_DpmTable smc_state_table;
+ struct SMU74_Discrete_Ulv ulv_setting;
+ struct SMU74_Discrete_PmFuses power_tune_table;
+ struct polaris10_range_table range_table[NUM_SCLK_RANGE];
+ const struct polaris10_pt_defaults *power_tune_defaults;
+ uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
+ uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
};
-int polaris10_smum_init(struct pp_smumgr *smumgr);
-
-int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit);
-int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit);
-int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
- const uint8_t *src, uint32_t byte_count, uint32_t limit);
-
#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
new file mode 100644
index 000000000000..6af744f42ec9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#include "smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "smu7_ppsmc.h"
+#include "smu7_smumgr.h"
+
+#define SMU7_SMC_SIZE 0x20000
+
+static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+{
+ PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
+
+ cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
+ return 0;
+}
+
+
+int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+{
+ uint32_t data;
+ uint32_t addr;
+ uint8_t *dest_byte;
+ uint8_t i, data_byte[4] = {0};
+ uint32_t *pdata = (uint32_t *)&data_byte;
+
+ PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+
+ *dest = PP_SMC_TO_HOST_UL(data);
+
+ dest += 1;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (byte_count) {
+ smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ *pdata = PP_SMC_TO_HOST_UL(data);
+ /* Copy the tail through a byte pointer in dest_byte so we never write past the end of a buffer whose size is not a multiple of 4. */
+ dest_byte = (uint8_t *)dest;
+ for (i = 0; i < byte_count; i++)
+ dest_byte[i] = data_byte[i];
+ }
+
+ return 0;
+}
+
+
+int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+ int result;
+ uint32_t data = 0;
+ uint32_t original_data;
+ uint32_t addr = 0;
+ uint32_t extra_shift;
+
+ PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+ result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (0 != byte_count) {
+
+ data = 0;
+
+ result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+
+ original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = (0x100 * data) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ result = smu7_set_smc_sram_address(smumgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ }
+
+ return 0;
+}
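
The tail handling above is the subtle part: leftover bytes are packed MSB-first, shifted into the high end of the word, and merged with the low bytes already present in SMC RAM so that neighbouring data survives. A standalone demonstration of that masking, with made-up values:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t src[2] = { 0xAA, 0xBB };  /* 2-byte tail to write */
		uint32_t original = 0x11223344;         /* word already in SMC RAM */
		uint32_t byte_count = 2;
		uint32_t extra_shift = 8 * (4 - byte_count);    /* 16 */
		uint32_t data = 0;

		for (uint32_t i = 0; i < byte_count; i++)
			data = (data << 8) | src[i];    /* MSB-first: 0x0000AABB */

		data <<= extra_shift;                           /* 0xAABB0000 */
		data |= original & ~(~0UL << extra_shift);      /* keep 0x3344 */

		printf("merged word: 0x%08" PRIX32 "\n", data); /* 0xAABB3344 */
		return 0;
	}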
+
+
+int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
+{
+ static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+
+ return 0;
+}
+
+bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
+{
+ return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+ int ret;
+
+ if (!smu7_is_smc_ram_running(smumgr))
+ return -EINVAL;
+
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ printk("\n failed to send pre message %x ret is %d \n", msg, ret);
+
+ cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ printk("\n failed to send message %x ret is %d \n", msg, ret);
+
+ return 0;
+}
+
+int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+{
+ cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+ return 0;
+}
+
+int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+ if (!smu7_is_smc_ram_running(smumgr)) {
+ return -EINVAL;
+ }
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+ return smu7_send_msg_to_smc(smumgr, msg);
+}
+
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+ cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+ return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
+}
+
+int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+{
+ cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+
+ cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+ printk("Failed to send Message.\n");
+
+ return 0;
+}
+
+int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+{
+ if (!smu7_is_smc_ram_running(smumgr))
+ return -EINVAL;
+
+ SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+ return 0;
+}
+
+
+enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+ enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+ switch (fw_type) {
+ case UCODE_ID_SMU:
+ result = CGS_UCODE_ID_SMU;
+ break;
+ case UCODE_ID_SMU_SK:
+ result = CGS_UCODE_ID_SMU_SK;
+ break;
+ case UCODE_ID_SDMA0:
+ result = CGS_UCODE_ID_SDMA0;
+ break;
+ case UCODE_ID_SDMA1:
+ result = CGS_UCODE_ID_SDMA1;
+ break;
+ case UCODE_ID_CP_CE:
+ result = CGS_UCODE_ID_CP_CE;
+ break;
+ case UCODE_ID_CP_PFP:
+ result = CGS_UCODE_ID_CP_PFP;
+ break;
+ case UCODE_ID_CP_ME:
+ result = CGS_UCODE_ID_CP_ME;
+ break;
+ case UCODE_ID_CP_MEC:
+ result = CGS_UCODE_ID_CP_MEC;
+ break;
+ case UCODE_ID_CP_MEC_JT1:
+ result = CGS_UCODE_ID_CP_MEC_JT1;
+ break;
+ case UCODE_ID_CP_MEC_JT2:
+ result = CGS_UCODE_ID_CP_MEC_JT2;
+ break;
+ case UCODE_ID_RLC_G:
+ result = CGS_UCODE_ID_RLC_G;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+
+int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+{
+ int result;
+
+ result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ return 0;
+}
+
+int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+{
+ int result;
+
+ result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+
+ return 0;
+}
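
Together these two helpers give a read-modify-write primitive over the indirect index/data register pair. A minimal usage sketch (the flag bit and the address are hypothetical, supplied by the caller):

	static int example_set_flag(struct pp_smumgr *smumgr, uint32_t addr)
	{
		uint32_t val;
		int ret = smu7_read_smc_sram_dword(smumgr, addr, &val, SMC_RAM_END);

		if (ret)
			return ret;
		val |= 0x1;	/* hypothetical flag bit */
		return smu7_write_smc_sram_dword(smumgr, addr, val, SMC_RAM_END);
	}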
+
+/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC-related types. */
+
+static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
+{
+ uint32_t result = 0;
+
+ switch (fw_type) {
+ case UCODE_ID_SDMA0:
+ result = UCODE_ID_SDMA0_MASK;
+ break;
+ case UCODE_ID_SDMA1:
+ result = UCODE_ID_SDMA1_MASK;
+ break;
+ case UCODE_ID_CP_CE:
+ result = UCODE_ID_CP_CE_MASK;
+ break;
+ case UCODE_ID_CP_PFP:
+ result = UCODE_ID_CP_PFP_MASK;
+ break;
+ case UCODE_ID_CP_ME:
+ result = UCODE_ID_CP_ME_MASK;
+ break;
+ case UCODE_ID_CP_MEC:
+ case UCODE_ID_CP_MEC_JT1:
+ case UCODE_ID_CP_MEC_JT2:
+ result = UCODE_ID_CP_MEC_MASK;
+ break;
+ case UCODE_ID_RLC_G:
+ result = UCODE_ID_RLC_G_MASK;
+ break;
+ default:
+ printk("UCode type is out of range! \n");
+ result = 0;
+ }
+
+ return result;
+}
+
+static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+ uint32_t fw_type,
+ struct SMU_Entry *entry)
+{
+ int result = 0;
+ struct cgs_firmware_info info = {0};
+
+ result = cgs_get_firmware_info(smumgr->device,
+ smu7_convert_fw_type_to_cgs(fw_type),
+ &info);
+
+ if (!result) {
+ entry->version = info.version;
+ entry->id = (uint16_t)fw_type;
+ entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
+ entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
+ entry->meta_data_addr_high = 0;
+ entry->meta_data_addr_low = 0;
+ entry->data_size_byte = info.image_size;
+ entry->num_register_entries = 0;
+ }
+
+ if (fw_type == UCODE_ID_RLC_G)
+ entry->flags = 1;
+ else
+ entry->flags = 0;
+
+ return 0;
+}
+
+int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
+{
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ uint32_t fw_to_load;
+ int result = 0;
+ struct SMU_DRAMData_TOC *toc;
+
+ if (!smumgr->reload_fw) {
+ printk(KERN_INFO "[ powerplay ] skip reloading...\n");
+ return 0;
+ }
+
+ if (smu_data->soft_regs_start)
+ cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ SMU_SoftRegisters, UcodeLoadStatus),
+ 0x0);
+
+ if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
+ smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
+ fw_to_load = UCODE_ID_RLC_G_MASK
+ + UCODE_ID_SDMA0_MASK
+ + UCODE_ID_SDMA1_MASK
+ + UCODE_ID_CP_CE_MASK
+ + UCODE_ID_CP_ME_MASK
+ + UCODE_ID_CP_PFP_MASK
+ + UCODE_ID_CP_MEC_MASK;
+ } else {
+ fw_to_load = UCODE_ID_RLC_G_MASK
+ + UCODE_ID_SDMA0_MASK
+ + UCODE_ID_SDMA1_MASK
+ + UCODE_ID_CP_CE_MASK
+ + UCODE_ID_CP_ME_MASK
+ + UCODE_ID_CP_PFP_MASK
+ + UCODE_ID_CP_MEC_MASK
+ + UCODE_ID_CP_MEC_JT1_MASK
+ + UCODE_ID_CP_MEC_JT2_MASK;
+ }
+
+ toc = (struct SMU_DRAMData_TOC *)smu_data->header;
+ toc->num_entries = 0;
+ toc->structure_version = 1;
+
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+ "Failed to Get Firmware Entry.", return -EINVAL);
+
+ smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+
+ if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+ printk(KERN_ERR "Fail to Request SMU Load uCode");
+
+ return result;
+}
+
+/* Check whether the FW has been loaded; the wait does not return until the SMU reports loading finished. */
+int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+{
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
+ uint32_t ret;
+
+ ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
+ smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ SMU_SoftRegisters, UcodeLoadStatus),
+ fw_mask, fw_mask);
+
+ return ret;
+}
+
+int smu7_reload_firmware(struct pp_smumgr *smumgr)
+{
+ return smumgr->smumgr_funcs->start_smu(smumgr);
+}
+
+static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+{
+ uint32_t byte_count = length;
+
+ PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
+
+ cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+
+ for (; byte_count >= 4; byte_count -= 4)
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
+
+ return 0;
+}
+
+
+int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+{
+ int result = 0;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+ struct cgs_firmware_info info = {0};
+
+ if (smu_data->security_hard_key == 1)
+ cgs_get_firmware_info(smumgr->device,
+ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+ else
+ cgs_get_firmware_info(smumgr->device,
+ smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
+
+ result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
+
+ return result;
+}
+
+
+int smu7_init(struct pp_smumgr *smumgr)
+{
+ struct smu7_smumgr *smu_data;
+ uint8_t *internal_buf;
+ uint64_t mc_addr = 0;
+
+ /* Allocate memory for backend private data */
+ smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ smu_data->header_buffer.data_size =
+ ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+ smu_data->smu_buffer.data_size = 200*4096;
+
+/* Allocate FW image data structure and header buffer and
+ * send the header buffer address to SMU */
+ smu_allocate_memory(smumgr->device,
+ smu_data->header_buffer.data_size,
+ CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+ PAGE_SIZE,
+ &mc_addr,
+ &smu_data->header_buffer.kaddr,
+ &smu_data->header_buffer.handle);
+
+ smu_data->header = smu_data->header_buffer.kaddr;
+ smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+ smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+ PP_ASSERT_WITH_CODE((NULL != smu_data->header),
+ "Out of memory.",
+ kfree(smumgr->backend);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)smu_data->header_buffer.handle);
+ return -EINVAL);
+
+ smu_allocate_memory(smumgr->device,
+ smu_data->smu_buffer.data_size,
+ CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+ PAGE_SIZE,
+ &mc_addr,
+ &smu_data->smu_buffer.kaddr,
+ &smu_data->smu_buffer.handle);
+
+ internal_buf = smu_data->smu_buffer.kaddr;
+ smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+ smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+ PP_ASSERT_WITH_CODE((NULL != internal_buf),
+ "Out of memory.",
+ kfree(smumgr->backend);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)smu_data->smu_buffer.handle);
+ return -EINVAL);
+
+ return 0;
+}
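
One sizing quirk worth noting: ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096 always rounds up by at least one full page, so a TOC of exactly 4096 bytes gets an 8192-byte buffer. A tight page round-up (a sketch, not what the patch does) would be:

	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);	/* 4096 stays 4096 */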
+
+
+int smu7_smu_fini(struct pp_smumgr *smumgr)
+{
+ if (smumgr->backend) {
+ kfree(smumgr->backend);
+ smumgr->backend = NULL;
+ }
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
new file mode 100644
index 000000000000..76352f2423ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SMU7_SMUMANAGER_H
+#define _SMU7_SMUMANAGER_H
+
+
+#include <pp_endian.h>
+
+#define SMC_RAM_END 0x40000
+#define mmSMC_IND_INDEX_11 0x01AC
+#define mmSMC_IND_DATA_11 0x01AD
+
+struct smu7_buffer_entry {
+ uint32_t data_size;
+ uint32_t mc_addr_low;
+ uint32_t mc_addr_high;
+ void *kaddr;
+ unsigned long handle;
+};
+
+struct smu7_smumgr {
+ uint8_t *header;
+ uint8_t *mec_image;
+ struct smu7_buffer_entry smu_buffer;
+ struct smu7_buffer_entry header_buffer;
+
+ uint32_t soft_regs_start;
+ uint32_t dpm_table_start;
+ uint32_t mc_reg_table_start;
+ uint32_t fan_table_start;
+ uint32_t arb_table_start;
+ uint32_t ulv_setting_starts;
+ uint8_t security_hard_key;
+ uint32_t acpi_optimization;
+};
+
+
+int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+ uint32_t *dest, uint32_t byte_count, uint32_t limit);
+int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit);
+int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
+bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
+int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
+int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
+int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
+ uint32_t parameter);
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
+ uint16_t msg, uint32_t parameter);
+int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
+int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
+
+enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
+int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+ uint32_t *value, uint32_t limit);
+int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+ uint32_t value, uint32_t limit);
+
+int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
+int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
+int smu7_reload_firmware(struct pp_smumgr *smumgr);
+int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
+int smu7_init(struct pp_smumgr *smumgr);
+int smu7_smu_fini(struct pp_smumgr *smumgr);
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 7723473e51a0..e5812aa456f3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -28,10 +28,7 @@
#include "smumgr.h"
#include "cgs_common.h"
#include "linux/delay.h"
-#include "cz_smumgr.h"
-#include "tonga_smumgr.h"
-#include "fiji_smumgr.h"
-#include "polaris10_smumgr.h"
+
int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
{
@@ -47,7 +44,6 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
smumgr->device = pp_init->device;
smumgr->chip_family = pp_init->chip_family;
smumgr->chip_id = pp_init->chip_id;
- smumgr->hw_revision = pp_init->rev_id;
smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
smumgr->reload_fw = 1;
handle->smu_mgr = smumgr;
@@ -58,6 +54,9 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case AMDGPU_FAMILY_VI:
switch (smumgr->chip_id) {
+ case CHIP_TOPAZ:
+ iceland_smum_init(smumgr);
+ break;
case CHIP_TONGA:
tonga_smum_init(smumgr);
break;
@@ -87,6 +86,57 @@ int smum_fini(struct pp_smumgr *smumgr)
return 0;
}
+int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
+ return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
+
+ return 0;
+}
+
+int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
+ return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
+
+ return 0;
+}
+
+int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+
+ if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
+ return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
+
+ return 0;
+}
+
+int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+
+ if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
+ return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
+
+ return 0;
+}
+
+uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
+{
+ if (NULL != smumgr->smumgr_funcs->get_offsetof)
+ return smumgr->smumgr_funcs->get_offsetof(type, member);
+
+ return 0;
+}
+
+int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
+ return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
+ return 0;
+}
+
int smum_get_argument(struct pp_smumgr *smumgr)
{
if (NULL != smumgr->smumgr_funcs->get_argument)
@@ -95,13 +145,20 @@ int smum_get_argument(struct pp_smumgr *smumgr)
return 0;
}
+uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
+{
+ if (NULL != smumgr->smumgr_funcs->get_mac_definition)
+ return smumgr->smumgr_funcs->get_mac_definition(value);
+
+ return 0;
+}
+
int smum_download_powerplay_table(struct pp_smumgr *smumgr,
void **table)
{
if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
table);
-
return 0;
}
@@ -268,3 +325,44 @@ int smu_free_memory(void *device, void *handle)
return 0;
}
+
+int smum_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
+ return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
+
+ return 0;
+}
+
+int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
+ return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
+
+ return 0;
+}
+
+int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
+ return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
+
+ return 0;
+}
+
+/* this interface is needed by the CI/VI islands parts */
+int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
+ return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
+
+ return 0;
+}
+
+bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
+ return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
new file mode 100644
index 000000000000..de2a24d85f48
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -0,0 +1,3206 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "tonga_smc.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "tonga_smumgr.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "smu7_ppsmc.h"
+
+#include "smu72_discrete.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+
+static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ {1, 0xF, 0xFD, 0x19,
+ 5, 45, 0, 0xB0000,
+ {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
+ 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
+ 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
+ },
+};
+
+/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
+static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0},
+ {600, 1050, 6, 1}
+};
+
+/* [FF, SS] type, [] 4 voltage ranges,
+ * and [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
+};
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
+static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5},
+ {0, 2, 4, 5, 6, 5}
+};
+
+/* PPGen has the gain setting generated in x * 100 unit
+ * This function is to convert the unit to x * 4096(0x1000) unit.
+ * This is the unit expected by SMC firmware
+ */
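
The helper that comment documents is not visible in this hunk; the conversion it describes reduces to scaling by 4096/100. A minimal sketch (the name and the rounding are assumptions):

	/* Convert a gain in hundredths (x * 100) to the x * 4096 fixed-point
	 * unit the SMC firmware expects; rounds to nearest. */
	static uint16_t scale_gain_to_smc(uint16_t gain_x100)
	{
		return (uint16_t)(((uint32_t)gain_x100 * 4096 + 50) / 100);
	}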
+
+
+static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* the clock-voltage dependency table is empty */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ voltage->VddGfx = phm_get_voltage_index(
+ pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(
+ pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddc);
+
+ if (allowed_clock_voltage_table->entries[i].vddci)
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
+ else
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
+
+
+ if (allowed_clock_voltage_table->entries[i].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
+
+ voltage->Phases = 1;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddc);
+
+ if (allowed_clock_voltage_table->entries[i-1].vddci)
+ voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i-1].vddci);
+
+ if (allowed_clock_voltage_table->entries[i-1].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
+
+ return 0;
+}
+
+
+/**
+ * Vddc table preparation for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ table->VddcTable[count] =
+ PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+ }
+ return 0;
+}
+
+/**
+ * VddGfx table preparation for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
+ for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
+ table->VddGfxTable[count] =
+ PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
+ }
+ return 0;
+}
+
+/**
+ * Vddci table preparation for SMC.
+ *
+ * @param *hwmgr The address of the hardware manager.
+ * @param *table The SMC DPM table structure to be populated.
+ * @return 0
+ */
+static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ table->SmioTable1.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
+ table->SmioTable1.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->vddci_voltage_table.entries[count].smio_low;
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param *hwmgr The address of the hardware manager.
+ * @param *table The SMC DPM table structure to be populated.
+ * @return 0
+ */
+static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ table->SmioTable2.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->mvdd_voltage_table.entries[count].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+ }
+
+ return 0;
+}
+
+/**
+ * Preparation of vddc and vddgfx CAC tables for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
+ pptable_info->vddgfx_lookup_table;
+ struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
+ pptable_info->vddc_lookup_table;
+
+ /* table is already swapped, so in order to use the value from it
+ * we need to swap it back.
+ */
+ uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
+ uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
+
+ for (count = 0; count < vddc_level_count; count++) {
+ /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+
+ if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
+ for (count = 0; count < vddgfx_level_count; count++) {
+ index = phm_get_voltage_index(vddgfx_lookup_table,
+ data->vddgfx_voltage_table.entries[count].value);
+ table->BapmVddGfxVidLoSidd[count] =
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_low);
+ table->BapmVddGfxVidHiSidd[count] =
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
+ }
+ } else {
+ for (count = 0; count < vddc_level_count; count++) {
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddGfxVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddGfxVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Preparation of voltage tables for SMC.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+
+static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = tonga_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDC voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDCI voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDGFX voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate MVDD voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_cac_tables(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_Ulv *state)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_DpmTable *table)
+{
+ return tonga_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+/**
+ * Calculates the SCLK dividers using the provided engine clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param sclk the SMC SCLK structure to be populated
+ */
+static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* the low 14 bits are the fraction and the high 12 bits the divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
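
As a sanity check of the divider math (illustrative numbers, not from a real VBIOS): with Fref = 100 MHz (10000 in 10 kHz units), ref_div = 1 and post_div = 2, a 1 GHz engine clock needs a 2 GHz VCO, so:

	uint32_t fbdiv = (uint64_t)200000 * 16384 / 10000;	/* 0x50000 */
	/* integer part: fbdiv >> 14 == 20; fraction: fbdiv & 0x3FFF == 0 */

which is exactly the 12.14 split described by the 'low 14 bits' comment above.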
+
+/**
+ * Populates single SMC SCLK structure using the provided engine clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param engine_clock the engine clock to use to populate the structure
+ * @param graphic_level the SMC graphics level structure to be populated
+ */
+static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU72_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = tonga_get_dependecy_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_sclk, engine_clock,
+ &graphic_level->MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find VDDC voltage value for VDDC "
+ "engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 0;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+ /* Default to slow; the highest DPM level's watermark is raised to PPSMC_DISPLAY_WATERMARK_HIGH later. */
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (!result) {
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+/**
+ * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ *
+ * @param hwmgr the address of the hardware manager
+ */
+int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
+ uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
+ SMU72_MAX_LEVELS_GRAPHICS;
+
+ SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i, max_entry;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = tonga_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ }
+ } else {
+ if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
+ printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0 !");
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(highest_pcie_level_enabled+1))) != 0)) {
+ highest_pcie_level_enabled++;
+ }
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<lowest_pcie_level_enabled)) == 0)) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
+ count++;
+ }
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* The level count is sent to the SMC once, at SMC table init, and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
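
The three while loops above derive the highest, lowest, and middle enabled PCIe levels by scanning a bitmask. A standalone trace with an example mask:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 0x6;	/* levels 1 and 2 enabled */
		unsigned int highest = 0, lowest = 0, count = 0, mid;

		while (mask && (mask & (1u << (highest + 1))))
			highest++;		/* -> 2 */
		while (mask && !(mask & (1u << lowest)))
			lowest++;		/* -> 1 */
		while (count < highest &&
		       !(mask & (1u << (lowest + 1 + count))))
			count++;		/* -> 0, bit 2 is set */
		mid = (lowest + 1 + count) < highest ?
		      (lowest + 1 + count) : highest;	/* -> 2 */

		printf("highest %u lowest %u mid %u\n", highest, lowest, mid);
		return 0;
	}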
+
+/**
+ * Populates the SMC MCLK structure using the provided memory clock
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param memory_clock the memory clock to use to populate the structure
+ * @param mclk the SMC memory level structure to be populated
+ */
+static int tonga_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(
+ !result,
+ "Error retrieving Memory Clock Parameters from VBIOS.",
+ return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
+ mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF,
+ mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC,
+ mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE,
+ mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL,
+ mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* Fnom: QDR (GDDR5) runs at 4x the memory clock, DDR3 at 2x */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+ /* ss.Info.speed_spectrum_rate -- in unit of khz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+ /* CLKV = 65536 * speed_spectrum_percentage / 2 * speed_spectrum_rate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
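+ /*
+ * Worked example with assumed values: reference_clock = 2700 and
+ * speed_spectrum_rate = 40 give CLKS = 2700 * 5 / 40 = 337 (integer
+ * division); the CLKV term then scales the step size by
+ * (freq_nom / reference_clock)^2 as derived in the comment above.
+ */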
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+ /* Save the resulting register values to the output memory level structure */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
+static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500)
+ mc_para_index = 0x00;
+ else if (memory_clock > 47500)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ } else {
+ if (memory_clock < 65000)
+ mc_para_index = 0x00;
+ else if (memory_clock > 135000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+
+ return mc_para_index;
+}
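+/*
+ * Example (assumed clock in 10 kHz units): in strobe mode a
+ * memory_clock of 30000 (300 MHz) gives (30000 - 10000) / 2500 = 8.
+ */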
+
+static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000)
+ mc_para_index = 0;
+ else if (memory_clock >= 80000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+
+ return mc_para_index;
+}
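+/*
+ * Example (assumed units as above): a DDR3 memory_clock of 40000
+ * yields (40000 - 10000) / 5000 + 1 = 7.
+ */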
+
+
+static int tonga_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *memory_level
+ )
+{
+ uint32_t mvdd = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_stutter_mode_threshold = 30000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (NULL != pptable_info->vdd_dep_on_mclk) {
+ result = tonga_get_dependecy_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_mclk,
+ memory_clock,
+ &memory_level->MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE(
+ !result,
+ "can not find MinVddc voltage value from memory VDDC "
+ "voltage dependency table",
+ return result);
+ }
+
+ if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+ memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else
+ memory_level->MinMvdd = mvdd;
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
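+ /*
+ * (Assumption, based on the checks below) Memory self-refresh
+ * stutter is only worthwhile at low memory clocks, with UVD idle,
+ * the display pipe stutter feature enabled in hardware, and a
+ * small (1-2) number of active displays.
+ */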
+ if ((mclk_stutter_mode_threshold != 0) &&
+ (memory_clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled)
+ && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+ && (data->display_timing.num_existing_displays <= 2)
+ && (data->display_timing.num_existing_displays != 0))
+ memory_level->StutterEnable = 1;
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ } else {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ } else {
+ dll_state_on = data->dll_default_on;
+ }
+ } else {
+ memory_level->StrobeRatio =
+ tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = tonga_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_address =
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size =
+ sizeof(SMU72_Discrete_MemoryLevel) *
+ SMU72_MAX_LEVELS_MEMORY;
+ SMU72_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = tonga_populate_single_memory_level(
+ hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ /*
+ * Keep the activity target low to prevent MC activity in stutter mode
+ * from pushing DPM up. The UVD change complements this by putting the
+ * MCLK in a higher state by default, so that we are not affected by
+ * the up threshold or MCLK DPM latency.
+ */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pattern)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find mvdd value which clock is more than request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ smio_pattern->Voltage =
+ data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+
+ SMIO_Pattern voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ table->ACPILevel.MinVoltage =
+ smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
+
+ /* Run the ACPI state at the VBIOS reference clock */
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
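+ /*
+ * Gate the engine PLL for the ACPI state: power the SPLL down, hold
+ * it in reset, and (assumption) switch the SCLK mux away from the
+ * SPLL output via SCLK_MUX_SEL = 4.
+ */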
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVoltage =
+ smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
+
+ /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
+
+ if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
+static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t) (mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->UvdLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->UvdLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->UvdLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(
+ hwmgr,
+ table->UvdLevel[count].VclkFrequency,
+ &dividers);
+
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for Vclk clock",
+ return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
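+ /* 0x40 is assumed to be PPSMC_SYSTEMFLAG_12CHANNEL (1-2 memory channels) */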
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for Dclk clock",
+ return result);
+
+ table->UvdLevel[count].DclkDivider =
+ (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ }
+
+ return result;
+
+}
+
+static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t) (mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->VceLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->VceLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->VceLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t) (mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].aclk;
+ table->AcpLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->AcpLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->AcpLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->AcpLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t) (mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->SamuLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->SamuLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->SamuLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+/**
+ * Set up the parameters for the MC ARB.
+ *
+ * This function is to be called from the SetPowerState table.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, an error code otherwise
+ */
+static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ int result = 0;
+ SMU72_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
+
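+ /*
+ * Build one ARB timing entry per (SCLK, MCLK) pair; the SMC is
+ * assumed to index this table by the currently selected levels.
+ */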
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = tonga_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (result)
+ break;
+ }
+ }
+
+ if (!result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU72_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ printk(KERN_ERR "[powerplay] VBIOS did not find boot engine "
+ "clock value in dependency table. "
+ "Using Graphics DPM level 0 !\n");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+ printk(KERN_ERR "[powerplay] VBIOS did not find boot "
+ "memory clock value in dependency table. "
+ "Using Memory DPM level 0 !\n");
+ result = 0;
+ }
+
+ table->BootVoltage.Vddc =
+ phm_get_voltage_id(&(data->vddc_voltage_table),
+ data->vbios_boot_state.vddc_bootup_value);
+ table->BootVoltage.VddGfx =
+ phm_get_voltage_id(&(data->vddgfx_voltage_table),
+ data->vbios_boot_state.vddgfx_bootup_value);
+ table->BootVoltage.Vddci =
+ phm_get_voltage_id(&(data->vddci_voltage_table),
+ data->vbios_boot_state.vddci_bootup_value);
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return result;
+}
+
+static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ uint32_t hw_revision, dev_id;
+ struct cgs_system_info sys_info = {0};
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ hw_revision = (uint32_t)sys_info.value;
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ /* Read the SMU efuse to calculate RO and determine whether
+ * the part is SS or FF. If RO >= 1660, the part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
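+
+ /*
+ * Worked example (assumed efuse values): efuse2 = 1 and efuse = 128
+ * give ro = (2300 - 1350) * 128 / 255 + 1350 = 1826 >= 1660, so the
+ * part is classified as FF (type 0).
+ */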
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
+ volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
+ (sclk_table->entries[i].clk/100) / 10000) * 1000 /
+ (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
+ volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
+ (sclk_table->entries[i].clk/100) / 100000) * 1000 /
+ (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
+ } else {
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ }
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1];
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
+ if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (tonga_clock_stretch_amount_conversion
+ [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
+ /* Loop through each SCLK and check the frequency
+ * to see if it lies within the frequency for clock stretcher.
+ */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+ /* Check the allowed frequency against the sclk level[j].
+ * Sclk's endianness has already been converted,
+ * and it's in 10Khz unit,
+ * as opposed to Data table, which is in Mhz unit.
+ */
+ if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+/**
+ * Populates the SMC VRConfig field in DPM table.
+ *
+ * @param hwmgr the address of the hardware manager
+ * @param table the SMC DPM table structure to be populated
+ * @return always 0
+ */
+static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
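+ /*
+ * VRConfig maps each rail to a regulator plane. In split mode VDDGFX
+ * takes SVI2 plane 1 and VDDC plane 2; in merged mode VDDGFX is
+ * merged with VDDC on plane 1. VDDCI and MVDD fall back to SMIO
+ * patterns when under GPIO control.
+ */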
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ /* Split mode */
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= config;
+ } else {
+ printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should "
+ "both be on SVI2 control in split mode !\n");
+ }
+ } else {
+ /* Merged mode */
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ printk(KERN_ERR "[ powerplay ] VDDC should be on "
+ "SVI2 control in merged mode !\n");
+ }
+ }
+
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ }
+
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+
+/**
+ * Initialize the ARB DRAM timing table's index field.
+ *
+ * @param smumgr the address of the SMC manager.
+ * @return 0 on success, an error code otherwise
+ */
+static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ uint32_t tmp;
+ int result;
+
+ /*
+ * This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table only
+ * individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result != 0)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(smumgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+
+static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ int i, j, k;
+ const uint16_t *pdef1, *pdef2;
+
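+ /*
+ * The TDP values are assumed to be carried in a fixed-point format
+ * with 8 fraction bits, hence the multiply by 256 below.
+ */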
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range !",
+ );
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ dpm_table->BAPM_TEMP_GRADIENT =
+ PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+ pdef1 = defaults->bapmti_r;
+ pdef2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU72_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU72_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef1);
+ dpm_table->BAPMTI_RC[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* The number of TDC fraction bits was changed from 8 to 7
+ * for Fiji, as requested by the SMC team.
+ */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ fuse_table_offset +
+ offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW8 "
+ "(TdcWaterfallCtl) from SMC Failed !",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
+static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed !",
+ return -EINVAL);
+
+ /* DW6 */
+ if (tonga_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed !",
+ return -EINVAL);
+ /* DW7 */
+ if (tonga_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed !",
+ return -EINVAL);
+ /* DW8 */
+ if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl Failed !",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (tonga_populate_temperature_scaler(hwmgr) != 0)
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed !",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (tonga_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan "
+ "Control parameters Failed !",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (tonga_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed !",
+ return -EINVAL);
+
+ /* DW19 */
+ if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML "
+ "Min and Max Vid Failed !",
+ return -EINVAL);
+
+ /* DW20 */
+ if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(
+ false,
+ "Attempt to populate BapmVddCBaseLeakage "
+ "Hi and Lo Sidd Failed !",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed !",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr,
+ SMU72_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
+
+ uint32_t i, j;
+
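+ /* Compact the address table: copy only entries flagged in validflag. */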
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(
+ i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array "
+ "out of boundary",
+ return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+/* Convert register values from driver format to SMC format */
+static void tonga_convert_mc_registers(
+ const struct tonga_mc_reg_entry *entry,
+ SMU72_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static int tonga_convert_mc_reg_table_entry_to_smc(
+ struct pp_smumgr *smumgr,
+ const uint32_t memory_clock,
+ SMU72_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ uint32_t i = 0;
+
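+ /*
+ * Pick the first entry whose mclk_max covers the requested clock;
+ * if none does, fall back to the highest entry.
+ */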
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
+static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = tonga_convert_mc_reg_table_entry_to_smc(
+ hwmgr->smumgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
+ address = smu_data->smu7_data.mc_reg_table_start +
+ (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(
+ hwmgr->smumgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU72_Discrete_MCRegisterSet) *
+ data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
+ result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for the MC register addresses !",
+ return result;);
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for driver state !",
+ return result;);
+
+ return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &tonga_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+}
+
+/**
+ * Initializes the SMC table and uploads it.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, an error code otherwise
+ */
+int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ uint8_t i;
+ pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+
+
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ tonga_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ tonga_populate_smc_voltage_tables(hwmgr, table);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
+
+ if (i == 1 || i == 0)
+ table->SystemFlags |= 0x40;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = tonga_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ULV state !",
+ return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = tonga_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Link Level !", return result);
+
+ result = tonga_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Graphics Level !", return result);
+
+ result = tonga_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Memory Level !", return result);
+
+ result = tonga_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACPI Level !", return result);
+
+ result = tonga_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize VCE Level !", return result);
+
+ result = tonga_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACP Level !", return result);
+
+ result = tonga_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize SAMU Level !", return result);
+
+ /* Since only the initial state is completely set up at this
+ * point (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = tonga_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to Write ARB settings for the initial state.",
+ return result;);
+
+ result = tonga_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize UVD Level !", return result);
+
+ result = tonga_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Boot Level !", return result);
+
+ result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate BAPM Parameters !", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = tonga_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate Clock Stretcher Data Table !",
+ return result;);
+ }
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+
+ /*
+ * CAIL reads the current link status and reports it as the cap (we
+ * cannot change this due to some previous issues we had).
+ * The SMC drops the link status to the lowest level after PowerPlay
+ * enables DPM. After a PnP event or toggling CF the driver is
+ * reloaded, but this time CAIL reads the current link status, which
+ * the SMC had set low, and reports it as the cap to PowerPlay.
+ * To avoid this, we set PCIeBootLinkLevel to the highest DPM level.
+ */
+ PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
+
+ table->PCIeGenInterval = 1;
+
+ result = tonga_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate VRConfig setting !", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+
+ if (0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr,
+ THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+
+ table->ThermOutPolarity =
+ (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
+
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO*/
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal)){
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ }
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all DPM data to SMC memory (DPM level, DPM level count, etc.). */
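+	/*
+	 * The size below stops short of the three SMU72_PIDController blocks
+	 * at the tail of SMU72_Discrete_DpmTable; presumably those fields are
+	 * owned by the SMC firmware itself.
+	 */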
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(!result,
+		"Failed to upload DPM data to SMC memory!", return result);
+
+ result = tonga_init_arb_table_index(hwmgr->smumgr);
+ PP_ASSERT_WITH_CODE(!result,
+		"Failed to upload arb data to SMC memory!", return result);
+
+	result = tonga_populate_pm_fuses(hwmgr);
+	PP_ASSERT_WITH_CODE((!result),
+		"Failed to populate PM fuses!", return result);
+
+ result = tonga_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((!result),
+		"Failed to populate initial MC register table!", return result);
+
+ return 0;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success; otherwise the error from uploading the fan table.
+*/
+int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (0 == smu_data->smu7_data.fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
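+	/*
+	 * Scale the minimum PWM setting onto the controller's full-scale
+	 * duty value; the /10000 suggests usPWMMin is stored in hundredths
+	 * of a percent. The 64-bit intermediate with do_div() avoids
+	 * overflow on 32-bit hosts.
+	 */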
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
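+	/*
+	 * The "+ 50 ... / 100" idiom rounds the final division to the
+	 * nearest integer instead of truncating; the factor of 16 presumably
+	 * keeps four fractional bits in the slope handed to the SMC.
+	 */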
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ fan_table.FanControl_GL_Flag = 1;
+
+ res = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table,
+ (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+	return res;
+}
+
+
+static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+	if (data->need_update_smu7_dpm_table &
+		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
+ return tonga_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = tonga_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((!result),
+		"Failed to upload MC reg table!",
+ return result);
+
+ result = tonga_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+		"Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU72_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU72_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU72_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
+		}
+		break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+	printk(KERN_WARNING "[ powerplay ] can't get the offset of type %x member %x\n",
+			type, member);
+ return 0;
+}
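+/*
+ * Illustrative use (hypothetical caller): combine the returned member offset
+ * with the table base discovered from the firmware header, e.g.
+ *
+ *	offset = smu_data->smu7_data.dpm_table_start +
+ *		tonga_get_offsetof(SMU_Discrete_DpmTable, LowSclkInterruptThreshold);
+ *
+ * which matches the address tonga_update_sclk_threshold() computes above.
+ */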
+
+uint32_t tonga_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU72_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU72_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU72_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU72_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU72_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU72_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU72_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU72_MAX_LEVELS_MVDD;
+ }
+	printk(KERN_WARNING "[ powerplay ] can't get the mac value %x\n", value);
+
+ return 0;
+}
+
+
+static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
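+	/*
+	 * The divide/multiply pair rounds the byte offset down to a dword
+	 * boundary, since the indirect SMC interface below moves whole
+	 * dwords. The one-byte UvdBootLevel field is then updated with a
+	 * read-modify-write: the 0x00FFFFFF mask preserves the other three
+	 * bytes and the shift by 24 drops the level into the remaining one.
+	 * The VCE and SAMU helpers below repeat this pattern for bytes 2
+	 * and 0.
+	 */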
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+	smu_data->smc_state_table.VceBootLevel = 0;
+	if (table_info->mm_dep_table->count > 0)
+		smu_data->smc_state_table.VceBootLevel =
+			(uint8_t)(table_info->mm_dep_table->count - 1);
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ tonga_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ tonga_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ tonga_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+
+/**
+ * Get the location of various tables inside the FW image.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return 0 on success, 1 if any mandatory header field could not be read.
+ */
+int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
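+	/*
+	 * Unlike the reads above and below, this result is not folded into
+	 * "error"; presumably a missing MC register table is not fatal.
+	 */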
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (result != 0);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
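+/* The memory module index is carried in bits [23:16] of BIOS_SCRATCH_4. */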
+static uint8_t tonga_get_memory_module_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
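+/*
+ * Map an MC sequencer register to its _LP shadow, which backs the s0 entry
+ * of the register address pair. Returns false when the register has no
+ * shadow, in which case tonga_set_s0_mc_reg_index() reuses the s1 address.
+ */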
+static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
+ &address) ?
+ address :
+ table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct tonga_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++)
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+/**
+ * The VBIOS omits some information to reduce size; we need to recover it here.
+ * 1. When we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written to
+ *    mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) need to be written to
+ *    mmMC_PMG_CMD_MRS/_LP[15:0].
+ * 2. When we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be written to
+ *    mmMC_PMG_CMD_MRS1/_LP[15:0].
+ * 3. These values need to be set for each clock range.
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param table the address of MCRegTable
+ * @return 0 on success, -EINVAL if the expanded table would overflow.
+ */
+static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device,
+ mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j;
+
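+	/*
+	 * Set bit i of validflag when register column i differs between any
+	 * two consecutive AC timing entries; columns that are constant
+	 * across every memory clock range never need per-range
+	 * reprogramming.
+	 */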
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+	uint8_t module_index = tonga_get_memory_module_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (!result)
+ result = tonga_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (!result) {
+ tonga_set_s0_mc_reg_index(ni_table);
+ result = tonga_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (!result)
+ tonga_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
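+/* DPM is considered running once the SMC reports its voltage controller on. */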
+bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return 1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
index 8e6670b3cb67..8ae169ff541d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
@@ -20,35 +20,19 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#ifndef _TONGA_SMC_H
+#define _TONGA_SMC_H
-#ifndef TONGA_POWERTUNE_H
-#define TONGA_POWERTUNE_H
+#include "smumgr.h"
+#include "smu72.h"
-enum _phw_tonga_ptc_config_reg_type {
- TONGA_CONFIGREG_MMR = 0,
- TONGA_CONFIGREG_SMC_IND,
- TONGA_CONFIGREG_DIDT_IND,
- TONGA_CONFIGREG_CACHE,
- TONGA_CONFIGREG_MAX
-};
-typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type;
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+#define ASICID_IS_TONGA_P(wDID, bRID) \
+ (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
+ || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
-struct _phw_tonga_pt_config_reg {
- uint32_t Offset;
- uint32_t Mask;
- uint32_t Shift;
- uint32_t Value;
- phw_tonga_ptc_config_reg_type Type;
-};
-typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg;
-struct _phw_tonga_pt_defaults {
+struct tonga_pt_defaults {
uint8_t svi_load_line_en;
uint8_t svi_load_line_vddC;
uint8_t tdc_vddc_throttle_release_limit_perc;
@@ -60,7 +44,17 @@ struct _phw_tonga_pt_defaults {
uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
};
-typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults;
+int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
+int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
+uint32_t tonga_get_mac_definition(uint32_t value);
+int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
+int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index f42c536b3af1..5f9124046b9b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -33,587 +33,9 @@
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "cgs_common.h"
+#include "tonga_smc.h"
+#include "smu7_smumgr.h"
-#define TONGA_SMC_SIZE 0x20000
-#define BUFFER_SIZE 80000
-#define MAX_STRING_SIZE 15
-#define BUFFER_SIZETWO 131072 /*128 *1024*/
-
-/**
-* Set the address for reading/writing the SMC SRAM space.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smcAddress the address in the SMC RAM to access.
-*/
-static int tonga_set_smc_sram_address(struct pp_smumgr *smumgr,
- uint32_t smcAddress, uint32_t limit)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
- PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)),
- "SMC address must be 4 byte aligned.",
- return -1;);
-
- PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)),
- "SMC address is beyond the SMC RAM area.",
- return -1;);
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
-
- return 0;
-}
-
-/**
-* Copy bytes from an array into the SMC RAM space.
-*
-* @param smumgr the address of the powerplay SMU manager.
-* @param smcStartAddress the start address in the SMC RAM to copy bytes to.
-* @param src the byte array to copy the bytes from.
-* @param byteCount the number of bytes to copy.
-*/
-int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
- uint32_t smcStartAddress, const uint8_t *src,
- uint32_t byteCount, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
-
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
- PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)),
- "SMC address must be 4 byte aligned.",
- return 0;);
-
- PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)),
- "SMC address is beyond the SMC RAM area.",
- return 0;);
-
- addr = smcStartAddress;
-
- while (byteCount >= 4) {
- /*
- * Bytes are written into the
- * SMC address space with the MSB first
- */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = tonga_set_smc_sram_address(smumgr, addr, limit);
-
- if (result)
- goto out;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
-
- src += 4;
- byteCount -= 4;
- addr += 4;
- }
-
- if (0 != byteCount) {
- /* Now write odd bytes left, do a read modify write cycle */
- data = 0;
-
- result = tonga_set_smc_sram_address(smumgr, addr, limit);
- if (result)
- goto out;
-
- orig_data = cgs_read_register(smumgr->device,
- mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byteCount);
-
- while (byteCount > 0) {
- data = (data << 8) + *src++;
- byteCount--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = tonga_set_smc_sram_address(smumgr, addr, limit);
- if (result)
- goto out;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
- }
-
-out:
- return result;
-}
-
-
-int tonga_program_jump_on_start(struct pp_smumgr *smumgr)
-{
- static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
-
- tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
-
- return 0;
-}
-
-/**
-* Return if the SMC is currently running.
-*
-* @param smumgr the address of the powerplay hardware manager.
-*/
-static int tonga_is_smc_ram_running(struct pp_smumgr *smumgr)
-{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
- SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device,
- CGS_IND_REG__SMC, ixSMC_PC_C)));
-}
-
-static int tonga_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- return 0;
-}
-
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- if (!tonga_is_smc_ram_running(smumgr))
- return -1;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- PP_ASSERT_WITH_CODE(
- 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
- "Failed to send Previous Message.",
- );
-
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- PP_ASSERT_WITH_CODE(
- 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
- "Failed to send Message.",
- );
-
- return 0;
-}
-
-/*
-* Send a message to the SMC, and do not wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_without_waiting
- (struct pp_smumgr *smumgr, uint16_t msg)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- PP_ASSERT_WITH_CODE(
- 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP),
- "Failed to send Previous Message.",
- );
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-/*
-* Send a message to the SMC with parameter
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
- uint16_t msg, uint32_t parameter)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- if (!tonga_is_smc_ram_running(smumgr))
- return PPSMC_Result_Failed;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc(smumgr, msg);
-}
-
-/*
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
-static int tonga_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_smumgr *smumgr,
- uint16_t msg, uint32_t parameter)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
-
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc_without_waiting(smumgr, msg);
-}
-
-/*
- * Read a 32bit value from the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value and output parameter for the data read from the SMC SRAM.
- */
-int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr,
- uint32_t smcAddress, uint32_t *value,
- uint32_t limit)
-{
- int result;
-
- result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
-
- if (0 != result)
- return result;
-
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0);
-
- return 0;
-}
-
-/*
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value to write to the SMC SRAM.
- */
-int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
- uint32_t smcAddress, uint32_t value,
- uint32_t limit)
-{
- int result;
-
- result = tonga_set_smc_sram_address(smumgr, smcAddress, limit);
-
- if (0 != result)
- return result;
-
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value);
-
- return 0;
-}
-
-static int tonga_smu_fini(struct pp_smumgr *smumgr)
-{
- struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
-
- smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
- smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
-
- if (smumgr->backend != NULL) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
-
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- return 0;
-}
-
-static enum cgs_ucode_id tonga_convert_fw_type_to_cgs(uint32_t fw_type)
-{
- enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
-
- switch (fw_type) {
- case UCODE_ID_SMU:
- result = CGS_UCODE_ID_SMU;
- break;
- case UCODE_ID_SDMA0:
- result = CGS_UCODE_ID_SDMA0;
- break;
- case UCODE_ID_SDMA1:
- result = CGS_UCODE_ID_SDMA1;
- break;
- case UCODE_ID_CP_CE:
- result = CGS_UCODE_ID_CP_CE;
- break;
- case UCODE_ID_CP_PFP:
- result = CGS_UCODE_ID_CP_PFP;
- break;
- case UCODE_ID_CP_ME:
- result = CGS_UCODE_ID_CP_ME;
- break;
- case UCODE_ID_CP_MEC:
- result = CGS_UCODE_ID_CP_MEC;
- break;
- case UCODE_ID_CP_MEC_JT1:
- result = CGS_UCODE_ID_CP_MEC_JT1;
- break;
- case UCODE_ID_CP_MEC_JT2:
- result = CGS_UCODE_ID_CP_MEC_JT2;
- break;
- case UCODE_ID_RLC_G:
- result = CGS_UCODE_ID_RLC_G;
- break;
- default:
- break;
- }
-
- return result;
-}
-
-/**
- * Convert the PPIRI firmware type to SMU type mask.
- * For MEC, we need to check all MEC related type
-*/
-static uint16_t tonga_get_mask_for_firmware_type(uint16_t firmwareType)
-{
- uint16_t result = 0;
-
- switch (firmwareType) {
- case UCODE_ID_SDMA0:
- result = UCODE_ID_SDMA0_MASK;
- break;
- case UCODE_ID_SDMA1:
- result = UCODE_ID_SDMA1_MASK;
- break;
- case UCODE_ID_CP_CE:
- result = UCODE_ID_CP_CE_MASK;
- break;
- case UCODE_ID_CP_PFP:
- result = UCODE_ID_CP_PFP_MASK;
- break;
- case UCODE_ID_CP_ME:
- result = UCODE_ID_CP_ME_MASK;
- break;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- case UCODE_ID_CP_MEC_JT2:
- result = UCODE_ID_CP_MEC_MASK;
- break;
- case UCODE_ID_RLC_G:
- result = UCODE_ID_RLC_G_MASK;
- break;
- default:
- break;
- }
-
- return result;
-}
-
-/**
- * Check if the FW has been loaded,
- * SMU will not return if loading has not finished.
-*/
-static int tonga_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType)
-{
- uint16_t fwMask = tonga_get_mask_for_firmware_type(fwType);
-
- if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
- SOFT_REGISTERS_TABLE_28, fwMask, fwMask)) {
- printk(KERN_ERR "[ powerplay ] check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Populate one firmware image to the data structure */
-static int tonga_populate_single_firmware_entry(struct pp_smumgr *smumgr,
- uint16_t firmware_type,
- struct SMU_Entry *pentry)
-{
- int result;
- struct cgs_firmware_info info = {0};
-
- result = cgs_get_firmware_info(
- smumgr->device,
- tonga_convert_fw_type_to_cgs(firmware_type),
- &info);
-
- if (result == 0) {
- pentry->version = 0;
- pentry->id = (uint16_t)firmware_type;
- pentry->image_addr_high = smu_upper_32_bits(info.mc_addr);
- pentry->image_addr_low = smu_lower_32_bits(info.mc_addr);
- pentry->meta_data_addr_high = 0;
- pentry->meta_data_addr_low = 0;
- pentry->data_size_byte = info.image_size;
- pentry->num_register_entries = 0;
-
- if (firmware_type == UCODE_ID_RLC_G)
- pentry->flags = 1;
- else
- pentry->flags = 0;
- } else {
- return result;
- }
-
- return result;
-}
-
-static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
-{
- struct tonga_smumgr *tonga_smu =
- (struct tonga_smumgr *)(smumgr->backend);
- uint16_t fw_to_load;
- struct SMU_DRAMData_TOC *toc;
- /**
- * First time this gets called during SmuMgr init,
- * we haven't processed SMU header file yet,
- * so Soft Register Start offset is unknown.
- * However, for this case, UcodeLoadStatus is already 0,
- * so we can skip this if the Soft Registers Start offset is 0.
- */
- cgs_write_ind_register(smumgr->device,
- CGS_IND_REG__SMC, ixSOFT_REGISTERS_TABLE_28, 0);
-
- tonga_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_SMU_DRAM_ADDR_HI,
- tonga_smu->smu_buffer.mc_addr_high);
- tonga_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_SMU_DRAM_ADDR_LO,
- tonga_smu->smu_buffer.mc_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry(smumgr,
- UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n",
- return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry(smumgr,
- UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n",
- return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
- PP_ASSERT_WITH_CODE(
- 0 == tonga_populate_single_firmware_entry
- (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
- "Failed to Get Firmware Entry.\n", return -1);
-
- tonga_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_DRV_DRAM_ADDR_HI,
- tonga_smu->header_buffer.mc_addr_high);
- tonga_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_DRV_DRAM_ADDR_LO,
- tonga_smu->header_buffer.mc_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK
- + UCODE_ID_SDMA0_MASK
- + UCODE_ID_SDMA1_MASK
- + UCODE_ID_CP_CE_MASK
- + UCODE_ID_CP_ME_MASK
- + UCODE_ID_CP_PFP_MASK
- + UCODE_ID_CP_MEC_MASK;
-
- PP_ASSERT_WITH_CODE(
- 0 == tonga_send_msg_to_smc_with_parameter_without_waiting(
- smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
- "Fail to Request SMU Load uCode", return 0);
-
- return 0;
-}
-
-static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
- uint32_t firmwareType)
-{
- return 0;
-}
-
-/**
- * Upload the SMC firmware to the SMC microcontroller.
- *
- * @param smumgr the address of the powerplay hardware manager.
- * @param pFirmware the data structure containing the various sections of the firmware.
- */
-static int tonga_smu_upload_firmware_image(struct pp_smumgr *smumgr)
-{
- const uint8_t *src;
- uint32_t byte_count;
- uint32_t *data;
- struct cgs_firmware_info info = {0};
-
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- cgs_get_firmware_info(smumgr->device,
- tonga_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
-
- if (info.image_size & 3) {
- printk(KERN_ERR "[ powerplay ] SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (info.image_size > TONGA_SMC_SIZE) {
- printk(KERN_ERR "[ powerplay ] SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
-
- byte_count = info.image_size;
- src = (const uint8_t *)info.kptr;
-
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]);
-
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
-
- return 0;
-}
static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
{
@@ -623,7 +45,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = tonga_smu_upload_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result)
return result;
@@ -653,7 +75,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
/**
* Call Test SMU message with 0x20000 offset to trigger SMU start
*/
- tonga_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(smumgr);
/* Wait for done bit to be set */
SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
@@ -690,13 +112,13 @@ static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr)
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = tonga_smu_upload_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(smumgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
- tonga_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(smumgr);
SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
@@ -718,7 +140,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
int result;
/* Only start SMC if SMC RAM is not running */
- if (!tonga_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(smumgr)) {
/*Check if SMU is running in protected mode*/
if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
@@ -732,7 +154,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
}
}
- result = tonga_request_smu_reload_fw(smumgr);
+ result = smu7_request_smu_load_fw(smumgr);
return result;
}
@@ -746,67 +168,41 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
*/
static int tonga_smu_init(struct pp_smumgr *smumgr)
{
- struct tonga_smumgr *tonga_smu;
- uint8_t *internal_buf;
- uint64_t mc_addr = 0;
- /* Allocate memory for backend private data */
- tonga_smu = (struct tonga_smumgr *)(smumgr->backend);
- tonga_smu->header_buffer.data_size =
- ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- tonga_smu->smu_buffer.data_size = 200*4096;
-
- smu_allocate_memory(smumgr->device,
- tonga_smu->header_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &tonga_smu->header_buffer.kaddr,
- &tonga_smu->header_buffer.handle);
-
- tonga_smu->pHeader = tonga_smu->header_buffer.kaddr;
- tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)tonga_smu->header_buffer.handle);
- return -1);
-
- smu_allocate_memory(smumgr->device,
- tonga_smu->smu_buffer.data_size,
- CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
- PAGE_SIZE,
- &mc_addr,
- &tonga_smu->smu_buffer.kaddr,
- &tonga_smu->smu_buffer.handle);
-
- internal_buf = tonga_smu->smu_buffer.kaddr;
- tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
- tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
-
- PP_ASSERT_WITH_CODE((NULL != internal_buf),
- "Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
- (cgs_handle_t)tonga_smu->smu_buffer.handle);
- return -1;);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+
+ int i;
+
+ if (smu7_init(smumgr))
+ return -EINVAL;
+
+ for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
+ smu_data->activity_target[i] = 30;
return 0;
}
static const struct pp_smumgr_func tonga_smu_funcs = {
.smu_init = &tonga_smu_init,
- .smu_fini = &tonga_smu_fini,
+ .smu_fini = &smu7_smu_fini,
.start_smu = &tonga_start_smu,
- .check_fw_load_finish = &tonga_check_fw_load_finish,
- .request_smu_load_fw = &tonga_request_smu_reload_fw,
- .request_smu_load_specific_fw = &tonga_request_smu_load_specific_fw,
- .send_msg_to_smc = &tonga_send_msg_to_smc,
- .send_msg_to_smc_with_parameter = &tonga_send_msg_to_smc_with_parameter,
+ .check_fw_load_finish = &smu7_check_fw_load_finish,
+ .request_smu_load_fw = &smu7_request_smu_load_fw,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = &smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
+ .update_smc_table = tonga_update_smc_table,
+ .get_offsetof = tonga_get_offsetof,
+ .process_firmware_header = tonga_process_firmware_header,
+ .init_smc_table = tonga_init_smc_table,
+ .update_sclk_threshold = tonga_update_sclk_threshold,
+ .thermal_setup_fan_table = tonga_thermal_setup_fan_table,
+ .populate_all_graphic_levels = tonga_populate_all_graphic_levels,
+ .populate_all_memory_levels = tonga_populate_all_memory_levels,
+ .get_mac_definition = tonga_get_mac_definition,
+ .initialize_mc_reg_table = tonga_initialize_mc_reg_table,
+ .is_dpm_running = tonga_is_dpm_running,
};
int tonga_smum_init(struct pp_smumgr *smumgr)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 33c788d7f05c..8c4f761d5bc8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -24,30 +24,36 @@
#ifndef _TONGA_SMUMGR_H_
#define _TONGA_SMUMGR_H_
-struct tonga_buffer_entry {
- uint32_t data_size;
- uint32_t mc_addr_low;
- uint32_t mc_addr_high;
- void *kaddr;
- unsigned long handle;
+#include "smu72_discrete.h"
+
+#include "smu7_smumgr.h"
+
+struct tonga_mc_reg_entry {
+ uint32_t mclk_max;
+ uint32_t mc_data[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct tonga_mc_reg_table {
+	uint8_t last; /* number of registers */
+	uint8_t num_entries; /* number of mc_reg_table_entry entries in use */
+	uint16_t validflag; /* bit i set means register i is valid: bit0->address[0], bit1->address[1], etc. */
+ struct tonga_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU72_Discrete_MCRegisterAddress mc_reg_address[SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE];
};
+
struct tonga_smumgr {
- uint8_t *pHeader;
- uint8_t *pMecImage;
- uint32_t ulSoftRegsStart;
- struct tonga_buffer_entry header_buffer;
- struct tonga_buffer_entry smu_buffer;
-};
+ struct smu7_smumgr smu7_data;
+ struct SMU72_Discrete_DpmTable smc_state_table;
+ struct SMU72_Discrete_Ulv ulv_setting;
+ struct SMU72_Discrete_PmFuses power_tune_table;
+ const struct tonga_pt_defaults *power_tune_defaults;
+ SMU72_Discrete_MCRegisters mc_regs;
+ struct tonga_mc_reg_table mc_reg_table;
-extern int tonga_smum_init(struct pp_smumgr *smumgr);
-extern int tonga_copy_bytes_to_smc(struct pp_smumgr *smumgr,
- uint32_t smcStartAddress, const uint8_t *src,
- uint32_t byteCount, uint32_t limit);
-extern int tonga_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
- uint32_t *value, uint32_t limit);
-extern int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smcAddress,
- uint32_t value, uint32_t limit);
+ uint32_t activity_target[SMU72_MAX_LEVELS_GRAPHICS];
+
+};
#endif
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 963a24d46a93..ffe1f85ce300 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
-struct kmem_cache *sched_fence_slab;
-atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
-
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
@@ -618,13 +615,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
INIT_LIST_HEAD(&sched->ring_mirror_list);
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
- if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
- sched_fence_slab = kmem_cache_create(
- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!sched_fence_slab)
- return -ENOMEM;
- }
/* Each scheduler will run on a seperate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -645,6 +635,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
- if (atomic_dec_and_test(&sched_fence_slab_ref))
- kmem_cache_destroy(sched_fence_slab);
}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb502ef..51068e6c3d9a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -30,9 +30,6 @@
struct amd_gpu_scheduler;
struct amd_sched_rq;
-extern struct kmem_cache *sched_fence_slab;
-extern atomic_t sched_fence_slab_ref;
-
/**
* A scheduler entity is a wrapper around a job queue or a group
* of other entities. Entities take turns emitting jobs from their
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+int amd_sched_fence_slab_init(void);
+void amd_sched_fence_slab_fini(void);
+
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 6b63beaf7574..88fc2d662579 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,6 +27,25 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"
+static struct kmem_cache *sched_fence_slab;
+
+int amd_sched_fence_slab_init(void)
+{
+ sched_fence_slab = kmem_cache_create(
+ "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!sched_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amd_sched_fence_slab_fini(void)
+{
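+	/*
+	 * Scheduler fences are freed through an RCU callback (see
+	 * amd_sched_fence_free()); wait for every in-flight callback so no
+	 * fence still lives in the slab when it is destroyed.
+	 */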
+ rcu_barrier();
+ kmem_cache_destroy(sched_fence_slab);
+}
+
struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
void *owner)
{
@@ -103,7 +122,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
}
/**
- * amd_sched_fence_release - callback that fence can be freed
+ * amd_sched_fence_release_scheduled - callback that fence can be freed
*
* @fence: fence
*
@@ -118,7 +137,7 @@ static void amd_sched_fence_release_scheduled(struct fence *f)
}
/**
- * amd_sched_fence_release_scheduled - drop extra reference
+ * amd_sched_fence_release_finished - drop extra reference
*
* @f: fence
*
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index ee0a61c2861b..7130b044b004 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -183,8 +183,6 @@ static void arc_pgu_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
- .prepare_fb = NULL,
- .cleanup_fb = NULL,
.atomic_update = arc_pgu_plane_atomic_update,
};
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 6d4ff34737cb..28e6471257d0 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -198,8 +198,8 @@ static int arcpgu_probe(struct platform_device *pdev)
int ret;
drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
ret = arcpgu_load(drm);
if (ret)
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index b7a8b2ac4055..b69c66b4897e 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -14,170 +14,45 @@
*
*/
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_encoder_slave.h>
-#include <drm/drm_atomic_helper.h>
#include "arcpgu.h"
-struct arcpgu_drm_connector {
- struct drm_connector connector;
- struct drm_encoder_slave *encoder_slave;
-};
-
-static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
-{
- const struct drm_encoder_slave_funcs *sfuncs;
- struct drm_encoder_slave *slave;
- struct arcpgu_drm_connector *con =
- container_of(connector, struct arcpgu_drm_connector, connector);
-
- slave = con->encoder_slave;
- if (slave == NULL) {
- dev_err(connector->dev->dev,
- "connector_get_modes: cannot find slave encoder for connector\n");
- return 0;
- }
-
- sfuncs = slave->slave_funcs;
- if (sfuncs->get_modes == NULL)
- return 0;
-
- return sfuncs->get_modes(&slave->base, connector);
-}
-
-static enum drm_connector_status
-arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
-{
- enum drm_connector_status status = connector_status_unknown;
- const struct drm_encoder_slave_funcs *sfuncs;
- struct drm_encoder_slave *slave;
-
- struct arcpgu_drm_connector *con =
- container_of(connector, struct arcpgu_drm_connector, connector);
-
- slave = con->encoder_slave;
- if (slave == NULL) {
- dev_err(connector->dev->dev,
- "connector_detect: cannot find slave encoder for connector\n");
- return status;
- }
-
- sfuncs = slave->slave_funcs;
- if (sfuncs && sfuncs->detect)
- return sfuncs->detect(&slave->base, connector);
-
- dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n");
- return status;
-}
-
-static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_helper_funcs
-arcpgu_drm_connector_helper_funcs = {
- .get_modes = arcpgu_drm_connector_get_modes,
-};
-
-static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .detect = arcpgu_drm_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = arcpgu_drm_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = {
- .dpms = drm_i2c_encoder_dpms,
- .mode_fixup = drm_i2c_encoder_mode_fixup,
- .mode_set = drm_i2c_encoder_mode_set,
- .prepare = drm_i2c_encoder_prepare,
- .commit = drm_i2c_encoder_commit,
- .detect = drm_i2c_encoder_detect,
-};
-
static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
{
- struct arcpgu_drm_connector *arcpgu_connector;
- struct drm_i2c_encoder_driver *driver;
- struct drm_encoder_slave *encoder;
- struct drm_connector *connector;
- struct i2c_client *i2c_slave;
- int ret;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ int ret = 0;
encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
if (encoder == NULL)
return -ENOMEM;
- i2c_slave = of_find_i2c_device_by_node(np);
- if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
- dev_err(drm->dev, "failed to find i2c slave encoder\n");
- return -EPROBE_DEFER;
- }
-
- if (i2c_slave->dev.driver == NULL) {
- dev_err(drm->dev, "failed to find i2c slave driver\n");
+ /* Locate the drm bridge from the HDMI encoder DT node */
+ bridge = of_drm_find_bridge(np);
+ if (!bridge)
return -EPROBE_DEFER;
- }
- driver =
- to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver));
- ret = driver->encoder_init(i2c_slave, drm, encoder);
- if (ret) {
- dev_err(drm->dev, "failed to initialize i2c encoder slave\n");
- return ret;
- }
-
- encoder->base.possible_crtcs = 1;
- encoder->base.possible_clones = 0;
- ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+ encoder->possible_crtcs = 1;
+ encoder->possible_clones = 0;
+ ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
return ret;
- drm_encoder_helper_add(&encoder->base,
- &arcpgu_drm_encoder_helper_funcs);
-
- arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
- GFP_KERNEL);
- if (!arcpgu_connector) {
- ret = -ENOMEM;
- goto error_encoder_cleanup;
- }
-
- connector = &arcpgu_connector->connector;
- drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
- ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0) {
- dev_err(drm->dev, "failed to initialize drm connector\n");
- goto error_encoder_cleanup;
- }
+ /* Link drm_bridge to encoder */
+ bridge->encoder = encoder;
+ encoder->bridge = bridge;
- ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
- if (ret < 0) {
- dev_err(drm->dev, "could not attach connector to encoder\n");
- drm_connector_unregister(connector);
- goto error_connector_cleanup;
- }
-
- arcpgu_connector->encoder_slave = encoder;
-
- return 0;
-
-error_connector_cleanup:
- drm_connector_cleanup(connector);
+ ret = drm_bridge_attach(drm, bridge);
+ if (ret)
+ drm_encoder_cleanup(encoder);
-error_encoder_cleanup:
- drm_encoder_cleanup(&encoder->base);
return ret;
}
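For context on the -EPROBE_DEFER path above: of_drm_find_bridge() can only succeed once the bridge driver (here, the external HDMI encoder's driver) has probed and registered itself, so deferring lets the driver core retry this init later. A hedged sketch of the lookup-and-attach sequence used here:

        bridge = of_drm_find_bridge(np);    /* np: DT node of the encoder chip */
        if (!bridge)
                return -EPROBE_DEFER;       /* bridge driver not bound yet; retry later */

        bridge->encoder = encoder;          /* cross-link bridge and encoder */
        encoder->bridge = bridge;

        ret = drm_bridge_attach(drm, bridge);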
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index d83b46a30327..fb6a418ce6be 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -326,8 +326,8 @@ static int hdlcd_drm_bind(struct device *dev)
return -ENOMEM;
drm = drm_dev_alloc(&hdlcd_driver, dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
drm->dev_private = hdlcd;
dev_set_drvdata(dev, drm);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 82171d223f2d..9280358b8f15 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -91,7 +91,8 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state, true);
+ drm_atomic_helper_commit_planes(drm, state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
malidp_atomic_commit_hw_done(state);
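drm_atomic_helper_commit_planes() replaced its bool argument with a flags word; DRM_PLANE_COMMIT_ACTIVE_ONLY reproduces the old "true" behaviour (skip planes whose CRTC is inactive) and 0 the old "false". Sketch:

        /* was: drm_atomic_helper_commit_planes(drm, state, true); */
        drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);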
@@ -310,8 +311,8 @@ static int malidp_bind(struct device *dev)
return ret;
drm = drm_dev_alloc(&malidp_driver, dev);
- if (!drm) {
- ret = -ENOMEM;
+ if (IS_ERR(drm)) {
+ ret = PTR_ERR(drm);
goto alloc_fail;
}
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 95558fde214b..271d2fb9711c 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -49,6 +49,6 @@ void malidp_de_planes_destroy(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
/* often used combination of rotational bits */
-#define MALIDP_ROTATED_MASK (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))
+#define MALIDP_ROTATED_MASK (DRM_ROTATE_90 | DRM_ROTATE_270)
#endif /* __MALIDP_DRV_H__ */
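All of the rotation conversions in this series follow from one API change: DRM_ROTATE_* and DRM_REFLECT_* are now bitmask values rather than bit indices, so wrapping them in BIT() would shift them a second time. A small sketch of the new usage:

        unsigned int supported = DRM_ROTATE_0 | DRM_ROTATE_90 |    /* OR them directly, */
                                 DRM_REFLECT_X;                    /* no BIT() wrapper  */

        if (state->rotation & ~supported)
                return -EINVAL;    /* reject unsupported rotations/reflections */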
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 725098d6179a..82c193e5e0d6 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -108,7 +108,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
return -EINVAL;
/* packed RGB888 / BGR888 can't be rotated or flipped */
- if (state->rotation != BIT(DRM_ROTATE_0) &&
+ if (state->rotation != DRM_ROTATE_0 &&
(state->fb->pixel_format == DRM_FORMAT_RGB888 ||
state->fb->pixel_format == DRM_FORMAT_BGR888))
return -EINVAL;
@@ -188,9 +188,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
/* setup the rotation and axis flip bits */
if (plane->state->rotation & DRM_ROTATE_MASK)
val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
- if (plane->state->rotation & BIT(DRM_REFLECT_X))
+ if (plane->state->rotation & DRM_REFLECT_X)
val |= LAYER_V_FLIP;
- if (plane->state->rotation & BIT(DRM_REFLECT_Y))
+ if (plane->state->rotation & DRM_REFLECT_Y)
val |= LAYER_H_FLIP;
/* set the 'enable layer' bit */
@@ -255,12 +255,12 @@ int malidp_de_planes_init(struct drm_device *drm)
goto cleanup;
if (!drm->mode_config.rotation_property) {
- unsigned long flags = BIT(DRM_ROTATE_0) |
- BIT(DRM_ROTATE_90) |
- BIT(DRM_ROTATE_180) |
- BIT(DRM_ROTATE_270) |
- BIT(DRM_REFLECT_X) |
- BIT(DRM_REFLECT_Y);
+ unsigned long flags = DRM_ROTATE_0 |
+ DRM_ROTATE_90 |
+ DRM_ROTATE_180 |
+ DRM_ROTATE_270 |
+ DRM_REFLECT_X |
+ DRM_REFLECT_Y;
drm->mode_config.rotation_property =
drm_mode_create_rotation_property(drm, flags);
}
@@ -268,7 +268,7 @@ int malidp_de_planes_init(struct drm_device *drm)
if (drm->mode_config.rotation_property && (id != DE_SMART))
drm_object_attach_property(&plane->base.base,
drm->mode_config.rotation_property,
- BIT(DRM_ROTATE_0));
+ DRM_ROTATE_0);
drm_plane_helper_add(&plane->base,
&malidp_de_plane_helper_funcs);
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2f58e9e2a59c..a51f8cbcfe26 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- if (dcrtc->dpms != dpms) {
- dcrtc->dpms = dpms;
- if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
- armada_drm_crtc_update(dcrtc);
- if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
- clk_disable_unprepare(dcrtc->clk);
+ if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
if (dpms_blanked(dpms))
armada_drm_vblank_off(dcrtc);
- else
+ else if (!IS_ERR(dcrtc->clk))
+ WARN_ON(clk_prepare_enable(dcrtc->clk));
+ dcrtc->dpms = dpms;
+ armada_drm_crtc_update(dcrtc);
+ if (!dpms_blanked(dpms))
drm_crtc_vblank_on(&dcrtc->crtc);
+ else if (!IS_ERR(dcrtc->clk))
+ clk_disable_unprepare(dcrtc->clk);
+ } else if (dcrtc->dpms != dpms) {
+ dcrtc->dpms = dpms;
}
}
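The restructuring above reads as a power-sequencing rule rather than a behaviour change; roughly, under my reading of the patch (the unblanking flag below is illustrative):

        if (unblanking) {
                clk_prepare_enable(dcrtc->clk);     /* clock on before touching the CRTC */
                armada_drm_crtc_update(dcrtc);
                drm_crtc_vblank_on(&dcrtc->crtc);
        } else {
                armada_drm_vblank_off(dcrtc);       /* quiesce vblank first */
                armada_drm_crtc_update(dcrtc);
                clk_disable_unprepare(dcrtc->clk);  /* gate the clock last */
        }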
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index f5ebdd681445..1e0e68f608e4 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -211,7 +211,7 @@ static struct drm_driver armada_drm_driver = {
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_HAVE_IRQ | DRIVER_PRIME,
+ DRIVER_PRIME,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 7d03c51abcb9..ca73ad8614fe 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -7,7 +7,6 @@
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
-#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index cb8f0347b934..806791897304 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -387,7 +387,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (!access_ok(VERIFY_READ, ptr, args->size))
return -EFAULT;
- ret = fault_in_multipages_readable(ptr, args->size);
+ ret = fault_in_pages_readable(ptr, args->size);
if (ret)
return ret;
@@ -547,7 +547,7 @@ armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
exp_info.flags = O_RDWR;
exp_info.priv = obj;
- return dma_buf_export(&exp_info);
+ return drm_gem_dmabuf_export(dev, &exp_info);
}
struct drm_gem_object *
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 1ee707ef6b8d..152b4e716269 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -121,7 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
int ret;
ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
- BIT(DRM_ROTATE_0),
+ DRM_ROTATE_0,
0, INT_MAX, true, false, &visible);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index c017a9330a18..7a86e24e2687 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -33,7 +33,6 @@
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/init.h>
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index b29a41218fc9..0743e65cb240 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -150,7 +150,8 @@ static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct ast_bo *astbo = ast_bo(bo);
- return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&astbo->gem.vma_node,
+ filp->private_data);
}
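drm_vma_node_verify_access() now takes the struct drm_file instead of the struct file; for a DRM device fd, the drm_file lives in filp->private_data. A minimal sketch of the updated callback shape (the my_bo names are illustrative):

        static int my_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
        {
                struct my_bo *mybo = my_bo(bo);

                /* filp->private_data is the struct drm_file on a DRM fd */
                return drm_vma_node_verify_access(&mybo->gem.vma_node,
                                                  filp->private_data);
        }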
static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -266,6 +267,8 @@ int ast_mm_init(struct ast_private *ast)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -274,11 +277,15 @@ int ast_mm_init(struct ast_private *ast)
void ast_mm_fini(struct ast_private *ast)
{
+ struct drm_device *dev = ast->dev;
+
ttm_bo_device_release(&ast->ttm.bdev);
ast_ttm_global_release(ast);
arch_phys_wc_del(ast->fb_mtrr);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
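The new calls bracket the existing MTRR handling symmetrically: the write-combining memtype for the aperture is reserved before the MTRR is added, and released after the MTRR is deleted. Pairing sketch (base/size stand for the BAR 0 start and length):

        /* init */
        arch_io_reserve_memtype_wc(base, size);     /* claim the WC memtype first */
        mtrr = arch_phys_wc_add(base, size);

        /* fini, in reverse order */
        arch_phys_wc_del(mtrr);
        arch_io_free_memtype_wc(base, size);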
void ast_ttm_placement(struct ast_bo *bo, int domain)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index d4a3d61b7b06..5f484310bee9 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -457,7 +457,7 @@ atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, false);
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -797,8 +797,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
int ret;
ddev = drm_dev_alloc(&atmel_hlcdc_dc_driver, &pdev->dev);
- if (!ddev)
- return -ENOMEM;
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
ret = atmel_hlcdc_dc_load(ddev);
if (ret)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 52c527f6642a..9d4c030672f0 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
- (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
+ (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)))
cfg |= ATMEL_HLCDC_YUV422ROT;
atmel_hlcdc_layer_update_cfg(&plane->layer,
@@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
/*
* Swap width and size in case of 90 or 270 degrees rotation
*/
- if (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+ if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
tmp = state->crtc_w;
state->crtc_w = state->crtc_h;
state->crtc_h = tmp;
@@ -677,7 +677,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
return -EINVAL;
switch (state->base.rotation & DRM_ROTATE_MASK) {
- case BIT(DRM_ROTATE_90):
+ case DRM_ROTATE_90:
offset = ((y_offset + state->src_y + patched_src_w - 1) /
ydiv) * fb->pitches[i];
offset += ((x_offset + state->src_x) / xdiv) *
@@ -686,7 +686,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
fb->pitches[i];
state->pstride[i] = -fb->pitches[i] - state->bpp[i];
break;
- case BIT(DRM_ROTATE_180):
+ case DRM_ROTATE_180:
offset = ((y_offset + state->src_y + patched_src_h - 1) /
ydiv) * fb->pitches[i];
offset += ((x_offset + state->src_x + patched_src_w - 1) /
@@ -695,7 +695,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
state->bpp[i]) - fb->pitches[i];
state->pstride[i] = -2 * state->bpp[i];
break;
- case BIT(DRM_ROTATE_270):
+ case DRM_ROTATE_270:
offset = ((y_offset + state->src_y) / ydiv) *
fb->pitches[i];
offset += ((x_offset + state->src_x + patched_src_h - 1) /
@@ -705,7 +705,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
(2 * state->bpp[i]);
state->pstride[i] = fb->pitches[i] - state->bpp[i];
break;
- case BIT(DRM_ROTATE_0):
+ case DRM_ROTATE_0:
default:
offset = ((y_offset + state->src_y) / ydiv) *
fb->pitches[i];
@@ -755,7 +755,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
}
static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
- const struct drm_plane_state *new_state)
+ struct drm_plane_state *new_state)
{
/*
* FIXME: we should avoid this const -> non-const cast but it's
@@ -780,7 +780,7 @@ static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
}
static void atmel_hlcdc_plane_cleanup_fb(struct drm_plane *p,
- const struct drm_plane_state *old_state)
+ struct drm_plane_state *old_state)
{
/*
* FIXME: we should avoid this const -> non-const cast but it's
@@ -905,7 +905,7 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
if (desc->layout.xstride && desc->layout.pstride)
drm_object_attach_property(&plane->base.base,
plane->base.dev->mode_config.rotation_property,
- BIT(DRM_ROTATE_0));
+ DRM_ROTATE_0);
if (desc->layout.csc) {
/*
@@ -1056,10 +1056,10 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
- BIT(DRM_ROTATE_0) |
- BIT(DRM_ROTATE_90) |
- BIT(DRM_ROTATE_180) |
- BIT(DRM_ROTATE_270));
+ DRM_ROTATE_0 |
+ DRM_ROTATE_90 |
+ DRM_ROTATE_180 |
+ DRM_ROTATE_270);
if (!dev->mode_config.rotation_property)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 19b5adaebe24..32dfe418cc98 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -1,5 +1,4 @@
#include <linux/io.h>
-#include <linux/fb.h>
#include <linux/console.h>
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index abace82de6ea..534227df23f3 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <drm/drm_fb_helper.h>
#include "bochs.h"
@@ -153,7 +154,7 @@ static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
- remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
kfree(ap);
return 0;
@@ -162,8 +163,15 @@ static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
static int bochs_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ unsigned long fbsize;
int ret;
+ fbsize = pci_resource_len(pdev, 0);
+ if (fbsize < 4 * 1024 * 1024) {
+ DRM_ERROR("less than 4 MB video memory, ignoring device\n");
+ return -ENOMEM;
+ }
+
ret = bochs_kick_out_firmware_fb(pdev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 207a2cbcc113..0b4e5d117043 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -178,7 +178,7 @@ static void bochs_encoder_init(struct drm_device *dev)
}
-int bochs_connector_get_modes(struct drm_connector *connector)
+static int bochs_connector_get_modes(struct drm_connector *connector)
{
int count;
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 5c5638a777a1..269cfca9ca06 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -128,7 +128,8 @@ static int bochs_bo_verify_access(struct ttm_buffer_object *bo,
{
struct bochs_bo *bochsbo = bochs_bo(bo);
- return drm_vma_node_verify_access(&bochsbo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&bochsbo->gem.vma_node,
+ filp->private_data);
}
static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index b590e678052d..10e12e74fc9f 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -17,6 +17,13 @@ config DRM_ANALOGIX_ANX78XX
the HDMI output of an application processor to MyDP
or DisplayPort.
+config DRM_DUMB_VGA_DAC
+ tristate "Dumb VGA DAC Bridge support"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Support for RGB to VGA DAC based bridges
+
config DRM_DW_HDMI
tristate
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index efdb07e878f5..cdf3a3cf765d 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,6 +1,7 @@
ccflags-y := -Iinclude/drm
obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
+obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index ec8fb2ed3275..8ed3906dd411 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -922,15 +922,13 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
struct adv7511 *adv7511;
struct device *dev = &i2c->dev;
+ unsigned int main_i2c_addr = i2c->addr << 1;
+ unsigned int edid_i2c_addr = main_i2c_addr + 4;
unsigned int val;
int ret;
@@ -991,8 +989,10 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
- packet_i2c_addr);
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+ main_i2c_addr - 0xa);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR,
+ main_i2c_addr - 2);
+
adv7511_packet_disable(adv7511, 0xffff);
adv7511->i2c_main = i2c;
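The point of this change is that the EDID, packet and CEC register maps sit at fixed offsets from the strap-selected main I2C address, so hard-coded constants only worked for one strapping. Working the offsets through one example (a 7-bit main address of 0x39 is an assumption for illustration):

        unsigned int main_i2c_addr = i2c->addr << 1;            /* 0x39 -> 0x72 (8-bit) */
        unsigned int edid_i2c_addr = main_i2c_addr + 4;         /* 0x76 */
        unsigned int packet_i2c_addr = main_i2c_addr - 0xa;     /* 0x68 */
        unsigned int cec_i2c_addr = main_i2c_addr - 2;          /* 0x70 */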
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 5eebd15899b1..d7f7b7ce8ebe 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -149,13 +149,12 @@ void adv7533_uninit_cec(struct adv7511 *adv)
i2c_unregister_device(adv->i2c_cec);
}
-static const int cec_i2c_addr = 0x78;
-
int adv7533_init_cec(struct adv7511 *adv)
{
int ret;
- adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
+ adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
+ adv->i2c_main->addr - 1);
if (!adv->i2c_cec)
return -ENOMEM;
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index f9f03bcba0af..a2a82366a771 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -1001,16 +1001,11 @@ static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
return connector_status_connected;
}
-static void anx78xx_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_funcs anx78xx_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = anx78xx_detect,
- .destroy = anx78xx_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 32715daf73cb..6e0447f329a2 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -31,6 +31,7 @@
#include <drm/bridge/analogix_dp.h>
#include "analogix_dp_core.h"
+#include "analogix_dp_reg.h"
#define to_dp(nm) container_of(nm, struct analogix_dp_device, nm)
@@ -97,133 +98,89 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
return 0;
}
-static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data)
+int analogix_dp_psr_supported(struct device *dev)
{
- int i;
- unsigned char sum = 0;
-
- for (i = 0; i < EDID_BLOCK_LENGTH; i++)
- sum = sum + edid_data[i];
+ struct analogix_dp_device *dp = dev_get_drvdata(dev);
- return sum;
+ return dp->psr_support;
}
+EXPORT_SYMBOL_GPL(analogix_dp_psr_supported);
-static int analogix_dp_read_edid(struct analogix_dp_device *dp)
+int analogix_dp_enable_psr(struct device *dev)
{
- unsigned char *edid = dp->edid;
- unsigned int extend_block = 0;
- unsigned char sum;
- unsigned char test_vector;
- int retval;
+ struct analogix_dp_device *dp = dev_get_drvdata(dev);
+ struct edp_vsc_psr psr_vsc;
- /*
- * EDID device address is 0x50.
- * However, if necessary, you must have set upper address
- * into E-EDID in I2C device, 0x30.
- */
+ if (!dp->psr_support)
+ return -EINVAL;
- /* Read Extension Flag, Number of 128-byte EDID extension blocks */
- retval = analogix_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
- EDID_EXTENSION_FLAG,
- &extend_block);
- if (retval)
- return retval;
+ /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
- if (extend_block > 0) {
- dev_dbg(dp->dev, "EDID data includes a single extension!\n");
-
- /* Read EDID data */
- retval = analogix_dp_read_bytes_from_i2c(dp,
- I2C_EDID_DEVICE_ADDR,
- EDID_HEADER_PATTERN,
- EDID_BLOCK_LENGTH,
- &edid[EDID_HEADER_PATTERN]);
- if (retval != 0) {
- dev_err(dp->dev, "EDID Read failed!\n");
- return -EIO;
- }
- sum = analogix_dp_calc_edid_check_sum(edid);
- if (sum != 0) {
- dev_err(dp->dev, "EDID bad checksum!\n");
- return -EIO;
- }
+ psr_vsc.DB0 = 0;
+ psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
- /* Read additional EDID data */
- retval = analogix_dp_read_bytes_from_i2c(dp,
- I2C_EDID_DEVICE_ADDR,
- EDID_BLOCK_LENGTH,
- EDID_BLOCK_LENGTH,
- &edid[EDID_BLOCK_LENGTH]);
- if (retval != 0) {
- dev_err(dp->dev, "EDID Read failed!\n");
- return -EIO;
- }
- sum = analogix_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]);
- if (sum != 0) {
- dev_err(dp->dev, "EDID bad checksum!\n");
- return -EIO;
- }
+ analogix_dp_send_psr_spd(dp, &psr_vsc);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
- analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
- &test_vector);
- if (test_vector & DP_TEST_LINK_EDID_READ) {
- analogix_dp_write_byte_to_dpcd(dp,
- DP_TEST_EDID_CHECKSUM,
- edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
- analogix_dp_write_byte_to_dpcd(dp,
- DP_TEST_RESPONSE,
- DP_TEST_EDID_CHECKSUM_WRITE);
- }
- } else {
- dev_info(dp->dev, "EDID data does not include any extensions.\n");
-
- /* Read EDID data */
- retval = analogix_dp_read_bytes_from_i2c(dp,
- I2C_EDID_DEVICE_ADDR, EDID_HEADER_PATTERN,
- EDID_BLOCK_LENGTH, &edid[EDID_HEADER_PATTERN]);
- if (retval != 0) {
- dev_err(dp->dev, "EDID Read failed!\n");
- return -EIO;
- }
- sum = analogix_dp_calc_edid_check_sum(edid);
- if (sum != 0) {
- dev_err(dp->dev, "EDID bad checksum!\n");
- return -EIO;
- }
+int analogix_dp_disable_psr(struct device *dev)
+{
+ struct analogix_dp_device *dp = dev_get_drvdata(dev);
+ struct edp_vsc_psr psr_vsc;
- analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
- &test_vector);
- if (test_vector & DP_TEST_LINK_EDID_READ) {
- analogix_dp_write_byte_to_dpcd(dp,
- DP_TEST_EDID_CHECKSUM, edid[EDID_CHECKSUM]);
- analogix_dp_write_byte_to_dpcd(dp,
- DP_TEST_RESPONSE, DP_TEST_EDID_CHECKSUM_WRITE);
- }
- }
+ if (!dp->psr_support)
+ return -EINVAL;
+
+ /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+
+ psr_vsc.DB0 = 0;
+ psr_vsc.DB1 = 0;
- dev_dbg(dp->dev, "EDID Read success!\n");
+ analogix_dp_send_psr_spd(dp, &psr_vsc);
return 0;
}
+EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
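Both helpers above send the same VSC SDP header and differ only in the payload: enable sets the active-PSR state bits in DB1, disable clears DB0/DB1. The header bytes follow the eDP 1.4 VSC packet layout; the annotations below are my reading, not text from the patch:

        psr_vsc.sdp_header.HB0 = 0;     /* secondary-data packet ID */
        psr_vsc.sdp_header.HB1 = 0x7;   /* packet type: VSC */
        psr_vsc.sdp_header.HB2 = 0x2;   /* revision carrying PSR state */
        psr_vsc.sdp_header.HB3 = 0x8;   /* number of valid data bytes */

        /* enable */
        psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
        /* disable: psr_vsc.DB1 = 0; */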
-static int analogix_dp_handle_edid(struct analogix_dp_device *dp)
+static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
{
- u8 buf[12];
- int i;
- int retval;
+ unsigned char psr_version;
- /* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
- retval = analogix_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV, 12, buf);
- if (retval)
- return retval;
+ drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
+ dev_dbg(dp->dev, "Panel PSR version: %x\n", psr_version);
- /* Read EDID */
- for (i = 0; i < 3; i++) {
- retval = analogix_dp_read_edid(dp);
- if (!retval)
- break;
- }
+ return (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
+}
- return retval;
+static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
+{
+ unsigned char psr_en;
+
+ /* Disable psr function */
+ drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
+ psr_en &= ~DP_PSR_ENABLE;
+ drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+
+ /* Main-Link transmitter remains active during PSR active states */
+ psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
+ drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+
+ /* Enable psr function */
+ psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
+ DP_PSR_CRC_VERIFICATION;
+ drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
+
+ analogix_dp_enable_psr_crc(dp);
}
static void
@@ -232,15 +189,15 @@ analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp,
{
u8 data;
- analogix_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
+ drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
if (enable)
- analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
- DP_LANE_COUNT_ENHANCED_FRAME_EN |
- DPCD_LANE_COUNT_SET(data));
+ drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+ DP_LANE_COUNT_ENHANCED_FRAME_EN |
+ DPCD_LANE_COUNT_SET(data));
else
- analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
- DPCD_LANE_COUNT_SET(data));
+ drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
+ DPCD_LANE_COUNT_SET(data));
}
static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
@@ -248,7 +205,7 @@ static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
u8 data;
int retval;
- analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+ drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
retval = DPCD_ENHANCED_FRAME_CAP(data);
return retval;
@@ -267,8 +224,8 @@ static void analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
{
analogix_dp_set_training_pattern(dp, DP_NONE);
- analogix_dp_write_byte_to_dpcd(dp, DP_TRAINING_PATTERN_SET,
- DP_TRAINING_PATTERN_DISABLE);
+ drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
}
static void
@@ -313,8 +270,8 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
/* Setup RX configuration */
buf[0] = dp->link_train.link_rate;
buf[1] = dp->link_train.lane_count;
- retval = analogix_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET, 2, buf);
- if (retval)
+ retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
+ if (retval < 0)
return retval;
/* Set TX pre-emphasis to minimum */
@@ -338,20 +295,22 @@ static int analogix_dp_link_start(struct analogix_dp_device *dp)
analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
/* Set RX training pattern */
- retval = analogix_dp_write_byte_to_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
- if (retval)
+ retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_1);
+ if (retval < 0)
return retval;
for (lane = 0; lane < lane_count; lane++)
buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
- retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
- lane_count, buf);
+ retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf,
+ lane_count);
+ if (retval < 0)
+ return retval;
- return retval;
+ return 0;
}
static unsigned char analogix_dp_get_lane_status(u8 link_status[2], int lane)
@@ -503,25 +462,23 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
lane_count = dp->link_train.lane_count;
- retval = analogix_dp_read_bytes_from_dpcd(dp,
- DP_LANE0_1_STATUS, 2, link_status);
- if (retval)
+ retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2);
+ if (retval < 0)
return retval;
- retval = analogix_dp_read_bytes_from_dpcd(dp,
- DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
- if (retval)
+ retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+ adjust_request, 2);
+ if (retval < 0)
return retval;
if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) {
/* set training pattern 2 for EQ */
analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
- retval = analogix_dp_write_byte_to_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- DP_LINK_SCRAMBLING_DISABLE |
- DP_TRAINING_PATTERN_2);
- if (retval)
+ retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_2);
+ if (retval < 0)
return retval;
dev_info(dp->dev, "Link Training Clock Recovery success\n");
@@ -559,13 +516,12 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
analogix_dp_set_lane_link_training(dp,
dp->link_train.training_lane[lane], lane);
- retval = analogix_dp_write_bytes_to_dpcd(dp,
- DP_TRAINING_LANE0_SET, lane_count,
- dp->link_train.training_lane);
- if (retval)
+ retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+ dp->link_train.training_lane, lane_count);
+ if (retval < 0)
return retval;
- return retval;
+ return 0;
}
static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
@@ -578,9 +534,8 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
lane_count = dp->link_train.lane_count;
- retval = analogix_dp_read_bytes_from_dpcd(dp,
- DP_LANE0_1_STATUS, 2, link_status);
- if (retval)
+ retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2);
+ if (retval < 0)
return retval;
if (analogix_dp_clock_recovery_ok(link_status, lane_count)) {
@@ -588,14 +543,14 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
return -EIO;
}
- retval = analogix_dp_read_bytes_from_dpcd(dp,
- DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
- if (retval)
+ retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+ adjust_request, 2);
+ if (retval < 0)
return retval;
- retval = analogix_dp_read_byte_from_dpcd(dp,
- DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
- if (retval)
+ retval = drm_dp_dpcd_readb(&dp->aux, DP_LANE_ALIGN_STATUS_UPDATED,
+ &link_align);
+ if (retval < 0)
return retval;
analogix_dp_get_adjust_training_lane(dp, adjust_request);
@@ -636,10 +591,12 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
analogix_dp_set_lane_link_training(dp,
dp->link_train.training_lane[lane], lane);
- retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
- lane_count, dp->link_train.training_lane);
+ retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+ dp->link_train.training_lane, lane_count);
+ if (retval < 0)
+ return retval;
- return retval;
+ return 0;
}
static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
@@ -653,7 +610,7 @@ static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
* For DP rev.1.2, Maximum link rate of Main Link lanes
* 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4Gbps
*/
- analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
+ drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data);
*bandwidth = data;
}
@@ -666,7 +623,7 @@ static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp,
* For DP rev.1.1, Maximum number of Main Link lanes
* 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
*/
- analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+ drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
*lane_count = DPCD_MAX_LANE_COUNT(data);
}
@@ -835,19 +792,15 @@ static void analogix_dp_enable_scramble(struct analogix_dp_device *dp,
if (enable) {
analogix_dp_enable_scrambling(dp);
- analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
- &data);
- analogix_dp_write_byte_to_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
+ drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
+ drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
} else {
analogix_dp_disable_scrambling(dp);
- analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
- &data);
- analogix_dp_write_byte_to_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
+ drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data);
+ drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
}
}
@@ -921,21 +874,85 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
/* Enable video */
analogix_dp_start_video(dp);
+
+ dp->psr_support = analogix_dp_detect_sink_psr(dp);
+ if (dp->psr_support)
+ analogix_dp_enable_sink_psr(dp);
}
-int analogix_dp_get_modes(struct drm_connector *connector)
+/*
+ * This function is a bit of a catch-all for panel preparation, hopefully
+ * simplifying the logic of functions that need to prepare/unprepare the panel
+ * below.
+ *
+ * If @prepare is true, this function will prepare the panel. Conversely, if it
+ * is false, the panel will be unprepared.
+ *
+ * If @is_modeset_prepare is true, the function will disregard the current state
+ * of the panel and either prepare or unprepare it based on @prepare. Once
+ * it finishes, it will update dp->panel_is_modeset to reflect the current state
+ * of the panel.
+ */
+static int analogix_dp_prepare_panel(struct analogix_dp_device *dp,
+ bool prepare, bool is_modeset_prepare)
{
- struct analogix_dp_device *dp = to_dp(connector);
- struct edid *edid = (struct edid *)dp->edid;
- int num_modes = 0;
+ int ret = 0;
- if (analogix_dp_handle_edid(dp) == 0) {
- drm_mode_connector_update_edid_property(&dp->connector, edid);
- num_modes += drm_add_edid_modes(&dp->connector, edid);
- }
+ if (!dp->plat_data->panel)
+ return 0;
- if (dp->plat_data->panel)
+ mutex_lock(&dp->panel_lock);
+
+ /*
+ * Exit early if this is a temporary prepare/unprepare and we're already
+ * modeset (since we neither want to prepare twice nor unprepare early).
+ */
+ if (dp->panel_is_modeset && !is_modeset_prepare)
+ goto out;
+
+ if (prepare)
+ ret = drm_panel_prepare(dp->plat_data->panel);
+ else
+ ret = drm_panel_unprepare(dp->plat_data->panel);
+
+ if (ret)
+ goto out;
+
+ if (is_modeset_prepare)
+ dp->panel_is_modeset = prepare;
+
+out:
+ mutex_unlock(&dp->panel_lock);
+ return ret;
+}
+
+static int analogix_dp_get_modes(struct drm_connector *connector)
+{
+ struct analogix_dp_device *dp = to_dp(connector);
+ struct edid *edid;
+ int ret, num_modes = 0;
+
+ if (dp->plat_data->panel) {
num_modes += drm_panel_get_modes(dp->plat_data->panel);
+ } else {
+ ret = analogix_dp_prepare_panel(dp, true, false);
+ if (ret) {
+ DRM_ERROR("Failed to prepare panel (%d)\n", ret);
+ return 0;
+ }
+
+ edid = drm_get_edid(connector, &dp->aux.ddc);
+ if (edid) {
+ drm_mode_connector_update_edid_property(&dp->connector,
+ edid);
+ num_modes += drm_add_edid_modes(&dp->connector, edid);
+ kfree(edid);
+ }
+
+ ret = analogix_dp_prepare_panel(dp, false, false);
+ if (ret)
+ DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
+ }
if (dp->plat_data->get_modes)
num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
@@ -956,29 +973,37 @@ static const struct drm_connector_helper_funcs analogix_dp_connector_helper_func
.best_encoder = analogix_dp_best_encoder,
};
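With the drm_dp_aux registered later in this patch, its embedded I2C adapter (dp->aux.ddc) carries EDID reads as AUX-over-I2C, which is what allows the hand-rolled analogix_dp_read_edid() path to be deleted. A condensed sketch of the replacement:

        struct edid *edid = drm_get_edid(connector, &dp->aux.ddc);

        if (edid) {
                drm_mode_connector_update_edid_property(connector, edid);
                num_modes += drm_add_edid_modes(connector, edid);
                kfree(edid);    /* drm_get_edid() allocates; the caller frees */
        }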
-enum drm_connector_status
+static enum drm_connector_status
analogix_dp_detect(struct drm_connector *connector, bool force)
{
struct analogix_dp_device *dp = to_dp(connector);
+ enum drm_connector_status status = connector_status_disconnected;
+ int ret;
- if (analogix_dp_detect_hpd(dp))
+ if (dp->plat_data->panel)
+ return connector_status_connected;
+
+ ret = analogix_dp_prepare_panel(dp, true, false);
+ if (ret) {
+ DRM_ERROR("Failed to prepare panel (%d)\n", ret);
return connector_status_disconnected;
+ }
- return connector_status_connected;
-}
+ if (!analogix_dp_detect_hpd(dp))
+ status = connector_status_connected;
-static void analogix_dp_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
+ ret = analogix_dp_prepare_panel(dp, false, false);
+ if (ret)
+ DRM_ERROR("Failed to unprepare panel (%d)\n", ret);
+ return status;
}
static const struct drm_connector_funcs analogix_dp_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = analogix_dp_detect,
- .destroy = analogix_dp_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -1035,6 +1060,16 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
return 0;
}
+static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct analogix_dp_device *dp = bridge->driver_private;
+ int ret;
+
+ ret = analogix_dp_prepare_panel(dp, true, true);
+ if (ret)
+ DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+}
+
static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
{
struct analogix_dp_device *dp = bridge->driver_private;
@@ -1058,6 +1093,7 @@ static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
{
struct analogix_dp_device *dp = bridge->driver_private;
+ int ret;
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@@ -1077,6 +1113,10 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
pm_runtime_put_sync(dp->dev);
+ ret = analogix_dp_prepare_panel(dp, false, true);
+ if (ret)
+ DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
@@ -1165,9 +1205,9 @@ static void analogix_dp_bridge_nop(struct drm_bridge *bridge)
}
static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+ .pre_enable = analogix_dp_bridge_pre_enable,
.enable = analogix_dp_bridge_enable,
.disable = analogix_dp_bridge_disable,
- .pre_enable = analogix_dp_bridge_nop,
.post_disable = analogix_dp_bridge_nop,
.mode_set = analogix_dp_bridge_mode_set,
.attach = analogix_dp_bridge_attach,
@@ -1231,6 +1271,14 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
return 0;
}
+static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct analogix_dp_device *dp = to_dp(aux);
+
+ return analogix_dp_transfer(dp, msg);
+}
+
int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
struct analogix_dp_plat_data *plat_data)
{
@@ -1254,6 +1302,9 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
+ mutex_init(&dp->panel_lock);
+ dp->panel_is_modeset = false;
+
/*
* platform dp driver needs to container_of the plat_data to get
* the driver private data, so we need to store the pointer of
@@ -1333,13 +1384,6 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
phy_power_on(dp->phy);
- if (dp->plat_data->panel) {
- if (drm_panel_prepare(dp->plat_data->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return -EBUSY;
- }
- }
-
analogix_dp_init_dp(dp);
ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
@@ -1355,6 +1399,14 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
dp->drm_dev = drm_dev;
dp->encoder = dp->plat_data->encoder;
+ dp->aux.name = "DP-AUX";
+ dp->aux.transfer = analogix_dpaux_transfer;
+ dp->aux.dev = &pdev->dev;
+
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret)
+ goto err_disable_pm_runtime;
+
ret = analogix_dp_create_bridge(drm_dev, dp);
if (ret) {
DRM_ERROR("failed to create bridge (%d)\n", ret);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index b45638043ec4..5c6a28806129 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -20,15 +20,6 @@
#define MAX_CR_LOOP 5
#define MAX_EQ_LOOP 5
-/* I2C EDID Chip ID, Slave Address */
-#define I2C_EDID_DEVICE_ADDR 0x50
-#define I2C_E_EDID_DEVICE_ADDR 0x30
-
-#define EDID_BLOCK_LENGTH 0x80
-#define EDID_HEADER_PATTERN 0x00
-#define EDID_EXTENSION_FLAG 0x7e
-#define EDID_CHECKSUM 0x7f
-
/* DP_MAX_LANE_COUNT */
#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1)
#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f)
@@ -166,6 +157,7 @@ struct analogix_dp_device {
struct drm_device *drm_dev;
struct drm_connector connector;
struct drm_bridge *bridge;
+ struct drm_dp_aux aux;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
@@ -176,7 +168,10 @@ struct analogix_dp_device {
int dpms_mode;
int hpd_gpio;
bool force_hpd;
- unsigned char edid[EDID_BLOCK_LENGTH * 2];
+ bool psr_support;
+
+ struct mutex panel_lock;
+ bool panel_is_modeset;
struct analogix_dp_plat_data *plat_data;
};
@@ -206,33 +201,6 @@ void analogix_dp_reset_aux(struct analogix_dp_device *dp);
void analogix_dp_init_aux(struct analogix_dp_device *dp);
int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp);
void analogix_dp_enable_sw_function(struct analogix_dp_device *dp);
-int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp);
-int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
- unsigned int reg_addr,
- unsigned char data);
-int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
- unsigned int reg_addr,
- unsigned char *data);
-int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char data[]);
-int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char data[]);
-int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr);
-int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr,
- unsigned int *data);
-int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char edid[]);
void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype);
void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype);
void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count);
@@ -278,4 +246,10 @@ int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp);
void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp);
void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
+void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp);
+void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+ struct edp_vsc_psr *vsc);
+ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ struct drm_dp_aux_msg *msg);
+
#endif /* _ANALOGIX_DP_CORE_H */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 48030f0cf497..cd37ac058675 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -585,330 +585,6 @@ int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
return retval;
}
-int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
- unsigned int reg_addr,
- unsigned char *data)
-{
- u32 reg;
- int i;
- int retval;
-
- for (i = 0; i < 3; i++) {
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
- /* Select DPCD device address */
- reg = AUX_ADDR_7_0(reg_addr);
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
- reg = AUX_ADDR_15_8(reg_addr);
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
- reg = AUX_ADDR_19_16(reg_addr);
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
- /*
- * Set DisplayPort transaction and read 1 byte
- * If bit 3 is 1, DisplayPort transaction.
- * If Bit 3 is 0, I2C transaction.
- */
- reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = analogix_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
-
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
- }
-
- /* Read data buffer */
- reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
- *data = (unsigned char)(reg & 0xff);
-
- return retval;
-}
-
-int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char data[])
-{
- u32 reg;
- unsigned int start_offset;
- unsigned int cur_data_count;
- unsigned int cur_data_idx;
- int i;
- int retval = 0;
-
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
- start_offset = 0;
- while (start_offset < count) {
- /* Buffer size of AUX CH is 16 * 4bytes */
- if ((count - start_offset) > 16)
- cur_data_count = 16;
- else
- cur_data_count = count - start_offset;
-
- for (i = 0; i < 3; i++) {
- /* Select DPCD device address */
- reg = AUX_ADDR_7_0(reg_addr + start_offset);
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
- reg = AUX_ADDR_15_8(reg_addr + start_offset);
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
- reg = AUX_ADDR_19_16(reg_addr + start_offset);
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
- for (cur_data_idx = 0; cur_data_idx < cur_data_count;
- cur_data_idx++) {
- reg = data[start_offset + cur_data_idx];
- writel(reg, dp->reg_base +
- ANALOGIX_DP_BUF_DATA_0 +
- 4 * cur_data_idx);
- }
-
- /*
- * Set DisplayPort transaction and write
- * If bit 3 is 1, DisplayPort transaction.
- * If Bit 3 is 0, I2C transaction.
- */
- reg = AUX_LENGTH(cur_data_count) |
- AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = analogix_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
-
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
- __func__);
- }
-
- start_offset += cur_data_count;
- }
-
- return retval;
-}
-
-int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char data[])
-{
- u32 reg;
- unsigned int start_offset;
- unsigned int cur_data_count;
- unsigned int cur_data_idx;
- int i;
- int retval = 0;
-
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
- start_offset = 0;
- while (start_offset < count) {
- /* Buffer size of AUX CH is 16 * 4bytes */
- if ((count - start_offset) > 16)
- cur_data_count = 16;
- else
- cur_data_count = count - start_offset;
-
- /* AUX CH Request Transaction process */
- for (i = 0; i < 3; i++) {
- /* Select DPCD device address */
- reg = AUX_ADDR_7_0(reg_addr + start_offset);
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
- reg = AUX_ADDR_15_8(reg_addr + start_offset);
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
- reg = AUX_ADDR_19_16(reg_addr + start_offset);
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
- /*
- * Set DisplayPort transaction and read
- * If bit 3 is 1, DisplayPort transaction.
- * If Bit 3 is 0, I2C transaction.
- */
- reg = AUX_LENGTH(cur_data_count) |
- AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = analogix_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
-
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
- __func__);
- }
-
- for (cur_data_idx = 0; cur_data_idx < cur_data_count;
- cur_data_idx++) {
- reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
- + 4 * cur_data_idx);
- data[start_offset + cur_data_idx] =
- (unsigned char)reg;
- }
-
- start_offset += cur_data_count;
- }
-
- return retval;
-}
-
-int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr)
-{
- u32 reg;
- int retval;
-
- /* Set EDID device address */
- reg = device_addr;
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
- writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
- writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
-
- /* Set offset from base address of EDID device */
- writel(reg_addr, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-
- /*
- * Set I2C transaction and write address
- * If bit 3 is 1, DisplayPort transaction.
- * If Bit 3 is 0, I2C transaction.
- */
- reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
- AUX_TX_COMM_WRITE;
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = analogix_dp_start_aux_transaction(dp);
- if (retval != 0)
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-
- return retval;
-}
-
-int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr,
- unsigned int *data)
-{
- u32 reg;
- int i;
- int retval;
-
- for (i = 0; i < 3; i++) {
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
- /* Select EDID device */
- retval = analogix_dp_select_i2c_device(dp, device_addr,
- reg_addr);
- if (retval != 0)
- continue;
-
- /*
- * Set I2C transaction and read data
- * If bit 3 is 1, DisplayPort transaction.
- * If Bit 3 is 0, I2C transaction.
- */
- reg = AUX_TX_COMM_I2C_TRANSACTION |
- AUX_TX_COMM_READ;
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = analogix_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
-
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
- }
-
- /* Read data */
- if (retval == 0)
- *data = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
-
- return retval;
-}
-
-int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char edid[])
-{
- u32 reg;
- unsigned int i, j;
- unsigned int cur_data_idx;
- unsigned int defer = 0;
- int retval = 0;
-
- for (i = 0; i < count; i += 16) {
- for (j = 0; j < 3; j++) {
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
-
- /* Set normal AUX CH command */
- reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
- reg &= ~ADDR_ONLY;
- writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
-
- /*
- * If Rx sends defer, Tx sends only reads
- * request without sending address
- */
- if (!defer)
- retval = analogix_dp_select_i2c_device(dp,
- device_addr, reg_addr + i);
- else
- defer = 0;
-
- if (retval == 0) {
- /*
- * Set I2C transaction and write data
- * If bit 3 is 1, DisplayPort transaction.
- * If Bit 3 is 0, I2C transaction.
- */
- reg = AUX_LENGTH(16) |
- AUX_TX_COMM_I2C_TRANSACTION |
- AUX_TX_COMM_READ;
- writel(reg, dp->reg_base +
- ANALOGIX_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = analogix_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
-
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
- __func__);
- }
- /* Check if Rx sends defer */
- reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
- if (reg == AUX_RX_COMM_AUX_DEFER ||
- reg == AUX_RX_COMM_I2C_DEFER) {
- dev_err(dp->dev, "Defer: %d\n\n", reg);
- defer = 1;
- }
- }
-
- for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
- reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
- + 4 * cur_data_idx);
- edid[i + cur_data_idx] = (unsigned char)reg;
- }
- }
-
- return retval;
-}
-
void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
{
u32 reg;
@@ -1073,34 +749,22 @@ void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp)
{
- u32 reg;
-
- reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
- return reg;
+ return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
}
u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp)
{
- u32 reg;
-
- reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
- return reg;
+ return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
}
u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp)
{
- u32 reg;
-
- reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
- return reg;
+ return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
}
u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp)
{
- u32 reg;
-
- reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
- return reg;
+ return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
}
void analogix_dp_reset_macro(struct analogix_dp_device *dp)
@@ -1322,3 +986,181 @@ void analogix_dp_disable_scrambling(struct analogix_dp_device *dp)
reg |= SCRAMBLING_DISABLE;
writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
}
+
+void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp)
+{
+ writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON);
+}
+
+void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+ struct edp_vsc_psr *vsc)
+{
+ unsigned int val;
+
+ /* don't send info frame */
+ val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+ val &= ~IF_EN;
+ writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+	/* configure burst frame update and hardware-generated CRC */
+ writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE,
+ dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL);
+
+ /* configure VSC HB0~HB3 */
+ writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0);
+ writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1);
+ writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2);
+ writel(vsc->sdp_header.HB3, dp->reg_base + ANALOGIX_DP_SPD_HB3);
+
+ /* configure reused VSC PB0~PB3, magic number from vendor */
+ writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0);
+ writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1);
+ writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2);
+ writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3);
+
+ /* configure DB0 / DB1 values */
+ writel(vsc->DB0, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0);
+ writel(vsc->DB1, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1);
+
+	/* set reuse spd infoframe */
+ val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+ val |= REUSE_SPD_EN;
+ writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+
+ /* mark info frame update */
+ val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+ val = (val | IF_UP) & ~IF_EN;
+ writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+ /* send info frame */
+ val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+ val |= IF_EN;
+ writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+}
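
The helper above only programs the SPD registers; the caller is expected to hand it a pre-built VSC packet. A minimal sketch of such a caller follows, assuming the eDP 1.3 VSC SDP header layout and the PSR state bits from <drm/drm_dp_helper.h>; the function name and the exact header values are illustrative, not part of this patch.

#include <drm/drm_dp_helper.h>

/* Hypothetical caller: build a PSR-enter VSC packet and send it. */
static void example_psr_enter(struct analogix_dp_device *dp)
{
	struct edp_vsc_psr psr_vsc;

	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;	/* VSC SDP packet type */
	psr_vsc.sdp_header.HB2 = 0x2;	/* revision: PSR operation */
	psr_vsc.sdp_header.HB3 = 0x8;	/* number of valid data bytes */
	psr_vsc.DB0 = 0;
	psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;

	analogix_dp_send_psr_spd(dp, &psr_vsc);
}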
+
+ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
+ struct drm_dp_aux_msg *msg)
+{
+ u32 reg;
+ u8 *buffer = msg->buffer;
+ int timeout_loop = 0;
+ unsigned int i;
+ int num_transferred = 0;
+
+ /* Buffer size of AUX CH is 16 bytes */
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_I2C_WRITE:
+ reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_I2C_TRANSACTION;
+ if (msg->request & DP_AUX_I2C_MOT)
+ reg |= AUX_TX_COMM_MOT;
+ break;
+
+ case DP_AUX_I2C_READ:
+ reg = AUX_TX_COMM_READ | AUX_TX_COMM_I2C_TRANSACTION;
+ if (msg->request & DP_AUX_I2C_MOT)
+ reg |= AUX_TX_COMM_MOT;
+ break;
+
+ case DP_AUX_NATIVE_WRITE:
+ reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_DP_TRANSACTION;
+ break;
+
+ case DP_AUX_NATIVE_READ:
+ reg = AUX_TX_COMM_READ | AUX_TX_COMM_DP_TRANSACTION;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ reg |= AUX_LENGTH(msg->size);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+ /* Select DPCD device address */
+ reg = AUX_ADDR_7_0(msg->address);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+ reg = AUX_ADDR_15_8(msg->address);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+ reg = AUX_ADDR_19_16(msg->address);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+ if (!(msg->request & DP_AUX_I2C_READ)) {
+ for (i = 0; i < msg->size; i++) {
+ reg = buffer[i];
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+ 4 * i);
+ num_transferred++;
+ }
+ }
+
+ /* Enable AUX CH operation */
+ reg = AUX_EN;
+
+ /* Zero-sized messages specify address-only transactions. */
+ if (msg->size < 1)
+ reg |= ADDR_ONLY;
+
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+
+ /* Is AUX CH command reply received? */
+ /* TODO: Wait for an interrupt instead of looping? */
+ reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+ while (!(reg & RPLY_RECEIV)) {
+ timeout_loop++;
+ if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
+ dev_err(dp->dev, "AUX CH command reply failed!\n");
+ return -ETIMEDOUT;
+ }
+ reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+ usleep_range(10, 11);
+ }
+
+ /* Clear interrupt source for AUX CH command reply */
+ writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+ /* Clear interrupt source for AUX CH access error */
+ reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+ if (reg & AUX_ERR) {
+ writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
+ return -EREMOTEIO;
+ }
+
+ /* Check AUX CH error access status */
+ reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
+ if ((reg & AUX_STATUS_MASK)) {
+ dev_err(dp->dev, "AUX CH error happened: %d\n\n",
+ reg & AUX_STATUS_MASK);
+ return -EREMOTEIO;
+ }
+
+ if (msg->request & DP_AUX_I2C_READ) {
+ for (i = 0; i < msg->size; i++) {
+ reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
+ 4 * i);
+ buffer[i] = (unsigned char)reg;
+ num_transferred++;
+ }
+ }
+
+ /* Check if Rx sends defer */
+ reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
+ if (reg == AUX_RX_COMM_AUX_DEFER)
+ msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+ else if (reg == AUX_RX_COMM_I2C_DEFER)
+ msg->reply = DP_AUX_I2C_REPLY_DEFER;
+ else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE ||
+ (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_READ)
+ msg->reply = DP_AUX_I2C_REPLY_ACK;
+ else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE ||
+ (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
+ msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+
+ return num_transferred;
+}
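
analogix_dp_transfer() has exactly the shape of a struct drm_dp_aux ->transfer hook, which is what replaces the hand-rolled EDID/DPCD readers removed above. A sketch of how a driver could expose it through the core AUX helpers follows; the `aux` member on the device struct and the wrapper are assumptions for illustration, not part of this patch.

#include <drm/drm_dp_helper.h>

/* Hypothetical glue: route core AUX requests to analogix_dp_transfer(). */
static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg)
{
	struct analogix_dp_device *dp =
		container_of(aux, struct analogix_dp_device, aux);

	return analogix_dp_transfer(dp, msg);
}

static int example_register_aux(struct analogix_dp_device *dp)
{
	dp->aux.name = "DP-AUX";
	dp->aux.transfer = example_aux_transfer;
	dp->aux.dev = dp->dev;

	return drm_dp_aux_register(&dp->aux);
}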
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index cdcc6c5add5e..40200c652533 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -22,6 +22,8 @@
#define ANALOGIX_DP_VIDEO_CTL_8 0x3C
#define ANALOGIX_DP_VIDEO_CTL_10 0x44
+#define ANALOGIX_DP_SPDIF_AUDIO_CTL_0 0xD8
+
#define ANALOGIX_DP_PLL_REG_1 0xfc
#define ANALOGIX_DP_PLL_REG_2 0x9e4
#define ANALOGIX_DP_PLL_REG_3 0x9e8
@@ -30,6 +32,21 @@
#define ANALOGIX_DP_PD 0x12c
+#define ANALOGIX_DP_IF_TYPE 0x244
+#define ANALOGIX_DP_IF_PKT_DB1 0x254
+#define ANALOGIX_DP_IF_PKT_DB2 0x258
+#define ANALOGIX_DP_SPD_HB0 0x2F8
+#define ANALOGIX_DP_SPD_HB1 0x2FC
+#define ANALOGIX_DP_SPD_HB2 0x300
+#define ANALOGIX_DP_SPD_HB3 0x304
+#define ANALOGIX_DP_SPD_PB0 0x308
+#define ANALOGIX_DP_SPD_PB1 0x30C
+#define ANALOGIX_DP_SPD_PB2 0x310
+#define ANALOGIX_DP_SPD_PB3 0x314
+#define ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL 0x318
+#define ANALOGIX_DP_VSC_SHADOW_DB0 0x31C
+#define ANALOGIX_DP_VSC_SHADOW_DB1 0x320
+
#define ANALOGIX_DP_LANE_MAP 0x35C
#define ANALOGIX_DP_ANALOG_CTL_1 0x370
@@ -103,6 +120,8 @@
#define ANALOGIX_DP_SOC_GENERAL_CTL 0x800
+#define ANALOGIX_DP_CRC_CON 0x890
+
/* ANALOGIX_DP_TX_SW_RESET */
#define RESET_DP_TX (0x1 << 0)
@@ -151,6 +170,7 @@
#define VID_CHK_UPDATE_TYPE_SHIFT (4)
#define VID_CHK_UPDATE_TYPE_1 (0x1 << 4)
#define VID_CHK_UPDATE_TYPE_0 (0x0 << 4)
+#define REUSE_SPD_EN (0x1 << 3)
/* ANALOGIX_DP_VIDEO_CTL_8 */
#define VID_HRES_TH(x) (((x) & 0xf) << 4)
@@ -167,6 +187,12 @@
#define REF_CLK_27M (0x0 << 0)
#define REF_CLK_MASK (0x1 << 0)
+/* ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL */
+#define PSR_FRAME_UP_TYPE_BURST (0x1 << 0)
+#define PSR_FRAME_UP_TYPE_SINGLE (0x0 << 0)
+#define PSR_CRC_SEL_HARDWARE (0x1 << 1)
+#define PSR_CRC_SEL_MANUALLY (0x0 << 1)
+
/* ANALOGIX_DP_LANE_MAP */
#define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6)
#define LANE3_MAP_LOGIC_LANE_1 (0x1 << 6)
@@ -376,4 +402,12 @@
#define VIDEO_MODE_SLAVE_MODE (0x1 << 0)
#define VIDEO_MODE_MASTER_MODE (0x0 << 0)
+/* ANALOGIX_DP_PKT_SEND_CTL */
+#define IF_UP (0x1 << 4)
+#define IF_EN (0x1 << 0)
+
+/* ANALOGIX_DP_CRC_CON */
+#define PSR_VID_CRC_FLUSH (0x1 << 2)
+#define PSR_VID_CRC_ENABLE (0x1 << 0)
+
#endif /* _ANALOGIX_DP_REG_H */
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
new file mode 100644
index 000000000000..afec232185a7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2015-2016 Free Electrons
+ * Copyright (C) 2015-2016 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+struct dumb_vga {
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ struct i2c_adapter *ddc;
+};
+
+static inline struct dumb_vga *
+drm_bridge_to_dumb_vga(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dumb_vga, bridge);
+}
+
+static inline struct dumb_vga *
+drm_connector_to_dumb_vga(struct drm_connector *connector)
+{
+ return container_of(connector, struct dumb_vga, connector);
+}
+
+static int dumb_vga_get_modes(struct drm_connector *connector)
+{
+ struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
+ struct edid *edid;
+ int ret;
+
+ if (IS_ERR(vga->ddc))
+ goto fallback;
+
+ edid = drm_get_edid(connector, vga->ddc);
+ if (!edid) {
+ DRM_INFO("EDID readout failed, falling back to standard modes\n");
+ goto fallback;
+ }
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ return drm_add_edid_modes(connector, edid);
+
+fallback:
+ /*
+	 * In case we cannot retrieve the EDID (broken or missing i2c
+	 * bus), fall back on the XGA standards
+ */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+ /* And prefer a mode pretty much anyone can handle */
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs dumb_vga_con_helper_funcs = {
+ .get_modes = dumb_vga_get_modes,
+};
+
+static enum drm_connector_status
+dumb_vga_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
+
+ /*
+ * Even if we have an I2C bus, we can't assume that the cable
+ * is disconnected if drm_probe_ddc fails. Some cables don't
+ * wire the DDC pins, or the I2C bus might not be working at
+ * all.
+ */
+ if (!IS_ERR(vga->ddc) && drm_probe_ddc(vga->ddc))
+ return connector_status_connected;
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs dumb_vga_con_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = dumb_vga_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int dumb_vga_attach(struct drm_bridge *bridge)
+{
+ struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Missing encoder\n");
+ return -ENODEV;
+ }
+
+ drm_connector_helper_add(&vga->connector,
+ &dumb_vga_con_helper_funcs);
+ ret = drm_connector_init(bridge->dev, &vga->connector,
+ &dumb_vga_con_funcs, DRM_MODE_CONNECTOR_VGA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_mode_connector_attach_encoder(&vga->connector,
+ bridge->encoder);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
+ .attach = dumb_vga_attach,
+};
+
+static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
+{
+ struct device_node *end_node, *phandle, *remote;
+ struct i2c_adapter *ddc;
+
+ end_node = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
+ if (!end_node) {
+ dev_err(dev, "Missing connector endpoint\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ remote = of_graph_get_remote_port_parent(end_node);
+ of_node_put(end_node);
+ if (!remote) {
+ dev_err(dev, "Enable to parse remote node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ of_node_put(remote);
+ if (!phandle)
+ return ERR_PTR(-ENODEV);
+
+ ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return ddc;
+}
+
+static int dumb_vga_probe(struct platform_device *pdev)
+{
+ struct dumb_vga *vga;
+ int ret;
+
+ vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL);
+ if (!vga)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, vga);
+
+ vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
+ if (IS_ERR(vga->ddc)) {
+ if (PTR_ERR(vga->ddc) == -ENODEV) {
+ dev_dbg(&pdev->dev,
+ "No i2c bus specified. Disabling EDID readout\n");
+ } else {
+ dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
+ return PTR_ERR(vga->ddc);
+ }
+ }
+
+ vga->bridge.funcs = &dumb_vga_bridge_funcs;
+ vga->bridge.of_node = pdev->dev.of_node;
+
+ ret = drm_bridge_add(&vga->bridge);
+ if (ret && !IS_ERR(vga->ddc))
+ i2c_put_adapter(vga->ddc);
+
+ return ret;
+}
+
+static int dumb_vga_remove(struct platform_device *pdev)
+{
+ struct dumb_vga *vga = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&vga->bridge);
+
+ if (!IS_ERR(vga->ddc))
+ i2c_put_adapter(vga->ddc);
+
+ return 0;
+}
+
+static const struct of_device_id dumb_vga_match[] = {
+ { .compatible = "dumb-vga-dac" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dumb_vga_match);
+
+static struct platform_driver dumb_vga_driver = {
+ .probe = dumb_vga_probe,
+ .remove = dumb_vga_remove,
+ .driver = {
+ .name = "dumb-vga-dac",
+ .of_match_table = dumb_vga_match,
+ },
+};
+module_platform_driver(dumb_vga_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Dumb VGA DAC bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
index 122bb015f4a9..8f2d1379c880 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c
@@ -640,7 +640,6 @@ static struct platform_driver snd_dw_hdmi_driver = {
.remove = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.pm = PM_OPS,
},
};
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index 77ab47341658..ab7023e5dfde 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -940,10 +940,11 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
*/
/*
- * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6,
- * active aspect present in bit 6 rather than 4.
+ * AVI data byte 1 differences: Colorspace in bits 0,1 rather than 5,6,
+ * scan info in bits 4,5 rather than 0,1 and active aspect present in
+ * bit 6 rather than 4.
*/
- val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3);
+ val = (frame.scan_mode & 3) << 4 | (frame.colorspace & 3);
if (frame.active_aspect & 15)
val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT;
if (frame.top_bar || frame.bottom_bar)
@@ -1476,12 +1477,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector,
return mode_status;
}
-static void dw_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static void dw_hdmi_connector_force(struct drm_connector *connector)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
@@ -1498,7 +1493,7 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = dw_hdmi_connector_detect,
- .destroy = dw_hdmi_connector_destroy,
+ .destroy = drm_connector_cleanup,
.force = dw_hdmi_connector_force,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1812,9 +1807,6 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
/* Disable all interrupts */
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
- hdmi->connector.funcs->destroy(&hdmi->connector);
- hdmi->encoder->funcs->destroy(hdmi->encoder);
-
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
i2c_put_adapter(hdmi->ddc);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 93f3dacf9e27..f1a99938e924 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -245,16 +245,11 @@ static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
return connector_status_connected;
}
-static void ptn3460_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_funcs ptn3460_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = ptn3460_detect,
- .destroy = ptn3460_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 583b8ce614e3..6f7c2f9860d2 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -16,7 +16,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -484,16 +483,11 @@ static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
return connector_status_connected;
}
-static void ps8622_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_funcs ps8622_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = ps8622_detect,
- .destroy = ps8622_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index a09825d8c94a..44d476ea6d2e 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1165,17 +1165,11 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
.best_encoder = tc_connector_best_encoder,
};
-static void tc_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_funcs tc_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = tc_connector_detect,
- .destroy = tc_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b05f7eae32ce..6c76d125995b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -57,7 +57,7 @@ static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
kfree(ap);
return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 3b5be7272357..daecf1ad76a4 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -13,8 +13,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <linux/fb.h>
-
#include "cirrus_drv.h"
static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1cc9ee607128..5e7e63ce7bce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -150,7 +150,8 @@ static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *fi
{
struct cirrus_bo *cirrusbo = cirrus_bo(bo);
- return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&cirrusbo->gem.vma_node,
+ filp->private_data);
}
static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -266,6 +267,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -275,6 +279,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
void cirrus_mm_fini(struct cirrus_device *cirrus)
{
+ struct drm_device *dev = cirrus->dev;
+
if (!cirrus->mm_inited)
return;
@@ -284,6 +290,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 605bd243fb36..d621c8a4cf00 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -430,9 +430,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
* intact so it can still be used. It is safe to call this if AGP is disabled or
* was already removed.
*
- * If DRIVER_MODESET is active, nothing is done to protect the modesetting
- * resources from getting destroyed. Drivers are responsible of cleaning them up
- * during device shutdown.
+ * Cleanup is only done for drivers that have DRIVER_LEGACY set.
*/
void drm_legacy_agp_clear(struct drm_device *dev)
{
@@ -440,7 +438,7 @@ void drm_legacy_agp_clear(struct drm_device *dev)
if (!dev->agp)
return;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 2a3ded44cf2a..e6862a744210 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -420,18 +420,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
ssize_t expected_size,
bool *replaced)
{
- struct drm_device *dev = crtc->dev;
struct drm_property_blob *new_blob = NULL;
if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
+ new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
if (new_blob == NULL)
return -EINVAL;
- if (expected_size > 0 && expected_size != new_blob->length)
+
+ if (expected_size > 0 && expected_size != new_blob->length) {
+ drm_property_unreference_blob(new_blob);
return -EINVAL;
+ }
}
drm_atomic_replace_property_blob(blob, new_blob, replaced);
+ drm_property_unreference_blob(new_blob);
return 0;
}
@@ -837,8 +840,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
/* Check whether this plane supports the fb pixel format. */
ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
if (ret) {
- DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
- drm_get_format_name(state->fb->pixel_format));
+ char *format_name = drm_get_format_name(state->fb->pixel_format);
+ DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
+ kfree(format_name);
return ret;
}
@@ -1690,7 +1694,7 @@ retry:
goto out;
}
- prop = drm_property_find(dev, prop_id);
+ prop = drm_mode_obj_find_prop_id(obj, prop_id);
if (!prop) {
drm_mode_object_unreference(obj);
ret = -ENOENT;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 20be86d89a20..21f992605541 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -594,10 +594,6 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_plane_state *plane_state;
int i, ret = 0;
- ret = drm_atomic_helper_normalize_zpos(dev, state);
- if (ret)
- return ret;
-
for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
@@ -749,6 +745,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
/* Right function depends upon target state. */
if (crtc->state->enable && funcs->prepare)
funcs->prepare(crtc);
+ else if (funcs->atomic_disable)
+ funcs->atomic_disable(crtc, old_crtc_state);
else if (funcs->disable)
funcs->disable(crtc);
else
@@ -886,8 +884,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
* Each encoder has at most one connector (since we always steal
* it away), so we won't call mode_set hooks twice.
*/
- if (funcs && funcs->mode_set)
+ if (funcs && funcs->atomic_mode_set) {
+ funcs->atomic_mode_set(encoder, new_crtc_state,
+ connector->state);
+ } else if (funcs && funcs->mode_set) {
funcs->mode_set(encoder, mode, adjusted_mode);
+ }
drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
}
@@ -1003,29 +1005,46 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
* drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
* @dev: DRM device
* @state: atomic state object with old state structures
+ * @pre_swap: if true, do an interruptible wait
*
* For implicit sync, driver should fish the exclusive fence out from the
* incoming fb's and stash it in the drm_plane_state. This is called after
* drm_atomic_helper_swap_state() so it uses the current plane state (and
* just uses the atomic state to find the changed planes)
+ *
+ * Returns zero on success or < 0 if fence_wait() fails.
*/
-void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
- struct drm_atomic_state *state)
+int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool pre_swap)
{
struct drm_plane *plane;
struct drm_plane_state *plane_state;
- int i;
+ int i, ret;
for_each_plane_in_state(state, plane, plane_state, i) {
- if (!plane->state->fence)
+ if (!pre_swap)
+ plane_state = plane->state;
+
+ if (!plane_state->fence)
continue;
- WARN_ON(!plane->state->fb);
+ WARN_ON(!plane_state->fb);
- fence_wait(plane->state->fence, false);
- fence_put(plane->state->fence);
- plane->state->fence = NULL;
+ /*
+	 * If waiting for fences pre-swap (i.e. nonblock), userspace can
+ * still interrupt the operation. Instead of blocking until the
+ * timer expires, make the wait interruptible.
+ */
+ ret = fence_wait(plane_state->fence, pre_swap);
+ if (ret)
+ return ret;
+
+ fence_put(plane_state->fence);
+ plane_state->fence = NULL;
}
+
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
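
With the new @pre_swap parameter the helper now has two distinct call sites: blocking commits can wait interruptibly on the not-yet-swapped state, while the commit tail waits uninterruptibly on plane->state. A minimal sketch of the post-swap side, mirroring the default commit tail and assuming the driver otherwise uses the stock helpers:

#include <drm/drm_atomic_helper.h>

/* Hypothetical commit tail: fences are waited on after swap_state(). */
static void example_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* post-swap, hence pre_swap = false: wait uninterruptibly */
	WARN_ON(drm_atomic_helper_wait_for_fences(dev, state, false));

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}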
@@ -1142,7 +1161,8 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
*
* drm_atomic_helper_commit_modeset_enables(dev, state);
*
- * drm_atomic_helper_commit_planes(dev, state, true);
+ * drm_atomic_helper_commit_planes(dev, state,
+ * DRM_PLANE_COMMIT_ACTIVE_ONLY);
*
* for committing the atomic update to hardware. See the kerneldoc entries for
* these three functions for more details.
@@ -1153,7 +1173,7 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_planes(dev, state, false);
+ drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_modeset_enables(dev, state);
@@ -1172,7 +1192,7 @@ static void commit_tail(struct drm_atomic_state *state)
funcs = dev->mode_config.helper_private;
- drm_atomic_helper_wait_for_fences(dev, state);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
@@ -1231,6 +1251,12 @@ int drm_atomic_helper_commit(struct drm_device *dev,
if (ret)
return ret;
+ if (!nonblock) {
+ ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ if (ret)
+ return ret;
+ }
+
/*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on
@@ -1631,6 +1657,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
funcs = plane->helper_private;
+ if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
+ continue;
+
if (funcs->prepare_fb) {
ret = funcs->prepare_fb(plane, plane_state);
if (ret)
@@ -1647,18 +1676,20 @@ fail:
if (j >= i)
continue;
+ if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
+ continue;
+
funcs = plane->helper_private;
if (funcs->cleanup_fb)
funcs->cleanup_fb(plane, plane_state);
-
}
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
-bool plane_crtc_active(struct drm_plane_state *state)
+static bool plane_crtc_active(const struct drm_plane_state *state)
{
return state->crtc && state->crtc->state->active;
}
@@ -1667,7 +1698,7 @@ bool plane_crtc_active(struct drm_plane_state *state)
* drm_atomic_helper_commit_planes - commit plane state
* @dev: DRM device
* @old_state: atomic state object with old state structures
- * @active_only: Only commit on active CRTC if set
+ * @flags: flags for committing plane state
*
* This function commits the new plane state using the plane and atomic helper
* functions for planes and crtcs. It assumes that the atomic state has already
@@ -1687,25 +1718,34 @@ bool plane_crtc_active(struct drm_plane_state *state)
* most drivers don't need to be immediately notified of plane updates for a
* disabled CRTC.
*
- * Unless otherwise needed, drivers are advised to set the @active_only
- * parameters to true in order not to receive plane update notifications related
- * to a disabled CRTC. This avoids the need to manually ignore plane updates in
+ * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in
+ * @flags in order not to receive plane update notifications related to a
+ * disabled CRTC. This avoids the need to manually ignore plane updates in
* driver code when the driver and/or hardware can't or just don't need to deal
* with updates on disabled CRTCs, for example when supporting runtime PM.
*
- * The drm_atomic_helper_commit() default implementation only sets @active_only
- * to false to most closely match the behaviour of the legacy helpers. This should
- * not be copied blindly by drivers.
+ * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant
+ * display controllers require a CRTC's planes to be disabled when the CRTC is
+ * disabled. This function would skip the ->atomic_disable call for a plane if
+ * the CRTC of the old plane state needs a modesetting operation. Of course,
+ * the drivers need to disable the planes in their CRTC disable callbacks
+ * since no one else would do that.
+ *
+ * The drm_atomic_helper_commit() default implementation doesn't set the
+ * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers.
+ * This should not be copied blindly by drivers.
*/
void drm_atomic_helper_commit_planes(struct drm_device *dev,
struct drm_atomic_state *old_state,
- bool active_only)
+ uint32_t flags)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state;
int i;
+ bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY;
+ bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET;
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
@@ -1749,10 +1789,19 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
/*
* Special-case disabling the plane if drivers support it.
*/
- if (disabling && funcs->atomic_disable)
+ if (disabling && funcs->atomic_disable) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = old_plane_state->crtc->state;
+
+ if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+ no_disable)
+ continue;
+
funcs->atomic_disable(plane, old_plane_state);
- else if (plane->state->crtc || disabling)
+ } else if (plane->state->crtc || disabling) {
funcs->atomic_update(plane, old_plane_state);
+ }
}
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -1831,12 +1880,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
/**
* drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes
- * @crtc: CRTC
+ * @old_crtc_state: atomic state object with the old CRTC state
* @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks
*
* Disables all planes associated with the given CRTC. This can be
- * used for instance in the CRTC helper disable callback to disable
- * all planes before shutting down the display pipeline.
+ * used for instance in the CRTC helper atomic_disable callback to disable
+ * all planes.
*
* If the atomic-parameter is set the function calls the CRTC's
* atomic_begin hook before and atomic_flush hook after disabling the
@@ -1845,9 +1894,11 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
* It is a bug to call this function without having implemented the
* ->atomic_disable() plane hook.
*/
-void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
- bool atomic)
+void
+drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state,
+ bool atomic)
{
+ struct drm_crtc *crtc = old_crtc_state->crtc;
const struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
struct drm_plane *plane;
@@ -1855,11 +1906,11 @@ void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
if (atomic && crtc_funcs && crtc_funcs->atomic_begin)
crtc_funcs->atomic_begin(crtc, NULL);
- drm_for_each_plane(plane, crtc->dev) {
+ drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
const struct drm_plane_helper_funcs *plane_funcs =
plane->helper_private;
- if (plane->state->crtc != crtc || !plane_funcs)
+ if (!plane_funcs)
continue;
WARN_ON(!plane_funcs->atomic_disable);
@@ -1894,6 +1945,9 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
for_each_plane_in_state(old_state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
+ if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
+ continue;
+
funcs = plane->helper_private;
if (funcs->cleanup_fb)
@@ -2354,7 +2408,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
primary_state->crtc_h = vdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
- if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
+ if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
primary_state->src_w = vdisplay << 16;
primary_state->src_h = hdisplay << 16;
} else {
@@ -3039,7 +3093,7 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
if (plane->state) {
plane->state->plane = plane;
- plane->state->rotation = BIT(DRM_ROTATE_0);
+ plane->state->rotation = DRM_ROTATE_0;
}
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
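
Since drm_atomic_helper_commit_planes() now takes a flags word instead of a bool, a driver that powers its planes down together with the CRTC would typically pass both new flags; combining them here is an assumption for illustration.

#include <drm/drm_atomic_helper.h>

/* Hypothetical driver commit: skip disabled CRTCs and leave plane
 * disabling to the CRTC disable path. */
static void example_commit_planes(struct drm_device *dev,
				  struct drm_atomic_state *old_state)
{
	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY |
					DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
}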
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 4153e8a193af..6b143514a566 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -251,7 +251,7 @@ void drm_master_release(struct drm_file *file_priv)
if (!drm_is_current_master(file_priv))
goto out;
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
/*
* Since the master is disappearing, so is the
* possibility to lock.
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index f3c0942bd756..85172a977bf3 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -25,12 +25,173 @@
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
+#include <drm/drm_blend.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>
-#include "drm_internal.h"
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * The basic plane composition model supported by standard plane properties only
+ * has a source rectangle (in logical pixels within the &drm_framebuffer), with
+ * sub-pixel accuracy, which is scaled up to a pixel-aligned destination
+ * rectangle in the visible area of a &drm_crtc. The visible area of a CRTC is
+ * defined by the horizontal and vertical visible pixels (stored in @hdisplay
+ * and @vdisplay) of the requested mode (stored in @mode in the
+ * &drm_crtc_state). These two rectangles are both stored in the
+ * &drm_plane_state.
+ *
+ * For the atomic ioctl the following standard (atomic) properties on the plane object
+ * encode the basic plane composition model:
+ *
+ * SRC_X:
+ * X coordinate offset for the source rectangle within the
+ * &drm_framebuffer, in 16.16 fixed point. Must be positive.
+ * SRC_Y:
+ * Y coordinate offset for the source rectangle within the
+ * &drm_framebuffer, in 16.16 fixed point. Must be positive.
+ * SRC_W:
+ * Width for the source rectangle within the &drm_framebuffer, in 16.16
+ * fixed point. SRC_X plus SRC_W must be within the width of the source
+ * framebuffer. Must be positive.
+ * SRC_H:
+ * Height for the source rectangle within the &drm_framebuffer, in 16.16
+ * fixed point. SRC_Y plus SRC_H must be within the height of the source
+ * framebuffer. Must be positive.
+ * CRTC_X:
+ * X coordinate offset for the destination rectangle. Can be negative.
+ * CRTC_Y:
+ * Y coordinate offset for the destination rectangle. Can be negative.
+ * CRTC_W:
+ * Width for the destination rectangle. CRTC_X plus CRTC_W can extend past
+ * the currently visible horizontal area of the &drm_crtc.
+ * CRTC_H:
+ * Height for the destination rectangle. CRTC_Y plus CRTC_H can extend past
+ * the currently visible vertical area of the &drm_crtc.
+ * FB_ID:
+ * Mode object ID of the &drm_framebuffer this plane should scan out.
+ * CRTC_ID:
+ * Mode object ID of the &drm_crtc this plane should be connected to.
+ *
+ * Note that the source rectangle must fully lie within the bounds of the
+ * &drm_framebuffer. The destination rectangle can lie outside of the visible
+ * area of the current mode of the CRTC. It must be appropriately clipped by the
+ * driver, which can be done by calling drm_plane_helper_check_update(). Drivers
+ * are also allowed to round the subpixel sampling positions appropriately, but
+ * only to the next full pixel. No pixel outside of the source rectangle may
+ * ever be sampled, which is important when applying more sophisticated
+ * filtering than just a bilinear one when scaling. The filtering mode when
+ * scaling is unspecified.
+ *
+ * On top of this basic transformation additional properties can be exposed by
+ * the driver:
+ *
+ * - Rotation is set up with drm_mode_create_rotation_property(). It adds a
+ * rotation and reflection step between the source and destination rectangles.
+ * Without this property the rectangle is only scaled, but not rotated or
+ * reflected.
+ *
+ * - Z position is set up with drm_plane_create_zpos_immutable_property() and
+ * drm_plane_create_zpos_property(). It controls the visibility of overlapping
+ * planes. Without this property the primary plane is always below the cursor
+ * plane, and ordering between all other planes is undefined.
+ *
+ * Note that all the property extensions described here apply either to the
+ * plane or the CRTC (e.g. for the background color, which currently is not
+ * exposed and assumed to be black).
+ */
+
+/**
+ * drm_mode_create_rotation_property - create a new rotation property
+ * @dev: DRM device
+ * @supported_rotations: bitmask of supported rotations and reflections
+ *
+ * This creates a new property with the selected support for transformations.
+ * The resulting property should be stored in @rotation_property in
+ * &drm_mode_config. It then must be attached to each plane which supports
+ * rotations using drm_object_attach_property().
+ *
+ * FIXME: Probably better if the rotation property is created on each plane,
+ * like the zpos property. Otherwise it's not possible to allow different
+ * rotation modes on different planes.
+ *
+ * Since a rotation by 180° is the same as reflecting both along the x
+ * and the y axis the rotation property is somewhat redundant. Drivers can use
+ * drm_rotation_simplify() to normalize values of this property.
+ *
+ * The property exposed to userspace is a bitmask property (see
+ * drm_property_create_bitmask()) called "rotation" and has the following
+ * bitmask enumeration values:
+ *
+ * DRM_ROTATE_0:
+ * "rotate-0"
+ * DRM_ROTATE_90:
+ * "rotate-90"
+ * DRM_ROTATE_180:
+ * "rotate-180"
+ * DRM_ROTATE_270:
+ * "rotate-270"
+ * DRM_REFLECT_X:
+ * "reflect-x"
+ * DRM_REFLECT_Y:
+ * "reflect-y"
+ *
+ * Rotation is the specified amount in degrees in counter clockwise direction,
+ * the X and Y axis are within the source rectangle, i.e. the X/Y axis before
+ * rotation. After reflection, the rotation is applied to the image sampled from
+ * the source rectangle, before scaling it to fit the destination rectangle.
+ */
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+ unsigned int supported_rotations)
+{
+ static const struct drm_prop_enum_list props[] = {
+ { __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" },
+ { __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" },
+ { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" },
+ { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" },
+ { __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" },
+ { __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" },
+ };
+
+ return drm_property_create_bitmask(dev, 0, "rotation",
+ props, ARRAY_SIZE(props),
+ supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
+
+/**
+ * drm_rotation_simplify() - Try to simplify the rotation
+ * @rotation: Rotation to be simplified
+ * @supported_rotations: Supported rotations
+ *
+ * Attempt to simplify the rotation to a form that is supported.
+ * Eg. if the hardware supports everything except DRM_REFLECT_X
+ * one could call this function like this:
+ *
+ * drm_rotation_simplify(rotation, DRM_ROTATE_0 |
+ * DRM_ROTATE_90 | DRM_ROTATE_180 |
+ * DRM_ROTATE_270 | DRM_REFLECT_Y);
+ *
+ * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
+ * transforms the hardware supports, this function may not
+ * be able to produce a supported transform, so the caller should
+ * check the result afterwards.
+ */
+unsigned int drm_rotation_simplify(unsigned int rotation,
+ unsigned int supported_rotations)
+{
+ if (rotation & ~supported_rotations) {
+ rotation ^= DRM_REFLECT_X | DRM_REFLECT_Y;
+ rotation = (rotation & DRM_REFLECT_MASK) |
+ BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
+ }
+
+ return rotation;
+}
+EXPORT_SYMBOL(drm_rotation_simplify);
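
Putting the two new exports together: a sketch of a driver creating and attaching the rotation property, then normalizing userspace-requested values in atomic_check, for hardware assumed to lack DRM_REFLECT_X. The function names and the supported-rotation mask are illustrative.

/* Hypothetical plane init: advertise everything except DRM_REFLECT_X. */
static int example_setup_rotation(struct drm_device *dev,
				  struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_mode_create_rotation_property(dev,
			DRM_ROTATE_0 | DRM_ROTATE_90 |
			DRM_ROTATE_180 | DRM_ROTATE_270 |
			DRM_REFLECT_Y);
	if (!prop)
		return -ENOMEM;

	dev->mode_config.rotation_property = prop;
	drm_object_attach_property(&plane->base, prop, DRM_ROTATE_0);
	return 0;
}

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	state->rotation = drm_rotation_simplify(state->rotation,
			DRM_ROTATE_0 | DRM_ROTATE_90 |
			DRM_ROTATE_180 | DRM_ROTATE_270 |
			DRM_REFLECT_Y);

	/* the simplified value may still be unsupported; reject it then */
	if (state->rotation & ~(DRM_ROTATE_0 | DRM_ROTATE_90 |
				DRM_ROTATE_180 | DRM_ROTATE_270 |
				DRM_REFLECT_Y))
		return -EINVAL;

	return 0;
}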
/**
* drm_plane_create_zpos_property - create mutable zpos property
@@ -49,10 +210,14 @@
* If zpos of some planes cannot be changed (like fixed background or
* cursor/topmost planes), driver should adjust min/max values and assign those
* planes immutable zpos property with lower or higher values (for more
- * information, see drm_mode_create_zpos_immutable_property() function). In such
+ * information, see drm_plane_create_zpos_immutable_property() function). In such
* case driver should also assign proper initial zpos values for all planes in
* its plane_reset() callback, so the planes will be always sorted properly.
*
+ * See also drm_atomic_normalize_zpos().
+ *
+ * The property exposed to userspace is called "zpos".
+ *
* Returns:
* Zero on success, negative errno on failure.
*/
@@ -88,7 +253,9 @@ EXPORT_SYMBOL(drm_plane_create_zpos_property);
 * support for it in drm core. Using this property the driver lets userspace
 * get the arrangement of the planes for blending operation and notifies
 * it that the hardware (or driver) doesn't support changing the planes'
- * order.
+ * order. For mutable zpos see drm_plane_create_zpos_property().
+ *
+ * The property exposed to userspace is called "zpos".
*
* Returns:
* Zero on success, negative errno on failure.
@@ -127,20 +294,6 @@ static int drm_atomic_state_zpos_cmp(const void *a, const void *b)
return sa->plane->base.id - sb->plane->base.id;
}
-/**
- * drm_atomic_helper_crtc_normalize_zpos - calculate normalized zpos values
- * @crtc: crtc with planes, which have to be considered for normalization
- * @crtc_state: new atomic state to apply
- *
- * This function checks new states of all planes assigned to given crtc and
- * calculates normalized zpos value for them. Planes are compared first by their
- * zpos values, then by plane id (if zpos equals). Plane with lowest zpos value
- * is at the bottom. The plane_state->normalized_zpos is then filled with unique
- * values from 0 to number of active planes in crtc minus one.
- *
- * RETURNS
- * Zero for success or -errno
- */
static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
@@ -193,20 +346,25 @@ done:
}
/**
- * drm_atomic_helper_normalize_zpos - calculate normalized zpos values for all
- * crtcs
+ * drm_atomic_normalize_zpos - calculate normalized zpos values for all crtcs
* @dev: DRM device
* @state: atomic state of DRM device
*
* This function calculates normalized zpos value for all modified planes in
- * the provided atomic state of DRM device. For more information, see
- * drm_atomic_helper_crtc_normalize_zpos() function.
+ * the provided atomic state of DRM device.
+ *
+ * For every CRTC this function checks new states of all planes assigned to
+ * it and calculates normalized zpos value for these planes. Planes are compared
+ * first by their zpos values, then by plane id (if zpos is equal). The plane
+ * with lowest zpos value is at the bottom. The plane_state->normalized_zpos is
+ * then filled with unique values from 0 to number of active planes in crtc
+ * minus one.
*
* RETURNS
* Zero for success or -errno
*/
-int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
- struct drm_atomic_state *state)
+int drm_atomic_normalize_zpos(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
@@ -236,3 +394,4 @@ int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
}
return 0;
}
+EXPORT_SYMBOL(drm_atomic_normalize_zpos);
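
Because the normalize step was dropped from drm_atomic_helper_check_planes() (see the drm_atomic_helper.c hunk above), zpos-aware drivers now call the freshly exported helper from their own atomic_check. A minimal sketch, assuming the standard helper-based check sequence:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

/* Hypothetical driver atomic_check for a zpos-using driver. */
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	/* no longer done by drm_atomic_helper_check_planes() */
	ret = drm_atomic_normalize_zpos(dev, state);
	if (ret)
		return ret;

	return drm_atomic_helper_check_planes(dev, state);
}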
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 255543086590..0ee052b7c21a 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -23,10 +23,9 @@
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/mutex.h>
-#include <drm/drm_crtc.h>
-
-#include "drm/drmP.h"
+#include <drm/drm_bridge.h>
/**
* DOC: overview
@@ -98,11 +97,11 @@ EXPORT_SYMBOL(drm_bridge_remove);
* @dev: DRM device
* @bridge: bridge control structure
*
- * called by a kms driver to link one of our encoder/bridge to the given
+ * Called by a kms driver to link one of our encoder/bridge to the given
* bridge.
*
* Note that setting up links between the bridge and our encoder/bridge
- * objects needs to be handled by the kms driver itself
+ * objects needs to be handled by the kms driver itself.
*
* RETURNS:
* Zero on success, error code on failure
@@ -125,6 +124,31 @@ int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
EXPORT_SYMBOL(drm_bridge_attach);
/**
+ * drm_bridge_detach - deassociate given bridge from its DRM device
+ *
+ * @bridge: bridge control structure
+ *
+ * Called by a kms driver to unlink the given bridge from its DRM device.
+ *
+ * Note that tearing down links between the bridge and our encoder/bridge
+ * objects needs to be handled by the kms driver itself.
+ */
+void drm_bridge_detach(struct drm_bridge *bridge)
+{
+ if (WARN_ON(!bridge))
+ return;
+
+ if (WARN_ON(!bridge->dev))
+ return;
+
+ if (bridge->funcs->detach)
+ bridge->funcs->detach(bridge);
+
+ bridge->dev = NULL;
+}
+EXPORT_SYMBOL(drm_bridge_detach);
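
A sketch of the matching teardown path, assuming the encoder keeps the usual encoder->bridge link set up at attach time; the cleanup callback is illustrative.

/* Hypothetical encoder cleanup: undo drm_bridge_attach() before
 * destroying the encoder. */
static void example_encoder_cleanup(struct drm_encoder *encoder)
{
	if (encoder->bridge)
		drm_bridge_detach(encoder->bridge);

	drm_encoder_cleanup(encoder);
}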
+
+/**
* DOC: bridge callbacks
*
* The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index c3a12cd8bd0d..adb1dd7fde5f 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -397,7 +397,7 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
return -EPERM;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
err = drm_addmap_core(dev, map->offset, map->size, map->type,
@@ -443,7 +443,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
int i;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
idx = map->offset;
@@ -545,7 +545,7 @@ EXPORT_SYMBOL(drm_legacy_rmmap_locked);
void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -558,7 +558,7 @@ void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
struct drm_map_list *r_list, *list_temp;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -595,7 +595,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
int ret;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@@ -755,7 +755,7 @@ int drm_legacy_addbufs_agp(struct drm_device *dev,
return -EINVAL;
}
- entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+ entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
@@ -905,14 +905,14 @@ int drm_legacy_addbufs_pci(struct drm_device *dev,
return -EINVAL;
}
- entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+ entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+ entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
if (!entry->seglist) {
kfree(entry->buflist);
mutex_unlock(&dev->struct_mutex);
@@ -923,8 +923,9 @@ int drm_legacy_addbufs_pci(struct drm_device *dev,
/* Keep the original pagelist until we know all the allocations
* have succeeded
*/
- temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
- sizeof(*dma->pagelist), GFP_KERNEL);
+ temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
+ sizeof(*dma->pagelist),
+ GFP_KERNEL);
if (!temp_pagelist) {
kfree(entry->buflist);
kfree(entry->seglist);
@@ -1116,8 +1117,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev,
return -EINVAL;
}
- entry->buflist = kzalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
+ entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
@@ -1220,7 +1220,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
struct drm_buf_desc *request = data;
int ret;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1266,7 +1266,7 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
int i;
int count;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1347,7 +1347,7 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data,
int order;
struct drm_buf_entry *entry;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1395,7 +1395,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
int idx;
struct drm_buf *buf;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1450,7 +1450,7 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data,
struct drm_buf_map *request = data;
int i;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1530,7 +1530,7 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data,
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (dev->driver->dma_ioctl)
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
new file mode 100644
index 000000000000..d28ffdd2b929
--- /dev/null
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_color_mgmt.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Color management or color space adjustments is supported through a set of 5
+ * properties on the &drm_crtc object. They are set up by calling
+ * drm_crtc_enable_color_mgmt().
+ *
+ * "DEGAMMA_LUT”:
+ * Blob property to set the degamma lookup table (LUT) mapping pixel data
+ * from the framebuffer before it is given to the transformation matrix.
+ * The data is interpreted as an array of struct &drm_color_lut elements.
+ * Hardware might choose not to use the full precision of the LUT elements
+ * nor use all the elements of the LUT (for example the hardware might
+ * choose to interpolate between LUT[0] and LUT[4]).
+ *
+ * “DEGAMMA_LUT_SIZE”:
+ *	Unsigned range property to give the size of the lookup table to be set
+ * on the DEGAMMA_LUT property (the size depends on the underlying
+ * hardware). If drivers support multiple LUT sizes then they should
+ * publish the largest size, and sub-sample smaller sized LUTs (e.g. for
+ * split-gamma modes) appropriately.
+ *
+ * “CTM”:
+ *	Blob property to set the current transformation matrix (CTM) applied to
+ * pixel data after the lookup through the degamma LUT and before the
+ * lookup through the gamma LUT. The data is interpreted as a struct
+ * &drm_color_ctm.
+ *
+ * “GAMMA_LUT”:
+ * Blob property to set the gamma lookup table (LUT) mapping pixel data
+ * after the transformation matrix to data sent to the connector. The
+ * data is interpreted as an array of struct &drm_color_lut elements.
+ * Hardware might choose not to use the full precision of the LUT elements
+ * nor use all the elements of the LUT (for example the hardware might
+ * choose to interpolate between LUT[0] and LUT[4]).
+ *
+ * “GAMMA_LUT_SIZE”:
+ * Unsigned range property to give the size of the lookup table to be set
+ * on the GAMMA_LUT property (the size depends on the underlying hardware).
+ * If drivers support multiple LUT sizes then they should publish the
+ * largest size, and sub-sample smaller sized LUTs (e.g. for split-gamma
+ * modes) appropriately.
+ *
+ * There is also support for a legacy gamma table, which is set up by calling
+ * drm_mode_crtc_set_gamma_size(). Drivers which support both should use
+ * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the
+ * "GAMMA_LUT" property above.
+ */
+
+/**
+ * drm_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @has_ctm: whether to attach ctm_property for CSC matrix
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction
+ * properties on a CRTC. This includes the 3 properties userspace can
+ * set (degamma LUT, CSC matrix and gamma LUT) and the 2 size
+ * properties that inform userspace of the LUT sizes. Each of the
+ * properties is optional: the gamma and degamma properties are only
+ * attached if their size is not 0, and ctm_property is only attached
+ * if has_ctm is true.
+ */
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+ uint degamma_lut_size,
+ bool has_ctm,
+ uint gamma_lut_size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (degamma_lut_size) {
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_size_property,
+ degamma_lut_size);
+ }
+
+ if (has_ctm)
+ drm_object_attach_property(&crtc->base,
+ config->ctm_property, 0);
+
+ if (gamma_lut_size) {
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_size_property,
+ gamma_lut_size);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
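On the driver side, a CRTC init path might enable the properties like so. This is a sketch only: foo_crtc_init is not a real helper, and the LUT sizes are hypothetical, hardware-dependent values.

	static int foo_crtc_init(struct drm_crtc *crtc)
	{
		/* 33-entry degamma LUT, a CSC matrix, 257-entry gamma LUT */
		drm_crtc_enable_color_mgmt(crtc, 33, true, 257);

		/* Keep the legacy gamma ioctls working alongside "GAMMA_LUT". */
		return drm_mode_crtc_set_gamma_size(crtc, 256);
	}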
+
+/**
+ * drm_mode_crtc_set_gamma_size - set the gamma table size
+ * @crtc: CRTC to set the gamma table size for
+ * @gamma_size: size of the gamma table
+ *
+ * Drivers which support gamma tables should set this to the supported gamma
+ * table size when initializing the CRTC. Currently the drm core only supports a
+ * fixed gamma table size.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size)
+{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
+ crtc->gamma_size = gamma_size;
+
+ crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
+ GFP_KERNEL);
+ if (!crtc->gamma_store) {
+ crtc->gamma_size = 0;
+ return -ENOMEM;
+ }
+
+ r_base = crtc->gamma_store;
+ g_base = r_base + gamma_size;
+ b_base = g_base + gamma_size;
+ for (i = 0; i < gamma_size; i++) {
+ r_base[i] = i << 8;
+ g_base[i] = i << 8;
+ b_base[i] = i << 8;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+
+/**
+ * drm_mode_gamma_set_ioctl - set the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
+ * query the required gamma table size through drm_mode_gamma_get_ioctl.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (crtc->funcs->gamma_set == NULL) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = r_base + size;
+ if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = g_base + size;
+ if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+/**
+ * drm_mode_gamma_get_ioctl - get the gamma table
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Copy the current gamma table into the storage provided. This also provides
+ * the gamma table size the driver expects, which can be used to size the
+ * allocated storage.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_lut *crtc_lut = data;
+ struct drm_crtc *crtc;
+ void *r_base, *g_base, *b_base;
+ int size;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ if (!crtc) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* memcpy into gamma store */
+ if (crtc_lut->gamma_size != crtc->gamma_size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = crtc_lut->gamma_size * (sizeof(uint16_t));
+ r_base = crtc->gamma_store;
+ if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ g_base = r_base + size;
+ if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ b_base = g_base + size;
+ if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
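From userspace, these two ioctls are usually reached through libdrm's drmModeCrtcGetGamma()/drmModeCrtcSetGamma() wrappers. A minimal round-trip sketch follows; the fd and CRTC id are assumed to come from elsewhere.

	#include <errno.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <xf86drmMode.h>

	/* Read the current gamma ramp and write it straight back. */
	static int roundtrip_gamma(int fd, uint32_t crtc_id, uint32_t gamma_size)
	{
		uint16_t *r = calloc(gamma_size, sizeof(*r));
		uint16_t *g = calloc(gamma_size, sizeof(*g));
		uint16_t *b = calloc(gamma_size, sizeof(*b));
		int ret = -ENOMEM;

		if (r && g && b) {
			ret = drmModeCrtcGetGamma(fd, crtc_id, gamma_size, r, g, b);
			if (!ret)
				ret = drmModeCrtcSetGamma(fd, crtc_id, gamma_size,
							  r, g, b);
		}

		free(r);
		free(g);
		free(b);
		return ret;
	}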
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
new file mode 100644
index 000000000000..2db7fb510b6c
--- /dev/null
+++ b/drivers/gpu/drm/drm_connector.c
@@ -0,0 +1,1123 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * In DRM, connectors are the general abstraction for display sinks, and
+ * include fixed panels or anything else that can display pixels in some form. As
+ * opposed to all other KMS objects representing hardware (like CRTC, encoder or
+ * plane abstractions) connectors can be hotplugged and unplugged at runtime.
+ * Hence they are reference-counted using drm_connector_reference() and
+ * drm_connector_unreference().
+ *
+ * A KMS driver must create, initialize, register and attach a struct
+ * &drm_connector for each such sink. The instance is created like other KMS
+ * objects, by embedding it in a driver-private structure and initializing the
+ * relevant fields.
+ *
+ * The connector is then registered with a call to drm_connector_init() with a
+ * pointer to the connector functions and a connector type, and exposed through
+ * sysfs with a call to drm_connector_register().
+ *
+ * Connectors must be attached to an encoder to be used. For devices that map
+ * connectors to encoders 1:1, the connector should be attached at
+ * initialization time with a call to drm_mode_connector_attach_encoder(). The
+ * driver must also set the struct &drm_connector encoder field to point to the
+ * attached encoder.
+ *
+ * For connectors which are not fixed (like built-in panels) the driver needs to
+ * support hotplug notifications. The simplest way to do that is by using the
+ * probe helpers, see drm_kms_helper_poll_init() for connectors which don't have
+ * hardware support for hotplug interrupts. Connectors with hardware hotplug
+ * support can instead use e.g. drm_helper_hpd_irq_event().
+ */
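Tying the steps above together for the simple 1:1 case, a driver's output setup could look roughly like the following. This is a sketch: struct foo_output, foo_connector_funcs and the already-initialized encoder are all assumptions, and registration is often left to drm_dev_register() rather than done inline.

	struct foo_output {
		struct drm_connector connector;
		struct drm_encoder encoder;
	};

	static int foo_output_init(struct drm_device *dev, struct foo_output *out)
	{
		int ret;

		/* foo_connector_funcs: the driver's drm_connector_funcs, assumed */
		ret = drm_connector_init(dev, &out->connector, &foo_connector_funcs,
					 DRM_MODE_CONNECTOR_HDMIA);
		if (ret)
			return ret;

		ret = drm_mode_connector_attach_encoder(&out->connector,
							&out->encoder);
		if (ret)
			return ret;

		/* The overview above asks for the encoder field to be set, too. */
		out->connector.encoder = &out->encoder;

		return drm_connector_register(&out->connector);
	}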
+
+struct drm_conn_prop_enum_list {
+ int type;
+ const char *name;
+ struct ida ida;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
+ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+ { DRM_MODE_CONNECTOR_VGA, "VGA" },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+ { DRM_MODE_CONNECTOR_Composite, "Composite" },
+ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+ { DRM_MODE_CONNECTOR_Component, "Component" },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+ { DRM_MODE_CONNECTOR_TV, "TV" },
+ { DRM_MODE_CONNECTOR_eDP, "eDP" },
+ { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+ { DRM_MODE_CONNECTOR_DSI, "DSI" },
+ { DRM_MODE_CONNECTOR_DPI, "DPI" },
+};
+
+void drm_connector_ida_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+ ida_init(&drm_connector_enum_list[i].ida);
+}
+
+void drm_connector_ida_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+ ida_destroy(&drm_connector_enum_list[i].ida);
+}
+
+/**
+ * drm_connector_get_cmdline_mode - reads the user's cmdline mode
+ * @connector: connector to query
+ *
+ * The kernel supports per-connector configuration of its consoles through
+ * use of the video= parameter. This function parses that option and
+ * extracts the user's specified mode (or enable/disable status) for a
+ * particular connector. This is typically only used during the early fbdev
+ * setup.
+ */
+static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
+{
+ struct drm_cmdline_mode *mode = &connector->cmdline_mode;
+ char *option = NULL;
+
+ if (fb_get_options(connector->name, &option))
+ return;
+
+ if (!drm_mode_parse_command_line_for_connector(option,
+ connector,
+ mode))
+ return;
+
+ if (mode->force) {
+ const char *s;
+
+ switch (mode->force) {
+ case DRM_FORCE_OFF:
+ s = "OFF";
+ break;
+ case DRM_FORCE_ON_DIGITAL:
+ s = "ON - dig";
+ break;
+ default:
+ case DRM_FORCE_ON:
+ s = "ON";
+ break;
+ }
+
+ DRM_INFO("forcing %s connector %s\n", connector->name, s);
+ connector->force = mode->force;
+ }
+
+ DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+ connector->name,
+ mode->xres, mode->yres,
+ mode->refresh_specified ? mode->refresh : 60,
+ mode->rb ? " reduced blanking" : "",
+ mode->margins ? " with margins" : "",
+ mode->interlace ? " interlaced" : "");
+}
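For reference, the option parsed here follows the kernel's video= syntax from Documentation/fb/modedb.txt. For example, the following command line forces the HDMI-A-1 connector on at a fixed mode and disables VGA-1 entirely ("e" forces enable, "d" disables):

	video=HDMI-A-1:1920x1080@60e video=VGA-1:d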
+
+static void drm_connector_free(struct kref *kref)
+{
+ struct drm_connector *connector =
+ container_of(kref, struct drm_connector, base.refcount);
+ struct drm_device *dev = connector->dev;
+
+ drm_mode_object_unregister(dev, &connector->base);
+ connector->funcs->destroy(connector);
+}
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret;
+ struct ida *connector_ida =
+ &drm_connector_enum_list[connector_type].ida;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get_reg(dev, &connector->base,
+ DRM_MODE_OBJECT_CONNECTOR,
+ false, drm_connector_free);
+ if (ret)
+ goto out_unlock;
+
+ connector->base.properties = &connector->properties;
+ connector->dev = dev;
+ connector->funcs = funcs;
+
+ ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto out_put;
+ connector->index = ret;
+ ret = 0;
+
+ connector->connector_type = connector_type;
+ connector->connector_type_id =
+ ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+ if (connector->connector_type_id < 0) {
+ ret = connector->connector_type_id;
+ goto out_put_id;
+ }
+ connector->name =
+ kasprintf(GFP_KERNEL, "%s-%d",
+ drm_connector_enum_list[connector_type].name,
+ connector->connector_type_id);
+ if (!connector->name) {
+ ret = -ENOMEM;
+ goto out_put_type_id;
+ }
+
+ INIT_LIST_HEAD(&connector->probed_modes);
+ INIT_LIST_HEAD(&connector->modes);
+ connector->edid_blob_ptr = NULL;
+ connector->status = connector_status_unknown;
+
+ drm_connector_get_cmdline_mode(connector);
+
+ /* We should add connectors at the end to avoid upsetting the connector
+ * index too much. */
+ list_add_tail(&connector->head, &config->connector_list);
+ config->num_connector++;
+
+ if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ drm_object_attach_property(&connector->base,
+ config->edid_property,
+ 0);
+
+ drm_object_attach_property(&connector->base,
+ config->dpms_property, 0);
+
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC))
+ drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
+
+ connector->debugfs_entry = NULL;
+out_put_type_id:
+ if (ret)
+ ida_simple_remove(connector_ida, connector->connector_type_id);
+out_put_id:
+ if (ret)
+ ida_simple_remove(&config->connector_ida, connector->index);
+out_put:
+ if (ret)
+ drm_mode_object_unregister(dev, &connector->base);
+
+out_unlock:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_connector_init);
+
+/**
+ * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * @connector: connector to attach
+ * @encoder: encoder to attach @connector to
+ *
+ * This function links up a connector to an encoder. Note that the routing
+ * restrictions between encoders and crtcs are exposed to userspace through the
+ * possible_clones and possible_crtcs bitmasks.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder)
+{
+ int i;
+
+ /*
+ * In the past, drivers have attempted to model the static association
+ * of connector to encoder in simple connector/encoder devices using a
+ * direct assignment of connector->encoder = encoder. This connection
+ * is a logical one and the responsibility of the core, so drivers are
+ * expected not to mess with this.
+ *
+ * Note that the error return should've been enough here, but a large
+ * majority of drivers ignores the return value, so add in a big WARN
+ * to get people's attention.
+ */
+ if (WARN_ON(connector->encoder))
+ return -EINVAL;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] == 0) {
+ connector->encoder_ids[i] = encoder->base.id;
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+
+static void drm_mode_remove(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ list_del(&mode->head);
+ drm_mode_destroy(connector->dev, mode);
+}
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode, *t;
+
+ /* The connector should have been removed from userspace long before
+ * it is finally destroyed.
+ */
+ if (WARN_ON(connector->registered))
+ drm_connector_unregister(connector);
+
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(dev, connector->tile_group);
+ connector->tile_group = NULL;
+ }
+
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+
+ list_for_each_entry_safe(mode, t, &connector->modes, head)
+ drm_mode_remove(connector, mode);
+
+ ida_simple_remove(&drm_connector_enum_list[connector->connector_type].ida,
+ connector->connector_type_id);
+
+ ida_simple_remove(&dev->mode_config.connector_ida,
+ connector->index);
+
+ kfree(connector->display_info.bus_formats);
+ drm_mode_object_unregister(dev, &connector->base);
+ kfree(connector->name);
+ connector->name = NULL;
+ list_del(&connector->head);
+ dev->mode_config.num_connector--;
+
+ WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
+ if (connector->state && connector->funcs->atomic_destroy_state)
+ connector->funcs->atomic_destroy_state(connector,
+ connector->state);
+
+ memset(connector, 0, sizeof(*connector));
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+/**
+ * drm_connector_register - register a connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_register(struct drm_connector *connector)
+{
+ int ret;
+
+ if (connector->registered)
+ return 0;
+
+ ret = drm_sysfs_connector_add(connector);
+ if (ret)
+ return ret;
+
+ ret = drm_debugfs_connector_add(connector);
+ if (ret)
+ goto err_sysfs;
+
+ if (connector->funcs->late_register) {
+ ret = connector->funcs->late_register(connector);
+ if (ret)
+ goto err_debugfs;
+ }
+
+ drm_mode_object_register(connector->dev, &connector->base);
+
+ connector->registered = true;
+ return 0;
+
+err_debugfs:
+ drm_debugfs_connector_remove(connector);
+err_sysfs:
+ drm_sysfs_connector_remove(connector);
+ return ret;
+}
+EXPORT_SYMBOL(drm_connector_register);
+
+/**
+ * drm_connector_unregister - unregister a connector
+ * @connector: the connector to unregister
+ *
+ * Unregister userspace interfaces for a connector
+ */
+void drm_connector_unregister(struct drm_connector *connector)
+{
+ if (!connector->registered)
+ return;
+
+ if (connector->funcs->early_unregister)
+ connector->funcs->early_unregister(connector);
+
+ drm_sysfs_connector_remove(connector);
+ drm_debugfs_connector_remove(connector);
+
+ connector->registered = false;
+}
+EXPORT_SYMBOL(drm_connector_unregister);
+
+void drm_connector_unregister_all(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+
+ /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_connector_unregister(connector);
+}
+
+int drm_connector_register_all(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ /* FIXME: taking the mode config mutex ends up in a clash with
+ * fbcon/backlight registration */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ ret = drm_connector_register(connector);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ drm_connector_unregister_all(dev);
+ return ret;
+}
+
+/**
+ * drm_get_connector_status_name - return a string for connector status
+ * @status: connector status to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one here returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+ if (status == connector_status_connected)
+ return "connected";
+ else if (status == connector_status_disconnected)
+ return "disconnected";
+ else
+ return "unknown";
+}
+EXPORT_SYMBOL(drm_get_connector_status_name);
+
+static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
+ { SubPixelUnknown, "Unknown" },
+ { SubPixelHorizontalRGB, "Horizontal RGB" },
+ { SubPixelHorizontalBGR, "Horizontal BGR" },
+ { SubPixelVerticalRGB, "Vertical RGB" },
+ { SubPixelVerticalBGR, "Vertical BGR" },
+ { SubPixelNone, "None" },
+};
+
+/**
+ * drm_get_subpixel_order_name - return a string for a given subpixel enum
+ * @order: enum of subpixel_order
+ *
+ * Note you could abuse this and return something out of bounds, but that
+ * would be a caller error. No unscrubbed user data should make it here.
+ */
+const char *drm_get_subpixel_order_name(enum subpixel_order order)
+{
+ return drm_subpixel_enum_list[order].name;
+}
+EXPORT_SYMBOL(drm_get_subpixel_order_name);
+
+static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
+ { DRM_MODE_DPMS_ON, "On" },
+ { DRM_MODE_DPMS_STANDBY, "Standby" },
+ { DRM_MODE_DPMS_SUSPEND, "Suspend" },
+ { DRM_MODE_DPMS_OFF, "Off" }
+};
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/**
+ * drm_display_info_set_bus_formats - set the supported bus formats
+ * @info: display info to store bus formats in
+ * @formats: array containing the supported bus formats
+ * @num_formats: the number of entries in the formats array
+ *
+ * Store the supported bus formats in display info structure.
+ * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
+ * a full list of available formats.
+ */
+int drm_display_info_set_bus_formats(struct drm_display_info *info,
+ const u32 *formats,
+ unsigned int num_formats)
+{
+ u32 *fmts = NULL;
+
+ if (!formats && num_formats)
+ return -EINVAL;
+
+ if (formats && num_formats) {
+ fmts = kmemdup(formats, sizeof(*formats) * num_formats,
+ GFP_KERNEL);
+ if (!fmts)
+ return -ENOMEM;
+ }
+
+ kfree(info->bus_formats);
+ info->bus_formats = fmts;
+ info->num_bus_formats = num_formats;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_display_info_set_bus_formats);
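A typical caller is a panel or LVDS-style driver advertising one fixed format; a minimal sketch, where the MEDIA_BUS_FMT_RGB888_1X24 choice is an arbitrary example:

	#include <linux/media-bus-format.h>

	static int foo_set_bus_format(struct drm_connector *connector)
	{
		/* This hypothetical panel only accepts 24-bit parallel RGB. */
		static const u32 bus_formats[] = { MEDIA_BUS_FMT_RGB888_1X24 };

		return drm_display_info_set_bus_formats(&connector->display_info,
							bus_formats,
							ARRAY_SIZE(bus_formats));
	}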
+
+/* Optional connector properties. */
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
+ { DRM_MODE_SCALE_NONE, "None" },
+ { DRM_MODE_SCALE_FULLSCREEN, "Full" },
+ { DRM_MODE_SCALE_CENTER, "Center" },
+ { DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
+ { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
+ { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
+ { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+};
+
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
+ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
+};
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+ drm_dvi_i_subconnector_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
+ { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
+};
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+ drm_tv_subconnector_enum_list)
+
+int drm_connector_create_standard_properties(struct drm_device *dev)
+{
+ struct drm_property *prop;
+
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "EDID", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.edid_property = prop;
+
+ prop = drm_property_create_enum(dev, 0,
+ "DPMS", drm_dpms_enum_list,
+ ARRAY_SIZE(drm_dpms_enum_list));
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.dpms_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "PATH", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.path_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "TILE", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.tile_property = prop;
+
+ return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+ struct drm_property *dvi_i_selector;
+ struct drm_property *dvi_i_subconnector;
+
+ if (dev->mode_config.dvi_i_select_subconnector_property)
+ return 0;
+
+ dvi_i_selector =
+ drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_dvi_i_select_enum_list,
+ ARRAY_SIZE(drm_dvi_i_select_enum_list));
+ dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+ dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_dvi_i_subconnector_enum_list,
+ ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+ dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device. Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int num_modes,
+ const char * const modes[])
+{
+ struct drm_property *tv_selector;
+ struct drm_property *tv_subconnector;
+ unsigned int i;
+
+ if (dev->mode_config.tv_select_subconnector_property)
+ return 0;
+
+ /*
+ * Basic connector properties
+ */
+ tv_selector = drm_property_create_enum(dev, 0,
+ "select subconnector",
+ drm_tv_select_enum_list,
+ ARRAY_SIZE(drm_tv_select_enum_list));
+ if (!tv_selector)
+ goto nomem;
+
+ dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+ tv_subconnector =
+ drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_tv_subconnector_enum_list,
+ ARRAY_SIZE(drm_tv_subconnector_enum_list));
+ if (!tv_subconnector)
+ goto nomem;
+ dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+ /*
+ * Other, TV specific properties: margins & TV modes.
+ */
+ dev->mode_config.tv_left_margin_property =
+ drm_property_create_range(dev, 0, "left margin", 0, 100);
+ if (!dev->mode_config.tv_left_margin_property)
+ goto nomem;
+
+ dev->mode_config.tv_right_margin_property =
+ drm_property_create_range(dev, 0, "right margin", 0, 100);
+ if (!dev->mode_config.tv_right_margin_property)
+ goto nomem;
+
+ dev->mode_config.tv_top_margin_property =
+ drm_property_create_range(dev, 0, "top margin", 0, 100);
+ if (!dev->mode_config.tv_top_margin_property)
+ goto nomem;
+
+ dev->mode_config.tv_bottom_margin_property =
+ drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+ if (!dev->mode_config.tv_bottom_margin_property)
+ goto nomem;
+
+ dev->mode_config.tv_mode_property =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", num_modes);
+ if (!dev->mode_config.tv_mode_property)
+ goto nomem;
+
+ for (i = 0; i < num_modes; i++)
+ drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+ i, modes[i]);
+
+ dev->mode_config.tv_brightness_property =
+ drm_property_create_range(dev, 0, "brightness", 0, 100);
+ if (!dev->mode_config.tv_brightness_property)
+ goto nomem;
+
+ dev->mode_config.tv_contrast_property =
+ drm_property_create_range(dev, 0, "contrast", 0, 100);
+ if (!dev->mode_config.tv_contrast_property)
+ goto nomem;
+
+ dev->mode_config.tv_flicker_reduction_property =
+ drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+ if (!dev->mode_config.tv_flicker_reduction_property)
+ goto nomem;
+
+ dev->mode_config.tv_overscan_property =
+ drm_property_create_range(dev, 0, "overscan", 0, 100);
+ if (!dev->mode_config.tv_overscan_property)
+ goto nomem;
+
+ dev->mode_config.tv_saturation_property =
+ drm_property_create_range(dev, 0, "saturation", 0, 100);
+ if (!dev->mode_config.tv_saturation_property)
+ goto nomem;
+
+ dev->mode_config.tv_hue_property =
+ drm_property_create_range(dev, 0, "hue", 0, 100);
+ if (!dev->mode_config.tv_hue_property)
+ goto nomem;
+
+ return 0;
+nomem:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
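A TV encoder driver calls this once with the names of the formats it can emit; for example (the mode names below are illustrative, each driver defines its own list):

	static const char * const foo_tv_modes[] = {
		"NTSC", "NTSC-J", "PAL", "PAL-M",
	};

	static int foo_tv_create_properties(struct drm_device *dev)
	{
		return drm_mode_create_tv_properties(dev, ARRAY_SIZE(foo_tv_modes),
						     foo_tv_modes);
	}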
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed. The property must then be
+ * attached to the desired connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+ struct drm_property *scaling_mode;
+
+ if (dev->mode_config.scaling_mode_property)
+ return 0;
+
+ scaling_mode =
+ drm_property_create_enum(dev, 0, "scaling mode",
+ drm_scaling_mode_enum_list,
+ ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+ dev->mode_config.scaling_mode_property = scaling_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * drm_mode_create_aspect_ratio_property - create aspect ratio property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed. The property must then be
+ * attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
+{
+ if (dev->mode_config.aspect_ratio_property)
+ return 0;
+
+ dev->mode_config.aspect_ratio_property =
+ drm_property_create_enum(dev, 0, "aspect ratio",
+ drm_aspect_ratio_enum_list,
+ ARRAY_SIZE(drm_aspect_ratio_enum_list));
+
+ if (dev->mode_config.aspect_ratio_property == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
+
+/**
+ * drm_mode_create_suggested_offset_properties - create suggested offset properties
+ * @dev: DRM device
+ *
+ * Create the suggested x/y offset properties for connectors.
+ */
+int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
+{
+ if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
+ return 0;
+
+ dev->mode_config.suggested_x_property =
+ drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
+
+ dev->mode_config.suggested_y_property =
+ drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
+
+ if (dev->mode_config.suggested_x_property == NULL ||
+ dev->mode_config.suggested_y_property == NULL)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
+
+/**
+ * drm_mode_connector_set_path_property - set path property on connector
+ * @connector: connector to set property on.
+ * @path: path to use for property; must not be NULL.
+ *
+ * This creates a property to expose to userspace to specify a
+ * connector path. This is mainly used for DisplayPort MST where
+ * connectors have a topology and we want to allow userspace to give
+ * them more meaningful names.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+ const char *path)
+{
+ struct drm_device *dev = connector->dev;
+ int ret;
+
+ ret = drm_property_replace_global_blob(dev,
+ &connector->path_blob_ptr,
+ strlen(path) + 1,
+ path,
+ &connector->base,
+ dev->mode_config.path_property);
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_path_property);
+
+/**
+ * drm_mode_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is a string of
+ * 8 integers using ':' as a separator (for example "1:1:2:1:0:0:1920:2160"
+ * for the left tile of a 2x1 tiled monitor).
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ char tile[256];
+ int ret;
+
+ if (!connector->has_tile) {
+ ret = drm_property_replace_global_blob(dev,
+ &connector->tile_blob_ptr,
+ 0,
+ NULL,
+ &connector->base,
+ dev->mode_config.tile_property);
+ return ret;
+ }
+
+ snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+ connector->tile_group->id, connector->tile_is_single_monitor,
+ connector->num_h_tile, connector->num_v_tile,
+ connector->tile_h_loc, connector->tile_v_loc,
+ connector->tile_h_size, connector->tile_v_size);
+
+ ret = drm_property_replace_global_blob(dev,
+ &connector->tile_blob_ptr,
+ strlen(tile) + 1,
+ tile,
+ &connector->base,
+ dev->mode_config.tile_property);
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+
+/**
+ * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * @connector: drm connector
+ * @edid: new value of the edid property
+ *
+ * This function creates a new blob modeset object and assigns its id to the
+ * connector's edid property.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ size_t size = 0;
+ int ret;
+
+ /* ignore requests to set edid when overridden */
+ if (connector->override_edid)
+ return 0;
+
+ if (edid)
+ size = EDID_LENGTH * (1 + edid->extensions);
+
+ ret = drm_property_replace_global_blob(dev,
+ &connector->edid_blob_ptr,
+ size,
+ edid,
+ &connector->base,
+ dev->mode_config.edid_property);
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
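This is typically called from a connector's ->get_modes() hook right after fetching the EDID; a sketch, where to_foo_output() and the ddc i2c adapter are assumed driver-specific details:

	static int foo_connector_get_modes(struct drm_connector *connector)
	{
		struct foo_output *out = to_foo_output(connector); /* assumed upcast */
		struct edid *edid;
		int count;

		edid = drm_get_edid(connector, out->ddc);
		drm_mode_connector_update_edid_property(connector, edid);
		if (!edid)
			return 0;

		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
		return count;
	}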
+
+int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property)
+ ret = (*connector->funcs->dpms)(connector, (int)value);
+ else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, value);
+
+ /* store the property value if successful */
+ if (!ret)
+ drm_object_property_set_value(&connector->base, property, value);
+ return ret;
+}
+
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_connector_set_property *conn_set_prop = data;
+ struct drm_mode_obj_set_property obj_set_prop = {
+ .value = conn_set_prop->value,
+ .prop_id = conn_set_prop->prop_id,
+ .obj_id = conn_set_prop->connector_id,
+ .obj_type = DRM_MODE_OBJECT_CONNECTOR
+ };
+
+ /* It does all the locking and checking we need */
+ return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
+{
+ /* For atomic drivers only state objects are synchronously updated and
+ * protected by modeset locks, so check those first. */
+ if (connector->state)
+ return connector->state->best_encoder;
+ return connector->encoder;
+}
+
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+ const struct drm_file *file_priv)
+{
+ /*
+ * If user-space hasn't configured the driver to expose the stereo 3D
+ * modes, don't expose them.
+ */
+ if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+ return false;
+
+ return true;
+}
+
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_connector *out_resp = data;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *mode;
+ int mode_count = 0;
+ int encoders_count = 0;
+ int ret = 0;
+ int copied = 0;
+ int i;
+ struct drm_mode_modeinfo u_mode;
+ struct drm_mode_modeinfo __user *mode_ptr;
+ uint32_t __user *encoder_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ connector = drm_connector_lookup(dev, out_resp->connector_id);
+ if (!connector) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+ if (connector->encoder_ids[i] != 0)
+ encoders_count++;
+
+ if (out_resp->count_modes == 0) {
+ connector->funcs->fill_modes(connector,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ }
+
+ /* delayed so we get modes regardless of pre-fill_modes state */
+ list_for_each_entry(mode, &connector->modes, head)
+ if (drm_mode_expose_to_userspace(mode, file_priv))
+ mode_count++;
+
+ out_resp->connector_id = connector->base.id;
+ out_resp->connector_type = connector->connector_type;
+ out_resp->connector_type_id = connector->connector_type_id;
+ out_resp->mm_width = connector->display_info.width_mm;
+ out_resp->mm_height = connector->display_info.height_mm;
+ out_resp->subpixel = connector->display_info.subpixel_order;
+ out_resp->connection = connector->status;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ encoder = drm_connector_get_encoder(connector);
+ if (encoder)
+ out_resp->encoder_id = encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if ((out_resp->count_modes >= mode_count) && mode_count) {
+ copied = 0;
+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (!drm_mode_expose_to_userspace(mode, file_priv))
+ continue;
+
+ drm_mode_convert_to_umode(&u_mode, mode);
+ if (copy_to_user(mode_ptr + copied,
+ &u_mode, sizeof(u_mode))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_modes = mode_count;
+
+ ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+ (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+ (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+ &out_resp->count_props);
+ if (ret)
+ goto out;
+
+ if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ copied = 0;
+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ if (connector->encoder_ids[i] != 0) {
+ if (put_user(connector->encoder_ids[i],
+ encoder_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ }
+ out_resp->count_encoders = encoders_count;
+
+out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ drm_connector_unreference(connector);
+out_unlock:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
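The two-pass sizing dance above is hidden from most userspace by libdrm, whose drmModeGetConnector() allocates the arrays and repeats the ioctl internally; typical usage looks like this sketch:

	#include <stdio.h>
	#include <xf86drmMode.h>

	static void dump_modes(int fd, uint32_t connector_id)
	{
		drmModeConnector *conn = drmModeGetConnector(fd, connector_id);
		int i;

		if (!conn)
			return;

		for (i = 0; i < conn->count_modes; i++)
			printf("%s: %dx%d@%d\n", conn->modes[i].name,
			       conn->modes[i].hdisplay, conn->modes[i].vdisplay,
			       conn->modes[i].vrefresh);

		drmModeFreeConnector(conn);
	}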
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 192a5f9eeb74..3c4000facb36 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -54,7 +54,7 @@ struct drm_ctx_list {
void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -92,7 +92,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
void drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return;
idr_init(&dev->ctx_idr);
@@ -109,7 +109,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -131,7 +131,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
struct drm_ctx_list *pos, *tmp;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->ctxlist_mutex);
@@ -177,7 +177,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
struct drm_map_list *_entry;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@@ -225,7 +225,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
struct drm_map_list *r_list = NULL;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@@ -329,7 +329,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
int i;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (res->count >= DRM_RESERVED_CONTEXTS) {
@@ -363,7 +363,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
ctx->handle = drm_legacy_ctxbitmap_next(dev);
@@ -410,7 +410,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
/* This is 0, because we don't handle any context flags */
@@ -436,7 +436,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
DRM_DEBUG("%d\n", ctx->handle);
@@ -460,7 +460,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
DRM_DEBUG("%d\n", ctx->handle);
@@ -486,7 +486,7 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- drm_core_check_feature(dev, DRIVER_MODESET))
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
DRM_DEBUG("%d\n", ctx->handle);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ddebe54cd5ca..2d7bedf28647 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -40,39 +40,14 @@
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
+#include <drm/drm_framebuffer.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
-static struct drm_framebuffer *
-internal_framebuffer_create(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *r,
- struct drm_file *file_priv);
-
-/* Avoid boilerplate. I'm tired of typing. */
-#define DRM_ENUM_NAME_FN(fnname, list) \
- const char *fnname(int val) \
- { \
- int i; \
- for (i = 0; i < ARRAY_SIZE(list); i++) { \
- if (list[i].type == val) \
- return list[i].name; \
- } \
- return "(unknown)"; \
- }
-
/*
* Global properties
*/
-static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
- { DRM_MODE_DPMS_ON, "On" },
- { DRM_MODE_DPMS_STANDBY, "Standby" },
- { DRM_MODE_DPMS_SUSPEND, "Suspend" },
- { DRM_MODE_DPMS_OFF, "Off" }
-};
-
-DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
-
static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
@@ -82,320 +57,6 @@ static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
/*
* Optional properties
*/
-static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
- { DRM_MODE_SCALE_NONE, "None" },
- { DRM_MODE_SCALE_FULLSCREEN, "Full" },
- { DRM_MODE_SCALE_CENTER, "Center" },
- { DRM_MODE_SCALE_ASPECT, "Full aspect" },
-};
-
-static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
- { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
- { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
- { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
-};
-
-/*
- * Non-global properties, but "required" for certain connectors.
- */
-static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
- { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
- { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
-
-static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
- { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
-};
-
-DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
- drm_dvi_i_subconnector_enum_list)
-
-static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
- { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
-
-static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
- { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
- { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */
-};
-
-DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
- drm_tv_subconnector_enum_list)
-
-static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
- { DRM_MODE_DIRTY_OFF, "Off" },
- { DRM_MODE_DIRTY_ON, "On" },
- { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
-};
-
-struct drm_conn_prop_enum_list {
- int type;
- const char *name;
- struct ida ida;
-};
-
-/*
- * Connector and encoder types.
- */
-static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
- { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
- { DRM_MODE_CONNECTOR_VGA, "VGA" },
- { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
- { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
- { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
- { DRM_MODE_CONNECTOR_Composite, "Composite" },
- { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
- { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
- { DRM_MODE_CONNECTOR_Component, "Component" },
- { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
- { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
- { DRM_MODE_CONNECTOR_TV, "TV" },
- { DRM_MODE_CONNECTOR_eDP, "eDP" },
- { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
- { DRM_MODE_CONNECTOR_DSI, "DSI" },
- { DRM_MODE_CONNECTOR_DPI, "DPI" },
-};
-
-static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
- { DRM_MODE_ENCODER_NONE, "None" },
- { DRM_MODE_ENCODER_DAC, "DAC" },
- { DRM_MODE_ENCODER_TMDS, "TMDS" },
- { DRM_MODE_ENCODER_LVDS, "LVDS" },
- { DRM_MODE_ENCODER_TVDAC, "TV" },
- { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
- { DRM_MODE_ENCODER_DSI, "DSI" },
- { DRM_MODE_ENCODER_DPMST, "DP MST" },
- { DRM_MODE_ENCODER_DPI, "DPI" },
-};
-
-static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
- { SubPixelUnknown, "Unknown" },
- { SubPixelHorizontalRGB, "Horizontal RGB" },
- { SubPixelHorizontalBGR, "Horizontal BGR" },
- { SubPixelVerticalRGB, "Vertical RGB" },
- { SubPixelVerticalBGR, "Vertical BGR" },
- { SubPixelNone, "None" },
-};
-
-void drm_connector_ida_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
- ida_init(&drm_connector_enum_list[i].ida);
-}
-
-void drm_connector_ida_destroy(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
- ida_destroy(&drm_connector_enum_list[i].ida);
-}
-
-/**
- * drm_get_connector_status_name - return a string for connector status
- * @status: connector status to compute name of
- *
- * In contrast to the other drm_get_*_name functions this one here returns a
- * const pointer and hence is threadsafe.
- */
-const char *drm_get_connector_status_name(enum drm_connector_status status)
-{
- if (status == connector_status_connected)
- return "connected";
- else if (status == connector_status_disconnected)
- return "disconnected";
- else
- return "unknown";
-}
-EXPORT_SYMBOL(drm_get_connector_status_name);
-
-/**
- * drm_get_subpixel_order_name - return a string for a given subpixel enum
- * @order: enum of subpixel_order
- *
- * Note you could abuse this and return something out of bounds, but that
- * would be a caller error. No unscrubbed user data should make it here.
- */
-const char *drm_get_subpixel_order_name(enum subpixel_order order)
-{
- return drm_subpixel_enum_list[order].name;
-}
-EXPORT_SYMBOL(drm_get_subpixel_order_name);
-
-/*
- * Internal function to assign a slot in the object idr and optionally
- * register the object into the idr.
- */
-static int drm_mode_object_get_reg(struct drm_device *dev,
- struct drm_mode_object *obj,
- uint32_t obj_type,
- bool register_obj,
- void (*obj_free_cb)(struct kref *kref))
-{
- int ret;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
- if (ret >= 0) {
- /*
- * Set up the object linking under the protection of the idr
- * lock so that other users can't see inconsistent state.
- */
- obj->id = ret;
- obj->type = obj_type;
- if (obj_free_cb) {
- obj->free_cb = obj_free_cb;
- kref_init(&obj->refcount);
- }
- }
- mutex_unlock(&dev->mode_config.idr_mutex);
-
- return ret < 0 ? ret : 0;
-}
-
-/**
- * drm_mode_object_get - allocate a new modeset identifier
- * @dev: DRM device
- * @obj: object pointer, used to generate unique ID
- * @obj_type: object type
- *
- * Create a unique identifier based on @ptr in @dev's identifier space. Used
- * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
- * modeset identifiers are _not_ reference counted. Hence don't use this for
- * reference counted modeset objects like framebuffers.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_mode_object_get(struct drm_device *dev,
- struct drm_mode_object *obj, uint32_t obj_type)
-{
- return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
-}
-
-static void drm_mode_object_register(struct drm_device *dev,
- struct drm_mode_object *obj)
-{
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-/**
- * drm_mode_object_unregister - free a modeset identifer
- * @dev: DRM device
- * @object: object to free
- *
- * Free @id from @dev's unique identifier pool.
- * This function can be called multiple times, and guards against
- * multiple removals.
- * These modeset identifiers are _not_ reference counted. Hence don't use this
- * for reference counted modeset objects like framebuffers.
- */
-void drm_mode_object_unregister(struct drm_device *dev,
- struct drm_mode_object *object)
-{
- mutex_lock(&dev->mode_config.idr_mutex);
- if (object->id) {
- idr_remove(&dev->mode_config.crtc_idr, object->id);
- object->id = 0;
- }
- mutex_unlock(&dev->mode_config.idr_mutex);
-}
-
-static struct drm_mode_object *_object_find(struct drm_device *dev,
- uint32_t id, uint32_t type)
-{
- struct drm_mode_object *obj = NULL;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
- obj = NULL;
- if (obj && obj->id != id)
- obj = NULL;
-
- if (obj && obj->free_cb) {
- if (!kref_get_unless_zero(&obj->refcount))
- obj = NULL;
- }
- mutex_unlock(&dev->mode_config.idr_mutex);
-
- return obj;
-}
-
-/**
- * drm_mode_object_find - look up a drm object with static lifetime
- * @dev: drm device
- * @id: id of the mode object
- * @type: type of the mode object
- *
- * This function is used to look up a modeset object. It will acquire a
- * reference for reference counted objects. This reference must be dropped again
- * by callind drm_mode_object_unreference().
- */
-struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
- uint32_t id, uint32_t type)
-{
- struct drm_mode_object *obj = NULL;
-
- obj = _object_find(dev, id, type);
- return obj;
-}
-EXPORT_SYMBOL(drm_mode_object_find);
-
-/**
- * drm_mode_object_unreference - decr the object refcnt
- * @obj: mode_object
- *
- * This functions decrements the object's refcount if it is a refcounted modeset
- * object. It is a no-op on any other object. This is used to drop references
- * acquired with drm_mode_object_reference().
- */
-void drm_mode_object_unreference(struct drm_mode_object *obj)
-{
- if (obj->free_cb) {
- DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
- kref_put(&obj->refcount, obj->free_cb);
- }
-}
-EXPORT_SYMBOL(drm_mode_object_unreference);
-
-/**
- * drm_mode_object_reference - incr the object refcnt
- * @obj: mode_object
- *
- * This functions increments the object's refcount if it is a refcounted modeset
- * object. It is a no-op on any other object. References should be dropped again
- * by calling drm_mode_object_unreference().
- */
-void drm_mode_object_reference(struct drm_mode_object *obj)
-{
- if (obj->free_cb) {
- DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
- kref_get(&obj->refcount);
- }
-}
-EXPORT_SYMBOL(drm_mode_object_reference);
-
/**
* drm_crtc_force_disable - Forcibly turn off a CRTC
* @crtc: CRTC to turn off
@@ -441,199 +102,6 @@ out:
}
EXPORT_SYMBOL(drm_crtc_force_disable_all);
-static void drm_framebuffer_free(struct kref *kref)
-{
- struct drm_framebuffer *fb =
- container_of(kref, struct drm_framebuffer, base.refcount);
- struct drm_device *dev = fb->dev;
-
- /*
- * The lookup idr holds a weak reference, which has not necessarily been
- * removed at this point. Check for that.
- */
- drm_mode_object_unregister(dev, &fb->base);
-
- fb->funcs->destroy(fb);
-}
-
-/**
- * drm_framebuffer_init - initialize a framebuffer
- * @dev: DRM device
- * @fb: framebuffer to be initialized
- * @funcs: ... with these functions
- *
- * Allocates an ID for the framebuffer's parent mode object, sets its mode
- * functions & device file and adds it to the master fd list.
- *
- * IMPORTANT:
- * This functions publishes the fb and makes it available for concurrent access
- * by other users. Which means by this point the fb _must_ be fully set up -
- * since all the fb attributes are invariant over its lifetime, no further
- * locking but only correct reference counting is required.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
- const struct drm_framebuffer_funcs *funcs)
-{
- int ret;
-
- INIT_LIST_HEAD(&fb->filp_head);
- fb->dev = dev;
- fb->funcs = funcs;
-
- ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
- false, drm_framebuffer_free);
- if (ret)
- goto out;
-
- mutex_lock(&dev->mode_config.fb_lock);
- dev->mode_config.num_fb++;
- list_add(&fb->head, &dev->mode_config.fb_list);
- mutex_unlock(&dev->mode_config.fb_lock);
-
- drm_mode_object_register(dev, &fb->base);
-out:
- return ret;
-}
-EXPORT_SYMBOL(drm_framebuffer_init);
-
-/**
- * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
- * @dev: drm device
- * @id: id of the fb object
- *
- * If successful, this grabs an additional reference to the framebuffer -
- * callers need to make sure to eventually unreference the returned framebuffer
- * again, using drm_framebuffer_unreference().
- */
-struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
- uint32_t id)
-{
- struct drm_mode_object *obj;
- struct drm_framebuffer *fb = NULL;
-
- obj = _object_find(dev, id, DRM_MODE_OBJECT_FB);
- if (obj)
- fb = obj_to_fb(obj);
- return fb;
-}
-EXPORT_SYMBOL(drm_framebuffer_lookup);
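Caller-side sketch (fb_id is a hypothetical id from userspace):

struct drm_framebuffer *fb;

fb = drm_framebuffer_lookup(dev, fb_id);
if (!fb)
	return -ENOENT;
/* ... use fb; its attributes are invariant, so no locking is needed ... */
drm_framebuffer_unreference(fb);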
-
-/**
- * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
- * @fb: fb to unregister
- *
- * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
- * those used for fbdev. Note that the caller must hold a reference of its own,
- * i.e. the object may not be destroyed through this call (since it'll lead to a
- * locking inversion).
- */
-void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
-{
- struct drm_device *dev;
-
- if (!fb)
- return;
-
- dev = fb->dev;
-
- /* Mark fb as reaped and drop idr ref. */
- drm_mode_object_unregister(dev, &fb->base);
-}
-EXPORT_SYMBOL(drm_framebuffer_unregister_private);
-
-/**
- * drm_framebuffer_cleanup - remove a framebuffer object
- * @fb: framebuffer to remove
- *
- * Cleanup framebuffer. This function is intended to be used from the driver's
- * ->destroy callback. It can also be used to clean up driver private
- * framebuffers embedded into a larger structure.
- *
- * Note that this function does not remove the fb from active usage - if it is
- * still used anywhere, hilarity can ensue since userspace could call getfb on
- * the id and get back -EINVAL. Obviously no concern at driver unload time.
- *
- * Also, the framebuffer will not be removed from the lookup idr - for
- * user-created framebuffers this will happen in the rmfb ioctl. For
- * driver-private objects (e.g. for fbdev) drivers need to explicitly call
- * drm_framebuffer_unregister_private.
- */
-void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
-{
- struct drm_device *dev = fb->dev;
-
- mutex_lock(&dev->mode_config.fb_lock);
- list_del(&fb->head);
- dev->mode_config.num_fb--;
- mutex_unlock(&dev->mode_config.fb_lock);
-}
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
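A sketch of the intended use from a driver's ->destroy callback, with a hypothetical foo_fb wrapper struct:

static void foo_fb_destroy(struct drm_framebuffer *fb)
{
	struct foo_fb *foo = container_of(fb, struct foo_fb, base);

	/* Unlink from the core lists, then free the embedding object. */
	drm_framebuffer_cleanup(fb);
	kfree(foo);
}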
-
-/**
- * drm_framebuffer_remove - remove and unreference a framebuffer object
- * @fb: framebuffer to remove
- *
- * Scans all the CRTCs and planes in @dev's mode_config. If they're
- * using @fb, removes it, setting it to NULL. Then drops the reference to the
- * passed-in framebuffer. Might take the modeset locks.
- *
- * Note that this function optimizes the cleanup away if the caller holds the
- * last reference to the framebuffer. It is also guaranteed to not take the
- * modeset locks in this case.
- */
-void drm_framebuffer_remove(struct drm_framebuffer *fb)
-{
- struct drm_device *dev;
- struct drm_crtc *crtc;
- struct drm_plane *plane;
-
- if (!fb)
- return;
-
- dev = fb->dev;
-
- WARN_ON(!list_empty(&fb->filp_head));
-
- /*
- * drm ABI mandates that we remove any deleted framebuffers from active
- * usage. But since most sane clients only remove framebuffers they no
- * longer need, try to optimize this away.
- *
- * Since we're holding a reference ourselves, observing a refcount of 1
- * means that we're the last holder and can skip it. Also, the refcount
- * can never increase from 1 again, so we don't need any barriers or
- * locks.
- *
- * Note that userspace could try to race with us and instate a new
- * usage _after_ we've cleared all current ones. End result will be an
- * in-use fb with fb-id == 0. Userspace is allowed to shoot itself in
- * the foot this way.
- */
- if (drm_framebuffer_read_refcount(fb) > 1) {
- drm_modeset_lock_all(dev);
- /* remove from any CRTC */
- drm_for_each_crtc(crtc, dev) {
- if (crtc->primary->fb == fb) {
- /* should turn off the crtc */
- if (drm_crtc_force_disable(crtc))
- DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
- }
- }
-
- drm_for_each_plane(plane, dev) {
- if (plane->fb == fb)
- drm_plane_force_disable(plane);
- }
- drm_modeset_unlock_all(dev);
- }
-
- drm_framebuffer_unreference(fb);
-}
-EXPORT_SYMBOL(drm_framebuffer_remove);
-
DEFINE_WW_CLASS(crtc_ww_class);
static unsigned int drm_num_crtcs(struct drm_device *dev)
@@ -683,7 +151,11 @@ static void drm_crtc_unregister_all(struct drm_device *dev)
* @funcs: callbacks for the new CRTC
* @name: printf style format string for the CRTC name, or NULL for default name
*
- * Inits a new object created as base part of a driver crtc object.
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes. For really simple hardware which has only one plane, look at
+ * drm_simple_display_pipe_init() instead.
*
* Returns:
* Zero on success, error code on failure.
@@ -783,720 +255,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_cleanup);
-/*
- * drm_mode_remove - remove and free a mode
- * @connector: connector list to modify
- * @mode: mode to remove
- *
- * Remove @mode from @connector's mode list, then free it.
- */
-static void drm_mode_remove(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- list_del(&mode->head);
- drm_mode_destroy(connector->dev, mode);
-}
-
-/**
- * drm_display_info_set_bus_formats - set the supported bus formats
- * @info: display info to store bus formats in
- * @formats: array containing the supported bus formats
- * @num_formats: the number of entries in the @formats array
- *
- * Store the supported bus formats in display info structure.
- * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
- * a full list of available formats.
- */
-int drm_display_info_set_bus_formats(struct drm_display_info *info,
- const u32 *formats,
- unsigned int num_formats)
-{
- u32 *fmts = NULL;
-
- if (!formats && num_formats)
- return -EINVAL;
-
- if (formats && num_formats) {
- fmts = kmemdup(formats, sizeof(*formats) * num_formats,
- GFP_KERNEL);
- if (!fmts)
- return -ENOMEM;
- }
-
- kfree(info->bus_formats);
- info->bus_formats = fmts;
- info->num_bus_formats = num_formats;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_display_info_set_bus_formats);
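Usage sketch for a panel driver advertising a single 24-bit RGB bus format (foo_bus_formats is a hypothetical name):

static const u32 foo_bus_formats[] = {
	MEDIA_BUS_FMT_RGB888_1X24,
};

ret = drm_display_info_set_bus_formats(&connector->display_info,
				       foo_bus_formats,
				       ARRAY_SIZE(foo_bus_formats));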
-
-/**
- * drm_connector_get_cmdline_mode - reads the user's cmdline mode
- * @connector: connector to query
- *
- * The kernel supports per-connector configuration of its consoles through
- * use of the video= parameter. This function parses that option and
- * extracts the user's specified mode (or enable/disable status) for a
- * particular connector. This is typically only used during the early fbdev
- * setup.
- */
-static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
-{
- struct drm_cmdline_mode *mode = &connector->cmdline_mode;
- char *option = NULL;
-
- if (fb_get_options(connector->name, &option))
- return;
-
- if (!drm_mode_parse_command_line_for_connector(option,
- connector,
- mode))
- return;
-
- if (mode->force) {
- const char *s;
-
- switch (mode->force) {
- case DRM_FORCE_OFF:
- s = "OFF";
- break;
- case DRM_FORCE_ON_DIGITAL:
- s = "ON - dig";
- break;
- default:
- case DRM_FORCE_ON:
- s = "ON";
- break;
- }
-
- DRM_INFO("forcing %s connector %s\n", connector->name, s);
- connector->force = mode->force;
- }
-
- DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
- connector->name,
- mode->xres, mode->yres,
- mode->refresh_specified ? mode->refresh : 60,
- mode->rb ? " reduced blanking" : "",
- mode->margins ? " with margins" : "",
- mode->interlace ? " interlaced" : "");
-}
-
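For illustration, typical video= options this parser consumes (the connector names are examples): video=HDMI-A-1:1920x1080@60 forces a 1920x1080 60 Hz mode on that connector, while video=VGA-1:d forces the connector off.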
-static void drm_connector_free(struct kref *kref)
-{
- struct drm_connector *connector =
- container_of(kref, struct drm_connector, base.refcount);
- struct drm_device *dev = connector->dev;
-
- drm_mode_object_unregister(dev, &connector->base);
- connector->funcs->destroy(connector);
-}
-
-/**
- * drm_connector_init - Init a preallocated connector
- * @dev: DRM device
- * @connector: the connector to init
- * @funcs: callbacks for this connector
- * @connector_type: user visible type of the connector
- *
- * Initialises a preallocated connector. Connectors should be
- * subclassed as part of driver connector objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type)
-{
- struct drm_mode_config *config = &dev->mode_config;
- int ret;
- struct ida *connector_ida =
- &drm_connector_enum_list[connector_type].ida;
-
- drm_modeset_lock_all(dev);
-
- ret = drm_mode_object_get_reg(dev, &connector->base,
- DRM_MODE_OBJECT_CONNECTOR,
- false, drm_connector_free);
- if (ret)
- goto out_unlock;
-
- connector->base.properties = &connector->properties;
- connector->dev = dev;
- connector->funcs = funcs;
-
- ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
- if (ret < 0)
- goto out_put;
- connector->index = ret;
- ret = 0;
-
- connector->connector_type = connector_type;
- connector->connector_type_id =
- ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
- if (connector->connector_type_id < 0) {
- ret = connector->connector_type_id;
- goto out_put_id;
- }
- connector->name =
- kasprintf(GFP_KERNEL, "%s-%d",
- drm_connector_enum_list[connector_type].name,
- connector->connector_type_id);
- if (!connector->name) {
- ret = -ENOMEM;
- goto out_put_type_id;
- }
-
- INIT_LIST_HEAD(&connector->probed_modes);
- INIT_LIST_HEAD(&connector->modes);
- connector->edid_blob_ptr = NULL;
- connector->status = connector_status_unknown;
-
- drm_connector_get_cmdline_mode(connector);
-
- /* We should add connectors at the end to avoid upsetting the connector
- * index too much. */
- list_add_tail(&connector->head, &config->connector_list);
- config->num_connector++;
-
- if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
- drm_object_attach_property(&connector->base,
- config->edid_property,
- 0);
-
- drm_object_attach_property(&connector->base,
- config->dpms_property, 0);
-
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
- drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
- }
-
- connector->debugfs_entry = NULL;
-out_put_type_id:
- if (ret)
- ida_remove(connector_ida, connector->connector_type_id);
-out_put_id:
- if (ret)
- ida_remove(&config->connector_ida, connector->index);
-out_put:
- if (ret)
- drm_mode_object_unregister(dev, &connector->base);
-
-out_unlock:
- drm_modeset_unlock_all(dev);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_connector_init);
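Driver-side sketch with hypothetical foo_* names; the connector is typically embedded in a driver structure and paired with a helper vtable:

ret = drm_connector_init(dev, &foo->connector, &foo_connector_funcs,
			 DRM_MODE_CONNECTOR_HDMIA);
if (ret)
	return ret;
drm_connector_helper_add(&foo->connector, &foo_connector_helper_funcs);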
-
-/**
- * drm_connector_cleanup - cleans up an initialised connector
- * @connector: connector to cleanup
- *
- * Cleans up the connector but doesn't free the object.
- */
-void drm_connector_cleanup(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *mode, *t;
-
- /* The connector should have been removed from userspace long before
- * it is finally destroyed.
- */
- if (WARN_ON(connector->registered))
- drm_connector_unregister(connector);
-
- if (connector->tile_group) {
- drm_mode_put_tile_group(dev, connector->tile_group);
- connector->tile_group = NULL;
- }
-
- list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
- drm_mode_remove(connector, mode);
-
- list_for_each_entry_safe(mode, t, &connector->modes, head)
- drm_mode_remove(connector, mode);
-
- ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
- connector->connector_type_id);
-
- ida_remove(&dev->mode_config.connector_ida,
- connector->index);
-
- kfree(connector->display_info.bus_formats);
- drm_mode_object_unregister(dev, &connector->base);
- kfree(connector->name);
- connector->name = NULL;
- list_del(&connector->head);
- dev->mode_config.num_connector--;
-
- WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
- if (connector->state && connector->funcs->atomic_destroy_state)
- connector->funcs->atomic_destroy_state(connector,
- connector->state);
-
- memset(connector, 0, sizeof(*connector));
-}
-EXPORT_SYMBOL(drm_connector_cleanup);
-
-/**
- * drm_connector_register - register a connector
- * @connector: the connector to register
- *
- * Register userspace interfaces for a connector
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register(struct drm_connector *connector)
-{
- int ret;
-
- if (connector->registered)
- return 0;
-
- ret = drm_sysfs_connector_add(connector);
- if (ret)
- return ret;
-
- ret = drm_debugfs_connector_add(connector);
- if (ret)
- goto err_sysfs;
-
- if (connector->funcs->late_register) {
- ret = connector->funcs->late_register(connector);
- if (ret)
- goto err_debugfs;
- }
-
- drm_mode_object_register(connector->dev, &connector->base);
-
- connector->registered = true;
- return 0;
-
-err_debugfs:
- drm_debugfs_connector_remove(connector);
-err_sysfs:
- drm_sysfs_connector_remove(connector);
- return ret;
-}
-EXPORT_SYMBOL(drm_connector_register);
-
-/**
- * drm_connector_unregister - unregister a connector
- * @connector: the connector to unregister
- *
- * Unregister userspace interfaces for a connector
- */
-void drm_connector_unregister(struct drm_connector *connector)
-{
- if (!connector->registered)
- return;
-
- if (connector->funcs->early_unregister)
- connector->funcs->early_unregister(connector);
-
- drm_sysfs_connector_remove(connector);
- drm_debugfs_connector_remove(connector);
-
- connector->registered = false;
-}
-EXPORT_SYMBOL(drm_connector_unregister);
-
-static void drm_connector_unregister_all(struct drm_device *dev)
-{
- struct drm_connector *connector;
-
- /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- drm_connector_unregister(connector);
-}
-
-static int drm_connector_register_all(struct drm_device *dev)
-{
- struct drm_connector *connector;
- int ret;
-
- /* FIXME: taking the mode config mutex ends up in a clash with
- * fbcon/backlight registration */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- ret = drm_connector_register(connector);
- if (ret)
- goto err;
- }
-
- return 0;
-
-err:
- mutex_unlock(&dev->mode_config.mutex);
- drm_connector_unregister_all(dev);
- return ret;
-}
-
-static int drm_encoder_register_all(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
- int ret = 0;
-
- drm_for_each_encoder(encoder, dev) {
- if (encoder->funcs->late_register)
- ret = encoder->funcs->late_register(encoder);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void drm_encoder_unregister_all(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
-
- drm_for_each_encoder(encoder, dev) {
- if (encoder->funcs->early_unregister)
- encoder->funcs->early_unregister(encoder);
- }
-}
-
-/**
- * drm_encoder_init - Init a preallocated encoder
- * @dev: drm device
- * @encoder: the encoder to init
- * @funcs: callbacks for this encoder
- * @encoder_type: user visible type of the encoder
- * @name: printf style format string for the encoder name, or NULL for default name
- *
- * Initialises a preallocated encoder. Encoder should be
- * subclassed as part of driver encoder objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_encoder_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- const struct drm_encoder_funcs *funcs,
- int encoder_type, const char *name, ...)
-{
- int ret;
-
- drm_modeset_lock_all(dev);
-
- ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
- if (ret)
- goto out_unlock;
-
- encoder->dev = dev;
- encoder->encoder_type = encoder_type;
- encoder->funcs = funcs;
- if (name) {
- va_list ap;
-
- va_start(ap, name);
- encoder->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
- } else {
- encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
- drm_encoder_enum_list[encoder_type].name,
- encoder->base.id);
- }
- if (!encoder->name) {
- ret = -ENOMEM;
- goto out_put;
- }
-
- list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
- encoder->index = dev->mode_config.num_encoder++;
-
-out_put:
- if (ret)
- drm_mode_object_unregister(dev, &encoder->base);
-
-out_unlock:
- drm_modeset_unlock_all(dev);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_encoder_init);
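Sketch with hypothetical foo_* names; @name is a printf-style format string, or NULL for the generated default:

ret = drm_encoder_init(dev, &foo->encoder, &foo_encoder_funcs,
		       DRM_MODE_ENCODER_TMDS, "hdmi-%d", 0);
if (ret)
	return ret;
foo->encoder.possible_crtcs = 0x1;	/* CRTC 0 only */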
-
-/**
- * drm_encoder_cleanup - cleans up an initialised encoder
- * @encoder: encoder to cleanup
- *
- * Cleans up the encoder but doesn't free the object.
- */
-void drm_encoder_cleanup(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
-
- /* Note that the encoder_list is considered to be static; should we
- * remove the drm_encoder at runtime we would have to decrement all
- * the indices on the drm_encoder after us in the encoder_list.
- */
-
- drm_modeset_lock_all(dev);
- drm_mode_object_unregister(dev, &encoder->base);
- kfree(encoder->name);
- list_del(&encoder->head);
- dev->mode_config.num_encoder--;
- drm_modeset_unlock_all(dev);
-
- memset(encoder, 0, sizeof(*encoder));
-}
-EXPORT_SYMBOL(drm_encoder_cleanup);
-
-static unsigned int drm_num_planes(struct drm_device *dev)
-{
- unsigned int num = 0;
- struct drm_plane *tmp;
-
- drm_for_each_plane(tmp, dev) {
- num++;
- }
-
- return num;
-}
-
-/**
- * drm_universal_plane_init - Initialize a new universal plane object
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (%DRM_FORMAT_*)
- * @format_count: number of elements in @formats
- * @type: type of plane (overlay, primary, cursor)
- * @name: printf style format string for the plane name, or NULL for default name
- *
- * Initializes a plane object of type @type.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- enum drm_plane_type type,
- const char *name, ...)
-{
- struct drm_mode_config *config = &dev->mode_config;
- int ret;
-
- ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
- if (ret)
- return ret;
-
- drm_modeset_lock_init(&plane->mutex);
-
- plane->base.properties = &plane->properties;
- plane->dev = dev;
- plane->funcs = funcs;
- plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
- GFP_KERNEL);
- if (!plane->format_types) {
- DRM_DEBUG_KMS("out of memory when allocating plane\n");
- drm_mode_object_unregister(dev, &plane->base);
- return -ENOMEM;
- }
-
- if (name) {
- va_list ap;
-
- va_start(ap, name);
- plane->name = kvasprintf(GFP_KERNEL, name, ap);
- va_end(ap);
- } else {
- plane->name = kasprintf(GFP_KERNEL, "plane-%d",
- drm_num_planes(dev));
- }
- if (!plane->name) {
- kfree(plane->format_types);
- drm_mode_object_unregister(dev, &plane->base);
- return -ENOMEM;
- }
-
- memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
- plane->format_count = format_count;
- plane->possible_crtcs = possible_crtcs;
- plane->type = type;
-
- list_add_tail(&plane->head, &config->plane_list);
- plane->index = config->num_total_plane++;
- if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- config->num_overlay_plane++;
-
- drm_object_attach_property(&plane->base,
- config->plane_type_property,
- plane->type);
-
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
- drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
- drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
- drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
- drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
- drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
- drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
- drm_object_attach_property(&plane->base, config->prop_src_x, 0);
- drm_object_attach_property(&plane->base, config->prop_src_y, 0);
- drm_object_attach_property(&plane->base, config->prop_src_w, 0);
- drm_object_attach_property(&plane->base, config->prop_src_h, 0);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_universal_plane_init);
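Sketch of a primary-plane registration with hypothetical foo_* names:

static const uint32_t foo_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

ret = drm_universal_plane_init(dev, &foo->primary, 0x1 /* CRTC 0 */,
			       &foo_plane_funcs, foo_plane_formats,
			       ARRAY_SIZE(foo_plane_formats),
			       DRM_PLANE_TYPE_PRIMARY, NULL);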
-
-static int drm_plane_register_all(struct drm_device *dev)
-{
- struct drm_plane *plane;
- int ret = 0;
-
- drm_for_each_plane(plane, dev) {
- if (plane->funcs->late_register)
- ret = plane->funcs->late_register(plane);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void drm_plane_unregister_all(struct drm_device *dev)
-{
- struct drm_plane *plane;
-
- drm_for_each_plane(plane, dev) {
- if (plane->funcs->early_unregister)
- plane->funcs->early_unregister(plane);
- }
-}
-
-/**
- * drm_plane_init - Initialize a legacy plane
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (%DRM_FORMAT_*)
- * @format_count: number of elements in @formats
- * @is_primary: plane type (primary vs overlay)
- *
- * Legacy API to initialize a DRM plane.
- *
- * New drivers should call drm_universal_plane_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- bool is_primary)
-{
- enum drm_plane_type type;
-
- type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
- return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
- formats, format_count, type, NULL);
-}
-EXPORT_SYMBOL(drm_plane_init);
-
-/**
- * drm_plane_cleanup - Clean up the core plane usage
- * @plane: plane to cleanup
- *
- * This function cleans up @plane and removes it from the DRM mode setting
- * core. Note that the function does *not* free the plane structure itself,
- * this is the responsibility of the caller.
- */
-void drm_plane_cleanup(struct drm_plane *plane)
-{
- struct drm_device *dev = plane->dev;
-
- drm_modeset_lock_all(dev);
- kfree(plane->format_types);
- drm_mode_object_unregister(dev, &plane->base);
-
- BUG_ON(list_empty(&plane->head));
-
- /* Note that the plane_list is considered to be static; should we
- * remove the drm_plane at runtime we would have to decrement all
- * the indices on the drm_plane after us in the plane_list.
- */
-
- list_del(&plane->head);
- dev->mode_config.num_total_plane--;
- if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- dev->mode_config.num_overlay_plane--;
- drm_modeset_unlock_all(dev);
-
- WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
- if (plane->state && plane->funcs->atomic_destroy_state)
- plane->funcs->atomic_destroy_state(plane, plane->state);
-
- kfree(plane->name);
-
- memset(plane, 0, sizeof(*plane));
-}
-EXPORT_SYMBOL(drm_plane_cleanup);
-
-/**
- * drm_plane_from_index - find the registered plane at an index
- * @dev: DRM device
- * @idx: index of the registered plane to find
- *
- * Given a plane index, return the registered plane from DRM device's
- * list of planes with matching index.
- */
-struct drm_plane *
-drm_plane_from_index(struct drm_device *dev, int idx)
-{
- struct drm_plane *plane;
-
- drm_for_each_plane(plane, dev)
- if (idx == plane->index)
- return plane;
-
- return NULL;
-}
-EXPORT_SYMBOL(drm_plane_from_index);
-
-/**
- * drm_plane_force_disable - Forcibly disable a plane
- * @plane: plane to disable
- *
- * Forces the plane to be disabled.
- *
- * Used when the plane's current framebuffer is destroyed,
- * and when restoring fbdev mode.
- */
-void drm_plane_force_disable(struct drm_plane *plane)
-{
- int ret;
-
- if (!plane->fb)
- return;
-
- plane->old_fb = plane->fb;
- ret = plane->funcs->disable_plane(plane);
- if (ret) {
- DRM_ERROR("failed to disable plane with busy fb\n");
- plane->old_fb = NULL;
- return;
- }
- /* disconnect the plane from the fb and crtc: */
- drm_framebuffer_unreference(plane->old_fb);
- plane->old_fb = NULL;
- plane->fb = NULL;
- plane->crtc = NULL;
-}
-EXPORT_SYMBOL(drm_plane_force_disable);
-
int drm_modeset_register_all(struct drm_device *dev)
{
int ret;
@@ -1540,39 +298,11 @@ void drm_modeset_unregister_all(struct drm_device *dev)
static int drm_mode_create_standard_properties(struct drm_device *dev)
{
struct drm_property *prop;
+ int ret;
- /*
- * Standard properties (apply to all connectors)
- */
- prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "EDID", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.edid_property = prop;
-
- prop = drm_property_create_enum(dev, 0,
- "DPMS", drm_dpms_enum_list,
- ARRAY_SIZE(drm_dpms_enum_list));
- if (!prop)
- return -ENOMEM;
- dev->mode_config.dpms_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "PATH", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.path_property = prop;
-
- prop = drm_property_create(dev,
- DRM_MODE_PROP_BLOB |
- DRM_MODE_PROP_IMMUTABLE,
- "TILE", 0);
- if (!prop)
- return -ENOMEM;
- dev->mode_config.tile_property = prop;
+ ret = drm_connector_create_standard_properties(dev);
+ if (ret)
+ return ret;
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"type", drm_plane_type_enum_list,
@@ -1693,250 +423,6 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
}
/**
- * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
- * @dev: DRM device
- *
- * Called by a driver the first time a DVI-I connector is made.
- */
-int drm_mode_create_dvi_i_properties(struct drm_device *dev)
-{
- struct drm_property *dvi_i_selector;
- struct drm_property *dvi_i_subconnector;
-
- if (dev->mode_config.dvi_i_select_subconnector_property)
- return 0;
-
- dvi_i_selector =
- drm_property_create_enum(dev, 0,
- "select subconnector",
- drm_dvi_i_select_enum_list,
- ARRAY_SIZE(drm_dvi_i_select_enum_list));
- dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
-
- dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
- "subconnector",
- drm_dvi_i_subconnector_enum_list,
- ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
- dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
-
-/**
- * drm_mode_create_tv_properties - create TV specific connector properties
- * @dev: DRM device
- * @num_modes: number of different TV formats (modes) supported
- * @modes: array of pointers to strings containing the name of each format
- *
- * Called by a driver's TV initialization routine, this function creates
- * the TV specific connector properties for a given device. Caller is
- * responsible for allocating a list of format names and passing them to
- * this routine.
- */
-int drm_mode_create_tv_properties(struct drm_device *dev,
- unsigned int num_modes,
- const char * const modes[])
-{
- struct drm_property *tv_selector;
- struct drm_property *tv_subconnector;
- unsigned int i;
-
- if (dev->mode_config.tv_select_subconnector_property)
- return 0;
-
- /*
- * Basic connector properties
- */
- tv_selector = drm_property_create_enum(dev, 0,
- "select subconnector",
- drm_tv_select_enum_list,
- ARRAY_SIZE(drm_tv_select_enum_list));
- if (!tv_selector)
- goto nomem;
-
- dev->mode_config.tv_select_subconnector_property = tv_selector;
-
- tv_subconnector =
- drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
- "subconnector",
- drm_tv_subconnector_enum_list,
- ARRAY_SIZE(drm_tv_subconnector_enum_list));
- if (!tv_subconnector)
- goto nomem;
- dev->mode_config.tv_subconnector_property = tv_subconnector;
-
- /*
- * Other, TV specific properties: margins & TV modes.
- */
- dev->mode_config.tv_left_margin_property =
- drm_property_create_range(dev, 0, "left margin", 0, 100);
- if (!dev->mode_config.tv_left_margin_property)
- goto nomem;
-
- dev->mode_config.tv_right_margin_property =
- drm_property_create_range(dev, 0, "right margin", 0, 100);
- if (!dev->mode_config.tv_right_margin_property)
- goto nomem;
-
- dev->mode_config.tv_top_margin_property =
- drm_property_create_range(dev, 0, "top margin", 0, 100);
- if (!dev->mode_config.tv_top_margin_property)
- goto nomem;
-
- dev->mode_config.tv_bottom_margin_property =
- drm_property_create_range(dev, 0, "bottom margin", 0, 100);
- if (!dev->mode_config.tv_bottom_margin_property)
- goto nomem;
-
- dev->mode_config.tv_mode_property =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", num_modes);
- if (!dev->mode_config.tv_mode_property)
- goto nomem;
-
- for (i = 0; i < num_modes; i++)
- drm_property_add_enum(dev->mode_config.tv_mode_property, i,
- i, modes[i]);
-
- dev->mode_config.tv_brightness_property =
- drm_property_create_range(dev, 0, "brightness", 0, 100);
- if (!dev->mode_config.tv_brightness_property)
- goto nomem;
-
- dev->mode_config.tv_contrast_property =
- drm_property_create_range(dev, 0, "contrast", 0, 100);
- if (!dev->mode_config.tv_contrast_property)
- goto nomem;
-
- dev->mode_config.tv_flicker_reduction_property =
- drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
- if (!dev->mode_config.tv_flicker_reduction_property)
- goto nomem;
-
- dev->mode_config.tv_overscan_property =
- drm_property_create_range(dev, 0, "overscan", 0, 100);
- if (!dev->mode_config.tv_overscan_property)
- goto nomem;
-
- dev->mode_config.tv_saturation_property =
- drm_property_create_range(dev, 0, "saturation", 0, 100);
- if (!dev->mode_config.tv_saturation_property)
- goto nomem;
-
- dev->mode_config.tv_hue_property =
- drm_property_create_range(dev, 0, "hue", 0, 100);
- if (!dev->mode_config.tv_hue_property)
- goto nomem;
-
- return 0;
-nomem:
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_create_tv_properties);
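Sketch of a TV-out driver creating and attaching these, with a hypothetical two-entry format list:

static const char * const foo_tv_modes[] = {
	"NTSC", "PAL",
};

ret = drm_mode_create_tv_properties(dev, ARRAY_SIZE(foo_tv_modes),
				    foo_tv_modes);
if (ret)
	return ret;
drm_object_attach_property(&connector->base,
			   dev->mode_config.tv_mode_property, 0);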
-
-/**
- * drm_mode_create_scaling_mode_property - create scaling mode property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_scaling_mode_property(struct drm_device *dev)
-{
- struct drm_property *scaling_mode;
-
- if (dev->mode_config.scaling_mode_property)
- return 0;
-
- scaling_mode =
- drm_property_create_enum(dev, 0, "scaling mode",
- drm_scaling_mode_enum_list,
- ARRAY_SIZE(drm_scaling_mode_enum_list));
-
- dev->mode_config.scaling_mode_property = scaling_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
-
-/**
- * drm_mode_create_aspect_ratio_property - create aspect ratio property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
-{
- if (dev->mode_config.aspect_ratio_property)
- return 0;
-
- dev->mode_config.aspect_ratio_property =
- drm_property_create_enum(dev, 0, "aspect ratio",
- drm_aspect_ratio_enum_list,
- ARRAY_SIZE(drm_aspect_ratio_enum_list));
-
- if (dev->mode_config.aspect_ratio_property == NULL)
- return -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
-
-/**
- * drm_mode_create_dirty_info_property - create dirty info property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dirty_info_property(struct drm_device *dev)
-{
- struct drm_property *dirty_info;
-
- if (dev->mode_config.dirty_info_property)
- return 0;
-
- dirty_info =
- drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
- "dirty",
- drm_dirty_info_enum_list,
- ARRAY_SIZE(drm_dirty_info_enum_list));
- dev->mode_config.dirty_info_property = dirty_info;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
-
-/**
- * drm_mode_create_suggested_offset_properties - create suggested offset properties
- * @dev: DRM device
- *
- * Create the suggested x/y offset properties for connectors.
- */
-int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
-{
- if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
- return 0;
-
- dev->mode_config.suggested_x_property =
- drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
-
- dev->mode_config.suggested_y_property =
- drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
-
- if (dev->mode_config.suggested_x_property == NULL ||
- dev->mode_config.suggested_y_property == NULL)
- return -ENOMEM;
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
-
-/**
* drm_mode_getresources - get graphics configuration
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
@@ -2123,599 +609,6 @@ int drm_mode_getcrtc(struct drm_device *dev,
return 0;
}
-static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
- const struct drm_file *file_priv)
-{
- /*
- * If user-space hasn't configured the driver to expose the stereo 3D
- * modes, don't expose them.
- */
- if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
- return false;
-
- return true;
-}
-
-static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
-{
- /* For atomic drivers only state objects are synchronously updated and
- * protected by modeset locks, so check those first. */
- if (connector->state)
- return connector->state->best_encoder;
- return connector->encoder;
-}
-
-/* helper for getconnector and getproperties ioctls */
-static int get_properties(struct drm_mode_object *obj, bool atomic,
- uint32_t __user *prop_ptr, uint64_t __user *prop_values,
- uint32_t *arg_count_props)
-{
- int props_count;
- int i, ret, copied;
-
- props_count = obj->properties->count;
- if (!atomic)
- props_count -= obj->properties->atomic_count;
-
- if ((*arg_count_props >= props_count) && props_count) {
- for (i = 0, copied = 0; copied < props_count; i++) {
- struct drm_property *prop = obj->properties->properties[i];
- uint64_t val;
-
- if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
- continue;
-
- ret = drm_object_property_get_value(obj, prop, &val);
- if (ret)
- return ret;
-
- if (put_user(prop->base.id, prop_ptr + copied))
- return -EFAULT;
-
- if (put_user(val, prop_values + copied))
- return -EFAULT;
-
- copied++;
- }
- }
- *arg_count_props = props_count;
-
- return 0;
-}
-
-/**
- * drm_mode_getconnector - get connector configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct a connector configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getconnector(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_connector *out_resp = data;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct drm_display_mode *mode;
- int mode_count = 0;
- int encoders_count = 0;
- int ret = 0;
- int copied = 0;
- int i;
- struct drm_mode_modeinfo u_mode;
- struct drm_mode_modeinfo __user *mode_ptr;
- uint32_t __user *encoder_ptr;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-
- mutex_lock(&dev->mode_config.mutex);
-
- connector = drm_connector_lookup(dev, out_resp->connector_id);
- if (!connector) {
- ret = -ENOENT;
- goto out_unlock;
- }
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
- if (connector->encoder_ids[i] != 0)
- encoders_count++;
-
- if (out_resp->count_modes == 0) {
- connector->funcs->fill_modes(connector,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
- }
-
- /* delayed so we get modes regardless of pre-fill_modes state */
- list_for_each_entry(mode, &connector->modes, head)
- if (drm_mode_expose_to_userspace(mode, file_priv))
- mode_count++;
-
- out_resp->connector_id = connector->base.id;
- out_resp->connector_type = connector->connector_type;
- out_resp->connector_type_id = connector->connector_type_id;
- out_resp->mm_width = connector->display_info.width_mm;
- out_resp->mm_height = connector->display_info.height_mm;
- out_resp->subpixel = connector->display_info.subpixel_order;
- out_resp->connection = connector->status;
-
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- encoder = drm_connector_get_encoder(connector);
- if (encoder)
- out_resp->encoder_id = encoder->base.id;
- else
- out_resp->encoder_id = 0;
-
- /*
- * This ioctl is called twice, once to determine how much space is
- * needed, and the 2nd time to fill it.
- */
- if ((out_resp->count_modes >= mode_count) && mode_count) {
- copied = 0;
- mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
- list_for_each_entry(mode, &connector->modes, head) {
- if (!drm_mode_expose_to_userspace(mode, file_priv))
- continue;
-
- drm_mode_convert_to_umode(&u_mode, mode);
- if (copy_to_user(mode_ptr + copied,
- &u_mode, sizeof(u_mode))) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- out_resp->count_modes = mode_count;
-
- ret = get_properties(&connector->base, file_priv->atomic,
- (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
- (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
- &out_resp->count_props);
- if (ret)
- goto out;
-
- if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
- copied = 0;
- encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] != 0) {
- if (put_user(connector->encoder_ids[i],
- encoder_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
- }
- }
- }
- out_resp->count_encoders = encoders_count;
-
-out:
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
- drm_connector_unreference(connector);
-out_unlock:
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
-static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
- struct drm_connector *connector;
- struct drm_device *dev = encoder->dev;
- bool uses_atomic = false;
-
- /* For atomic drivers only state objects are synchronously updated and
- * protected by modeset locks, so check those first. */
- drm_for_each_connector(connector, dev) {
- if (!connector->state)
- continue;
-
- uses_atomic = true;
-
- if (connector->state->best_encoder != encoder)
- continue;
-
- return connector->state->crtc;
- }
-
- /* Don't return stale data (e.g. pending async disable). */
- if (uses_atomic)
- return NULL;
-
- return encoder->crtc;
-}
-
-/**
- * drm_mode_getencoder - get encoder configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Construct an encoder configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getencoder(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_encoder *enc_resp = data;
- struct drm_encoder *encoder;
- struct drm_crtc *crtc;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- encoder = drm_encoder_find(dev, enc_resp->encoder_id);
- if (!encoder)
- return -ENOENT;
-
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- crtc = drm_encoder_get_crtc(encoder);
- if (crtc)
- enc_resp->crtc_id = crtc->base.id;
- else
- enc_resp->crtc_id = 0;
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
-
- enc_resp->encoder_type = encoder->encoder_type;
- enc_resp->encoder_id = encoder->base.id;
- enc_resp->possible_crtcs = encoder->possible_crtcs;
- enc_resp->possible_clones = encoder->possible_clones;
-
- return 0;
-}
-
-/**
- * drm_mode_getplane_res - enumerate all plane resources
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Construct a list of plane ids to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getplane_res(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_plane_res *plane_resp = data;
- struct drm_mode_config *config;
- struct drm_plane *plane;
- uint32_t __user *plane_ptr;
- int copied = 0;
- unsigned num_planes;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- config = &dev->mode_config;
-
- if (file_priv->universal_planes)
- num_planes = config->num_total_plane;
- else
- num_planes = config->num_overlay_plane;
-
- /*
- * This ioctl is called twice, once to determine how much space is
- * needed, and the 2nd time to fill it.
- */
- if (num_planes &&
- (plane_resp->count_planes >= num_planes)) {
- plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
-
- /* Plane lists are invariant, no locking needed. */
- drm_for_each_plane(plane, dev) {
- /*
- * Unless userspace set the 'universal planes'
- * capability bit, only advertise overlays.
- */
- if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
- !file_priv->universal_planes)
- continue;
-
- if (put_user(plane->base.id, plane_ptr + copied))
- return -EFAULT;
- copied++;
- }
- }
- plane_resp->count_planes = num_planes;
-
- return 0;
-}
-
-/**
- * drm_mode_getplane - get plane configuration
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Construct a plane configuration structure to return to the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getplane(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_get_plane *plane_resp = data;
- struct drm_plane *plane;
- uint32_t __user *format_ptr;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- plane = drm_plane_find(dev, plane_resp->plane_id);
- if (!plane)
- return -ENOENT;
-
- drm_modeset_lock(&plane->mutex, NULL);
- if (plane->crtc)
- plane_resp->crtc_id = plane->crtc->base.id;
- else
- plane_resp->crtc_id = 0;
-
- if (plane->fb)
- plane_resp->fb_id = plane->fb->base.id;
- else
- plane_resp->fb_id = 0;
- drm_modeset_unlock(&plane->mutex);
-
- plane_resp->plane_id = plane->base.id;
- plane_resp->possible_crtcs = plane->possible_crtcs;
- plane_resp->gamma_size = 0;
-
- /*
- * This ioctl is called twice, once to determine how much space is
- * needed, and the 2nd time to fill it.
- */
- if (plane->format_count &&
- (plane_resp->count_format_types >= plane->format_count)) {
- format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
- if (copy_to_user(format_ptr,
- plane->format_types,
- sizeof(uint32_t) * plane->format_count)) {
- return -EFAULT;
- }
- }
- plane_resp->count_format_types = plane->format_count;
-
- return 0;
-}
-
-/**
- * drm_plane_check_pixel_format - Check if the plane supports the pixel format
- * @plane: plane to check for format support
- * @format: the pixel format
- *
- * Returns:
- * Zero if @plane has @format in its list of supported pixel formats, -EINVAL
- * otherwise.
- */
-int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
-{
- unsigned int i;
-
- for (i = 0; i < plane->format_count; i++) {
- if (format == plane->format_types[i])
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int check_src_coords(uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- const struct drm_framebuffer *fb)
-{
- unsigned int fb_width, fb_height;
-
- fb_width = fb->width << 16;
- fb_height = fb->height << 16;
-
- /* Make sure source coordinates are inside the fb. */
- if (src_w > fb_width ||
- src_x > fb_width - src_w ||
- src_h > fb_height ||
- src_y > fb_height - src_h) {
- DRM_DEBUG_KMS("Invalid source coordinates "
- "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
- src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
- src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
- src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
- src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
- return -ENOSPC;
- }
-
- return 0;
-}
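The src_* values are 16.16 fixed point; for example, a source rectangle covering a whole 1920x1080 fb:

uint32_t src_x = 0 << 16;	/* 0.0 */
uint32_t src_y = 0 << 16;	/* 0.0 */
uint32_t src_w = 1920 << 16;	/* 1920.0 */
uint32_t src_h = 1080 << 16;	/* 1080.0 */
/* check_src_coords(src_x, src_y, src_w, src_h, fb) returns 0 here */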
-
-/*
- * setplane_internal - setplane handler for internal callers
- *
- * Note that we assume an extra reference has already been taken on fb. If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
- *
- * src_{x,y,w,h} are provided in 16.16 fixed point format
- */
-static int __setplane_internal(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int32_t crtc_x, int32_t crtc_y,
- uint32_t crtc_w, uint32_t crtc_h,
- /* src_{x,y,w,h} values are 16.16 fixed point */
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- int ret = 0;
-
- /* No fb means shut it down */
- if (!fb) {
- plane->old_fb = plane->fb;
- ret = plane->funcs->disable_plane(plane);
- if (!ret) {
- plane->crtc = NULL;
- plane->fb = NULL;
- } else {
- plane->old_fb = NULL;
- }
- goto out;
- }
-
- /* Check whether this plane is usable on this CRTC */
- if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
- DRM_DEBUG_KMS("Invalid crtc for plane\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
- if (ret) {
- DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(fb->pixel_format));
- goto out;
- }
-
- /* Give drivers some help against integer overflows */
- if (crtc_w > INT_MAX ||
- crtc_x > INT_MAX - (int32_t) crtc_w ||
- crtc_h > INT_MAX ||
- crtc_y > INT_MAX - (int32_t) crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- crtc_w, crtc_h, crtc_x, crtc_y);
- ret = -ERANGE;
- goto out;
- }
-
- ret = check_src_coords(src_x, src_y, src_w, src_h, fb);
- if (ret)
- goto out;
-
- plane->old_fb = plane->fb;
- ret = plane->funcs->update_plane(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
- if (!ret) {
- plane->crtc = crtc;
- plane->fb = fb;
- fb = NULL;
- } else {
- plane->old_fb = NULL;
- }
-
-out:
- if (fb)
- drm_framebuffer_unreference(fb);
- if (plane->old_fb)
- drm_framebuffer_unreference(plane->old_fb);
- plane->old_fb = NULL;
-
- return ret;
-}
-
-static int setplane_internal(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int32_t crtc_x, int32_t crtc_y,
- uint32_t crtc_w, uint32_t crtc_h,
- /* src_{x,y,w,h} values are 16.16 fixed point */
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- int ret;
-
- drm_modeset_lock_all(plane->dev);
- ret = __setplane_internal(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
- drm_modeset_unlock_all(plane->dev);
-
- return ret;
-}
-
-/**
- * drm_mode_setplane - set a plane's configuration
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Set plane configuration, including placement, fb, scaling, and other factors.
- * Or pass a NULL fb to disable (planes may be disabled without providing a
- * valid crtc).
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_setplane(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_set_plane *plane_req = data;
- struct drm_plane *plane;
- struct drm_crtc *crtc = NULL;
- struct drm_framebuffer *fb = NULL;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- /*
- * First, find the plane, crtc, and fb objects. If not available,
- * we don't bother to call the driver.
- */
- plane = drm_plane_find(dev, plane_req->plane_id);
- if (!plane) {
- DRM_DEBUG_KMS("Unknown plane ID %d\n",
- plane_req->plane_id);
- return -ENOENT;
- }
-
- if (plane_req->fb_id) {
- fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
- if (!fb) {
- DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
- plane_req->fb_id);
- return -ENOENT;
- }
-
- crtc = drm_crtc_find(dev, plane_req->crtc_id);
- if (!crtc) {
- DRM_DEBUG_KMS("Unknown crtc ID %d\n",
- plane_req->crtc_id);
- return -ENOENT;
- }
- }
-
- /*
- * setplane_internal will take care of deref'ing either the old or new
- * framebuffer depending on success.
- */
- return setplane_internal(plane, crtc, fb,
- plane_req->crtc_x, plane_req->crtc_y,
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->src_x, plane_req->src_y,
- plane_req->src_w, plane_req->src_h);
-}
-
/**
* drm_mode_set_config_internal - helper to call ->set_config
* @set: modeset config to set
@@ -2802,12 +695,13 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->state &&
- crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) |
- BIT(DRM_ROTATE_270)))
+ crtc->primary->state->rotation & (DRM_ROTATE_90 |
+ DRM_ROTATE_270))
swap(hdisplay, vdisplay);
- return check_src_coords(x << 16, y << 16,
- hdisplay << 16, vdisplay << 16, fb);
+ return drm_framebuffer_check_src_coords(x << 16, y << 16,
+ hdisplay << 16, vdisplay << 16,
+ fb);
}
EXPORT_SYMBOL(drm_crtc_check_viewport);
@@ -2902,8 +796,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
ret = drm_plane_check_pixel_format(crtc->primary,
fb->pixel_format);
if (ret) {
- DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(fb->pixel_format));
+ char *format_name = drm_get_format_name(fb->pixel_format);
+ DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+ kfree(format_name);
goto out;
}
}
@@ -2993,2004 +888,9 @@ out:
return ret;
}
-/**
- * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
- * universal plane handler call
- * @crtc: crtc to update cursor for
- * @req: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Legacy cursor ioctls work directly with driver buffer handles. To
- * translate legacy ioctl calls into universal plane handler calls, we need to
- * wrap the native buffer handle in a drm_framebuffer.
- *
- * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
- * buffer with a pitch of 4*width; the universal plane interface should be used
- * directly in cases where the hardware can support other buffer settings and
- * userspace wants to make use of these capabilities.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-static int drm_mode_cursor_universal(struct drm_crtc *crtc,
- struct drm_mode_cursor2 *req,
- struct drm_file *file_priv)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_framebuffer *fb = NULL;
- struct drm_mode_fb_cmd2 fbreq = {
- .width = req->width,
- .height = req->height,
- .pixel_format = DRM_FORMAT_ARGB8888,
- .pitches = { req->width * 4 },
- .handles = { req->handle },
- };
- int32_t crtc_x, crtc_y;
- uint32_t crtc_w = 0, crtc_h = 0;
- uint32_t src_w = 0, src_h = 0;
- int ret = 0;
-
- BUG_ON(!crtc->cursor);
- WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
-
- /*
- * Obtain fb we'll be using (either new or existing) and take an extra
- * reference to it if fb != NULL. setplane will take care of dropping
- * the reference if the plane update fails.
- */
- if (req->flags & DRM_MODE_CURSOR_BO) {
- if (req->handle) {
- fb = internal_framebuffer_create(dev, &fbreq, file_priv);
- if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
- return PTR_ERR(fb);
- }
- fb->hot_x = req->hot_x;
- fb->hot_y = req->hot_y;
- } else {
- fb = NULL;
- }
- } else {
- fb = crtc->cursor->fb;
- if (fb)
- drm_framebuffer_reference(fb);
- }
-
- if (req->flags & DRM_MODE_CURSOR_MOVE) {
- crtc_x = req->x;
- crtc_y = req->y;
- } else {
- crtc_x = crtc->cursor_x;
- crtc_y = crtc->cursor_y;
- }
-
- if (fb) {
- crtc_w = fb->width;
- crtc_h = fb->height;
- src_w = fb->width << 16;
- src_h = fb->height << 16;
- }
-
- /*
- * setplane_internal will take care of deref'ing either the old or new
- * framebuffer depending on success.
- */
- ret = __setplane_internal(crtc->cursor, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- 0, 0, src_w, src_h);
-
- /* Update successful; save new cursor position, if necessary */
- if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
- crtc->cursor_x = req->x;
- crtc->cursor_y = req->y;
- }
-
- return ret;
-}
-
-static int drm_mode_cursor_common(struct drm_device *dev,
- struct drm_mode_cursor2 *req,
- struct drm_file *file_priv)
-{
- struct drm_crtc *crtc;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
- return -EINVAL;
-
- crtc = drm_crtc_find(dev, req->crtc_id);
- if (!crtc) {
- DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
- return -ENOENT;
- }
-
- /*
- * If this crtc has a universal cursor plane, call that plane's update
- * handler rather than using legacy cursor handlers.
- */
- drm_modeset_lock_crtc(crtc, crtc->cursor);
- if (crtc->cursor) {
- ret = drm_mode_cursor_universal(crtc, req, file_priv);
- goto out;
- }
-
- if (req->flags & DRM_MODE_CURSOR_BO) {
- if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
- ret = -ENXIO;
- goto out;
- }
- /* Turns off the cursor if handle is 0 */
- if (crtc->funcs->cursor_set2)
- ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
- req->width, req->height, req->hot_x, req->hot_y);
- else
- ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
- req->width, req->height);
- }
-
- if (req->flags & DRM_MODE_CURSOR_MOVE) {
- if (crtc->funcs->cursor_move) {
- ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
- } else {
- ret = -EFAULT;
- goto out;
- }
- }
-out:
- drm_modeset_unlock_crtc(crtc);
-
- return ret;
-
-}
-
-
-/**
- * drm_mode_cursor_ioctl - set CRTC's cursor configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Set the cursor configuration based on user request.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_cursor_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_cursor *req = data;
- struct drm_mode_cursor2 new_req;
-
- memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
- new_req.hot_x = new_req.hot_y = 0;
-
- return drm_mode_cursor_common(dev, &new_req, file_priv);
-}
-
-/**
- * drm_mode_cursor2_ioctl - set CRTC's cursor configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Set the cursor configuration based on user request. This implements the 2nd
- * version of the cursor ioctl, which allows userspace to additionally specify
- * the hotspot of the pointer.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_cursor2_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_cursor2 *req = data;
-
- return drm_mode_cursor_common(dev, req, file_priv);
-}
-
-/**
- * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
- * @bpp: bits per pixels
- * @depth: bit depth per pixel
- *
- * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
- * Useful in fbdev emulation code, since that deals in those values.
- */
-uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
-{
- uint32_t fmt;
-
- switch (bpp) {
- case 8:
- fmt = DRM_FORMAT_C8;
- break;
- case 16:
- if (depth == 15)
- fmt = DRM_FORMAT_XRGB1555;
- else
- fmt = DRM_FORMAT_RGB565;
- break;
- case 24:
- fmt = DRM_FORMAT_RGB888;
- break;
- case 32:
- if (depth == 24)
- fmt = DRM_FORMAT_XRGB8888;
- else if (depth == 30)
- fmt = DRM_FORMAT_XRGB2101010;
- else
- fmt = DRM_FORMAT_ARGB8888;
- break;
- default:
- DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
- fmt = DRM_FORMAT_XRGB8888;
- break;
- }
-
- return fmt;
-}
-EXPORT_SYMBOL(drm_mode_legacy_fb_format);
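A few mappings as a sketch (the values follow directly from the switch above):

uint32_t a = drm_mode_legacy_fb_format(32, 24);	/* DRM_FORMAT_XRGB8888 */
uint32_t b = drm_mode_legacy_fb_format(16, 16);	/* DRM_FORMAT_RGB565 */
uint32_t c = drm_mode_legacy_fb_format(16, 15);	/* DRM_FORMAT_XRGB1555 */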
-
-/**
- * drm_mode_addfb - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new framebuffer to the graphics configuration, given a user request. This is the
- * original addfb ioctl which only supported RGB formats.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_fb_cmd *or = data;
- struct drm_mode_fb_cmd2 r = {};
- int ret;
-
- /* convert to new format and call new ioctl */
- r.fb_id = or->fb_id;
- r.width = or->width;
- r.height = or->height;
- r.pitches[0] = or->pitch;
- r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
- r.handles[0] = or->handle;
-
- ret = drm_mode_addfb2(dev, &r, file_priv);
- if (ret)
- return ret;
-
- or->fb_id = r.fb_id;
-
- return 0;
-}
-
-static int format_check(const struct drm_mode_fb_cmd2 *r)
-{
- uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
-
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_XBGR4444:
- case DRM_FORMAT_RGBX4444:
- case DRM_FORMAT_BGRX4444:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ABGR4444:
- case DRM_FORMAT_RGBA4444:
- case DRM_FORMAT_BGRA4444:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_AYUV:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 0;
- default:
- DRM_DEBUG_KMS("invalid pixel format %s\n",
- drm_get_format_name(r->pixel_format));
- return -EINVAL;
- }
-}
-
-static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
-{
- int ret, hsub, vsub, num_planes, i;
-
- ret = format_check(r);
- if (ret) {
- DRM_DEBUG_KMS("bad framebuffer format %s\n",
- drm_get_format_name(r->pixel_format));
- return ret;
- }
-
- hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
- num_planes = drm_format_num_planes(r->pixel_format);
-
- if (r->width == 0 || r->width % hsub) {
- DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
- return -EINVAL;
- }
-
- if (r->height == 0 || r->height % vsub) {
- DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
- return -EINVAL;
- }
-
- for (i = 0; i < num_planes; i++) {
- unsigned int width = r->width / (i != 0 ? hsub : 1);
- unsigned int height = r->height / (i != 0 ? vsub : 1);
- unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
-
- if (!r->handles[i]) {
- DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
- return -EINVAL;
- }
-
- if ((uint64_t) width * cpp > UINT_MAX)
- return -ERANGE;
-
- if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
- return -ERANGE;
-
- if (r->pitches[i] < width * cpp) {
- DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
- return -EINVAL;
- }
-
- if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
- DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
- r->modifier[i], i);
- return -EINVAL;
- }
-
- /* modifier specific checks: */
- switch (r->modifier[i]) {
- case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
- /* NOTE: the pitch restriction may be lifted later if it turns
- * out that no hw has this restriction:
- */
- if (r->pixel_format != DRM_FORMAT_NV12 ||
- width % 128 || height % 32 ||
- r->pitches[i] % 128) {
- DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
- return -EINVAL;
- }
- break;
-
- default:
- break;
- }
- }
-
- for (i = num_planes; i < 4; i++) {
- if (r->modifier[i]) {
- DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
- return -EINVAL;
- }
-
- /* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
- if (!(r->flags & DRM_MODE_FB_MODIFIERS))
- continue;
-
- if (r->handles[i]) {
- DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
- return -EINVAL;
- }
-
- if (r->pitches[i]) {
- DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
- return -EINVAL;
- }
-
- if (r->offsets[i]) {
- DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
- return -EINVAL;
- }
- }
-
- return 0;
-}
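
To make the per-plane arithmetic above concrete, here is a sketch of the minimum pitch framebuffer_check() accepts, using the same drm_fourcc.h helpers (the 640x480 NV12 numbers are only a worked example):

#include <drm/drm_fourcc.h>

static unsigned int demo_min_pitch(uint32_t format, uint32_t width, int plane)
{
	/* Planes beyond the first are chroma-subsampled horizontally. */
	int hsub = plane ? drm_format_horz_chroma_subsampling(format) : 1;

	/* For 640x480 NV12: plane 0 needs 640 * 1 = 640 bytes, plane 1
	 * needs (640 / 2) * 2 = 640 bytes (interleaved CbCr, cpp == 2). */
	return (width / hsub) * drm_format_plane_cpp(format, plane);
}
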
-
-static struct drm_framebuffer *
-internal_framebuffer_create(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *r,
- struct drm_file *file_priv)
-{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_framebuffer *fb;
- int ret;
-
- if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
- DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
- return ERR_PTR(-EINVAL);
- }
-
- if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
- r->width, config->min_width, config->max_width);
- return ERR_PTR(-EINVAL);
- }
- if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
- r->height, config->min_height, config->max_height);
- return ERR_PTR(-EINVAL);
- }
-
- if (r->flags & DRM_MODE_FB_MODIFIERS &&
- !dev->mode_config.allow_fb_modifiers) {
- DRM_DEBUG_KMS("driver does not support fb modifiers\n");
- return ERR_PTR(-EINVAL);
- }
-
- ret = framebuffer_check(r);
- if (ret)
- return ERR_PTR(ret);
-
- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
- if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("could not create framebuffer\n");
- return fb;
- }
-
- return fb;
-}
-
-/**
- * drm_mode_addfb2 - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the specified CRTC, given a user request with format. This is
- * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
- * and uses fourcc codes as pixel format specifiers.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_addfb2(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_fb_cmd2 *r = data;
- struct drm_framebuffer *fb;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- fb = internal_framebuffer_create(dev, r, file_priv);
- if (IS_ERR(fb))
- return PTR_ERR(fb);
-
- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
- r->fb_id = fb->base.id;
-
- /* Transfer ownership to the filp for reaping on close */
- mutex_lock(&file_priv->fbs_lock);
- list_add(&fb->filp_head, &file_priv->fbs);
- mutex_unlock(&file_priv->fbs_lock);
-
- return 0;
-}
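
From the userspace side, this ioctl is reached roughly as follows (a sketch against the uapi headers, with error handling trimmed; note that the unused plane slots must stay zeroed, as enforced by framebuffer_check() above):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_fourcc.h>

/* 'fd' is an open DRM device node, 'handle' a GEM buffer handle. */
static int demo_addfb2(int fd, uint32_t handle, uint32_t w, uint32_t h,
		       uint32_t pitch, uint32_t *fb_id)
{
	struct drm_mode_fb_cmd2 cmd;

	memset(&cmd, 0, sizeof(cmd));		/* unused planes stay zero */
	cmd.width = w;
	cmd.height = h;
	cmd.pixel_format = DRM_FORMAT_XRGB8888;
	cmd.handles[0] = handle;
	cmd.pitches[0] = pitch;

	if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd))
		return -1;

	*fb_id = cmd.fb_id;			/* filled in by the kernel */
	return 0;
}
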
-
-struct drm_mode_rmfb_work {
- struct work_struct work;
- struct list_head fbs;
-};
-
-static void drm_mode_rmfb_work_fn(struct work_struct *w)
-{
- struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
-
- while (!list_empty(&arg->fbs)) {
- struct drm_framebuffer *fb =
- list_first_entry(&arg->fbs, typeof(*fb), filp_head);
-
- list_del_init(&fb->filp_head);
- drm_framebuffer_remove(fb);
- }
-}
-
-/**
- * drm_mode_rmfb - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Remove the FB specified by the user.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_framebuffer *fb = NULL;
- struct drm_framebuffer *fbl = NULL;
- uint32_t *id = data;
- int found = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- fb = drm_framebuffer_lookup(dev, *id);
- if (!fb)
- return -ENOENT;
-
- mutex_lock(&file_priv->fbs_lock);
- list_for_each_entry(fbl, &file_priv->fbs, filp_head)
- if (fb == fbl)
- found = 1;
- if (!found) {
- mutex_unlock(&file_priv->fbs_lock);
- goto fail_unref;
- }
-
- list_del_init(&fb->filp_head);
- mutex_unlock(&file_priv->fbs_lock);
-
- /* drop the reference we picked up in framebuffer lookup */
- drm_framebuffer_unreference(fb);
-
- /*
- * we now own the reference that was stored in the fbs list
- *
- * drm_framebuffer_remove may fail with -EINTR on pending signals,
- * so run this in a separate stack as there's no way to correctly
- * handle this after the fb is already removed from the lookup table.
- */
- if (drm_framebuffer_read_refcount(fb) > 1) {
- struct drm_mode_rmfb_work arg;
-
- INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
- INIT_LIST_HEAD(&arg.fbs);
- list_add_tail(&fb->filp_head, &arg.fbs);
-
- schedule_work(&arg.work);
- flush_work(&arg.work);
- destroy_work_on_stack(&arg.work);
- } else
- drm_framebuffer_unreference(fb);
-
- return 0;
-
-fail_unref:
- drm_framebuffer_unreference(fb);
- return -ENOENT;
-}
-
-/**
- * drm_mode_getfb - get FB info
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Lookup the FB given its ID and return info about it.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_fb_cmd *r = data;
- struct drm_framebuffer *fb;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- fb = drm_framebuffer_lookup(dev, r->fb_id);
- if (!fb)
- return -ENOENT;
-
- r->height = fb->height;
- r->width = fb->width;
- r->depth = fb->depth;
- r->bpp = fb->bits_per_pixel;
- r->pitch = fb->pitches[0];
- if (fb->funcs->create_handle) {
- if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
- drm_is_control_client(file_priv)) {
- ret = fb->funcs->create_handle(fb, file_priv,
- &r->handle);
- } else {
- /* GET_FB() is an unprivileged ioctl so we must not
- * return a buffer-handle to non-master processes! For
- * backwards-compatibility reasons, we cannot make
- * GET_FB() privileged, so just return an invalid handle
- * for non-masters. */
- r->handle = 0;
- ret = 0;
- }
- } else {
- ret = -ENODEV;
- }
-
- drm_framebuffer_unreference(fb);
-
- return ret;
-}
-
-/**
- * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Lookup the FB and flush out the damaged area supplied by userspace as a clip
- * rectangle list. Generic userspace which does frontbuffer rendering must call
- * this ioctl to flush out the changes on manual-update display outputs, e.g.
- * usb display-link, mipi manual update panels or edp panel self refresh modes.
- *
- * Modesetting drivers which always update the frontbuffer do not need to
- * implement the corresponding ->dirty framebuffer callback.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_clip_rect __user *clips_ptr;
- struct drm_clip_rect *clips = NULL;
- struct drm_mode_fb_dirty_cmd *r = data;
- struct drm_framebuffer *fb;
- unsigned flags;
- int num_clips;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- fb = drm_framebuffer_lookup(dev, r->fb_id);
- if (!fb)
- return -ENOENT;
-
- num_clips = r->num_clips;
- clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
-
- if (!num_clips != !clips_ptr) {
- ret = -EINVAL;
- goto out_err1;
- }
-
- flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
-
- /* If userspace annotates copy, clips must come in pairs */
- if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
- ret = -EINVAL;
- goto out_err1;
- }
-
- if (num_clips && clips_ptr) {
- if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
- ret = -EINVAL;
- goto out_err1;
- }
- clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
- if (!clips) {
- ret = -ENOMEM;
- goto out_err1;
- }
-
- ret = copy_from_user(clips, clips_ptr,
- num_clips * sizeof(*clips));
- if (ret) {
- ret = -EFAULT;
- goto out_err2;
- }
- }
-
- if (fb->funcs->dirty) {
- ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
- clips, num_clips);
- } else {
- ret = -ENOSYS;
- }
-
-out_err2:
- kfree(clips);
-out_err1:
- drm_framebuffer_unreference(fb);
-
- return ret;
-}
-
-/**
- * drm_fb_release - remove and free the FBs on this file
- * @priv: drm file
- *
- * Destroy all the FBs associated with @priv.
- *
- * Called when the drm file is released, not directly by the user via ioctl.
- */
-void drm_fb_release(struct drm_file *priv)
-{
- struct drm_framebuffer *fb, *tfb;
- struct drm_mode_rmfb_work arg;
-
- INIT_LIST_HEAD(&arg.fbs);
-
- /*
- * When the file gets released that means no one else can access the fb
- * list any more, so no need to grab fpriv->fbs_lock. And we need to
- * avoid upsetting lockdep since the universal cursor code adds a
- * framebuffer while holding mutex locks.
- *
- * Note that a real deadlock between fpriv->fbs_lock and the modeset
- * locks is impossible here since no one else but this function can get
- * at it any more.
- */
- list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- if (drm_framebuffer_read_refcount(fb) > 1) {
- list_move_tail(&fb->filp_head, &arg.fbs);
- } else {
- list_del_init(&fb->filp_head);
-
- /* This drops the fpriv->fbs reference. */
- drm_framebuffer_unreference(fb);
- }
- }
-
- if (!list_empty(&arg.fbs)) {
- INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
-
- schedule_work(&arg.work);
- flush_work(&arg.work);
- destroy_work_on_stack(&arg.work);
- }
-}
-
-static bool drm_property_type_valid(struct drm_property *property)
-{
- if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
- return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
- return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-}
-
-/**
- * drm_property_create - create a new property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @num_values: number of pre-defined values
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Note that the DRM core keeps a per-device list of properties and that, if
- * drm_mode_config_cleanup() is called, it will destroy all properties created
- * by the driver.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create(struct drm_device *dev, int flags,
- const char *name, int num_values)
-{
- struct drm_property *property = NULL;
- int ret;
-
- property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
- if (!property)
- return NULL;
-
- property->dev = dev;
-
- if (num_values) {
- property->values = kcalloc(num_values, sizeof(uint64_t),
- GFP_KERNEL);
- if (!property->values)
- goto fail;
- }
-
- ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
- if (ret)
- goto fail;
-
- property->flags = flags;
- property->num_values = num_values;
- INIT_LIST_HEAD(&property->enum_list);
-
- if (name) {
- strncpy(property->name, name, DRM_PROP_NAME_LEN);
- property->name[DRM_PROP_NAME_LEN-1] = '\0';
- }
-
- list_add_tail(&property->head, &dev->mode_config.property_list);
-
- WARN_ON(!drm_property_type_valid(property));
-
- return property;
-fail:
- kfree(property->values);
- kfree(property);
- return NULL;
-}
-EXPORT_SYMBOL(drm_property_create);
-
-/**
- * drm_property_create_enum - create a new enumeration property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @props: enumeration lists with property values
- * @num_values: number of pre-defined values
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is only allowed to set one of the predefined values for enumeration
- * properties.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
- const char *name,
- const struct drm_prop_enum_list *props,
- int num_values)
-{
- struct drm_property *property;
- int i, ret;
-
- flags |= DRM_MODE_PROP_ENUM;
-
- property = drm_property_create(dev, flags, name, num_values);
- if (!property)
- return NULL;
-
- for (i = 0; i < num_values; i++) {
- ret = drm_property_add_enum(property, i,
- props[i].type,
- props[i].name);
- if (ret) {
- drm_property_destroy(dev, property);
- return NULL;
- }
- }
-
- return property;
-}
-EXPORT_SYMBOL(drm_property_create_enum);
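
A minimal driver-side sketch of the enum helper (not part of this patch; the property name "demo-mode", its values, and the connector target are hypothetical):

static const struct drm_prop_enum_list demo_modes[] = {
	{ 0, "off" },
	{ 1, "on" },
	{ 2, "auto" },
};

static void demo_create_enum_prop(struct drm_device *dev,
				  struct drm_connector *connector)
{
	struct drm_property *prop;

	prop = drm_property_create_enum(dev, 0, "demo-mode",
					demo_modes, ARRAY_SIZE(demo_modes));
	if (prop)
		drm_object_attach_property(&connector->base, prop, 0);
}
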
-
-/**
- * drm_property_create_bitmask - create a new bitmask property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @props: enumeration lists with property bitflags
- * @num_props: size of the @props array
- * @supported_bits: bitmask of all supported enumeration values
- *
- * This creates a new bitmask drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Compared to plain enumeration properties, userspace is allowed to set any
- * or'ed-together combination of the predefined property bitflag values.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
- int flags, const char *name,
- const struct drm_prop_enum_list *props,
- int num_props,
- uint64_t supported_bits)
-{
- struct drm_property *property;
- int i, ret, index = 0;
- int num_values = hweight64(supported_bits);
-
- flags |= DRM_MODE_PROP_BITMASK;
-
- property = drm_property_create(dev, flags, name, num_values);
- if (!property)
- return NULL;
- for (i = 0; i < num_props; i++) {
- if (!(supported_bits & (1ULL << props[i].type)))
- continue;
-
- if (WARN_ON(index >= num_values)) {
- drm_property_destroy(dev, property);
- return NULL;
- }
-
- ret = drm_property_add_enum(property, index++,
- props[i].type,
- props[i].name);
- if (ret) {
- drm_property_destroy(dev, property);
- return NULL;
- }
- }
-
- return property;
-}
-EXPORT_SYMBOL(drm_property_create_bitmask);
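
And the bitmask variant, where only bits set in @supported_bits become selectable (a sketch with hypothetical names; here userspace may set any OR of bits 0 and 1, i.e. the values 0 through 3):

static const struct drm_prop_enum_list demo_bits[] = {
	{ 0, "feature-a" },		/* bit 0 */
	{ 1, "feature-b" },		/* bit 1 */
	{ 2, "feature-c" },		/* bit 2, filtered out below */
};

static struct drm_property *demo_create_bitmask(struct drm_device *dev)
{
	return drm_property_create_bitmask(dev, 0, "demo-features",
					   demo_bits, ARRAY_SIZE(demo_bits),
					   0x3 /* bits 0 and 1 only */);
}
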
-
-static struct drm_property *property_create_range(struct drm_device *dev,
- int flags, const char *name,
- uint64_t min, uint64_t max)
-{
- struct drm_property *property;
-
- property = drm_property_create(dev, flags, name, 2);
- if (!property)
- return NULL;
-
- property->values[0] = min;
- property->values[1] = max;
-
- return property;
-}
-
-/**
- * drm_property_create_range - create a new unsigned ranged property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @min: minimum value of the property
- * @max: maximum value of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is allowed to set any unsigned integer value in the [min, max]
- * range, inclusive.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
- const char *name,
- uint64_t min, uint64_t max)
-{
- return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
- name, min, max);
-}
-EXPORT_SYMBOL(drm_property_create_range);
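
The range helper reduces to the same two-value backing store shown in property_create_range() above; a short sketch (hypothetical property name and plane target):

static void demo_create_range(struct drm_device *dev, struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_property_create_range(dev, 0, "demo-brightness", 0, 255);
	if (prop)
		drm_object_attach_property(&plane->base, prop, 255);
}
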
-
-/**
- * drm_property_create_signed_range - create a new signed ranged property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @min: minimum value of the property
- * @max: maximum value of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is allowed to set any signed integer value in the [min, max]
- * range, inclusive.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
- int flags, const char *name,
- int64_t min, int64_t max)
-{
- return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
- name, I642U64(min), I642U64(max));
-}
-EXPORT_SYMBOL(drm_property_create_signed_range);
-
-/**
- * drm_property_create_object - create a new object property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- * @type: object type from DRM_MODE_OBJECT_* defines
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * Userspace is only allowed to set this property to the id of an object of
- * the given @type. Only useful for atomic properties, which is enforced.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_object(struct drm_device *dev,
- int flags, const char *name, uint32_t type)
-{
- struct drm_property *property;
-
- flags |= DRM_MODE_PROP_OBJECT;
-
- if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
- return NULL;
-
- property = drm_property_create(dev, flags, name, 1);
- if (!property)
- return NULL;
-
- property->values[0] = type;
-
- return property;
-}
-EXPORT_SYMBOL(drm_property_create_object);
-
-/**
- * drm_property_create_bool - create a new boolean property type
- * @dev: drm device
- * @flags: flags specifying the property type
- * @name: name of the property
- *
- * This creates a new generic drm property which can then be attached to a drm
- * object with drm_object_attach_property. The returned property object must be
- * freed with drm_property_destroy.
- *
- * This is implemented as a ranged property with only {0, 1} as valid values.
- *
- * Returns:
- * A pointer to the newly created property on success, NULL on failure.
- */
-struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
- const char *name)
-{
- return drm_property_create_range(dev, flags, name, 0, 1);
-}
-EXPORT_SYMBOL(drm_property_create_bool);
-
-/**
- * drm_property_add_enum - add a possible value to an enumeration property
- * @property: enumeration property to change
- * @index: index of the new enumeration
- * @value: value of the new enumeration
- * @name: symbolic name of the new enumeration
- *
- * This function adds enumerations to a property.
- *
- * Its use is deprecated; drivers should use one of the more specific helpers
- * to directly create the property with all enumerations already attached.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_property_add_enum(struct drm_property *property, int index,
- uint64_t value, const char *name)
-{
- struct drm_property_enum *prop_enum;
-
- if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
- drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
- return -EINVAL;
-
- /*
- * Bitmask enum properties have the additional constraint of values
- * from 0 to 63
- */
- if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
- (value > 63))
- return -EINVAL;
-
- if (!list_empty(&property->enum_list)) {
- list_for_each_entry(prop_enum, &property->enum_list, head) {
- if (prop_enum->value == value) {
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
- return 0;
- }
- }
- }
-
- prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
- if (!prop_enum)
- return -ENOMEM;
-
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
- prop_enum->value = value;
-
- property->values[index] = value;
- list_add_tail(&prop_enum->head, &property->enum_list);
- return 0;
-}
-EXPORT_SYMBOL(drm_property_add_enum);
-
-/**
- * drm_property_destroy - destroy a drm property
- * @dev: drm device
- * @property: property to destroy
- *
- * This function frees a property including any attached resources like
- * enumeration values.
- */
-void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
-{
- struct drm_property_enum *prop_enum, *pt;
-
- list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
- list_del(&prop_enum->head);
- kfree(prop_enum);
- }
-
- if (property->num_values)
- kfree(property->values);
- drm_mode_object_unregister(dev, &property->base);
- list_del(&property->head);
- kfree(property);
-}
-EXPORT_SYMBOL(drm_property_destroy);
-
-/**
- * drm_object_attach_property - attach a property to a modeset object
- * @obj: drm modeset object
- * @property: property to attach
- * @init_val: initial value of the property
- *
- * This attaches the given property to the modeset object with the given initial
- * value. This function does not return an error; if the statically sized
- * property array is already full it emits a WARN and drops the property.
- */
-void drm_object_attach_property(struct drm_mode_object *obj,
- struct drm_property *property,
- uint64_t init_val)
-{
- int count = obj->properties->count;
-
- if (count == DRM_OBJECT_MAX_PROPERTY) {
- WARN(1, "Failed to attach object property (type: 0x%x). Please "
- "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
- "you see this message on the same object type.\n",
- obj->type);
- return;
- }
-
- obj->properties->properties[count] = property;
- obj->properties->values[count] = init_val;
- obj->properties->count++;
- if (property->flags & DRM_MODE_PROP_ATOMIC)
- obj->properties->atomic_count++;
-}
-EXPORT_SYMBOL(drm_object_attach_property);
-
-/**
- * drm_object_property_set_value - set the value of a property
- * @obj: drm mode object to set property value for
- * @property: property to set
- * @val: value the property should be set to
- *
- * This function sets a given property on a given object. This function only
- * changes the software state of the property, it does not call into the
- * driver's ->set_property callback.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_set_value(struct drm_mode_object *obj,
- struct drm_property *property, uint64_t val)
-{
- int i;
-
- for (i = 0; i < obj->properties->count; i++) {
- if (obj->properties->properties[i] == property) {
- obj->properties->values[i] = val;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(drm_object_property_set_value);
-
-/**
- * drm_object_property_get_value - retrieve the value of a property
- * @obj: drm mode object to get property value from
- * @property: property to retrieve
- * @val: storage for the property value
- *
- * This function retrieves the software state of the given property for the
- * given object. Since there is no driver callback to retrieve the current
- * property value, this might be out of sync with the hardware, depending upon
- * the driver and property.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_get_value(struct drm_mode_object *obj,
- struct drm_property *property, uint64_t *val)
-{
- int i;
-
- /* read-only properties bypass atomic mechanism and still store
- * their value in obj->properties->values[].. mostly to avoid
- * having to deal w/ EDID and similar props in atomic paths:
- */
- if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
- !(property->flags & DRM_MODE_PROP_IMMUTABLE))
- return drm_atomic_get_property(obj, property, val);
-
- for (i = 0; i < obj->properties->count; i++) {
- if (obj->properties->properties[i] == property) {
- *val = obj->properties->values[i];
- return 0;
- }
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(drm_object_property_get_value);
-
-/**
- * drm_mode_getproperty_ioctl - get the property metadata
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the metadata for a given property, like the different
- * possible values for an enum property or the limits for a range property.
- *
- * Blob properties are special: their value is not returned here; userspace
- * must use the dedicated get_blob ioctl to read a blob's contents.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getproperty_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_get_property *out_resp = data;
- struct drm_property *property;
- int enum_count = 0;
- int value_count = 0;
- int ret = 0, i;
- int copied;
- struct drm_property_enum *prop_enum;
- struct drm_mode_property_enum __user *enum_ptr;
- uint64_t __user *values_ptr;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
- property = drm_property_find(dev, out_resp->prop_id);
- if (!property) {
- ret = -ENOENT;
- goto done;
- }
-
- if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
- drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
- list_for_each_entry(prop_enum, &property->enum_list, head)
- enum_count++;
- }
-
- value_count = property->num_values;
-
- strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
- out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
- out_resp->flags = property->flags;
-
- if ((out_resp->count_values >= value_count) && value_count) {
- values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
- for (i = 0; i < value_count; i++) {
- if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
- }
- }
- out_resp->count_values = value_count;
-
- if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
- drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
- if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
- copied = 0;
- enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
- list_for_each_entry(prop_enum, &property->enum_list, head) {
-
- if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
- ret = -EFAULT;
- goto done;
- }
-
- if (copy_to_user(&enum_ptr[copied].name,
- &prop_enum->name, DRM_PROP_NAME_LEN)) {
- ret = -EFAULT;
- goto done;
- }
- copied++;
- }
- }
- out_resp->count_enum_blobs = enum_count;
- }
-
- /*
- * NOTE: The idea seems to have been to use this to read all the blob
- * property values. But nothing ever added them to the corresponding
- * list, userspace always used the special-purpose get_blob ioctl to
- * read the value for a blob property. It also doesn't make a lot of
- * sense to return values here when everything else is just metadata for
- * the property itself.
- */
- if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
- out_resp->count_enum_blobs = 0;
-done:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-
-static void drm_property_free_blob(struct kref *kref)
-{
- struct drm_property_blob *blob =
- container_of(kref, struct drm_property_blob, base.refcount);
-
- mutex_lock(&blob->dev->mode_config.blob_lock);
- list_del(&blob->head_global);
- mutex_unlock(&blob->dev->mode_config.blob_lock);
-
- drm_mode_object_unregister(blob->dev, &blob->base);
-
- kfree(blob);
-}
-
-/**
- * drm_property_create_blob - Create new blob property
- * @dev: DRM device to create property for
- * @length: Length to allocate for blob data
- * @data: If specified, copies data into blob
- *
- * Creates a new blob property for a specified DRM device, optionally
- * copying data.
- *
- * Returns:
- * New blob property with a single reference on success, or an ERR_PTR
- * value on failure.
- */
-struct drm_property_blob *
-drm_property_create_blob(struct drm_device *dev, size_t length,
- const void *data)
-{
- struct drm_property_blob *blob;
- int ret;
-
- if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
- return ERR_PTR(-EINVAL);
-
- blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
- if (!blob)
- return ERR_PTR(-ENOMEM);
-
- /* This must be explicitly initialised, so we can safely call list_del
- * on it in the removal handler, even if it isn't in a file list. */
- INIT_LIST_HEAD(&blob->head_file);
- blob->length = length;
- blob->dev = dev;
-
- if (data)
- memcpy(blob->data, data, length);
-
- ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
- true, drm_property_free_blob);
- if (ret) {
- kfree(blob);
- return ERR_PTR(-EINVAL);
- }
-
- mutex_lock(&dev->mode_config.blob_lock);
- list_add_tail(&blob->head_global,
- &dev->mode_config.property_blob_list);
- mutex_unlock(&dev->mode_config.blob_lock);
-
- return blob;
-}
-EXPORT_SYMBOL(drm_property_create_blob);
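
A sketch of creating a blob and publishing its id through an object property (assumed driver code; the payload, object, and property are hypothetical):

static int demo_publish_blob(struct drm_device *dev,
			     struct drm_mode_object *obj,
			     struct drm_property *prop)
{
	static const u8 payload[] = { 0x01, 0x02, 0x03, 0x04 };
	struct drm_property_blob *blob;

	blob = drm_property_create_blob(dev, sizeof(payload), payload);
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	/* Userspace reads the contents back via the get_blob ioctl. */
	return drm_object_property_set_value(obj, prop, blob->base.id);
}
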
-
-/**
- * drm_property_unreference_blob - Unreference a blob property
- * @blob: Pointer to blob property
- *
- * Drop a reference on a blob property. May free the object.
- */
-void drm_property_unreference_blob(struct drm_property_blob *blob)
-{
- if (!blob)
- return;
-
- drm_mode_object_unreference(&blob->base);
-}
-EXPORT_SYMBOL(drm_property_unreference_blob);
-
-/**
- * drm_property_destroy_user_blobs - destroy all blobs created by this client
- * @dev: DRM device
- * @file_priv: destroy all blobs owned by this file handle
- */
-void drm_property_destroy_user_blobs(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_property_blob *blob, *bt;
-
- /*
- * When the file gets released that means no one else can access the
- * blob list any more, so no need to grab dev->blob_lock.
- */
- list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
- list_del_init(&blob->head_file);
- drm_property_unreference_blob(blob);
- }
-}
-
-/**
- * drm_property_reference_blob - Take a reference on an existing blob property
- * @blob: Pointer to blob property
- *
- * Take a new reference on an existing blob property.
- */
-struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
-{
- drm_mode_object_reference(&blob->base);
- return blob;
-}
-EXPORT_SYMBOL(drm_property_reference_blob);
-
-/**
- * drm_property_lookup_blob - look up a blob property and take a reference
- * @dev: drm device
- * @id: id of the blob property
- *
- * If successful, this takes an additional reference to the blob property.
- * Callers need to make sure to eventually unreference the returned property
- * again, using drm_property_unreference_blob().
- */
-struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
- uint32_t id)
-{
- struct drm_mode_object *obj;
- struct drm_property_blob *blob = NULL;
-
- obj = _object_find(dev, id, DRM_MODE_OBJECT_BLOB);
- if (obj)
- blob = obj_to_blob(obj);
- return blob;
-}
-EXPORT_SYMBOL(drm_property_lookup_blob);
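
The matching lookup/unreference pattern the comment above asks for, as a sketch (assumed driver code, with the blob id coming from a property value):

static void demo_consume_blob(struct drm_device *dev, uint32_t blob_id)
{
	struct drm_property_blob *blob;

	blob = drm_property_lookup_blob(dev, blob_id);
	if (!blob)
		return;

	/* ... consume blob->data / blob->length ... */

	drm_property_unreference_blob(blob);
}
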
-
-/**
- * drm_property_replace_global_blob - atomically replace existing blob property
- * @dev: drm device
- * @replace: location of blob property pointer to be replaced
- * @length: length of data for new blob, or 0 for no data
- * @data: content for new blob, or NULL for no data
- * @obj_holds_id: optional object for property holding blob ID
- * @prop_holds_id: optional property holding blob ID
- * Returns 0 on success or a negative error code on failure.
- *
- * This function will atomically replace a global property in the blob list,
- * optionally updating a property which holds the ID of that property. It is
- * guaranteed to be atomic: no caller will be allowed to see intermediate
- * results, and either the entire operation will succeed and clean up the
- * previous property, or it will fail and the state will be unchanged.
- *
- * If length is 0 or data is NULL, no new blob will be created, and the holding
- * property, if specified, will be set to 0.
- *
- * Access to the replace pointer is assumed to be protected by the caller, e.g.
- * by holding the relevant modesetting object lock for its parent.
- *
- * For example, a drm_connector has a 'PATH' property, which contains the ID
- * of a blob property with the value of the MST path information. Calling this
- * function with replace pointing to the connector's path_blob_ptr, length and
- * data set for the new path information, obj_holds_id set to the connector's
- * base object, and prop_holds_id set to the path property name, will perform
- * a completely atomic update. The access to path_blob_ptr is protected by the
- * caller holding a lock on the connector.
- */
-static int drm_property_replace_global_blob(struct drm_device *dev,
- struct drm_property_blob **replace,
- size_t length,
- const void *data,
- struct drm_mode_object *obj_holds_id,
- struct drm_property *prop_holds_id)
-{
- struct drm_property_blob *new_blob = NULL;
- struct drm_property_blob *old_blob = NULL;
- int ret;
-
- WARN_ON(replace == NULL);
-
- old_blob = *replace;
-
- if (length && data) {
- new_blob = drm_property_create_blob(dev, length, data);
- if (IS_ERR(new_blob))
- return PTR_ERR(new_blob);
- }
-
- /* This does not need to be synchronised with blob_lock, as the
- * get_properties ioctl locks all modesetting objects, and
- * obj_holds_id must be locked before calling here, so we cannot
- * have its value out of sync with the list membership modified
- * below under blob_lock. */
- if (obj_holds_id) {
- ret = drm_object_property_set_value(obj_holds_id,
- prop_holds_id,
- new_blob ?
- new_blob->base.id : 0);
- if (ret != 0)
- goto err_created;
- }
-
- drm_property_unreference_blob(old_blob);
- *replace = new_blob;
-
- return 0;
-
-err_created:
- drm_property_unreference_blob(new_blob);
- return ret;
-}
-
-/**
- * drm_mode_getblob_ioctl - get the contents of a blob property value
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the contents of a blob property. The value stored in
- * an object's blob property is just a normal modeset object id.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_getblob_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_get_blob *out_resp = data;
- struct drm_property_blob *blob;
- int ret = 0;
- void __user *blob_ptr;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- blob = drm_property_lookup_blob(dev, out_resp->blob_id);
- if (!blob)
- return -ENOENT;
-
- if (out_resp->length == blob->length) {
- blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_to_user(blob_ptr, blob->data, blob->length)) {
- ret = -EFAULT;
- goto unref;
- }
- }
- out_resp->length = blob->length;
-unref:
- drm_property_unreference_blob(blob);
-
- return ret;
-}
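
Userspace typically drives this as a two-step call: first with length 0 so the kernel only reports the blob size, then again with a buffer of exactly that size (a sketch against the uapi headers; 'fd' is an open DRM device node):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static void *demo_read_blob(int fd, uint32_t id, uint32_t *length)
{
	struct drm_mode_get_blob get = { .blob_id = id };
	void *data;

	/* First call: length == 0, so only get.length is filled in. */
	if (ioctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &get) || !get.length)
		return NULL;

	data = malloc(get.length);
	if (!data)
		return NULL;

	/* Second call: lengths match, so the kernel copies the data. */
	get.data = (uintptr_t)data;
	if (ioctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &get)) {
		free(data);
		return NULL;
	}

	*length = get.length;
	return data;
}
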
-
-/**
- * drm_mode_createblob_ioctl - create a new blob property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function creates a new blob property with user-defined values. In order
- * to give us sensible validation and checking when creating, rather than at
- * every potential use, we also require a type to be provided upfront.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_createblob_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_create_blob *out_resp = data;
- struct drm_property_blob *blob;
- void __user *blob_ptr;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- blob = drm_property_create_blob(dev, out_resp->length, NULL);
- if (IS_ERR(blob))
- return PTR_ERR(blob);
-
- blob_ptr = (void __user *)(unsigned long)out_resp->data;
- if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
- ret = -EFAULT;
- goto out_blob;
- }
-
- /* Dropping the lock between create_blob and our access here is safe
- * as only the same file_priv can remove the blob; at this point, it is
- * not associated with any file_priv. */
- mutex_lock(&dev->mode_config.blob_lock);
- out_resp->blob_id = blob->base.id;
- list_add_tail(&blob->head_file, &file_priv->blobs);
- mutex_unlock(&dev->mode_config.blob_lock);
-
- return 0;
-
-out_blob:
- drm_property_unreference_blob(blob);
- return ret;
-}
-
-/**
- * drm_mode_destroyblob_ioctl - destroy a user blob property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Destroy an existing user-defined blob property.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_destroyblob_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_destroy_blob *out_resp = data;
- struct drm_property_blob *blob = NULL, *bt;
- bool found = false;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- blob = drm_property_lookup_blob(dev, out_resp->blob_id);
- if (!blob)
- return -ENOENT;
-
- mutex_lock(&dev->mode_config.blob_lock);
- /* Ensure the property was actually created by this user. */
- list_for_each_entry(bt, &file_priv->blobs, head_file) {
- if (bt == blob) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- ret = -EPERM;
- goto err;
- }
-
- /* We must drop head_file here, because we may not be the last
- * reference on the blob. */
- list_del_init(&blob->head_file);
- mutex_unlock(&dev->mode_config.blob_lock);
-
- /* One reference from lookup, and one from the filp. */
- drm_property_unreference_blob(blob);
- drm_property_unreference_blob(blob);
-
- return 0;
-
-err:
- mutex_unlock(&dev->mode_config.blob_lock);
- drm_property_unreference_blob(blob);
-
- return ret;
-}
-
-/**
- * drm_mode_connector_set_path_property - set path property on connector
- * @connector: connector to set property on.
- * @path: path to use for property; must not be NULL.
- *
- * This creates a property to expose to userspace to specify a
- * connector path. This is mainly used for DisplayPort MST where
- * connectors have a topology and we want to allow userspace to give
- * them more meaningful names.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
- const char *path)
-{
- struct drm_device *dev = connector->dev;
- int ret;
-
- ret = drm_property_replace_global_blob(dev,
- &connector->path_blob_ptr,
- strlen(path) + 1,
- path,
- &connector->base,
- dev->mode_config.path_property);
- return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_set_path_property);
-
-/**
- * drm_mode_connector_set_tile_property - set tile property on connector
- * @connector: connector to set property on.
- *
- * This looks up the tile information for a connector, and creates a
- * property for userspace to parse if it exists. The property is in the
- * form of 8 integers using ':' as a separator.
- *
- * Returns:
- * Zero on success, errno on failure.
- */
-int drm_mode_connector_set_tile_property(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- char tile[256];
- int ret;
-
- if (!connector->has_tile) {
- ret = drm_property_replace_global_blob(dev,
- &connector->tile_blob_ptr,
- 0,
- NULL,
- &connector->base,
- dev->mode_config.tile_property);
- return ret;
- }
-
- snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
- connector->tile_group->id, connector->tile_is_single_monitor,
- connector->num_h_tile, connector->num_v_tile,
- connector->tile_h_loc, connector->tile_v_loc,
- connector->tile_h_size, connector->tile_v_size);
-
- ret = drm_property_replace_global_blob(dev,
- &connector->tile_blob_ptr,
- strlen(tile) + 1,
- tile,
- &connector->base,
- dev->mode_config.tile_property);
- return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
-
-/**
- * drm_mode_connector_update_edid_property - update the edid property of a connector
- * @connector: drm connector
- * @edid: new value of the edid property
- *
- * This function creates a new blob modeset object and assigns its id to the
- * connector's edid property.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- const struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- size_t size = 0;
- int ret;
-
- /* ignore requests to set edid when overridden */
- if (connector->override_edid)
- return 0;
-
- if (edid)
- size = EDID_LENGTH * (1 + edid->extensions);
-
- ret = drm_property_replace_global_blob(dev,
- &connector->edid_blob_ptr,
- size,
- edid,
- &connector->base,
- dev->mode_config.edid_property);
- return ret;
-}
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
-
-/* Some properties could refer to dynamic refcnt'd objects, or things that
- * need special locking to handle lifetime issues (i.e. to ensure the prop
- * value doesn't become invalid part way through the property update due to
- * a race). The value returned by reference via 'obj' should be passed back
- * to drm_property_change_valid_put() after the property is set (and the
- * object to which the property is attached has a chance to take its own
- * reference).
- */
-bool drm_property_change_valid_get(struct drm_property *property,
- uint64_t value, struct drm_mode_object **ref)
-{
- int i;
-
- if (property->flags & DRM_MODE_PROP_IMMUTABLE)
- return false;
-
- *ref = NULL;
-
- if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
- if (value < property->values[0] || value > property->values[1])
- return false;
- return true;
- } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
- int64_t svalue = U642I64(value);
-
- if (svalue < U642I64(property->values[0]) ||
- svalue > U642I64(property->values[1]))
- return false;
- return true;
- } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
- uint64_t valid_mask = 0;
-
- for (i = 0; i < property->num_values; i++)
- valid_mask |= (1ULL << property->values[i]);
- return !(value & ~valid_mask);
- } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
- struct drm_property_blob *blob;
-
- if (value == 0)
- return true;
-
- blob = drm_property_lookup_blob(property->dev, value);
- if (blob) {
- *ref = &blob->base;
- return true;
- } else {
- return false;
- }
- } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
- /* a zero value for an object property translates to null: */
- if (value == 0)
- return true;
-
- *ref = _object_find(property->dev, value, property->values[0]);
- return *ref != NULL;
- }
-
- for (i = 0; i < property->num_values; i++)
- if (property->values[i] == value)
- return true;
- return false;
-}
-
-void drm_property_change_valid_put(struct drm_property *property,
- struct drm_mode_object *ref)
-{
- if (!ref)
- return;
-
- if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
- drm_mode_object_unreference(ref);
- } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
- drm_property_unreference_blob(obj_to_blob(ref));
-}
-
-/**
- * drm_mode_connector_property_set_ioctl - set the current value of a connector property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function sets the current value for a connector's property. It also
- * calls into a driver's ->set_property callback to update the hardware state.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_connector_set_property *conn_set_prop = data;
- struct drm_mode_obj_set_property obj_set_prop = {
- .value = conn_set_prop->value,
- .prop_id = conn_set_prop->prop_id,
- .obj_id = conn_set_prop->connector_id,
- .obj_type = DRM_MODE_OBJECT_CONNECTOR
- };
-
- /* It does all the locking and checking we need */
- return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
-}
-
-static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
- struct drm_property *property,
- uint64_t value)
-{
- int ret = -EINVAL;
- struct drm_connector *connector = obj_to_connector(obj);
-
- /* Do DPMS ourselves */
- if (property == connector->dev->mode_config.dpms_property) {
- ret = (*connector->funcs->dpms)(connector, (int)value);
- } else if (connector->funcs->set_property)
- ret = connector->funcs->set_property(connector, property, value);
-
- /* store the property value if successful */
- if (!ret)
- drm_object_property_set_value(&connector->base, property, value);
- return ret;
-}
-
-static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
- struct drm_property *property,
- uint64_t value)
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
{
int ret = -EINVAL;
struct drm_crtc *crtc = obj_to_crtc(obj);
@@ -5004,497 +904,6 @@ static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
}
/**
- * drm_mode_plane_set_obj_prop - set the value of a property
- * @plane: drm plane object to set property value for
- * @property: property to set
- * @value: value the property should be set to
- *
- * This functions sets a given property on a given plane object. This function
- * calls the driver's ->set_property callback and changes the software state of
- * the property if the callback succeeds.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t value)
-{
- int ret = -EINVAL;
- struct drm_mode_object *obj = &plane->base;
-
- if (plane->funcs->set_property)
- ret = plane->funcs->set_property(plane, property, value);
- if (!ret)
- drm_object_property_set_value(obj, property, value);
-
- return ret;
-}
-EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
-
-/**
- * drm_mode_obj_get_properties_ioctl - get the current value of a object's property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function retrieves the current value for an object's property. Compared
- * to the connector specific ioctl this one is extended to also work on crtc and
- * plane objects.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_obj_get_properties *arg = data;
- struct drm_mode_object *obj;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
-
- obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
- if (!obj) {
- ret = -ENOENT;
- goto out;
- }
- if (!obj->properties) {
- ret = -EINVAL;
- goto out_unref;
- }
-
- ret = get_properties(obj, file_priv->atomic,
- (uint32_t __user *)(unsigned long)(arg->props_ptr),
- (uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
- &arg->count_props);
-
-out_unref:
- drm_mode_object_unreference(obj);
-out:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-
-/**
- * drm_mode_obj_set_property_ioctl - set the current value of an object's property
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This function sets the current value for an object's property. It also calls
- * into a driver's ->set_property callback to update the hardware state.
- * Compared to the connector specific ioctl this one is extended to also work on
- * crtc and plane objects.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_mode_obj_set_property *arg = data;
- struct drm_mode_object *arg_obj;
- struct drm_mode_object *prop_obj;
- struct drm_property *property;
- int i, ret = -EINVAL;
- struct drm_mode_object *ref;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
-
- arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
- if (!arg_obj) {
- ret = -ENOENT;
- goto out;
- }
- if (!arg_obj->properties)
- goto out_unref;
-
- for (i = 0; i < arg_obj->properties->count; i++)
- if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
- break;
-
- if (i == arg_obj->properties->count)
- goto out_unref;
-
- prop_obj = drm_mode_object_find(dev, arg->prop_id,
- DRM_MODE_OBJECT_PROPERTY);
- if (!prop_obj) {
- ret = -ENOENT;
- goto out_unref;
- }
- property = obj_to_property(prop_obj);
-
- if (!drm_property_change_valid_get(property, arg->value, &ref))
- goto out_unref;
-
- switch (arg_obj->type) {
- case DRM_MODE_OBJECT_CONNECTOR:
- ret = drm_mode_connector_set_obj_prop(arg_obj, property,
- arg->value);
- break;
- case DRM_MODE_OBJECT_CRTC:
- ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
- break;
- case DRM_MODE_OBJECT_PLANE:
- ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
- property, arg->value);
- break;
- }
-
- drm_property_change_valid_put(property, ref);
-
-out_unref:
- drm_mode_object_unreference(arg_obj);
-out:
- drm_modeset_unlock_all(dev);
- return ret;
-}
-
-/**
- * drm_mode_connector_attach_encoder - attach a connector to an encoder
- * @connector: connector to attach
- * @encoder: encoder to attach @connector to
- *
- * This function links up a connector to an encoder. Note that the routing
- * restrictions between encoders and crtcs are exposed to userspace through the
- * possible_clones and possible_crtcs bitmasks.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
- struct drm_encoder *encoder)
-{
- int i;
-
- /*
- * In the past, drivers have attempted to model the static association
- * of connector to encoder in simple connector/encoder devices using a
- * direct assignment of connector->encoder = encoder. This connection
- * is a logical one and the responsibility of the core, so drivers are
- * expected not to mess with this.
- *
- * Note that the error return should've been enough here, but a large
- * majority of drivers ignores the return value, so add in a big WARN
- * to get people's attention.
- */
- if (WARN_ON(connector->encoder))
- return -EINVAL;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0) {
- connector->encoder_ids[i] = encoder->base.id;
- return 0;
- }
- }
- return -ENOMEM;
-}
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
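
Typical init-time usage, right after the connector and encoder objects are registered (a sketch; as the comment above notes, most drivers ignore the return value, hence the WARN):

static int demo_wire_output(struct drm_connector *connector,
			    struct drm_encoder *encoder)
{
	/* Fails with -ENOMEM once all DRM_CONNECTOR_MAX_ENCODER slots are
	 * taken, and WARNs if connector->encoder was assigned directly. */
	return drm_mode_connector_attach_encoder(connector, encoder);
}
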
-
-/**
- * drm_mode_crtc_set_gamma_size - set the gamma table size
- * @crtc: CRTC to set the gamma table size for
- * @gamma_size: size of the gamma table
- *
- * Drivers which support gamma tables should set this to the supported gamma
- * table size when initializing the CRTC. Currently the drm core only supports a
- * fixed gamma table size.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
- int gamma_size)
-{
- uint16_t *r_base, *g_base, *b_base;
- int i;
-
- crtc->gamma_size = gamma_size;
-
- crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
- GFP_KERNEL);
- if (!crtc->gamma_store) {
- crtc->gamma_size = 0;
- return -ENOMEM;
- }
-
- r_base = crtc->gamma_store;
- g_base = r_base + gamma_size;
- b_base = g_base + gamma_size;
- for (i = 0; i < gamma_size; i++) {
- r_base[i] = i << 8;
- g_base[i] = i << 8;
- b_base[i] = i << 8;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
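
A sketch of the usual CRTC-init call, relying on the layout set up above (256 red entries, then 256 green, then 256 blue, preloaded with a linear i << 8 ramp):

static int demo_init_gamma(struct drm_crtc *crtc)
{
	/* Legacy userspace then drives the gamma_set/gamma_get ioctls
	 * with crtc_lut->gamma_size == 256. */
	return drm_mode_crtc_set_gamma_size(crtc, 256);
}
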
-
-/**
- * drm_mode_gamma_set_ioctl - set the gamma table
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
- * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_crtc *crtc;
- void *r_base, *g_base, *b_base;
- int size;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
- crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
-
- if (crtc->funcs->gamma_set == NULL) {
- ret = -ENOSYS;
- goto out;
- }
-
- /* memcpy into gamma store */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
- goto out;
- }
-
- size = crtc_lut->gamma_size * (sizeof(uint16_t));
- r_base = crtc->gamma_store;
- if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- g_base = r_base + size;
- if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- b_base = g_base + size;
- if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
-
-out:
- drm_modeset_unlock_all(dev);
- return ret;
-
-}
-
-/**
- * drm_mode_gamma_get_ioctl - get the gamma table
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * Copy the current gamma table into the storage provided. This also provides
- * the gamma table size the driver expects, which can be used to size the
- * allocated storage.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_lut *crtc_lut = data;
- struct drm_crtc *crtc;
- void *r_base, *g_base, *b_base;
- int size;
- int ret = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
- crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
- if (!crtc) {
- ret = -ENOENT;
- goto out;
- }
-
- /* memcpy into gamma store */
- if (crtc_lut->gamma_size != crtc->gamma_size) {
- ret = -EINVAL;
- goto out;
- }
-
- size = crtc_lut->gamma_size * (sizeof(uint16_t));
- r_base = crtc->gamma_store;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- g_base = r_base + size;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- b_base = g_base + size;
- if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
- ret = -EFAULT;
- goto out;
- }
-out:
- drm_modeset_unlock_all(dev);
- return ret;
-}
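
From userspace both ioctls are normally reached through libdrm. A hedged
sketch of loading an identity ramp, assuming the CRTC reports a 256-entry
table and fd/crtc_id are already set up:

	/* Userspace sketch (xf86drmMode.h): program a linear gamma ramp. */
	uint16_t r[256], g[256], b[256];
	int i;

	for (i = 0; i < 256; i++)
		r[i] = g[i] = b[i] = i << 8;	/* same ramp as the kernel default */

	if (drmModeCrtcSetGamma(fd, crtc_id, 256, r, g, b))
		fprintf(stderr, "SETGAMMA: %s\n", strerror(errno));
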
-
-/**
- * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
- * @dev: DRM device
- * @data: ioctl data
- * @file_priv: DRM file info
- *
- * This schedules an asynchronous update on a given CRTC, called page flip.
- * Optionally a drm event is generated to signal the completion of the flip.
- * Generic drivers cannot assume that a pageflip with changed framebuffer
- * properties (including driver specific metadata like tiling layout) will work,
- * but some drivers support e.g. pixel format changes through the pageflip
- * ioctl.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_crtc_page_flip *page_flip = data;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb = NULL;
- struct drm_pending_vblank_event *e = NULL;
- int ret = -EINVAL;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
- page_flip->reserved != 0)
- return -EINVAL;
-
- if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
- return -EINVAL;
-
- crtc = drm_crtc_find(dev, page_flip->crtc_id);
- if (!crtc)
- return -ENOENT;
-
- drm_modeset_lock_crtc(crtc, crtc->primary);
- if (crtc->primary->fb == NULL) {
- /* The framebuffer is currently unbound, presumably
- * due to a hotplug event, that userspace has not
- * yet discovered.
- */
- ret = -EBUSY;
- goto out;
- }
-
- if (crtc->funcs->page_flip == NULL)
- goto out;
-
- fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
- if (!fb) {
- ret = -ENOENT;
- goto out;
- }
-
- if (crtc->state) {
- const struct drm_plane_state *state = crtc->primary->state;
-
- ret = check_src_coords(state->src_x, state->src_y,
- state->src_w, state->src_h, fb);
- } else {
- ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
- }
- if (ret)
- goto out;
-
- if (crtc->primary->fb->pixel_format != fb->pixel_format) {
- DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- e = kzalloc(sizeof *e, GFP_KERNEL);
- if (!e) {
- ret = -ENOMEM;
- goto out;
- }
- e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
- e->event.base.length = sizeof(e->event);
- e->event.user_data = page_flip->user_data;
- ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
- if (ret) {
- kfree(e);
- goto out;
- }
- }
-
- crtc->primary->old_fb = crtc->primary->fb;
- ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
- if (ret) {
- if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
- drm_event_cancel_free(dev, &e->base);
- /* Keep the old fb, don't unref it. */
- crtc->primary->old_fb = NULL;
- } else {
- crtc->primary->fb = fb;
- /* Unref only the old framebuffer. */
- fb = NULL;
- }
-
-out:
- if (fb)
- drm_framebuffer_unreference(fb);
- if (crtc->primary->old_fb)
- drm_framebuffer_unreference(crtc->primary->old_fb);
- crtc->primary->old_fb = NULL;
- drm_modeset_unlock_crtc(crtc);
-
- return ret;
-}
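
The matching userspace pattern, sketched with libdrm (crtc_id and
next_fb_id assumed; the completion event arrives on the DRM fd and is
dispatched by drmHandleEvent()):

	static volatile int flip_pending;

	static void flip_done(int fd, unsigned int sequence, unsigned int tv_sec,
			      unsigned int tv_usec, void *user_data)
	{
		flip_pending = 0;	/* DRM_EVENT_FLIP_COMPLETE received */
	}

	static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t next_fb_id)
	{
		drmEventContext evctx = {
			.version = DRM_EVENT_CONTEXT_VERSION,
			.page_flip_handler = flip_done,
		};

		flip_pending = 1;
		if (drmModePageFlip(fd, crtc_id, next_fb_id,
				    DRM_MODE_PAGE_FLIP_EVENT, NULL))
			return -1;	/* e.g. EBUSY while a flip is outstanding */

		while (flip_pending)
			drmHandleEvent(fd, &evctx);
		return 0;
	}
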
-
-/**
* drm_mode_config_reset - call ->reset callbacks
* @dev: drm device
*
@@ -5639,37 +1048,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
}
/**
- * drm_rotation_simplify() - Try to simplify the rotation
- * @rotation: Rotation to be simplified
- * @supported_rotations: Supported rotations
- *
- * Attempt to simplify the rotation to a form that is supported.
- * E.g. if the hardware supports everything except DRM_REFLECT_X
- * one could call this function like this:
- *
- * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
- * BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
- * BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
- *
- * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
- * transforms the hardware supports, this function may not
- * be able to produce a supported transform, so the caller should
- * check the result afterwards.
- */
-unsigned int drm_rotation_simplify(unsigned int rotation,
- unsigned int supported_rotations)
-{
- if (rotation & ~supported_rotations) {
- rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
- rotation = (rotation & DRM_REFLECT_MASK) |
- BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
- }
-
- return rotation;
-}
-EXPORT_SYMBOL(drm_rotation_simplify);
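
A concrete instance of the example in the kerneldoc above, as a sketch
using the BIT()-based encoding this (removed) helper still expects:

	/* Hardware: all rotations plus reflect-Y, but no reflect-X. */
	unsigned int supported = BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
				 BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
				 BIT(DRM_REFLECT_Y);
	unsigned int rotation = BIT(DRM_ROTATE_0) | BIT(DRM_REFLECT_X);

	rotation = drm_rotation_simplify(rotation, supported);
	/*
	 * rotation is now BIT(DRM_ROTATE_180) | BIT(DRM_REFLECT_Y): both
	 * reflect bits were toggled and the rotation advanced by 180
	 * degrees, an equivalent transform. Callers must still check that
	 * (rotation & ~supported) == 0, since simplification can fail.
	 */
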
-
-/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
@@ -5785,24 +1163,6 @@ void drm_mode_config_cleanup(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
-struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
- unsigned int supported_rotations)
-{
- static const struct drm_prop_enum_list props[] = {
- { DRM_ROTATE_0, "rotate-0" },
- { DRM_ROTATE_90, "rotate-90" },
- { DRM_ROTATE_180, "rotate-180" },
- { DRM_ROTATE_270, "rotate-270" },
- { DRM_REFLECT_X, "reflect-x" },
- { DRM_REFLECT_Y, "reflect-y" },
- };
-
- return drm_property_create_bitmask(dev, 0, "rotation",
- props, ARRAY_SIZE(props),
- supported_rotations);
-}
-EXPORT_SYMBOL(drm_mode_create_rotation_property);
-
/**
* DOC: Tile group
*
@@ -5901,48 +1261,3 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
return tg;
}
EXPORT_SYMBOL(drm_mode_create_tile_group);
-
-/**
- * drm_crtc_enable_color_mgmt - enable color management properties
- * @crtc: DRM CRTC
- * @degamma_lut_size: the size of the degamma lut (before CSC)
- * @has_ctm: whether to attach ctm_property for CSC matrix
- * @gamma_lut_size: the size of the gamma lut (after CSC)
- *
- * This function lets the driver enable the color correction
- * properties on a CRTC. This includes 3 degamma, csc and gamma
- * properties that userspace can set and 2 size properties to inform
- * the userspace of the lut sizes. Each of the properties are
- * optional. The gamma and degamma properties are only attached if
- * their size is not 0 and ctm_property is only attached if has_ctm is
- * true.
- */
-void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
- uint degamma_lut_size,
- bool has_ctm,
- uint gamma_lut_size)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *config = &dev->mode_config;
-
- if (degamma_lut_size) {
- drm_object_attach_property(&crtc->base,
- config->degamma_lut_property, 0);
- drm_object_attach_property(&crtc->base,
- config->degamma_lut_size_property,
- degamma_lut_size);
- }
-
- if (has_ctm)
- drm_object_attach_property(&crtc->base,
- config->ctm_property, 0);
-
- if (gamma_lut_size) {
- drm_object_attach_property(&crtc->base,
- config->gamma_lut_property, 0);
- drm_object_attach_property(&crtc->base,
- config->gamma_lut_size_property,
- gamma_lut_size);
- }
-}
-EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
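
A usage sketch from a driver's CRTC init path (the LUT sizes are
illustrative, for hardware with 33-entry degamma and 257-entry gamma
tables plus a CSC matrix):

	/* Sketch: expose degamma, CSC and gamma properties on this CRTC. */
	drm_crtc_enable_color_mgmt(crtc, 33, true, 257);
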
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 604d3ef72ffa..5d2cb138eba6 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -75,35 +75,6 @@
*/
/**
- * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
- * connector list
- * @dev: drm device to operate on
- *
- * Some userspace presumes that the first connected connector is the main
- * display, where it's supposed to display e.g. the login screen. For
- * laptops, this should be the main panel. Use this function to sort all
- * (eDP/LVDS) panels to the front of the connector list, instead of
- * painstakingly trying to initialize them in the right order.
- */
-void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
-{
- struct drm_connector *connector, *tmp;
- struct list_head panel_list;
-
- INIT_LIST_HEAD(&panel_list);
-
- list_for_each_entry_safe(connector, tmp,
- &dev->mode_config.connector_list, head) {
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- list_move_tail(&connector->head, &panel_list);
- }
-
- list_splice(&panel_list, &dev->mode_config.connector_list);
-}
-EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
-
-/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
*
@@ -913,33 +884,6 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
EXPORT_SYMBOL(drm_helper_connector_dpms);
/**
- * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
- * @fb: drm_framebuffer object to fill out
- * @mode_cmd: metadata from the userspace fb creation request
- *
- * This helper can be used in a drivers fb_create callback to pre-fill the fb's
- * metadata fields.
- */
-void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- int i;
-
- fb->width = mode_cmd->width;
- fb->height = mode_cmd->height;
- for (i = 0; i < 4; i++) {
- fb->pitches[i] = mode_cmd->pitches[i];
- fb->offsets[i] = mode_cmd->offsets[i];
- fb->modifier[i] = mode_cmd->modifier[i];
- }
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
- &fb->bits_per_pixel);
- fb->pixel_format = mode_cmd->pixel_format;
- fb->flags = mode_cmd->flags;
-}
-EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
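
A hedged sketch of the intended call site, inside a driver's .fb_create
implementation (foo_framebuffer and foo_fb_funcs are illustrative):

	static struct drm_framebuffer *
	foo_fb_create(struct drm_device *dev, struct drm_file *file,
		      const struct drm_mode_fb_cmd2 *mode_cmd)
	{
		struct foo_framebuffer *foo_fb;
		int ret;

		foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
		if (!foo_fb)
			return ERR_PTR(-ENOMEM);

		/* Copy width/height/pitches/format from the userspace request. */
		drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);

		ret = drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
		if (ret) {
			kfree(foo_fb);
			return ERR_PTR(ret);
		}

		return &foo_fb->base;
	}
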
-
-/**
* drm_helper_resume_force_mode - force-restore mode setting configuration
* @dev: drm_device which should be restored
*
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
new file mode 100644
index 000000000000..28295e5d0d9e
--- /dev/null
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * This header file contains mode setting related functions and definitions
+ * which are only used within the drm kms helper module as internal
+ * implementation details and are not exported to drivers.
+ */
+
+#include <drm/drm_dp_helper.h>
+
+/* drm_fb_helper.c */
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fb_helper_modinit(void);
+#else
+static inline int drm_fb_helper_modinit(void)
+{
+ return 0;
+}
+#endif
+
+/* drm_dp_aux_dev.c */
+#ifdef CONFIG_DRM_DP_AUX_CHARDEV
+int drm_dp_aux_dev_init(void);
+void drm_dp_aux_dev_exit(void);
+int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
+#else
+static inline int drm_dp_aux_dev_init(void)
+{
+ return 0;
+}
+
+static inline void drm_dp_aux_dev_exit(void)
+{
+}
+
+static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
+{
+ return 0;
+}
+
+static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
+{
+}
+#endif
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 0c34e6d906d1..c48ba02c5365 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -33,28 +33,15 @@
/* drm_crtc.c */
-void drm_connector_ida_init(void);
-void drm_connector_ida_destroy(void);
-int drm_mode_object_get(struct drm_device *dev,
- struct drm_mode_object *obj, uint32_t obj_type);
-void drm_mode_object_unregister(struct drm_device *dev,
- struct drm_mode_object *object);
-bool drm_property_change_valid_get(struct drm_property *property,
- uint64_t value,
- struct drm_mode_object **ref);
-void drm_property_change_valid_put(struct drm_property *property,
- struct drm_mode_object *ref);
-
-int drm_plane_check_pixel_format(const struct drm_plane *plane,
- u32 format);
+int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value);
int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb);
void drm_fb_release(struct drm_file *file_priv);
-void drm_property_destroy_user_blobs(struct drm_device *dev,
- struct drm_file *file_priv);
/* dumb buffer support IOCTLs */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
@@ -64,42 +51,32 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-/* framebuffer IOCTLs */
-extern int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_addfb2(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_getfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-
/* IOCTLs */
-int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_mode_getplane_res(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_mode_getconnector(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
int drm_mode_setcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_mode_getplane(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_setplane(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_cursor_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_cursor2_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+
+/* drm_color_mgmt.c */
+
+/* IOCTLs */
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+/* drm_property.c */
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+ struct drm_file *file_priv);
+bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value,
+ struct drm_mode_object **ref);
+void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref);
+
+/* IOCTL */
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_getblob_ioctl(struct drm_device *dev,
@@ -108,17 +85,80 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_destroyblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+
+/* drm_mode_object.c */
+int drm_mode_object_get_reg(struct drm_device *dev,
+ struct drm_mode_object *obj,
+ uint32_t obj_type,
+ bool register_obj,
+ void (*obj_free_cb)(struct kref *kref));
+void drm_mode_object_register(struct drm_device *dev,
+ struct drm_mode_object *obj);
+int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type);
+struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
+void drm_mode_object_unregister(struct drm_device *dev,
+ struct drm_mode_object *object);
+int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+ uint32_t __user *prop_ptr,
+ uint64_t __user *prop_values,
+ uint32_t *arg_count_props);
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+ uint32_t prop_id);
+
+/* IOCTL */
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* drm_encoder.c */
+int drm_encoder_register_all(struct drm_device *dev);
+void drm_encoder_unregister_all(struct drm_device *dev);
+
+/* IOCTL */
int drm_mode_getencoder(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_mode_gamma_get_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_gamma_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-int drm_mode_page_flip_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+/* drm_connector.c */
+void drm_connector_ida_init(void);
+void drm_connector_ida_destroy(void);
+void drm_connector_unregister_all(struct drm_device *dev);
+int drm_connector_register_all(struct drm_device *dev);
+int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value);
+int drm_connector_create_standard_properties(struct drm_device *dev);
+
+/* IOCTL */
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getconnector(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+/* drm_framebuffer.c */
+struct drm_framebuffer *
+drm_internal_framebuffer_create(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv);
+void drm_framebuffer_free(struct kref *kref);
+int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ const struct drm_framebuffer *fb);
+
+/* IOCTL */
+int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
/* drm_atomic.c */
int drm_atomic_get_property(struct drm_mode_object *obj,
@@ -129,6 +169,23 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
-/* drm_blend.c */
-int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
- struct drm_atomic_state *state);
+
+/* drm_plane.c */
+int drm_plane_register_all(struct drm_device *dev);
+void drm_plane_unregister_all(struct drm_device *dev);
+int drm_plane_check_pixel_format(const struct drm_plane *plane,
+ u32 format);
+
+/* IOCTL */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_mode_getplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_setplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index fa10cef2ba37..1205790ed960 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -104,8 +104,8 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
root, tmp, &drm_debugfs_fops);
if (!ent) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
- root->d_name.name, files[i].name);
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
+ root, files[i].name);
kfree(tmp);
ret = -1;
goto fail;
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index ea481800ef56..3f83e2ca80ad 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -50,9 +50,8 @@ int drm_legacy_dma_setup(struct drm_device *dev)
int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
- drm_core_check_feature(dev, DRIVER_MODESET)) {
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
- }
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
@@ -81,9 +80,8 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
int i, j;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
- drm_core_check_feature(dev, DRIVER_MODESET)) {
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
return;
- }
if (!dma)
return;
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 734f86a345f6..ec1ed94b2390 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -36,6 +36,8 @@
#include <drm/drm_crtc.h>
#include <drm/drmP.h>
+#include "drm_crtc_helper_internal.h"
+
struct drm_dp_aux_dev {
unsigned index;
struct drm_dp_aux *aux;
@@ -283,12 +285,7 @@ static int auxdev_wait_atomic_t(atomic_t *p)
schedule();
return 0;
}
-/**
- * drm_dp_aux_unregister_devnode() - unregister a devnode for this aux channel
- * @aux: DisplayPort AUX channel
- *
- * Returns 0 on success or a negative error code on failure.
- */
+
void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
@@ -314,14 +311,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name);
kref_put(&aux_dev->refcount, release_drm_dp_aux_dev);
}
-EXPORT_SYMBOL(drm_dp_aux_unregister_devnode);
-/**
- * drm_dp_aux_register_devnode() - register a devnode for this aux channel
- * @aux: DisplayPort AUX channel
- *
- * Returns 0 on success or a negative error code on failure.
- */
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
@@ -347,7 +337,6 @@ error:
drm_dp_aux_unregister_devnode(aux);
return res;
}
-EXPORT_SYMBOL(drm_dp_aux_register_devnode);
int drm_dp_aux_dev_init(void)
{
@@ -369,11 +358,9 @@ out:
class_destroy(drm_dp_aux_dev_class);
return res;
}
-EXPORT_SYMBOL(drm_dp_aux_dev_init);
void drm_dp_aux_dev_exit(void)
{
unregister_chrdev(drm_dev_major, "aux");
class_destroy(drm_dp_aux_dev_class);
}
-EXPORT_SYMBOL(drm_dp_aux_dev_exit);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index eae5ef963cb7..3e6fe82c6d64 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -27,10 +27,12 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
+#include <linux/seq_file.h>
#include <drm/drm_dp_helper.h>
-#include <drm/drm_dp_aux_dev.h>
#include <drm/drmP.h>
+#include "drm_crtc_helper_internal.h"
+
/**
* DOC: dp helpers
*
@@ -223,7 +225,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
err = ret;
}
- DRM_DEBUG_KMS("too many retries, giving up\n");
+ DRM_DEBUG_KMS("Too many retries, giving up. First error: %d\n", err);
ret = err;
unlock:
@@ -438,6 +440,179 @@ int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
}
EXPORT_SYMBOL(drm_dp_link_configure);
+/**
+ * drm_dp_downstream_max_clock() - extract branch device max pixel rate
+ * for legacy VGA converter or max TMDS clock rate for others
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max clock in kHz on success or 0 if max clock not defined
+ */
+int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
+{
+ int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+ bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DETAILED_CAP_INFO_AVAILABLE;
+
+ if (!detailed_cap_info)
+ return 0;
+
+ switch (type) {
+ case DP_DS_PORT_TYPE_VGA:
+ return port_cap[1] * 8 * 1000;
+ case DP_DS_PORT_TYPE_DVI:
+ case DP_DS_PORT_TYPE_HDMI:
+ case DP_DS_PORT_TYPE_DP_DUALMODE:
+ return port_cap[1] * 2500;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_clock);
+
+/**
+ * drm_dp_downstream_max_bpc() - extract branch device max
+ * bits per component
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns max bpc on success or 0 if max bpc not defined
+ */
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
+{
+ int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+ bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DETAILED_CAP_INFO_AVAILABLE;
+ int bpc;
+
+ if (!detailed_cap_info)
+ return 0;
+
+ switch (type) {
+ case DP_DS_PORT_TYPE_VGA:
+ case DP_DS_PORT_TYPE_DVI:
+ case DP_DS_PORT_TYPE_HDMI:
+ case DP_DS_PORT_TYPE_DP_DUALMODE:
+ bpc = port_cap[2] & DP_DS_MAX_BPC_MASK;
+
+ switch (bpc) {
+ case DP_DS_8BPC:
+ return 8;
+ case DP_DS_10BPC:
+ return 10;
+ case DP_DS_12BPC:
+ return 12;
+ case DP_DS_16BPC:
+ return 16;
+ }
+ /* fall through for an unknown DP_DS_MAX_BPC value */
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_bpc);
+
+/**
+ * drm_dp_downstream_id() - identify branch device
+ * @aux: DisplayPort AUX channel
+ * @id: DisplayPort branch device id
+ *
+ * Returns the number of bytes read on success, or a negative error code
+ * on failure
+ */
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6])
+{
+ return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6);
+}
+EXPORT_SYMBOL(drm_dp_downstream_id);
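
A sketch of how a driver might combine the new downstream helpers while
validating a mode behind a branch device (dpcd and port_cap assumed to be
cached from detection):

	/* Sketch: clamp mode validation to branch-device limits, if known. */
	int max_clock = drm_dp_downstream_max_clock(dpcd, port_cap);
	int max_bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);

	if (max_clock && mode->clock > max_clock)
		return MODE_CLOCK_HIGH;
	if (max_bpc)
		bpc = min(bpc, max_bpc);
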
+
+/**
+ * drm_dp_downstream_debug() - debug DP branch devices
+ * @m: pointer for debugfs file
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @aux: DisplayPort AUX channel
+ *
+ */
+void drm_dp_downstream_debug(struct seq_file *m,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], struct drm_dp_aux *aux)
+{
+ bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DETAILED_CAP_INFO_AVAILABLE;
+ int clk;
+ int bpc;
+ char id[7];
+ int len;
+ uint8_t rev[2];
+ int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+ bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT;
+
+ seq_printf(m, "\tDP branch device present: %s\n",
+ branch_device ? "yes" : "no");
+
+ if (!branch_device)
+ return;
+
+ switch (type) {
+ case DP_DS_PORT_TYPE_DP:
+ seq_puts(m, "\t\tType: DisplayPort\n");
+ break;
+ case DP_DS_PORT_TYPE_VGA:
+ seq_puts(m, "\t\tType: VGA\n");
+ break;
+ case DP_DS_PORT_TYPE_DVI:
+ seq_puts(m, "\t\tType: DVI\n");
+ break;
+ case DP_DS_PORT_TYPE_HDMI:
+ seq_puts(m, "\t\tType: HDMI\n");
+ break;
+ case DP_DS_PORT_TYPE_NON_EDID:
+ seq_puts(m, "\t\tType: others without EDID support\n");
+ break;
+ case DP_DS_PORT_TYPE_DP_DUALMODE:
+ seq_puts(m, "\t\tType: DP++\n");
+ break;
+ case DP_DS_PORT_TYPE_WIRELESS:
+ seq_puts(m, "\t\tType: Wireless\n");
+ break;
+ default:
+ seq_puts(m, "\t\tType: N/A\n");
+ }
+
+ /* the sink does not NUL-terminate the id; zero the buffer first */
+ memset(id, 0, sizeof(id));
+ drm_dp_downstream_id(aux, id);
+ seq_printf(m, "\t\tID: %s\n", id);
+
+ len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1);
+ if (len > 0)
+ seq_printf(m, "\t\tHW: %d.%d\n",
+ (rev[0] & 0xf0) >> 4, rev[0] & 0xf);
+
+ len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
+ if (len > 0)
+ seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
+
+ if (detailed_cap_info) {
+ clk = drm_dp_downstream_max_clock(dpcd, port_cap);
+
+ if (clk > 0) {
+ if (type == DP_DS_PORT_TYPE_VGA)
+ seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
+ else
+ seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
+ }
+
+ bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
+
+ if (bpc > 0)
+ seq_printf(m, "\t\tMax bpc: %d\n", bpc);
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_debug);
+
/*
* I2C-over-AUX implementation
*/
@@ -574,7 +749,17 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (ret == -EBUSY)
continue;
- DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+ /*
+ * While timeouts can be errors, they're usually normal
+ * behavior (for instance, when a driver tries to
+ * communicate with a non-existent DisplayPort device).
+ * Avoid spamming the kernel log with timeout errors.
+ */
+ if (ret == -ETIMEDOUT)
+ DRM_DEBUG_KMS_RATELIMITED("transaction timed out\n");
+ else
+ DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+
return ret;
}
@@ -790,6 +975,12 @@ static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
mutex_unlock(&i2c_to_aux(i2c)->hw_mutex);
}
+static const struct i2c_lock_operations drm_dp_i2c_lock_ops = {
+ .lock_bus = lock_bus,
+ .trylock_bus = trylock_bus,
+ .unlock_bus = unlock_bus,
+};
+
/**
* drm_dp_aux_init() - minimally initialise an aux channel
* @aux: DisplayPort AUX channel
@@ -807,9 +998,7 @@ void drm_dp_aux_init(struct drm_dp_aux *aux)
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
- aux->ddc.lock_bus = lock_bus;
- aux->ddc.trylock_bus = trylock_bus;
- aux->ddc.unlock_bus = unlock_bus;
+ aux->ddc.lock_ops = &drm_dp_i2c_lock_ops;
}
EXPORT_SYMBOL(drm_dp_aux_init);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 04e457117980..aa644487749c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref)
/* no need to clean up vcpi
* as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
}
kfree(port);
}
@@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_put_port(port);
goto out;
}
- if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector);
}
@@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
if (!port->input && port->vcpi.vcpi > 0) {
drm_dp_mst_reset_vcpi_slots(mgr, port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index be27ed36f56e..6efdba4993fc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -33,7 +33,6 @@
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
-#include <drm/drm_core.h>
#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"
@@ -46,8 +45,8 @@
unsigned int drm_debug = 0;
EXPORT_SYMBOL(drm_debug);
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
+MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
+MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
@@ -63,37 +62,52 @@ static struct idr drm_minors_idr;
static struct dentry *drm_debugfs_root;
-void drm_err(const char *format, ...)
+#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
+
+void drm_dev_printk(const struct device *dev, const char *level,
+ unsigned int category, const char *function_name,
+ const char *prefix, const char *format, ...)
{
struct va_format vaf;
va_list args;
- va_start(args, format);
+ if (category != DRM_UT_NONE && !(drm_debug & category))
+ return;
+ va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
- printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
- __builtin_return_address(0), &vaf);
+ if (dev)
+ dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
+ &vaf);
+ else
+ printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
va_end(args);
}
-EXPORT_SYMBOL(drm_err);
+EXPORT_SYMBOL(drm_dev_printk);
-void drm_ut_debug_printk(const char *function_name, const char *format, ...)
+void drm_printk(const char *level, unsigned int category,
+ const char *format, ...)
{
struct va_format vaf;
va_list args;
+ if (category != DRM_UT_NONE && !(drm_debug & category))
+ return;
+
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
- printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
+ printk("%s" "[" DRM_NAME ":%ps]%s %pV",
+ level, __builtin_return_address(0),
+ strcmp(level, KERN_ERR) == 0 ? " *ERROR*" : "", &vaf);
va_end(args);
}
-EXPORT_SYMBOL(drm_ut_debug_printk);
+EXPORT_SYMBOL(drm_printk);
/*
* DRM Minors
@@ -112,7 +126,7 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
unsigned int type)
{
switch (type) {
- case DRM_MINOR_LEGACY:
+ case DRM_MINOR_PRIMARY:
return &dev->primary;
case DRM_MINOR_RENDER:
return &dev->render;
@@ -325,6 +339,9 @@ void drm_minor_release(struct drm_minor *minor)
static int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
+ if (!name)
+ return -EINVAL;
+
kfree(dev->unique);
dev->unique = kstrdup(name, GFP_KERNEL);
@@ -512,7 +529,7 @@ int drm_dev_init(struct drm_device *dev,
goto err_minors;
}
- ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
+ ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
if (ret)
goto err_minors;
@@ -545,7 +562,7 @@ err_ctxbitmap:
drm_legacy_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
err_minors:
- drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_PRIMARY);
drm_minor_free(dev, DRM_MINOR_RENDER);
drm_minor_free(dev, DRM_MINOR_CONTROL);
drm_fs_inode_free(dev->anon_inode);
@@ -575,7 +592,7 @@ EXPORT_SYMBOL(drm_dev_init);
* own struct should look at using drm_dev_init() instead.
*
* RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
+ * Pointer to new DRM device, or ERR_PTR on failure.
*/
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
struct device *parent)
@@ -585,12 +602,12 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = drm_dev_init(dev, driver, parent);
if (ret) {
kfree(dev);
- return NULL;
+ return ERR_PTR(ret);
}
return dev;
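
Since this changes the error convention, callers need the IS_ERR() pattern
instead of a NULL check; a sketch of the new calling convention:

	ddev = drm_dev_alloc(&foo_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);	/* was: if (!ddev) return -ENOMEM; */
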
@@ -608,7 +625,7 @@ static void drm_dev_release(struct kref *ref)
drm_ht_remove(&dev->map_hash);
drm_fs_inode_free(dev->anon_inode);
- drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_PRIMARY);
drm_minor_free(dev, DRM_MINOR_RENDER);
drm_minor_free(dev, DRM_MINOR_CONTROL);
@@ -684,7 +701,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_minors;
- ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
+ ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
if (ret)
goto err_minors;
@@ -701,7 +718,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto out_unlock;
err_minors:
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
@@ -741,7 +758,7 @@ void drm_dev_unregister(struct drm_device *dev)
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
drm_legacy_rmmap(dev, r_list->map);
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
@@ -807,53 +824,48 @@ static const struct file_operations drm_stub_fops = {
.llseek = noop_llseek,
};
+static void drm_core_exit(void)
+{
+ unregister_chrdev(DRM_MAJOR, "drm");
+ debugfs_remove(drm_debugfs_root);
+ drm_sysfs_destroy();
+ idr_destroy(&drm_minors_idr);
+ drm_connector_ida_destroy();
+ drm_global_release();
+}
+
static int __init drm_core_init(void)
{
- int ret = -ENOMEM;
+ int ret;
drm_global_init();
drm_connector_ida_init();
idr_init(&drm_minors_idr);
- if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
- goto err_p1;
-
ret = drm_sysfs_init();
if (ret < 0) {
- printk(KERN_ERR "DRM: Error creating drm class.\n");
- goto err_p2;
+ DRM_ERROR("Cannot create DRM class: %d\n", ret);
+ goto error;
}
drm_debugfs_root = debugfs_create_dir("dri", NULL);
if (!drm_debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
- ret = -1;
- goto err_p3;
+ ret = -ENOMEM;
+ DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
+ goto error;
}
- DRM_INFO("Initialized %s %d.%d.%d %s\n",
- CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+ ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
+ if (ret < 0)
+ goto error;
+
+ DRM_INFO("Initialized\n");
return 0;
-err_p3:
- drm_sysfs_destroy();
-err_p2:
- unregister_chrdev(DRM_MAJOR, "drm");
- idr_destroy(&drm_minors_idr);
-err_p1:
+error:
+ drm_core_exit();
return ret;
}
-static void __exit drm_core_exit(void)
-{
- debugfs_remove(drm_debugfs_root);
- drm_sysfs_destroy();
-
- unregister_chrdev(DRM_MAJOR, "drm");
-
- drm_connector_ida_destroy();
- idr_destroy(&drm_minors_idr);
-}
-
module_init(drm_core_init);
module_exit(drm_core_exit);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 637a0aa4d3a0..ec77bd3e1f08 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -991,7 +991,7 @@ static const struct drm_display_mode edid_cea_modes[] = {
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
- 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
};
@@ -3253,16 +3253,12 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
}
static void
-parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
+drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
{
u8 len = cea_db_payload_len(db);
- if (len >= 6) {
+ if (len >= 6)
connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
- connector->dvi_dual = db[6] & 1;
- }
- if (len >= 7)
- connector->max_tmds_clock = db[7] * 5;
if (len >= 8) {
connector->latency_present[0] = db[8] >> 7;
connector->latency_present[1] = (db[8] >> 6) & 1;
@@ -3276,19 +3272,15 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
if (len >= 12)
connector->audio_latency[1] = db[12];
- DRM_DEBUG_KMS("HDMI: DVI dual %d, "
- "max TMDS clock %d, "
- "latency present %d %d, "
- "video latency %d %d, "
- "audio latency %d %d\n",
- connector->dvi_dual,
- connector->max_tmds_clock,
- (int) connector->latency_present[0],
- (int) connector->latency_present[1],
- connector->video_latency[0],
- connector->video_latency[1],
- connector->audio_latency[0],
- connector->audio_latency[1]);
+ DRM_DEBUG_KMS("HDMI: latency present %d %d, "
+ "video latency %d %d, "
+ "audio latency %d %d\n",
+ connector->latency_present[0],
+ connector->latency_present[1],
+ connector->video_latency[0],
+ connector->video_latency[1],
+ connector->audio_latency[0],
+ connector->audio_latency[1]);
}
static void
@@ -3358,6 +3350,13 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
memset(eld, 0, sizeof(connector->eld));
+ connector->latency_present[0] = false;
+ connector->latency_present[1] = false;
+ connector->video_latency[0] = 0;
+ connector->audio_latency[0] = 0;
+ connector->video_latency[1] = 0;
+ connector->audio_latency[1] = 0;
+
cea = drm_find_cea_extension(edid);
if (!cea) {
DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
@@ -3407,7 +3406,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
case VENDOR_BLOCK:
/* HDMI Vendor-Specific Data Block */
if (cea_db_is_hdmi_vsdb(db))
- parse_hdmi_vsdb(connector, db);
+ drm_parse_hdmi_vsdb_audio(connector, db);
break;
default:
break;
@@ -3721,122 +3720,127 @@ bool drm_rgb_quant_range_selectable(struct edid *edid)
}
EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
-/**
- * drm_assign_hdmi_deep_color_info - detect whether monitor supports
- * hdmi deep color modes and update drm_display_info if so.
- * @edid: monitor EDID information
- * @info: Updated with maximum supported deep color bpc and color format
- * if deep color supported.
- * @connector: DRM connector, used only for debug output
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI deep color supported, false if not or unknown.
- */
-static bool drm_assign_hdmi_deep_color_info(struct edid *edid,
- struct drm_display_info *info,
- struct drm_connector *connector)
+static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
+ const u8 *hdmi)
{
- u8 *edid_ext, *hdmi;
- int i;
- int start_offset, end_offset;
+ struct drm_display_info *info = &connector->display_info;
unsigned int dc_bpc = 0;
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- return false;
+ /* HDMI supports at least 8 bpc */
+ info->bpc = 8;
- if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
- return false;
+ if (cea_db_payload_len(hdmi) < 6)
+ return;
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
+ dc_bpc = 10;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
+ DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
+ dc_bpc = 12;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
+ DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
+ connector->name);
+ }
+
+ if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
+ dc_bpc = 16;
+ info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
+ DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
+ connector->name);
+ }
+
+ if (dc_bpc == 0) {
+ DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
+ connector->name);
+ return;
+ }
+
+ DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
+ connector->name, dc_bpc);
+ info->bpc = dc_bpc;
/*
- * Because HDMI identifier is in Vendor Specific Block,
- * search it from all data blocks of CEA extension.
+ * Deep color support mandates RGB444 support for all video
+ * modes and forbids YCRCB422 support for all video modes per
+ * HDMI 1.3 spec.
*/
- for_each_cea_db(edid_ext, i, start_offset, end_offset) {
- if (cea_db_is_hdmi_vsdb(&edid_ext[i])) {
- /* HDMI supports at least 8 bpc */
- info->bpc = 8;
-
- hdmi = &edid_ext[i];
- if (cea_db_payload_len(hdmi) < 6)
- return false;
-
- if (hdmi[6] & DRM_EDID_HDMI_DC_30) {
- dc_bpc = 10;
- info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_30;
- DRM_DEBUG("%s: HDMI sink does deep color 30.\n",
- connector->name);
- }
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
- if (hdmi[6] & DRM_EDID_HDMI_DC_36) {
- dc_bpc = 12;
- info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_36;
- DRM_DEBUG("%s: HDMI sink does deep color 36.\n",
- connector->name);
- }
+ /* YCRCB444 is optional according to spec. */
+ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
+ connector->name);
+ }
- if (hdmi[6] & DRM_EDID_HDMI_DC_48) {
- dc_bpc = 16;
- info->edid_hdmi_dc_modes |= DRM_EDID_HDMI_DC_48;
- DRM_DEBUG("%s: HDMI sink does deep color 48.\n",
- connector->name);
- }
+ /*
+ * Spec says that if any deep color mode is supported at all,
+ * then deep color 36 bit must be supported.
+ */
+ if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
+ DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
+ connector->name);
+ }
+}
- if (dc_bpc > 0) {
- DRM_DEBUG("%s: Assigning HDMI sink color depth as %d bpc.\n",
- connector->name, dc_bpc);
- info->bpc = dc_bpc;
-
- /*
- * Deep color support mandates RGB444 support for all video
- * modes and forbids YCRCB422 support for all video modes per
- * HDMI 1.3 spec.
- */
- info->color_formats = DRM_COLOR_FORMAT_RGB444;
-
- /* YCRCB444 is optional according to spec. */
- if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) {
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
- DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n",
- connector->name);
- }
+static void
+drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
+{
+ struct drm_display_info *info = &connector->display_info;
+ u8 len = cea_db_payload_len(db);
- /*
- * Spec says that if any deep color mode is supported at all,
- * then deep color 36 bit must be supported.
- */
- if (!(hdmi[6] & DRM_EDID_HDMI_DC_36)) {
- DRM_DEBUG("%s: HDMI sink should do DC_36, but does not!\n",
- connector->name);
- }
+ if (len >= 6)
+ info->dvi_dual = db[6] & 1;
+ if (len >= 7)
+ info->max_tmds_clock = db[7] * 5000;
- return true;
- }
- else {
- DRM_DEBUG("%s: No deep color support on this HDMI sink.\n",
- connector->name);
- }
- }
- }
+ DRM_DEBUG_KMS("HDMI: DVI dual %d, "
+ "max TMDS clock %d kHz\n",
+ info->dvi_dual,
+ info->max_tmds_clock);
- return false;
+ drm_parse_hdmi_deep_color_info(connector, db);
}
-/**
- * drm_add_display_info - pull display info out if present
- * @edid: EDID data
- * @info: display info (attached to connector)
- * @connector: connector whose edid is used to build display info
- *
- * Grab any available display info and stuff it into the drm_display_info
- * structure that's part of the connector. Useful for tracking bpp and
- * color spaces.
- */
-static void drm_add_display_info(struct edid *edid,
- struct drm_display_info *info,
- struct drm_connector *connector)
+static void drm_parse_cea_ext(struct drm_connector *connector,
+ struct edid *edid)
{
- u8 *edid_ext;
+ struct drm_display_info *info = &connector->display_info;
+ const u8 *edid_ext;
+ int i, start, end;
+
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return;
+
+ info->cea_rev = edid_ext[1];
+
+ /* The existence of a CEA block should imply RGB support */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+
+ if (cea_db_offsets(edid_ext, &start, &end))
+ return;
+
+ for_each_cea_db(edid_ext, i, start, end) {
+ const u8 *db = &edid_ext[i];
+
+ if (cea_db_is_hdmi_vsdb(db))
+ drm_parse_hdmi_vsdb_video(connector, db);
+ }
+}
+
+static void drm_add_display_info(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct drm_display_info *info = &connector->display_info;
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -3844,6 +3848,9 @@ static void drm_add_display_info(struct edid *edid,
/* driver figures it out in this case */
info->bpc = 0;
info->color_formats = 0;
+ info->cea_rev = 0;
+ info->max_tmds_clock = 0;
+ info->dvi_dual = false;
if (edid->revision < 3)
return;
@@ -3851,21 +3858,7 @@ static void drm_add_display_info(struct edid *edid,
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return;
- /* Get data from CEA blocks if present */
- edid_ext = drm_find_cea_extension(edid);
- if (edid_ext) {
- info->cea_rev = edid_ext[1];
-
- /* The existence of a CEA block should imply RGB support */
- info->color_formats = DRM_COLOR_FORMAT_RGB444;
- if (edid_ext[3] & EDID_CEA_YCRCB444)
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
- if (edid_ext[3] & EDID_CEA_YCRCB422)
- info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
- }
-
- /* HDMI deep color modes supported? Assign to info, if so */
- drm_assign_hdmi_deep_color_info(edid, info, connector);
+ drm_parse_cea_ext(connector, edid);
/*
* Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
@@ -4052,7 +4045,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
* @connector: connector we're probing
* @edid: EDID data
*
- * Add the specified modes to the connector's mode list.
+ * Add the specified modes to the connector's mode list. Also fills out the
+ * &drm_display_info structure in @connector with any information which can be
+ * derived from the edid.
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
@@ -4099,7 +4094,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
- drm_add_display_info(edid, &connector->display_info, connector);
+ drm_add_display_info(connector, edid);
if (quirks & EDID_QUIRK_FORCE_6BPC)
connector->display_info.bpc = 6;
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
new file mode 100644
index 000000000000..5c067719164d
--- /dev/null
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Encoders represent the connecting element between the CRTC (as the overall
+ * pixel pipeline, represented by struct &drm_crtc) and the connectors (as the
+ * generic sink entity, represented by struct &drm_connector). An encoder takes
+ * pixel data from a CRTC and converts it to a format suitable for any attached
+ * connector. Encoders are objects exposed to userspace, originally to allow
+ * userspace to infer cloning and connector/CRTC restrictions. Unfortunately
+ * almost all drivers get this wrong, making the uabi pretty much useless. On
+ * top of that the exposed restrictions are too simple for today's hardware, and
+ * the recommended way to infer restrictions is by using the
+ * DRM_MODE_ATOMIC_TEST_ONLY flag for the atomic IOCTL.
+ *
+ * Otherwise encoders aren't used in the uapi at all (any modeset request from
+ * userspace directly connects a connector with a CRTC), drivers are therefore
+ * free to use them however they wish. Modeset helper libraries make strong use
+ * of encoders to facilitate code sharing. But for more complex settings it is
+ * usually better to move shared code into a separate &drm_bridge. Compared to
+ * encoders, bridges also have the benefit of being purely an internal
+ * abstraction since they are not exposed to userspace at all.
+ *
+ * Encoders are initialized with drm_encoder_init() and cleaned up using
+ * drm_encoder_cleanup().
+ */
+static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
+ { DRM_MODE_ENCODER_NONE, "None" },
+ { DRM_MODE_ENCODER_DAC, "DAC" },
+ { DRM_MODE_ENCODER_TMDS, "TMDS" },
+ { DRM_MODE_ENCODER_LVDS, "LVDS" },
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
+ { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+ { DRM_MODE_ENCODER_DSI, "DSI" },
+ { DRM_MODE_ENCODER_DPMST, "DP MST" },
+ { DRM_MODE_ENCODER_DPI, "DPI" },
+};
+
+int drm_encoder_register_all(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ drm_for_each_encoder(encoder, dev) {
+ if (encoder->funcs->late_register)
+ ret = encoder->funcs->late_register(encoder);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void drm_encoder_unregister_all(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev) {
+ if (encoder->funcs->early_unregister)
+ encoder->funcs->early_unregister(encoder);
+ }
+}
+
+/**
+ * drm_encoder_init - Init a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initialises a preallocated encoder. Encoder should be subclassed as part of
+ * driver encoder objects. At driver unload time drm_encoder_cleanup() should be
+ * called from the driver's destroy hook in &drm_encoder_funcs.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...)
+{
+ int ret;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+ if (ret)
+ goto out_unlock;
+
+ encoder->dev = dev;
+ encoder->encoder_type = encoder_type;
+ encoder->funcs = funcs;
+ if (name) {
+ va_list ap;
+
+ va_start(ap, name);
+ encoder->name = kvasprintf(GFP_KERNEL, name, ap);
+ va_end(ap);
+ } else {
+ encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
+ drm_encoder_enum_list[encoder_type].name,
+ encoder->base.id);
+ }
+ if (!encoder->name) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+ encoder->index = dev->mode_config.num_encoder++;
+
+out_put:
+ if (ret)
+ drm_mode_object_unregister(dev, &encoder->base);
+
+out_unlock:
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_encoder_init);
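
A sketch of typical driver usage, with the encoder embedded in a driver
struct as the kerneldoc suggests (foo and foo_encoder_funcs illustrative):

	/* Sketch: init an embedded encoder, letting DRM name it "TMDS-<id>". */
	ret = drm_encoder_init(dev, &foo->encoder, &foo_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret)
		return ret;

	foo->encoder.possible_crtcs = 0x1;	/* only CRTC 0 feeds this encoder */
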
+
+/**
+ * drm_encoder_cleanup - cleans up an initialised encoder
+ * @encoder: encoder to cleanup
+ *
+ * Cleans up the encoder but doesn't free the object.
+ */
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+
+ /* Note that the encoder_list is considered to be static; should we
+ * remove the drm_encoder at runtime we would have to decrement all
+ * the indices on the drm_encoder after us in the encoder_list.
+ */
+
+ drm_modeset_lock_all(dev);
+ drm_mode_object_unregister(dev, &encoder->base);
+ kfree(encoder->name);
+ list_del(&encoder->head);
+ dev->mode_config.num_encoder--;
+ drm_modeset_unlock_all(dev);
+
+ memset(encoder, 0, sizeof(*encoder));
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_device *dev = encoder->dev;
+ bool uses_atomic = false;
+
+ /* For atomic drivers only state objects are synchronously updated and
+ * protected by modeset locks, so check those first. */
+ drm_for_each_connector(connector, dev) {
+ if (!connector->state)
+ continue;
+
+ uses_atomic = true;
+
+ if (connector->state->best_encoder != encoder)
+ continue;
+
+ return connector->state->crtc;
+ }
+
+ /* Don't return stale data (e.g. pending async disable). */
+ if (uses_atomic)
+ return NULL;
+
+ return encoder->crtc;
+}
+
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_encoder *enc_resp = data;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ encoder = drm_encoder_find(dev, enc_resp->encoder_id);
+ if (!encoder)
+ return -ENOENT;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ crtc = drm_encoder_get_crtc(encoder);
+ if (crtc)
+ enc_resp->crtc_id = crtc->base.id;
+ else
+ enc_resp->crtc_id = 0;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ enc_resp->encoder_type = encoder->encoder_type;
+ enc_resp->encoder_id = encoder->base.id;
+ enc_resp->possible_crtcs = encoder->possible_crtcs;
+ enc_resp->possible_clones = encoder->possible_clones;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0a06f9120b5a..6c75e62c0b22 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -29,10 +29,10 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@@ -41,6 +41,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include "drm_crtc_helper_internal.h"
+
static bool drm_fbdev_emulation = true;
module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
MODULE_PARM_DESC(fbdev_emulation,
@@ -129,7 +131,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
- kfree(fb_helper->connector_info[i]);
+ struct drm_fb_helper_connector *fb_helper_connector =
+ fb_helper->connector_info[i];
+
+ drm_connector_unreference(fb_helper_connector->connector);
+
+ kfree(fb_helper_connector);
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
@@ -335,7 +342,7 @@ retry:
goto fail;
}
- plane_state->rotation = BIT(DRM_ROTATE_0);
+ plane_state->rotation = DRM_ROTATE_0;
plane->old_fb = plane->fb;
plane_mask |= 1 << drm_plane_index(plane);
@@ -395,7 +402,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
if (dev->mode_config.rotation_property) {
drm_mode_plane_set_obj_prop(plane,
dev->mode_config.rotation_property,
- BIT(DRM_ROTATE_0));
+ DRM_ROTATE_0);
}
}
@@ -601,6 +608,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_blank);
+static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
+ struct drm_mode_set *modeset)
+{
+ int i;
+
+ for (i = 0; i < modeset->num_connectors; i++) {
+ drm_connector_unreference(modeset->connectors[i]);
+ modeset->connectors[i] = NULL;
+ }
+ modeset->num_connectors = 0;
+
+ drm_mode_destroy(helper->dev, modeset->mode);
+ modeset->mode = NULL;
+
+ /* FIXME should hold a ref? */
+ modeset->fb = NULL;
+}
+
static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
@@ -610,14 +635,26 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->connector_info[i]);
}
kfree(helper->connector_info);
+
for (i = 0; i < helper->crtc_count; i++) {
- kfree(helper->crtc_info[i].mode_set.connectors);
- if (helper->crtc_info[i].mode_set.mode)
- drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
+
+ drm_fb_helper_modeset_release(helper, modeset);
+ kfree(modeset->connectors);
}
kfree(helper->crtc_info);
}
+static void drm_fb_helper_resume_worker(struct work_struct *work)
+{
+ struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
+ resume_work);
+
+ console_lock();
+ fb_set_suspend(helper->fbdev, 0);
+ console_unlock();
+}
+
static void drm_fb_helper_dirty_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
@@ -632,7 +669,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
clip->x2 = clip->y2 = 0;
spin_unlock_irqrestore(&helper->dirty_lock, flags);
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ /* call dirty callback only when it has been really touched */
+ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
}
/**
@@ -649,6 +688,7 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
{
INIT_LIST_HEAD(&helper->kernel_fb_list);
spin_lock_init(&helper->dirty_lock);
+ INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
helper->funcs = funcs;
@@ -1024,17 +1064,65 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
/**
* drm_fb_helper_set_suspend - wrapper around fb_set_suspend
* @fb_helper: driver-allocated fbdev helper
- * @state: desired state, zero to resume, non-zero to suspend
+ * @suspend: whether to suspend or resume
*
- * A wrapper around fb_set_suspend implemented by fbdev core
+ * A wrapper around fb_set_suspend implemented by fbdev core.
+ * Use drm_fb_helper_set_suspend_unlocked() if you don't need to take
+ * the lock yourself.
*/
-void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend)
{
if (fb_helper && fb_helper->fbdev)
- fb_set_suspend(fb_helper->fbdev, state);
+ fb_set_suspend(fb_helper->fbdev, suspend);
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend);
+/**
+ * drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also
+ * takes the console lock
+ * @fb_helper: driver-allocated fbdev helper
+ * @suspend: whether to suspend or resume
+ *
+ * A wrapper around fb_set_suspend() that takes the console lock. If the lock
+ * isn't available on resume, a worker is tasked with waiting for the lock
+ * to become available. The console lock can be pretty contended on resume
+ * due to all the printk activity.
+ *
+ * This function can be called multiple times with the same state since
+ * &fb_info->state is checked to see if fbdev is running or not before locking.
+ *
+ * Use drm_fb_helper_set_suspend() if you need to take the lock yourself.
+ */
+void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
+ bool suspend)
+{
+ if (!fb_helper || !fb_helper->fbdev)
+ return;
+
+ /* make sure there's no pending/ongoing resume */
+ flush_work(&fb_helper->resume_work);
+
+ if (suspend) {
+ if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING)
+ return;
+
+ console_lock();
+
+ } else {
+ if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING)
+ return;
+
+ if (!console_trylock()) {
+ schedule_work(&fb_helper->resume_work);
+ return;
+ }
+ }
+
+ fb_set_suspend(fb_helper->fbdev, suspend);
+ console_unlock();
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
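/*
 * A minimal sketch (not part of this patch): typical dev_pm_ops callbacks in
 * a KMS driver using the new unlocked helper. struct foo_device is a
 * hypothetical driver structure embedding the fb helper.
 */
#include <drm/drm_fb_helper.h>

struct foo_device {
	struct drm_fb_helper fb_helper;
	/* ... */
};

static int foo_pm_suspend(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	/* suspend paths may sleep, so taking the console lock directly is fine */
	drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, true);
	return 0;
}

static int foo_pm_resume(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	/* if console_lock is contended, this defers to helper->resume_work */
	drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, false);
	return 0;
}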
+
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
@@ -2027,7 +2115,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_fb_offset *offsets;
- struct drm_mode_set *modeset;
bool *enabled;
int width, height;
int i;
@@ -2075,45 +2162,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/* need to set the modesets up here for use later */
/* fill out the connector<->crtc mappings into the modesets */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- modeset->num_connectors = 0;
- modeset->fb = NULL;
- }
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ drm_fb_helper_modeset_release(fb_helper,
+ &fb_helper->crtc_info[i].mode_set);
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
struct drm_fb_offset *offset = &offsets[i];
- modeset = &fb_crtc->mode_set;
+ struct drm_mode_set *modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
+ struct drm_connector *connector =
+ fb_helper->connector_info[i]->connector;
+
DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
+
fb_crtc->desired_mode = mode;
fb_crtc->x = offset->x;
fb_crtc->y = offset->y;
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
- modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ drm_connector_reference(connector);
+ modeset->connectors[modeset->num_connectors++] = connector;
modeset->fb = fb_helper->fb;
modeset->x = offset->x;
modeset->y = offset->y;
}
}
-
- /* Clear out any old modes if there are no more connected outputs. */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- if (modeset->num_connectors == 0) {
- BUG_ON(modeset->fb);
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = NULL;
- }
- }
out:
kfree(crtcs);
kfree(modes);
@@ -2194,7 +2271,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
* @fb_helper: the drm_fb_helper
*
* Scan the connectors attached to the fb_helper and try to put together a
- * setup after *notification of a change in output configuration.
+ * setup after notification of a change in output configuration.
*
* Called at runtime, takes the mode config locks to be able to check/change the
* modeset configuration. Must be run from process context (which usually means
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 323c238fcac7..e84faecf5225 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -92,7 +92,7 @@ static int drm_setup(struct drm_device * dev)
int ret;
if (dev->driver->firstopen &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_core_check_feature(dev, DRIVER_LEGACY)) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
return ret;
@@ -199,7 +199,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
filp->private_data = priv;
priv->filp = filp;
- priv->uid = current_euid();
priv->pid = get_pid(task_pid(current));
priv->minor = minor;
@@ -346,7 +345,7 @@ void drm_lastclose(struct drm_device * dev)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_dev_reinit(dev);
}
@@ -389,7 +388,7 @@ int drm_release(struct inode *inode, struct file *filp)
(long)old_encode_dev(file_priv->minor->kdev->devt),
dev->open_count);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_lock_release(dev, filp);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 0645c85d5f95..29c56b4331e0 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -36,19 +36,60 @@ static char printable_char(int c)
}
/**
+ * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description
+ * @bpp: bits per pixel
+ * @depth: bit depth per pixel
+ *
+ * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
+ * Useful in fbdev emulation code, since that deals in those values.
+ */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+ uint32_t fmt;
+
+ switch (bpp) {
+ case 8:
+ fmt = DRM_FORMAT_C8;
+ break;
+ case 16:
+ if (depth == 15)
+ fmt = DRM_FORMAT_XRGB1555;
+ else
+ fmt = DRM_FORMAT_RGB565;
+ break;
+ case 24:
+ fmt = DRM_FORMAT_RGB888;
+ break;
+ case 32:
+ if (depth == 24)
+ fmt = DRM_FORMAT_XRGB8888;
+ else if (depth == 30)
+ fmt = DRM_FORMAT_XRGB2101010;
+ else
+ fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+ fmt = DRM_FORMAT_XRGB8888;
+ break;
+ }
+
+ return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
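/*
 * A minimal sketch (not part of this patch) of a typical fbdev-emulation call
 * site. For example, (bpp=32, depth=24) yields DRM_FORMAT_XRGB8888 and
 * (bpp=16, depth=15) yields DRM_FORMAT_XRGB1555.
 */
#include <drm/drm_fb_helper.h>

static uint32_t foo_surface_format(const struct drm_fb_helper_surface_size *sizes)
{
	return drm_mode_legacy_fb_format(sizes->surface_bpp,
					 sizes->surface_depth);
}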
+
+/**
* drm_get_format_name - return a string for drm fourcc format
* @format: format to compute name of
*
- * Note that the buffer used by this function is globally shared and owned by
- * the function itself.
- *
- * FIXME: This isn't really multithreading safe.
+ * Note that the buffer returned by this function is owned by the caller
+ * and will need to be freed using kfree().
*/
-const char *drm_get_format_name(uint32_t format)
+char *drm_get_format_name(uint32_t format)
{
- static char buf[32];
+ char *buf = kmalloc(32, GFP_KERNEL);
- snprintf(buf, sizeof(buf),
+ snprintf(buf, 32,
"%c%c%c%c %s-endian (0x%08x)",
printable_char(format & 0xff),
printable_char((format >> 8) & 0xff),
@@ -73,6 +114,8 @@ EXPORT_SYMBOL(drm_get_format_name);
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp)
{
+ char *format_name;
+
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB332:
@@ -127,8 +170,9 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
*bpp = 32;
break;
default:
- DRM_DEBUG_KMS("unsupported pixel format %s\n",
- drm_get_format_name(format));
+ format_name = drm_get_format_name(format);
+ DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
+ kfree(format_name);
*depth = 0;
*bpp = 0;
break;
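/*
 * A minimal sketch (not part of this patch) of the new calling convention:
 * drm_get_format_name() now returns a kmalloc'ed buffer, so every caller owns
 * it and must kfree() it, as the hunk above does.
 */
static void foo_log_format(uint32_t format)
{
	char *name = drm_get_format_name(format);

	DRM_DEBUG_KMS("using pixel format %s\n", name);
	kfree(name);	/* caller-owned, unlike the old shared static buffer */
}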
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
new file mode 100644
index 000000000000..398efd67cb93
--- /dev/null
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -0,0 +1,857 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_framebuffer.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Frame buffers are abstract memory objects that provide a source of pixels to
+ * scanout to a CRTC. Applications explicitly request the creation of frame
+ * buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and receive an opaque
+ * handle that can be passed to the KMS CRTC control, plane configuration and
+ * page flip functions.
+ *
+ * Frame buffers rely on the underlying memory manager for allocating backing
+ * storage. When creating a frame buffer applications pass a memory handle
+ * (or a list of memory handles for multi-planar formats) through the
+ * struct &drm_mode_fb_cmd2 argument. For drivers using GEM as their userspace
+ * buffer management interface this would be a GEM handle. Drivers are however
+ * free to use their own backing storage object handles, e.g. vmwgfx directly
+ * exposes special TTM handles to userspace and so expects TTM handles in the
+ * create ioctl and not GEM handles.
+ *
+ * Framebuffers are tracked with struct &drm_framebuffer. They are published
+ * using drm_framebuffer_init() - after calling that function userspace can use
+ * and access the framebuffer object. The helper function
+ * drm_helper_mode_fill_fb_struct() can be used to pre-fill the required
+ * metadata fields.
+ *
+ * The lifetime of a drm framebuffer is controlled with a reference count,
+ * drivers can grab additional references with drm_framebuffer_reference() and
+ * drop them again with drm_framebuffer_unreference(). For driver-private
+ * framebuffers for which the last reference is never dropped (e.g. for the
+ * fbdev framebuffer when the struct &drm_framebuffer is embedded into
+ * the fbdev helper struct) drivers can manually clean up a framebuffer at
+ * module unload time with drm_framebuffer_unregister_private(). But doing this
+ * is not recommended, and it's better to have a normal free-standing struct
+ * &drm_framebuffer.
+ */
+
+int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ const struct drm_framebuffer *fb)
+{
+ unsigned int fb_width, fb_height;
+
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+ if (src_w > fb_width ||
+ src_x > fb_width - src_w ||
+ src_h > fb_height ||
+ src_y > fb_height - src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+ src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+ src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+ src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request. This is the
+ * original addfb ioctl which only supported RGB formats.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r = {};
+ int ret;
+
+ /* convert to new format and call new ioctl */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+ r.pitches[0] = or->pitch;
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+ ret = drm_mode_addfb2(dev, &r, file_priv);
+ if (ret)
+ return ret;
+
+ or->fb_id = r.fb_id;
+
+ return 0;
+}
+
+static int format_check(const struct drm_mode_fb_cmd2 *r)
+{
+ uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+ char *format_name;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 0;
+ default:
+ format_name = drm_get_format_name(r->pixel_format);
+ DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
+ kfree(format_name);
+ return -EINVAL;
+ }
+}
+
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+ int ret, hsub, vsub, num_planes, i;
+
+ ret = format_check(r);
+ if (ret) {
+ char *format_name = drm_get_format_name(r->pixel_format);
+ DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
+ kfree(format_name);
+ return ret;
+ }
+
+ hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+ num_planes = drm_format_num_planes(r->pixel_format);
+
+ if (r->width == 0 || r->width % hsub) {
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+ return -EINVAL;
+ }
+
+ if (r->height == 0 || r->height % vsub) {
+ DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? hsub : 1);
+ unsigned int height = r->height / (i != 0 ? vsub : 1);
+ unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+ if (!r->handles[i]) {
+ DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ return -EINVAL;
+ }
+
+ if ((uint64_t) width * cpp > UINT_MAX)
+ return -ERANGE;
+
+ if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+ return -ERANGE;
+
+ if (r->pitches[i] < width * cpp) {
+ DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ return -EINVAL;
+ }
+
+ if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
+ DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
+ return -EINVAL;
+ }
+
+ /* modifier specific checks: */
+ switch (r->modifier[i]) {
+ case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
+ /* NOTE: the pitch restriction may be lifted later if it turns
+ * out that no hw has this restriction:
+ */
+ if (r->pixel_format != DRM_FORMAT_NV12 ||
+ width % 128 || height % 32 ||
+ r->pitches[i] % 128) {
+ DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ for (i = num_planes; i < 4; i++) {
+ if (r->modifier[i]) {
+ DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
+ return -EINVAL;
+ }
+
+ /* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */
+ if (!(r->flags & DRM_MODE_FB_MODIFIERS))
+ continue;
+
+ if (r->handles[i]) {
+ DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
+ return -EINVAL;
+ }
+
+ if (r->pitches[i]) {
+ DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
+ return -EINVAL;
+ }
+
+ if (r->offsets[i]) {
+ DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+struct drm_framebuffer *
+drm_internal_framebuffer_create(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
+ DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
+ DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
+ return ERR_PTR(-EINVAL);
+ }
+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
+ DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (r->flags & DRM_MODE_FB_MODIFIERS &&
+ !dev->mode_config.allow_fb_modifiers) {
+ DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = framebuffer_check(r);
+ if (ret)
+ return ERR_PTR(ret);
+
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("could not create framebuffer\n");
+ return fb;
+ }
+
+ return fb;
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_framebuffer *fb;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_internal_framebuffer_create(dev, r, file_priv);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ r->fb_id = fb->base.id;
+
+ /* Transfer ownership to the filp for reaping on close */
+ mutex_lock(&file_priv->fbs_lock);
+ list_add(&fb->filp_head, &file_priv->fbs);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ return 0;
+}
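/*
 * A minimal sketch (not part of this patch): the matching userspace side of
 * DRM_IOCTL_MODE_ADDFB2, assuming a single-plane buffer whose GEM handle,
 * pitch, width and height are already known. Error handling trimmed.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_fourcc.h>

static int add_xrgb8888_fb(int drm_fd, uint32_t handle, uint32_t width,
			   uint32_t height, uint32_t pitch, uint32_t *fb_id)
{
	struct drm_mode_fb_cmd2 cmd;

	memset(&cmd, 0, sizeof(cmd));	/* unused planes must stay zeroed */
	cmd.width = width;
	cmd.height = height;
	cmd.pixel_format = DRM_FORMAT_XRGB8888;
	cmd.handles[0] = handle;
	cmd.pitches[0] = pitch;	/* must be >= width * 4 for this format */

	if (ioctl(drm_fd, DRM_IOCTL_MODE_ADDFB2, &cmd))
		return -1;

	*fb_id = cmd.fb_id;	/* filled in by the kernel on success */
	return 0;
}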
+
+struct drm_mode_rmfb_work {
+ struct work_struct work;
+ struct list_head fbs;
+};
+
+static void drm_mode_rmfb_work_fn(struct work_struct *w)
+{
+ struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
+
+ while (!list_empty(&arg->fbs)) {
+ struct drm_framebuffer *fb =
+ list_first_entry(&arg->fbs, typeof(*fb), filp_head);
+
+ list_del_init(&fb->filp_head);
+ drm_framebuffer_remove(fb);
+ }
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fbl = NULL;
+ uint32_t *id = data;
+ int found = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, *id);
+ if (!fb)
+ return -ENOENT;
+
+ mutex_lock(&file_priv->fbs_lock);
+ list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+ if (fb == fbl)
+ found = 1;
+ if (!found) {
+ mutex_unlock(&file_priv->fbs_lock);
+ goto fail_unref;
+ }
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ /* drop the reference we picked up in framebuffer lookup */
+ drm_framebuffer_unreference(fb);
+
+ /*
+ * we now own the reference that was stored in the fbs list
+ *
+ * drm_framebuffer_remove may fail with -EINTR on pending signals,
+ * so run this in a separate stack as there's no way to correctly
+ * handle this after the fb is already removed from the lookup table.
+ */
+ if (drm_framebuffer_read_refcount(fb) > 1) {
+ struct drm_mode_rmfb_work arg;
+
+ INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+ INIT_LIST_HEAD(&arg.fbs);
+ list_add_tail(&fb->filp_head, &arg.fbs);
+
+ schedule_work(&arg.work);
+ flush_work(&arg.work);
+ destroy_work_on_stack(&arg.work);
+ } else
+ drm_framebuffer_unreference(fb);
+
+ return 0;
+
+fail_unref:
+ drm_framebuffer_unreference(fb);
+ return -ENOENT;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd *r = data;
+ struct drm_framebuffer *fb;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -ENOENT;
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->depth = fb->depth;
+ r->bpp = fb->bits_per_pixel;
+ r->pitch = fb->pitches[0];
+ if (fb->funcs->create_handle) {
+ if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
+ drm_is_control_client(file_priv)) {
+ ret = fb->funcs->create_handle(fb, file_priv,
+ &r->handle);
+ } else {
+ /* GET_FB() is an unprivileged ioctl so we must not
+ * return a buffer-handle to non-master processes! For
+ * backwards-compatibility reasons, we cannot make
+ * GET_FB() privileged, so just return an invalid handle
+ * for non-masters. */
+ r->handle = 0;
+ ret = 0;
+ }
+ } else {
+ ret = -ENODEV;
+ }
+
+ drm_framebuffer_unreference(fb);
+
+ return ret;
+}
+
+/**
+ * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB and flush out the damaged area supplied by userspace as a clip
+ * rectangle list. Generic userspace which does frontbuffer rendering must call
+ * this ioctl to flush out the changes on manual-update display outputs, e.g.
+ * usb display-link, mipi manual update panels or edp panel self refresh modes.
+ *
+ * Modesetting drivers which always update the frontbuffer do not need to
+ * implement the corresponding ->dirty framebuffer callback.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_clip_rect __user *clips_ptr;
+ struct drm_clip_rect *clips = NULL;
+ struct drm_mode_fb_dirty_cmd *r = data;
+ struct drm_framebuffer *fb;
+ unsigned flags;
+ int num_clips;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, r->fb_id);
+ if (!fb)
+ return -ENOENT;
+
+ num_clips = r->num_clips;
+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
+ if (!num_clips != !clips_ptr) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+ /* If userspace annotates copy, clips must come in pairs */
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ if (num_clips && clips_ptr) {
+ if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+ if (!clips) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = copy_from_user(clips, clips_ptr,
+ num_clips * sizeof(*clips));
+ if (ret) {
+ ret = -EFAULT;
+ goto out_err2;
+ }
+ }
+
+ if (fb->funcs->dirty) {
+ ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+ clips, num_clips);
+ } else {
+ ret = -ENOSYS;
+ }
+
+out_err2:
+ kfree(clips);
+out_err1:
+ drm_framebuffer_unreference(fb);
+
+ return ret;
+}
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: drm file
+ *
+ * Destroy all the FBs associated with @priv. Called when the drm file is
+ * released, so there is no return value to report back to userspace.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+ struct drm_framebuffer *fb, *tfb;
+ struct drm_mode_rmfb_work arg;
+
+ INIT_LIST_HEAD(&arg.fbs);
+
+ /*
+ * When the file gets released that means no one else can access the fb
+ * list any more, so no need to grab fpriv->fbs_lock. And we need to
+ * avoid upsetting lockdep since the universal cursor code adds a
+ * framebuffer while holding mutex locks.
+ *
+ * Note that a real deadlock between fpriv->fbs_lock and the modeset
+ * locks is impossible here since no one else but this function can get
+ * at it any more.
+ */
+ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+ if (drm_framebuffer_read_refcount(fb) > 1) {
+ list_move_tail(&fb->filp_head, &arg.fbs);
+ } else {
+ list_del_init(&fb->filp_head);
+
+ /* This drops the fpriv->fbs reference. */
+ drm_framebuffer_unreference(fb);
+ }
+ }
+
+ if (!list_empty(&arg.fbs)) {
+ INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
+
+ schedule_work(&arg.work);
+ flush_work(&arg.work);
+ destroy_work_on_stack(&arg.work);
+ }
+}
+
+void drm_framebuffer_free(struct kref *kref)
+{
+ struct drm_framebuffer *fb =
+ container_of(kref, struct drm_framebuffer, base.refcount);
+ struct drm_device *dev = fb->dev;
+
+ /*
+ * The lookup idr holds a weak reference, which has not necessarily been
+ * removed at this point. Check for that.
+ */
+ drm_mode_object_unregister(dev, &fb->base);
+
+ fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ * @fb: framebuffer to be initialized
+ * @funcs: callbacks for this framebuffer
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users. Which means by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&fb->filp_head);
+ fb->dev = dev;
+ fb->funcs = funcs;
+
+ ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
+ false, drm_framebuffer_free);
+ if (ret)
+ goto out;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ dev->mode_config.num_fb++;
+ list_add(&fb->head, &dev->mode_config.fb_list);
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ drm_mode_object_register(dev, &fb->base);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
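/*
 * A minimal sketch (not part of this patch) of how a driver's .fb_create hook
 * would publish a framebuffer. struct foo_framebuffer is hypothetical, and
 * the handle lookup / metadata fill is elided.
 */
struct foo_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_object *obj;
};

static void foo_fb_destroy(struct drm_framebuffer *fb)
{
	drm_framebuffer_cleanup(fb);
	kfree(container_of(fb, struct foo_framebuffer, base));
}

static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy = foo_fb_destroy,
};

static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file,
	      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct foo_framebuffer *fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	int ret;

	if (!fb)
		return ERR_PTR(-ENOMEM);

	/* ... look up mode_cmd->handles[] and fill fb->base metadata ... */

	/* after this call the fb is visible to concurrent users, so it must
	 * already be fully set up */
	ret = drm_framebuffer_init(dev, &fb->base, &foo_fb_funcs);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}

	return &fb->base;
}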
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again, using drm_framebuffer_unreference().
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb = NULL;
+
+ obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB);
+ if (obj)
+ fb = obj_to_fb(obj);
+ return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since it'll lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev;
+
+ if (!fb)
+ return;
+
+ dev = fb->dev;
+
+ /* Mark fb as reaped and drop idr ref. */
+ drm_mode_object_unregister(dev, &fb->base);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Cleanup framebuffer. This function is intended to be used from the driver's
+ * ->destroy callback. It can also be used to clean up driver private
+ * framebuffers embedded into a larger structure.
+ *
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private().
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = fb->dev;
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_del(&fb->head);
+ dev->mode_config.num_fb--;
+ mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Scans all the CRTCs and planes of @fb's device. If they're
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+
+ if (!fb)
+ return;
+
+ dev = fb->dev;
+
+ WARN_ON(!list_empty(&fb->filp_head));
+
+ /*
+ * drm ABI mandates that we remove any deleted framebuffers from active
+ * usage. But since most sane clients only remove framebuffers they no
+ * longer need, try to optimize this away.
+ *
+ * Since we're holding a reference ourselves, observing a refcount of 1
+ * means that we're the last holder and can skip it. Also, the refcount
+ * can never increase from 1 again, so we don't need any barriers or
+ * locks.
+ *
+ * Note that userspace could try to race with us and instate a new
+ * usage _after_ we've cleared all current ones. End result will be an
+ * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+ * in this manner.
+ */
+ if (drm_framebuffer_read_refcount(fb) > 1) {
+ drm_modeset_lock_all(dev);
+ /* remove from any CRTC */
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->primary->fb == fb) {
+ /* should turn off the crtc */
+ if (drm_crtc_force_disable(crtc))
+ DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+ }
+ }
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->fb == fb)
+ drm_plane_force_disable(plane);
+ }
+ drm_modeset_unlock_all(dev);
+ }
+
+ drm_framebuffer_unreference(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_remove);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 9134ae134667..465bacd0a630 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -257,7 +257,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
- drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+ drm_vma_node_revoke(&obj->vma_node, file_priv);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
@@ -372,7 +372,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
handle = ret;
- ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+ ret = drm_vma_node_allow(&obj->vma_node, file_priv);
if (ret)
goto err_remove;
@@ -386,7 +386,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
return 0;
err_revoke:
- drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+ drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
spin_lock(&file_priv->table_lock);
idr_remove(&file_priv->object_idr, handle);
@@ -991,7 +991,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
if (!obj)
return -EINVAL;
- if (!drm_vma_node_is_allowed(node, filp)) {
+ if (!drm_vma_node_is_allowed(node, priv)) {
drm_gem_object_unreference_unlocked(obj);
return -EACCES;
}
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index 3d2e91c4d78e..b404287abb97 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -65,30 +65,34 @@ void drm_global_release(void)
int drm_global_item_ref(struct drm_global_reference *ref)
{
- int ret;
+ int ret = 0;
struct drm_global_item *item = &glob[ref->global_type];
mutex_lock(&item->mutex);
if (item->refcount == 0) {
- item->object = kzalloc(ref->size, GFP_KERNEL);
- if (unlikely(item->object == NULL)) {
+ ref->object = kzalloc(ref->size, GFP_KERNEL);
+ if (unlikely(ref->object == NULL)) {
ret = -ENOMEM;
- goto out_err;
+ goto error_unlock;
}
-
- ref->object = item->object;
ret = ref->init(ref);
if (unlikely(ret != 0))
- goto out_err;
+ goto error_free;
+ item->object = ref->object;
+ } else {
+ ref->object = item->object;
}
+
++item->refcount;
- ref->object = item->object;
mutex_unlock(&item->mutex);
return 0;
-out_err:
+
+error_free:
+ kfree(ref->object);
+ ref->object = NULL;
+error_unlock:
mutex_unlock(&item->mutex);
- item->object = NULL;
return ret;
}
EXPORT_SYMBOL(drm_global_item_ref);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 7b30b307674b..dae18e58e79b 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -142,7 +142,7 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
unsigned long add)
{
int ret;
- unsigned long mask = (1 << bits) - 1;
+ unsigned long mask = (1UL << bits) - 1;
unsigned long first, unshifted_key;
unshifted_key = hash_long(seed, bits);
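/*
 * A note (not part of this patch) on the 1UL change above: the old `1 << bits`
 * was evaluated in 32-bit int, so a hash table with order >= 32 computed an
 * undefined mask. A minimal illustration of the fixed arithmetic:
 */
static unsigned long foo_ht_mask(unsigned int bits)
{
	/* promote to unsigned long *before* shifting, as the fix does;
	 * e.g. bits = 32 yields 0xffffffff on a 64-bit build */
	return (1UL << bits) - 1;
}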
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 9ae353f4dd06..ffb2ab389d1d 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data)
mutex_lock(&dev->master_mutex);
master = dev->master;
- if (!master)
- goto out_unlock;
-
seq_printf(m, "%s", dev->driver->name);
if (dev->dev)
seq_printf(m, " dev=%s", dev_name(dev->dev));
@@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data)
if (dev->unique)
seq_printf(m, " unique=%s", dev->unique);
seq_printf(m, "\n");
-out_unlock:
mutex_unlock(&dev->master_mutex);
return 0;
@@ -80,6 +76,7 @@ int drm_clients_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_file *priv;
+ kuid_t uid;
seq_printf(m,
"%20s %5s %3s master a %5s %10s\n",
@@ -98,13 +95,14 @@ int drm_clients_info(struct seq_file *m, void *data)
rcu_read_lock(); /* locks pid_task()->comm */
task = pid_task(priv->pid, PIDTYPE_PID);
+ uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
task ? task->comm : "<unknown>",
pid_vnr(priv->pid),
priv->minor->index,
drm_is_current_master(priv) ? 'y' : 'n',
priv->authenticated ? 'y' : 'n',
- from_kuid_munged(seq_user_ns(m), priv->uid),
+ from_kuid_munged(seq_user_ns(m), uid),
priv->magic);
rcu_read_unlock();
}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b86dc9b921a5..e66af289a016 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -21,6 +21,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 4
+
/* drm_irq.c */
extern unsigned int drm_timestamp_monotonic;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index a6289752be16..867ab8c1582b 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -32,7 +32,6 @@
#include <linux/export.h>
#include <drm/drmP.h>
-#include <drm/drm_core.h>
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
@@ -346,6 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
struct drm_stats __user *stats;
int i, err;
+ memset(&s32, 0, sizeof(drm_stats32_t));
stats = compat_alloc_user_space(sizeof(*stats));
if (!stats)
return -EFAULT;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 33af4a5ddca1..0ad2c47f808f 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -29,7 +29,6 @@
*/
#include <drm/drmP.h>
-#include <drm/drm_core.h>
#include <drm/drm_auth.h>
#include "drm_legacy.h"
#include "drm_internal.h"
@@ -189,9 +188,8 @@ static int drm_getclient(struct drm_device *dev, void *data,
*/
if (client->idx == 0) {
client->auth = file_priv->authenticated;
- client->pid = pid_vnr(file_priv->pid);
- client->uid = from_kuid_munged(current_user_ns(),
- file_priv->uid);
+ client->pid = task_pid_vnr(current);
+ client->uid = overflowuid;
client->magic = 0;
client->iocs = 0;
@@ -228,6 +226,7 @@ static int drm_getstats(struct drm_device *dev, void *data,
static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_get_cap *req = data;
+ struct drm_crtc *crtc;
req->value = 0;
switch (req->capability) {
@@ -254,6 +253,13 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
case DRM_CAP_ASYNC_PAGE_FLIP:
req->value = dev->mode_config.async_page_flip;
break;
+ case DRM_CAP_PAGE_FLIP_TARGET:
+ req->value = 1;
+ drm_for_each_crtc(crtc, dev) {
+ if (!crtc->funcs->page_flip_target)
+ req->value = 0;
+ }
+ break;
case DRM_CAP_CURSOR_WIDTH:
if (dev->mode_config.cursor_width)
req->value = dev->mode_config.cursor_width;
@@ -714,9 +720,9 @@ long drm_ioctl(struct file *filp,
if (ksize > in_size)
memset(kdata + in_size, 0, ksize - in_size);
- /* Enforce sane locking for kms driver ioctls. Core ioctls are
+ /* Enforce sane locking for modern driver ioctls. Core ioctls are
* too messy still. */
- if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
+ if ((!drm_core_check_feature(dev, DRIVER_LEGACY) && is_driver_ioctl) ||
(ioctl->flags & DRM_UNLOCKED))
retcode = func(dev, kdata, file_priv);
else {
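/*
 * A minimal sketch (not part of this patch): userspace probing the new
 * capability, assuming uapi headers recent enough to define
 * DRM_CAP_PAGE_FLIP_TARGET.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int supports_page_flip_target(int drm_fd)
{
	struct drm_get_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.capability = DRM_CAP_PAGE_FLIP_TARGET;

	/* the kernel reports 1 only if every CRTC implements ->page_flip_target */
	if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap))
		return 0;

	return cap.value == 1;
}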
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 77f357b2c386..b969a64a1514 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -482,7 +482,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
return ret;
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
/* After installing handler */
@@ -491,7 +491,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
if (ret < 0) {
dev->irq_enabled = false;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
vga_client_register(dev->pdev, NULL, NULL, NULL);
free_irq(irq, dev);
} else {
@@ -557,7 +557,7 @@ int drm_irq_uninstall(struct drm_device *dev)
DRM_DEBUG("irq=%d\n", dev->irq);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
vga_client_register(dev->pdev, NULL, NULL, NULL);
if (dev->driver->irq_uninstall)
@@ -592,7 +592,7 @@ int drm_control(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
/* UMS was only ever supported on pci devices. */
if (WARN_ON(!dev->pdev))
@@ -713,10 +713,10 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* Negative value on error, failure or if not supported in current
* video mode:
*
- * -EINVAL - Invalid CRTC.
- * -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
- * -ENOTSUPP - Function not supported in current display mode.
- * -EIO - Failed, e.g., due to failed scanout position query.
+ * -EINVAL Invalid CRTC.
+ * -EAGAIN Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP Function not supported in current display mode.
+ * -EIO Failed, e.g., due to failed scanout position query.
*
* Returns or'ed positive status flags on success:
*
@@ -1008,6 +1008,31 @@ static void send_vblank_event(struct drm_device *dev,
* period. This helper function implements exactly the required vblank arming
* behaviour.
*
+ * NOTE: Drivers using this to send out the event in struct &drm_crtc_state
+ * as part of an atomic commit must ensure that the next vblank happens at
+ * exactly the same time as the atomic commit is committed to the hardware. This
+ * function itself does **not** protect against the next vblank interrupt racing
+ * with either this function call or the atomic commit operation. A possible
+ * sequence could be:
+ *
+ * 1. Driver commits new hardware state into vblank-synchronized registers.
+ * 2. A vblank happens, committing the hardware state. Also the corresponding
+ * vblank interrupt is fired off and fully processed by the interrupt
+ * handler.
+ * 3. The atomic commit operation proceeds to call drm_crtc_arm_vblank_event().
+ * 4. The event is only sent out for the next vblank, which is wrong.
+ *
+ * An equivalent race can happen when the driver calls
+ * drm_crtc_arm_vblank_event() before writing out the new hardware state.
+ *
+ * The only way to make this work safely is to prevent the vblank from firing
+ * (and the hardware from committing anything else) until the entire atomic
+ * commit sequence has run to completion. If the hardware does not have such a
+ * feature (e.g. using a "go" bit), then it is unsafe to use this function.
+ * Instead drivers need to manually send out the event from their interrupt
+ * handler by calling drm_crtc_send_vblank_event() and make sure that there's no
+ * possible race with the hardware committing the atomic update.
+ *
* Caller must hold event lock. Caller must also hold a vblank reference for
* the event @e, which will be dropped when the next vblank arrives.
*/
@@ -1030,8 +1055,11 @@ EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
* @crtc: the source CRTC of the vblank event
* @e: the event to send
*
- * Updates sequence # and timestamp on event, and sends it to userspace.
- * Caller must hold event lock.
+ * Updates sequence # and timestamp on event for the most recently processed
+ * vblank, and sends it to userspace. Caller must hold event lock.
+ *
+ * See drm_crtc_arm_vblank_event() for a helper which can be used in certain
+ * situations, especially to send out events for atomic commit operations.
*/
void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
@@ -1295,7 +1323,7 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
if (e->pipe != pipe)
continue;
DRM_DEBUG("Sending premature vblank event on disable: "
- "wanted %d, current %d\n",
+ "wanted %u, current %u\n",
e->event.sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
@@ -1519,7 +1547,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
return 0;
/* KMS drivers handle this internally */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return 0;
pipe = modeset->crtc;
@@ -1585,7 +1613,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
seq = drm_vblank_count_and_time(dev, pipe, &now);
- DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
+ DRM_DEBUG("event on vblank count %u, current %u, crtc %u\n",
vblwait->request.sequence, seq, pipe);
trace_drm_vblank_event_queued(current->pid, pipe,
@@ -1693,7 +1721,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
}
- DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
+ DRM_DEBUG("waiting on vblank count %u, crtc %u\n",
vblwait->request.sequence, pipe);
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
(((drm_vblank_count(dev, pipe) -
@@ -1708,7 +1736,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
- DRM_DEBUG("returning %d to client\n",
+ DRM_DEBUG("returning %u to client\n",
vblwait->reply.sequence);
} else {
DRM_DEBUG("vblank wait interrupted by signal\n");
@@ -1735,7 +1763,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
if ((seq - e->event.sequence) > (1<<23))
continue;
- DRM_DEBUG("vblank event on %d, current %d\n",
+ DRM_DEBUG("vblank event on %u, current %u\n",
e->event.sequence, seq);
list_del(&e->base.link);
@@ -1826,6 +1854,7 @@ EXPORT_SYMBOL(drm_crtc_handle_vblank);
*/
u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
{
+ WARN_ON_ONCE(dev->max_vblank_count != 0);
return 0;
}
EXPORT_SYMBOL(drm_vblank_no_hw_counter);
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 3187c4bb01cb..45db36cd3d20 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -27,7 +27,8 @@
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_aux_dev.h>
+
+#include "drm_crtc_helper_internal.h"
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 48ac0ebbd663..c901f3c5b269 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -163,7 +163,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
struct drm_master *master = file_priv->master;
int ret = 0;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
++file_priv->lock_count;
@@ -252,7 +252,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (lock->context == DRM_KERNEL_CONTEXT) {
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index af0d471ee246..1160a579e0dc 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -999,6 +999,27 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
/**
+ * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
+ * data used by the interface
+ * @dsi: DSI peripheral device
+ * @format: pixel format
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
+ sizeof(format));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
+
+/**
* mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
* the Tearing Effect output signal of the display module
* @dsi: DSI peripheral device
@@ -1021,25 +1042,53 @@ int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
/**
- * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
- * data used by the interface
+ * mipi_dsi_dcs_set_display_brightness() - sets the brightness value of the
+ * display
* @dsi: DSI peripheral device
- * @format: pixel format
+ * @brightness: brightness value
*
* Return: 0 on success or a negative error code on failure.
*/
-int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
+int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
+ u16 brightness)
{
+ u8 payload[2] = { brightness & 0xff, brightness >> 8 };
ssize_t err;
- err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
- sizeof(format));
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ payload, sizeof(payload));
if (err < 0)
return err;
return 0;
}
-EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness);
+
+/**
+ * mipi_dsi_dcs_get_display_brightness() - gets the current brightness value
+ * of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+ u16 *brightness)
+{
+ ssize_t err;
+
+ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+ brightness, sizeof(*brightness));
+ if (err <= 0) {
+ if (err == 0)
+ err = -ENODATA;
+
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
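/*
 * A minimal sketch (not part of this patch): a DSI panel wiring the new helper
 * into a backlight_ops.update_status callback; the names are hypothetical.
 */
#include <linux/backlight.h>
#include <drm/drm_mipi_dsi.h>

static int foo_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);

	/* the helper sends the value little-endian, low byte first; a read
	 * back via mipi_dsi_dcs_get_display_brightness() is also possible */
	return mipi_dsi_dcs_set_display_brightness(dsi,
						   (u16)bl->props.brightness);
}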
static int mipi_dsi_drv_probe(struct device *dev)
{
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cb39f45d6a16..11d44a1e0ab3 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -46,6 +46,7 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
+#include <linux/interval_tree_generic.h>
/**
* DOC: Overview
@@ -103,6 +104,72 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
u64 end,
enum drm_mm_search_flags flags);
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->start + (node)->size - 1)
+
+INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
+ u64, __subtree_last,
+ START, LAST, static inline, drm_mm_interval_tree)
+
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+{
+ return drm_mm_interval_tree_iter_first(&mm->interval_tree,
+ start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_first);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
+{
+ return drm_mm_interval_tree_iter_next(node, start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_next);
+
+static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node)
+{
+ struct drm_mm *mm = hole_node->mm;
+ struct rb_node **link, *rb;
+ struct drm_mm_node *parent;
+
+ node->__subtree_last = LAST(node);
+
+ if (hole_node->allocated) {
+ rb = &hole_node->rb;
+ while (rb) {
+ parent = rb_entry(rb, struct drm_mm_node, rb);
+ if (parent->__subtree_last >= node->__subtree_last)
+ break;
+
+ parent->__subtree_last = node->__subtree_last;
+ rb = rb_parent(rb);
+ }
+
+ rb = &hole_node->rb;
+ link = &hole_node->rb.rb_right;
+ } else {
+ rb = NULL;
+ link = &mm->interval_tree.rb_node;
+ }
+
+ while (*link) {
+ rb = *link;
+ parent = rb_entry(rb, struct drm_mm_node, rb);
+ if (parent->__subtree_last < node->__subtree_last)
+ parent->__subtree_last = node->__subtree_last;
+ if (node->start < parent->start)
+ link = &parent->rb.rb_left;
+ else
+ link = &parent->rb.rb_right;
+ }
+
+ rb_link_node(&node->rb, rb, link);
+ rb_insert_augmented(&node->rb,
+ &mm->interval_tree,
+ &drm_mm_interval_tree_augment);
+}
+
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
u64 size, unsigned alignment,
@@ -150,9 +217,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
node->color = color;
node->allocated = 1;
- INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
+ drm_mm_interval_tree_add_node(hole_node, node);
+
BUG_ON(node->start + node->size > adj_end);
node->hole_follows = 0;
@@ -178,41 +246,54 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
+ u64 end = node->start + node->size;
struct drm_mm_node *hole;
- u64 end;
- u64 hole_start;
- u64 hole_end;
+ u64 hole_start, hole_end;
- BUG_ON(node == NULL);
+ if (WARN_ON(node->size == 0))
+ return -EINVAL;
end = node->start + node->size;
/* Find the relevant hole to add our node to */
- drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- if (hole_start > node->start || hole_end < end)
- continue;
+ hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
+ node->start, ~(u64)0);
+ if (hole) {
+ if (hole->start < end)
+ return -ENOSPC;
+ } else {
+ hole = list_entry(&mm->head_node.node_list,
+ typeof(*hole), node_list);
+ }
- node->mm = mm;
- node->allocated = 1;
+ hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
+ if (!hole->hole_follows)
+ return -ENOSPC;
- INIT_LIST_HEAD(&node->hole_stack);
- list_add(&node->node_list, &hole->node_list);
+ hole_start = __drm_mm_hole_node_start(hole);
+ hole_end = __drm_mm_hole_node_end(hole);
+ if (hole_start > node->start || hole_end < end)
+ return -ENOSPC;
- if (node->start == hole_start) {
- hole->hole_follows = 0;
- list_del_init(&hole->hole_stack);
- }
+ node->mm = mm;
+ node->allocated = 1;
- node->hole_follows = 0;
- if (end != hole_end) {
- list_add(&node->hole_stack, &mm->hole_stack);
- node->hole_follows = 1;
- }
+ list_add(&node->node_list, &hole->node_list);
- return 0;
+ drm_mm_interval_tree_add_node(hole, node);
+
+ if (node->start == hole_start) {
+ hole->hole_follows = 0;
+ list_del(&hole->hole_stack);
}
- return -ENOSPC;
+ node->hole_follows = 0;
+ if (end != hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
@@ -239,6 +320,9 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
{
struct drm_mm_node *hole_node;
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
hole_node = drm_mm_search_free_generic(mm, size, alignment,
color, sflags);
if (!hole_node)
@@ -299,9 +383,10 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
node->color = color;
node->allocated = 1;
- INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
+ drm_mm_interval_tree_add_node(hole_node, node);
+
BUG_ON(node->start < start);
BUG_ON(node->start < adj_start);
BUG_ON(node->start + node->size > adj_end);
@@ -340,6 +425,9 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
{
struct drm_mm_node *hole_node;
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
start, end, sflags);
@@ -390,6 +478,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
} else
list_move(&prev_node->hole_stack, &mm->hole_stack);
+ drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
node->allocated = 0;
}
@@ -516,11 +605,13 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
list_replace(&old->node_list, &new->node_list);
list_replace(&old->hole_stack, &new->hole_stack);
+ rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
new->hole_follows = old->hole_follows;
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
new->color = old->color;
+ new->__subtree_last = old->__subtree_last;
old->allocated = 0;
new->allocated = 1;
@@ -748,7 +839,6 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- INIT_LIST_HEAD(&mm->head_node.hole_stack);
mm->head_node.hole_follows = 1;
mm->head_node.scanned_block = 0;
mm->head_node.scanned_prev_free = 0;
@@ -758,6 +848,8 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+ mm->interval_tree = RB_ROOT;
+
mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
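With the interval tree wired into drm_mm, range queries over allocated nodes no longer need a linear list walk. A hedged sketch of iterating every node overlapping [start, last] with the two helpers exported above (the logging callback is illustrative):

static void log_nodes_in_range(struct drm_mm *mm, u64 start, u64 last)
{
	struct drm_mm_node *node;

	/* O(log n) descent to the first overlap, then in-order successors. */
	for (node = drm_mm_interval_first(mm, start, last);
	     node;
	     node = drm_mm_interval_next(node, start, last))
		pr_debug("node [%llx, %llx]\n",
			 node->start, node->start + node->size - 1);
}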
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
new file mode 100644
index 000000000000..9f17085b1fdd
--- /dev/null
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_mode_object.h>
+
+#include "drm_crtc_internal.h"
+
+/*
+ * Internal function to assign a slot in the object idr and optionally
+ * register the object into the idr.
+ */
+int drm_mode_object_get_reg(struct drm_device *dev,
+ struct drm_mode_object *obj,
+ uint32_t obj_type,
+ bool register_obj,
+ void (*obj_free_cb)(struct kref *kref))
+{
+ int ret;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = ret;
+ obj->type = obj_type;
+ if (obj_free_cb) {
+ obj->free_cb = obj_free_cb;
+ kref_init(&obj->refcount);
+ }
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * drm_mode_object_get - allocate a new modeset identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
+ *
+ * Create a unique identifier based on @ptr in @dev's identifier space. Used
+ * for tracking modes, CRTCs and connectors. Note that despite the _get postfix,
+ * modeset identifiers are _not_ reference counted. Hence don't use this for
+ * reference counted modeset objects like framebuffers.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_object_get(struct drm_device *dev,
+ struct drm_mode_object *obj, uint32_t obj_type)
+{
+ return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
+}
+
+void drm_mode_object_register(struct drm_device *dev,
+ struct drm_mode_object *obj)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+/**
+ * drm_mode_object_unregister - free a modeset identifier
+ * @dev: DRM device
+ * @object: object to free
+ *
+ * Free @object's id from @dev's unique identifier pool.
+ * This function can be called multiple times, and guards against
+ * multiple removals.
+ * These modeset identifiers are _not_ reference counted. Hence don't use this
+ * for reference counted modeset objects like framebuffers.
+ */
+void drm_mode_object_unregister(struct drm_device *dev,
+ struct drm_mode_object *object)
+{
+ mutex_lock(&dev->mode_config.idr_mutex);
+ if (object->id) {
+ idr_remove(&dev->mode_config.crtc_idr, object->id);
+ object->id = 0;
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj = NULL;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ obj = idr_find(&dev->mode_config.crtc_idr, id);
+ if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
+ obj = NULL;
+ if (obj && obj->id != id)
+ obj = NULL;
+
+ if (obj && obj->free_cb) {
+ if (!kref_get_unless_zero(&obj->refcount))
+ obj = NULL;
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return obj;
+}
+
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * This function is used to look up a modeset object. It will acquire a
+ * reference for reference counted objects. This reference must be dropped again
+ * by calling drm_mode_object_unreference().
+ */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
+{
+ struct drm_mode_object *obj = NULL;
+
+ obj = __drm_mode_object_find(dev, id, type);
+ return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
+
+/**
+ * drm_mode_object_unreference - decr the object refcnt
+ * @obj: mode_object
+ *
+ * This function decrements the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. This is used to drop references
+ * acquired with drm_mode_object_reference().
+ */
+void drm_mode_object_unreference(struct drm_mode_object *obj)
+{
+ if (obj->free_cb) {
+ DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+ kref_put(&obj->refcount, obj->free_cb);
+ }
+}
+EXPORT_SYMBOL(drm_mode_object_unreference);
+
+/**
+ * drm_mode_object_reference - incr the object refcnt
+ * @obj: mode_object
+ *
+ * This function increments the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. References should be dropped again
+ * by calling drm_mode_object_unreference().
+ */
+void drm_mode_object_reference(struct drm_mode_object *obj)
+{
+ if (obj->free_cb) {
+ DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+ kref_get(&obj->refcount);
+ }
+}
+EXPORT_SYMBOL(drm_mode_object_reference);
+
+/**
+ * drm_object_attach_property - attach a property to a modeset object
+ * @obj: drm modeset object
+ * @property: property to attach
+ * @init_val: initial value of the property
+ *
+ * This attaches the given property to the modeset object with the given initial
+ * value. Currently this function cannot fail since the properties are stored in
+ * a statically sized array.
+ */
+void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
+{
+ int count = obj->properties->count;
+
+ if (count == DRM_OBJECT_MAX_PROPERTY) {
+ WARN(1, "Failed to attach object property (type: 0x%x). Please "
+ "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+ "you see this message on the same object type.\n",
+ obj->type);
+ return;
+ }
+
+ obj->properties->properties[count] = property;
+ obj->properties->values[count] = init_val;
+ obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
+/**
+ * drm_object_property_set_value - set the value of a property
+ * @obj: drm mode object to set property value for
+ * @property: property to set
+ * @val: value the property should be set to
+ *
+ * This function sets a given property on a given object. This function only
+ * changes the software state of the property, it does not call into the
+ * driver's ->set_property callback.
+ *
+ * Note that atomic drivers should not have any need to call this; the core will
+ * ensure consistency of values reported back to userspace through the
+ * appropriate ->atomic_get_property callback. Only legacy drivers should call
+ * this function to update the tracked value (after clamping and other
+ * restrictions have been applied).
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
+{
+ int i;
+
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->properties[i] == property) {
+ obj->properties->values[i] = val;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_set_value);
+
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the software state of the given property for the given
+ * object. Since there is no driver callback to retrieve the current property
+ * value this might be out of sync with the hardware, depending upon the driver
+ * and property.
+ *
+ * Atomic drivers should never call this function directly; the core will read
+ * out property values through the various ->atomic_get_property callbacks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+{
+ int i;
+
+ /* read-only properties bypass atomic mechanism and still store
+ * their value in obj->properties->values[].. mostly to avoid
+ * having to deal w/ EDID and similar props in atomic paths:
+ */
+ if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
+ !(property->flags & DRM_MODE_PROP_IMMUTABLE))
+ return drm_atomic_get_property(obj, property, val);
+
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->properties[i] == property) {
+ *val = obj->properties->values[i];
+ return 0;
+ }
+
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_get_value);
+
+/* helper for getconnector and getproperties ioctls */
+int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+ uint32_t __user *prop_ptr,
+ uint64_t __user *prop_values,
+ uint32_t *arg_count_props)
+{
+ int i, ret, count;
+
+ for (i = 0, count = 0; i < obj->properties->count; i++) {
+ struct drm_property *prop = obj->properties->properties[i];
+ uint64_t val;
+
+ if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
+ continue;
+
+ if (*arg_count_props > count) {
+ ret = drm_object_property_get_value(obj, prop, &val);
+ if (ret)
+ return ret;
+
+ if (put_user(prop->base.id, prop_ptr + count))
+ return -EFAULT;
+
+ if (put_user(val, prop_values + count))
+ return -EFAULT;
+ }
+
+ count++;
+ }
+ *arg_count_props = count;
+
+ return 0;
+}
+
+/**
+ * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * This function retrieves the current value for an object's property. Compared
+ * to the connector specific ioctl this one is extended to also work on crtc and
+ * plane objects.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_get_properties *arg = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+ if (!obj->properties) {
+ ret = -EINVAL;
+ goto out_unref;
+ }
+
+ ret = drm_mode_object_get_properties(obj, file_priv->atomic,
+ (uint32_t __user *)(unsigned long)(arg->props_ptr),
+ (uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
+ &arg->count_props);
+
+out_unref:
+ drm_mode_object_unreference(obj);
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
+ uint32_t prop_id)
+{
+ int i;
+
+ for (i = 0; i < obj->properties->count; i++)
+ if (obj->properties->properties[i]->base.id == prop_id)
+ return obj->properties->properties[i];
+
+ return NULL;
+}
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_set_property *arg = data;
+ struct drm_mode_object *arg_obj;
+ struct drm_property *property;
+ int ret = -EINVAL;
+ struct drm_mode_object *ref;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!arg_obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (!arg_obj->properties)
+ goto out_unref;
+
+ property = drm_mode_obj_find_prop_id(arg_obj, arg->prop_id);
+ if (!property)
+ goto out_unref;
+
+ if (!drm_property_change_valid_get(property, arg->value, &ref))
+ goto out_unref;
+
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+ arg->value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
+ property, arg->value);
+ break;
+ }
+
+ drm_property_change_valid_put(property, ref);
+
+out_unref:
+ drm_mode_object_unreference(arg_obj);
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
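The new file above establishes the lookup/reference discipline for mode objects. A minimal sketch of the expected caller pattern (the id would come from userspace; DRM_MODE_OBJECT_ANY is the wildcard type):

static int inspect_mode_object(struct drm_device *dev, u32 id)
{
	struct drm_mode_object *obj;

	obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ANY);
	if (!obj)
		return -ENOENT;

	/* For refcounted objects (free_cb set) we now hold a reference. */
	pr_debug("object %u has type 0x%x\n", obj->id, obj->type);

	drm_mode_object_unreference(obj);
	return 0;
}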
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc5040ae5f25..53f07ac7c174 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -657,11 +657,36 @@ void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
}
EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
+/**
+ * drm_bus_flags_from_videomode - extract information about pixelclk and
+ * DE polarity from videomode and store it in a separate variable
+ * @vm: videomode structure to use
+ * @bus_flags: information about pixelclk and DE polarity will be stored here
+ *
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE
+ * in @bus_flags according to DISPLAY_FLAGS found in @vm
+ */
+void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
+{
+ *bus_flags = 0;
+ if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
+ *bus_flags |= DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+
+ if (vm->flags & DISPLAY_FLAGS_DE_LOW)
+ *bus_flags |= DRM_BUS_FLAG_DE_LOW;
+ if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
+ *bus_flags |= DRM_BUS_FLAG_DE_HIGH;
+}
+EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode);
+
#ifdef CONFIG_OF
/**
* of_get_drm_display_mode - get a drm_display_mode from devicetree
* @np: device_node with the timing specification
* @dmode: will be set to the return value
+ * @bus_flags: information about pixelclk and DE polarity
* @index: index into the list of display timings in devicetree
*
 * This function is expensive and should only be used if only one mode is to be
@@ -672,7 +697,8 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
 * 0 on success, a negative errno code when no OF videomode node was found.
*/
int of_get_drm_display_mode(struct device_node *np,
- struct drm_display_mode *dmode, int index)
+ struct drm_display_mode *dmode, u32 *bus_flags,
+ int index)
{
struct videomode vm;
int ret;
@@ -682,6 +708,8 @@ int of_get_drm_display_mode(struct device_node *np,
return ret;
drm_display_mode_from_videomode(&vm, dmode);
+ if (bus_flags)
+ drm_bus_flags_from_videomode(&vm, bus_flags);
pr_debug("%s: got %dx%d display mode from %s\n",
of_node_full_name(np), vm.hactive, vm.vactive, np->name);
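Existing callers of of_get_drm_display_mode() gain an optional output parameter; passing NULL keeps the old behaviour. A sketch of an updated call site (np is assumed to be the panel's device node; OF_USE_NATIVE_MODE selects the native timing):

static int probe_panel_timing(struct device_node *np,
			      struct drm_display_mode *mode)
{
	u32 bus_flags;
	int ret;

	ret = of_get_drm_display_mode(np, mode, &bus_flags,
				      OF_USE_NATIVE_MODE);
	if (ret)
		return ret;

	/* bus_flags now carries DRM_BUS_FLAG_DE_* / _PIXDATA_* bits. */
	return 0;
}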
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
new file mode 100644
index 000000000000..1d45738f8f98
--- /dev/null
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_plane_helper.h>
+
+/**
+ * DOC: aux kms helpers
+ *
+ * This helper library contains various one-off functions which don't really fit
+ * anywhere else in the DRM modeset helper library.
+ */
+
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+ struct drm_connector *connector, *tmp;
+ struct list_head panel_list;
+
+ INIT_LIST_HEAD(&panel_list);
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ list_move_tail(&connector->head, &panel_list);
+ }
+
+ list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
+
+/**
+ * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata
+ * @fb: drm_framebuffer object to fill out
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * This helper can be used in a driver's fb_create callback to pre-fill the fb's
+ * metadata fields.
+ */
+void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ int i;
+
+ fb->width = mode_cmd->width;
+ fb->height = mode_cmd->height;
+ for (i = 0; i < 4; i++) {
+ fb->pitches[i] = mode_cmd->pitches[i];
+ fb->offsets[i] = mode_cmd->offsets[i];
+ fb->modifier[i] = mode_cmd->modifier[i];
+ }
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
+ fb->flags = mode_cmd->flags;
+}
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
+
+/*
+ * This is the minimal list of formats that seem to be safe for modeset use
+ * with all current DRM drivers. Most hardware can actually support more
+ * formats than this and drivers may specify a more accurate list when
+ * creating the primary plane. However drivers that still call
+ * drm_plane_init() will use this minimal format list as the default.
+ */
+static const uint32_t safe_modeset_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
+static struct drm_plane *create_primary_plane(struct drm_device *dev)
+{
+ struct drm_plane *primary;
+ int ret;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (primary == NULL) {
+ DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+ return NULL;
+ }
+
+ /*
+ * Remove the format_default field from drm_plane when dropping
+ * this helper.
+ */
+ primary->format_default = true;
+
+	/* possible_crtcs will be filled in later by crtc_init */
+ ret = drm_universal_plane_init(dev, primary, 0,
+ &drm_primary_helper_funcs,
+ safe_modeset_formats,
+ ARRAY_SIZE(safe_modeset_formats),
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ kfree(primary);
+ primary = NULL;
+ }
+
+ return primary;
+}
+
+/**
+ * drm_crtc_init - Legacy CRTC initialization function
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * Initialize a CRTC object with a default helper-provided primary plane and no
+ * cursor plane.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs)
+{
+ struct drm_plane *primary;
+
+ primary = create_primary_plane(dev);
+ return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
+ NULL);
+}
+EXPORT_SYMBOL(drm_crtc_init);
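A sketch of drm_helper_mode_fill_fb_struct() in a driver's fb_create hook; the fb allocation strategy and the my_fb_funcs table are hypothetical placeholders:

static struct drm_framebuffer *
my_fb_create(struct drm_device *dev, struct drm_file *file,
	     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	/* Pre-fill width/height/pitches/offsets/format from the request. */
	drm_helper_mode_fill_fb_struct(fb, mode_cmd);

	ret = drm_framebuffer_init(dev, fb, &my_fb_funcs);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}

	return fb;
}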
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index b2f8f1062d5f..3ceea9cb9d3e 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -175,7 +175,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
struct drm_irq_busid *p = data;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
/* UMS was only ever support on PCI devices. */
@@ -236,8 +236,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
DRM_DEBUG("\n");
dev = drm_dev_alloc(driver, &pdev->dev);
- if (!dev)
- return -ENOMEM;
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
ret = pci_enable_device(pdev);
if (ret)
@@ -263,7 +263,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
/* No locking needed since shadow-attach is single-threaded since it may
* only be called from the per-driver module init hook. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list);
return 0;
@@ -299,7 +299,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_DEBUG("\n");
- if (driver->driver_features & DRIVER_MODESET)
+ if (!(driver->driver_features & DRIVER_LEGACY))
return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
@@ -421,7 +421,7 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
- if (driver->driver_features & DRIVER_MODESET) {
+ if (!(driver->driver_features & DRIVER_LEGACY)) {
pci_unregister_driver(pdriver);
} else {
list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
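The ENOMEM hunk above reflects that drm_dev_alloc() now reports failure as an ERR_PTR-encoded error rather than NULL. A condensed sketch of the pattern every caller follows (my_driver and the probe function are placeholders):

static int my_probe(struct platform_device *pdev)
{
	struct drm_device *ddev;

	/* A NULL check is no longer enough: errors are ERR_PTR-encoded. */
	ddev = drm_dev_alloc(&my_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	return drm_dev_register(ddev, 0);
}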
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
new file mode 100644
index 000000000000..249c0ae52c6d
--- /dev/null
+++ b/drivers/gpu/drm/drm_plane.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_plane.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * A plane represents an image source that can be blended with or overlaid on
+ * top of a CRTC during the scanout process. Planes take their input data from a
+ * &drm_framebuffer object. The plane itself specifies the cropping and scaling
+ * of that image, and where it is placed on the visible area of a display
+ * pipeline, represented by &drm_crtc. A plane can also have additional
+ * properties that specify how the pixels are positioned and blended, like
+ * rotation or Z-position. All these properties are stored in &drm_plane_state.
+ *
+ * To create a plane, a KMS driver allocates and zeroes an instance of
+ * struct &drm_plane (possibly as part of a larger structure) and registers it
+ * with a call to drm_universal_plane_init().
+ *
+ * Cursor and overlay planes are optional. All drivers should provide one
+ * primary plane per CRTC to avoid surprising userspace too much. See enum
+ * &drm_plane_type for a more in-depth discussion of these special uapi-relevant
+ * plane types. Special planes are associated with their CRTC by calling
+ * drm_crtc_init_with_planes().
+ *
+ * The type of a plane is exposed in the immutable "type" enumeration property,
+ * which has one of the following values: "Overlay", "Primary", "Cursor".
+ */
+
+static unsigned int drm_num_planes(struct drm_device *dev)
+{
+ unsigned int num = 0;
+ struct drm_plane *tmp;
+
+ drm_for_each_plane(tmp, dev) {
+ num++;
+ }
+
+ return num;
+}
+
+/**
+ * drm_universal_plane_init - Initialize a new universal plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Initializes a plane object of type @type.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ enum drm_plane_type type,
+ const char *name, ...)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret;
+
+ ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ if (ret)
+ return ret;
+
+ drm_modeset_lock_init(&plane->mutex);
+
+ plane->base.properties = &plane->properties;
+ plane->dev = dev;
+ plane->funcs = funcs;
+ plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!plane->format_types) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ drm_mode_object_unregister(dev, &plane->base);
+ return -ENOMEM;
+ }
+
+ if (name) {
+ va_list ap;
+
+ va_start(ap, name);
+ plane->name = kvasprintf(GFP_KERNEL, name, ap);
+ va_end(ap);
+ } else {
+ plane->name = kasprintf(GFP_KERNEL, "plane-%d",
+ drm_num_planes(dev));
+ }
+ if (!plane->name) {
+ kfree(plane->format_types);
+ drm_mode_object_unregister(dev, &plane->base);
+ return -ENOMEM;
+ }
+
+ memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = possible_crtcs;
+ plane->type = type;
+
+ list_add_tail(&plane->head, &config->plane_list);
+ plane->index = config->num_total_plane++;
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+ config->num_overlay_plane++;
+
+ drm_object_attach_property(&plane->base,
+ config->plane_type_property,
+ plane->type);
+
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
+ drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_x, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_y, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_w, 0);
+ drm_object_attach_property(&plane->base, config->prop_src_h, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_universal_plane_init);
+
+int drm_plane_register_all(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+ int ret = 0;
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->funcs->late_register)
+ ret = plane->funcs->late_register(plane);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void drm_plane_unregister_all(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->funcs->early_unregister)
+ plane->funcs->early_unregister(plane);
+ }
+}
+
+/**
+ * drm_plane_init - Initialize a legacy plane
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @is_primary: plane type (primary vs overlay)
+ *
+ * Legacy API to initialize a DRM plane.
+ *
+ * New drivers should call drm_universal_plane_init() instead.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ bool is_primary)
+{
+ enum drm_plane_type type;
+
+ type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, type, NULL);
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+
+ drm_modeset_lock_all(dev);
+ kfree(plane->format_types);
+ drm_mode_object_unregister(dev, &plane->base);
+
+ BUG_ON(list_empty(&plane->head));
+
+ /* Note that the plane_list is considered to be static; should we
+ * remove the drm_plane at runtime we would have to decrement all
+ * the indices on the drm_plane after us in the plane_list.
+ */
+
+ list_del(&plane->head);
+ dev->mode_config.num_total_plane--;
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+ dev->mode_config.num_overlay_plane--;
+ drm_modeset_unlock_all(dev);
+
+ WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
+ if (plane->state && plane->funcs->atomic_destroy_state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+
+ kfree(plane->name);
+
+ memset(plane, 0, sizeof(*plane));
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
+/**
+ * drm_plane_from_index - find the registered plane at an index
+ * @dev: DRM device
+ * @idx: index of the registered plane to find
+ *
+ * Given a plane index, return the registered plane from DRM device's
+ * list of planes with matching index.
+ */
+struct drm_plane *
+drm_plane_from_index(struct drm_device *dev, int idx)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev)
+ if (idx == plane->index)
+ return plane;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_plane_from_index);
+
+/**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+ int ret;
+
+ if (!plane->fb)
+ return;
+
+ plane->old_fb = plane->fb;
+ ret = plane->funcs->disable_plane(plane);
+ if (ret) {
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ plane->old_fb = NULL;
+ return;
+ }
+ /* disconnect the plane from the fb and crtc: */
+ drm_framebuffer_unreference(plane->old_fb);
+ plane->old_fb = NULL;
+ plane->fb = NULL;
+ plane->crtc = NULL;
+}
+EXPORT_SYMBOL(drm_plane_force_disable);
+
+/**
+ * drm_mode_plane_set_obj_prop - set the value of a property
+ * @plane: drm plane object to set property value for
+ * @property: property to set
+ * @value: value the property should be set to
+ *
+ * This function sets a given property on a given plane object. This function
+ * calls the driver's ->set_property callback and changes the software state of
+ * the property if the callback succeeds.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_mode_object *obj = &plane->base;
+
+ if (plane->funcs->set_property)
+ ret = plane->funcs->set_property(plane, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
+
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t __user *plane_ptr;
+ int copied = 0;
+ unsigned num_planes;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ config = &dev->mode_config;
+
+ if (file_priv->universal_planes)
+ num_planes = config->num_total_plane;
+ else
+ num_planes = config->num_overlay_plane;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (num_planes &&
+ (plane_resp->count_planes >= num_planes)) {
+ plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+ /* Plane lists are invariant, no locking needed. */
+ drm_for_each_plane(plane, dev) {
+ /*
+ * Unless userspace set the 'universal planes'
+ * capability bit, only advertise overlays.
+ */
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
+ !file_priv->universal_planes)
+ continue;
+
+ if (put_user(plane->base.id, plane_ptr + copied))
+ return -EFAULT;
+ copied++;
+ }
+ }
+ plane_resp->count_planes = num_planes;
+
+ return 0;
+}
+
+int drm_mode_getplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane *plane_resp = data;
+ struct drm_plane *plane;
+ uint32_t __user *format_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ plane = drm_plane_find(dev, plane_resp->plane_id);
+ if (!plane)
+ return -ENOENT;
+
+ drm_modeset_lock(&plane->mutex, NULL);
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+ plane_resp->crtc_id = 0;
+
+ if (plane->fb)
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
+ drm_modeset_unlock(&plane->mutex);
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->gamma_size = 0;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (plane->format_count &&
+ (plane_resp->count_format_types >= plane->format_count)) {
+ format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+ if (copy_to_user(format_ptr,
+ plane->format_types,
+ sizeof(uint32_t) * plane->format_count)) {
+ return -EFAULT;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+ return 0;
+}
+
+int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
+{
+ unsigned int i;
+
+ for (i = 0; i < plane->format_count; i++) {
+ if (format == plane->format_types[i])
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * setplane_internal - setplane handler for internal callers
+ *
+ * Note that we assume an extra reference has already been taken on fb. If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
+ *
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
+ */
+static int __setplane_internal(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ /* src_{x,y,w,h} values are 16.16 fixed point */
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ int ret = 0;
+
+ /* No fb means shut it down */
+ if (!fb) {
+ plane->old_fb = plane->fb;
+ ret = plane->funcs->disable_plane(plane);
+ if (!ret) {
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ } else {
+ plane->old_fb = NULL;
+ }
+ goto out;
+ }
+
+ /* Check whether this plane is usable on this CRTC */
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+ DRM_DEBUG_KMS("Invalid crtc for plane\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check whether this plane supports the fb pixel format. */
+ ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
+ if (ret) {
+ char *format_name = drm_get_format_name(fb->pixel_format);
+ DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name);
+ kfree(format_name);
+ goto out;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (crtc_w > INT_MAX ||
+ crtc_x > INT_MAX - (int32_t) crtc_w ||
+ crtc_h > INT_MAX ||
+ crtc_y > INT_MAX - (int32_t) crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ crtc_w, crtc_h, crtc_x, crtc_y);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+ if (ret)
+ goto out;
+
+ plane->old_fb = plane->fb;
+ ret = plane->funcs->update_plane(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ if (!ret) {
+ plane->crtc = crtc;
+ plane->fb = fb;
+ fb = NULL;
+ } else {
+ plane->old_fb = NULL;
+ }
+
+out:
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (plane->old_fb)
+ drm_framebuffer_unreference(plane->old_fb);
+ plane->old_fb = NULL;
+
+ return ret;
+}
+
+static int setplane_internal(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ /* src_{x,y,w,h} values are 16.16 fixed point */
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ int ret;
+
+ drm_modeset_lock_all(plane->dev);
+ ret = __setplane_internal(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ drm_modeset_unlock_all(plane->dev);
+
+ return ret;
+}
+
+int drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc = NULL;
+ struct drm_framebuffer *fb = NULL;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ plane = drm_plane_find(dev, plane_req->plane_id);
+ if (!plane) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ return -ENOENT;
+ }
+
+ if (plane_req->fb_id) {
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ return -ENOENT;
+ }
+
+ crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ if (!crtc) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ return -ENOENT;
+ }
+ }
+
+ /*
+ * setplane_internal will take care of deref'ing either the old or new
+ * framebuffer depending on success.
+ */
+ return setplane_internal(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+}
+
+static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+ struct drm_mode_cursor2 *req,
+ struct drm_file *file_priv)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd2 fbreq = {
+ .width = req->width,
+ .height = req->height,
+ .pixel_format = DRM_FORMAT_ARGB8888,
+ .pitches = { req->width * 4 },
+ .handles = { req->handle },
+ };
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w = 0, crtc_h = 0;
+ uint32_t src_w = 0, src_h = 0;
+ int ret = 0;
+
+ BUG_ON(!crtc->cursor);
+ WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
+
+ /*
+ * Obtain fb we'll be using (either new or existing) and take an extra
+ * reference to it if fb != null. setplane will take care of dropping
+ * the reference if the plane update fails.
+ */
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (req->handle) {
+ fb = drm_internal_framebuffer_create(dev, &fbreq, file_priv);
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+ return PTR_ERR(fb);
+ }
+ fb->hot_x = req->hot_x;
+ fb->hot_y = req->hot_y;
+ } else {
+ fb = NULL;
+ }
+ } else {
+ fb = crtc->cursor->fb;
+ if (fb)
+ drm_framebuffer_reference(fb);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ crtc_x = req->x;
+ crtc_y = req->y;
+ } else {
+ crtc_x = crtc->cursor_x;
+ crtc_y = crtc->cursor_y;
+ }
+
+ if (fb) {
+ crtc_w = fb->width;
+ crtc_h = fb->height;
+ src_w = fb->width << 16;
+ src_h = fb->height << 16;
+ }
+
+ /*
+ * setplane_internal will take care of deref'ing either the old or new
+ * framebuffer depending on success.
+ */
+ ret = __setplane_internal(crtc->cursor, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h);
+
+ /* Update successful; save new cursor position, if necessary */
+ if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+ crtc->cursor_x = req->x;
+ crtc->cursor_y = req->y;
+ }
+
+ return ret;
+}
+
+static int drm_mode_cursor_common(struct drm_device *dev,
+ struct drm_mode_cursor2 *req,
+ struct drm_file *file_priv)
+{
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+ return -EINVAL;
+
+ crtc = drm_crtc_find(dev, req->crtc_id);
+ if (!crtc) {
+ DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+ return -ENOENT;
+ }
+
+ /*
+ * If this crtc has a universal cursor plane, call that plane's update
+ * handler rather than using legacy cursor handlers.
+ */
+ drm_modeset_lock_crtc(crtc, crtc->cursor);
+ if (crtc->cursor) {
+ ret = drm_mode_cursor_universal(crtc, req, file_priv);
+ goto out;
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
+ ret = -ENXIO;
+ goto out;
+ }
+ /* Turns off the cursor if handle is 0 */
+ if (crtc->funcs->cursor_set2)
+ ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle,
+ req->width, req->height, req->hot_x, req->hot_y);
+ else
+ ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+ req->width, req->height);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ if (crtc->funcs->cursor_move) {
+ ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+ } else {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+out:
+ drm_modeset_unlock_crtc(crtc);
+
+ return ret;
+
+}
+
+
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_cursor *req = data;
+ struct drm_mode_cursor2 new_req;
+
+ memcpy(&new_req, req, sizeof(struct drm_mode_cursor));
+ new_req.hot_x = new_req.hot_y = 0;
+
+ return drm_mode_cursor_common(dev, &new_req, file_priv);
+}
+
+/*
+ * Set the cursor configuration based on user request. This implements the 2nd
+ * version of the cursor ioctl, which allows userspace to additionally specify
+ * the hotspot of the pointer.
+ */
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_cursor2 *req = data;
+
+ return drm_mode_cursor_common(dev, req, file_priv);
+}
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_page_flip_target *page_flip = data;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_pending_vblank_event *e = NULL;
+ u32 target_vblank = page_flip->sequence;
+ int ret = -EINVAL;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS)
+ return -EINVAL;
+
+ if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET))
+ return -EINVAL;
+
+ /* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags
+ * can be specified
+ */
+ if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET)
+ return -EINVAL;
+
+ if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
+ return -EINVAL;
+
+ crtc = drm_crtc_find(dev, page_flip->crtc_id);
+ if (!crtc)
+ return -ENOENT;
+
+ if (crtc->funcs->page_flip_target) {
+ u32 current_vblank;
+ int r;
+
+ r = drm_crtc_vblank_get(crtc);
+ if (r)
+ return r;
+
+ current_vblank = drm_crtc_vblank_count(crtc);
+
+ switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) {
+ case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
+ if ((int)(target_vblank - current_vblank) > 1) {
+ DRM_DEBUG("Invalid absolute flip target %u, "
+ "must be <= %u\n", target_vblank,
+ current_vblank + 1);
+ drm_crtc_vblank_put(crtc);
+ return -EINVAL;
+ }
+ break;
+ case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE:
+ if (target_vblank != 0 && target_vblank != 1) {
+ DRM_DEBUG("Invalid relative flip target %u, "
+ "must be 0 or 1\n", target_vblank);
+ drm_crtc_vblank_put(crtc);
+ return -EINVAL;
+ }
+ target_vblank += current_vblank;
+ break;
+ default:
+ target_vblank = current_vblank +
+ !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC);
+ break;
+ }
+ } else if (crtc->funcs->page_flip == NULL ||
+ (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) {
+ return -EINVAL;
+ }
+
+ drm_modeset_lock_crtc(crtc, crtc->primary);
+ if (crtc->primary->fb == NULL) {
+ /* The framebuffer is currently unbound, presumably
+ * due to a hotplug event, that userspace has not
+ * yet discovered.
+ */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+ if (!fb) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (crtc->state) {
+ const struct drm_plane_state *state = crtc->primary->state;
+
+ ret = drm_framebuffer_check_src_coords(state->src_x,
+ state->src_y,
+ state->src_w,
+ state->src_h,
+ fb);
+ } else {
+ ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+ }
+ if (ret)
+ goto out;
+
+ if (crtc->primary->fb->pixel_format != fb->pixel_format) {
+ DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (!e) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = page_flip->user_data;
+ ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+ if (ret) {
+ kfree(e);
+ goto out;
+ }
+ }
+
+ crtc->primary->old_fb = crtc->primary->fb;
+ if (crtc->funcs->page_flip_target)
+ ret = crtc->funcs->page_flip_target(crtc, fb, e,
+ page_flip->flags,
+ target_vblank);
+ else
+ ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
+ if (ret) {
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
+ drm_event_cancel_free(dev, &e->base);
+ /* Keep the old fb, don't unref it. */
+ crtc->primary->old_fb = NULL;
+ } else {
+ crtc->primary->fb = fb;
+ /* Unref only the old framebuffer. */
+ fb = NULL;
+ }
+
+out:
+ if (ret && crtc->funcs->page_flip_target)
+ drm_crtc_vblank_put(crtc);
+ if (fb)
+ drm_framebuffer_unreference(fb);
+ if (crtc->primary->old_fb)
+ drm_framebuffer_unreference(crtc->primary->old_fb);
+ crtc->primary->old_fb = NULL;
+ drm_modeset_unlock_crtc(crtc);
+
+ return ret;
+}
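A hedged sketch of registering a primary plane through drm_universal_plane_init() as added above; the format list and the my_plane_funcs table are driver-specific placeholders:

static const uint32_t my_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static int my_init_primary_plane(struct drm_device *dev,
				 struct drm_plane *plane)
{
	/* possible_crtcs = 0x1: usable on the first CRTC only. */
	return drm_universal_plane_init(dev, plane, 0x1,
					&my_plane_funcs,
					my_formats, ARRAY_SIZE(my_formats),
					DRM_PLANE_TYPE_PRIMARY, NULL);
}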
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 16c4a7bd7465..7899fc1dcdb0 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -64,18 +64,6 @@
*/
/*
- * This is the minimal list of formats that seem to be safe for modeset use
- * with all current DRM drivers. Most hardware can actually support more
- * formats than this and drivers may specify a more accurate list when
- * creating the primary plane. However drivers that still call
- * drm_plane_init() will use this minimal format list as the default.
- */
-static const uint32_t safe_modeset_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
-};
-
-/*
* Returns the connectors currently associated with a CRTC. This function
* should be called twice: once with a NULL connector list to retrieve
* the list size, and once with the properly allocated list to be filled in.
@@ -108,14 +96,9 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
}
/**
- * drm_plane_helper_check_update() - Check plane update for validity
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @src: source coordinates in 16.16 fixed point
- * @dest: integer destination coordinates
+ * drm_plane_helper_check_state() - Check plane state for validity
+ * @state: plane state to check
* @clip: integer clipping coordinates
- * @rotation: plane rotation
* @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
* @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
* @can_position: is it legal to position the plane such that it
@@ -123,10 +106,9 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
* only be false for primary planes.
* @can_update_disabled: can the plane be updated while the crtc
* is disabled?
- * @visible: output parameter indicating whether plane is still visible after
- * clipping
*
- * Checks that a desired plane update is valid. Drivers that provide
+ * Checks that a desired plane update is valid, and updates various
+ * bits of derived state (clipped coordinates etc.). Drivers that provide
* their own plane handling rather than helper-provided implementations may
* still wish to call this function to avoid duplication of error checking
* code.
@@ -134,29 +116,38 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
* RETURNS:
* Zero if update appears valid, error code on failure
*/
-int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dest,
- const struct drm_rect *clip,
- unsigned int rotation,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible)
+int drm_plane_helper_check_state(struct drm_plane_state *state,
+ const struct drm_rect *clip,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled)
{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect *src = &state->src;
+ struct drm_rect *dst = &state->dst;
+ unsigned int rotation = state->rotation;
int hscale, vscale;
+ src->x1 = state->src_x;
+ src->y1 = state->src_y;
+ src->x2 = state->src_x + state->src_w;
+ src->y2 = state->src_y + state->src_h;
+
+ dst->x1 = state->crtc_x;
+ dst->y1 = state->crtc_y;
+ dst->x2 = state->crtc_x + state->crtc_w;
+ dst->y2 = state->crtc_y + state->crtc_h;
+
if (!fb) {
- *visible = false;
+ state->visible = false;
return 0;
}
/* crtc should only be NULL when disabling (i.e., !fb) */
if (WARN_ON(!crtc)) {
- *visible = false;
+ state->visible = false;
return 0;
}
@@ -168,20 +159,20 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
/* Check scaling */
- hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
- vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
+ hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
+ vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
DRM_DEBUG_KMS("Invalid scaling of plane\n");
- drm_rect_debug_print("src: ", src, true);
- drm_rect_debug_print("dst: ", dest, false);
+ drm_rect_debug_print("src: ", &state->src, true);
+ drm_rect_debug_print("dst: ", &state->dst, false);
return -ERANGE;
}
- *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
+ state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
- if (!*visible)
+ if (!state->visible)
/*
* Plane isn't visible; some drivers can handle this
* so we just return success here. Drivers that can't
@@ -191,15 +182,87 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
*/
return 0;
- if (!can_position && !drm_rect_equals(dest, clip)) {
+ if (!can_position && !drm_rect_equals(dst, clip)) {
DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
- drm_rect_debug_print("dst: ", dest, false);
+ drm_rect_debug_print("dst: ", dst, false);
drm_rect_debug_print("clip: ", clip, false);
return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL(drm_plane_helper_check_state);
+
+/**
+ * drm_plane_helper_check_update() - Check plane update for validity
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @src: source coordinates in 16.16 fixed point
+ * @dst: integer destination coordinates
+ * @clip: integer clipping coordinates
+ * @rotation: plane rotation
+ * @min_scale: minimum @src:@dst scaling factor in 16.16 fixed point
+ * @max_scale: maximum @src:@dst scaling factor in 16.16 fixed point
+ * @can_position: is it legal to position the plane such that it
+ * doesn't cover the entire crtc? This will generally
+ * only be false for primary planes.
+ * @can_update_disabled: can the plane be updated while the crtc
+ * is disabled?
+ * @visible: output parameter indicating whether plane is still visible after
+ * clipping
+ *
+ * Checks that a desired plane update is valid. Drivers that provide
+ * their own plane handling rather than helper-provided implementations may
+ * still wish to call this function to avoid duplication of error checking
+ * code.
+ *
+ * RETURNS:
+ * Zero if update appears valid, error code on failure
+ */
+int drm_plane_helper_check_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dst,
+ const struct drm_rect *clip,
+ unsigned int rotation,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible)
+{
+ struct drm_plane_state state = {
+ .plane = plane,
+ .crtc = crtc,
+ .fb = fb,
+ .src_x = src->x1,
+ .src_y = src->y1,
+ .src_w = drm_rect_width(src),
+ .src_h = drm_rect_height(src),
+ .crtc_x = dst->x1,
+ .crtc_y = dst->y1,
+ .crtc_w = drm_rect_width(dst),
+ .crtc_h = drm_rect_height(dst),
+ .rotation = rotation,
+ .visible = *visible,
+ };
+ int ret;
+
+ ret = drm_plane_helper_check_state(&state, clip,
+ min_scale, max_scale,
+ can_position,
+ can_update_disabled);
+ if (ret)
+ return ret;
+
+ *src = state.src;
+ *dst = state.dst;
+ *visible = state.visible;
+
+ return 0;
+}
EXPORT_SYMBOL(drm_plane_helper_check_update);
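
For drivers converting over, a sketch of an atomic_check hook built on the
new drm_plane_helper_check_state() (the foo_* name is hypothetical; the clip
rectangle is derived from the CRTC mode, matching the drm_simple_kms_helper
conversion later in this patch):

	#include <drm/drm_atomic.h>
	#include <drm/drm_plane_helper.h>

	static int foo_plane_atomic_check(struct drm_plane *plane,
					  struct drm_plane_state *state)
	{
		struct drm_crtc_state *crtc_state;
		struct drm_rect clip = { 0 };

		if (!state->crtc)
			return 0;

		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);
		if (WARN_ON(!crtc_state))
			return -EINVAL;

		/* Clip against the active display area of the CRTC. */
		clip.x2 = crtc_state->adjusted_mode.hdisplay;
		clip.y2 = crtc_state->adjusted_mode.vdisplay;

		return drm_plane_helper_check_state(state, &clip,
						    DRM_PLANE_HELPER_NO_SCALING,
						    DRM_PLANE_HELPER_NO_SCALING,
						    false, true);
	}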
/**
@@ -274,7 +337,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
- BIT(DRM_ROTATE_0),
+ DRM_ROTATE_0,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, false, &visible);
@@ -363,60 +426,6 @@ const struct drm_plane_funcs drm_primary_helper_funcs = {
};
EXPORT_SYMBOL(drm_primary_helper_funcs);
-static struct drm_plane *create_primary_plane(struct drm_device *dev)
-{
- struct drm_plane *primary;
- int ret;
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- /*
- * Remove the format_default field from drm_plane when dropping
- * this helper.
- */
- primary->format_default = true;
-
- /* possible_crtc's will be filled in later by crtc_init */
- ret = drm_universal_plane_init(dev, primary, 0,
- &drm_primary_helper_funcs,
- safe_modeset_formats,
- ARRAY_SIZE(safe_modeset_formats),
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- primary = NULL;
- }
-
- return primary;
-}
-
-/**
- * drm_crtc_init - Legacy CRTC initialization function
- * @dev: DRM device
- * @crtc: CRTC object to init
- * @funcs: callbacks for the new CRTC
- *
- * Initialize a CRTC object with a default helper-provided primary plane and no
- * cursor plane.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- const struct drm_crtc_funcs *funcs)
-{
- struct drm_plane *primary;
-
- primary = create_primary_plane(dev);
- return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
- NULL);
-}
-EXPORT_SYMBOL(drm_crtc_init);
-
int drm_plane_helper_commit(struct drm_plane *plane,
struct drm_plane_state *plane_state,
struct drm_framebuffer *old_fb)
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 2c819ef90090..026269851ce9 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -48,8 +48,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
DRM_DEBUG("\n");
dev = drm_dev_alloc(driver, &platdev->dev);
- if (!dev)
- return -ENOMEM;
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
dev->platformdev = platdev;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 780589b420a4..b22a94dd7b53 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -28,6 +28,7 @@
#include <linux/export.h>
#include <linux/dma-buf.h>
+#include <linux/rbtree.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
@@ -61,9 +62,11 @@
*/
struct drm_prime_member {
- struct list_head entry;
struct dma_buf *dma_buf;
uint32_t handle;
+
+ struct rb_node dmabuf_rb;
+ struct rb_node handle_rb;
};
struct drm_prime_attachment {
@@ -75,6 +78,7 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
+ struct rb_node **p, *rb;
member = kmalloc(sizeof(*member), GFP_KERNEL);
if (!member)
@@ -83,18 +87,56 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
get_dma_buf(dma_buf);
member->dma_buf = dma_buf;
member->handle = handle;
- list_add(&member->entry, &prime_fpriv->head);
+
+ rb = NULL;
+ p = &prime_fpriv->dmabufs.rb_node;
+ while (*p) {
+ struct drm_prime_member *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
+ if (dma_buf > pos->dma_buf)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&member->dmabuf_rb, rb, p);
+ rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
+
+ rb = NULL;
+ p = &prime_fpriv->handles.rb_node;
+ while (*p) {
+ struct drm_prime_member *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (handle > pos->handle)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&member->handle_rb, rb, p);
+ rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
+
return 0;
}
static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle)
{
- struct drm_prime_member *member;
+ struct rb_node *rb;
+
+ rb = prime_fpriv->handles.rb_node;
+ while (rb) {
+ struct drm_prime_member *member;
- list_for_each_entry(member, &prime_fpriv->head, entry) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
if (member->handle == handle)
return member->dma_buf;
+ else if (member->handle < handle)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
}
return NULL;
@@ -104,14 +146,23 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
struct dma_buf *dma_buf,
uint32_t *handle)
{
- struct drm_prime_member *member;
+ struct rb_node *rb;
+
+ rb = prime_fpriv->dmabufs.rb_node;
+ while (rb) {
+ struct drm_prime_member *member;
- list_for_each_entry(member, &prime_fpriv->head, entry) {
+ member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
if (member->dma_buf == dma_buf) {
*handle = member->handle;
return 0;
+ } else if (member->dma_buf < dma_buf) {
+ rb = rb->rb_right;
+ } else {
+ rb = rb->rb_left;
}
}
+
return -ENOENT;
}
@@ -166,13 +217,24 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf,
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf)
{
- struct drm_prime_member *member, *safe;
+ struct rb_node *rb;
- list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+ rb = prime_fpriv->dmabufs.rb_node;
+ while (rb) {
+ struct drm_prime_member *member;
+
+ member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
if (member->dma_buf == dma_buf) {
+ rb_erase(&member->handle_rb, &prime_fpriv->handles);
+ rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
+
dma_buf_put(dma_buf);
- list_del(&member->entry);
kfree(member);
+ return;
+ } else if (member->dma_buf < dma_buf) {
+ rb = rb->rb_right;
+ } else {
+ rb = rb->rb_left;
}
}
}
@@ -222,18 +284,47 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
}
/**
+ * drm_gem_dmabuf_export - dma_buf export implementation for GEM
+ * @dev: parent device for the exported dmabuf
+ * @exp_info: the export information used by dma_buf_export()
+ *
+ * This wraps dma_buf_export() for use by generic GEM drivers that are using
+ * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
+ * a reference to the drm_device which is released by drm_gem_dmabuf_release().
+ *
+ * Returns the new dmabuf.
+ */
+struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
+ struct dma_buf_export_info *exp_info)
+{
+ struct dma_buf *dma_buf;
+
+ dma_buf = dma_buf_export(exp_info);
+ if (!IS_ERR(dma_buf))
+ drm_dev_ref(dev);
+
+ return dma_buf;
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_export);
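
A sketch of the intended driver-side use, mirroring drm_gem_prime_export()
below but with driver-specific dma_buf ops (foo_dmabuf_ops is hypothetical
and would have to use drm_gem_dmabuf_release() as its release callback):

	static struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
						    struct drm_gem_object *obj,
						    int flags)
	{
		struct dma_buf_export_info exp_info = {
			.exp_name = KBUILD_MODNAME,
			.owner = dev->driver->fops->owner,
			.ops = &foo_dmabuf_ops,
			.size = obj->size,
			.flags = flags,
			.priv = obj,
		};

		/* Takes the drm_device reference paired with the release. */
		return drm_gem_dmabuf_export(dev, &exp_info);
	}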
+
+/**
* drm_gem_dmabuf_release - dma_buf release implementation for GEM
* @dma_buf: buffer to be released
*
* Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
* must use this in their dma_buf ops structure as the release callback.
+ * drm_gem_dmabuf_release() should be used in conjunction with
+ * drm_gem_dmabuf_export().
*/
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
/* drop the reference on the export fd holds */
drm_gem_object_unreference_unlocked(obj);
+
+ drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
@@ -335,19 +426,22 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
* using the PRIME helpers.
*/
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags)
+ struct drm_gem_object *obj,
+ int flags)
{
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &drm_gem_prime_dmabuf_ops;
- exp_info.size = obj->size;
- exp_info.flags = flags;
- exp_info.priv = obj;
+ struct dma_buf_export_info exp_info = {
+ .exp_name = KBUILD_MODNAME, /* white lie for debug */
+ .owner = dev->driver->fops->owner,
+ .ops = &drm_gem_prime_dmabuf_ops,
+ .size = obj->size,
+ .flags = flags,
+ .priv = obj,
+ };
if (dev->driver->gem_prime_res_obj)
exp_info.resv = dev->driver->gem_prime_res_obj(obj);
- return dma_buf_export(&exp_info);
+ return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
@@ -759,12 +853,13 @@ EXPORT_SYMBOL(drm_prime_gem_destroy);
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
- INIT_LIST_HEAD(&prime_fpriv->head);
mutex_init(&prime_fpriv->lock);
+ prime_fpriv->dmabufs = RB_ROOT;
+ prime_fpriv->handles = RB_ROOT;
}
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
/* by now drm_gem_release should've made sure the list is empty */
- WARN_ON(!list_empty(&prime_fpriv->head));
+ WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a0df377d7d1c..f6b64d7d3528 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -129,6 +129,7 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
+ unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -141,8 +142,13 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
poll = true;
}
+ if (dev->mode_config.delayed_event) {
+ poll = true;
+ delay = 0;
+ }
+
if (poll)
- schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+ schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
new file mode 100644
index 000000000000..a4d81cf4ffa0
--- /dev/null
+++ b/drivers/gpu/drm/drm_property.c
@@ -0,0 +1,912 @@
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_property.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * Properties as represented by &drm_property are used to extend the modeset
+ * interface exposed to userspace. For the atomic modeset IOCTL properties are
+ * even the only way to transport metadata about the desired new modeset
+ * configuration from userspace to the kernel. Properties have a well-defined
+ * value range, which is enforced by the drm core. See the documentation of the
+ * flags member of struct &drm_property for an overview of the different
+ * property types and ranges.
+ *
+ * Properties don't store the current value directly, but need to be
+ * instantiated by attaching them to a &drm_mode_object with
+ * drm_object_attach_property().
+ *
+ * Property values are only 64 bits. To support bigger piles of data (like
+ * gamma tables, color correction matrices or large structures) a property can
+ * instead point at a &drm_property_blob with that additional data.
+ *
+ * Properties are defined by their symbolic name; userspace must keep a
+ * per-object mapping from those names to the property ID used in the atomic
+ * IOCTL and in the get/set property IOCTL.
+ */
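
As a concrete sketch of the lifecycle described above (the "zpos" name and
range are made up for illustration, not part of the patch): create the
property once at init time, then instantiate it by attaching it to a mode
object with an initial value.

	#include <drm/drm_crtc.h>
	#include <drm/drm_property.h>

	/* Hypothetical driver init: a 0..255 range property on a plane. */
	static int foo_create_zpos_prop(struct drm_device *dev,
					struct drm_plane *plane)
	{
		struct drm_property *prop;

		prop = drm_property_create_range(dev, 0, "zpos", 0, 255);
		if (!prop)
			return -ENOMEM;

		/* Instantiates the property with an initial value of 0. */
		drm_object_attach_property(&plane->base, prop, 0);
		return 0;
	}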
+
+static bool drm_property_type_valid(struct drm_property *property)
+{
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+ return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+}
+
+/**
+ * drm_property_create - create a new property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values)
+{
+ struct drm_property *property = NULL;
+ int ret;
+
+ property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+ if (!property)
+ return NULL;
+
+ property->dev = dev;
+
+ if (num_values) {
+ property->values = kcalloc(num_values, sizeof(uint64_t),
+ GFP_KERNEL);
+ if (!property->values)
+ goto fail;
+ }
+
+ ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+ if (ret)
+ goto fail;
+
+ property->flags = flags;
+ property->num_values = num_values;
+ INIT_LIST_HEAD(&property->enum_list);
+
+ if (name) {
+ strncpy(property->name, name, DRM_PROP_NAME_LEN);
+ property->name[DRM_PROP_NAME_LEN-1] = '\0';
+ }
+
+ list_add_tail(&property->head, &dev->mode_config.property_list);
+
+ WARN_ON(!drm_property_type_valid(property));
+
+ return property;
+fail:
+ kfree(property->values);
+ kfree(property);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+/**
+ * drm_property_create_enum - create a new enumeration property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property values
+ * @num_values: number of pre-defined values
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is only allowed to set one of the predefined values for enumeration
+ * properties.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_ENUM;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_enum);
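
A hypothetical fragment (dev in scope, names made up) showing the enum-list
plumbing; userspace may then pick exactly one of these values:

	static const struct drm_prop_enum_list foo_dither_list[] = {
		{ 0, "off" },
		{ 1, "on" },
		{ 2, "auto" },
	};

	struct drm_property *prop;

	prop = drm_property_create_enum(dev, 0, "dithering",
					foo_dither_list,
					ARRAY_SIZE(foo_dither_list));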
+
+/**
+ * drm_property_create_bitmask - create a new bitmask property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @props: enumeration lists with property bitflags
+ * @num_props: size of the @props array
+ * @supported_bits: bitmask of all supported enumeration values
+ *
+ * This creates a new bitmask drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Compared to plain enumeration properties, userspace is allowed to set any
+ * or'ed together combination of the predefined property bitflag values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_props,
+ uint64_t supported_bits)
+{
+ struct drm_property *property;
+ int i, ret, index = 0;
+ int num_values = hweight64(supported_bits);
+
+ flags |= DRM_MODE_PROP_BITMASK;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+ for (i = 0; i < num_props; i++) {
+ if (!(supported_bits & (1ULL << props[i].type)))
+ continue;
+
+ if (WARN_ON(index >= num_values)) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+
+ ret = drm_property_add_enum(property, index++,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
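
And the bitmask flavour, again with made-up names; note how @supported_bits
selects which entries of @props are actually used:

	static const struct drm_prop_enum_list foo_flag_list[] = {
		{ 0, "flag-a" },
		{ 1, "flag-b" },
	};

	prop = drm_property_create_bitmask(dev, 0, "foo-flags",
					   foo_flag_list,
					   ARRAY_SIZE(foo_flag_list),
					   BIT(0) | BIT(1));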
+
+static struct drm_property *property_create_range(struct drm_device *dev,
+ int flags, const char *name,
+ uint64_t min, uint64_t max)
+{
+ struct drm_property *property;
+
+ property = drm_property_create(dev, flags, name, 2);
+ if (!property)
+ return NULL;
+
+ property->values[0] = min;
+ property->values[1] = max;
+
+ return property;
+}
+
+/**
+ * drm_property_create_range - create a new unsigned ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is allowed to set any unsigned integer value in the [@min, @max]
+ * range, inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max)
+{
+ return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
+ name, min, max);
+}
+EXPORT_SYMBOL(drm_property_create_range);
+
+/**
+ * drm_property_create_signed_range - create a new signed ranged property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @min: minimum value of the property
+ * @max: maximum value of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is allowed to set any signed integer value in the [@min, @max]
+ * range, inclusive.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+ int flags, const char *name,
+ int64_t min, int64_t max)
+{
+ return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
+ name, I642U64(min), I642U64(max));
+}
+EXPORT_SYMBOL(drm_property_create_signed_range);
+
+/**
+ * drm_property_create_object - create a new object property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ * @type: object type from DRM_MODE_OBJECT_* defines
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * Userspace is only allowed to set this property to object IDs of the given
+ * @type. Only useful for atomic properties, which is enforced.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+ int flags, const char *name,
+ uint32_t type)
+{
+ struct drm_property *property;
+
+ flags |= DRM_MODE_PROP_OBJECT;
+
+ if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
+ return NULL;
+
+ property = drm_property_create(dev, flags, name, 1);
+ if (!property)
+ return NULL;
+
+ property->values[0] = type;
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_object);
+
+/**
+ * drm_property_create_bool - create a new boolean property type
+ * @dev: drm device
+ * @flags: flags specifying the property type
+ * @name: name of the property
+ *
+ * This creates a new generic drm property which can then be attached to a drm
+ * object with drm_object_attach_property. The returned property object must be
+ * freed with drm_property_destroy(), which is done automatically when calling
+ * drm_mode_config_cleanup().
+ *
+ * This is implemented as a ranged property with only {0, 1} as valid values.
+ *
+ * Returns:
+ * A pointer to the newly created property on success, NULL on failure.
+ */
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+ const char *name)
+{
+ return drm_property_create_range(dev, flags, name, 0, 1);
+}
+EXPORT_SYMBOL(drm_property_create_bool);
+
+/**
+ * drm_property_add_enum - add a possible value to an enumeration property
+ * @property: enumeration property to change
+ * @index: index of the new enumeration
+ * @value: value of the new enumeration
+ * @name: symbolic name of the new enumeration
+ *
+ * This function adds enumerations to a property.
+ *
+ * Its use is deprecated; drivers should use one of the more specific helpers
+ * to directly create the property with all enumerations already attached.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name)
+{
+ struct drm_property_enum *prop_enum;
+
+ if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
+ return -EINVAL;
+
+ /*
+ * Bitmask enum properties have the additional constraint of values
+ * from 0 to 63
+ */
+ if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
+ (value > 63))
+ return -EINVAL;
+
+ if (!list_empty(&property->enum_list)) {
+ list_for_each_entry(prop_enum, &property->enum_list, head) {
+ if (prop_enum->value == value) {
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ return 0;
+ }
+ }
+ }
+
+ prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+ if (!prop_enum)
+ return -ENOMEM;
+
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ prop_enum->value = value;
+
+ property->values[index] = value;
+ list_add_tail(&prop_enum->head, &property->enum_list);
+ return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+/**
+ * drm_property_destroy - destroy a drm property
+ * @dev: drm device
+ * @property: property to destroy
+ *
+ * This function frees a property including any attached resources like
+ * enumeration values.
+ */
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+ struct drm_property_enum *prop_enum, *pt;
+
+ list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
+ list_del(&prop_enum->head);
+ kfree(prop_enum);
+ }
+
+ if (property->num_values)
+ kfree(property->values);
+ drm_mode_object_unregister(dev, &property->base);
+ list_del(&property->head);
+ kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_get_property *out_resp = data;
+ struct drm_property *property;
+ int enum_count = 0;
+ int value_count = 0;
+ int ret = 0, i;
+ int copied;
+ struct drm_property_enum *prop_enum;
+ struct drm_mode_property_enum __user *enum_ptr;
+ uint64_t __user *values_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ property = drm_property_find(dev, out_resp->prop_id);
+ if (!property) {
+ ret = -ENOENT;
+ goto done;
+ }
+
+ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+ list_for_each_entry(prop_enum, &property->enum_list, head)
+ enum_count++;
+ }
+
+ value_count = property->num_values;
+
+ strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+ out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+ out_resp->flags = property->flags;
+
+ if ((out_resp->count_values >= value_count) && value_count) {
+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+ for (i = 0; i < value_count; i++) {
+ if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ out_resp->count_values = value_count;
+
+ if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+ if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ copied = 0;
+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+ list_for_each_entry(prop_enum, &property->enum_list, head) {
+
+ if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (copy_to_user(&enum_ptr[copied].name,
+ &prop_enum->name, DRM_PROP_NAME_LEN)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ copied++;
+ }
+ }
+ out_resp->count_enum_blobs = enum_count;
+ }
+
+ /*
+ * NOTE: The idea seems to have been to use this to read all the blob
+ * property values. But nothing ever added them to the corresponding
+ * list, userspace always used the special-purpose get_blob ioctl to
+ * read the value for a blob property. It also doesn't make a lot of
+ * sense to return values here when everything else is just metadata for
+ * the property itself.
+ */
+ if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+ out_resp->count_enum_blobs = 0;
+done:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+static void drm_property_free_blob(struct kref *kref)
+{
+ struct drm_property_blob *blob =
+ container_of(kref, struct drm_property_blob, base.refcount);
+
+ mutex_lock(&blob->dev->mode_config.blob_lock);
+ list_del(&blob->head_global);
+ mutex_unlock(&blob->dev->mode_config.blob_lock);
+
+ drm_mode_object_unregister(blob->dev, &blob->base);
+
+ kfree(blob);
+}
+
+/**
+ * drm_property_create_blob - Create new blob property
+ * @dev: DRM device to create property for
+ * @length: Length to allocate for blob data
+ * @data: If specified, copies data into blob
+ *
+ * Creates a new blob property for a specified DRM device, optionally
+ * copying data. Note that blob properties are meant to be invariant, hence the
+ * data must be filled out before the blob is used as the value of any property.
+ *
+ * Returns:
+ * New blob property with a single reference on success, or an ERR_PTR
+ * value on failure.
+ */
+struct drm_property_blob *
+drm_property_create_blob(struct drm_device *dev, size_t length,
+ const void *data)
+{
+ struct drm_property_blob *blob;
+ int ret;
+
+ if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+ return ERR_PTR(-EINVAL);
+
+ blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+ if (!blob)
+ return ERR_PTR(-ENOMEM);
+
+ /* This must be explicitly initialised, so we can safely call list_del
+ * on it in the removal handler, even if it isn't in a file list. */
+ INIT_LIST_HEAD(&blob->head_file);
+ blob->length = length;
+ blob->dev = dev;
+
+ if (data)
+ memcpy(blob->data, data, length);
+
+ ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
+ true, drm_property_free_blob);
+ if (ret) {
+ kfree(blob);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&dev->mode_config.blob_lock);
+ list_add_tail(&blob->head_global,
+ &dev->mode_config.property_blob_list);
+ mutex_unlock(&dev->mode_config.blob_lock);
+
+ return blob;
+}
+EXPORT_SYMBOL(drm_property_create_blob);
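
A sketch of typical usage (foo_data and foo_blob_prop are hypothetical):
create the blob up front, since its contents are invariant, then publish
its ID through a blob-typed property:

	struct drm_property_blob *blob;

	blob = drm_property_create_blob(dev, sizeof(foo_data), &foo_data);
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	drm_object_attach_property(&connector->base, foo_blob_prop,
				   blob->base.id);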
+
+/**
+ * drm_property_unreference_blob - Unreference a blob property
+ * @blob: Pointer to blob property
+ *
+ * Drop a reference on a blob property. May free the object.
+ */
+void drm_property_unreference_blob(struct drm_property_blob *blob)
+{
+ if (!blob)
+ return;
+
+ drm_mode_object_unreference(&blob->base);
+}
+EXPORT_SYMBOL(drm_property_unreference_blob);
+
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct drm_property_blob *blob, *bt;
+
+ /*
+ * When the file gets released that means no one else can access the
+ * blob list any more, so no need to grab dev->mode_config.blob_lock.
+ */
+ list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
+ list_del_init(&blob->head_file);
+ drm_property_unreference_blob(blob);
+ }
+}
+
+/**
+ * drm_property_reference_blob - Take a reference on an existing property
+ * @blob: Pointer to blob property
+ *
+ * Take a new reference on an existing blob property. Returns @blob, which
+ * allows this to be used as a shorthand in assignments.
+ */
+struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
+{
+ drm_mode_object_reference(&blob->base);
+ return blob;
+}
+EXPORT_SYMBOL(drm_property_reference_blob);
+
+/**
+ * drm_property_lookup_blob - look up a blob property and take a reference
+ * @dev: drm device
+ * @id: id of the blob property
+ *
+ * If successful, this takes an additional reference to the blob property.
+ * Callers need to make sure to eventually unreference the returned property
+ * again, using drm_property_unreference_blob().
+ *
+ * Return:
+ * NULL on failure, pointer to the blob on success.
+ */
+struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *obj;
+ struct drm_property_blob *blob = NULL;
+
+ obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+ if (obj)
+ blob = obj_to_blob(obj);
+ return blob;
+}
+EXPORT_SYMBOL(drm_property_lookup_blob);
+
+/**
+ * drm_property_replace_global_blob - replace existing blob property
+ * @dev: drm device
+ * @replace: location of blob property pointer to be replaced
+ * @length: length of data for new blob, or 0 for no data
+ * @data: content for new blob, or NULL for no data
+ * @obj_holds_id: optional object for property holding blob ID
+ * @prop_holds_id: optional property holding blob ID
+ * @return: 0 on success or a negative error code on failure
+ *
+ * This function will replace a global property in the blob list, optionally
+ * updating a property which holds the ID of that property.
+ *
+ * If length is 0 or data is NULL, no new blob will be created, and the holding
+ * property, if specified, will be set to 0.
+ *
+ * Access to the replace pointer is assumed to be protected by the caller, e.g.
+ * by holding the relevant modesetting object lock for its parent.
+ *
+ * For example, a drm_connector has a 'PATH' property, which contains the ID
+ * of a blob property with the value of the MST path information. Calling this
+ * function with replace pointing to the connector's path_blob_ptr, length and
+ * data set for the new path information, obj_holds_id set to the connector's
+ * base object, and prop_holds_id set to the path property name, will perform
+ * a completely atomic update. The access to path_blob_ptr is protected by the
+ * caller holding a lock on the connector.
+ */
+int drm_property_replace_global_blob(struct drm_device *dev,
+ struct drm_property_blob **replace,
+ size_t length,
+ const void *data,
+ struct drm_mode_object *obj_holds_id,
+ struct drm_property *prop_holds_id)
+{
+ struct drm_property_blob *new_blob = NULL;
+ struct drm_property_blob *old_blob = NULL;
+ int ret;
+
+ WARN_ON(replace == NULL);
+
+ old_blob = *replace;
+
+ if (length && data) {
+ new_blob = drm_property_create_blob(dev, length, data);
+ if (IS_ERR(new_blob))
+ return PTR_ERR(new_blob);
+ }
+
+ if (obj_holds_id) {
+ ret = drm_object_property_set_value(obj_holds_id,
+ prop_holds_id,
+ new_blob ?
+ new_blob->base.id : 0);
+ if (ret != 0)
+ goto err_created;
+ }
+
+ drm_property_unreference_blob(old_blob);
+ *replace = new_blob;
+
+ return 0;
+
+err_created:
+ drm_property_unreference_blob(new_blob);
+ return ret;
+}
+EXPORT_SYMBOL(drm_property_replace_global_blob);
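
The PATH example from the comment above, roughly as a connector helper
would use it (a sketch assuming the caller holds the connector lock):

	ret = drm_property_replace_global_blob(dev,
					       &connector->path_blob_ptr,
					       strlen(path) + 1, path,
					       &connector->base,
					       dev->mode_config.path_property);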
+
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_get_blob *out_resp = data;
+ struct drm_property_blob *blob;
+ int ret = 0;
+ void __user *blob_ptr;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+ if (!blob)
+ return -ENOENT;
+
+ if (out_resp->length == blob->length) {
+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
+ if (copy_to_user(blob_ptr, blob->data, blob->length)) {
+ ret = -EFAULT;
+ goto unref;
+ }
+ }
+ out_resp->length = blob->length;
+unref:
+ drm_property_unreference_blob(blob);
+
+ return ret;
+}
+
+int drm_mode_createblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_blob *out_resp = data;
+ struct drm_property_blob *blob;
+ void __user *blob_ptr;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ blob = drm_property_create_blob(dev, out_resp->length, NULL);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
+ if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
+ ret = -EFAULT;
+ goto out_blob;
+ }
+
+ /* Dropping the lock between create_blob and our access here is safe
+ * as only the same file_priv can remove the blob; at this point, it is
+ * not associated with any file_priv. */
+ mutex_lock(&dev->mode_config.blob_lock);
+ out_resp->blob_id = blob->base.id;
+ list_add_tail(&blob->head_file, &file_priv->blobs);
+ mutex_unlock(&dev->mode_config.blob_lock);
+
+ return 0;
+
+out_blob:
+ drm_property_unreference_blob(blob);
+ return ret;
+}
+
+int drm_mode_destroyblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_blob *out_resp = data;
+ struct drm_property_blob *blob = NULL, *bt;
+ bool found = false;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+ if (!blob)
+ return -ENOENT;
+
+ mutex_lock(&dev->mode_config.blob_lock);
+ /* Ensure the property was actually created by this user. */
+ list_for_each_entry(bt, &file_priv->blobs, head_file) {
+ if (bt == blob) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ ret = -EPERM;
+ goto err;
+ }
+
+ /* We must drop head_file here, because we may not be the last
+ * reference on the blob. */
+ list_del_init(&blob->head_file);
+ mutex_unlock(&dev->mode_config.blob_lock);
+
+ /* One reference from lookup, and one from the filp. */
+ drm_property_unreference_blob(blob);
+ drm_property_unreference_blob(blob);
+
+ return 0;
+
+err:
+ mutex_unlock(&dev->mode_config.blob_lock);
+ drm_property_unreference_blob(blob);
+
+ return ret;
+}
+
+/* Some properties could refer to dynamic refcnt'd objects, or things that
+ * need special locking to handle lifetime issues (i.e. to ensure the prop
+ * value doesn't become invalid part way through the property update due to a
+ * race). The value returned by reference via @ref should be passed back
+ * to drm_property_change_valid_put() after the property is set (and the
+ * object to which the property is attached has a chance to take its own
+ * reference).
+ */
+bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value, struct drm_mode_object **ref)
+{
+ int i;
+
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+
+ *ref = NULL;
+
+ if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
+ int64_t svalue = U642I64(value);
+
+ if (svalue < U642I64(property->values[0]) ||
+ svalue > U642I64(property->values[1]))
+ return false;
+ return true;
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
+ uint64_t valid_mask = 0;
+
+ for (i = 0; i < property->num_values; i++)
+ valid_mask |= (1ULL << property->values[i]);
+ return !(value & ~valid_mask);
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
+ struct drm_property_blob *blob;
+
+ if (value == 0)
+ return true;
+
+ blob = drm_property_lookup_blob(property->dev, value);
+ if (blob) {
+ *ref = &blob->base;
+ return true;
+ } else {
+ return false;
+ }
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+ /* a zero value for an object property translates to null: */
+ if (value == 0)
+ return true;
+
+ *ref = __drm_mode_object_find(property->dev, value,
+ property->values[0]);
+ return *ref != NULL;
+ }
+
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+}
+
+void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref)
+{
+ if (!ref)
+ return;
+
+ if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
+ drm_mode_object_unreference(ref);
+ } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
+ drm_property_unreference_blob(obj_to_blob(ref));
+}
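
The intended calling sequence for this pair, sketched (the real consumer is
the set-property ioctl path): hold the temporary reference across the
update, then drop it once the object has had a chance to take its own.

	struct drm_mode_object *ref;
	int ret = -EINVAL;

	if (drm_property_change_valid_get(property, value, &ref)) {
		ret = drm_object_property_set_value(obj, property, value);
		drm_property_change_valid_put(property, ref);
	}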
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index a8e2c8603945..73e53a8d1b37 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -100,7 +100,7 @@ static int drm_calc_scale(int src, int dst)
{
int scale = 0;
- if (src < 0 || dst < 0)
+ if (WARN_ON(src < 0 || dst < 0))
return -EINVAL;
if (dst == 0)
@@ -317,38 +317,38 @@ void drm_rect_rotate(struct drm_rect *r,
{
struct drm_rect tmp;
- if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+ if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) {
tmp = *r;
- if (rotation & BIT(DRM_REFLECT_X)) {
+ if (rotation & DRM_REFLECT_X) {
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
}
- if (rotation & BIT(DRM_REFLECT_Y)) {
+ if (rotation & DRM_REFLECT_Y) {
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
}
}
switch (rotation & DRM_ROTATE_MASK) {
- case BIT(DRM_ROTATE_0):
+ case DRM_ROTATE_0:
break;
- case BIT(DRM_ROTATE_90):
+ case DRM_ROTATE_90:
tmp = *r;
r->x1 = tmp.y1;
r->x2 = tmp.y2;
r->y1 = width - tmp.x2;
r->y2 = width - tmp.x1;
break;
- case BIT(DRM_ROTATE_180):
+ case DRM_ROTATE_180:
tmp = *r;
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
break;
- case BIT(DRM_ROTATE_270):
+ case DRM_ROTATE_270:
tmp = *r;
r->x1 = height - tmp.y2;
r->x2 = height - tmp.y1;
@@ -392,23 +392,23 @@ void drm_rect_rotate_inv(struct drm_rect *r,
struct drm_rect tmp;
switch (rotation & DRM_ROTATE_MASK) {
- case BIT(DRM_ROTATE_0):
+ case DRM_ROTATE_0:
break;
- case BIT(DRM_ROTATE_90):
+ case DRM_ROTATE_90:
tmp = *r;
r->x1 = width - tmp.y2;
r->x2 = width - tmp.y1;
r->y1 = tmp.x1;
r->y2 = tmp.x2;
break;
- case BIT(DRM_ROTATE_180):
+ case DRM_ROTATE_180:
tmp = *r;
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
break;
- case BIT(DRM_ROTATE_270):
+ case DRM_ROTATE_270:
tmp = *r;
r->x1 = tmp.y1;
r->x2 = tmp.y2;
@@ -419,15 +419,15 @@ void drm_rect_rotate_inv(struct drm_rect *r,
break;
}
- if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+ if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) {
tmp = *r;
- if (rotation & BIT(DRM_REFLECT_X)) {
+ if (rotation & DRM_REFLECT_X) {
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
}
- if (rotation & BIT(DRM_REFLECT_Y)) {
+ if (rotation & DRM_REFLECT_Y) {
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
}
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index bf70431073f6..275bca44f38c 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -68,7 +68,7 @@ static void drm_sg_cleanup(struct drm_sg_mem * entry)
void drm_legacy_sg_cleanup(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_core_check_feature(dev, DRIVER_LEGACY)) {
drm_sg_cleanup(dev->sg);
dev->sg = NULL;
}
@@ -88,7 +88,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
DRM_DEBUG("\n");
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_SG))
@@ -201,7 +201,7 @@ int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_SG))
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 0db36d27e90b..7bae08c2bf0a 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -34,6 +34,12 @@ static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
{
struct drm_simple_display_pipe *pipe;
@@ -57,6 +63,7 @@ static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
+ .atomic_check = drm_simple_kms_crtc_check,
.disable = drm_simple_kms_crtc_disable,
.enable = drm_simple_kms_crtc_enable,
};
@@ -73,22 +80,9 @@ static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
- struct drm_rect src = {
- .x1 = plane_state->src_x,
- .y1 = plane_state->src_y,
- .x2 = plane_state->src_x + plane_state->src_w,
- .y2 = plane_state->src_y + plane_state->src_h,
- };
- struct drm_rect dest = {
- .x1 = plane_state->crtc_x,
- .y1 = plane_state->crtc_y,
- .x2 = plane_state->crtc_x + plane_state->crtc_w,
- .y2 = plane_state->crtc_y + plane_state->crtc_h,
- };
struct drm_rect clip = { 0 };
struct drm_simple_display_pipe *pipe;
struct drm_crtc_state *crtc_state;
- bool visible;
int ret;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
@@ -102,17 +96,15 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
clip.x2 = crtc_state->adjusted_mode.hdisplay;
clip.y2 = crtc_state->adjusted_mode.vdisplay;
- ret = drm_plane_helper_check_update(plane, &pipe->crtc,
- plane_state->fb,
- &src, &dest, &clip,
- plane_state->rotation,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true, &visible);
+
+ ret = drm_plane_helper_check_state(plane_state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
if (ret)
return ret;
- if (!visible)
+ if (!plane_state->visible)
return -EINVAL;
if (!pipe->funcs || !pipe->funcs->check)
@@ -133,7 +125,33 @@ static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
pipe->funcs->update(pipe, pstate);
}
+static int drm_simple_kms_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ if (!pipe->funcs || !pipe->funcs->prepare_fb)
+ return 0;
+
+ return pipe->funcs->prepare_fb(pipe, state);
+}
+
+static void drm_simple_kms_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ if (!pipe->funcs || !pipe->funcs->cleanup_fb)
+ return;
+
+ pipe->funcs->cleanup_fb(pipe, state);
+}
+
static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
+ .prepare_fb = drm_simple_kms_plane_prepare_fb,
+ .cleanup_fb = drm_simple_kms_plane_cleanup_fb,
.atomic_check = drm_simple_kms_plane_atomic_check,
.atomic_update = drm_simple_kms_plane_atomic_update,
};
@@ -148,16 +166,61 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
};
/**
+ * drm_simple_display_pipe_attach_bridge - Attach a bridge to the display pipe
+ * @pipe: simple display pipe object
+ * @bridge: bridge to attach
+ *
+ * Makes it possible to still use the drm_simple_display_pipe helpers when
+ * a DRM bridge has to be used.
+ *
+ * Note that you probably want to initialize the pipe by passing a NULL
+ * connector to drm_simple_display_pipe_init().
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
+ struct drm_bridge *bridge)
+{
+ bridge->encoder = &pipe->encoder;
+ pipe->encoder.bridge = bridge;
+ return drm_bridge_attach(pipe->encoder.dev, bridge);
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
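
Putting the two together, a hypothetical bridge-based driver init (foo_*
names assumed): initialize the pipe with a NULL connector and let the
bridge supply one.

	ret = drm_simple_display_pipe_init(dev, &priv->pipe, &foo_pipe_funcs,
					   foo_formats,
					   ARRAY_SIZE(foo_formats), NULL);
	if (ret)
		return ret;

	ret = drm_simple_display_pipe_attach_bridge(&priv->pipe, bridge);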
+
+/**
+ * drm_simple_display_pipe_detach_bridge - Detach the bridge from the display pipe
+ * @pipe: simple display pipe object
+ *
+ * Detaches the drm bridge previously attached with
+ * drm_simple_display_pipe_attach_bridge()
+ */
+void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe)
+{
+ if (WARN_ON(!pipe->encoder.bridge))
+ return;
+
+ drm_bridge_detach(pipe->encoder.bridge);
+ pipe->encoder.bridge = NULL;
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_detach_bridge);
+
+/**
* drm_simple_display_pipe_init - Initialize a simple display pipeline
* @dev: DRM device
* @pipe: simple display pipe object to initialize
* @funcs: callbacks for the display pipe (optional)
- * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
* @format_count: number of elements in @formats
- * @connector: connector to attach and register
+ * @connector: connector to attach and register (optional)
*
- * Sets up a display pipeline which consist of a really simple
- * plane-crtc-encoder pipe coupled with the provided connector.
+ * Sets up a display pipeline which consists of a really simple
+ * plane-crtc-encoder pipe.
+ *
+ * If a connector is supplied, the pipe will be coupled with the provided
+ * connector. You may supply a NULL connector when using drm bridges that
+ * handle connectors themselves (see drm_simple_display_pipe_attach_bridge()).
+ *
* Teardown of a simple display pipe is all handled automatically by the drm
* core through calling drm_mode_config_cleanup(). Drivers afterwards need to
* release the memory for the structure themselves.
@@ -196,7 +259,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
- if (ret)
+ if (ret || !connector)
return ret;
return drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 32dd821b7202..9a37196c1bf1 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -19,7 +19,6 @@
#include <linux/export.h>
#include <drm/drm_sysfs.h>
-#include <drm/drm_core.h>
#include <drm/drmP.h>
#include "drm_internal.h"
@@ -37,12 +36,7 @@ static char *drm_devnode(struct device *dev, umode_t *mode)
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
}
-static CLASS_ATTR_STRING(version, S_IRUGO,
- CORE_NAME " "
- __stringify(CORE_MAJOR) "."
- __stringify(CORE_MINOR) "."
- __stringify(CORE_PATCHLEVEL) " "
- CORE_DATE);
+static CLASS_ATTR_STRING(version, S_IRUGO, "drm 1.1.0 20060810");
/**
* drm_sysfs_init - initialize sysfs helpers
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index f306c8855978..20cc33d1bfc1 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -25,7 +25,6 @@
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
-#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
@@ -86,7 +85,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
unsigned long page_offset, unsigned long size)
{
rwlock_init(&mgr->vm_lock);
- mgr->vm_addr_space_rb = RB_ROOT;
drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
@@ -145,16 +143,16 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
unsigned long start,
unsigned long pages)
{
- struct drm_vma_offset_node *node, *best;
+ struct drm_mm_node *node, *best;
struct rb_node *iter;
unsigned long offset;
- iter = mgr->vm_addr_space_rb.rb_node;
+ iter = mgr->vm_addr_space_mm.interval_tree.rb_node;
best = NULL;
while (likely(iter)) {
- node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
- offset = node->vm_node.start;
+ node = rb_entry(iter, struct drm_mm_node, rb);
+ offset = node->start;
if (start >= offset) {
iter = iter->rb_right;
best = node;
@@ -167,38 +165,17 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m
/* verify that the node spans the requested area */
if (best) {
- offset = best->vm_node.start + best->vm_node.size;
+ offset = best->start + best->size;
if (offset < start + pages)
best = NULL;
}
- return best;
-}
-EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
-
-/* internal helper to link @node into the rb-tree */
-static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
- struct drm_vma_offset_node *node)
-{
- struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
- struct rb_node *parent = NULL;
- struct drm_vma_offset_node *iter_node;
-
- while (likely(*iter)) {
- parent = *iter;
- iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+ if (!best)
+ return NULL;
- if (node->vm_node.start < iter_node->vm_node.start)
- iter = &(*iter)->rb_left;
- else if (node->vm_node.start > iter_node->vm_node.start)
- iter = &(*iter)->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&node->vm_rb, parent, iter);
- rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+ return container_of(best, struct drm_vma_offset_node, vm_node);
}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
/**
* drm_vma_offset_add() - Add offset node to manager
@@ -240,8 +217,6 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
if (ret)
goto out_unlock;
- _drm_vma_offset_add_rb(mgr, node);
-
out_unlock:
write_unlock(&mgr->vm_lock);
return ret;
@@ -265,7 +240,6 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
write_lock(&mgr->vm_lock);
if (drm_mm_node_allocated(&node->vm_node)) {
- rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
drm_mm_remove_node(&node->vm_node);
memset(&node->vm_node, 0, sizeof(node->vm_node));
}
@@ -277,9 +251,9 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
/**
* drm_vma_node_allow - Add open-file to list of allowed users
* @node: Node to modify
- * @filp: Open file to add
+ * @tag: Tag of file to add
*
- * Add @filp to the list of allowed open-files for this node. If @filp is
+ * Add @tag to the list of allowed open-files for this node. If @tag is
* already on this list, the ref-count is incremented.
*
* The list of allowed-users is preserved across drm_vma_offset_add() and
@@ -294,7 +268,7 @@ EXPORT_SYMBOL(drm_vma_offset_remove);
* RETURNS:
* 0 on success, negative error code on internal failure (out-of-mem)
*/
-int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
struct rb_node **iter;
struct rb_node *parent = NULL;
@@ -315,10 +289,10 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
parent = *iter;
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
- if (filp == entry->vm_filp) {
+ if (tag == entry->vm_tag) {
entry->vm_count++;
goto unlock;
- } else if (filp > entry->vm_filp) {
+ } else if (tag > entry->vm_tag) {
iter = &(*iter)->rb_right;
} else {
iter = &(*iter)->rb_left;
@@ -330,7 +304,7 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
goto unlock;
}
- new->vm_filp = filp;
+ new->vm_tag = tag;
new->vm_count = 1;
rb_link_node(&new->vm_rb, parent, iter);
rb_insert_color(&new->vm_rb, &node->vm_files);
@@ -346,17 +320,18 @@ EXPORT_SYMBOL(drm_vma_node_allow);
/**
* drm_vma_node_revoke - Remove open-file from list of allowed users
* @node: Node to modify
- * @filp: Open file to remove
+ * @tag: Tag of file to remove
*
- * Decrement the ref-count of @filp in the list of allowed open-files on @node.
- * If the ref-count drops to zero, remove @filp from the list. You must call
- * this once for every drm_vma_node_allow() on @filp.
+ * Decrement the ref-count of @tag in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @tag from the list. You must call
+ * this once for every drm_vma_node_allow() on @tag.
*
* This is locked against concurrent access internally.
*
- * If @filp is not on the list, nothing is done.
+ * If @tag is not on the list, nothing is done.
*/
-void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+void drm_vma_node_revoke(struct drm_vma_offset_node *node,
+ struct drm_file *tag)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
@@ -366,13 +341,13 @@ void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
- if (filp == entry->vm_filp) {
+ if (tag == entry->vm_tag) {
if (!--entry->vm_count) {
rb_erase(&entry->vm_rb, &node->vm_files);
kfree(entry);
}
break;
- } else if (filp > entry->vm_filp) {
+ } else if (tag > entry->vm_tag) {
iter = iter->rb_right;
} else {
iter = iter->rb_left;
@@ -386,9 +361,9 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
/**
* drm_vma_node_is_allowed - Check whether an open-file is granted access
* @node: Node to check
- * @filp: Open-file to check for
+ * @tag: Tag of file to check for
*
- * Search the list in @node whether @filp is currently on the list of allowed
+ * Search the list in @node whether @tag is currently on the list of allowed
* open-files (see drm_vma_node_allow()).
*
* This is locked against concurrent access internally.
@@ -397,7 +372,7 @@ EXPORT_SYMBOL(drm_vma_node_revoke);
* true iff @tag is on the list
*/
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
- struct file *filp)
+ struct drm_file *tag)
{
struct drm_vma_offset_file *entry;
struct rb_node *iter;
@@ -407,9 +382,9 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
iter = node->vm_files.rb_node;
while (likely(iter)) {
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
- if (filp == entry->vm_filp)
+ if (tag == entry->vm_tag)
break;
- else if (filp > entry->vm_filp)
+ else if (tag > entry->vm_tag)
iter = iter->rb_right;
else
iter = iter->rb_left;
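
Note: the lookup above now descends the interval tree embedded in drm_mm instead of a driver-private rb-tree, using the classic lower-bound pattern: remember the last node whose start does not exceed the requested offset while descending, then verify the candidate spans the whole range. A minimal user-space sketch of that search, with a sorted array standing in for the rb-tree walk; all names here are invented for illustration, not kernel API:

#include <stdio.h>
#include <stddef.h>

struct node { unsigned long start, size; };

static struct node *lookup(struct node *nodes, size_t n,
                           unsigned long start, unsigned long pages)
{
    struct node *best = NULL;
    size_t lo = 0, hi = n;

    while (lo < hi) {                    /* binary search, like the rb walk */
        size_t mid = lo + (hi - lo) / 2;
        if (start >= nodes[mid].start) { /* candidate; look right for better */
            best = &nodes[mid];
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    /* verify that the best candidate actually covers [start, start+pages) */
    if (best && best->start + best->size < start + pages)
        best = NULL;
    return best;
}

int main(void)
{
    struct node nodes[] = { { 0, 16 }, { 32, 8 }, { 64, 128 } };
    struct node *n = lookup(nodes, 3, 70, 4);
    printf("%s\n", n ? "hit" : "miss");  /* "hit": 64..192 covers 70..74 */
    return 0;
}
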
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index d8d556457427..d9230132dfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -21,6 +21,7 @@
#include "common.xml.h"
#include "state.xml.h"
+#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"
@@ -117,11 +118,6 @@ static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
-static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
-{
- return buf->paddr - gpu->memory_base;
-}
-
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
@@ -129,7 +125,7 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
u32 *ptr = buf->vaddr + off;
dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
- ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
+ ptr, etnaviv_iommu_get_cmdbuf_va(gpu, buf) + off, size - len * 4 - off);
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
ptr, len * 4, 0);
@@ -162,7 +158,7 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
buffer->user_size = 0;
- return gpu_va(gpu, buffer) + buffer->user_size;
+ return etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + buffer->user_size;
}
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
@@ -173,7 +169,41 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
buffer->user_size = 0;
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
+ CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+ buffer->user_size - 4);
+
+ return buffer->user_size / 8;
+}
+
+u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
+{
+ struct etnaviv_cmdbuf *buffer = gpu->buffer;
+
+ buffer->user_size = 0;
+
+ if (gpu->identity.features & chipFeatures_PIPE_3D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+ VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+ mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ }
+
+ if (gpu->identity.features & chipFeatures_PIPE_2D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
+ VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+ mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ }
+
+ CMD_END(buffer);
+
+ buffer->user_size = ALIGN(buffer->user_size, 8);
return buffer->user_size / 8;
}
@@ -231,7 +261,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
- link_target = gpu_va(gpu, cmdbuf);
+ link_target = etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf);
link_dwords = cmdbuf->size / 8;
/*
@@ -246,8 +276,12 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
extra_dwords = 1;
/* flush command */
- if (gpu->mmu->need_flush)
- extra_dwords += 1;
+ if (gpu->mmu->need_flush) {
+ if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+ extra_dwords += 1;
+ else
+ extra_dwords += 3;
+ }
/* pipe switch commands */
if (gpu->switch_context)
@@ -257,12 +291,23 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
if (gpu->mmu->need_flush) {
/* Add the MMU flush */
- CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
- VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
- VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
- VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
- VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
- VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+ if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
+ VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
+ VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
+ VIVS_MMUv2_CONFIGURATION_MODE_MASK |
+ VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
+ VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE,
+ SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE,
+ SYNC_RECIPIENT_PE);
+ }
gpu->mmu->need_flush = false;
}
@@ -284,24 +329,38 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
/*
* Append a LINK to the submitted command buffer to return to
* the ring buffer. return_target is the ring target address.
- * We need three dwords: event, wait, link.
+ * We need at most 7 dwords in the return target: 2 cache flush +
+ * 2 semaphore stall + 1 event + 1 wait + 1 link.
*/
- return_dwords = 3;
+ return_dwords = 7;
return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
CMD_LINK(cmdbuf, return_dwords, return_target);
/*
- * Append event, wait and link pointing back to the wait
- * command to the ring buffer.
+ * Append a cache flush, stall, event, wait and link pointing back to
+ * the wait command to the ring buffer.
*/
+ if (gpu->exec_state == ETNA_PIPE_2D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_PE2D);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_DEPTH |
+ VIVS_GL_FLUSH_CACHE_COLOR);
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, return_target + 8);
+ CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+ buffer->user_size - 4);
if (drm_debug & DRM_UT_DRIVER)
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
- return_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
+ return_target, etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf), cmdbuf->vaddr);
if (drm_debug & DRM_UT_DRIVER) {
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
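
Note: the ring-buffer reservation above is sized by summing worst-case dwords for each optional command group; an MMUv2 flush costs three dwords (LOAD_STATE plus a semaphore/stall pair) where v1 needed one. A standalone sketch of that budgeting arithmetic; the pipe-switch cost of 4 is an illustrative assumption, not taken from this patch:

#include <stdbool.h>
#include <stdio.h>

enum { IOMMU_V1 = 1, IOMMU_V2 = 2 };

static unsigned int extra_dwords(bool need_flush, bool switch_context,
                                 int mmu_version)
{
    unsigned int dwords = 1;                 /* trailing link command */

    if (need_flush)                          /* v2: LOAD_STATE + SEM + STALL */
        dwords += (mmu_version == IOMMU_V1) ? 1 : 3;
    if (switch_context)
        dwords += 4;                         /* assumed pipe-switch cost */
    return dwords;
}

int main(void)
{
    printf("%u\n", extra_dwords(true, false, IOMMU_V2));   /* prints 4 */
    return 0;
}
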
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index ffd1b32caa8d..aa687669e22b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -488,8 +488,7 @@ static const struct file_operations fops = {
};
static struct drm_driver etnaviv_drm_driver = {
- .driver_features = DRIVER_HAVE_IRQ |
- DRIVER_GEM |
+ .driver_features = DRIVER_GEM |
DRIVER_PRIME |
DRIVER_RENDER,
.open = etnaviv_open,
@@ -530,10 +529,8 @@ static int etnaviv_bind(struct device *dev)
int ret;
drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
- if (!drm)
- return -ENOMEM;
-
- drm->platformdev = to_platform_device(dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
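
Note: drm_dev_alloc() now encodes failures as an ERR_PTR value instead of returning NULL, so the bind path switches from a NULL check to IS_ERR()/PTR_ERR(). A minimal sketch of that convention, with the kernel macros re-implemented locally for illustration only:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define ERR_PTR(e) ((void *)(long)(e))
#define PTR_ERR(p) ((long)(p))

static void *alloc_dev(int fail)
{
    return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)0x1000;
}

int main(void)
{
    void *drm = alloc_dev(1);
    if (IS_ERR(drm)) {                 /* error encoded in the pointer itself */
        printf("alloc failed: %ld\n", PTR_ERR(drm));
        return 1;
    }
    return 0;
}
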
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 115c5bc6d7c8..65e057639653 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -96,6 +96,7 @@ struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
+u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 4a29eeadbf1e..2bef501d4a17 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -175,11 +175,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
etnaviv_core_dump_registers(&iter, gpu);
etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
- gpu->buffer->size, gpu->buffer->paddr);
+ gpu->buffer->size,
+ etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer));
list_for_each_entry(cmd, &gpu->active_cmd_list, node)
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
- cmd->size, cmd->paddr);
+ cmd->size,
+ etnaviv_iommu_get_cmdbuf_va(gpu, cmd));
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5ce3603e6eac..0370b842d9cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct page **pvec;
uintptr_t ptr;
+ unsigned int flags = 0;
pvec = drm_malloc_ab(npages, sizeof(struct page *));
if (!pvec)
return ERR_PTR(-ENOMEM);
+ if (!etnaviv_obj->userptr.ro)
+ flags |= FOLL_WRITE;
+
pinned = 0;
ptr = etnaviv_obj->userptr.ptr;
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- !etnaviv_obj->userptr.ro, 0,
- pvec + pinned, NULL);
+ flags, pvec + pinned, NULL);
if (ret < 0)
break;
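
Note: the get_user_pages_remote() interface change folds the old write/force booleans into a FOLL_* flags word; the caller now sets FOLL_WRITE only when the object is not read-only. A tiny standalone sketch of building that flags word (FOLL_WRITE value mirrors include/linux/mm.h; surrounding names are stand-ins):

#include <stdio.h>

#define FOLL_WRITE 0x01    /* value as in include/linux/mm.h */

int main(void)
{
    int read_only = 0;                 /* etnaviv_obj->userptr.ro stand-in */
    unsigned int gup_flags = 0;

    if (!read_only)
        gup_flags |= FOLL_WRITE;       /* request writable pins */
    printf("gup_flags=%#x\n", gup_flags);
    return 0;
}
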
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b382cf505262..b1254f885fed 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -22,8 +22,6 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
-#include "etnaviv_iommu_v2.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
@@ -329,6 +327,18 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
gpu->identity.revision = 0x1051;
}
}
+
+ /*
+ * NXP likes to call the GPU on the i.MX6QP GC2000+, but in
+ * reality it's just a re-branded GC3000. We can identify this
+ * core by the upper half of the revision register being all 1.
+ * Fix model/rev here, so all other places can refer to this
+ * core by its real identity.
+ */
+ if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
+ gpu->identity.model = chipModel_GC3000;
+ gpu->identity.revision &= 0xffff;
+ }
}
dev_info(gpu->dev, "model: GC%x, revision: %x\n",
@@ -528,6 +538,14 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
}
+void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
+{
+ gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
+ gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
+ VIVS_FE_COMMAND_CONTROL_ENABLE |
+ VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+}
+
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
u16 prefetch;
@@ -568,33 +586,20 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
}
- /* set base addresses */
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
-
- /* setup the MMU page table pointers */
- etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
+ /* setup the MMU */
+ etnaviv_iommu_restore(gpu);
/* Start command processor */
prefetch = etnaviv_buffer_init(gpu);
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
- gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
- gpu->buffer->paddr - gpu->memory_base);
- gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
- VIVS_FE_COMMAND_CONTROL_ENABLE |
- VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+ etnaviv_gpu_start_fe(gpu, etnaviv_iommu_get_cmdbuf_va(gpu, gpu->buffer),
+ prefetch);
}
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
int ret, i;
- struct iommu_domain *iommu;
- enum etnaviv_iommu_version version;
- bool mmuv2;
ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0) {
@@ -642,32 +647,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
- /* Setup IOMMU.. eventually we will (I think) do this once per context
- * and have separate page tables per context. For now, to keep things
- * simple and to get something working, just use a single address space:
- */
- mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
- dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
-
- if (!mmuv2) {
- iommu = etnaviv_iommu_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V1;
- } else {
- iommu = etnaviv_iommu_v2_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V2;
- }
-
- if (!iommu) {
- dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
- if (!gpu->mmu) {
+ gpu->mmu = etnaviv_iommu_new(gpu);
+ if (IS_ERR(gpu->mmu)) {
dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
- iommu_domain_free(iommu);
- ret = -ENOMEM;
+ ret = PTR_ERR(gpu->mmu);
goto fail;
}
@@ -678,7 +661,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
dev_err(gpu->dev, "could not create command buffer\n");
goto destroy_iommu;
}
- if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
+
+ if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
+ gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
ret = -EINVAL;
dev_err(gpu->dev,
"command buffer outside valid memory window\n");
@@ -868,45 +853,6 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
#endif
/*
- * Power Management:
- */
-static int enable_clk(struct etnaviv_gpu *gpu)
-{
- if (gpu->clk_core)
- clk_prepare_enable(gpu->clk_core);
- if (gpu->clk_shader)
- clk_prepare_enable(gpu->clk_shader);
-
- return 0;
-}
-
-static int disable_clk(struct etnaviv_gpu *gpu)
-{
- if (gpu->clk_core)
- clk_disable_unprepare(gpu->clk_core);
- if (gpu->clk_shader)
- clk_disable_unprepare(gpu->clk_shader);
-
- return 0;
-}
-
-static int enable_axi(struct etnaviv_gpu *gpu)
-{
- if (gpu->clk_bus)
- clk_prepare_enable(gpu->clk_bus);
-
- return 0;
-}
-
-static int disable_axi(struct etnaviv_gpu *gpu)
-{
- if (gpu->clk_bus)
- clk_disable_unprepare(gpu->clk_bus);
-
- return 0;
-}
-
-/*
* Hangcheck detection for locked gpu:
*/
static void recover_worker(struct work_struct *work)
@@ -945,7 +891,7 @@ static void recover_worker(struct work_struct *work)
gpu->completed_fence = gpu->active_fence;
etnaviv_gpu_hw_init(gpu);
- gpu->switch_context = true;
+ gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@@ -1178,6 +1124,9 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
if (!cmdbuf)
return NULL;
+ if (gpu->mmu->version == ETNAVIV_IOMMU_V2)
+ size = ALIGN(size, SZ_4K);
+
cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
GFP_KERNEL);
if (!cmdbuf->vaddr) {
@@ -1193,6 +1142,7 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
+ etnaviv_iommu_put_cmdbuf_va(cmdbuf->gpu, cmdbuf);
dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
cmdbuf->paddr);
kfree(cmdbuf);
@@ -1425,6 +1375,21 @@ static irqreturn_t irq_handler(int irq, void *data)
intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
}
+ if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
+ int i;
+
+ dev_err_ratelimited(gpu->dev,
+ "MMU fault status 0x%08x\n",
+ gpu_read(gpu, VIVS_MMUv2_STATUS));
+ for (i = 0; i < 4; i++) {
+ dev_err_ratelimited(gpu->dev,
+ "MMU %d fault addr 0x%08x\n",
+ i, gpu_read(gpu,
+ VIVS_MMUv2_EXCEPTION_ADDR(i)));
+ }
+ intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
+ }
+
while ((event = ffs(intr)) != 0) {
struct fence *fence;
@@ -1466,39 +1431,72 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
int ret;
- ret = enable_clk(gpu);
- if (ret)
- return ret;
+ if (gpu->clk_bus) {
+ ret = clk_prepare_enable(gpu->clk_bus);
+ if (ret)
+ return ret;
+ }
- ret = enable_axi(gpu);
- if (ret) {
- disable_clk(gpu);
- return ret;
+ if (gpu->clk_core) {
+ ret = clk_prepare_enable(gpu->clk_core);
+ if (ret)
+ goto disable_clk_bus;
+ }
+
+ if (gpu->clk_shader) {
+ ret = clk_prepare_enable(gpu->clk_shader);
+ if (ret)
+ goto disable_clk_core;
}
return 0;
+
+disable_clk_core:
+ if (gpu->clk_core)
+ clk_disable_unprepare(gpu->clk_core);
+disable_clk_bus:
+ if (gpu->clk_bus)
+ clk_disable_unprepare(gpu->clk_bus);
+
+ return ret;
}
static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
{
- int ret;
+ if (gpu->clk_shader)
+ clk_disable_unprepare(gpu->clk_shader);
+ if (gpu->clk_core)
+ clk_disable_unprepare(gpu->clk_core);
+ if (gpu->clk_bus)
+ clk_disable_unprepare(gpu->clk_bus);
- ret = disable_axi(gpu);
- if (ret)
- return ret;
+ return 0;
+}
- ret = disable_clk(gpu);
- if (ret)
- return ret;
+int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
- return 0;
+ do {
+ u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+
+ if ((idle & gpu->idle_mask) == gpu->idle_mask)
+ return 0;
+
+ if (time_is_before_jiffies(timeout)) {
+ dev_warn(gpu->dev,
+ "timed out waiting for idle: idle=0x%x\n",
+ idle);
+ return -ETIMEDOUT;
+ }
+
+ udelay(5);
+ } while (1);
}
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
if (gpu->buffer) {
- unsigned long timeout;
-
/* Replace the last WAIT with END */
etnaviv_buffer_end(gpu);
@@ -1507,22 +1505,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
* happen quickly (as the WAIT is only 200 cycles). If
* we fail, just warn and continue.
*/
- timeout = jiffies + msecs_to_jiffies(100);
- do {
- u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
-
- if ((idle & gpu->idle_mask) == gpu->idle_mask)
- break;
-
- if (time_is_before_jiffies(timeout)) {
- dev_warn(gpu->dev,
- "timed out waiting for idle: idle=0x%x\n",
- idle);
- break;
- }
-
- udelay(5);
- } while (1);
+ etnaviv_gpu_wait_idle(gpu, 100);
}
return etnaviv_gpu_clk_disable(gpu);
@@ -1634,7 +1617,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct etnaviv_gpu *gpu;
- int err = 0;
+ int err;
gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
if (!gpu)
@@ -1651,16 +1634,15 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
/* Get Interrupt: */
gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0) {
- err = gpu->irq;
- dev_err(dev, "failed to get irq: %d\n", err);
- goto fail;
+ dev_err(dev, "failed to get irq: %d\n", gpu->irq);
+ return gpu->irq;
}
err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
dev_name(gpu->dev), gpu);
if (err) {
dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
- goto fail;
+ return err;
}
/* Get Clocks: */
@@ -1694,13 +1676,10 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
err = component_add(&pdev->dev, &gpu_ops);
if (err < 0) {
dev_err(&pdev->dev, "failed to register component: %d\n", err);
- goto fail;
+ return err;
}
return 0;
-
-fail:
- return err;
}
static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
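
Note: the open-coded clock path above follows the standard goto-unwind ladder: each clock that was enabled successfully gets a label that disables it, executed in reverse order when a later step fails. A standalone sketch of that shape, with stub enable/disable functions and a simulated failure, for illustration only:

#include <stdio.h>
#include <string.h>

static int enable(const char *name)
{
    if (!strcmp(name, "shader"))       /* simulate a late failure */
        return -1;
    printf("enable %s\n", name);
    return 0;
}

static void disable(const char *name) { printf("disable %s\n", name); }

static int clk_enable_all(void)
{
    int ret;

    ret = enable("bus");
    if (ret)
        return ret;
    ret = enable("core");
    if (ret)
        goto disable_bus;
    ret = enable("shader");
    if (ret)
        goto disable_core;
    return 0;

disable_core:                          /* unwind in reverse order */
    disable("core");
disable_bus:
    disable("bus");
    return ret;
}

int main(void) { return clk_enable_all() ? 1 : 0; }
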
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index a69cdd526bf8..73c278dc3706 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -160,6 +160,8 @@ struct etnaviv_cmdbuf {
dma_addr_t paddr;
u32 size;
u32 user_size;
+ /* vram node used if the cmdbuf is mapped through the MMUv2 */
+ struct drm_mm_node vram_node;
/* fence after which this buffer is to be disposed */
struct fence *fence;
/* target exec state */
@@ -214,6 +216,8 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
+int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
+void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);
extern struct platform_driver etnaviv_gpu_driver;
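
Note: etnaviv_gpu_wait_idle(), exported above, is a poll-until-timeout helper: read the idle register, compare against the mask, give up once the deadline passes. A user-space sketch of the same shape, with a bounded spin count standing in for the jiffies deadline and a fake register read:

#include <stdio.h>

static unsigned int read_idle(void)
{
    static int polls;
    return ++polls >= 3 ? 0x7fffffffu : 0;   /* fake: idle on third read */
}

static int wait_idle(unsigned int idle_mask, unsigned int spins)
{
    while (spins--) {
        if ((read_idle() & idle_mask) == idle_mask)
            return 0;
        /* the kernel version does udelay(5) between polls */
    }
    return -1;                               /* -ETIMEDOUT in the kernel */
}

int main(void)
{
    printf("%d\n", wait_idle(0x7fffffffu, 10));   /* prints 0 */
    return 0;
}
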
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 16353ee81651..81f1583a7946 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -196,12 +196,19 @@ static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
.dump = etnaviv_iommuv1_dump,
};
-void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
- struct iommu_domain *domain)
+void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommu_domain *etnaviv_domain =
+ to_etnaviv_domain(gpu->mmu->domain);
u32 pgtable;
+ /* set base addresses */
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
+
/* set page table address in MC */
pgtable = (u32)etnaviv_domain->pgtable.paddr;
@@ -212,7 +219,7 @@ void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
-struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
+struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommu_domain *etnaviv_domain;
int ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
index cf45503f6b6f..8b51e7c16feb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -17,12 +17,12 @@
#ifndef __ETNAVIV_IOMMU_H__
#define __ETNAVIV_IOMMU_H__
-#include <linux/iommu.h>
struct etnaviv_gpu;
-struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
-void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
- struct iommu_domain *domain);
-struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
+struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
+void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
+
+struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
+void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index fbb4aed3dc80..7e9c4d210a84 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
+ * Copyright (C) 2016 Etnaviv Project
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -22,12 +22,267 @@
#include <linux/bitops.h>
#include "etnaviv_gpu.h"
+#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
+#include "state.xml.h"
#include "state_hi.xml.h"
+#define MMUv2_PTE_PRESENT BIT(0)
+#define MMUv2_PTE_EXCEPTION BIT(1)
+#define MMUv2_PTE_WRITEABLE BIT(2)
-struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
+#define MMUv2_MTLB_MASK 0xffc00000
+#define MMUv2_MTLB_SHIFT 22
+#define MMUv2_STLB_MASK 0x003ff000
+#define MMUv2_STLB_SHIFT 12
+
+#define MMUv2_MAX_STLB_ENTRIES 1024
+
+struct etnaviv_iommuv2_domain {
+ struct iommu_domain domain;
+ struct device *dev;
+ void *bad_page_cpu;
+ dma_addr_t bad_page_dma;
+ /* M(aster) TLB aka first level pagetable */
+ u32 *mtlb_cpu;
+ dma_addr_t mtlb_dma;
+ /* S(lave) TLB aka second level pagetable */
+ u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
+ dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
+};
+
+static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
+{
+ return container_of(domain, struct etnaviv_iommuv2_domain, domain);
+}
+
+static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct etnaviv_iommuv2_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
+ int mtlb_entry, stlb_entry;
+ u32 entry = (u32)paddr | MMUv2_PTE_PRESENT;
+
+ if (size != SZ_4K)
+ return -EINVAL;
+
+ if (prot & IOMMU_WRITE)
+ entry |= MMUv2_PTE_WRITEABLE;
+
+ mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+ stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+ etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
+
+ return 0;
+}
+
+static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct etnaviv_iommuv2_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
+ int mtlb_entry, stlb_entry;
+
+ if (size != SZ_4K)
+ return 0; /* size_t return cannot carry an errno; 0 = nothing unmapped */
+
+ mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+ stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+ etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;
+
+ return SZ_4K;
+}
+
+static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct etnaviv_iommuv2_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
+ int mtlb_entry, stlb_entry;
+
+ mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
+ stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
+
+ return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
+}
+
+static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
+{
+ u32 *p;
+ int ret, i, j;
+
+ /* allocate scratch page */
+ etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+ SZ_4K,
+ &etnaviv_domain->bad_page_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->bad_page_cpu) {
+ ret = -ENOMEM;
+ goto fail_mem;
+ }
+ p = etnaviv_domain->bad_page_cpu;
+ for (i = 0; i < SZ_4K / 4; i++)
+ *p++ = 0xdead55aa;
+
+ etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+ SZ_4K,
+ &etnaviv_domain->mtlb_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->mtlb_cpu) {
+ ret = -ENOMEM;
+ goto fail_mem;
+ }
+
+ /* pre-populate STLB pages (may want to switch to on-demand later) */
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+ etnaviv_domain->stlb_cpu[i] =
+ dma_alloc_coherent(etnaviv_domain->dev,
+ SZ_4K,
+ &etnaviv_domain->stlb_dma[i],
+ GFP_KERNEL);
+ if (!etnaviv_domain->stlb_cpu[i]) {
+ ret = -ENOMEM;
+ goto fail_mem;
+ }
+ p = etnaviv_domain->stlb_cpu[i];
+ for (j = 0; j < SZ_4K / 4; j++)
+ *p++ = MMUv2_PTE_EXCEPTION;
+
+ etnaviv_domain->mtlb_cpu[i] = etnaviv_domain->stlb_dma[i] |
+ MMUv2_PTE_PRESENT;
+ }
+
+ return 0;
+
+fail_mem:
+ if (etnaviv_domain->bad_page_cpu)
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ etnaviv_domain->bad_page_cpu,
+ etnaviv_domain->bad_page_dma);
+
+ if (etnaviv_domain->mtlb_cpu)
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ etnaviv_domain->mtlb_cpu,
+ etnaviv_domain->mtlb_dma);
+
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+ if (etnaviv_domain->stlb_cpu[i])
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ etnaviv_domain->stlb_cpu[i],
+ etnaviv_domain->stlb_dma[i]);
+ }
+
+ return ret;
+}
+
+static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
+{
+ struct etnaviv_iommuv2_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
+ int i;
+
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ etnaviv_domain->bad_page_cpu,
+ etnaviv_domain->bad_page_dma);
+
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ etnaviv_domain->mtlb_cpu,
+ etnaviv_domain->mtlb_dma);
+
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+ if (etnaviv_domain->stlb_cpu[i])
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ etnaviv_domain->stlb_cpu[i],
+ etnaviv_domain->stlb_dma[i]);
+ }
+
+ vfree(etnaviv_domain);
+}
+
+static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
+{
+ struct etnaviv_iommuv2_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
+ size_t dump_size = SZ_4K;
+ int i;
+
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+ if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+ dump_size += SZ_4K;
+
+ return dump_size;
+}
+
+static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
{
- /* TODO */
+ struct etnaviv_iommuv2_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
+ int i;
+
+ memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
+ buf += SZ_4K;
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
+ if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+ memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
+}
+
+static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
+ .ops = {
+ .domain_free = etnaviv_iommuv2_domain_free,
+ .map = etnaviv_iommuv2_map,
+ .unmap = etnaviv_iommuv2_unmap,
+ .iova_to_phys = etnaviv_iommuv2_iova_to_phys,
+ .pgsize_bitmap = SZ_4K,
+ },
+ .dump_size = etnaviv_iommuv2_dump_size,
+ .dump = etnaviv_iommuv2_dump,
+};
+
+void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_iommuv2_domain *etnaviv_domain =
+ to_etnaviv_domain(gpu->mmu->domain);
+ u16 prefetch;
+
+ /* If the MMU is already enabled the state is still there. */
+ if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
+ return;
+
+ prefetch = etnaviv_buffer_config_mmuv2(gpu,
+ (u32)etnaviv_domain->mtlb_dma,
+ (u32)etnaviv_domain->bad_page_dma);
+ etnaviv_gpu_start_fe(gpu, gpu->buffer->paddr, prefetch);
+ etnaviv_gpu_wait_idle(gpu, 100);
+
+ gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
+}
+struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_iommuv2_domain *etnaviv_domain;
+ int ret;
+
+ etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
+ if (!etnaviv_domain)
+ return NULL;
+
+ etnaviv_domain->dev = gpu->dev;
+
+ etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
+ etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+ etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
+ etnaviv_domain->domain.geometry.aperture_start = 0;
+ etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
+
+ ret = etnaviv_iommuv2_init(etnaviv_domain);
+ if (ret)
+ goto out_free;
+
+ return &etnaviv_domain->domain;
+
+out_free:
+ vfree(etnaviv_domain);
return NULL;
}
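
Note: the MMUv2 page tables are two-level: bits 31:22 of a GPU virtual address index the master TLB, bits 21:12 the slave TLB, and bits 11:0 are the page offset. A standalone sketch of the decomposition using the masks defined in the patch:

#include <stdio.h>

#define MTLB_MASK  0xffc00000u
#define MTLB_SHIFT 22
#define STLB_MASK  0x003ff000u
#define STLB_SHIFT 12

int main(void)
{
    unsigned int iova = 0x12345678;
    unsigned int mtlb = (iova & MTLB_MASK) >> MTLB_SHIFT;
    unsigned int stlb = (iova & STLB_MASK) >> STLB_SHIFT;

    printf("mtlb=%u stlb=%u offset=0x%03x\n",
           mtlb, stlb, iova & 0xfff);   /* mtlb=72 stlb=837 offset=0x678 */
    return 0;
}
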
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
deleted file mode 100644
index 603ea41c5389..000000000000
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ETNAVIV_IOMMU_V2_H__
-#define __ETNAVIV_IOMMU_V2_H__
-
-#include <linux/iommu.h>
-struct etnaviv_gpu;
-
-struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
-
-#endif /* __ETNAVIV_IOMMU_V2_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 29a723fabc17..169ac96e8f08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -14,9 +14,11 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include "common.xml.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
+#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
@@ -101,40 +103,21 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
drm_mm_remove_node(&mapping->vram_node);
}
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
- struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
- struct etnaviv_vram_mapping *mapping)
+static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
+ struct drm_mm_node *node, size_t size)
{
struct etnaviv_vram_mapping *free = NULL;
- struct sg_table *sgt = etnaviv_obj->sgt;
- struct drm_mm_node *node;
int ret;
- lockdep_assert_held(&etnaviv_obj->lock);
-
- mutex_lock(&mmu->lock);
-
- /* v1 MMU can optimize single entry (contiguous) scatterlists */
- if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
- u32 iova;
-
- iova = sg_dma_address(sgt->sgl) - memory_base;
- if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
- mapping->iova = iova;
- list_add_tail(&mapping->mmu_node, &mmu->mappings);
- mutex_unlock(&mmu->lock);
- return 0;
- }
- }
+ lockdep_assert_held(&mmu->lock);
- node = &mapping->vram_node;
while (1) {
struct etnaviv_vram_mapping *m, *n;
struct list_head list;
bool found;
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
- etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
+ size, 0, mmu->last_iova, ~0UL,
DRM_MM_SEARCH_DEFAULT);
if (ret != -ENOSPC)
@@ -151,7 +134,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
}
/* Try to retire some entries */
- drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);
+ drm_mm_init_scan(&mmu->mm, size, 0, 0);
found = 0;
INIT_LIST_HEAD(&list);
@@ -212,6 +195,38 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
mmu->need_flush = true;
}
+ return ret;
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+ struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+ struct etnaviv_vram_mapping *mapping)
+{
+ struct sg_table *sgt = etnaviv_obj->sgt;
+ struct drm_mm_node *node;
+ int ret;
+
+ lockdep_assert_held(&etnaviv_obj->lock);
+
+ mutex_lock(&mmu->lock);
+
+ /* v1 MMU can optimize single entry (contiguous) scatterlists */
+ if (mmu->version == ETNAVIV_IOMMU_V1 &&
+ sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
+ u32 iova;
+
+ iova = sg_dma_address(sgt->sgl) - memory_base;
+ if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
+ mapping->iova = iova;
+ list_add_tail(&mapping->mmu_node, &mmu->mappings);
+ mutex_unlock(&mmu->lock);
+ return 0;
+ }
+ }
+
+ node = &mapping->vram_node;
+
+ ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
if (ret < 0) {
mutex_unlock(&mmu->lock);
return ret;
@@ -256,30 +271,103 @@ void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
kfree(mmu);
}
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
- struct iommu_domain *domain, enum etnaviv_iommu_version version)
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
+ enum etnaviv_iommu_version version;
struct etnaviv_iommu *mmu;
mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
if (!mmu)
return ERR_PTR(-ENOMEM);
- mmu->domain = domain;
+ if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
+ mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
+ version = ETNAVIV_IOMMU_V1;
+ } else {
+ mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
+ version = ETNAVIV_IOMMU_V2;
+ }
+
+ if (!mmu->domain) {
+ dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
+ kfree(mmu);
+ return ERR_PTR(-ENOMEM);
+ }
+
mmu->gpu = gpu;
mmu->version = version;
mutex_init(&mmu->lock);
INIT_LIST_HEAD(&mmu->mappings);
- drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
- domain->geometry.aperture_end -
- domain->geometry.aperture_start + 1);
+ drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
+ mmu->domain->geometry.aperture_end -
+ mmu->domain->geometry.aperture_start + 1);
- iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
+ iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
return mmu;
}
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
+{
+ if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+ etnaviv_iommuv1_restore(gpu);
+ else
+ etnaviv_iommuv2_restore(gpu);
+}
+
+u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buf)
+{
+ struct etnaviv_iommu *mmu = gpu->mmu;
+
+ if (mmu->version == ETNAVIV_IOMMU_V1) {
+ return buf->paddr - gpu->memory_base;
+ } else {
+ int ret;
+
+ if (buf->vram_node.allocated)
+ return (u32)buf->vram_node.start;
+
+ mutex_lock(&mmu->lock);
+ ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+ buf->size + SZ_64K);
+ if (ret < 0) {
+ mutex_unlock(&mmu->lock);
+ return 0;
+ }
+ ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
+ buf->size, IOMMU_READ);
+ if (ret < 0) {
+ drm_mm_remove_node(&buf->vram_node);
+ mutex_unlock(&mmu->lock);
+ return 0;
+ }
+ /*
+ * At least on GC3000 the FE MMU doesn't properly flush old TLB
+ * entries. Make sure to space the command buffers out in a way
+ * that the FE MMU prefetch won't load invalid entries.
+ */
+ mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
+ gpu->mmu->need_flush = true;
+ mutex_unlock(&mmu->lock);
+
+ return (u32)buf->vram_node.start;
+ }
+}
+
+void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buf)
+{
+ struct etnaviv_iommu *mmu = gpu->mmu;
+
+ if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
+ mutex_lock(&mmu->lock);
+ iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
+ drm_mm_remove_node(&buf->vram_node);
+ mutex_unlock(&mmu->lock);
+ }
+}
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
struct etnaviv_iommu_ops *ops;
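
Note: etnaviv_iommu_get_cmdbuf_va() deliberately over-reserves each MMUv2 command-buffer mapping by SZ_64K and pushes last_iova past the gap, so a stale FE prefetch can never land on a live mapping. A sketch of that spacing arithmetic, with illustrative values:

#include <stdio.h>

#define SZ_64K 0x10000ul

int main(void)
{
    unsigned long start = 0x100000;      /* would come from the drm_mm search */
    unsigned long size  = 0x2000;        /* 8 KiB command buffer */
    unsigned long last_iova;

    /* reserve size + SZ_64K, then start the next search past the gap */
    last_iova = start + size + SZ_64K;
    printf("next search from 0x%lx\n", last_iova);   /* 0x112000 */
    return 0;
}
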
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index fff215a47630..e787e49c9693 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -62,10 +62,15 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
+u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buf);
+void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
+ struct etnaviv_cmdbuf *buf);
+
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
- struct iommu_domain *domain, enum etnaviv_iommu_version version);
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 807a3d9e0dd5..43c73e2ed34f 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,10 +8,10 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53)
-- common.xml ( 18437 bytes, from 2015-12-12 09:02:53)
+- state_hi.xml ( 25620 bytes, from 2016-08-19 22:07:37)
+- common.xml ( 20583 bytes, from 2016-06-07 05:22:38)
-Copyright (C) 2015
+Copyright (C) 2016
*/
@@ -78,9 +78,10 @@ Copyright (C) 2015
#define VIVS_HI_AXI_STATUS_DET_RD_ERR 0x00000200
#define VIVS_HI_INTR_ACKNOWLEDGE 0x00000010
-#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x7fffffff
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x3fffffff
#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT 0
#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x) (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
+#define VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION 0x40000000
#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR 0x80000000
#define VIVS_HI_INTR_ENBL 0x00000014
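
Note: the acknowledge register's event vector shrinks from 31 to 30 bits so bit 30 can carry the new MMU-exception status alongside the existing AXI bus error in bit 31. A standalone decode sketch using the masks above:

#include <stdio.h>

#define INTR_VEC_MASK      0x3fffffffu
#define INTR_MMU_EXCEPTION 0x40000000u
#define INTR_AXI_BUS_ERROR 0x80000000u

int main(void)
{
    unsigned int intr = 0x40000005;   /* events 0 and 2 plus an MMU fault */

    if (intr & INTR_MMU_EXCEPTION)
        printf("mmu exception\n");
    if (intr & INTR_AXI_BUS_ERROR)
        printf("axi bus error\n");
    printf("events: %#x\n", intr & INTR_VEC_MASK);   /* events: 0x5 */
    return 0;
}
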
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 83f61c513b7e..465d344f3391 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -38,7 +38,6 @@ config DRM_EXYNOS7_DECON
config DRM_EXYNOS_MIXER
bool "Mixer"
- depends on !VIDEO_SAMSUNG_S5P_TV
help
Choose this option if you want to use Exynos Mixer for DRM.
@@ -77,7 +76,7 @@ config DRM_EXYNOS_DP
config DRM_EXYNOS_HDMI
bool "HDMI"
- depends on !VIDEO_SAMSUNG_S5P_TV && (DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON)
+ depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON
help
Choose this option if you want to use Exynos HDMI for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index ac21b4000835..6ca1f3117fe8 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -551,7 +551,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
{
struct decon_context *ctx = dev_id;
u32 val;
- int win;
if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags))
goto out;
@@ -560,16 +559,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
val &= VIDINTCON1_INTFRMDONEPEND | VIDINTCON1_INTFRMPEND;
if (val) {
- for (win = ctx->first_win; win < WINDOWS_NR ; win++) {
- struct exynos_drm_plane *plane = &ctx->planes[win];
-
- if (!plane->pending_fb)
- continue;
-
- exynos_drm_crtc_finish_update(ctx->crtc, plane);
- }
-
- /* clear */
writel(val, ctx->addr + DECON_VIDINTCON1);
drm_crtc_handle_vblank(&ctx->crtc->base);
}
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 7f9901b7777b..f4d5a2133777 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -603,7 +603,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
{
struct decon_context *ctx = (struct decon_context *)dev_id;
u32 val, clear_bit;
- int win;
val = readl(ctx->regs + VIDINTCON1);
@@ -617,14 +616,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
if (!ctx->i80_if) {
drm_crtc_handle_vblank(&ctx->crtc->base);
- for (win = 0 ; win < WINDOWS_NR ; win++) {
- struct exynos_drm_plane *plane = &ctx->planes[win];
-
- if (!plane->pending_fb)
- continue;
-
- exynos_drm_crtc_finish_update(ctx->crtc, plane);
- }
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4f0850585b8e..528229faffe4 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -43,7 +43,7 @@ struct exynos_dp_device {
struct analogix_dp_plat_data plat_data;
};
-int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data,
+static int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data,
bool enable)
{
struct exynos_dp_device *dp = to_dp(plat_data);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 785ffa6cc309..2530bf57716a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -69,8 +69,6 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- exynos_crtc->event = crtc->state->event;
-
if (exynos_crtc->ops->atomic_begin)
exynos_crtc->ops->atomic_begin(exynos_crtc);
}
@@ -79,9 +77,24 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
if (exynos_crtc->ops->atomic_flush)
exynos_crtc->ops->atomic_flush(exynos_crtc);
+
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+
}
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -134,8 +147,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
exynos_crtc->ops = ops;
exynos_crtc->ctx = ctx;
- init_waitqueue_head(&exynos_crtc->wait_update);
-
crtc = &exynos_crtc->base;
private->crtc[pipe] = crtc;
@@ -175,32 +186,6 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe)
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
-void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc)
-{
- wait_event_timeout(exynos_crtc->wait_update,
- (atomic_read(&exynos_crtc->pending_update) == 0),
- msecs_to_jiffies(50));
-}
-
-void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
- struct exynos_drm_plane *exynos_plane)
-{
- struct drm_crtc *crtc = &exynos_crtc->base;
- unsigned long flags;
-
- exynos_plane->pending_fb = NULL;
-
- if (atomic_dec_and_test(&exynos_crtc->pending_update))
- wake_up(&exynos_crtc->wait_update);
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (exynos_crtc->event)
- drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
-
- exynos_crtc->event = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-}
-
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type)
{
@@ -228,20 +213,19 @@ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc,
struct drm_file *file)
{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_pending_vblank_event *e;
unsigned long flags;
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- e = exynos_crtc->event;
- if (e && e->base.file_priv == file) {
- exynos_crtc->event = NULL;
- atomic_dec(&exynos_crtc->pending_update);
- }
+ e = crtc->state->event;
+ if (e && e->base.file_priv == file)
+ crtc->state->event = NULL;
+ else
+ e = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- if (e && e->base.file_priv == file)
+ if (e)
drm_event_cancel_free(crtc->dev, &e->base);
}
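
Note: with the per-CRTC event bookkeeping gone, the flush hook completes the pending event itself: arm it for the next vblank when a vblank reference can be taken, otherwise send it immediately (CRTC off). A minimal sketch of that decision, with stub helpers standing in for the drm_crtc_* calls:

#include <stdio.h>

static int vblank_get(void) { return 0; }       /* 0 = reference taken */
static void arm(void)  { printf("armed for next vblank\n"); }
static void send(void) { printf("sent immediately\n"); }

int main(void)
{
    if (vblank_get() == 0)
        arm();      /* delivered later from the vblank interrupt */
    else
        send();     /* no vblank available: complete right away */
    return 0;
}
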
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 877d2efa28e2..f86e7c846678 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -45,37 +45,11 @@ struct exynos_atomic_commit {
u32 crtcs;
};
-static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
-{
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- int i, ret;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (!crtc->state->enable)
- continue;
-
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- continue;
-
- exynos_drm_crtc_wait_pending_update(exynos_crtc);
- drm_crtc_vblank_put(crtc);
- }
-}
-
static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
{
struct drm_device *dev = commit->dev;
struct exynos_drm_private *priv = dev->dev_private;
struct drm_atomic_state *state = commit->state;
- struct drm_plane *plane;
- struct drm_crtc *crtc;
- struct drm_plane_state *plane_state;
- struct drm_crtc_state *crtc_state;
- int i;
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -89,25 +63,9 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
* have the relevant clocks enabled to perform the update.
*/
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- atomic_set(&exynos_crtc->pending_update, 0);
- }
-
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct exynos_drm_crtc *exynos_crtc =
- to_exynos_crtc(plane->crtc);
+ drm_atomic_helper_commit_planes(dev, state, 0);
- if (!plane->crtc)
- continue;
-
- atomic_inc(&exynos_crtc->pending_update);
- }
-
- drm_atomic_helper_commit_planes(dev, state, false);
-
- exynos_atomic_wait_for_commit(state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
@@ -304,6 +262,26 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
return 0;
}
+int exynos_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 7f1a49d5bdbe..80c4d5b81689 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -86,7 +86,6 @@ struct exynos_drm_plane {
struct drm_plane base;
const struct exynos_drm_plane_config *config;
unsigned int index;
- struct drm_framebuffer *pending_fb;
};
#define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0)
@@ -172,9 +171,6 @@ struct exynos_drm_crtc {
struct drm_crtc base;
enum exynos_drm_output_type type;
unsigned int pipe;
- struct drm_pending_vblank_event *event;
- wait_queue_head_t wait_update;
- atomic_t pending_update;
const struct exynos_drm_crtc_ops *ops;
void *ctx;
struct exynos_drm_clk *pipe_clk;
@@ -305,6 +301,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
bool nonblock);
+int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
extern struct platform_driver fimd_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 40ce841eb952..23cce0a3f5fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -190,7 +190,7 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = exynos_atomic_check,
.atomic_commit = exynos_atomic_commit,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index d47216488985..e2e405170d35 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -198,6 +198,7 @@ struct fimd_context {
atomic_t wait_vsync_event;
atomic_t win_updated;
atomic_t triggering;
+ u32 clkdiv;
const struct fimd_driver_data *driver_data;
struct drm_encoder *encoder;
@@ -389,15 +390,18 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
pm_runtime_put(ctx->dev);
}
-static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
- const struct drm_display_mode *mode)
+
+static int fimd_atomic_check(struct exynos_drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- unsigned long ideal_clk;
+ struct drm_display_mode *mode = &state->adjusted_mode;
+ struct fimd_context *ctx = crtc->ctx;
+ unsigned long ideal_clk, lcd_rate;
u32 clkdiv;
if (mode->clock == 0) {
- DRM_ERROR("Mode has zero clock value.\n");
- return 0xff;
+ DRM_INFO("Mode has zero clock value.\n");
+ return -EINVAL;
}
ideal_clk = mode->clock * 1000;
@@ -410,10 +414,23 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
ideal_clk *= 2;
}
+ lcd_rate = clk_get_rate(ctx->lcd_clk);
+ if (2 * lcd_rate < ideal_clk) {
+ DRM_INFO("sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n",
+ lcd_rate, ideal_clk);
+ return -EINVAL;
+ }
+
/* Find the clock divider value that gets us closest to ideal_clk */
- clkdiv = DIV_ROUND_CLOSEST(clk_get_rate(ctx->lcd_clk), ideal_clk);
+ clkdiv = DIV_ROUND_CLOSEST(lcd_rate, ideal_clk);
+ if (clkdiv >= 0x200) {
+ DRM_INFO("requested pixel clock(%lu) too low\n", ideal_clk);
+ return -EINVAL;
+ }
+
+ ctx->clkdiv = (clkdiv < 0x100) ? clkdiv : 0xff;
- return (clkdiv < 0x100) ? clkdiv : 0xff;
+ return 0;
}
static void fimd_setup_trigger(struct fimd_context *ctx)
@@ -442,7 +459,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
const struct fimd_driver_data *driver_data = ctx->driver_data;
void *timing_base = ctx->regs + driver_data->timing_base;
- u32 val, clkdiv;
+ u32 val;
if (ctx->suspended)
return;
@@ -543,9 +560,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
if (ctx->driver_data->has_clksel)
val |= VIDCON0_CLKSEL_LCD;
- clkdiv = fimd_calc_clkdiv(ctx, mode);
- if (clkdiv > 1)
- val |= VIDCON0_CLKVAL_F(clkdiv - 1) | VIDCON0_CLKDIR;
+ if (ctx->clkdiv > 1)
+ val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
writel(val, ctx->regs + VIDCON0);
}
@@ -939,14 +955,14 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.update_plane = fimd_update_plane,
.disable_plane = fimd_disable_plane,
.atomic_flush = fimd_atomic_flush,
+ .atomic_check = fimd_atomic_check,
.te_handler = fimd_te_handler,
};
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
- u32 val, clear_bit, start, start_s;
- int win;
+ u32 val, clear_bit;
val = readl(ctx->regs + VIDINTCON1);
@@ -961,18 +977,6 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
if (!ctx->i80_if)
drm_crtc_handle_vblank(&ctx->crtc->base);
- for (win = 0 ; win < WINDOWS_NR ; win++) {
- struct exynos_drm_plane *plane = &ctx->planes[win];
-
- if (!plane->pending_fb)
- continue;
-
- start = readl(ctx->regs + VIDWx_BUF_START(win, 0));
- start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0));
- if (start == start_s)
- exynos_drm_crtc_finish_update(ctx->crtc, plane);
- }
-
if (ctx->i80_if) {
/* Exits triggering mode */
atomic_set(&ctx->triggering, 0);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 6eca8bb88648..fbd13fabdf2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -138,6 +138,18 @@ enum g2d_reg_type {
MAX_REG_TYPE_NR
};
+enum g2d_flag_bits {
+ /*
+ * If set, suspends the runqueue worker after the currently
+ * processed node is finished.
+ */
+ G2D_BIT_SUSPEND_RUNQUEUE,
+ /*
+ * If set, indicates that the engine is currently busy.
+ */
+ G2D_BIT_ENGINE_BUSY,
+};
+
/* cmdlist data structure */
struct g2d_cmdlist {
u32 head;
@@ -226,7 +238,7 @@ struct g2d_data {
struct workqueue_struct *g2d_workq;
struct work_struct runqueue_work;
struct exynos_drm_subdrv subdrv;
- bool suspended;
+ unsigned long flags;
/* cmdlist */
struct g2d_cmdlist_node *cmdlist_node;
@@ -246,6 +258,12 @@ struct g2d_data {
unsigned long max_pool;
};
+static inline void g2d_hw_reset(struct g2d_data *g2d)
+{
+ writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET);
+ clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
+}
+
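
Replacing the suspended bool with flag bits lets the IRQ handler and the runqueue worker coordinate through the kernel's atomic bitops without sharing a lock; note the enum values are bit numbers, not masks. A minimal sketch of the idiom (not part of the patch; the names are hypothetical):

    #include <linux/bitops.h>

    enum { MY_BIT_ENGINE_BUSY }; /* a bit number, as expected by set_bit() & co. */

    static unsigned long my_flags;

    /* Process context: mark the engine busy before kicking off DMA. */
    static void my_start(void)
    {
            set_bit(MY_BIT_ENGINE_BUSY, &my_flags);
            /* ... write DMA registers ... */
    }

    /* IRQ context: the completion interrupt marks the engine idle again. */
    static void my_completion_irq(void)
    {
            clear_bit(MY_BIT_ENGINE_BUSY, &my_flags);
    }
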
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
@@ -470,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
goto err_free;
}
- ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+ ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ g2d_userptr->vec);
if (ret != npages) {
DRM_ERROR("failed to get user pages from userptr.\n");
if (ret < 0)
@@ -803,12 +822,8 @@ static void g2d_dma_start(struct g2d_data *g2d,
struct g2d_cmdlist_node *node =
list_first_entry(&runqueue_node->run_cmdlist,
struct g2d_cmdlist_node, list);
- int ret;
-
- ret = pm_runtime_get_sync(g2d->dev);
- if (ret < 0)
- return;
+ set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}
@@ -831,9 +846,6 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d,
{
struct g2d_cmdlist_node *node;
- if (!runqueue_node)
- return;
-
mutex_lock(&g2d->cmdlist_mutex);
/*
* commands in run_cmdlist have been completed so unmap all gem
@@ -847,29 +859,65 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d,
kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}
-static void g2d_exec_runqueue(struct g2d_data *g2d)
+/**
+ * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes
+ * @g2d: G2D state object
+ * @file: if not NULL, only remove nodes belonging to this DRM file
+ *
+ * Must be called with the runqueue mutex held.
+ */
+static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file)
{
- g2d->runqueue_node = g2d_get_runqueue_node(g2d);
- if (g2d->runqueue_node)
- g2d_dma_start(g2d, g2d->runqueue_node);
+ struct g2d_runqueue_node *node, *n;
+
+ if (list_empty(&g2d->runqueue))
+ return;
+
+ list_for_each_entry_safe(node, n, &g2d->runqueue, list) {
+ if (file && node->filp != file)
+ continue;
+
+ list_del_init(&node->list);
+ g2d_free_runqueue_node(g2d, node);
+ }
}
static void g2d_runqueue_worker(struct work_struct *work)
{
struct g2d_data *g2d = container_of(work, struct g2d_data,
runqueue_work);
+ struct g2d_runqueue_node *runqueue_node;
+
+ /*
+ * The engine is busy and the completion of the current node is going
+ * to poke the runqueue worker, so nothing to do here.
+ */
+ if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags))
+ return;
mutex_lock(&g2d->runqueue_mutex);
- pm_runtime_put_sync(g2d->dev);
- complete(&g2d->runqueue_node->complete);
- if (g2d->runqueue_node->async)
- g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+ runqueue_node = g2d->runqueue_node;
+ g2d->runqueue_node = NULL;
+
+ if (runqueue_node) {
+ pm_runtime_mark_last_busy(g2d->dev);
+ pm_runtime_put_autosuspend(g2d->dev);
+
+ complete(&runqueue_node->complete);
+ if (runqueue_node->async)
+ g2d_free_runqueue_node(g2d, runqueue_node);
+ }
+
+ if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) {
+ g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+
+ if (g2d->runqueue_node) {
+ pm_runtime_get_sync(g2d->dev);
+ g2d_dma_start(g2d, g2d->runqueue_node);
+ }
+ }
- if (g2d->suspended)
- g2d->runqueue_node = NULL;
- else
- g2d_exec_runqueue(g2d);
mutex_unlock(&g2d->runqueue_mutex);
}
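
With this rework the worker is the single consumer of the runqueue; submitters only append a node and poke the work item. A sketch of the submit side, assuming the structures from the patch (my_submit() is a hypothetical wrapper; queue_work() is a no-op when the item is already pending, so concurrent submitters cannot double-queue the worker):

    static void my_submit(struct g2d_data *g2d, struct g2d_runqueue_node *node)
    {
            /* Publish the node under the runqueue lock... */
            mutex_lock(&g2d->runqueue_mutex);
            list_add_tail(&node->list, &g2d->runqueue);
            mutex_unlock(&g2d->runqueue_mutex);

            /* ...then let the sole consumer pick it up once the engine idles. */
            queue_work(g2d->g2d_workq, &g2d->runqueue_work);
    }
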
@@ -918,12 +966,72 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
}
}
- if (pending & G2D_INTP_ACMD_FIN)
+ if (pending & G2D_INTP_ACMD_FIN) {
+ clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+ }
return IRQ_HANDLED;
}
+/**
+ * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node
+ * @g2d: G2D state object
+ * @file: if not NULL, only wait if the current runqueue node belongs
+ * to this DRM file
+ *
+ * Should the engine not become idle after a 100ms timeout, a hardware
+ * reset is issued.
+ */
+static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file)
+{
+ struct device *dev = g2d->dev;
+
+ struct g2d_runqueue_node *runqueue_node = NULL;
+ unsigned int tries = 10;
+
+ mutex_lock(&g2d->runqueue_mutex);
+
+ /* If no node is currently processed, we have nothing to do. */
+ if (!g2d->runqueue_node)
+ goto out;
+
+ runqueue_node = g2d->runqueue_node;
+
+ /* Check if the currently processed item belongs to us. */
+ if (file && runqueue_node->filp != file)
+ goto out;
+
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ /* Wait for the G2D engine to finish. */
+ while (tries-- && (g2d->runqueue_node == runqueue_node))
+ mdelay(10);
+
+ mutex_lock(&g2d->runqueue_mutex);
+
+ if (g2d->runqueue_node != runqueue_node)
+ goto out;
+
+ dev_err(dev, "wait timed out, resetting engine...\n");
+ g2d_hw_reset(g2d);
+
+ /*
+ * After the hardware reset of the engine we are going to lose
+ * the IRQ which triggers the PM runtime put().
+ * So do this manually here.
+ */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ complete(&runqueue_node->complete);
+ if (runqueue_node->async)
+ g2d_free_runqueue_node(g2d, runqueue_node);
+
+out:
+ mutex_unlock(&g2d->runqueue_mutex);
+}
+
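
The wait loop above is the classic bounded-poll pattern: up to 10 probes, 10 ms apart, gives the ~100 ms budget from the kerneldoc, and only after the budget is spent does recovery kick in. Reduced to its skeleton (a sketch; engine_idle() and my_reset() are hypothetical):

    unsigned int tries = 10;

    /* Poll for up to ~100 ms (10 probes, 10 ms apart). */
    while (tries-- && !engine_idle())
            mdelay(10);

    /* Budget exhausted: assume the engine is wedged and recover. */
    if (!engine_idle())
            my_reset();
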
static int g2d_check_reg_offset(struct device *dev,
struct g2d_cmdlist_node *node,
int nr, bool for_addr)
@@ -1259,10 +1367,11 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
runqueue_node->pid = current->pid;
runqueue_node->filp = file;
list_add_tail(&runqueue_node->list, &g2d->runqueue);
- if (!g2d->runqueue_node)
- g2d_exec_runqueue(g2d);
mutex_unlock(&g2d->runqueue_mutex);
+ /* Let the runqueue know that there is work to do. */
+ queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+
if (runqueue_node->async)
goto out;
@@ -1339,15 +1448,26 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
if (!g2d)
return;
+ /* Remove the runqueue nodes that belong to us. */
+ mutex_lock(&g2d->runqueue_mutex);
+ g2d_remove_runqueue_nodes(g2d, file);
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ /*
+ * Wait for the runqueue worker to finish its current node.
+ * After this the engine should no longer be accessing any
+ * memory belonging to us.
+ */
+ g2d_wait_finish(g2d, file);
+
+ /*
+ * Even after the engine is idle, there might still be stale cmdlists
+ * (i.e. cmdlists which we submitted but never executed) around, with
+ * their corresponding GEM/userptr buffers.
+ * Properly unmap these buffers here.
+ */
mutex_lock(&g2d->cmdlist_mutex);
list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
- /*
- * unmap all gem objects not completed.
- *
- * P.S. if current process was terminated forcely then
- * there may be some commands in inuse_cmdlist so unmap
- * them.
- */
g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
}
@@ -1399,7 +1519,11 @@ static int g2d_probe(struct platform_device *pdev)
goto err_destroy_workqueue;
}
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_enable(dev);
+ clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+ clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1440,7 +1564,7 @@ static int g2d_probe(struct platform_device *pdev)
goto err_put_clk;
}
- dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
+ dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n",
G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
return 0;
@@ -1458,14 +1582,17 @@ static int g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
+ /* Suspend operation and wait for engine idle. */
+ set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+ g2d_wait_finish(g2d, NULL);
+
cancel_work_sync(&g2d->runqueue_work);
exynos_drm_subdrv_unregister(&g2d->subdrv);
- while (g2d->runqueue_node) {
- g2d_free_runqueue_node(g2d, g2d->runqueue_node);
- g2d->runqueue_node = g2d_get_runqueue_node(g2d);
- }
+ /* There should be no locking needed here. */
+ g2d_remove_runqueue_nodes(g2d, NULL);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
g2d_fini_cmdlist(g2d);
@@ -1475,20 +1602,37 @@ static int g2d_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int g2d_runtime_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int g2d_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
- mutex_lock(&g2d->runqueue_mutex);
- g2d->suspended = true;
- mutex_unlock(&g2d->runqueue_mutex);
+ /*
+ * Suspend the runqueue worker operation and wait until the G2D
+ * engine is idle.
+ */
+ set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+ g2d_wait_finish(g2d, NULL);
+ flush_work(&g2d->runqueue_work);
- while (g2d->runqueue_node)
- /* FIXME: good range? */
- usleep_range(500, 1000);
+ return 0;
+}
- flush_work(&g2d->runqueue_work);
+static int g2d_resume(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+ queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int g2d_runtime_suspend(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
clk_disable_unprepare(g2d->gate_clk);
@@ -1504,16 +1648,12 @@ static int g2d_runtime_resume(struct device *dev)
if (ret < 0)
dev_warn(dev, "failed to enable clock.\n");
- g2d->suspended = false;
- g2d_exec_runqueue(g2d);
-
return ret;
}
#endif
static const struct dev_pm_ops g2d_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
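
All the runtime-PM changes in this patch follow one autosuspend discipline: every pm_runtime_get_sync() is balanced by pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend(), so the gate clock is only dropped after the 2 s idle window. A condensed sketch of the pairing (assumes a struct device *dev; not tied to this driver):

    /* probe: opt in to autosuspend before enabling runtime PM */
    pm_runtime_use_autosuspend(dev);
    pm_runtime_set_autosuspend_delay(dev, 2000); /* ms of idleness before suspend */
    pm_runtime_enable(dev);

    /* around each burst of hardware access */
    pm_runtime_get_sync(dev);
    /* ... program registers, run the job ... */
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);

    /* remove: undo in reverse order */
    pm_runtime_dont_use_autosuspend(dev);
    pm_runtime_disable(dev);
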
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index c8de4913fdbe..87f6b5672e11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -66,7 +66,7 @@ static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
if (ret)
goto free_domain;
- ret = iommu_dma_init_domain(domain, start, size);
+ ret = iommu_dma_init_domain(domain, start, size, NULL);
if (ret)
goto put_cookie;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 7f32419b25ea..c2f17f30afab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -238,7 +238,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
return;
plane->crtc = state->crtc;
- exynos_plane->pending_fb = state->fb;
if (exynos_crtc->ops->update_plane)
exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e8f6c92b2a36..57fe514d5c5b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/component.h>
+#include <linux/timer.h>
#include <drm/exynos_drm.h>
@@ -28,6 +29,9 @@
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
+/* VIDI uses fixed refresh rate of 50Hz */
+#define VIDI_REFRESH_TIME (1000 / 50)
+
/* vidi has totally three virtual windows. */
#define WINDOWS_NR 3
@@ -43,12 +47,9 @@ struct vidi_context {
struct exynos_drm_plane planes[WINDOWS_NR];
struct edid *raw_edid;
unsigned int clkdiv;
- unsigned long irq_flags;
unsigned int connected;
- bool vblank_on;
bool suspended;
- bool direct_vblank;
- struct work_struct work;
+ struct timer_list timer;
struct mutex lock;
int pipe;
};
@@ -102,30 +103,14 @@ static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
if (ctx->suspended)
return -EPERM;
- if (!test_and_set_bit(0, &ctx->irq_flags))
- ctx->vblank_on = true;
-
- ctx->direct_vblank = true;
-
- /*
- * in case of page flip request, vidi_finish_pageflip function
- * will not be called because direct_vblank is true and then
- * that function will be called by crtc_ops->update_plane callback
- */
- schedule_work(&ctx->work);
+ mod_timer(&ctx->timer,
+ jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1);
return 0;
}
static void vidi_disable_vblank(struct exynos_drm_crtc *crtc)
{
- struct vidi_context *ctx = crtc->ctx;
-
- if (ctx->suspended)
- return;
-
- if (test_and_clear_bit(0, &ctx->irq_flags))
- ctx->vblank_on = false;
}
static void vidi_update_plane(struct exynos_drm_crtc *crtc,
@@ -140,9 +125,6 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc,
addr = exynos_drm_fb_dma_addr(state->fb, 0);
DRM_DEBUG_KMS("dma_addr = %pad\n", &addr);
-
- if (ctx->vblank_on)
- schedule_work(&ctx->work);
}
static void vidi_enable(struct exynos_drm_crtc *crtc)
@@ -153,17 +135,17 @@ static void vidi_enable(struct exynos_drm_crtc *crtc)
ctx->suspended = false;
- /* if vblank was enabled status, enable it again. */
- if (test_and_clear_bit(0, &ctx->irq_flags))
- vidi_enable_vblank(ctx->crtc);
-
mutex_unlock(&ctx->lock);
+
+ drm_crtc_vblank_on(&crtc->base);
}
static void vidi_disable(struct exynos_drm_crtc *crtc)
{
struct vidi_context *ctx = crtc->ctx;
+ drm_crtc_vblank_off(&crtc->base);
+
mutex_lock(&ctx->lock);
ctx->suspended = true;
@@ -190,37 +172,16 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.update_plane = vidi_update_plane,
};
-static void vidi_fake_vblank_handler(struct work_struct *work)
+static void vidi_fake_vblank_timer(unsigned long arg)
{
- struct vidi_context *ctx = container_of(work, struct vidi_context,
- work);
- int win;
+ struct vidi_context *ctx = (void *)arg;
if (ctx->pipe < 0)
return;
- /* refresh rate is about 50Hz. */
- usleep_range(16000, 20000);
-
- mutex_lock(&ctx->lock);
-
- if (ctx->direct_vblank) {
- drm_crtc_handle_vblank(&ctx->crtc->base);
- ctx->direct_vblank = false;
- mutex_unlock(&ctx->lock);
- return;
- }
-
- mutex_unlock(&ctx->lock);
-
- for (win = 0 ; win < WINDOWS_NR ; win++) {
- struct exynos_drm_plane *plane = &ctx->planes[win];
-
- if (!plane->pending_fb)
- continue;
-
- exynos_drm_crtc_finish_update(ctx->crtc, plane);
- }
+ if (drm_crtc_handle_vblank(&ctx->crtc->base))
+ mod_timer(&ctx->timer,
+ jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1);
}
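
The fake vblank source is a self-rearming timer: each tick delivers one vblank and re-arms only while drm_crtc_handle_vblank() reports that vblank events are still wanted, so the timer dies out by itself when the last client disables vblanks. A sketch of the rearm idiom with the pre-4.15 timer API this kernel uses (the callback and its wiring are hypothetical):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    #define PERIOD_MS 20 /* 50 Hz */

    static struct timer_list fake_timer;

    static void fake_tick(unsigned long arg)
    {
            /* ... deliver one vblank event ... */

            /* Re-arm; the "- 1" compensates for msecs_to_jiffies() rounding up. */
            mod_timer(&fake_timer, jiffies + msecs_to_jiffies(PERIOD_MS) - 1);
    }

    /* setup: setup_timer(&fake_timer, fake_tick, 0UL);
     * teardown: del_timer_sync(&fake_timer); */
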
static ssize_t vidi_show_connection(struct device *dev,
@@ -489,6 +450,9 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
static void vidi_unbind(struct device *dev, struct device *master, void *data)
{
+ struct vidi_context *ctx = dev_get_drvdata(dev);
+
+ del_timer_sync(&ctx->timer);
}
static const struct component_ops vidi_component_ops = {
@@ -507,7 +471,7 @@ static int vidi_probe(struct platform_device *pdev)
ctx->pdev = pdev;
- INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
+ setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx);
mutex_init(&ctx->lock);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2275efe41acd..e8fb6ef947ee 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1669,10 +1669,9 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+ for (i = 0; i < ARRAY_SIZE(supply); ++i)
hdata->regul_bulk[i].supply = supply[i];
- hdata->regul_bulk[i].consumer = NULL;
- }
+
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -1760,28 +1759,74 @@ static const struct component_ops hdmi_component_ops = {
.unbind = hdmi_unbind,
};
-static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev)
+static int hdmi_get_ddc_adapter(struct hdmi_context *hdata)
{
const char *compatible_str = "samsung,exynos4210-hdmiddc";
struct device_node *np;
+ struct i2c_adapter *adpt;
np = of_find_compatible_node(NULL, NULL, compatible_str);
if (np)
- return of_get_next_parent(np);
+ np = of_get_next_parent(np);
+ else
+ np = of_parse_phandle(hdata->dev->of_node, "ddc", 0);
+
+ if (!np) {
+ DRM_ERROR("Failed to find ddc node in device tree\n");
+ return -ENODEV;
+ }
- return NULL;
+ adpt = of_find_i2c_adapter_by_node(np);
+ of_node_put(np);
+
+ if (!adpt) {
+ DRM_INFO("Failed to get ddc i2c adapter by node\n");
+ return -EPROBE_DEFER;
+ }
+
+ hdata->ddc_adpt = adpt;
+
+ return 0;
}
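
The lookup order in the new helper is deliberate: the legacy hdmiddc compatible takes precedence, the ddc phandle is the fallback, and a node whose i2c adapter has not probed yet maps to -EPROBE_DEFER rather than a hard failure. The skeleton of that precedence chain (a sketch; the compatible string is a placeholder):

    struct device_node *np;
    struct i2c_adapter *adpt;

    np = of_find_compatible_node(NULL, NULL, "vendor,legacy-ddc");
    if (np)
            np = of_get_next_parent(np); /* the adapter is the parent node */
    else
            np = of_parse_phandle(dev->of_node, "ddc", 0);
    if (!np)
            return -ENODEV; /* binding missing entirely: hard error */

    adpt = of_find_i2c_adapter_by_node(np);
    of_node_put(np); /* drop the reference taken by the lookup */
    if (!adpt)
            return -EPROBE_DEFER; /* adapter exists in DT but not probed yet */
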
-static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
+static int hdmi_get_phy_io(struct hdmi_context *hdata)
{
const char *compatible_str = "samsung,exynos4212-hdmiphy";
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, compatible_str);
+ if (!np) {
+ np = of_parse_phandle(hdata->dev->of_node, "phy", 0);
+ if (!np) {
+ DRM_ERROR("Failed to find hdmiphy node in device tree\n");
+ return -ENODEV;
+ }
+ }
+
+ if (hdata->drv_data->is_apb_phy) {
+ hdata->regs_hdmiphy = of_iomap(np, 0);
+ if (!hdata->regs_hdmiphy) {
+ DRM_ERROR("failed to ioremap hdmi phy\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else {
+ hdata->hdmiphy_port = of_find_i2c_device_by_node(np);
+ if (!hdata->hdmiphy_port) {
+ DRM_INFO("Failed to get hdmi phy i2c client\n");
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
+ }
- return of_find_compatible_node(NULL, NULL, compatible_str);
+out:
+ of_node_put(np);
+ return ret;
}
static int hdmi_probe(struct platform_device *pdev)
{
- struct device_node *ddc_node, *phy_node;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
struct resource *res;
@@ -1811,52 +1856,13 @@ static int hdmi_probe(struct platform_device *pdev)
return ret;
}
- ddc_node = hdmi_legacy_ddc_dt_binding(dev);
- if (ddc_node)
- goto out_get_ddc_adpt;
-
- ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
- if (!ddc_node) {
- DRM_ERROR("Failed to find ddc node in device tree\n");
- return -ENODEV;
- }
- of_node_put(dev->of_node);
-
-out_get_ddc_adpt:
- hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
- if (!hdata->ddc_adpt) {
- DRM_ERROR("Failed to get ddc i2c adapter by node\n");
- return -EPROBE_DEFER;
- }
-
- phy_node = hdmi_legacy_phy_dt_binding(dev);
- if (phy_node)
- goto out_get_phy_port;
+ ret = hdmi_get_ddc_adapter(hdata);
+ if (ret)
+ return ret;
- phy_node = of_parse_phandle(dev->of_node, "phy", 0);
- if (!phy_node) {
- DRM_ERROR("Failed to find hdmiphy node in device tree\n");
- ret = -ENODEV;
+ ret = hdmi_get_phy_io(hdata);
+ if (ret)
goto err_ddc;
- }
- of_node_put(dev->of_node);
-
-out_get_phy_port:
- if (hdata->drv_data->is_apb_phy) {
- hdata->regs_hdmiphy = of_iomap(phy_node, 0);
- if (!hdata->regs_hdmiphy) {
- DRM_ERROR("failed to ioremap hdmi phy\n");
- ret = -ENOMEM;
- goto err_ddc;
- }
- } else {
- hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node);
- if (!hdata->hdmiphy_port) {
- DRM_ERROR("Failed to get hdmi phy i2c client\n");
- ret = -EPROBE_DEFER;
- goto err_ddc;
- }
- }
INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e1d47f9435fc..edb20a34c66c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -73,6 +73,9 @@ enum mixer_version_id {
enum mixer_flag_bits {
MXR_BIT_POWERED,
MXR_BIT_VSYNC,
+ MXR_BIT_INTERLACE,
+ MXR_BIT_VP_ENABLED,
+ MXR_BIT_HAS_SCLK,
};
static const uint32_t mixer_formats[] = {
@@ -98,9 +101,6 @@ struct mixer_context {
struct exynos_drm_plane planes[MIXER_WIN_NR];
int pipe;
unsigned long flags;
- bool interlace;
- bool vp_enabled;
- bool has_sclk;
struct mixer_resources mixer_res;
enum mixer_version_id mxr_ver;
@@ -346,7 +346,7 @@ static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
mixer_reg_writemask(res, MXR_STATUS, enable ?
MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
- if (ctx->vp_enabled)
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
VP_SHADOW_UPDATE_ENABLE : 0);
}
@@ -357,8 +357,8 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
u32 val;
/* choosing between interlace and progressive mode */
- val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
- MXR_CFG_SCAN_PROGRESSIVE);
+ val = test_bit(MXR_BIT_INTERLACE, &ctx->flags) ?
+ MXR_CFG_SCAN_INTERLACE : MXR_CFG_SCAN_PROGRESSIVE;
if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
/* choosing between proper HD and SD mode */
@@ -436,9 +436,10 @@ static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
mixer_reg_writemask(res, MXR_LAYER_CFG,
MXR_LAYER_CFG_GRP1_VAL(priority),
MXR_LAYER_CFG_GRP1_MASK);
+
break;
case VP_DEFAULT_WIN:
- if (ctx->vp_enabled) {
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
mixer_reg_writemask(res, MXR_CFG, val,
MXR_CFG_VP_ENABLE);
@@ -501,7 +502,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- ctx->interlace = true;
+ __set_bit(MXR_BIT_INTERLACE, &ctx->flags);
if (tiled_mode) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
@@ -510,7 +511,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
}
} else {
- ctx->interlace = false;
+ __clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
luma_addr[1] = 0;
chroma_addr[1] = 0;
}
@@ -518,7 +519,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
spin_lock_irqsave(&res->reg_slock, flags);
/* interlace or progressive scan mode */
- val = (ctx->interlace ? ~0 : 0);
+ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
/* setup format */
@@ -541,7 +542,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_reg_write(res, VP_DST_WIDTH, state->crtc.w);
vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x);
- if (ctx->interlace) {
+ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2);
vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2);
} else {
@@ -636,9 +637,9 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
src_y_offset = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- ctx->interlace = true;
+ __set_bit(MXR_BIT_INTERLACE, &ctx->flags);
else
- ctx->interlace = false;
+ __clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
spin_lock_irqsave(&res->reg_slock, flags);
@@ -697,10 +698,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
static void vp_win_reset(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
- int tries = 100;
+ unsigned int tries = 100;
vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
- for (tries = 100; tries; --tries) {
+ while (tries--) {
/* waiting until VP_SRESET_PROCESSING is 0 */
if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
break;
@@ -733,7 +734,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
- if (ctx->vp_enabled) {
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
vp_default_filter(res);
@@ -742,7 +743,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
/* disable all layers */
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
- if (ctx->vp_enabled)
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
spin_unlock_irqrestore(&res->reg_slock, flags);
@@ -753,7 +754,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
struct mixer_context *ctx = arg;
struct mixer_resources *res = &ctx->mixer_res;
u32 val, base, shadow;
- int win;
spin_lock(&res->reg_slock);
@@ -767,7 +767,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
val &= ~MXR_INT_STATUS_VSYNC;
/* interlace scan need to check shadow register */
- if (ctx->interlace) {
+ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
if (base != shadow)
@@ -780,14 +780,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
}
drm_crtc_handle_vblank(&ctx->crtc->base);
- for (win = 0 ; win < MIXER_WIN_NR ; win++) {
- struct exynos_drm_plane *plane = &ctx->planes[win];
-
- if (!plane->pending_fb)
- continue;
-
- exynos_drm_crtc_finish_update(ctx->crtc, plane);
- }
}
out:
@@ -867,7 +859,7 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
return -ENODEV;
}
- if (mixer_ctx->has_sclk) {
+ if (test_bit(MXR_BIT_HAS_SCLK, &mixer_ctx->flags)) {
mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
if (IS_ERR(mixer_res->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
@@ -917,7 +909,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
return ret;
}
- if (mixer_ctx->vp_enabled) {
+ if (test_bit(MXR_BIT_VP_ENABLED, &mixer_ctx->flags)) {
/* acquire vp resources: regs, irqs, clocks */
ret = vp_resources_init(mixer_ctx);
if (ret) {
@@ -1160,7 +1152,8 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
return ret;
for (i = 0; i < MIXER_WIN_NR; i++) {
- if (i == VP_DEFAULT_WIN && !ctx->vp_enabled)
+ if (i == VP_DEFAULT_WIN && !test_bit(MXR_BIT_VP_ENABLED,
+ &ctx->flags))
continue;
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
@@ -1215,10 +1208,13 @@ static int mixer_probe(struct platform_device *pdev)
ctx->pdev = pdev;
ctx->dev = dev;
- ctx->vp_enabled = drv->is_vp_enabled;
- ctx->has_sclk = drv->has_sclk;
ctx->mxr_ver = drv->version;
+ if (drv->is_vp_enabled)
+ __set_bit(MXR_BIT_VP_ENABLED, &ctx->flags);
+ if (drv->has_sclk)
+ __set_bit(MXR_BIT_HAS_SCLK, &ctx->flags);
+
platform_set_drvdata(pdev, ctx);
ret = component_add(&pdev->dev, &mixer_component_ops);
@@ -1244,9 +1240,9 @@ static int __maybe_unused exynos_mixer_suspend(struct device *dev)
clk_disable_unprepare(res->hdmi);
clk_disable_unprepare(res->mixer);
- if (ctx->vp_enabled) {
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
clk_disable_unprepare(res->vp);
- if (ctx->has_sclk)
+ if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags))
clk_disable_unprepare(res->sclk_mixer);
}
@@ -1269,14 +1265,14 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev)
DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
return ret;
}
- if (ctx->vp_enabled) {
+ if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
ret = clk_prepare_enable(res->vp);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
ret);
return ret;
}
- if (ctx->has_sclk) {
+ if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) {
ret = clk_prepare_enable(res->sclk_mixer);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the " \
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 3371635cd4d7..deb57435cc89 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -25,8 +25,13 @@
static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct drm_device *dev = crtc->dev;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
struct drm_pending_vblank_event *event = crtc->state->event;
+ regmap_write(fsl_dev->regmap,
+ DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
+
if (event) {
crtc->state->event = NULL;
@@ -39,11 +44,15 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
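
The DCU's configuration registers are shadowed: writes accumulate in a buffer and only take effect once DCU_UPDATE_MODE_READREG latches the whole set at the next frame. Centralizing that write in atomic_flush yields exactly one latch per atomic commit instead of scattered latches per plane or IRQ, as a sketch of the commit flow shows (names as in the patch):

    /* 1. planes and CRTC write their shadowed registers in atomic_update */
    /* 2. atomic_flush publishes them all in one go: */
    regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
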
-static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
+static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ /* always disable planes on the CRTC */
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
+
drm_crtc_vblank_off(crtc);
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
@@ -51,6 +60,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
DCU_MODE_DCU_MODE(DCU_MODE_OFF));
regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
DCU_UPDATE_MODE_READREG);
+ clk_disable_unprepare(fsl_dev->pix_clk);
}
static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +68,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ clk_prepare_enable(fsl_dev->pix_clk);
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
DCU_MODE_DCU_MODE_MASK,
DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,14 +127,12 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return;
}
static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
+ .atomic_disable = fsl_dcu_drm_crtc_atomic_disable,
.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
- .disable = fsl_dcu_drm_disable_crtc,
.enable = fsl_dcu_drm_crtc_enable,
.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
};
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 7882387f9bff..cc2fde2ae5ef 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -59,8 +59,6 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev)
regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0);
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return ret;
}
@@ -139,8 +137,6 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
drm_handle_vblank(dev, 0);
regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status);
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return IRQ_HANDLED;
}
@@ -267,12 +263,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
return ret;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- return ret;
- }
-
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
@@ -330,6 +322,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
const char *pix_clk_in_name;
const struct of_device_id *id;
int ret;
+ u8 div_ratio_shift = 0;
fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
if (!fsl_dev)
@@ -382,29 +375,26 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
pix_clk_in = fsl_dev->clk;
}
+ if (of_property_read_bool(dev->of_node, "big-endian"))
+ div_ratio_shift = 24;
+
pix_clk_in_name = __clk_get_name(pix_clk_in);
snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name);
fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name,
pix_clk_in_name, 0, base + DCU_DIV_RATIO,
- 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
+ div_ratio_shift, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
if (IS_ERR(fsl_dev->pix_clk)) {
dev_err(dev, "failed to register pix clk\n");
ret = PTR_ERR(fsl_dev->pix_clk);
goto disable_clk;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- goto unregister_pix_clk;
- }
-
fsl_dev->tcon = fsl_tcon_init(dev);
drm = drm_dev_alloc(driver, dev);
- if (!drm) {
- ret = -ENOMEM;
- goto disable_pix_clk;
+ if (IS_ERR(drm)) {
+ ret = PTR_ERR(drm);
+ goto unregister_pix_clk;
}
fsl_dev->dev = dev;
@@ -425,8 +415,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
unref:
drm_dev_unref(drm);
-disable_pix_clk:
- clk_disable_unprepare(fsl_dev->pix_clk);
unregister_pix_clk:
clk_unregister(fsl_dev->pix_clk);
disable_clk:
@@ -439,7 +427,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
clk_disable_unprepare(fsl_dev->clk);
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_unregister(fsl_dev->pix_clk);
drm_put_dev(fsl_dev->drm);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index e50467a0deb0..a99f48847420 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -160,34 +160,14 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
DCU_LAYER_POST_SKIP(0) |
DCU_LAYER_PRE_SKIP(0));
}
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
- regmap_write(fsl_dev->regmap,
- DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
return;
}
-static void
-fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state)
-{
-}
-
-static int
-fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state)
-{
- return 0;
-}
-
static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
.atomic_check = fsl_dcu_drm_plane_atomic_check,
.atomic_disable = fsl_dcu_drm_plane_atomic_disable,
.atomic_update = fsl_dcu_drm_plane_atomic_update,
- .cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
- .prepare_fb = fsl_dcu_drm_plane_prepare_fb,
};
static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
@@ -226,11 +206,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
}
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
}
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 26edcc899712..e1dd75b18118 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -20,38 +20,6 @@
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
-static int
-fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
-static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_disable(fsl_dev->tcon);
-}
-
-static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_enable(fsl_dev->tcon);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .atomic_check = fsl_dcu_drm_encoder_atomic_check,
- .disable = fsl_dcu_drm_encoder_disable,
- .enable = fsl_dcu_drm_encoder_enable,
-};
-
static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
int ret;
encoder->possible_crtcs = 1;
+
+ /* Use bypass mode for parallel RGB/LVDS encoder */
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
+
ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
if (ret < 0)
return ret;
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index bca09ea24632..3194e544ee27 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -57,10 +57,7 @@ static int fsl_tcon_init_regmap(struct device *dev,
tcon->regs = devm_regmap_init_mmio(dev, regs,
&fsl_tcon_regmap_config);
- if (IS_ERR(tcon->regs))
- return PTR_ERR(tcon->regs);
-
- return 0;
+ return PTR_ERR_OR_ZERO(tcon->regs);
}
struct fsl_tcon *fsl_tcon_init(struct device *dev)
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index db9f7d011832..0d2bb1682508 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -28,7 +28,6 @@
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/init.h>
#include <linux/console.h>
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 38dc89083148..ea733ab5b1e0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -415,14 +415,6 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- /* Didn't get an EDID, so
- * Set wide sync ranges so we get all modes
- * handed to valid_mode for checking
- */
- connector->display_info.min_vfreq = 0;
- connector->display_info.max_vfreq = 200;
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
if (mode_dev->panel_fixed_mode != NULL) {
struct drm_display_mode *mode =
drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 0fcdce0817de..3a44e705db53 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -26,7 +26,6 @@
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/init.h>
#include <linux/console.h>
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 907cb51795c3..acb3848ef1c9 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -335,11 +335,6 @@ static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *dup_mode = NULL;
struct drm_device *dev = connector->dev;
- connector->display_info.min_vfreq = 0;
- connector->display_info.max_vfreq = 200;
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
-
if (fixed_mode) {
dev_dbg(dev->dev, "fixed_mode %dx%d\n",
fixed_mode->hdisplay, fixed_mode->vdisplay);
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index ab696ca7eeec..eab6d889bde9 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -163,10 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
if (bclp > 255)
return ASLE_BACKLIGHT_FAILED;
- if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
- int max = bd->props.max_brightness;
- gma_backlight_set(dev, bclp * max / 255);
- }
+ gma_backlight_set(dev, bclp * bd->props.max_brightness / 255);
asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index e55733ca46d2..fd7c91254841 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -530,15 +530,6 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- /* Didn't get an EDID, so
- * Set wide sync ranges so we get all modes
- * handed to valid_mode for checking
- */
- connector->display_info.min_vfreq = 0;
- connector->display_info.max_vfreq = 200;
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
-
if (mode_dev->panel_fixed_mode != NULL) {
struct drm_display_mode *mode =
drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index 4fca0d6feebe..e5360726d80b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -18,7 +18,6 @@
*/
#include <linux/i2c.h>
-#include <linux/fb.h>
#include <drm/drmP.h>
#include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index c3707d47cd89..7e7a4d43d6b6 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -608,15 +608,17 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+ char *format_name;
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
u32 addr = (u32)obj->paddr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
ch + 1, y, in_h, stride, (u32)obj->paddr);
+ format_name = drm_get_format_name(fb->pixel_format);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
- addr, fb->width, fb->height, fmt,
- drm_get_format_name(fb->pixel_format));
+ addr, fb->width, fb->height, fmt, format_name);
+ kfree(format_name);
/* get reg offset */
reg_ctrl = RD_CH_CTRL(ch);
@@ -815,19 +817,6 @@ static void ade_disable_channel(struct ade_plane *aplane)
ade_compositor_routing_disable(base, ch);
}
-static int ade_plane_prepare_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state)
-{
- /* do nothing */
- return 0;
-}
-
-static void ade_plane_cleanup_fb(struct drm_plane *plane,
- const struct drm_plane_state *old_state)
-{
- /* do nothing */
-}
-
static int ade_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -895,8 +884,6 @@ static void ade_plane_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
- .prepare_fb = ade_plane_prepare_fb,
- .cleanup_fb = ade_plane_cleanup_fb,
.atomic_check = ade_plane_atomic_check,
.atomic_update = ade_plane_atomic_update,
.atomic_disable = ade_plane_atomic_disable,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 1edd9bc80294..90377a609c98 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -169,7 +169,7 @@ static int kirin_gem_cma_dumb_create(struct drm_file *file,
static struct drm_driver kirin_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+ DRIVER_ATOMIC,
.fops = &kirin_drm_fops,
.gem_free_object_unlocked = drm_gem_cma_free_object,
@@ -207,8 +207,8 @@ static int kirin_drm_bind(struct device *dev)
int ret;
drm_dev = drm_dev_alloc(driver, dev);
- if (!drm_dev)
- return -ENOMEM;
+ if (IS_ERR(drm_dev))
+ return PTR_ERR(drm_dev);
drm_dev->platformdev = to_platform_device(dev);
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 4d341db462a2..a6c92beb410a 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -22,6 +22,7 @@ config DRM_I2C_SIL164
config DRM_I2C_NXP_TDA998X
tristate "NXP Semiconductors TDA998X HDMI encoder"
default m if DRM_TILCDC
+ select SND_SOC_HDMI_CODEC if SND_SOC
help
Support for NXP Semiconductors TDA998X HDMI encoders.
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index f4315bc8d471..9798d400d817 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/irq.h>
#include <sound/asoundef.h>
+#include <sound/hdmi-codec.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
@@ -30,6 +31,11 @@
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+struct tda998x_audio_port {
+ u8 format; /* AFMT_xxx */
+ u8 config; /* AP value */
+};
+
struct tda998x_priv {
struct i2c_client *cec;
struct i2c_client *hdmi;
@@ -41,7 +47,10 @@ struct tda998x_priv {
u8 vip_cntrl_0;
u8 vip_cntrl_1;
u8 vip_cntrl_2;
- struct tda998x_encoder_params params;
+ struct tda998x_audio_params audio_params;
+
+ struct platform_device *audio_pdev;
+ struct mutex audio_mutex;
wait_queue_head_t wq_edid;
volatile int wq_edid_wait;
@@ -53,6 +62,8 @@ struct tda998x_priv {
struct drm_encoder encoder;
struct drm_connector connector;
+
+ struct tda998x_audio_port audio_port[2];
};
#define conn_to_tda998x_priv(x) \
@@ -666,26 +677,16 @@ tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
reg_set(priv, REG_DIP_IF_FLAGS, bit);
}
-static void
-tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
+static int tda998x_write_aif(struct tda998x_priv *priv,
+ struct hdmi_audio_infoframe *cea)
{
union hdmi_infoframe frame;
- hdmi_audio_infoframe_init(&frame.audio);
-
- frame.audio.channels = p->audio_frame[1] & 0x07;
- frame.audio.channel_allocation = p->audio_frame[4];
- frame.audio.level_shift_value = (p->audio_frame[5] & 0x78) >> 3;
- frame.audio.downmix_inhibit = (p->audio_frame[5] & 0x80) >> 7;
-
- /*
- * L-PCM and IEC61937 compressed audio shall always set sample
- * frequency to "refer to stream". For others, see the HDMI
- * specification.
- */
- frame.audio.sample_frequency = (p->audio_frame[2] & 0x1c) >> 2;
+ frame.audio = *cea;
tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, &frame);
+
+ return 0;
}
static void
@@ -710,20 +711,21 @@ static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
}
}
-static void
+static int
tda998x_configure_audio(struct tda998x_priv *priv,
- struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+ struct tda998x_audio_params *params,
+ unsigned mode_clock)
{
u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
u32 n;
/* Enable audio ports */
- reg_write(priv, REG_ENA_AP, p->audio_cfg);
- reg_write(priv, REG_ENA_ACLK, p->audio_clk_cfg);
+ reg_write(priv, REG_ENA_AP, params->config);
/* Set audio input source */
- switch (p->audio_format) {
+ switch (params->format) {
case AFMT_SPDIF:
+ reg_write(priv, REG_ENA_ACLK, 0);
reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_SPDIF);
clksel_aip = AIP_CLKSEL_AIP_SPDIF;
clksel_fs = AIP_CLKSEL_FS_FS64SPDIF;
@@ -731,15 +733,29 @@ tda998x_configure_audio(struct tda998x_priv *priv,
break;
case AFMT_I2S:
+ reg_write(priv, REG_ENA_ACLK, 1);
reg_write(priv, REG_MUX_AP, MUX_AP_SELECT_I2S);
clksel_aip = AIP_CLKSEL_AIP_I2S;
clksel_fs = AIP_CLKSEL_FS_ACLK;
- cts_n = CTS_N_M(3) | CTS_N_K(3);
+ switch (params->sample_width) {
+ case 16:
+ cts_n = CTS_N_M(3) | CTS_N_K(1);
+ break;
+ case 18:
+ case 20:
+ case 24:
+ cts_n = CTS_N_M(3) | CTS_N_K(2);
+ break;
+ default:
+ case 32:
+ cts_n = CTS_N_M(3) | CTS_N_K(3);
+ break;
+ }
break;
default:
- BUG();
- return;
+ dev_err(&priv->hdmi->dev, "Unsupported I2S format\n");
+ return -EINVAL;
}
reg_write(priv, REG_AIP_CLKSEL, clksel_aip);
@@ -755,11 +771,11 @@ tda998x_configure_audio(struct tda998x_priv *priv,
* assume 100MHz requires larger divider.
*/
adiv = AUDIO_DIV_SERCLK_8;
- if (mode->clock > 100000)
+ if (mode_clock > 100000)
adiv++; /* AUDIO_DIV_SERCLK_16 */
/* S/PDIF asks for a larger divider */
- if (p->audio_format == AFMT_SPDIF)
+ if (params->format == AFMT_SPDIF)
adiv++; /* AUDIO_DIV_SERCLK_16 or _32 */
reg_write(priv, REG_AUDIO_DIV, adiv);
@@ -768,7 +784,7 @@ tda998x_configure_audio(struct tda998x_priv *priv,
* This is the approximate value of N, which happens to be
* the recommended values for non-coherent clocks.
*/
- n = 128 * p->audio_sample_rate / 1000;
+ n = 128 * params->sample_rate / 1000;
/* Write the CTS and N values */
buf[0] = 0x44;
@@ -786,20 +802,21 @@ tda998x_configure_audio(struct tda998x_priv *priv,
reg_set(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
reg_clear(priv, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
- /* Write the channel status */
- buf[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
- buf[1] = 0x00;
- buf[2] = IEC958_AES3_CON_FS_NOTID;
- buf[3] = IEC958_AES4_CON_ORIGFS_NOTID |
- IEC958_AES4_CON_MAX_WORDLEN_24;
+ /* Write the channel status.
+ * The REG_CH_STAT_B registers skip the IEC958 AES2 byte, because
+ * there is a separate register for each I2S wire.
+ */
+ buf[0] = params->status[0];
+ buf[1] = params->status[1];
+ buf[2] = params->status[3];
+ buf[3] = params->status[4];
reg_write_range(priv, REG_CH_STAT_B(0), buf, 4);
tda998x_audio_mute(priv, true);
msleep(20);
tda998x_audio_mute(priv, false);
- /* Write the audio information packet */
- tda998x_write_aif(priv, p);
+ return tda998x_write_aif(priv, &params->cea);
}
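
The N value written above comes from the HDMI audio clock regeneration relation fs = f_TMDS * N / (128 * CTS); choosing N = 128 * fs / 1000 lets the sink recover CTS from the link clock. A small runnable check of the arithmetic (userspace C; the 48 kHz / 74.25 MHz pair is a hypothetical example):

    #include <stdio.h>

    int main(void)
    {
            unsigned long fs = 48000;      /* audio sample rate in Hz */
            unsigned long tmds = 74250000; /* hypothetical TMDS clock in Hz */

            unsigned long n = 128 * fs / 1000; /* 6144, the recommended N for 48 kHz */
            unsigned long cts = (unsigned long long)tmds * n / (128 * fs);

            /* fs * 128 * CTS == f_TMDS * N holds exactly for coherent clocks. */
            printf("N = %lu, CTS = %lu\n", n, cts); /* N = 6144, CTS = 74250 */
            return 0;
    }
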
/* DRM encoder functions */
@@ -820,7 +837,7 @@ static void tda998x_encoder_set_config(struct tda998x_priv *priv,
VIP_CNTRL_2_SWAP_F(p->swap_f) |
(p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
- priv->params = *p;
+ priv->audio_params = p->audio_params;
}
static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
@@ -1057,9 +1074,13 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
tda998x_write_avi(priv, adjusted_mode);
- if (priv->params.audio_cfg)
- tda998x_configure_audio(priv, adjusted_mode,
- &priv->params);
+ if (priv->audio_params.format != AFMT_UNUSED) {
+ mutex_lock(&priv->audio_mutex);
+ tda998x_configure_audio(priv,
+ &priv->audio_params,
+ adjusted_mode->clock);
+ mutex_unlock(&priv->audio_mutex);
+ }
}
}
@@ -1159,6 +1180,8 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
n = drm_add_edid_modes(connector, edid);
priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ drm_edid_to_eld(connector, edid);
+
kfree(edid);
return n;
@@ -1180,6 +1203,9 @@ static void tda998x_destroy(struct tda998x_priv *priv)
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+ if (priv->audio_pdev)
+ platform_device_unregister(priv->audio_pdev);
+
if (priv->hdmi->irq)
free_irq(priv->hdmi->irq, priv);
@@ -1189,8 +1215,189 @@ static void tda998x_destroy(struct tda998x_priv *priv)
i2c_unregister_device(priv->cec);
}
+static int tda998x_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+ int i, ret;
+ struct tda998x_audio_params audio = {
+ .sample_width = params->sample_width,
+ .sample_rate = params->sample_rate,
+ .cea = params->cea,
+ };
+
+ if (!priv->encoder.crtc)
+ return -ENODEV;
+
+ memcpy(audio.status, params->iec.status,
+ min(sizeof(audio.status), sizeof(params->iec.status)));
+
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ if (daifmt->bit_clk_inv || daifmt->frame_clk_inv ||
+ daifmt->bit_clk_master || daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_I2S)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_I2S;
+ break;
+ case HDMI_SPDIF:
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++)
+ if (priv->audio_port[i].format == AFMT_SPDIF)
+ audio.config = priv->audio_port[i].config;
+ audio.format = AFMT_SPDIF;
+ break;
+ default:
+ dev_err(dev, "%s: Invalid format %d\n", __func__, daifmt->fmt);
+ return -EINVAL;
+ }
+
+ if (audio.config == 0) {
+ dev_err(dev, "%s: No audio configutation found\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->audio_mutex);
+ ret = tda998x_configure_audio(priv,
+ &audio,
+ priv->encoder.crtc->hwmode.clock);
+
+ if (ret == 0)
+ priv->audio_params = audio;
+ mutex_unlock(&priv->audio_mutex);
+
+ return ret;
+}
+
+static void tda998x_audio_shutdown(struct device *dev, void *data)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->audio_mutex);
+
+ reg_write(priv, REG_ENA_AP, 0);
+
+ priv->audio_params.format = AFMT_UNUSED;
+
+ mutex_unlock(&priv->audio_mutex);
+}
+
+int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ mutex_lock(&priv->audio_mutex);
+
+ tda998x_audio_mute(priv, enable);
+
+ mutex_unlock(&priv->audio_mutex);
+ return 0;
+}
+
+static int tda998x_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+ struct drm_mode_config *config = &priv->encoder.dev->mode_config;
+ struct drm_connector *connector;
+ int ret = -ENODEV;
+
+ mutex_lock(&config->mutex);
+ list_for_each_entry(connector, &config->connector_list, head) {
+ if (&priv->encoder == connector->encoder) {
+ memcpy(buf, connector->eld,
+ min(sizeof(connector->eld), len));
+ ret = 0;
+ }
+ }
+ mutex_unlock(&config->mutex);
+
+ return ret;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = tda998x_audio_hw_params,
+ .audio_shutdown = tda998x_audio_shutdown,
+ .digital_mute = tda998x_audio_digital_mute,
+ .get_eld = tda998x_audio_get_eld,
+};
+
+static int tda998x_audio_codec_init(struct tda998x_priv *priv,
+ struct device *dev)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 2,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->audio_port); i++) {
+ if (priv->audio_port[i].format == AFMT_I2S &&
+ priv->audio_port[i].config != 0)
+ codec_data.i2s = 1;
+ if (priv->audio_port[i].format == AFMT_SPDIF &&
+ priv->audio_port[i].config != 0)
+ codec_data.spdif = 1;
+ }
+
+ priv->audio_pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(priv->audio_pdev);
+}
+
/* I2C driver functions */
+static int tda998x_get_audio_ports(struct tda998x_priv *priv,
+ struct device_node *np)
+{
+ const u32 *port_data;
+ u32 size;
+ int i;
+
+ port_data = of_get_property(np, "audio-ports", &size);
+ if (!port_data)
+ return 0;
+
+ size /= sizeof(u32);
+ if (size > 2 * ARRAY_SIZE(priv->audio_port) || size % 2 != 0) {
+ dev_err(&priv->hdmi->dev,
+ "Bad number of elements in audio-ports dt-property\n");
+ return -EINVAL;
+ }
+
+ size /= 2;
+
+ for (i = 0; i < size; i++) {
+ u8 afmt = be32_to_cpup(&port_data[2*i]);
+ u8 ena_ap = be32_to_cpup(&port_data[2*i+1]);
+
+ if (afmt != AFMT_SPDIF && afmt != AFMT_I2S) {
+ dev_err(&priv->hdmi->dev,
+ "Bad audio format %u\n", afmt);
+ return -EINVAL;
+ }
+
+ priv->audio_port[i].format = afmt;
+ priv->audio_port[i].config = ena_ap;
+ }
+
+ if (priv->audio_port[0].format == priv->audio_port[1].format) {
+ dev_err(&priv->hdmi->dev,
+ "There can only be on I2S port and one SPDIF port\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
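
The audio-ports property is a flat array of (format, ENA_AP) cell pairs, stored big-endian in the flattened device tree, hence the be32_to_cpup() per cell. The pair walk, as a hedged sketch (assumes the property layout used by the patch):

    const __be32 *prop;
    int len, i;

    prop = of_get_property(np, "audio-ports", &len);
    if (!prop)
            return 0; /* the property is optional */

    len /= sizeof(u32); /* bytes -> cells */
    if (len % 2)
            return -EINVAL; /* must be whole (format, config) pairs */

    for (i = 0; i < len / 2; i++) {
            u32 fmt = be32_to_cpup(&prop[2 * i]);     /* AFMT_I2S or AFMT_SPDIF */
            u32 cfg = be32_to_cpup(&prop[2 * i + 1]); /* ENA_AP register value */
            /* ... validate fmt and store the pair ... */
    }
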
static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
{
struct device_node *np = client->dev.of_node;
@@ -1304,7 +1511,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
if (!np)
return 0; /* non-DT */
- /* get the optional video properties */
+ /* get the device tree parameters */
ret = of_property_read_u32(np, "video-ports", &video);
if (ret == 0) {
priv->vip_cntrl_0 = video >> 16;
@@ -1312,8 +1519,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->vip_cntrl_2 = video;
}
- return 0;
+ mutex_init(&priv->audio_mutex); /* Protect access from audio thread */
+ ret = tda998x_get_audio_ports(priv, np);
+ if (ret)
+ goto fail;
+
+ if (priv->audio_port[0].format != AFMT_UNUSED)
+ tda998x_audio_codec_init(priv, &client->dev);
+
+ return 0;
fail:
/* if encoder_init fails, the encoder slave is never registered,
* so cleanup here:
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 44f4a131c8dd..0be55dc1ef4b 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -56,9 +56,7 @@ static const struct file_operations i810_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP |
- DRIVER_HAVE_DMA,
+ .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_LEGACY,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
.load = i810_driver_load,
.lastclose = i810_driver_lastclose,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 684fc1cd08fa..a998c2bce70a 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,15 +3,20 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+subdir-ccflags-y += \
+ $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
# Please keep these build lists sorted!
# core driver code
i915-y := i915_drv.o \
i915_irq.o \
+ i915_memcpy.o \
+ i915_mm.o \
i915_params.o \
i915_pci.o \
i915_suspend.o \
+ i915_sw_fence.o \
i915_sysfs.o \
intel_csr.o \
intel_device_info.o \
@@ -25,7 +30,6 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
i915-y += i915_cmd_parser.o \
i915_gem_batch_pool.o \
i915_gem_context.o \
- i915_gem_debug.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
@@ -33,6 +37,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_gtt.o \
i915_gem.o \
i915_gem_render_state.o \
+ i915_gem_request.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
@@ -40,6 +45,7 @@ i915-y += i915_cmd_parser.o \
i915_gpu_error.o \
i915_trace_points.o \
intel_breadcrumbs.o \
+ intel_engine_cs.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
@@ -109,6 +115,6 @@ i915-y += intel_gvt.o
include $(src)/gvt/Makefile
endif
-obj-$(CONFIG_DRM_I915) += i915.o
+obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index b0fd6a7b0603..70980f82a15b 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -62,23 +62,23 @@
* The parser always rejects such commands.
*
* The majority of the problematic commands fall in the MI_* range, with only a
- * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
+ * few specific commands on each engine (e.g. PIPE_CONTROL and MI_FLUSH_DW).
*
* Implementation:
- * Each ring maintains tables of commands and registers which the parser uses in
- * scanning batch buffers submitted to that ring.
+ * Each engine maintains tables of commands and registers which the parser
+ * uses in scanning batch buffers submitted to that engine.
*
* Since the set of commands that the parser must check for is significantly
* smaller than the number of commands supported, the parser tables contain only
* those commands required by the parser. This generally works because command
* opcode ranges have standard command length encodings. So for commands that
* the parser does not need to check, it can easily skip them. This is
- * implemented via a per-ring length decoding vfunc.
+ * implemented via a per-engine length decoding vfunc.
*
* Unfortunately, there are a number of commands that do not follow the standard
* length encoding for their opcode range, primarily amongst the MI_* commands.
* To handle this, the parser provides a way to define explicit "skip" entries
- * in the per-ring command tables.
+ * in the per-engine command tables.
*
* Other command table entries map fairly directly to high level categories
* mentioned above: rejected, master-only, register whitelist. The parser
@@ -86,24 +86,25 @@
* general bitmasking mechanism.
*/
-#define STD_MI_OPCODE_MASK 0xFF800000
-#define STD_3D_OPCODE_MASK 0xFFFF0000
-#define STD_2D_OPCODE_MASK 0xFFC00000
-#define STD_MFX_OPCODE_MASK 0xFFFF0000
+#define STD_MI_OPCODE_SHIFT (32 - 9)
+#define STD_3D_OPCODE_SHIFT (32 - 16)
+#define STD_2D_OPCODE_SHIFT (32 - 10)
+#define STD_MFX_OPCODE_SHIFT (32 - 16)
+#define MIN_OPCODE_SHIFT 16
#define CMD(op, opm, f, lm, fl, ...) \
{ \
.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
- .cmd = { (op), (opm) }, \
+ .cmd = { (op), ~0u << (opm) }, \
.length = { (lm) }, \
__VA_ARGS__ \
}
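
A quick sanity check of the shift encoding in the CMD() macro above — the new shifts reproduce the old opcode masks exactly:

	/*
	 * ~0u << STD_MI_OPCODE_SHIFT == ~0u << 23 == 0xFF800000 (old STD_MI_OPCODE_MASK)
	 * ~0u << STD_3D_OPCODE_SHIFT == ~0u << 16 == 0xFFFF0000 (old STD_3D_OPCODE_MASK)
	 * ~0u << STD_2D_OPCODE_SHIFT == ~0u << 22 == 0xFFC00000 (old STD_2D_OPCODE_MASK)
	 * so CMD(op, SMI, ...) yields the same .cmd.mask as before.
	 */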
/* Convenience macros to compress the tables */
-#define SMI STD_MI_OPCODE_MASK
-#define S3D STD_3D_OPCODE_MASK
-#define S2D STD_2D_OPCODE_MASK
-#define SMFX STD_MFX_OPCODE_MASK
+#define SMI STD_MI_OPCODE_SHIFT
+#define S3D STD_3D_OPCODE_SHIFT
+#define S2D STD_2D_OPCODE_SHIFT
+#define SMFX STD_MFX_OPCODE_SHIFT
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
@@ -350,6 +351,9 @@ static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
};
+static const struct drm_i915_cmd_descriptor noop_desc =
+ CMD(MI_NOOP, SMI, F, 1, S);
+
#undef CMD
#undef SMI
#undef S3D
@@ -458,6 +462,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG32(GEN7_GPGPU_DISPATCHDIMX),
REG32(GEN7_GPGPU_DISPATCHDIMY),
REG32(GEN7_GPGPU_DISPATCHDIMZ),
+ REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
@@ -473,6 +478,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG32(GEN7_L3SQCREG1),
REG32(GEN7_L3CNTLREG2),
REG32(GEN7_L3CNTLREG3),
+ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};
static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
@@ -502,7 +508,10 @@ static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
};
static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
+ REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+ REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
REG32(BCS_SWCTRL),
+ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};
static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
@@ -603,7 +612,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
-static bool validate_cmds_sorted(struct intel_engine_cs *engine,
+static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
@@ -624,8 +633,10 @@ static bool validate_cmds_sorted(struct intel_engine_cs *engine,
u32 curr = desc->cmd.value & desc->cmd.mask;
if (curr < previous) {
- DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
- engine->id, i, j, curr, previous);
+ DRM_ERROR("CMD: %s [%d] command table not sorted: "
+ "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+ engine->name, engine->id,
+ i, j, curr, previous);
ret = false;
}
@@ -636,7 +647,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *engine,
return ret;
}
-static bool check_sorted(int ring_id,
+static bool check_sorted(const struct intel_engine_cs *engine,
const struct drm_i915_reg_descriptor *reg_table,
int reg_count)
{
@@ -648,8 +659,10 @@ static bool check_sorted(int ring_id,
u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
if (curr < previous) {
- DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
- ring_id, i, curr, previous);
+ DRM_ERROR("CMD: %s [%d] register table not sorted: "
+ "entry=%d reg=0x%08X prev=0x%08X\n",
+ engine->name, engine->id,
+ i, curr, previous);
ret = false;
}
@@ -666,7 +679,7 @@ static bool validate_regs_sorted(struct intel_engine_cs *engine)
for (i = 0; i < engine->reg_table_count; i++) {
table = &engine->reg_tables[i];
- if (!check_sorted(engine->id, table->regs, table->num_regs))
+ if (!check_sorted(engine, table->regs, table->num_regs))
return false;
}
@@ -687,12 +700,26 @@ struct cmd_node {
* non-opcode bits being set. But if we don't include those bits, some 3D
* commands may hash to the same bucket due to not including opcode bits that
* make the command unique. For now, we will risk hashing to the same bucket.
- *
- * If we attempt to generate a perfect hash, we should be able to look at bits
- * 31:29 of a command from a batch buffer and use the full mask for that
- * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
*/
-#define CMD_HASH_MASK STD_MI_OPCODE_MASK
+static inline u32 cmd_header_key(u32 x)
+{
+ u32 shift;
+
+ switch (x >> INSTR_CLIENT_SHIFT) {
+ default:
+ case INSTR_MI_CLIENT:
+ shift = STD_MI_OPCODE_SHIFT;
+ break;
+ case INSTR_RC_CLIENT:
+ shift = STD_3D_OPCODE_SHIFT;
+ break;
+ case INSTR_BC_CLIENT:
+ shift = STD_2D_OPCODE_SHIFT;
+ break;
+ }
+
+ return x >> shift;
+}
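
A worked example of the bucketing above (the header values are illustrative, and the numeric INSTR_*_CLIENT codes are assumed from the i915 instruction defines):

	/*
	 * cmd_header_key(0x18800000): bits 31:29 == 0 (MI client),
	 *     key = 0x18800000 >> STD_MI_OPCODE_SHIFT(23) == 0x31
	 * cmd_header_key(0x7A000002): bits 31:29 == 3 (RC client),
	 *     key = 0x7A000002 >> STD_3D_OPCODE_SHIFT(16) == 0x7A00
	 */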
static int init_hash_table(struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
@@ -716,7 +743,7 @@ static int init_hash_table(struct intel_engine_cs *engine,
desc_node->desc = desc;
hash_add(engine->cmd_hash, &desc_node->node,
- desc->cmd.value & CMD_HASH_MASK);
+ cmd_header_key(desc->cmd.value));
}
}
@@ -736,23 +763,21 @@ static void fini_hash_table(struct intel_engine_cs *engine)
}
/**
- * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
+ * intel_engine_init_cmd_parser() - set cmd parser related fields for an engine
* @engine: the engine to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
* struct intel_engine_cs based on whether the platform requires software
* command parsing.
- *
- * Return: non-zero if initialization fails
*/
-int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
+void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
{
const struct drm_i915_cmd_table *cmd_tables;
int cmd_table_count;
int ret;
if (!IS_GEN7(engine->i915))
- return 0;
+ return;
switch (engine->id) {
case RCS:
@@ -806,36 +831,38 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
- DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
- engine->id);
- BUG();
+ MISSING_CASE(engine->id);
+ return;
}
- BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
- BUG_ON(!validate_regs_sorted(engine));
-
- WARN_ON(!hash_empty(engine->cmd_hash));
+ if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
+ DRM_ERROR("%s: command descriptions are not sorted\n",
+ engine->name);
+ return;
+ }
+ if (!validate_regs_sorted(engine)) {
+ DRM_ERROR("%s: registers are not sorted\n", engine->name);
+ return;
+ }
ret = init_hash_table(engine, cmd_tables, cmd_table_count);
if (ret) {
- DRM_ERROR("CMD: cmd_parser_init failed!\n");
+ DRM_ERROR("%s: initialised failed!\n", engine->name);
fini_hash_table(engine);
- return ret;
+ return;
}
engine->needs_cmd_parser = true;
-
- return 0;
}
/**
- * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
+ * intel_engine_cleanup_cmd_parser() - clean up cmd parser related fields
* @engine: the engine to clean up
*
* Releases any resources related to command parsing that may have been
- * initialized for the specified ring.
+ * initialized for the specified engine.
*/
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
+void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
if (!engine->needs_cmd_parser)
return;
@@ -850,12 +877,9 @@ find_cmd_in_table(struct intel_engine_cs *engine,
struct cmd_node *desc_node;
hash_for_each_possible(engine->cmd_hash, desc_node, node,
- cmd_header & CMD_HASH_MASK) {
+ cmd_header_key(cmd_header)) {
const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
- u32 masked_cmd = desc->cmd.mask & cmd_header;
- u32 masked_value = desc->cmd.value & desc->cmd.mask;
-
- if (masked_cmd == masked_value)
+ if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
return desc;
}
@@ -866,18 +890,21 @@ find_cmd_in_table(struct intel_engine_cs *engine,
* Returns a pointer to a descriptor for the command specified by cmd_header.
*
* The caller must supply space for a default descriptor via the default_desc
- * parameter. If no descriptor for the specified command exists in the ring's
+ * parameter. If no descriptor for the specified command exists in the engine's
* command parser tables, this function fills in default_desc based on the
- * ring's default length encoding and returns default_desc.
+ * engine's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *engine,
u32 cmd_header,
+ const struct drm_i915_cmd_descriptor *desc,
struct drm_i915_cmd_descriptor *default_desc)
{
- const struct drm_i915_cmd_descriptor *desc;
u32 mask;
+ if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
+ return desc;
+
desc = find_cmd_in_table(engine, cmd_header);
if (desc)
return desc;
@@ -886,152 +913,140 @@ find_cmd(struct intel_engine_cs *engine,
if (!mask)
return NULL;
- BUG_ON(!default_desc);
- default_desc->flags = CMD_DESC_SKIP;
+ default_desc->cmd.value = cmd_header;
+ default_desc->cmd.mask = ~0u << MIN_OPCODE_SHIFT;
default_desc->length.mask = mask;
-
+ default_desc->flags = CMD_DESC_SKIP;
return default_desc;
}
static const struct drm_i915_reg_descriptor *
-find_reg(const struct drm_i915_reg_descriptor *table,
- int count, u32 addr)
+__find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
{
- int i;
-
- for (i = 0; i < count; i++) {
- if (i915_mmio_reg_offset(table[i].addr) == addr)
- return &table[i];
+ int start = 0, end = count;
+ while (start < end) {
+ int mid = start + (end - start) / 2;
+ int ret = addr - i915_mmio_reg_offset(table[mid].addr);
+ if (ret < 0)
+ end = mid;
+ else if (ret > 0)
+ start = mid + 1;
+ else
+ return &table[mid];
}
-
return NULL;
}
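
The bisection in __find_reg() above relies on each register table being sorted by mmio offset, which validate_regs_sorted() checks at init time. A trace over a hypothetical three-entry table:

	/*
	 * offsets = { 0x2358, 0x235c, 0x24d0 }, addr == 0x235c:
	 *     start=0 end=3 -> mid=1, offsets[1] == addr -> hit in one probe
	 * addr == 0x2000:
	 *     start=0 end=3 -> mid=1 (offsets[1] > addr) -> end=1
	 *     start=0 end=1 -> mid=0 (offsets[0] > addr) -> end=0 -> NULL
	 */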
static const struct drm_i915_reg_descriptor *
-find_reg_in_tables(const struct drm_i915_reg_table *tables,
- int count, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
{
- int i;
- const struct drm_i915_reg_table *table;
- const struct drm_i915_reg_descriptor *reg;
+ const struct drm_i915_reg_table *table = engine->reg_tables;
+ int count = engine->reg_table_count;
- for (i = 0; i < count; i++) {
- table = &tables[i];
+ do {
if (!table->master || is_master) {
- reg = find_reg(table->regs, table->num_regs,
- addr);
+ const struct drm_i915_reg_descriptor *reg;
+
+ reg = __find_reg(table->regs, table->num_regs, addr);
if (reg != NULL)
return reg;
}
- }
+ } while (table++, --count);
return NULL;
}
-static u32 *vmap_batch(struct drm_i915_gem_object *obj,
- unsigned start, unsigned len)
-{
- int i;
- void *addr = NULL;
- struct sg_page_iter sg_iter;
- int first_page = start >> PAGE_SHIFT;
- int last_page = (len + start + 4095) >> PAGE_SHIFT;
- int npages = last_page - first_page;
- struct page **pages;
-
- pages = drm_malloc_ab(npages, sizeof(*pages));
- if (pages == NULL) {
- DRM_DEBUG_DRIVER("Failed to get space for pages\n");
- goto finish;
- }
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
- pages[i++] = sg_page_iter_page(&sg_iter);
- if (i == npages)
- break;
- }
-
- addr = vmap(pages, i, 0, PAGE_KERNEL);
- if (addr == NULL) {
- DRM_DEBUG_DRIVER("Failed to vmap pages\n");
- goto finish;
- }
-
-finish:
- if (pages)
- drm_free_large(pages);
- return (u32*)addr;
-}
-
-/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
-static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
u32 batch_start_offset,
- u32 batch_len)
+ u32 batch_len,
+ bool *needs_clflush_after)
{
- int needs_clflush = 0;
- void *src_base, *src;
- void *dst = NULL;
+ unsigned int src_needs_clflush;
+ unsigned int dst_needs_clflush;
+ void *dst, *src;
int ret;
- if (batch_len > dest_obj->base.size ||
- batch_len + batch_start_offset > src_obj->base.size)
- return ERR_PTR(-E2BIG);
-
- if (WARN_ON(dest_obj->pages_pin_count == 0))
- return ERR_PTR(-ENODEV);
-
- ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
- if (ret) {
- DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
+ ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush);
+ if (ret)
return ERR_PTR(ret);
- }
- src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
- if (!src_base) {
- DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
- ret = -ENOMEM;
+ ret = i915_gem_obj_prepare_shmem_write(dst_obj, &dst_needs_clflush);
+ if (ret) {
+ dst = ERR_PTR(ret);
goto unpin_src;
}
- ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
- if (ret) {
- DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
- goto unmap_src;
+ dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+ if (IS_ERR(dst))
+ goto unpin_dst;
+
+ src = ERR_PTR(-ENODEV);
+ if (src_needs_clflush &&
+ i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
+ src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
+ if (!IS_ERR(src)) {
+ i915_memcpy_from_wc(dst,
+ src + batch_start_offset,
+ ALIGN(batch_len, 16));
+ i915_gem_object_unpin_map(src_obj);
+ }
}
-
- dst = vmap_batch(dest_obj, 0, batch_len);
- if (!dst) {
- DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
- ret = -ENOMEM;
- goto unmap_src;
+ if (IS_ERR(src)) {
+ void *ptr;
+ int offset, n;
+
+ offset = offset_in_page(batch_start_offset);
+
+ /* We can avoid clflushing partial cachelines before the write
+ * if we only ever write full cache-lines. Since we know that
+ * both the source and destination are in multiples of
+ * PAGE_SIZE, we can simply round up to the next cacheline.
+ * We don't care about copying too much here as we only
+ * validate up to the end of the batch.
+ */
+ if (dst_needs_clflush & CLFLUSH_BEFORE)
+ batch_len = roundup(batch_len,
+ boot_cpu_data.x86_clflush_size);
+
+ ptr = dst;
+ for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
+ int len = min_t(int, batch_len, PAGE_SIZE - offset);
+
+ src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+ if (src_needs_clflush)
+ drm_clflush_virt_range(src + offset, len);
+ memcpy(ptr, src + offset, len);
+ kunmap_atomic(src);
+
+ ptr += len;
+ batch_len -= len;
+ offset = 0;
+ }
}
- src = src_base + offset_in_page(batch_start_offset);
- if (needs_clflush)
- drm_clflush_virt_range(src, batch_len);
-
- memcpy(dst, src, batch_len);
+ /* dst_obj is returned with vmap pinned */
+ *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
-unmap_src:
- vunmap(src_base);
+unpin_dst:
+ i915_gem_obj_finish_shmem_access(dst_obj);
unpin_src:
- i915_gem_object_unpin_pages(src_obj);
-
- return ret ? ERR_PTR(ret) : dst;
+ i915_gem_obj_finish_shmem_access(src_obj);
+ return dst;
}
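
For the CLFLUSH_BEFORE path in copy_batch() above, a hedged numeric example, assuming the common 64-byte clflush size:

	/*
	 * Assuming boot_cpu_data.x86_clflush_size == 64:
	 *     batch_len = roundup(100, 64) == 128
	 * so the kmap copy writes two whole cache-lines rather than a
	 * partial second line; the extra bytes are copied but, as the
	 * comment notes, never validated past the end of the batch.
	 */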
/**
- * i915_needs_cmd_parser() - should a given ring use software command parsing?
+ * intel_engine_needs_cmd_parser() - should a given engine use software
+ * command parsing?
* @engine: the engine in question
*
* Only certain platforms require software batch buffer command parsing, and
* only when enabled via module parameter.
*
- * Return: true if the ring requires software command parsing
+ * Return: true if the engine requires software command parsing
*/
-bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
+bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
{
if (!engine->needs_cmd_parser)
return false;
@@ -1048,6 +1063,9 @@ static bool check_cmd(const struct intel_engine_cs *engine,
const bool is_master,
bool *oacontrol_set)
{
+ if (desc->flags & CMD_DESC_SKIP)
+ return true;
+
if (desc->flags & CMD_DESC_REJECT) {
DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
return false;
@@ -1072,14 +1090,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
- find_reg_in_tables(engine->reg_tables,
- engine->reg_table_count,
- is_master,
- reg_addr);
+ find_reg(engine, is_master, reg_addr);
if (!reg) {
- DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
- reg_addr, *cmd, engine->id);
+ DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n",
+ reg_addr, *cmd, engine->exec_id);
return false;
}
@@ -1159,11 +1174,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
desc->bits[i].mask;
if (dword != desc->bits[i].expected) {
- DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
+ DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (exec_id=%d)\n",
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
- dword, engine->id);
+ dword, engine->exec_id);
return false;
}
}
@@ -1189,23 +1204,26 @@ static bool check_cmd(const struct intel_engine_cs *engine,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-int i915_parse_cmds(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
- u32 batch_start_offset,
- u32 batch_len,
- bool is_master)
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u32 batch_start_offset,
+ u32 batch_len,
+ bool is_master)
{
- u32 *cmd, *batch_base, *batch_end;
- struct drm_i915_cmd_descriptor default_desc = { 0 };
+ u32 *cmd, *batch_end;
+ struct drm_i915_cmd_descriptor default_desc = noop_desc;
+ const struct drm_i915_cmd_descriptor *desc = &default_desc;
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
+ bool needs_clflush_after = false;
int ret = 0;
- batch_base = copy_batch(shadow_batch_obj, batch_obj,
- batch_start_offset, batch_len);
- if (IS_ERR(batch_base)) {
+ cmd = copy_batch(shadow_batch_obj, batch_obj,
+ batch_start_offset, batch_len,
+ &needs_clflush_after);
+ if (IS_ERR(cmd)) {
DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
- return PTR_ERR(batch_base);
+ return PTR_ERR(cmd);
}
/*
@@ -1213,17 +1231,14 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
- batch_end = batch_base + (batch_len / sizeof(*batch_end));
-
- cmd = batch_base;
+ batch_end = cmd + (batch_len / sizeof(*batch_end));
while (cmd < batch_end) {
- const struct drm_i915_cmd_descriptor *desc;
u32 length;
if (*cmd == MI_BATCH_BUFFER_END)
break;
- desc = find_cmd(engine, *cmd, &default_desc);
+ desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
@@ -1274,7 +1289,9 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
ret = -EINVAL;
}
- vunmap(batch_base);
+ if (ret == 0 && needs_clflush_after)
+ drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+ i915_gem_object_unpin_map(shadow_batch_obj);
return ret;
}
@@ -1295,7 +1312,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_engine(engine, dev_priv) {
- if (i915_needs_cmd_parser(engine)) {
+ if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 844fea795bae..27b0e34dadec 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,11 +40,10 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-enum {
- ACTIVE_LIST,
- INACTIVE_LIST,
- PINNED_LIST,
-};
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+ return to_i915(node->minor->dev);
+}
/* As the drm_debugfs_init() routines are called before dev->dev_private is
* allocated we need to hook into the minor for release. */
@@ -63,7 +62,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
node->minor = minor;
node->dent = ent;
- node->info_ent = (void *) key;
+ node->info_ent = (void *)key;
mutex_lock(&minor->debugfs_lock);
list_add(&node->list, &minor->debugfs_list);
@@ -74,12 +73,11 @@ drm_add_fake_info_node(struct drm_minor *minor,
static int i915_capabilities(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- const struct intel_device_info *info = INTEL_INFO(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_device_info *info = INTEL_INFO(dev_priv);
- seq_printf(m, "gen: %d\n", info->gen);
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+ seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
@@ -91,7 +89,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
static char get_active_flag(struct drm_i915_gem_object *obj)
{
- return obj->active ? '*' : ' ';
+ return i915_gem_object_is_active(obj) ? '*' : ' ';
}
static char get_pin_flag(struct drm_i915_gem_object *obj)
@@ -101,7 +99,7 @@ static char get_pin_flag(struct drm_i915_gem_object *obj)
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
- switch (obj->tiling_mode) {
+ switch (i915_gem_object_get_tiling(obj)) {
default:
case I915_TILING_NONE: return ' ';
case I915_TILING_X: return 'X';
@@ -111,7 +109,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
+ return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -125,7 +123,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
+ if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
size += vma->node.size;
}
@@ -138,6 +136,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
struct i915_vma *vma;
+ unsigned int frontbuffer_bits;
int pin_count = 0;
enum intel_engine_id id;
@@ -155,30 +154,36 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.write_domain);
for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "%x ",
- i915_gem_request_get_seqno(obj->last_read_req[id]));
- seq_printf(m, "] %x %x%s%s%s",
- i915_gem_request_get_seqno(obj->last_write_req),
- i915_gem_request_get_seqno(obj->last_fenced_req),
- i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+ i915_gem_active_get_seqno(&obj->last_read[id],
+ &obj->base.dev->struct_mutex));
+ seq_printf(m, "] %x %s%s%s",
+ i915_gem_active_get_seqno(&obj->last_write,
+ &obj->base.dev->struct_mutex),
+ i915_cache_level_str(dev_priv, obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->pin_count > 0)
+ if (i915_vma_is_pinned(vma))
pin_count++;
}
seq_printf(m, " (pinned x %d)", pin_count);
if (obj->pin_display)
seq_printf(m, " (display)");
- if (obj->fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)", obj->fence_reg);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
- vma->is_ggtt ? "g" : "pp",
+ i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size);
- if (vma->is_ggtt)
+ if (i915_vma_is_ggtt(vma))
seq_printf(m, ", type: %u", vma->ggtt_view.type);
+ if (vma->fence)
+ seq_printf(m, " , fence: %d%s",
+ vma->fence->id,
+ i915_gem_active_isset(&vma->last_fence) ? "*" : "");
seq_puts(m, ")");
}
if (obj->stolen)
@@ -192,58 +197,15 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
- if (obj->last_write_req != NULL)
- seq_printf(m, " (%s)",
- i915_gem_request_get_engine(obj->last_write_req)->name);
- if (obj->frontbuffer_bits)
- seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
-}
-
-static int i915_gem_object_list_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- uintptr_t list = (uintptr_t) node->info_ent->data;
- struct list_head *head;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma;
- u64 total_obj_size, total_gtt_size;
- int count, ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ engine = i915_gem_active_get_engine(&obj->last_write,
+ &dev_priv->drm.struct_mutex);
+ if (engine)
+ seq_printf(m, " (%s)", engine->name);
- /* FIXME: the user of this interface might want more than just GGTT */
- switch (list) {
- case ACTIVE_LIST:
- seq_puts(m, "Active:\n");
- head = &ggtt->base.active_list;
- break;
- case INACTIVE_LIST:
- seq_puts(m, "Inactive:\n");
- head = &ggtt->base.inactive_list;
- break;
- default:
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(vma, head, vm_link) {
- seq_printf(m, " ");
- describe_obj(m, vma->obj);
- seq_printf(m, "\n");
- total_obj_size += vma->obj->base.size;
- total_gtt_size += vma->node.size;
- count++;
- }
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
- count, total_obj_size, total_gtt_size);
- return 0;
+ frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (frontbuffer_bits)
+ seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
static int obj_rank_by_stolen(void *priv,
@@ -263,9 +225,8 @@ static int obj_rank_by_stolen(void *priv,
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
LIST_HEAD(stolen);
@@ -311,17 +272,6 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
return 0;
}
-#define count_objects(list, member) do { \
- list_for_each_entry(obj, list, member) { \
- size += i915_gem_obj_total_ggtt_size(obj); \
- ++count; \
- if (obj->map_and_fenceable) { \
- mappable_size += i915_gem_obj_ggtt_size(obj); \
- ++mappable_count; \
- } \
- } \
-} while (0)
-
struct file_stats {
struct drm_i915_file_private *file_priv;
unsigned long count;
@@ -338,46 +288,29 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
-
+ if (!obj->bind_count)
+ stats->unbound += obj->base.size;
if (obj->base.name || obj->base.dma_buf)
stats->shared += obj->base.size;
- if (USES_FULL_PPGTT(obj->base.dev)) {
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- struct i915_hw_ppgtt *ppgtt;
-
- if (!drm_mm_node_allocated(&vma->node))
- continue;
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
- if (vma->is_ggtt) {
- stats->global += obj->base.size;
- continue;
- }
+ if (i915_vma_is_ggtt(vma)) {
+ stats->global += vma->node.size;
+ } else {
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
- ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
- if (ppgtt->file_priv != stats->file_priv)
+ if (ppgtt->base.file != stats->file_priv)
continue;
-
- if (obj->active) /* XXX per-vma statistic */
- stats->active += obj->base.size;
- else
- stats->inactive += obj->base.size;
-
- return 0;
- }
- } else {
- if (i915_gem_obj_ggtt_bound(obj)) {
- stats->global += obj->base.size;
- if (obj->active)
- stats->active += obj->base.size;
- else
- stats->inactive += obj->base.size;
- return 0;
}
- }
- if (!list_empty(&obj->global_list))
- stats->unbound += obj->base.size;
+ if (i915_vma_is_active(vma))
+ stats->active += vma->node.size;
+ else
+ stats->inactive += vma->node.size;
+ }
return 0;
}
@@ -424,9 +357,9 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
if (ctx->engine[n].state)
- per_file_stats(0, ctx->engine[n].state, data);
- if (ctx->engine[n].ringbuf)
- per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+ per_file_stats(0, ctx->engine[n].state->obj, data);
+ if (ctx->engine[n].ring)
+ per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
}
return 0;
@@ -435,48 +368,34 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
static void print_context_stats(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
struct file_stats stats;
struct drm_file *file;
memset(&stats, 0, sizeof(stats));
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev->struct_mutex);
if (dev_priv->kernel_context)
per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
- list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
+ list_for_each_entry(file, &dev->filelist, lhead) {
struct drm_i915_file_private *fpriv = file->driver_priv;
idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
print_file_stats(m, "[k]contexts", stats);
}
-#define count_vmas(list, member) do { \
- list_for_each_entry(vma, list, member) { \
- size += i915_gem_obj_total_ggtt_size(vma->obj); \
- ++count; \
- if (vma->obj->map_and_fenceable) { \
- mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
- ++mappable_count; \
- } \
- } \
-} while (0)
-
-static int i915_gem_object_info(struct seq_file *m, void* data)
+static int i915_gem_object_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- u32 count, mappable_count, purgeable_count;
- u64 size, mappable_size, purgeable_size;
- unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
- u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
+ u32 count, mapped_count, purgeable_count, dpy_count;
+ u64 size, mapped_size, purgeable_size, dpy_size;
struct drm_i915_gem_object *obj;
struct drm_file *file;
- struct i915_vma *vma;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -487,70 +406,53 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
- size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.bound_list, global_list);
- seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
- count, mappable_count, size, mappable_size);
-
- size = count = mappable_size = mappable_count = 0;
- count_vmas(&ggtt->base.active_list, vm_link);
- seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
- count, mappable_count, size, mappable_size);
+ size = count = 0;
+ mapped_size = mapped_count = 0;
+ purgeable_size = purgeable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ size += obj->base.size;
+ ++count;
- size = count = mappable_size = mappable_count = 0;
- count_vmas(&ggtt->base.inactive_list, vm_link);
- seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
- count, mappable_count, size, mappable_size);
+ if (obj->madv == I915_MADV_DONTNEED) {
+ purgeable_size += obj->base.size;
+ ++purgeable_count;
+ }
- size = count = purgeable_size = purgeable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
- size += obj->base.size, ++count;
- if (obj->madv == I915_MADV_DONTNEED)
- purgeable_size += obj->base.size, ++purgeable_count;
if (obj->mapping) {
- pin_mapped_count++;
- pin_mapped_size += obj->base.size;
- if (obj->pages_pin_count == 0) {
- pin_mapped_purgeable_count++;
- pin_mapped_purgeable_size += obj->base.size;
- }
+ mapped_count++;
+ mapped_size += obj->base.size;
}
}
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
- size = count = mappable_size = mappable_count = 0;
+ size = count = dpy_size = dpy_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (obj->fault_mappable) {
- size += i915_gem_obj_ggtt_size(obj);
- ++count;
- }
+ size += obj->base.size;
+ ++count;
+
if (obj->pin_display) {
- mappable_size += i915_gem_obj_ggtt_size(obj);
- ++mappable_count;
+ dpy_size += obj->base.size;
+ ++dpy_count;
}
+
if (obj->madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size;
++purgeable_count;
}
+
if (obj->mapping) {
- pin_mapped_count++;
- pin_mapped_size += obj->base.size;
- if (obj->pages_pin_count == 0) {
- pin_mapped_purgeable_count++;
- pin_mapped_purgeable_size += obj->base.size;
- }
+ mapped_count++;
+ mapped_size += obj->base.size;
}
}
+ seq_printf(m, "%u bound objects, %llu bytes\n",
+ count, size);
seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
- seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
- mappable_count, mappable_size);
- seq_printf(m, "%u fault mappable objects, %llu bytes\n",
- count, size);
- seq_printf(m,
- "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
- pin_mapped_count, pin_mapped_purgeable_count,
- pin_mapped_size, pin_mapped_purgeable_size);
+ seq_printf(m, "%u mapped objects, %llu bytes\n",
+ mapped_count, mapped_size);
+ seq_printf(m, "%u display objects (pinned), %llu bytes\n",
+ dpy_count, dpy_size);
seq_printf(m, "%llu [%llu] gtt total\n",
ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
@@ -563,6 +465,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
print_context_stats(m, dev_priv);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_request *request;
struct task_struct *task;
memset(&stats, 0, sizeof(stats));
@@ -576,10 +480,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
* still alive (e.g. get_pid(current) => fork() => exit()).
* Therefore, we need to protect this ->comm access using RCU.
*/
+ mutex_lock(&dev->struct_mutex);
+ request = list_first_entry_or_null(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
+ task = pid_task(request && request->ctx->pid ?
+ request->ctx->pid : file->pid,
+ PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
+ mutex_unlock(&dev->struct_mutex);
}
mutex_unlock(&dev->filelist_mutex);
@@ -589,9 +500,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- uintptr_t list = (uintptr_t) node->info_ent->data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(node);
+ struct drm_device *dev = &dev_priv->drm;
+ bool show_pin_display_only = !!node->info_ent->data;
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
int count, ret;
@@ -602,7 +513,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
+ if (show_pin_display_only && !obj->pin_display)
continue;
seq_puts(m, " ");
@@ -623,9 +534,8 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
int ret;
@@ -672,7 +582,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
intel_crtc_get_vblank_counter(crtc));
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
else
addr = I915_READ(DSPADDR(crtc->plane));
@@ -693,9 +603,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
int total = 0;
@@ -738,9 +647,8 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
static int i915_gem_request_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
int ret, any;
@@ -754,21 +662,20 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
int count;
count = 0;
- list_for_each_entry(req, &engine->request_list, list)
+ list_for_each_entry(req, &engine->request_list, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->request_list, list) {
+ list_for_each_entry(req, &engine->request_list, link) {
+ struct pid *pid = req->ctx->pid;
struct task_struct *task;
rcu_read_lock();
- task = NULL;
- if (req->pid)
- task = pid_task(req->pid, PIDTYPE_PID);
+ task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
seq_printf(m, " %x @ %d: %s [%d]\n",
- req->seqno,
+ req->fence.seqno,
(int) (jiffies - req->emitted_jiffies),
task ? task->comm : "<unknown>",
task ? task->pid : -1);
@@ -793,8 +700,6 @@ static void i915_ring_seqno_info(struct seq_file *m,
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, intel_engine_get_seqno(engine));
- seq_printf(m, "Current user interrupts (%s): %lx\n",
- engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups));
spin_lock(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -808,41 +713,25 @@ static void i915_ring_seqno_info(struct seq_file *m,
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
for_each_engine(engine, dev_priv)
i915_ring_seqno_info(m, engine);
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
- int ret, i, pipe;
+ int i, pipe;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
intel_runtime_pm_get(dev_priv);
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -881,7 +770,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(GEN8_PCU_IIR));
seq_printf(m, "PCU interrupt enable:\t%08x\n",
I915_READ(GEN8_PCU_IER));
- } else if (INTEL_INFO(dev)->gen >= 8) {
+ } else if (INTEL_GEN(dev_priv) >= 8) {
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -937,7 +826,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(GEN8_PCU_IIR));
seq_printf(m, "PCU interrupt enable:\t%08x\n",
I915_READ(GEN8_PCU_IER));
- } else if (IS_VALLEYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
seq_printf(m, "Display IIR:\t%08x\n",
@@ -975,7 +864,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
- } else if (!HAS_PCH_SPLIT(dev)) {
+ } else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -1007,7 +896,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(GTIMR));
}
for_each_engine(engine, dev_priv) {
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, I915_READ_IMR(engine));
@@ -1015,16 +904,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
i915_ring_seqno_info(m, engine);
}
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
int i, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1033,14 +920,14 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
+ struct i915_vma *vma = dev_priv->fence_regs[i].vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
i, dev_priv->fence_regs[i].pin_count);
- if (obj == NULL)
+ if (!vma)
seq_puts(m, "unused");
else
- describe_obj(m, obj);
+ describe_obj(m, vma->obj);
seq_putc(m, '\n');
}
@@ -1051,8 +938,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static int i915_hws_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(node);
struct intel_engine_cs *engine;
const u32 *hws;
int i;
@@ -1077,33 +963,25 @@ i915_error_state_write(struct file *filp,
loff_t *ppos)
{
struct i915_error_state_file_priv *error_priv = filp->private_data;
- struct drm_device *dev = error_priv->dev;
- int ret;
DRM_DEBUG_DRIVER("Resetting error state\n");
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- i915_destroy_error_state(dev);
- mutex_unlock(&dev->struct_mutex);
+ i915_destroy_error_state(error_priv->dev);
return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
struct i915_error_state_file_priv *error_priv;
error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
if (!error_priv)
return -ENOMEM;
- error_priv->dev = dev;
+ error_priv->dev = &dev_priv->drm;
- i915_error_state_get(dev, error_priv);
+ i915_error_state_get(&dev_priv->drm, error_priv);
file->private_data = error_priv;
@@ -1129,7 +1007,8 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
ssize_t ret_count = 0;
int ret;
- ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
+ ret = i915_error_state_buf_init(&error_str,
+ to_i915(error_priv->dev), count, *pos);
if (ret)
return ret;
@@ -1162,16 +1041,15 @@ static const struct file_operations i915_error_state_fops = {
static int
i915_next_seqno_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
if (ret)
return ret;
*val = dev_priv->next_seqno;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
}
@@ -1179,7 +1057,8 @@ i915_next_seqno_get(void *data, u64 *val)
static int
i915_next_seqno_set(void *data, u64 val)
{
- struct drm_device *dev = data;
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1198,16 +1077,13 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
static int i915_frequency_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
int ret = 0;
intel_runtime_pm_get(dev_priv);
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1217,7 +1093,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 freq_sts;
mutex_lock(&dev_priv->rps.hw_lock);
@@ -1244,7 +1120,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
mutex_unlock(&dev_priv->rps.hw_lock);
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
u32 rp_state_cap;
@@ -1256,7 +1132,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
int max_freq;
rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
} else {
@@ -1272,11 +1148,11 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
reqf >>= 23;
else {
reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
reqf >>= 24;
else
reqf >>= 25;
@@ -1294,9 +1170,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1305,7 +1181,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
pm_ier = I915_READ(GEN6_PMIER);
pm_imr = I915_READ(GEN6_PMIMR);
pm_isr = I915_READ(GEN6_PMISR);
@@ -1323,7 +1199,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
+ (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -1352,22 +1228,22 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
- max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+ max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+ max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+ max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
- max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+ max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+ max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
@@ -1381,6 +1257,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
seq_printf(m, "Min freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ seq_printf(m, "Boost freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
seq_printf(m, "Max freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
seq_printf(m,
@@ -1401,9 +1279,7 @@ out:
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
@@ -1411,6 +1287,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
enum intel_engine_id id;
int j;
+ if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+ seq_printf(m, "Wedged\n");
+ if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
+ seq_printf(m, "Reset in progress\n");
+ if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
+ seq_printf(m, "Waiter holding struct mutex\n");
+ if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
+ seq_printf(m, "struct_mutex blocked for reset\n");
+
if (!i915.enable_hangcheck) {
seq_printf(m, "Hangcheck disabled\n");
return 0;
@@ -1419,7 +1304,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
for_each_engine_id(engine, dev_priv, id) {
- acthd[id] = intel_ring_get_active_head(engine);
+ acthd[id] = intel_engine_get_active_head(engine);
seqno[id] = intel_engine_get_seqno(engine);
}
@@ -1440,11 +1325,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
engine->hangcheck.seqno,
seqno[id],
engine->last_submitted_seqno);
- seq_printf(m, "\twaiters? %d\n",
- intel_engine_has_waiter(engine));
- seq_printf(m, "\tuser interrupts = %lx [current %lx]\n",
- engine->hangcheck.user_interrupts,
- READ_ONCE(engine->breadcrumbs.irq_wakeups));
+ seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
+ yesno(intel_engine_has_waiter(engine)),
+ yesno(test_bit(engine->id,
+ &dev_priv->gpu_error.missed_irq_rings)));
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
@@ -1472,9 +1356,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
int ret;
@@ -1540,9 +1423,7 @@ static int ironlake_drpc_info(struct seq_file *m)
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_uncore_forcewake_domain *fw_domain;
spin_lock_irq(&dev_priv->uncore.lock);
@@ -1558,9 +1439,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
static int vlv_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 rpmodectl1, rcctl1, pw_status;
intel_runtime_pm_get(dev_priv);
@@ -1598,10 +1477,10 @@ static int vlv_drpc_info(struct seq_file *m)
static int gen6_drpc_info(struct seq_file *m)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+ u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
unsigned forcewake_count;
int count = 0, ret;
@@ -1629,6 +1508,10 @@ static int gen6_drpc_info(struct seq_file *m)
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
+ if (INTEL_GEN(dev_priv) >= 9) {
+ gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
+ gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
+ }
mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -1647,6 +1530,12 @@ static int gen6_drpc_info(struct seq_file *m)
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
seq_printf(m, "RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ if (INTEL_GEN(dev_priv) >= 9) {
+ seq_printf(m, "Render Well Gating Enabled: %s\n",
+ yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
+ seq_printf(m, "Media Well Gating Enabled: %s\n",
+ yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
+ }
seq_printf(m, "Deep RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
seq_printf(m, "Deepest RC6 Enabled: %s\n",
@@ -1675,6 +1564,14 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ if (INTEL_GEN(dev_priv) >= 9) {
+ seq_printf(m, "Render Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
+ }
/* Not exactly sure what this is */
seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
@@ -1692,17 +1589,16 @@ static int gen6_drpc_info(struct seq_file *m)
GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
seq_printf(m, "RC6++ voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
- return 0;
+ return i915_forcewake_domains(m, NULL);
}
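/*
 * Returning i915_forcewake_domains(m, NULL) chains a second debugfs show
 * routine onto the same seq_file: that function ignores its data argument
 * and pulls dev_priv from m->private itself, so the forcewake domain list
 * is simply appended to the gen6 DRPC output instead of being open-coded
 * here a second time.
 */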
static int i915_drpc_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_drpc_info(m);
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_GEN(dev_priv) >= 6)
return gen6_drpc_info(m);
else
return ironlake_drpc_info(m);
@@ -1710,9 +1606,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
dev_priv->fb_tracking.busy_bits);
@@ -1725,11 +1619,9 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_fbc_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (!HAS_FBC(dev)) {
+ if (!HAS_FBC(dev_priv)) {
seq_puts(m, "FBC unsupported on this chipset\n");
return 0;
}
@@ -1743,7 +1635,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "FBC disabled: %s\n",
dev_priv->fbc.no_fbc_reason);
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
seq_printf(m, "Compressing: %s\n",
yesno(I915_READ(FBC_STATUS2) &
FBC_COMPRESSION_MASK));
@@ -1756,10 +1648,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
static int i915_fbc_fc_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
- if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
return -ENODEV;
*val = dev_priv->fbc.false_color;
@@ -1769,11 +1660,10 @@ static int i915_fbc_fc_get(void *data, u64 *val)
static int i915_fbc_fc_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
u32 reg;
- if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
return -ENODEV;
mutex_lock(&dev_priv->fbc.lock);
@@ -1795,11 +1685,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
static int i915_ips_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (!HAS_IPS(dev)) {
+ if (!HAS_IPS(dev_priv)) {
seq_puts(m, "not supported\n");
return 0;
}
@@ -1809,7 +1697,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915.enable_ips));
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
} else {
if (I915_READ(IPS_CTL) & IPS_ENABLE)
@@ -1825,23 +1713,21 @@ static int i915_ips_status(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
- else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
- IS_I945G(dev) || IS_I945GM(dev))
+ else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
+ IS_I945G(dev_priv) || IS_I945GM(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- else if (IS_I915GM(dev))
+ else if (IS_I915GM(dev_priv))
sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
- else if (IS_PINEVIEW(dev))
+ else if (IS_PINEVIEW(dev_priv))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
intel_runtime_pm_put(dev_priv);
@@ -1854,13 +1740,12 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_emon_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
unsigned long temp, chipset, gfx;
int ret;
- if (!IS_GEN5(dev))
+ if (!IS_GEN5(dev_priv))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1882,27 +1767,23 @@ static int i915_emon_status(struct seq_file *m, void *unused)
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
int ret = 0;
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
- if (!HAS_CORE_RING_FREQ(dev)) {
+ if (!HAS_LLC(dev_priv)) {
seq_puts(m, "unsupported on this chipset\n");
return 0;
}
intel_runtime_pm_get(dev_priv);
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
goto out;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Convert GT frequency to 50 MHz units */
min_gpu_freq =
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1922,7 +1803,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
@@ -1937,9 +1818,8 @@ out:
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_opregion *opregion = &dev_priv->opregion;
int ret;
@@ -1958,10 +1838,7 @@ out:
static int i915_vbt(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
if (opregion->vbt)
seq_write(m, opregion->vbt, opregion->vbt_size);
@@ -1971,8 +1848,8 @@ static int i915_vbt(struct seq_file *m, void *unused)
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
int ret;
@@ -1982,8 +1859,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (to_i915(dev)->fbdev) {
- fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+ if (dev_priv->fbdev) {
+ fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
@@ -2019,19 +1896,17 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
-static void describe_ctx_ringbuf(struct seq_file *m,
- struct intel_ringbuffer *ringbuf)
+static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
- ringbuf->space, ringbuf->head, ringbuf->tail,
- ringbuf->last_retired_head);
+ ring->space, ring->head, ring->tail,
+ ring->last_retired_head);
}
static int i915_context_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
int ret;
@@ -2042,18 +1917,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
list_for_each_entry(ctx, &dev_priv->context_list, link) {
seq_printf(m, "HW context %u ", ctx->hw_id);
- if (IS_ERR(ctx->file_priv)) {
- seq_puts(m, "(deleted) ");
- } else if (ctx->file_priv) {
- struct pid *pid = ctx->file_priv->file->pid;
+ if (ctx->pid) {
struct task_struct *task;
- task = get_pid_task(pid, PIDTYPE_PID);
+ task = get_pid_task(ctx->pid, PIDTYPE_PID);
if (task) {
seq_printf(m, "(%s [%d]) ",
task->comm, task->pid);
put_task_struct(task);
}
+ } else if (IS_ERR(ctx->file_priv)) {
+ seq_puts(m, "(deleted) ");
} else {
seq_puts(m, "(kernel) ");
}
@@ -2067,9 +1941,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_printf(m, "%s: ", engine->name);
seq_putc(m, ce->initialised ? 'I' : 'i');
if (ce->state)
- describe_obj(m, ce->state);
- if (ce->ringbuf)
- describe_ctx_ringbuf(m, ce->ringbuf);
+ describe_obj(m, ce->state->obj);
+ if (ce->ring)
+ describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
}
@@ -2085,36 +1959,34 @@ static void i915_dump_lrc_obj(struct seq_file *m,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+ struct i915_vma *vma = ctx->engine[engine->id].state;
struct page *page;
- uint32_t *reg_state;
int j;
- unsigned long ggtt_offset = 0;
seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
- if (ctx_obj == NULL) {
- seq_puts(m, "\tNot allocated\n");
+ if (!vma) {
+ seq_puts(m, "\tFake context\n");
return;
}
- if (!i915_gem_obj_ggtt_bound(ctx_obj))
- seq_puts(m, "\tNot bound in GGTT\n");
- else
- ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
+ if (vma->flags & I915_VMA_GLOBAL_BIND)
+ seq_printf(m, "\tBound in GGTT at 0x%08x\n",
+ i915_ggtt_offset(vma));
- if (i915_gem_object_get_pages(ctx_obj)) {
- seq_puts(m, "\tFailed to get pages for context object\n");
+ if (i915_gem_object_get_pages(vma->obj)) {
+ seq_puts(m, "\tFailed to get pages for context object\n\n");
return;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- if (!WARN_ON(page == NULL)) {
- reg_state = kmap_atomic(page);
+ page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
+ if (page) {
+ u32 *reg_state = kmap_atomic(page);
for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
- seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- ggtt_offset + 4096 + (j * 4),
+ seq_printf(m,
+ "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ j * 4,
reg_state[j], reg_state[j + 1],
reg_state[j + 2], reg_state[j + 3]);
}
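/*
 * Offsets in the dump above are now relative to the start of the context's
 * register-state page (LRC_STATE_PN) rather than "GGTT offset + 4096", so
 * each row reads "[state-page byte offset] dw0 dw1 dw2 dw3", four dwords
 * per row with j advancing four dwords per iteration.
 */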
@@ -2126,9 +1998,8 @@ static void i915_dump_lrc_obj(struct seq_file *m,
static int i915_dump_lrc(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
int ret;
@@ -2153,9 +2024,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
static int i915_execlists(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
u32 status_pointer;
u8 read_pointer;
@@ -2190,7 +2060,7 @@ static int i915_execlists(struct seq_file *m, void *data)
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
- read_pointer = engine->next_context_status_buffer;
+ read_pointer = GEN8_CSB_READ_PTR(status_pointer);
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
@@ -2256,9 +2126,8 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2271,7 +2140,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_y));
- if (IS_GEN3(dev) || IS_GEN4(dev)) {
+ if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
seq_printf(m, "DDC = 0x%08x\n",
I915_READ(DCC));
seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2280,7 +2149,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
I915_READ16(C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
I915_READ16(C1DRB3));
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
I915_READ(MAD_DIMM_C0));
seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@@ -2289,7 +2158,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
I915_READ(MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
I915_READ(TILECTL));
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
I915_READ(GAMTARBMODE));
else
@@ -2329,9 +2198,9 @@ static int per_file_ctx(int id, void *ptr, void *data)
return 0;
}
-static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen8_ppgtt_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
@@ -2350,9 +2219,9 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
}
}
-static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
+static void gen6_ppgtt_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
if (IS_GEN6(dev_priv))
@@ -2384,22 +2253,23 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_file *file;
+ int ret;
- int ret = mutex_lock_interruptible(&dev->struct_mutex);
+ mutex_lock(&dev->filelist_mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out_unlock;
+
intel_runtime_pm_get(dev_priv);
- if (INTEL_INFO(dev)->gen >= 8)
- gen8_ppgtt_info(m, dev);
- else if (INTEL_INFO(dev)->gen >= 6)
- gen6_ppgtt_info(m, dev);
+ if (INTEL_GEN(dev_priv) >= 8)
+ gen8_ppgtt_info(m, dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_ppgtt_info(m, dev_priv);
- mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
struct task_struct *task;
@@ -2407,19 +2277,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
task = get_pid_task(file->pid, PIDTYPE_PID);
if (!task) {
ret = -ESRCH;
- goto out_unlock;
+ goto out_rpm;
}
seq_printf(m, "\nproc: %s\n", task->comm);
put_task_struct(task);
idr_for_each(&file_priv->context_idr, per_file_ctx,
(void *)(unsigned long)m);
}
-out_unlock:
- mutex_unlock(&dev->filelist_mutex);
+out_rpm:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
-
+out_unlock:
+ mutex_unlock(&dev->filelist_mutex);
return ret;
}
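/*
 * The reworked error handling above keeps a strict lock order:
 * filelist_mutex is taken first and released last, struct_mutex nests
 * inside it, and the exit labels unwind in reverse acquisition order
 * (out_rpm drops runtime pm and struct_mutex, out_unlock drops
 * filelist_mutex), so both the -ESRCH path and a failed
 * mutex_lock_interruptible() leave every lock balanced.
 */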
@@ -2434,23 +2304,41 @@ static int count_irq_waiters(struct drm_i915_private *i915)
return count;
}
+static const char *rps_power_to_str(unsigned int power)
+{
+ static const char * const strings[] = {
+ [LOW_POWER] = "low power",
+ [BETWEEN] = "mixed",
+ [HIGH_POWER] = "high power",
+ };
+
+ if (power >= ARRAY_SIZE(strings) || !strings[power])
+ return "unknown";
+
+ return strings[power];
+}
+
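/*
 * rps_power_to_str() above is a defensive table lookup: the
 * designated-initializer array may be sparse, so an out-of-range index and
 * a NULL hole both fall back to "unknown" rather than overrunning or
 * dereferencing past the table. It is consumed by i915_rps_boost_info()
 * below.
 */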
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_file *file;
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
seq_printf(m, "GPU busy? %s [%x]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
- seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ seq_printf(m, "Frequency requested %d\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+ seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
mutex_lock(&dev->filelist_mutex);
spin_lock(&dev_priv->rps.client_lock);
@@ -2467,27 +2355,44 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
list_empty(&file_priv->rps.link) ? "" : ", active");
rcu_read_unlock();
}
- seq_printf(m, "Semaphore boosts: %d%s\n",
- dev_priv->rps.semaphores.boosts,
- list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
- seq_printf(m, "MMIO flip boosts: %d%s\n",
- dev_priv->rps.mmioflips.boosts,
- list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
- seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
+ seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
spin_unlock(&dev_priv->rps.client_lock);
mutex_unlock(&dev->filelist_mutex);
+ if (INTEL_GEN(dev_priv) >= 6 &&
+ dev_priv->rps.enabled &&
+ dev_priv->gt.active_engines) {
+ u32 rpup, rpupei;
+ u32 rpdown, rpdownei;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
+ rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
+ rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
+ rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
+ rps_power_to_str(dev_priv->rps.power));
+ seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
+ 100 * rpup / rpupei,
+ dev_priv->rps.up_threshold);
+ seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
+ 100 * rpdown / rpdownei,
+ dev_priv->rps.down_threshold);
+ } else {
+ seq_puts(m, "\nRPS Autotuning inactive\n");
+ }
+
return 0;
}
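/*
 * The autotuning block above samples the GEN6_RP_CUR_* evaluation-interval
 * counters with the raw I915_READ_FW() accessors inside one explicit
 * forcewake get/put pair, avoiding per-register forcewake churn; the
 * printed percentages then express the up/down counters relative to the
 * length of their current evaluation interval (100 * rpup / rpupei).
 */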
static int i915_llc(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
const bool edram = INTEL_GEN(dev_priv) > 8;
- seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+ seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
intel_uncore_edram_size(dev_priv)/1024/1024);
@@ -2496,8 +2401,7 @@ static int i915_llc(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
u32 tmp, i;
@@ -2543,6 +2447,7 @@ static void i915_guc_client_info(struct seq_file *m,
struct i915_guc_client *client)
{
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
uint64_t tot = 0;
seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
@@ -2553,27 +2458,26 @@ static void i915_guc_client_info(struct seq_file *m,
client->wq_size, client->wq_offset, client->wq_tail);
seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
- seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_engine(engine, dev_priv) {
+ for_each_engine_id(engine, dev_priv, id) {
+ u64 submissions = client->submissions[id];
+ tot += submissions;
seq_printf(m, "\tSubmissions: %llu %s\n",
- client->submissions[engine->id],
- engine->name);
- tot += client->submissions[engine->id];
+ submissions, engine->name);
}
seq_printf(m, "\tTotal: %llu\n", tot);
}
static int i915_guc_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_guc guc;
struct i915_guc_client client = {};
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u64 total = 0;
if (!HAS_GUC_SCHED(dev_priv))
@@ -2600,11 +2504,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_engine(engine, dev_priv) {
+ for_each_engine_id(engine, dev_priv, id) {
+ u64 submissions = guc.submissions[id];
+ total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- engine->name, guc.submissions[engine->id],
- guc.last_seqno[engine->id]);
- total += guc.submissions[engine->id];
+ engine->name, submissions, guc.last_seqno[id]);
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
@@ -2618,18 +2522,16 @@ static int i915_guc_info(struct seq_file *m, void *data)
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
- u32 *log;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_gem_object *obj;
int i = 0, pg;
- if (!log_obj)
+ if (!dev_priv->guc.log_vma)
return 0;
- for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
- log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
+ obj = dev_priv->guc.log_vma->obj;
+ for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
+ u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
@@ -2646,15 +2548,13 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 psrperf = 0;
u32 stat[3];
enum pipe pipe;
bool enabled = false;
- if (!HAS_PSR(dev)) {
+ if (!HAS_PSR(dev_priv)) {
seq_puts(m, "PSR not supported\n");
return 0;
}
@@ -2671,7 +2571,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev_priv))
enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
else {
for_each_pipe(dev_priv, pipe) {
@@ -2688,7 +2588,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
for_each_pipe(dev_priv, pipe) {
if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
(stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
@@ -2700,7 +2600,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
* VLV/CHV PSR has no performance counter of any kind
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
- if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT) &
EDP_PSR_PERF_CNT_MASK;
@@ -2714,8 +2614,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
static int i915_sink_crc(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_connector *connector;
struct intel_dp *intel_dp = NULL;
int ret;
@@ -2754,13 +2654,11 @@ out:
static int i915_energy_uJ(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
u64 power;
u32 units;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -2780,9 +2678,8 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
@@ -2792,22 +2689,20 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
seq_printf(m, "Usage count: %d\n",
- atomic_read(&dev->dev->power.usage_count));
+ atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
seq_printf(m, "PCI device power state: %s [%d]\n",
- pci_power_name(dev_priv->drm.pdev->current_state),
- dev_priv->drm.pdev->current_state);
+ pci_power_name(pdev->current_state),
+ pdev->current_state);
return 0;
}
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
int i;
@@ -2840,12 +2735,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
static int i915_dmc_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_csr *csr;
- if (!HAS_CSR(dev)) {
+ if (!HAS_CSR(dev_priv)) {
seq_puts(m, "not supported\n");
return 0;
}
@@ -2863,12 +2756,12 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
+ if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(SKL_CSR_DC3_DC5_COUNT));
seq_printf(m, "DC5 -> DC6 count: %d\n",
I915_READ(SKL_CSR_DC5_DC6_COUNT));
- } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
+ } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(BXT_CSR_DC3_DC5_COUNT));
}
@@ -2905,8 +2798,8 @@ static void intel_encoder_info(struct seq_file *m,
struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_connector *intel_connector;
struct drm_encoder *encoder;
@@ -2932,8 +2825,8 @@ static void intel_encoder_info(struct seq_file *m,
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
struct drm_plane_state *plane_state = crtc->primary->state;
@@ -2967,6 +2860,9 @@ static void intel_dp_info(struct seq_file *m,
seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_info(m, &intel_connector->panel);
+
+ drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+ &intel_dp->aux);
}
static void intel_hdmi_info(struct seq_file *m,
@@ -3031,12 +2927,11 @@ static void intel_connector_info(struct seq_file *m,
intel_seq_print_mode(m, 2, mode);
}
-static bool cursor_active(struct drm_device *dev, int pipe)
+static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 state;
- if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev_priv) || IS_I865G(dev_priv))
state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -3044,9 +2939,9 @@ static bool cursor_active(struct drm_device *dev, int pipe)
return state;
}
-static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
+static bool cursor_position(struct drm_i915_private *dev_priv,
+ int pipe, int *x, int *y)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 pos;
pos = I915_READ(CURPOS(pipe));
@@ -3059,7 +2954,7 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
*y = -*y;
- return cursor_active(dev, pipe);
+ return cursor_active(dev_priv, pipe);
}
static const char *plane_type(enum drm_plane_type type)
@@ -3089,12 +2984,12 @@ static const char *plane_rotation(unsigned int rotation)
*/
snprintf(buf, sizeof(buf),
"%s%s%s%s%s%s(0x%08x)",
- (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
- (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
- (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
- (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
- (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
- (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
+ (rotation & DRM_ROTATE_0) ? "0 " : "",
+ (rotation & DRM_ROTATE_90) ? "90 " : "",
+ (rotation & DRM_ROTATE_180) ? "180 " : "",
+ (rotation & DRM_ROTATE_270) ? "270 " : "",
+ (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
+ (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
rotation);
return buf;
@@ -3102,13 +2997,14 @@ static const char *plane_rotation(unsigned int rotation)
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_plane *intel_plane;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane_state *state;
struct drm_plane *plane = &intel_plane->base;
+ char *format_name;
if (!plane->state) {
seq_puts(m, "plane->state is NULL!\n");
@@ -3117,6 +3013,12 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
state = plane->state;
+ if (state->fb) {
+ format_name = drm_get_format_name(state->fb->pixel_format);
+ } else {
+ format_name = kstrdup("N/A", GFP_KERNEL);
+ }
+
seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
plane->base.id,
plane_type(intel_plane->base.type),
@@ -3130,8 +3032,10 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
((state->src_w & 0xffff) * 15625) >> 10,
(state->src_h >> 16),
((state->src_h & 0xffff) * 15625) >> 10,
- state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
+ format_name,
plane_rotation(state->rotation));
+
+ kfree(format_name);
}
}
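/*
 * The reshuffle above exists because drm_get_format_name() hands back an
 * allocated string here: the name is materialised before the seq_printf()
 * (with kstrdup("N/A") standing in when there is no framebuffer, keeping
 * ownership uniform) and released by the single unconditional kfree()
 * afterwards, where the old inline ternary would have leaked it.
 */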
@@ -3165,9 +3069,8 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
static int i915_display_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
struct drm_connector *connector;
@@ -3191,7 +3094,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
if (pipe_config->base.active) {
intel_crtc_info(m, crtc);
- active = cursor_position(dev, crtc->pipe, &x, &y);
+ active = cursor_position(dev_priv, crtc->pipe, &x, &y);
seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
x, y, crtc->base.cursor->state->crtc_w,
@@ -3220,15 +3123,14 @@ static int i915_display_info(struct seq_file *m, void *unused)
static int i915_semaphore_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
- int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ int num_rings = INTEL_INFO(dev_priv)->num_rings;
enum intel_engine_id id;
int j, ret;
- if (!i915_semaphore_is_enabled(dev_priv)) {
+ if (!i915.semaphores) {
seq_puts(m, "Semaphores are disabled\n");
return 0;
}
@@ -3238,11 +3140,11 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
return ret;
intel_runtime_pm_get(dev_priv);
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
struct page *page;
uint64_t *seqno;
- page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+ page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
for_each_engine_id(engine, dev_priv, id) {
@@ -3293,9 +3195,8 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
int i;
drm_modeset_lock_all(dev);
@@ -3323,9 +3224,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
int i;
int ret;
struct intel_engine_cs *engine;
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct i915_workarounds *workarounds = &dev_priv->workarounds;
enum intel_engine_id id;
@@ -3361,15 +3261,14 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
static int i915_ddb_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
enum pipe pipe;
int plane;
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
drm_modeset_lock_all(dev);
@@ -3399,7 +3298,8 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
}
static void drrs_status_per_crtc(struct seq_file *m,
- struct drm_device *dev, struct intel_crtc *intel_crtc)
+ struct drm_device *dev,
+ struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_drrs *drrs = &dev_priv->drrs;
@@ -3468,8 +3368,8 @@ static void drrs_status_per_crtc(struct seq_file *m,
static int i915_drrs_status(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *intel_crtc;
int active_crtc_cnt = 0;
@@ -3492,14 +3392,14 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
struct pipe_crc_info {
const char *name;
- struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
enum pipe pipe;
};
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
struct drm_connector *connector;
@@ -3528,10 +3428,10 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = to_i915(info->dev);
+ struct drm_i915_private *dev_priv = info->dev_priv;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
- if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
+ if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
return -ENODEV;
spin_lock_irq(&pipe_crc->lock);
@@ -3552,7 +3452,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = to_i915(info->dev);
+ struct drm_i915_private *dev_priv = info->dev_priv;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
spin_lock_irq(&pipe_crc->lock);
@@ -3579,8 +3479,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
loff_t *pos)
{
struct pipe_crc_info *info = filep->private_data;
- struct drm_device *dev = info->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = info->dev_priv;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
char buf[PIPE_CRC_BUFFER_LEN];
int n_entries;
@@ -3621,7 +3520,6 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
while (n_entries > 0) {
struct intel_pipe_crc_entry *entry =
&pipe_crc->entries[pipe_crc->tail];
- int ret;
if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
INTEL_PIPE_CRC_ENTRIES_NR) < 1)
@@ -3638,8 +3536,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
spin_unlock_irq(&pipe_crc->lock);
- ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
- if (ret == PIPE_CRC_LINE_LEN)
+ if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
return -EFAULT;
user_buf += PIPE_CRC_LINE_LEN;
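/*
 * copy_to_user() returns the number of bytes left uncopied rather than an
 * error code, so the old "ret == PIPE_CRC_LINE_LEN" comparison only caught
 * the total-failure case; the rewritten check above treats any nonzero
 * remainder, i.e. even a partial copy, as -EFAULT, which is the usual
 * kernel idiom:
 *
 *	if (copy_to_user(user_buf, buf, len))
 *		return -EFAULT;
 */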
@@ -3678,11 +3575,11 @@ static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
enum pipe pipe)
{
- struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = to_i915(minor->dev);
struct dentry *ent;
struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
- info->dev = dev;
+ info->dev_priv = dev_priv;
ent = debugfs_create_file(info->name, S_IRUGO, root, info,
&i915_pipe_crc_fops);
if (!ent)
@@ -3712,8 +3609,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
int i;
for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3725,9 +3621,7 @@ static int display_crc_ctl_show(struct seq_file *m, void *data)
static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
-
- return single_open(file, display_crc_ctl_show, dev);
+ return single_open(file, display_crc_ctl_show, inode->i_private);
}
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -3750,9 +3644,11 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
enum intel_pipe_crc_source *source)
{
+ struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
struct intel_digital_port *dig_port;
@@ -3802,16 +3698,15 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
return ret;
}
-static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
uint32_t *val)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
if (ret)
return ret;
}
@@ -3829,7 +3724,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_CHERRYVIEW(dev))
+ if (!IS_CHERRYVIEW(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
need_stable_symbols = true;
@@ -3873,16 +3768,15 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
return 0;
}
-static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
uint32_t *val)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
if (ret)
return ret;
}
@@ -3892,24 +3786,24 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
break;
case INTEL_PIPE_CRC_SOURCE_TV:
- if (!SUPPORTS_TV(dev))
+ if (!SUPPORTS_TV(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
case INTEL_PIPE_CRC_SOURCE_DP_B:
- if (!IS_G4X(dev))
+ if (!IS_G4X(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_C:
- if (!IS_G4X(dev))
+ if (!IS_G4X(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
need_stable_symbols = true;
break;
case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_G4X(dev))
+ if (!IS_G4X(dev_priv))
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
need_stable_symbols = true;
@@ -3933,7 +3827,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
if (need_stable_symbols) {
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
- WARN_ON(!IS_G4X(dev));
+ WARN_ON(!IS_G4X(dev_priv));
I915_WRITE(PORT_DFT_I9XX,
I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
@@ -3949,10 +3843,9 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
return 0;
}
-static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
switch (pipe) {
@@ -3974,10 +3867,9 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
}
-static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
if (pipe == PIPE_A)
@@ -4018,9 +3910,10 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+ bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
struct intel_crtc_state *pipe_config;
@@ -4054,7 +3947,7 @@ out:
drm_atomic_state_free(state);
}
-static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
uint32_t *val)
@@ -4070,8 +3963,8 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
- if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev, true);
+ if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
@@ -4085,13 +3978,14 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
return 0;
}
-static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
enum intel_pipe_crc_source source)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
- pipe));
+ struct intel_crtc *crtc =
+ to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -4109,16 +4003,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
return -EIO;
}
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
ret = i8xx_pipe_crc_ctl_reg(&source, &val);
- else if (INTEL_INFO(dev)->gen < 5)
- ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
- else if (IS_GEN5(dev) || IS_GEN6(dev))
+ else if (INTEL_GEN(dev_priv) < 5)
+ ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
ret = ilk_pipe_crc_ctl_reg(&source, &val);
else
- ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+ ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
if (ret != 0)
goto out;
@@ -4182,12 +4076,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
kfree(entries);
- if (IS_G4X(dev))
- g4x_undo_pipe_scramble_reset(dev, pipe);
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- vlv_undo_pipe_scramble_reset(dev, pipe);
- else if (IS_HASWELL(dev) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev, false);
+ if (IS_G4X(dev_priv))
+ g4x_undo_pipe_scramble_reset(dev_priv, pipe);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
hsw_enable_ips(crtc);
}
@@ -4291,7 +4185,8 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return -EINVAL;
}
-static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
+ char *buf, size_t len)
{
#define N_WORDS 3
int n_words;
@@ -4322,14 +4217,14 @@ static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
return -EINVAL;
}
- return pipe_crc_set_source(dev, pipe, source);
+ return pipe_crc_set_source(dev_priv, pipe, source);
}
static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = m->private;
char *tmpbuf;
int ret;
@@ -4352,7 +4247,7 @@ static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
}
tmpbuf[len] = '\0';
- ret = display_crc_ctl_parse(dev, tmpbuf, len);
+ ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
out:
kfree(tmpbuf);
@@ -4373,8 +4268,8 @@ static const struct file_operations i915_display_crc_ctl_fops = {
};
static ssize_t i915_displayport_test_active_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
{
char *input_buffer;
int status = 0;
@@ -4404,7 +4299,6 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
list_for_each_entry(connector, connector_list, head) {
-
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -4442,7 +4336,6 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
struct intel_dp *intel_dp;
list_for_each_entry(connector, connector_list, head) {
-
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -4462,11 +4355,12 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
}
static int i915_displayport_test_active_open(struct inode *inode,
- struct file *file)
+ struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- return single_open(file, i915_displayport_test_active_show, dev);
+ return single_open(file, i915_displayport_test_active_show,
+ &dev_priv->drm);
}
static const struct file_operations i915_displayport_test_active_fops = {
@@ -4486,7 +4380,6 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
struct intel_dp *intel_dp;
list_for_each_entry(connector, connector_list, head) {
-
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -4502,11 +4395,12 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
return 0;
}
static int i915_displayport_test_data_open(struct inode *inode,
- struct file *file)
+ struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- return single_open(file, i915_displayport_test_data_show, dev);
+ return single_open(file, i915_displayport_test_data_show,
+ &dev_priv->drm);
}
static const struct file_operations i915_displayport_test_data_fops = {
@@ -4525,7 +4419,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
struct intel_dp *intel_dp;
list_for_each_entry(connector, connector_list, head) {
-
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -4544,9 +4437,10 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
static int i915_displayport_test_type_open(struct inode *inode,
struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- return single_open(file, i915_displayport_test_type_show, dev);
+ return single_open(file, i915_displayport_test_type_show,
+ &dev_priv->drm);
}
static const struct file_operations i915_displayport_test_type_fops = {
@@ -4559,13 +4453,14 @@ static const struct file_operations i915_displayport_test_type_fops = {
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
- struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
int level;
int num_levels;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
num_levels = 3;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
num_levels = ilk_wm_max_level(dev) + 1;
@@ -4579,8 +4474,8 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
* - WM1+ latency values in 0.5us units
* - latencies are in us on gen9/vlv/chv
*/
- if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
- IS_CHERRYVIEW(dev))
+ if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv))
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -4594,14 +4489,13 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
const uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.pri_latency;
+ latencies = dev_priv->wm.pri_latency;
wm_latency_show(m, latencies);
@@ -4610,14 +4504,13 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
const uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.spr_latency;
+ latencies = dev_priv->wm.spr_latency;
wm_latency_show(m, latencies);
@@ -4626,14 +4519,13 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
const uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.cur_latency;
+ latencies = dev_priv->wm.cur_latency;
wm_latency_show(m, latencies);
@@ -4642,48 +4534,49 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
return -ENODEV;
- return single_open(file, pri_wm_latency_show, dev);
+ return single_open(file, pri_wm_latency_show, dev_priv);
}
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
return -ENODEV;
- return single_open(file, spr_wm_latency_show, dev);
+ return single_open(file, spr_wm_latency_show, dev_priv);
}
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
return -ENODEV;
- return single_open(file, cur_wm_latency_show, dev);
+ return single_open(file, cur_wm_latency_show, dev_priv);
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp, uint16_t wm[8])
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
uint16_t new[8] = { 0 };
int num_levels;
int level;
int ret;
char tmp[32];
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
num_levels = 3;
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
else
num_levels = ilk_wm_max_level(dev) + 1;
@@ -4717,14 +4610,13 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.pri_latency;
+ latencies = dev_priv->wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -4733,14 +4625,13 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.spr_latency;
+ latencies = dev_priv->wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -4749,14 +4640,13 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
- struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = m->private;
uint16_t *latencies;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
else
- latencies = to_i915(dev)->wm.cur_latency;
+ latencies = dev_priv->wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -4791,8 +4681,7 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int
i915_wedged_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
*val = i915_terminally_wedged(&dev_priv->gpu_error);
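i915_wedged_get()/_set() and the other simple attributes below are wired up through DEFINE_SIMPLE_ATTRIBUTE(), which generates a complete file_operations from a get/set pair plus a printf format; the void *data the callbacks receive is the inode's i_private, which is why this series can swap the stored drm_device for a drm_i915_private without touching the macro. The idiom in a hedged sketch (the example_* names and the fields touched are illustrative):

    static int example_get(void *data, u64 *val)
    {
            struct drm_i915_private *dev_priv = data; /* from i_private */

            *val = dev_priv->gpu_error.reset_count;
            return 0;
    }

    static int example_set(void *data, u64 val)
    {
            struct drm_i915_private *dev_priv = data;

            dev_priv->gpu_error.missed_irq_rings = val; /* illustrative */
            return 0;
    }

    DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

    /* registration:
     *   debugfs_create_file("i915_example", S_IRUSR | S_IWUSR,
     *                       root, dev_priv, &example_fops);
     */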
@@ -4802,8 +4691,7 @@ i915_wedged_get(void *data, u64 *val)
static int
i915_wedged_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
/*
* There is no safeguard against this debugfs entry colliding
@@ -4833,8 +4721,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
*val = dev_priv->gpu_error.missed_irq_rings;
return 0;
@@ -4843,8 +4730,8 @@ i915_ring_missed_irq_get(void *data, u64 *val)
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
/* Lock against concurrent debugfs callers */
@@ -4864,8 +4751,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
*val = dev_priv->gpu_error.test_irq_rings;
@@ -4875,8 +4761,7 @@ i915_ring_test_irq_get(void *data, u64 *val)
static int
i915_ring_test_irq_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
val &= INTEL_INFO(dev_priv)->ring_mask;
DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
@@ -4908,8 +4793,8 @@ i915_drop_caches_get(void *data, u64 *val)
static int
i915_drop_caches_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4921,7 +4806,9 @@ i915_drop_caches_set(void *data, u64 val)
return ret;
if (val & DROP_ACTIVE) {
- ret = i915_gem_wait_for_idle(dev_priv);
+ ret = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
if (ret)
goto unlock;
}
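Note the wait in the DROP_ACTIVE path now takes explicit flags: I915_WAIT_LOCKED records that struct_mutex is held for the duration of the wait (it was taken a few lines earlier in this function), and I915_WAIT_INTERRUPTIBLE keeps the old behaviour of the wait being abortable by signals — presumably so the same i915_gem_wait_for_idle() can also serve unlocked or uninterruptible callers elsewhere.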
@@ -4948,38 +4835,25 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
static int
i915_max_freq_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+ struct drm_i915_private *dev_priv = data;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
- if (ret)
- return ret;
-
*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
return 0;
}
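The max/min frequency getters drop both the flush_delayed_work() and the rps.hw_lock acquisition, the apparent rationale being that reading a single integer softlimit needs no serialisation for a debugfs diagnostic; only the setters, which validate and reprogram the limits, keep the mutex.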
static int
i915_max_freq_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
u32 hw_max, hw_min;
int ret;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -5015,38 +4889,25 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
static int
i915_min_freq_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+ struct drm_i915_private *dev_priv = data;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
- if (ret)
- return ret;
-
*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
return 0;
}
static int
i915_min_freq_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
u32 hw_max, hw_min;
int ret;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -5061,7 +4922,8 @@ i915_min_freq_set(void *data, u64 val)
hw_max = dev_priv->rps.max_freq;
hw_min = dev_priv->rps.min_freq;
- if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+ if (val < hw_min ||
+ val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@@ -5082,12 +4944,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
static int
i915_cache_sharing_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
u32 snpcr;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -5098,7 +4960,7 @@ i915_cache_sharing_get(void *data, u64 *val)
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -5108,11 +4970,10 @@ i915_cache_sharing_get(void *data, u64 *val)
static int
i915_cache_sharing_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
u32 snpcr;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
return -ENODEV;
if (val > 3)
@@ -5135,18 +4996,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
-struct sseu_dev_status {
- unsigned int slice_total;
- unsigned int subslice_total;
- unsigned int subslice_per_slice;
- unsigned int eu_total;
- unsigned int eu_per_subslice;
-};
-
-static void cherryview_sseu_device_status(struct drm_device *dev,
- struct sseu_dev_status *stat)
+static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
+ struct sseu_dev_info *sseu)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int ss_max = 2;
int ss;
u32 sig1[ss_max], sig2[ss_max];
@@ -5163,28 +5015,27 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
/* skip disabled subslice */
continue;
- stat->slice_total = 1;
- stat->subslice_per_slice++;
+ sseu->slice_mask = BIT(0);
+ sseu->subslice_mask |= BIT(ss);
eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
- stat->eu_total += eu_cnt;
- stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice, eu_cnt);
}
- stat->subslice_total = stat->subslice_per_slice;
}
-static void gen9_sseu_device_status(struct drm_device *dev,
- struct sseu_dev_status *stat)
+static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
+ struct sseu_dev_info *sseu)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int s_max = 3, ss_max = 4;
int s, ss;
u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
/* BXT has a single slice and at most 3 subslices. */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
s_max = 1;
ss_max = 3;
}
@@ -5205,126 +5056,134 @@ static void gen9_sseu_device_status(struct drm_device *dev,
GEN9_PGCTL_SSB_EU311_ACK;
for (s = 0; s < s_max; s++) {
- unsigned int ss_cnt = 0;
-
if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
/* skip disabled slice */
continue;
- stat->slice_total++;
+ sseu->slice_mask |= BIT(s);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
- ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ sseu->subslice_mask =
+ INTEL_INFO(dev_priv)->sseu.subslice_mask;
for (ss = 0; ss < ss_max; ss++) {
unsigned int eu_cnt;
- if (IS_BROXTON(dev) &&
- !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
- /* skip disabled subslice */
- continue;
+ if (IS_BROXTON(dev_priv)) {
+ if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
- if (IS_BROXTON(dev))
- ss_cnt++;
+ sseu->subslice_mask |= BIT(ss);
+ }
eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
eu_mask[ss%2]);
- stat->eu_total += eu_cnt;
- stat->eu_per_subslice = max(stat->eu_per_subslice,
- eu_cnt);
+ sseu->eu_total += eu_cnt;
+ sseu->eu_per_subslice = max_t(unsigned int,
+ sseu->eu_per_subslice,
+ eu_cnt);
}
-
- stat->subslice_total += ss_cnt;
- stat->subslice_per_slice = max(stat->subslice_per_slice,
- ss_cnt);
}
}
-static void broadwell_sseu_device_status(struct drm_device *dev,
- struct sseu_dev_status *stat)
+static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
+ struct sseu_dev_info *sseu)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int s;
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
+ int s;
- stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
+ sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
- if (stat->slice_total) {
- stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
- stat->subslice_total = stat->slice_total *
- stat->subslice_per_slice;
- stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
- stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
+ if (sseu->slice_mask) {
+ sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+ sseu->eu_per_subslice =
+ INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+ sseu->eu_total = sseu->eu_per_subslice *
+ sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
- for (s = 0; s < stat->slice_total; s++) {
- u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
+ for (s = 0; s < fls(sseu->slice_mask); s++) {
+ u8 subslice_7eu =
+ INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
- stat->eu_total -= hweight8(subslice_7eu);
+ sseu->eu_total -= hweight8(subslice_7eu);
}
}
}
-static int i915_sseu_status(struct seq_file *m, void *unused)
+static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
+ const struct sseu_dev_info *sseu)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct sseu_dev_status stat;
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const char *type = is_available_info ? "Available" : "Enabled";
- if (INTEL_INFO(dev)->gen < 8)
- return -ENODEV;
+ seq_printf(m, " %s Slice Mask: %04x\n", type,
+ sseu->slice_mask);
+ seq_printf(m, " %s Slice Total: %u\n", type,
+ hweight8(sseu->slice_mask));
+ seq_printf(m, " %s Subslice Total: %u\n", type,
+ sseu_subslice_total(sseu));
+ seq_printf(m, " %s Subslice Mask: %04x\n", type,
+ sseu->subslice_mask);
+ seq_printf(m, " %s Subslice Per Slice: %u\n", type,
+ hweight8(sseu->subslice_mask));
+ seq_printf(m, " %s EU Total: %u\n", type,
+ sseu->eu_total);
+ seq_printf(m, " %s EU Per Subslice: %u\n", type,
+ sseu->eu_per_subslice);
+
+ if (!is_available_info)
+ return;
+
+ seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
+ if (HAS_POOLED_EU(dev_priv))
+ seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
- seq_puts(m, "SSEU Device Info\n");
- seq_printf(m, " Available Slice Total: %u\n",
- INTEL_INFO(dev)->slice_total);
- seq_printf(m, " Available Subslice Total: %u\n",
- INTEL_INFO(dev)->subslice_total);
- seq_printf(m, " Available Subslice Per Slice: %u\n",
- INTEL_INFO(dev)->subslice_per_slice);
- seq_printf(m, " Available EU Total: %u\n",
- INTEL_INFO(dev)->eu_total);
- seq_printf(m, " Available EU Per Subslice: %u\n",
- INTEL_INFO(dev)->eu_per_subslice);
- seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
- if (HAS_POOLED_EU(dev))
- seq_printf(m, " Min EU in pool: %u\n",
- INTEL_INFO(dev)->min_eu_in_pool);
seq_printf(m, " Has Slice Power Gating: %s\n",
- yesno(INTEL_INFO(dev)->has_slice_pg));
+ yesno(sseu->has_slice_pg));
seq_printf(m, " Has Subslice Power Gating: %s\n",
- yesno(INTEL_INFO(dev)->has_subslice_pg));
+ yesno(sseu->has_subslice_pg));
seq_printf(m, " Has EU Power Gating: %s\n",
- yesno(INTEL_INFO(dev)->has_eu_pg));
+ yesno(sseu->has_eu_pg));
+}
+
+static int i915_sseu_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct sseu_dev_info sseu;
+
+ if (INTEL_GEN(dev_priv) < 8)
+ return -ENODEV;
+
+ seq_puts(m, "SSEU Device Info\n");
+ i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
seq_puts(m, "SSEU Device Status\n");
- memset(&stat, 0, sizeof(stat));
- if (IS_CHERRYVIEW(dev)) {
- cherryview_sseu_device_status(dev, &stat);
- } else if (IS_BROADWELL(dev)) {
- broadwell_sseu_device_status(dev, &stat);
- } else if (INTEL_INFO(dev)->gen >= 9) {
- gen9_sseu_device_status(dev, &stat);
- }
- seq_printf(m, " Enabled Slice Total: %u\n",
- stat.slice_total);
- seq_printf(m, " Enabled Subslice Total: %u\n",
- stat.subslice_total);
- seq_printf(m, " Enabled Subslice Per Slice: %u\n",
- stat.subslice_per_slice);
- seq_printf(m, " Enabled EU Total: %u\n",
- stat.eu_total);
- seq_printf(m, " Enabled EU Per Subslice: %u\n",
- stat.eu_per_subslice);
+ memset(&sseu, 0, sizeof(sseu));
+
+ intel_runtime_pm_get(dev_priv);
+
+ if (IS_CHERRYVIEW(dev_priv)) {
+ cherryview_sseu_device_status(dev_priv, &sseu);
+ } else if (IS_BROADWELL(dev_priv)) {
+ broadwell_sseu_device_status(dev_priv, &sseu);
+ } else if (INTEL_GEN(dev_priv) >= 9) {
+ gen9_sseu_device_status(dev_priv, &sseu);
+ }
+
+ intel_runtime_pm_put(dev_priv);
+
+ i915_print_sseu_info(m, false, &sseu);
return 0;
}
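The status functions now fill bitmasks rather than running totals, and all derived counts come from population counts; sseu_subslice_total() (added to i915_drv.h later in this patch) is simply hweight8(slice_mask) * hweight8(subslice_mask). A worked example, assuming a one-slice, three-subslice part:

    /* hweight8() is from <linux/bitops.h> */
    u8 slice_mask = 0x1;      /* slice 0 enabled */
    u8 subslice_mask = 0x7;   /* subslices 0-2 enabled in each slice */

    /* 1 slice x 3 subslices = 3: what "Enabled Subslice Total" prints */
    unsigned int total = hweight8(slice_mask) * hweight8(subslice_mask);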
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return 0;
intel_runtime_pm_get(dev_priv);
@@ -5335,10 +5194,9 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
- struct drm_device *dev = inode->i_private;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = inode->i_private;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return 0;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5355,12 +5213,11 @@ static const struct file_operations i915_forcewake_fops = {
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
struct dentry *ent;
ent = debugfs_create_file("i915_forcewake_user",
S_IRUSR,
- root, dev,
+ root, to_i915(minor->dev),
&i915_forcewake_fops);
if (!ent)
return -ENOMEM;
@@ -5373,12 +5230,11 @@ static int i915_debugfs_create(struct dentry *root,
const char *name,
const struct file_operations *fops)
{
- struct drm_device *dev = minor->dev;
struct dentry *ent;
ent = debugfs_create_file(name,
S_IRUGO | S_IWUSR,
- root, dev,
+ root, to_i915(minor->dev),
fops);
if (!ent)
return -ENOMEM;
@@ -5390,9 +5246,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
- {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
- {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
- {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
{"i915_gem_stolen", i915_gem_stolen_list_info },
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
@@ -5467,9 +5321,8 @@ static const struct i915_debugfs_files {
{"i915_dp_test_active", &i915_displayport_test_active_fops}
};
-void intel_display_crc_init(struct drm_device *dev)
+void intel_display_crc_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -5517,7 +5370,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
+ drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
1, minor);
for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
@@ -5529,7 +5382,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
struct drm_info_list *info_list =
- (struct drm_info_list *) i915_debugfs_files[i].fops;
+ (struct drm_info_list *)i915_debugfs_files[i].fops;
drm_debugfs_remove_files(info_list, 1, minor);
}
@@ -5609,6 +5462,40 @@ static const struct file_operations i915_dpcd_fops = {
.release = single_release,
};
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Panel power up delay: %d\n",
+ intel_dp->panel_power_up_delay);
+ seq_printf(m, "Panel power down delay: %d\n",
+ intel_dp->panel_power_down_delay);
+ seq_printf(m, "Backlight on delay: %d\n",
+ intel_dp->backlight_on_delay);
+ seq_printf(m, "Backlight off delay: %d\n",
+ intel_dp->backlight_off_delay);
+
+ return 0;
+}
+
+static int i915_panel_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_panel_show, inode->i_private);
+}
+
+static const struct file_operations i915_panel_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_panel_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/**
* i915_debugfs_connector_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -5628,8 +5515,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
- &i915_dpcd_fops);
+ debugfs_create_file("i915_dpcd", S_IRUGO, root,
+ connector, &i915_dpcd_fops);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+ connector, &i915_panel_fops);
return 0;
}
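The new eDP-only node follows the same recipe as i915_dpcd: the drm_connector itself is the i_private payload and is recovered as m->private in the show callback. Assuming the usual DRM per-connector debugfs layout, the file should then surface as /sys/kernel/debug/dri/<minor>/<connector-name>/i915_panel_timings and print the four panel power/backlight delays.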
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5de36d8dcc68..18dfdd5c1b3b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -77,7 +77,7 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
const char *fmt, ...)
{
static bool shown_bug_once;
- struct device *dev = dev_priv->drm.dev;
+ struct device *kdev = dev_priv->drm.dev;
bool is_error = level[1] <= KERN_ERR[1];
bool is_debug = level[1] == KERN_DEBUG[1];
struct va_format vaf;
@@ -91,11 +91,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
vaf.fmt = fmt;
vaf.va = &args;
- dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+ dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
__builtin_return_address(0), &vaf);
if (is_error && !shown_bug_once) {
- dev_notice(dev, "%s", FDO_BUG_MSG);
+ dev_notice(kdev, "%s", FDO_BUG_MSG);
shown_bug_once = true;
}
@@ -228,31 +228,11 @@ static void intel_detect_pch(struct drm_device *dev)
pci_dev_put(pch);
}
-bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 6)
- return false;
-
- if (i915.semaphores >= 0)
- return i915.semaphores;
-
- /* TODO: make semaphores and Execlists play nicely together */
- if (i915.enable_execlists)
- return false;
-
-#ifdef CONFIG_INTEL_IOMMU
- /* Enable semaphores on SNB when IO remapping is off */
- if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
- return false;
-#endif
-
- return true;
-}
-
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
drm_i915_getparam_t *param = data;
int value;
@@ -263,13 +243,10 @@ static int i915_getparam(struct drm_device *dev, void *data,
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
- value = dev->pdev->device;
+ value = pdev->device;
break;
case I915_PARAM_REVISION:
- value = dev->pdev->revision;
- break;
- case I915_PARAM_HAS_GEM:
- value = 1;
+ value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs;
@@ -277,13 +254,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_OVERLAY:
value = dev_priv->overlay ? 1 : 0;
break;
- case I915_PARAM_HAS_PAGEFLIPPING:
- value = 1;
- break;
- case I915_PARAM_HAS_EXECBUF2:
- /* depends on GEM */
- value = 1;
- break;
case I915_PARAM_HAS_BSD:
value = intel_engine_initialized(&dev_priv->engine[VCS]);
break;
@@ -296,67 +266,34 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BSD2:
value = intel_engine_initialized(&dev_priv->engine[VCS2]);
break;
- case I915_PARAM_HAS_RELAXED_FENCING:
- value = 1;
- break;
- case I915_PARAM_HAS_COHERENT_RINGS:
- value = 1;
- break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
- value = INTEL_INFO(dev)->gen >= 4;
- break;
- case I915_PARAM_HAS_RELAXED_DELTA:
- value = 1;
- break;
- case I915_PARAM_HAS_GEN7_SOL_RESET:
- value = 1;
+ value = INTEL_GEN(dev_priv) >= 4;
break;
case I915_PARAM_HAS_LLC:
- value = HAS_LLC(dev);
+ value = HAS_LLC(dev_priv);
break;
case I915_PARAM_HAS_WT:
- value = HAS_WT(dev);
+ value = HAS_WT(dev_priv);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = USES_PPGTT(dev);
- break;
- case I915_PARAM_HAS_WAIT_TIMEOUT:
- value = 1;
+ value = USES_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = i915_semaphore_is_enabled(dev_priv);
- break;
- case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
- value = 1;
+ value = i915.semaphores;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
break;
- case I915_PARAM_HAS_PINNED_BATCHES:
- value = 1;
- break;
- case I915_PARAM_HAS_EXEC_NO_RELOC:
- value = 1;
- break;
- case I915_PARAM_HAS_EXEC_HANDLE_LUT:
- value = 1;
- break;
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version(dev_priv);
break;
- case I915_PARAM_HAS_COHERENT_PHYS_GTT:
- value = 1;
- break;
- case I915_PARAM_MMAP_VERSION:
- value = 1;
- break;
case I915_PARAM_SUBSLICE_TOTAL:
- value = INTEL_INFO(dev)->subslice_total;
+ value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
if (!value)
return -ENODEV;
break;
case I915_PARAM_EU_TOTAL:
- value = INTEL_INFO(dev)->eu_total;
+ value = INTEL_INFO(dev_priv)->sseu.eu_total;
if (!value)
return -ENODEV;
break;
@@ -364,16 +301,43 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
- value = HAS_RESOURCE_STREAMER(dev);
- break;
- case I915_PARAM_HAS_EXEC_SOFTPIN:
- value = 1;
+ value = HAS_RESOURCE_STREAMER(dev_priv);
break;
case I915_PARAM_HAS_POOLED_EU:
- value = HAS_POOLED_EU(dev);
+ value = HAS_POOLED_EU(dev_priv);
break;
case I915_PARAM_MIN_EU_IN_POOL:
- value = INTEL_INFO(dev)->min_eu_in_pool;
+ value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+ break;
+ case I915_PARAM_MMAP_GTT_VERSION:
+ /* Though we've started our numbering from 1, and so class all
+ * earlier versions as 0, in effect their value is undefined as
+ * the ioctl will report EINVAL for the unknown param!
+ */
+ value = i915_gem_mmap_gtt_version();
+ break;
+ case I915_PARAM_MMAP_VERSION:
+ /* Remember to bump this if the version changes! */
+ case I915_PARAM_HAS_GEM:
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+ case I915_PARAM_HAS_EXEC_SOFTPIN:
+ /* For the time being all of these are always true;
+ * if some supported hardware does not have one of these
+ * features this value needs to be provided from
+ * INTEL_INFO(), a feature macro, or similar.
+ */
+ value = 1;
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
@@ -537,7 +501,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
- pci_set_power_state(dev->pdev, PCI_D0);
+ pci_set_power_state(pdev, PCI_D0);
i915_resume_switcheroo(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
@@ -595,7 +559,6 @@ static void i915_gem_fini(struct drm_device *dev)
}
mutex_lock(&dev->struct_mutex);
- i915_gem_reset(dev);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
@@ -606,6 +569,7 @@ static void i915_gem_fini(struct drm_device *dev)
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
if (i915_inject_load_failure())
@@ -622,13 +586,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
- ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+ ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
if (ret && ret != -ENODEV)
goto out;
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+ ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
if (ret)
goto cleanup_vga_client;
@@ -680,9 +644,9 @@ cleanup_irq:
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
intel_power_domains_fini(dev_priv);
- vga_switcheroo_unregister_client(dev->pdev);
+ vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_client_register(pdev, NULL, NULL, NULL);
out:
return ret;
}
@@ -706,7 +670,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
- ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
kfree(ap);
@@ -848,6 +812,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
+ i915_memcpy_init_early(dev_priv);
+
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
return ret;
@@ -868,7 +834,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_audio_hooks(dev_priv);
i915_gem_load_init(&dev_priv->drm);
- intel_display_crc_init(&dev_priv->drm);
+ intel_display_crc_init(dev_priv);
intel_device_info_dump(dev_priv);
@@ -900,6 +866,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
static int i915_mmio_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
int mmio_bar;
int mmio_size;
@@ -916,7 +883,7 @@ static int i915_mmio_setup(struct drm_device *dev)
mmio_size = 512 * 1024;
else
mmio_size = 2 * 1024 * 1024;
- dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+ dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
if (dev_priv->regs == NULL) {
DRM_ERROR("failed to map registers\n");
@@ -932,9 +899,10 @@ static int i915_mmio_setup(struct drm_device *dev)
static void i915_mmio_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
intel_teardown_mchbar(dev);
- pci_iounmap(dev->pdev, dev_priv->regs);
+ pci_iounmap(pdev, dev_priv->regs);
}
/**
@@ -999,6 +967,9 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
i915.enable_ppgtt =
intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+
+ i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
+ DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores));
}
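i915.semaphores is now fixed up once at load time via intel_sanitize_semaphores(), so later readers (such as the I915_PARAM_HAS_SEMAPHORES case above) can report the module parameter directly instead of recomputing policy per call. The sanitizer itself is not shown in this diff; what follows is a sketch of a typical tristate (-1 auto / 0 off / 1 on) sanitizer, modelled on the deleted i915_semaphore_is_enabled() above and not on the real implementation:

    static bool sanitize_semaphores_sketch(struct drm_i915_private *dev_priv,
                                           int value)
    {
            if (INTEL_GEN(dev_priv) < 6)
                    return false;           /* no semaphores before gen6 */

            if (value >= 0)
                    return value;           /* explicit user override */

            /* auto: mirror the old helper's policy */
            if (i915.enable_execlists)
                    return false;

            return true;
    }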
/**
@@ -1010,9 +981,8 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
*/
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_device *dev = &dev_priv->drm;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- uint32_t aperture_size;
int ret;
if (i915_inject_load_failure())
@@ -1022,16 +992,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
- ret = i915_ggtt_init_hw(dev);
+ ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
return ret;
- ret = i915_ggtt_enable_hw(dev);
- if (ret) {
- DRM_ERROR("failed to enable GGTT\n");
- goto out_ggtt;
- }
-
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
ret = i915_kick_out_firmware_fb(dev_priv);
@@ -1046,11 +1010,21 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
goto out_ggtt;
}
- pci_set_master(dev->pdev);
+ ret = i915_ggtt_init_hw(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = i915_ggtt_enable_hw(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to enable GGTT\n");
+ goto out_ggtt;
+ }
+
+ pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN2(dev)) {
- ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
@@ -1058,7 +1032,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
}
}
-
/* 965GM sometimes incorrectly writes to hardware status page (HWS)
* using 32bit addressing, overwriting memory if HWS is located
* above 4GB.
@@ -1068,7 +1041,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* which also needs to be handled carefully.
*/
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
- ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
@@ -1077,19 +1050,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
}
}
- aperture_size = ggtt->mappable_end;
-
- ggtt->mappable =
- io_mapping_create_wc(ggtt->mappable_base,
- aperture_size);
- if (!ggtt->mappable) {
- ret = -EIO;
- goto out_ggtt;
- }
-
- ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
- aperture_size);
-
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
@@ -1111,14 +1071,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* stuck interrupts on some machines.
*/
if (!IS_I945G(dev) && !IS_I945GM(dev)) {
- if (pci_enable_msi(dev->pdev) < 0)
+ if (pci_enable_msi(pdev) < 0)
DRM_DEBUG_DRIVER("can't enable MSI");
}
return 0;
out_ggtt:
- i915_ggtt_cleanup_hw(dev);
+ i915_ggtt_cleanup_hw(dev_priv);
return ret;
}
@@ -1129,16 +1089,13 @@ out_ggtt:
*/
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
- if (dev->pdev->msi_enabled)
- pci_disable_msi(dev->pdev);
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
- arch_phys_wc_del(ggtt->mtrr);
- io_mapping_free(ggtt->mappable);
- i915_ggtt_cleanup_hw(dev);
+ i915_ggtt_cleanup_hw(dev_priv);
}
/**
@@ -1164,7 +1121,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
- i915_setup_sysfs(dev);
+ i915_setup_sysfs(dev_priv);
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
@@ -1201,7 +1158,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
acpi_video_unregister();
intel_opregion_unregister(dev_priv);
- i915_teardown_sysfs(&dev_priv->drm);
+ i915_teardown_sysfs(dev_priv);
i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
@@ -1310,6 +1267,7 @@ out_free_priv:
void i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
intel_fbdev_fini(dev);
@@ -1338,8 +1296,8 @@ void i915_driver_unload(struct drm_device *dev)
kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
- vga_switcheroo_unregister_client(dev->pdev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ vga_switcheroo_unregister_client(pdev);
+ vga_client_register(pdev, NULL, NULL, NULL);
intel_csr_ucode_fini(dev_priv);
@@ -1348,7 +1306,7 @@ void i915_driver_unload(struct drm_device *dev)
i915_destroy_error_state(dev);
/* Flush any outstanding unpin_work. */
- flush_workqueue(dev_priv->wq);
+ drain_workqueue(dev_priv->wq);
intel_guc_fini(dev);
i915_gem_fini(dev);
@@ -1436,6 +1394,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
pci_power_t opregion_target_state;
int error;
@@ -1452,19 +1411,17 @@ static int i915_drm_suspend(struct drm_device *dev)
drm_kms_helper_poll_disable(dev);
- pci_save_state(dev->pdev);
+ pci_save_state(pdev);
error = i915_gem_suspend(dev);
if (error) {
- dev_err(&dev->pdev->dev,
+ dev_err(&pdev->dev,
"GEM idle failed, resume might fail\n");
goto out;
}
intel_guc_suspend(dev);
- intel_suspend_gt_powersave(dev_priv);
-
intel_display_suspend(dev);
intel_dp_mst_suspend(dev);
@@ -1490,8 +1447,6 @@ static int i915_drm_suspend(struct drm_device *dev)
dev_priv->suspend_count++;
- intel_display_set_init_power(dev_priv, false);
-
intel_csr_ucode_suspend(dev_priv);
out:
@@ -1500,14 +1455,17 @@ out:
return error;
}
-static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
- struct drm_i915_private *dev_priv = to_i915(drm_dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
bool fw_csr;
int ret;
disable_rpm_wakeref_asserts(dev_priv);
+ intel_display_set_init_power(dev_priv, false);
+
fw_csr = !IS_BROXTON(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
@@ -1536,7 +1494,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
goto out;
}
- pci_disable_device(drm_dev->pdev);
+ pci_disable_device(pdev);
/*
* During hibernation on some platforms the BIOS may try to access
* the device even though it's already in D3 and hang the machine. So
@@ -1550,7 +1508,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
* Acer Aspire 1830T
*/
if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
- pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+ pci_set_power_state(pdev, PCI_D3hot);
dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
@@ -1590,18 +1548,18 @@ static int i915_drm_resume(struct drm_device *dev)
int ret;
disable_rpm_wakeref_asserts(dev_priv);
+ intel_sanitize_gt_powersave(dev_priv);
- ret = i915_ggtt_enable_hw(dev);
+ ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
intel_csr_ucode_resume(dev_priv);
- mutex_lock(&dev->struct_mutex);
- i915_gem_restore_gtt_mappings(dev);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_resume(dev);
i915_restore_state(dev);
+ intel_pps_unlock_regs_wa(dev_priv);
intel_opregion_setup(dev_priv);
intel_init_pch_refclk(dev);
@@ -1620,7 +1578,7 @@ static int i915_drm_resume(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ i915_gem_set_wedged(dev_priv);
}
mutex_unlock(&dev->struct_mutex);
@@ -1657,6 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
+ intel_autoenable_gt_powersave(dev_priv);
drm_kms_helper_poll_enable(dev);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1667,6 +1626,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
/*
@@ -1689,7 +1649,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
* the device powered we can also remove the following set power state
* call.
*/
- ret = pci_set_power_state(dev->pdev, PCI_D0);
+ ret = pci_set_power_state(pdev, PCI_D0);
if (ret) {
DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
goto out;
@@ -1708,12 +1668,12 @@ static int i915_drm_resume_early(struct drm_device *dev)
* depend on the device enable refcount we can't anyway depend on them
* disabling/enabling the device.
*/
- if (pci_enable_device(dev->pdev)) {
+ if (pci_enable_device(pdev)) {
ret = -EIO;
goto out;
}
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
disable_rpm_wakeref_asserts(dev_priv);
@@ -1765,8 +1725,10 @@ int i915_resume_switcheroo(struct drm_device *dev)
* i915_reset - reset chip after a hang
* @dev: drm device to reset
*
- * Reset the chip. Useful if a hang is detected. Returns zero on successful
- * reset or otherwise an error code.
+ * Reset the chip. Useful if a hang is detected. Marks the device as wedged
+ * on failure.
+ *
+ * Caller must hold the struct_mutex.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
@@ -1776,31 +1738,22 @@ int i915_resume_switcheroo(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-int i915_reset(struct drm_i915_private *dev_priv)
+void i915_reset(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct i915_gpu_error *error = &dev_priv->gpu_error;
- unsigned reset_counter;
int ret;
- intel_reset_gt_powersave(dev_priv);
+ lockdep_assert_held(&dev->struct_mutex);
- mutex_lock(&dev->struct_mutex);
+ if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
+ return;
/* Clear any previous failed attempts at recovery. Time to try again. */
- atomic_andnot(I915_WEDGED, &error->reset_counter);
-
- /* Clear the reset-in-progress flag and increment the reset epoch. */
- reset_counter = atomic_inc_return(&error->reset_counter);
- if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
- ret = -EIO;
- goto error;
- }
+ __clear_bit(I915_WEDGED, &error->flags);
+ error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
-
- i915_gem_reset(dev);
-
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
if (ret) {
if (ret != -ENODEV)
@@ -1810,6 +1763,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
+ i915_gem_reset(dev_priv);
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
@@ -1832,44 +1786,34 @@ int i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * rps/rc6 re-init is necessary to restore state lost after the
- * reset and the re-install of gt irqs. Skip for ironlake per
- * previous concerns that it doesn't respond well to some forms
- * of re-init after reset.
- */
- if (INTEL_INFO(dev)->gen > 5)
- intel_enable_gt_powersave(dev_priv);
-
- return 0;
+wakeup:
+ wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
+ return;
error:
- atomic_or(I915_WEDGED, &error->reset_counter);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ i915_gem_set_wedged(dev_priv);
+ goto wakeup;
}
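The reset rework replaces the packed atomic reset_counter with two explicit bits in error->flags (I915_RESET_IN_PROGRESS, I915_WEDGED) plus a plain reset_count; i915_reset() consumes the in-progress bit with test_and_clear_bit() and releases any sleepers with wake_up_bit() at the wakeup: label. The generic kernel handshake, reduced to a standalone sketch (the i915-side waiter is not part of this hunk):

    #include <linux/wait.h>
    #include <linux/sched.h>

    #define RESET_IN_PROGRESS 0

    static unsigned long flags;

    static void resetter(void)
    {
            if (!test_and_clear_bit(RESET_IN_PROGRESS, &flags))
                    return;                 /* no reset was requested */

            /* ... perform the actual reset ... */

            wake_up_bit(&flags, RESET_IN_PROGRESS); /* release waiters */
    }

    static int waiter(void)
    {
            /* sleeps until the bit is clear; interruptible by signals */
            return wait_on_bit(&flags, RESET_IN_PROGRESS,
                               TASK_INTERRUPTIBLE);
    }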
-static int i915_pm_suspend(struct device *dev)
+static int i915_pm_suspend(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(kdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
- if (!drm_dev) {
- dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ if (!dev) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend(drm_dev);
+ return i915_drm_suspend(dev);
}
-static int i915_pm_suspend_late(struct device *dev)
+static int i915_pm_suspend_late(struct device *kdev)
{
- struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+ struct drm_device *dev = &kdev_to_i915(kdev)->drm;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -1880,57 +1824,67 @@ static int i915_pm_suspend_late(struct device *dev)
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend_late(drm_dev, false);
+ return i915_drm_suspend_late(dev, false);
}
-static int i915_pm_poweroff_late(struct device *dev)
+static int i915_pm_poweroff_late(struct device *kdev)
{
- struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+ struct drm_device *dev = &kdev_to_i915(kdev)->drm;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend_late(drm_dev, true);
+ return i915_drm_suspend_late(dev, true);
}
-static int i915_pm_resume_early(struct device *dev)
+static int i915_pm_resume_early(struct device *kdev)
{
- struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+ struct drm_device *dev = &kdev_to_i915(kdev)->drm;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_resume_early(drm_dev);
+ return i915_drm_resume_early(dev);
}
-static int i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *kdev)
{
- struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
+ struct drm_device *dev = &kdev_to_i915(kdev)->drm;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_resume(drm_dev);
+ return i915_drm_resume(dev);
}
/* freeze: before creating the hibernation_image */
-static int i915_pm_freeze(struct device *dev)
+static int i915_pm_freeze(struct device *kdev)
{
- return i915_pm_suspend(dev);
+ int ret;
+
+ ret = i915_pm_suspend(kdev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_freeze(kdev_to_i915(kdev));
+ if (ret)
+ return ret;
+
+ return 0;
}
-static int i915_pm_freeze_late(struct device *dev)
+static int i915_pm_freeze_late(struct device *kdev)
{
int ret;
- ret = i915_pm_suspend_late(dev);
+ ret = i915_pm_suspend_late(kdev);
if (ret)
return ret;
- ret = i915_gem_freeze_late(dev_to_i915(dev));
+ ret = i915_gem_freeze_late(kdev_to_i915(kdev));
if (ret)
return ret;
@@ -1938,25 +1892,25 @@ static int i915_pm_freeze_late(struct device *dev)
}
/* thaw: called after creating the hibernation image, but before turning off. */
-static int i915_pm_thaw_early(struct device *dev)
+static int i915_pm_thaw_early(struct device *kdev)
{
- return i915_pm_resume_early(dev);
+ return i915_pm_resume_early(kdev);
}
-static int i915_pm_thaw(struct device *dev)
+static int i915_pm_thaw(struct device *kdev)
{
- return i915_pm_resume(dev);
+ return i915_pm_resume(kdev);
}
/* restore: called after loading the hibernation image. */
-static int i915_pm_restore_early(struct device *dev)
+static int i915_pm_restore_early(struct device *kdev)
{
- return i915_pm_resume_early(dev);
+ return i915_pm_resume_early(kdev);
}
-static int i915_pm_restore(struct device *dev)
+static int i915_pm_restore(struct device *kdev)
{
- return i915_pm_resume(dev);
+ return i915_pm_resume(kdev);
}
/*
@@ -2318,9 +2272,9 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
return ret;
}
-static int intel_runtime_suspend(struct device *device)
+static int intel_runtime_suspend(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(device);
+ struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
@@ -2346,7 +2300,7 @@ static int intel_runtime_suspend(struct device *device)
* Bump the expiration timestamp, otherwise the suspend won't
* be rescheduled.
*/
- pm_runtime_mark_last_busy(device);
+ pm_runtime_mark_last_busy(kdev);
return -EAGAIN;
}
@@ -2425,9 +2379,9 @@ static int intel_runtime_suspend(struct device *device)
return 0;
}
-static int intel_runtime_resume(struct device *device)
+static int intel_runtime_resume(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(device);
+ struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
@@ -2467,7 +2421,6 @@ static int intel_runtime_resume(struct device *device)
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev);
- gen6_update_ring_freq(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2623,6 +2576,7 @@ static struct drm_driver driver = {
.postclose = i915_driver_postclose,
.set_busid = drm_pci_set_busid,
+ .gem_close_object = i915_gem_close_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f68c78918d63..685e9e065287 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -61,6 +61,7 @@
#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
+#include "i915_gem_request.h"
#include "intel_gvt.h"
@@ -69,7 +70,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160711"
+#define DRIVER_DATE "20160919"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -401,7 +402,7 @@ struct drm_i915_file_private {
unsigned boosts;
} rps;
- unsigned int bsd_ring;
+ unsigned int bsd_engine;
};
/* Used by dp and fdi links */
@@ -431,8 +432,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
-#define WATCH_LISTS 0
-
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -456,15 +455,21 @@ struct intel_opregion {
struct intel_overlay;
struct intel_overlay_error_state;
-#define I915_FENCE_REG_NONE -1
-#define I915_MAX_NUM_FENCES 32
-/* 32 fences + sign bit for FENCE_REG_NONE */
-#define I915_MAX_NUM_FENCE_BITS 6
-
struct drm_i915_fence_reg {
- struct list_head lru_list;
- struct drm_i915_gem_object *obj;
+ struct list_head link;
+ struct drm_i915_private *i915;
+ struct i915_vma *vma;
int pin_count;
+ int id;
+ /**
+ * Whether the tiling parameters for the currently
+ * associated fence register have changed. Note that
+ * for the purposes of tracking tiling changes we also
+ * treat the unfenced register, the register slot that
+ * the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ bool dirty;
};
struct sdvo_device_mapping {
@@ -476,130 +481,6 @@ struct sdvo_device_mapping {
u8 ddc_pin;
};
-struct intel_display_error_state;
-
-struct drm_i915_error_state {
- struct kref ref;
- struct timeval time;
-
- char error_msg[128];
- bool simulated;
- int iommu;
- u32 reset_count;
- u32 suspend_count;
-
- /* Generic register state */
- u32 eir;
- u32 pgtbl_er;
- u32 ier;
- u32 gtier[4];
- u32 ccid;
- u32 derrmr;
- u32 forcewake;
- u32 error; /* gen6+ */
- u32 err_int; /* gen7 */
- u32 fault_data0; /* gen8, gen9 */
- u32 fault_data1; /* gen8, gen9 */
- u32 done_reg;
- u32 gac_eco;
- u32 gam_ecochk;
- u32 gab_ctl;
- u32 gfx_mode;
- u32 extra_instdone[I915_NUM_INSTDONE_REG];
- u64 fence[I915_MAX_NUM_FENCES];
- struct intel_overlay_error_state *overlay;
- struct intel_display_error_state *display;
- struct drm_i915_error_object *semaphore_obj;
-
- struct drm_i915_error_ring {
- bool valid;
- /* Software tracked state */
- bool waiting;
- int num_waiters;
- int hangcheck_score;
- enum intel_ring_hangcheck_action hangcheck_action;
- int num_requests;
-
- /* our own tracking of ring head and tail */
- u32 cpu_ring_head;
- u32 cpu_ring_tail;
-
- u32 last_seqno;
- u32 semaphore_seqno[I915_NUM_ENGINES - 1];
-
- /* Register state */
- u32 start;
- u32 tail;
- u32 head;
- u32 ctl;
- u32 hws;
- u32 ipeir;
- u32 ipehr;
- u32 instdone;
- u32 bbstate;
- u32 instpm;
- u32 instps;
- u32 seqno;
- u64 bbaddr;
- u64 acthd;
- u32 fault_reg;
- u64 faddr;
- u32 rc_psmi; /* sleep state */
- u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
-
- struct drm_i915_error_object {
- int page_count;
- u64 gtt_offset;
- u32 *pages[0];
- } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
-
- struct drm_i915_error_object *wa_ctx;
-
- struct drm_i915_error_request {
- long jiffies;
- u32 seqno;
- u32 tail;
- } *requests;
-
- struct drm_i915_error_waiter {
- char comm[TASK_COMM_LEN];
- pid_t pid;
- u32 seqno;
- } *waiters;
-
- struct {
- u32 gfx_mode;
- union {
- u64 pdp[4];
- u32 pp_dir_base;
- };
- } vm_info;
-
- pid_t pid;
- char comm[TASK_COMM_LEN];
- } ring[I915_NUM_ENGINES];
-
- struct drm_i915_error_buffer {
- u32 size;
- u32 name;
- u32 rseqno[I915_NUM_ENGINES], wseqno;
- u64 gtt_offset;
- u32 read_domains;
- u32 write_domain;
- s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
- s32 pinned:2;
- u32 tiling:2;
- u32 dirty:1;
- u32 purgeable:1;
- u32 userptr:1;
- s32 ring:4;
- u32 cache_level:3;
- } **active_bo, **pinned_bo;
-
- u32 *active_bo_count, *pinned_bo_count;
- u32 vm_count;
-};
-
struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
@@ -629,8 +510,12 @@ struct drm_i915_display_funcs {
struct intel_initial_plane_config *);
int (*crtc_compute_clock)(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
- void (*crtc_enable)(struct drm_crtc *crtc);
- void (*crtc_disable)(struct drm_crtc *crtc);
+ void (*crtc_enable)(struct intel_crtc_state *pipe_config,
+ struct drm_atomic_state *old_state);
+ void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
+ struct drm_atomic_state *old_state);
+ void (*update_crtcs)(struct drm_atomic_state *state,
+ unsigned int *crtc_vblank_mask);
void (*audio_codec_enable)(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode);
@@ -694,8 +579,6 @@ struct intel_uncore_funcs {
uint16_t val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint32_t val, bool trace);
- void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint64_t val, bool trace);
};
struct intel_uncore {
@@ -756,7 +639,7 @@ struct intel_csr {
func(is_i915g) sep \
func(is_i945gm) sep \
func(is_g33) sep \
- func(need_gfx_hws) sep \
+ func(hws_needs_physical) sep \
func(is_g4x) sep \
func(is_pineview) sep \
func(is_broadwater) sep \
@@ -771,6 +654,19 @@ struct intel_csr {
func(is_kabylake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
+ func(has_psr) sep \
+ func(has_runtime_pm) sep \
+ func(has_csr) sep \
+ func(has_resource_streamer) sep \
+ func(has_rc6) sep \
+ func(has_rc6p) sep \
+ func(has_dp_mst) sep \
+ func(has_gmbus_irq) sep \
+ func(has_hw_contexts) sep \
+ func(has_logical_ring_contexts) sep \
+ func(has_l3_dpf) sep \
+ func(has_gmch_display) sep \
+ func(has_guc) sep \
func(has_pipe_cxsr) sep \
func(has_hotplug) sep \
func(cursor_needs_physical) sep \
@@ -786,6 +682,24 @@ struct intel_csr {
#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
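DEV_INFO_FOR_EACH_FLAG is an X-macro: the flag list is written once as func(name) sep pairs, and each expansion site supplies its own func/sep — here DEFINE_FLAG and SEP_SEMICOLON turn the list into one u8 name:1 bitfield per feature inside intel_device_info. The technique in miniature, with a made-up three-flag list:

    #define FOR_EACH_FLAG(func, sep) \
            func(has_fbc) sep \
            func(has_psr) sep \
            func(has_csr)

    #define DEFINE_FLAG(name) unsigned char name:1
    #define SEP_SEMICOLON ;

    struct device_flags {
            FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
    };

    /* The same list can be expanded again elsewhere, e.g. for debug output:
     *   #define PRINT_FLAG(name) seq_printf(m, #name ": %u\n", info->name)
     *   FOR_EACH_FLAG(PRINT_FLAG, ;);
     */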
+struct sseu_dev_info {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 eu_total;
+ u8 eu_per_subslice;
+ u8 min_eu_in_pool;
+ /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+}
+
struct intel_device_info {
u32 display_mmio_offset;
u16 device_id;
@@ -794,7 +708,9 @@ struct intel_device_info {
u8 gen;
u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */
+ u8 num_rings;
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+ u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
@@ -802,17 +718,7 @@ struct intel_device_info {
int cursor_offsets[I915_MAX_PIPES];
/* Slice/subslice/EU info */
- u8 slice_total;
- u8 subslice_total;
- u8 subslice_per_slice;
- u8 eu_total;
- u8 eu_per_subslice;
- u8 min_eu_in_pool;
- /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
- u8 subslice_7eu[3];
- u8 has_slice_pg:1;
- u8 has_subslice_pg:1;
- u8 has_eu_pg:1;
+ struct sseu_dev_info sseu;
struct color_luts {
u16 degamma_lut_size;
@@ -823,6 +729,134 @@ struct intel_device_info {
#undef DEFINE_FLAG
#undef SEP_SEMICOLON
+struct intel_display_error_state;
+
+struct drm_i915_error_state {
+ struct kref ref;
+ struct timeval time;
+
+ char error_msg[128];
+ bool simulated;
+ int iommu;
+ u32 reset_count;
+ u32 suspend_count;
+ struct intel_device_info device_info;
+
+ /* Generic register state */
+ u32 eir;
+ u32 pgtbl_er;
+ u32 ier;
+ u32 gtier[4];
+ u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
+ u32 error; /* gen6+ */
+ u32 err_int; /* gen7 */
+ u32 fault_data0; /* gen8, gen9 */
+ u32 fault_data1; /* gen8, gen9 */
+ u32 done_reg;
+ u32 gac_eco;
+ u32 gam_ecochk;
+ u32 gab_ctl;
+ u32 gfx_mode;
+ u32 extra_instdone[I915_NUM_INSTDONE_REG];
+ u64 fence[I915_MAX_NUM_FENCES];
+ struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
+ struct drm_i915_error_object *semaphore;
+
+ struct drm_i915_error_engine {
+ int engine_id;
+ /* Software tracked state */
+ bool waiting;
+ int num_waiters;
+ int hangcheck_score;
+ enum intel_engine_hangcheck_action hangcheck_action;
+ struct i915_address_space *vm;
+ int num_requests;
+
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head;
+ u32 cpu_ring_tail;
+
+ u32 last_seqno;
+ u32 semaphore_seqno[I915_NUM_ENGINES - 1];
+
+ /* Register state */
+ u32 start;
+ u32 tail;
+ u32 head;
+ u32 ctl;
+ u32 mode;
+ u32 hws;
+ u32 ipeir;
+ u32 ipehr;
+ u32 instdone;
+ u32 bbstate;
+ u32 instpm;
+ u32 instps;
+ u32 seqno;
+ u64 bbaddr;
+ u64 acthd;
+ u32 fault_reg;
+ u64 faddr;
+ u32 rc_psmi; /* sleep state */
+ u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+
+ struct drm_i915_error_object {
+ int page_count;
+ u64 gtt_offset;
+ u64 gtt_size;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+
+ struct drm_i915_error_object *wa_ctx;
+
+ struct drm_i915_error_request {
+ long jiffies;
+ pid_t pid;
+ u32 seqno;
+ u32 head;
+ u32 tail;
+ } *requests;
+
+ struct drm_i915_error_waiter {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ u32 seqno;
+ } *waiters;
+
+ struct {
+ u32 gfx_mode;
+ union {
+ u64 pdp[4];
+ u32 pp_dir_base;
+ };
+ } vm_info;
+
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+ } engine[I915_NUM_ENGINES];
+
+ struct drm_i915_error_buffer {
+ u32 size;
+ u32 name;
+ u32 rseqno[I915_NUM_ENGINES], wseqno;
+ u64 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ u32 userptr:1;
+ s32 engine:4;
+ u32 cache_level:3;
+ } *active_bo[I915_NUM_ENGINES], *pinned_bo;
+ u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
+ struct i915_address_space *active_vm[I915_NUM_ENGINES];
+};
+
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
@@ -879,6 +913,7 @@ struct i915_gem_context {
struct drm_i915_private *i915;
struct drm_i915_file_private *file_priv;
struct i915_hw_ppgtt *ppgtt;
+ struct pid *pid;
struct i915_ctx_hang_stats hang_stats;
@@ -893,9 +928,8 @@ struct i915_gem_context {
u32 ggtt_alignment;
struct intel_context {
- struct drm_i915_gem_object *state;
- struct intel_ringbuffer *ringbuf;
- struct i915_vma *lrc_vma;
+ struct i915_vma *state;
+ struct intel_ring *ring;
uint32_t *lrc_reg_state;
u64 lrc_desc;
int pin_count;
@@ -909,6 +943,7 @@ struct i915_gem_context {
struct list_head link;
u8 remap_slice;
+ bool closed:1;
};
enum fb_op_origin {
@@ -1062,13 +1097,6 @@ struct intel_gmbus {
struct i915_suspend_saved_registers {
u32 saveDSPARB;
- u32 saveLVDS;
- u32 savePP_ON_DELAYS;
- u32 savePP_OFF_DELAYS;
- u32 savePP_ON;
- u32 savePP_OFF;
- u32 savePP_CONTROL;
- u32 savePP_DIVISOR;
u32 saveFBC_CONTROL;
u32 saveCACHE_MODE_0;
u32 saveMI_ARB_STATE;
@@ -1157,6 +1185,7 @@ struct intel_gen6_power_mgmt {
bool interrupts_enabled;
u32 pm_iir;
+ /* PM interrupt bits that should never be masked */
u32 pm_intr_keep;
/* Frequencies are stored in potentially platform dependent multiples.
@@ -1174,6 +1203,7 @@ struct intel_gen6_power_mgmt {
u8 max_freq_softlimit; /* Max frequency permitted by the driver */
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 boost_freq; /* Frequency to request when wait boosting */
u8 idle_freq; /* Frequency to request when we are idle */
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
@@ -1191,11 +1221,9 @@ struct intel_gen6_power_mgmt {
bool client_boost;
bool enabled;
- struct delayed_work delayed_resume_work;
+ struct delayed_work autoenable_work;
unsigned boosts;
- struct intel_rps_client semaphores, mmioflips;
-
/* manual wa residency calculations */
struct intel_rps_ei up_ei, down_ei;
@@ -1320,7 +1348,6 @@ struct i915_gem_mm {
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
- bool shrinker_no_lock_stealing;
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
@@ -1332,7 +1359,7 @@ struct i915_gem_mm {
bool interruptible;
/* the indicator for dispatch video commands on two BSD rings */
- unsigned int bsd_ring_dispatch_index;
+ atomic_t bsd_engine_dispatch_index;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
@@ -1380,9 +1407,10 @@ struct i915_gpu_error {
* State variable controlling the reset flow and count
*
* This is a counter which gets incremented when reset is triggered,
- * and again when reset has been handled. So odd values (lowest bit set)
- * means that reset is in progress and even values that
- * (reset_counter >> 1):th reset was successfully completed.
+ *
+ * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
+ * meaning that any waiters holding onto the struct_mutex should
+ * relinquish the lock immediately in order for the reset to start.
*
 * If reset is not completed successfully, the I915_WEDGED bit is
* set meaning that hardware is terminally sour and there is no
@@ -1397,10 +1425,11 @@ struct i915_gpu_error {
* naturally enforces the correct ordering between the bail-out of the
* waiter and the gpu reset work code.
*/
- atomic_t reset_counter;
+ unsigned long reset_count;
-#define I915_RESET_IN_PROGRESS_FLAG 1
-#define I915_WEDGED (1 << 31)
+ unsigned long flags;
+#define I915_RESET_IN_PROGRESS 0
+#define I915_WEDGED (BITS_PER_LONG - 1)
/**
 * Waitqueue to signal when a hang is detected. Used for waiters
@@ -1671,7 +1700,7 @@ struct intel_pipe_crc {
};
struct i915_frontbuffer_tracking {
- struct mutex lock;
+ spinlock_t lock;
/*
 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
@@ -1706,18 +1735,6 @@ struct i915_virtual_gpu {
bool active;
};
-struct i915_execbuffer_params {
- struct drm_device *dev;
- struct drm_file *file;
- uint32_t dispatch_flags;
- uint32_t args_batch_start_offset;
- uint64_t batch_obj_vm_offset;
- struct intel_engine_cs *engine;
- struct drm_i915_gem_object *batch_obj;
- struct i915_gem_context *ctx;
- struct drm_i915_gem_request *request;
-};
-
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
@@ -1764,13 +1781,15 @@ struct drm_i915_private {
uint32_t psr_mmio_base;
+ uint32_t pps_mmio_base;
+
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
struct i915_gem_context *kernel_context;
struct intel_engine_cs engine[I915_NUM_ENGINES];
- struct drm_i915_gem_object *semaphore_obj;
- uint32_t last_seqno, next_seqno;
+ struct i915_vma *semaphore;
+ u32 next_seqno;
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
@@ -1965,11 +1984,11 @@ struct drm_i915_private {
struct vlv_s0ix_state vlv_s0ix_state;
enum {
- I915_SKL_SAGV_UNKNOWN = 0,
- I915_SKL_SAGV_DISABLED,
- I915_SKL_SAGV_ENABLED,
- I915_SKL_SAGV_NOT_CONTROLLED
- } skl_sagv_status;
+ I915_SAGV_UNKNOWN = 0,
+ I915_SAGV_DISABLED,
+ I915_SAGV_ENABLED,
+ I915_SAGV_NOT_CONTROLLED
+ } sagv_status;
struct {
/*
@@ -2025,12 +2044,8 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
- int (*execbuf_submit)(struct i915_execbuffer_params *params,
- struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas);
- int (*init_engines)(struct drm_device *dev);
+ void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
- void (*stop_engine)(struct intel_engine_cs *engine);
/**
* Is the GPU currently considered idle, or busy executing
@@ -2077,9 +2092,9 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return container_of(dev, struct drm_i915_private, drm);
}
-static inline struct drm_i915_private *dev_to_i915(struct device *dev)
+static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
- return to_i915(dev_get_drvdata(dev));
+ return to_i915(dev_get_drvdata(kdev));
}
static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
@@ -2102,13 +2117,16 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
for_each_if (((id__) = (engine__)->id, \
intel_engine_initialized(engine__)))
+#define __mask_next_bit(mask) ({ \
+ int __idx = ffs(mask) - 1; \
+ mask &= ~BIT(__idx); \
+ __idx; \
+})
+
/* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__) \
- for ((engine__) = &(dev_priv__)->engine[0]; \
- (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
- (engine__)++) \
- for_each_if (((mask__) & intel_engine_flag(engine__)) && \
- intel_engine_initialized(engine__))
+#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
+ for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
+ tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
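A sketch of how the rewritten iterator walks the mask (illustrative, assuming the kernel's ffs() and BIT() semantics): __mask_next_bit() pops the lowest set bit from tmp__ on each step, so only engines present in both mask__ and the device's ring_mask are visited, in ascending bit order:

	unsigned int tmp = 0x5;		/* hypothetical mask: bits 0 and 2 set */

	while (tmp) {
		int idx = ffs(tmp) - 1;	/* yields 0, then 2 */

		tmp &= ~BIT(idx);	/* pop the lowest set bit */
		/* visit dev_priv->engine[idx] here */
	}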
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -2153,8 +2171,6 @@ struct drm_i915_gem_object_ops {
*/
#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER_BITS \
- (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
@@ -2178,18 +2194,21 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
struct list_head global_list;
- struct list_head engine_list[I915_NUM_ENGINES];
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
struct list_head batch_pool_link;
+ unsigned long flags;
/**
* This is set if the object is on the active lists (has pending
 * rendering and so a non-zero seqno), and is not set if it is on
* inactive (ready to be unbound) list.
*/
- unsigned int active:I915_NUM_ENGINES;
+#define I915_BO_ACTIVE_SHIFT 0
+#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
+#define __I915_BO_ACTIVE(bo) \
+ ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
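An illustrative reading of the new packing (engine count assumed to be 5): each engine owns one "active" bit starting at I915_BO_ACTIVE_SHIFT, and __I915_BO_ACTIVE() samples the whole set locklessly via READ_ONCE(). A standalone sketch of the decode:

	#define DEMO_ACTIVE_SHIFT 0
	#define DEMO_ACTIVE_MASK  ((1ul << 5) - 1)	/* 0x1f for 5 engines */

	static inline unsigned long demo_bo_active(unsigned long flags)
	{
		return (flags >> DEMO_ACTIVE_SHIFT) & DEMO_ACTIVE_MASK;
	}
	/* flags == 0x5 -> busy on engines 0 and 2; result 0 -> idle */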
/**
* This is set if the object has been written to since last bound
@@ -2198,37 +2217,11 @@ struct drm_i915_gem_object {
unsigned int dirty:1;
/**
- * Fence register bits (if any) for this object. Will be set
- * as needed when mapped into the GTT.
- * Protected by dev->struct_mutex.
- */
- signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
-
- /**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;
/**
- * Current tiling mode for the object.
- */
- unsigned int tiling_mode:2;
- /**
- * Whether the tiling parameters for the currently associated fence
- * register have changed. Note that for the purposes of tracking
- * tiling changes we also treat the unfenced register, the register
- * slot that the object occupies whilst it executes a fenced
- * command (such as BLT on gen2/3), as a "fence".
- */
- unsigned int fence_dirty:1;
-
- /**
- * Is the object at the current location in the gtt mappable and
- * fenceable? Used to avoid costly recalculations.
- */
- unsigned int map_and_fenceable:1;
-
- /**
* Whether the current gtt mapping needs to be mappable (and isn't just
* mappable by accident). Track pin and fault separate for a more
* accurate mappable working set.
@@ -2243,9 +2236,17 @@ struct drm_i915_gem_object {
unsigned int cache_level:3;
unsigned int cache_dirty:1;
- unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+ atomic_t frontbuffer_bits;
+ unsigned int frontbuffer_ggtt_origin; /* write once */
+
+ /** Current tiling stride for the object, if it's tiled. */
+ unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
+#define STRIDE_MASK (~TILING_MASK)
- unsigned int has_wc_mmap;
+ /** Count of VMA actually bound by this object */
+ unsigned int bind_count;
unsigned int pin_display;
struct sg_table *pages;
@@ -2265,14 +2266,9 @@ struct drm_i915_gem_object {
* requests on one ring where the write request is older than the
* read request. This allows for the CPU to read from an active
* buffer by only waiting for the write to complete.
- * */
- struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
- struct drm_i915_gem_request *last_write_req;
- /** Breadcrumb of last fenced GPU access to the buffer. */
- struct drm_i915_gem_request *last_fenced_req;
-
- /** Current tiling stride for the object, if it's tiled. */
- uint32_t stride;
+ */
+ struct i915_gem_active last_read[I915_NUM_ENGINES];
+ struct i915_gem_active last_write;
/** References from framebuffers, locks out tiling changes. */
unsigned long framebuffer_references;
@@ -2280,23 +2276,70 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
- union {
- /** for phy allocated objects */
- struct drm_dma_handle *phys_handle;
-
- struct i915_gem_userptr {
- uintptr_t ptr;
- unsigned read_only :1;
- unsigned workers :4;
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+ unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
- };
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
};
-#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+ /* Assert that to_intel_bo(NULL) == NULL */
+ BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+ return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+ return to_intel_bo(drm_gem_object_lookup(file, handle));
+}
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_reference(&obj->base);
+ return obj;
+}
+
+__deprecated
+extern void drm_gem_object_reference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_unreference(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference(struct drm_gem_object *);
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_unreference_unlocked(&obj->base);
+}
+
+__deprecated
+extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
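Typical use of the new wrappers (an illustrative sketch; the handle source is hypothetical): the raw drm_gem_* calls are redeclared __deprecated so i915 code funnels through the typed helpers instead:

	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* ... use obj under the appropriate locking ... */

	i915_gem_object_put_unlocked(obj);	/* drop the lookup reference */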
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
@@ -2304,6 +2347,67 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}
+static inline unsigned long
+i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
+{
+ return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_active(obj);
+}
+
+static inline void
+i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
+{
+ obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline void
+i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
+{
+ obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline bool
+i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
+ int engine)
+{
+ return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline unsigned int
+i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & STRIDE_MASK;
+}
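How the packed field decodes (illustrative): strides are multiples of FENCE_MINIMUM_STRIDE (128), so the low 7 bits of tiling_and_stride are always zero in the stride and are free to carry the tiling mode. A sketch with hypothetical values:

	/* X-tiled object with a 512-byte stride: */
	unsigned int tiling_and_stride = 512 | 1 /* I915_TILING_X */;

	unsigned int tiling = tiling_and_stride & (128 - 1);	/* == 1   */
	unsigned int stride = tiling_and_stride & ~(128 - 1);	/* == 512 */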
+
+static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
+{
+ i915_gem_object_get(vma->obj);
+ return vma;
+}
+
+static inline void i915_vma_put(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ i915_gem_object_put(vma->obj);
+}
+
/*
* Optimised SGL iterator for GEM objects
*/
@@ -2374,171 +2478,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) < (__iter).max) || \
((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
-/**
- * Request queue structure.
- *
- * The request queue allows us to note sequence numbers that have been emitted
- * and may be associated with active buffers to be retired.
- *
- * By keeping this list, we can avoid having to do questionable sequence
- * number comparisons on buffer last_read|write_seqno. It also allows an
- * emission time to be associated with the request for tracking how far ahead
- * of the GPU the submission is.
- *
- * The requests are reference counted, so upon creation they should have an
- * initial reference taken using kref_init
- */
-struct drm_i915_gem_request {
- struct kref ref;
-
- /** On Which ring this request was generated */
- struct drm_i915_private *i915;
- struct intel_engine_cs *engine;
- struct intel_signal_node signaling;
-
- /** GEM sequence number associated with the previous request,
- * when the HWS breadcrumb is equal to this the GPU is processing
- * this request.
- */
- u32 previous_seqno;
-
- /** GEM sequence number associated with this request,
- * when the HWS breadcrumb is equal or greater than this the GPU
- * has finished processing this request.
- */
- u32 seqno;
-
- /** Position in the ringbuffer of the start of the request */
- u32 head;
-
- /**
- * Position in the ringbuffer of the start of the postfix.
- * This is required to calculate the maximum available ringbuffer
- * space without overwriting the postfix.
- */
- u32 postfix;
-
- /** Position in the ringbuffer of the end of the whole request */
- u32 tail;
-
- /** Preallocate space in the ringbuffer for the emitting the request */
- u32 reserved_space;
-
- /**
- * Context and ring buffer related to this request
- * Contexts are refcounted, so when this request is associated with a
- * context, we must increment the context's refcount, to guarantee that
- * it persists while any request is linked to it. Requests themselves
- * are also refcounted, so the request will only be freed when the last
- * reference to it is dismissed, and the code in
- * i915_gem_request_free() will then decrement the refcount on the
- * context.
- */
- struct i915_gem_context *ctx;
- struct intel_ringbuffer *ringbuf;
-
- /**
- * Context related to the previous request.
- * As the contexts are accessed by the hardware until the switch is
- * completed to a new context, the hardware may still be writing
- * to the context object after the breadcrumb is visible. We must
- * not unpin/unbind/prune that object whilst still active and so
- * we keep the previous context pinned until the following (this)
- * request is retired.
- */
- struct i915_gem_context *previous_context;
-
- /** Batch buffer related to this request if any (used for
- error state dump only) */
- struct drm_i915_gem_object *batch_obj;
-
- /** Time at which this request was emitted, in jiffies. */
- unsigned long emitted_jiffies;
-
- /** global list entry for this request */
- struct list_head list;
-
- struct drm_i915_file_private *file_priv;
- /** file_priv list entry for this request */
- struct list_head client_list;
-
- /** process identifier submitting this request */
- struct pid *pid;
-
- /**
- * The ELSP only accepts two elements at a time, so we queue
- * context/tail pairs on a given queue (ring->execlist_queue) until the
- * hardware is available. The queue serves a double purpose: we also use
- * it to keep track of the up to 2 contexts currently in the hardware
- * (usually one in execution and the other queued up by the GPU): We
- * only remove elements from the head of the queue when the hardware
- * informs us that an element has been completed.
- *
- * All accesses to the queue are mediated by a spinlock
- * (ring->execlist_lock).
- */
-
- /** Execlist link in the submission queue.*/
- struct list_head execlist_link;
-
- /** Execlists no. of times this request has been sent to the ELSP */
- int elsp_submitted;
-
- /** Execlists context hardware id. */
- unsigned ctx_hw_id;
-};
-
-struct drm_i915_gem_request * __must_check
-i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
-void i915_gem_request_free(struct kref *req_ref);
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
- struct drm_file *file);
-
-static inline uint32_t
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
- return req ? req->seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_engine(struct drm_i915_gem_request *req)
-{
- return req ? req->engine : NULL;
-}
-
-static inline struct drm_i915_gem_request *
-i915_gem_request_reference(struct drm_i915_gem_request *req)
-{
- if (req)
- kref_get(&req->ref);
- return req;
-}
-
-static inline void
-i915_gem_request_unreference(struct drm_i915_gem_request *req)
-{
- kref_put(&req->ref, i915_gem_request_free);
-}
-
-static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
- struct drm_i915_gem_request *src)
-{
- if (src)
- i915_gem_request_reference(src);
-
- if (*pdst)
- i915_gem_request_unreference(*pdst);
-
- *pdst = src;
-}
-
-/*
- * XXX: i915_gem_request_completed should be here but currently needs the
- * definition of i915_seqno_passed() which is below. It will be moved in
- * a later patch when the call to i915_seqno_passed() is obsoleted...
- */
-
/*
* A command that requires special handling by the command parser.
*/
@@ -2626,8 +2565,9 @@ struct drm_i915_cmd_descriptor {
/*
* A table of commands requiring special handling by the command parser.
*
- * Each ring has an array of tables. Each table consists of an array of command
- * descriptors, which must be sorted with command opcodes in ascending order.
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
*/
struct drm_i915_cmd_table {
const struct drm_i915_cmd_descriptor *table;
@@ -2645,7 +2585,7 @@ struct drm_i915_cmd_table {
BUILD_BUG(); \
__p; \
})
-#define INTEL_INFO(p) (&__I915__(p)->info)
+#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
@@ -2812,10 +2752,10 @@ struct drm_i915_cmd_table {
#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
HAS_EDRAM(dev))
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical)
-#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
+#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts)
+#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts)
#define USES_PPGTT(dev) (i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
@@ -2839,7 +2779,7 @@ struct drm_i915_cmd_table {
* interrupt source and so prevents the other device from working properly.
*/
#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -2855,38 +2795,27 @@ struct drm_i915_cmd_table {
#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
-#define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
- INTEL_INFO(dev)->gen >= 9)
+#define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst)
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
-#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
- IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
- IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
- IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
- IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
- IS_KABYLAKE(dev) || IS_BROXTON(dev))
-#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
-
-#define HAS_CSR(dev) (IS_GEN9(dev))
+#define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr)
+#define HAS_RUNTIME_PM(dev) (INTEL_INFO(dev)->has_runtime_pm)
+#define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
+#define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p)
+
+#define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
* properties, so we have separate macros to test them.
*/
-#define HAS_GUC(dev) (IS_GEN9(dev))
+#define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc)
#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
-#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
- INTEL_INFO(dev)->gen >= 8)
-
-#define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \
- !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
- !IS_BROXTON(dev))
+#define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer)
#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
@@ -2914,11 +2843,10 @@ struct drm_i915_cmd_table {
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \
- IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
/* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
#define GT_FREQUENCY_MULTIPLIER 50
@@ -2939,7 +2867,9 @@ extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
- int enable_ppgtt);
+ int enable_ppgtt);
+
+bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
/* i915_drv.c */
void __printf(3, 4)
@@ -2953,9 +2883,14 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
+extern const struct dev_pm_ops i915_pm_ops;
+
+extern int i915_driver_load(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-extern int i915_reset(struct drm_i915_private *dev_priv);
+extern void i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -3116,11 +3051,6 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
- struct drm_i915_gem_request *req);
-int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
- struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -3149,6 +3079,7 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
void *i915_gem_object_alloc(struct drm_device *dev);
@@ -3159,47 +3090,28 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);
-void i915_gem_vma_destroy(struct i915_vma *vma);
-
-/* Flags used by pin/bind&friends. */
-#define PIN_MAPPABLE (1<<0)
-#define PIN_NONBLOCK (1<<1)
-#define PIN_GLOBAL (1<<2)
-#define PIN_OFFSET_BIAS (1<<3)
-#define PIN_USER (1<<4)
-#define PIN_UPDATE (1<<5)
-#define PIN_ZONE_4G (1<<6)
-#define PIN_HIGH (1<<7)
-#define PIN_OFFSET_FIXED (1<<8)
-#define PIN_OFFSET_MASK (~4095)
-int __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags);
-int __must_check
+
+struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
- uint32_t alignment,
- uint64_t flags);
+ u64 size,
+ u64 alignment,
+ u64 flags);
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
-/*
- * BEWARE: Do not use the function below unless you can _absolutely_
- * _guarantee_ VMA in question is _not in use_ anywhere.
- */
-int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
- int *needs_clflush);
-
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __sg_page_count(struct scatterlist *sg)
@@ -3259,13 +3171,20 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
obj->pages_pin_count--;
}
+enum i915_map_type {
+ I915_MAP_WB = 0,
+ I915_MAP_WC,
+};
+
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj - the object to map into kernel address space
+ * @type - the type of mapping, used to select pgprot_t
*
* Calls i915_gem_object_pin_pages() to prevent reaping of the object's
* pages and then returns a contiguous mapping of the backing storage into
- * the kernel address space.
+ * the kernel address space. Based on the @type of mapping, the PTE will be
+ * set to either WriteBack or WriteCombine (via pgprot_t).
*
* The caller must hold the struct_mutex, and is responsible for calling
* i915_gem_object_unpin_map() when the mapping is no longer required.
@@ -3273,7 +3192,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
*/
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+ enum i915_map_type type);
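A hedged usage sketch of the extended interface (error handling abbreviated; data/size are hypothetical, and the caller holds struct_mutex):

	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);	/* or I915_MAP_WC */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, data, size);	/* CPU access through the mapping */
	i915_gem_object_unpin_map(obj);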
/**
* i915_gem_object_unpin_map - releases an earlier mapping
@@ -3292,122 +3212,73 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush);
+int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush);
+#define CLFLUSH_BEFORE 0x1
+#define CLFLUSH_AFTER 0x2
+#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
+
+static inline void
+i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+}
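The prepare/finish pair brackets direct CPU access to the shmem backing store; the returned needs_clflush bits indicate whether cachelines must be flushed before and/or after the access. A sketch assuming a pread-style copy:

	unsigned int needs_clflush;
	int ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	/* ... copy out pages, clflushing first if needs_clflush says so ... */

	i915_gem_obj_finish_shmem_access(obj);	/* unpin the pages */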
+
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *to,
- struct drm_i915_gem_request **to_req);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req);
+ struct drm_i915_gem_request *req,
+ unsigned int flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
+int i915_gem_mmap_gtt_version(void);
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits);
-/**
- * Returns true if seq1 is later than seq2.
- */
-static inline bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
- return (int32_t)(seq1 - seq2) >= 0;
-}
-
-static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
-{
- return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- req->previous_seqno);
-}
-
-static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
-{
- return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- req->seqno);
-}
-
-bool __i915_spin_request(const struct drm_i915_gem_request *request,
- int state, unsigned long timeout_us);
-static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
- int state, unsigned long timeout_us)
-{
- return (i915_gem_request_started(request) &&
- __i915_spin_request(request, state, timeout_us));
-}
-
-int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
-
-static inline u32 i915_reset_counter(struct i915_gpu_error *error)
-{
- return atomic_read(&error->reset_counter);
-}
-
-static inline bool __i915_reset_in_progress(u32 reset)
-{
- return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
-}
-
-static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
-{
- return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
-}
-
-static inline bool __i915_terminally_wedged(u32 reset)
-{
- return unlikely(reset & I915_WEDGED);
-}
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
- return __i915_reset_in_progress(i915_reset_counter(error));
+ return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
}
-static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
- return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
+ return unlikely(test_bit(I915_WEDGED, &error->flags));
}
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
{
- return __i915_terminally_wedged(i915_reset_counter(error));
+ return i915_reset_in_progress(error) | i915_terminally_wedged(error);
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
- return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
+ return READ_ONCE(error->reset_count);
}
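Caller-side view of the new scheme (an illustrative sketch, not from the patch): reset bookkeeping is now two independent bits in error->flags plus a plain counter, so checks reduce to single test_bit()s:

	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;	/* drop struct_mutex so the reset can run */

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;	/* GPU is dead, no point in waiting */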
-void i915_gem_reset(struct drm_device *dev);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_engines(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+ unsigned int flags);
int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct drm_i915_gem_request *req,
- struct drm_i915_gem_object *batch_obj,
- bool flush_caches);
-#define i915_add_request(req) \
- __i915_add_request(req, NULL, true)
-#define i915_add_request_no_flush(req) \
- __i915_add_request(req, NULL, false)
-int __i915_wait_request(struct drm_i915_gem_request *req,
- bool interruptible,
- s64 *timeout,
- struct intel_rps_client *rps);
-int __must_check i915_wait_request(struct drm_i915_gem_request *req);
+void i915_gem_resume(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -3417,22 +3288,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
+struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
- int tiling_mode, bool fenced);
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
+ int tiling_mode);
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
+ int tiling_mode, bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@@ -3443,86 +3312,82 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view);
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
-static inline u64
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
-{
- return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
-}
-
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view);
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
-
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view);
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view);
-
-static inline struct i915_vma *
-i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
- return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
-}
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
-/* Some GGTT VM helpers */
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
return container_of(vm, struct i915_hw_ppgtt, base);
}
+static inline struct i915_vma *
+i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
+{
+ return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
+}
-static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
+static inline unsigned long
+i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view)
{
- return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
+ return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
}
-unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
+/* i915_gem_fence.c */
+int __must_check i915_vma_get_fence(struct i915_vma *vma);
+int __must_check i915_vma_put_fence(struct i915_vma *vma);
-static inline int __must_check
-i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
- uint32_t alignment,
- unsigned flags)
+/**
+ * i915_vma_pin_fence - pin fencing state
+ * @vma: vma to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * vma (and its object) is ready to be used as a scanout target. Fencing
+ * status must be synchronized first by calling i915_vma_get_fence():
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_vma_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the vma has a fence, false otherwise.
+ */
+static inline bool
+i915_vma_pin_fence(struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- return i915_gem_object_pin(obj, &ggtt->base,
- alignment, flags | PIN_GLOBAL);
+ if (vma->fence) {
+ vma->fence->pin_count++;
+ return true;
+ } else
+ return false;
}
-void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view);
+/**
+ * i915_vma_unpin_fence - unpin fencing state
+ * @vma: vma to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_vma_pin_fence. It will handle both objects with and without an
+ * attached fence correctly, callers do not need to distinguish this.
+ */
static inline void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_vma_unpin_fence(struct i915_vma *vma)
{
- i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+ if (vma->fence) {
+ GEM_BUG_ON(vma->fence->pin_count <= 0);
+ vma->fence->pin_count--;
+ }
}
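A hedged sketch pairing the two helpers (surrounding calls assumed):

	int ret = i915_vma_get_fence(vma);	/* synchronize fence state first */
	if (ret)
		return ret;

	if (i915_vma_pin_fence(vma)) {
		/* ... scan out through the fenced GTT mapping ... */
		i915_vma_unpin_fence(vma);
	}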
-/* i915_gem_fence.c */
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
-
void i915_gem_restore_fences(struct drm_device *dev);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -3533,10 +3398,10 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_lost(struct drm_i915_private *dev_priv);
void i915_gem_context_fini(struct drm_device *dev);
-void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
@@ -3557,12 +3422,14 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
return ctx;
}
-static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
+static inline struct i915_gem_context *
+i915_gem_context_get(struct i915_gem_context *ctx)
{
kref_get(&ctx->ref);
+ return ctx;
}
-static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
+static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
kref_put(&ctx->ref, i915_gem_context_free);
@@ -3585,13 +3452,10 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
- struct i915_address_space *vm,
- int min_size,
- unsigned alignment,
+int __must_check i915_gem_evict_something(struct i915_address_space *vm,
+ u64 min_size, u64 alignment,
unsigned cache_level,
- unsigned long start,
- unsigned long end,
+ u64 start, u64 end,
unsigned flags);
int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
@@ -3644,28 +3508,21 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- obj->tiling_mode != I915_TILING_NONE;
+ i915_gem_object_is_tiled(obj);
}
-/* i915_gem_debug.c */
-#if WATCH_LISTS
-int i915_verify_lists(struct drm_device *dev);
-#else
-#define i915_verify_lists(dev) 0
-#endif
-
/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
-void intel_display_crc_init(struct drm_device *dev);
+void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
-static inline void intel_display_crc_init(struct drm_device *dev) {}
+static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif
/* i915_gpu_error.c */
@@ -3694,23 +3551,23 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
-bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
-int i915_parse_cmds(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
- u32 batch_start_offset,
- u32 batch_len,
- bool is_master);
+void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
+bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *batch_obj,
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u32 batch_start_offset,
+ u32 batch_len,
+ bool is_master);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
/* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_device *dev_priv);
-void i915_teardown_sysfs(struct drm_device *dev_priv);
+void i915_setup_sysfs(struct drm_i915_private *dev_priv);
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
@@ -3810,7 +3667,6 @@ extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
-extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -3888,9 +3744,16 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
* will be implemented using 2 32-bit writes in an arbitrary order with
* an arbitrary delay between them. This can cause the hardware to
* act upon the intermediate value, possibly leading to corruption and
- * machine death. You have been warned.
+ * machine death. For this reason we do not support I915_WRITE64, or
+ * dev_priv->uncore.funcs.mmio_writeq.
+ *
+ * When reading a 64-bit value as two 32-bit values, the delay may cause
+ * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+ * occasionally a 64-bit register does not actually support a full readq
+ * and must be read using two 32-bit reads.
+ *
+ * You have been warned.
*/
-#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
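Why the 2x32 variant exists (illustrative): a 64-bit counter read as two 32-bit halves can tear if the low half wraps between the reads. A sketch of the usual guard, re-reading the upper half until it is stable — this mirrors what I915_READ64_2x32 does in the full header, whose body falls outside the diff context:

	u32 upper, lower, old_upper, loop = 0;
	u64 value;

	upper = I915_READ(upper_reg);
	do {
		old_upper = upper;
		lower = I915_READ(lower_reg);
		upper = I915_READ(upper_reg);
	} while (upper != old_upper && loop++ < 2);
	value = ((u64)upper << 32) | lower;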
@@ -3933,7 +3796,7 @@ __raw_write(64, q)
#undef __raw_write
/* These are untraced mmio-accessors that are only valid to be used inside
- * criticial sections inside IRQ handlers where forcewake is explicitly
+ * critical sections inside IRQ handlers where forcewake is explicitly
* controlled.
* Think twice, and think again, before using these.
* Note: Should only be used between intel_uncore_forcewake_irqlock() and
@@ -4005,7 +3868,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
schedule_timeout_uninterruptible(remaining_jiffies);
}
}
-static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
+
+static inline bool
+__i915_request_irq_complete(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
@@ -4027,7 +3892,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
* is woken.
*/
if (engine->irq_seqno_barrier &&
- READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
+ rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
struct task_struct *tsk;
@@ -4052,7 +3917,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
* irq_posted == false but we are still running).
*/
rcu_read_lock();
- tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+ tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
if (tsk && tsk != current)
/* Note that if the bottom-half is changed as we
* are sending the wake-up, the new bottom-half will
@@ -4067,18 +3932,35 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
return true;
}
- /* We need to check whether any gpu reset happened in between
- * the request being submitted and now. If a reset has occurred,
- * the seqno will have been advance past ours and our request
- * is complete. If we are in the process of handling a reset,
- * the request is effectively complete as the rendering will
- * be discarded, but we need to return in order to drop the
- * struct_mutex.
- */
- if (i915_reset_in_progress(&req->i915->gpu_error))
- return true;
-
return false;
}
+void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
+bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+
+/* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap);
+
+#define ptr_mask_bits(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v & PAGE_MASK); \
+})
+
+#define ptr_unpack_bits(ptr, bits) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (bits) = __v & ~PAGE_MASK; \
+ (typeof(ptr))(__v & PAGE_MASK); \
+})
+
+#define ptr_pack_bits(ptr, bits) \
+ ((typeof(ptr))((unsigned long)(ptr) | (bits)))
+
+#define fetch_and_zero(ptr) ({ \
+ typeof(*ptr) __T = *(ptr); \
+ *(ptr) = (typeof(*ptr))0; \
+ __T; \
+})
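Illustrative use of the new helpers (page_addr and waiter are hypothetical): ptr_pack_bits() stows small flags in the low bits of a page-aligned pointer, ptr_unpack_bits() recovers both halves, and fetch_and_zero() reads-then-clears a variable in one expression:

	void *packed, *addr;
	unsigned long bits;

	packed = ptr_pack_bits(page_addr, 0x3);	/* low bits of page_addr are 0 */
	addr = ptr_unpack_bits(packed, bits);	/* addr == page_addr, bits == 0x3 */

	struct task_struct *tsk = fetch_and_zero(&waiter); /* old value, waiter = NULL */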
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a77ce9983f69..91ab7e9d6d2e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,10 +29,13 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
@@ -41,10 +44,6 @@
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
@@ -139,7 +138,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
if (ret)
return ret;
- WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -156,10 +154,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
- if (vma->pin_count)
+ if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
- if (vma->pin_count)
+ if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
@@ -281,23 +279,129 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.release = i915_gem_object_release_phys,
};
-static int
-drop_pages(struct drm_i915_gem_object *obj)
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
- struct i915_vma *vma, *next;
+ struct i915_vma *vma;
+ LIST_HEAD(still_in_list);
int ret;
- drm_gem_object_reference(&obj->base);
- list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
- if (i915_vma_unbind(vma))
- break;
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
- ret = i915_gem_object_put_pages(obj);
- drm_gem_object_unreference(&obj->base);
+ /* Closed vma are removed from the obj->vma_list - but they may
+ * still have an active binding on the object. To remove those we
+ * must wait for all rendering to complete to the object (as unbinding
+ * must anyway), and retire the requests.
+ */
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(to_i915(obj->base.dev));
+
+ while ((vma = list_first_entry_or_null(&obj->vma_list,
+ struct i915_vma,
+ obj_link))) {
+ list_move_tail(&vma->obj_link, &still_in_list);
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ break;
+ }
+ list_splice(&still_in_list, &obj->vma_list);
return ret;
}
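i915_gem_object_unbind() above walks obj->vma_list while i915_vma_unbind() may itself re-link entries, so each visited vma is parked on the private still_in_list and the whole lot is spliced back afterwards. A self-contained sketch of that pop/park/splice idiom (hypothetical demo types, not the kernel list API):

#include <stdio.h>

struct node { struct node *next; int id; };

/* Pop entries off the live list one at a time so the processing step
 * may freely re-link it; park processed entries on a private list and
 * splice them back at the end. */
static void process_all(struct node **head)
{
	struct node *done = NULL, *tail = NULL, *n;

	while ((n = *head)) {
		*head = n->next;		/* detach from the live list */
		n->next = NULL;
		if (tail)
			tail->next = n;
		else
			done = n;
		tail = n;			/* park on the private list */
		printf("unbinding vma %d\n", n->id);
	}
	*head = done;				/* splice back */
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct node *list = &a;

	process_all(&list);
	return 0;
}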
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ * @obj: i915 gem object
+ * @readonly: waiting for just read access or read-write access
+ */
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly)
+{
+ struct reservation_object *resv;
+ struct i915_gem_active *active;
+ unsigned long active_mask;
+ int idx;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ if (!readonly) {
+ active = obj->last_read;
+ active_mask = i915_gem_object_get_active(obj);
+ } else {
+ active_mask = 1;
+ active = &obj->last_write;
+ }
+
+ for_each_active(active_mask, idx) {
+ int ret;
+
+ ret = i915_gem_active_wait(&active[idx],
+ &obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+ }
+
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (resv) {
+ long err;
+
+ err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/* A nonblocking variant of the above wait. Must be called prior to
+ * acquiring the mutex for the object, as the object state may change
+ * during this call. A reference must be held by the caller for the object.
+ */
+static __must_check int
+__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
+ struct intel_rps_client *rps,
+ bool readonly)
+{
+ struct i915_gem_active *active;
+ unsigned long active_mask;
+ int idx;
+
+ active_mask = __I915_BO_ACTIVE(obj);
+ if (!active_mask)
+ return 0;
+
+ if (!readonly) {
+ active = obj->last_read;
+ } else {
+ active_mask = 1;
+ active = &obj->last_write;
+ }
+
+ for_each_active(active_mask, idx) {
+ int ret;
+
+ ret = i915_gem_active_wait_unlocked(&active[idx],
+ I915_WAIT_INTERRUPTIBLE,
+ NULL, rps);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
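__unsafe_wait_rendering() visits one i915_gem_active slot per set bit of active_mask. The for_each_active() macro is defined elsewhere in the series; a plausible stand-in using a compiler builtin (sketch only, not the driver's actual macro):

#include <stdio.h>

/* Visit each set bit of mask, lowest first, clearing it as we go. */
#define for_each_set_bit_demo(mask, idx) \
	for (; (mask) && ((idx) = __builtin_ffsl(mask) - 1, 1); \
	     (mask) &= (mask) - 1)

int main(void)
{
	unsigned long active_mask = 0x15;	/* engines 0, 2 and 4 busy */
	int idx;

	for_each_set_bit_demo(active_mask, idx)
		printf("wait on engine %d\n", idx);
	return 0;
}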
+static struct intel_rps_client *to_rps_client(struct drm_file *file)
+{
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+
+ return &fpriv->rps;
+}
+
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
@@ -318,7 +422,11 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->base.filp == NULL)
return -EINVAL;
- ret = drop_pages(obj);
+ ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_put_pages(obj);
if (ret)
return ret;
@@ -408,7 +516,7 @@ i915_gem_create(struct drm_file *file,
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&obj->base);
+ i915_gem_object_put_unlocked(obj);
if (ret)
return ret;
@@ -502,33 +610,106 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
* flush the object from the CPU cache.
*/
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
- int *needs_clflush)
+ unsigned int *needs_clflush)
{
int ret;
*needs_clflush = 0;
- if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
- return -EINVAL;
+ if (!i915_gem_object_has_struct_page(obj))
+ return -ENODEV;
+
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_pin_pages(obj);
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
- /* If we're not in the cpu read domain, set ourself into the gtt
- * read domain and manually flush cachelines (if required). This
- * optimizes for the case when the gpu will dirty the data
- * anyway again before the next pread happens. */
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ /* If we're not in the cpu read domain, set ourself into the gtt
+ * read domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will dirty the data
+ * anyway again before the next pread happens.
+ */
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
obj->cache_level);
- ret = i915_gem_object_wait_rendering(obj, true);
+
+ if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
- return ret;
+ goto err_unpin;
+
+ *needs_clflush = 0;
}
+ /* return with the pages pinned */
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+}
+
+int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush)
+{
+ int ret;
+
+ *needs_clflush = 0;
+ if (!i915_gem_object_has_struct_page(obj))
+ return -ENODEV;
+
+ ret = i915_gem_object_wait_rendering(obj, false);
+ if (ret)
+ return ret;
+
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
i915_gem_object_pin_pages(obj);
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ /* If we're not in the cpu write domain, set ourself into the
+ * gtt write domain and manually flush cachelines (as required).
+ * This optimizes for the case when the gpu will use the data
+ * right away and we therefore have to clflush anyway.
+ */
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+ *needs_clflush |= cpu_write_needs_clflush(obj) << 1;
+
+ /* Same trick applies to invalidate partially written cachelines read
+ * before writing.
+ */
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
+ *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
+ obj->cache_level);
+
+ if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+
+ *needs_clflush = 0;
+ }
+
+ if ((*needs_clflush & CLFLUSH_AFTER) == 0)
+ obj->cache_dirty = true;
+
+ intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ obj->dirty = 1;
+ /* return with the pages pinned */
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
return ret;
}
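i915_gem_obj_prepare_shmem_write() returns its verdict as two flag bits in *needs_clflush: the << 1 above lands cpu_write_needs_clflush() in the CLFLUSH_AFTER position and the coherency test fills CLFLUSH_BEFORE. Assuming the conventional layout (CLFLUSH_BEFORE as bit 0, CLFLUSH_AFTER as bit 1; the definitions are not visible in this hunk), the pwrite path decodes them like this (sketch):

#include <stdbool.h>
#include <stdio.h>

#define CLFLUSH_BEFORE	(1u << 0)	/* assumed: flush stale lines first */
#define CLFLUSH_AFTER	(1u << 1)	/* assumed: flush after the write */

int main(void)
{
	bool write_needs_flush = true;	/* stands in for cpu_write_needs_clflush() */
	bool cache_incoherent = true;	/* stands in for !cpu_cache_is_coherent() */
	unsigned int needs_clflush = 0;

	needs_clflush |= write_needs_flush << 1;	/* CLFLUSH_AFTER */
	needs_clflush |= cache_incoherent;		/* CLFLUSH_BEFORE */

	if (needs_clflush & CLFLUSH_BEFORE)
		printf("clflush partially written cachelines before writing\n");
	if (needs_clflush & CLFLUSH_AFTER)
		printf("clflush once the CPU write has landed\n");
	return 0;
}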
@@ -638,14 +819,24 @@ i915_gem_gtt_pread(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_vma *vma;
struct drm_mm_node node;
char __user *user_data;
uint64_t remain;
uint64_t offset;
int ret;
- ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
- if (ret) {
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (!IS_ERR(vma)) {
+ node.start = i915_ggtt_offset(vma);
+ node.allocated = false;
+ ret = i915_vma_put_fence(vma);
+ if (ret) {
+ i915_vma_unpin(vma);
+ vma = ERR_PTR(ret);
+ }
+ }
+ if (IS_ERR(vma)) {
ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
if (ret)
goto out;
@@ -657,12 +848,6 @@ i915_gem_gtt_pread(struct drm_device *dev,
}
i915_gem_object_pin_pages(obj);
- } else {
- node.start = i915_gem_obj_ggtt_offset(obj);
- node.allocated = false;
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin;
}
ret = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -675,7 +860,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
if (likely(!i915.prefault_disable)) {
- ret = fault_in_multipages_writeable(user_data, remain);
+ ret = fault_in_pages_writeable(user_data, remain);
if (ret) {
mutex_lock(&dev->struct_mutex);
goto out_unpin;
@@ -707,7 +892,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
* and write to user memory, which may result in page
* faults, and so we cannot perform this under struct_mutex.
*/
- if (slow_user_access(ggtt->mappable, page_base,
+ if (slow_user_access(&ggtt->mappable, page_base,
page_offset, user_data,
page_length, false)) {
ret = -EFAULT;
@@ -739,7 +924,7 @@ out_unpin:
i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
- i915_gem_object_ggtt_unpin(obj);
+ i915_vma_unpin(vma);
}
out:
return ret;
@@ -760,19 +945,14 @@ i915_gem_shmem_pread(struct drm_device *dev,
int needs_clflush = 0;
struct sg_page_iter sg_iter;
- if (!i915_gem_object_has_struct_page(obj))
- return -ENODEV;
-
- user_data = u64_to_user_ptr(args->data_ptr);
- remain = args->size;
-
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
if (ret)
return ret;
+ obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
+ remain = args->size;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
offset >> PAGE_SHIFT) {
@@ -803,7 +983,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
if (likely(!i915.prefault_disable) && !prefaulted) {
- ret = fault_in_multipages_writeable(user_data, remain);
+ ret = fault_in_pages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
* data up to the first fault. Hence ignore any errors
@@ -828,7 +1008,7 @@ next_page:
}
out:
- i915_gem_object_unpin_pages(obj);
+ i915_gem_obj_finish_shmem_access(obj);
return ret;
}
@@ -857,25 +1037,27 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
/* Bounds check source. */
if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL;
- goto out;
+ goto err;
}
trace_i915_gem_object_pread(obj, args->offset, args->size);
+ ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
+ if (ret)
+ goto err;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto err;
+
ret = i915_gem_shmem_pread(dev, obj, args, file);
/* pread for non shmem backed objects */
@@ -886,10 +1068,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
intel_runtime_pm_put(to_i915(dev));
}
-out:
- drm_gem_object_unreference(&obj->base);
-unlock:
+ i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+
+err:
+ i915_gem_object_put_unlocked(obj);
return ret;
}
@@ -919,7 +1104,7 @@ fast_user_write(struct io_mapping *mapping,
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
- * @dev: drm device pointer
+ * @i915: i915 device private data
* @obj: i915 gem object
* @args: pwrite arguments structure
* @file: drm file pointer
@@ -932,17 +1117,28 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
{
struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_device *dev = obj->base.dev;
+ struct i915_vma *vma;
struct drm_mm_node node;
uint64_t remain, offset;
char __user *user_data;
int ret;
bool hit_slow_path = false;
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (i915_gem_object_is_tiled(obj))
return -EFAULT;
- ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
- if (ret) {
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE | PIN_NONBLOCK);
+ if (!IS_ERR(vma)) {
+ node.start = i915_ggtt_offset(vma);
+ node.allocated = false;
+ ret = i915_vma_put_fence(vma);
+ if (ret) {
+ i915_vma_unpin(vma);
+ vma = ERR_PTR(ret);
+ }
+ }
+ if (IS_ERR(vma)) {
ret = insert_mappable_node(i915, &node, PAGE_SIZE);
if (ret)
goto out;
@@ -954,19 +1150,13 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
}
i915_gem_object_pin_pages(obj);
- } else {
- node.start = i915_gem_obj_ggtt_offset(obj);
- node.allocated = false;
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin;
}
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin;
- intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+ intel_fb_obj_invalidate(obj, ORIGIN_CPU);
obj->dirty = true;
user_data = u64_to_user_ptr(args->data_ptr);
@@ -998,11 +1188,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
- if (fast_user_write(ggtt->mappable, page_base,
+ if (fast_user_write(&ggtt->mappable, page_base,
page_offset, user_data, page_length)) {
hit_slow_path = true;
mutex_unlock(&dev->struct_mutex);
- if (slow_user_access(ggtt->mappable,
+ if (slow_user_access(&ggtt->mappable,
page_base,
page_offset, user_data,
page_length, true)) {
@@ -1033,7 +1223,7 @@ out_flush:
}
}
- intel_fb_obj_flush(obj, false, ORIGIN_GTT);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
out_unpin:
if (node.allocated) {
wmb();
@@ -1043,7 +1233,7 @@ out_unpin:
i915_gem_object_unpin_pages(obj);
remove_mappable_node(&node);
} else {
- i915_gem_object_ggtt_unpin(obj);
+ i915_vma_unpin(vma);
}
out:
return ret;
@@ -1126,41 +1316,17 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
int hit_slowpath = 0;
- int needs_clflush_after = 0;
- int needs_clflush_before = 0;
+ unsigned int needs_clflush;
struct sg_page_iter sg_iter;
- user_data = u64_to_user_ptr(args->data_ptr);
- remain = args->size;
-
- obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- /* If we're not in the cpu write domain, set ourself into the gtt
- * write domain and manually flush cachelines (if required). This
- * optimizes for the case when the gpu will use the data
- * right away and we therefore have to clflush anyway. */
- needs_clflush_after = cpu_write_needs_clflush(obj);
- ret = i915_gem_object_wait_rendering(obj, false);
- if (ret)
- return ret;
- }
- /* Same trick applies to invalidate partially written cachelines read
- * before writing. */
- if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
- needs_clflush_before =
- !cpu_cache_is_coherent(dev, obj->cache_level);
-
- ret = i915_gem_object_get_pages(obj);
+ ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
if (ret)
return ret;
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-
- i915_gem_object_pin_pages(obj);
-
+ obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
- obj->dirty = 1;
+ remain = args->size;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
offset >> PAGE_SHIFT) {
@@ -1184,7 +1350,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
* overcomplicate things and flush the entire cacheline. */
- partial_cacheline_write = needs_clflush_before &&
+ partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
@@ -1194,7 +1360,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
- needs_clflush_after);
+ needs_clflush & CLFLUSH_AFTER);
if (ret == 0)
goto next_page;
@@ -1203,7 +1369,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
- needs_clflush_after);
+ needs_clflush & CLFLUSH_AFTER);
mutex_lock(&dev->struct_mutex);
@@ -1217,7 +1383,7 @@ next_page:
}
out:
- i915_gem_object_unpin_pages(obj);
+ i915_gem_obj_finish_shmem_access(obj);
if (hit_slowpath) {
/*
@@ -1225,17 +1391,15 @@ out:
* cachelines in-line while writing and the object moved
* out of the cpu write domain while we've dropped the lock.
*/
- if (!needs_clflush_after &&
+ if (!(needs_clflush & CLFLUSH_AFTER) &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
if (i915_gem_clflush_object(obj, obj->pin_display))
- needs_clflush_after = true;
+ needs_clflush |= CLFLUSH_AFTER;
}
}
- if (needs_clflush_after)
+ if (needs_clflush & CLFLUSH_AFTER)
i915_gem_chipset_flush(to_i915(dev));
- else
- obj->cache_dirty = true;
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
return ret;
@@ -1267,33 +1431,35 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -EFAULT;
if (likely(!i915.prefault_disable)) {
- ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
+ ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
args->size);
if (ret)
return -EFAULT;
}
- intel_runtime_pm_get(dev_priv);
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto put_rpm;
-
- obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
/* Bounds check destination. */
if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL;
- goto out;
+ goto err;
}
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+ ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
+ if (ret)
+ goto err;
+
+ intel_runtime_pm_get(dev_priv);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto err_rpm;
+
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
@@ -1312,505 +1478,28 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
- else if (i915_gem_object_has_struct_page(obj))
- ret = i915_gem_shmem_pwrite(dev, obj, args, file);
else
- ret = -ENODEV;
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
}
-out:
- drm_gem_object_unreference(&obj->base);
-unlock:
+ i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
-put_rpm:
intel_runtime_pm_put(dev_priv);
return ret;
-}
-
-static int
-i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
-{
- if (__i915_terminally_wedged(reset_counter))
- return -EIO;
-
- if (__i915_reset_in_progress(reset_counter)) {
- /* Non-interruptible callers can't handle -EAGAIN, hence return
- * -EIO unconditionally for these. */
- if (!interruptible)
- return -EIO;
-
- return -EAGAIN;
- }
-
- return 0;
-}
-
-static unsigned long local_clock_us(unsigned *cpu)
-{
- unsigned long t;
-
- /* Cheaply and approximately convert from nanoseconds to microseconds.
- * The result and subsequent calculations are also defined in the same
- * approximate microseconds units. The principal source of timing
- * error here is from the simple truncation.
- *
- * Note that local_clock() is only defined with respect to the current CPU;
- * the comparisons are no longer valid if we switch CPUs. Instead of
- * blocking preemption for the entire busywait, we can detect the CPU
- * switch and use that as indicator of system load and a reason to
- * stop busywaiting, see busywait_stop().
- */
- *cpu = get_cpu();
- t = local_clock() >> 10;
- put_cpu();
-
- return t;
-}
-
-static bool busywait_stop(unsigned long timeout, unsigned cpu)
-{
- unsigned this_cpu;
-
- if (time_after(local_clock_us(&this_cpu), timeout))
- return true;
-
- return this_cpu != cpu;
-}
-
-bool __i915_spin_request(const struct drm_i915_gem_request *req,
- int state, unsigned long timeout_us)
-{
- unsigned cpu;
-
- /* When waiting for high frequency requests, e.g. during synchronous
- * rendering split between the CPU and GPU, the finite amount of time
- * required to set up the irq and wait upon it limits the response
- * rate. By busywaiting on the request completion for a short while we
- * can service the high frequency waits as quickly as possible. However,
- * if it is a slow request, we want to sleep as quickly as possible.
- * The tradeoff between waiting and sleeping is roughly the time it
- * takes to sleep on a request, on the order of a microsecond.
- */
-
- timeout_us += local_clock_us(&cpu);
- do {
- if (i915_gem_request_completed(req))
- return true;
-
- if (signal_pending_state(state, current))
- break;
-
- if (busywait_stop(timeout_us, cpu))
- break;
-
- cpu_relax_lowlatency();
- } while (!need_resched());
-
- return false;
-}
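The busywait above (moved out of i915_gem.c by this patch) bounds the spin both by a time budget and by detecting a CPU migration, since local_clock() timestamps from different CPUs are not comparable. A self-contained userspace approximation of the two-condition cutoff (clock_gettime() and sched_getcpu() stand in for local_clock() and get_cpu()):

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <time.h>

static unsigned long clock_us(int *cpu)
{
	struct timespec ts;

	*cpu = sched_getcpu();
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000UL + ts.tv_nsec / 1000;
}

/* Stop spinning once the deadline passes or we migrate: after a
 * migration the timestamps no longer compare, and the migration
 * itself suggests the system is too busy to keep spinning. */
static bool busywait_stop(unsigned long deadline_us, int cpu)
{
	int this_cpu;
	unsigned long now = clock_us(&this_cpu);

	return now > deadline_us || this_cpu != cpu;
}

int main(void)
{
	int cpu;
	unsigned long deadline = clock_us(&cpu) + 5;	/* ~5 us budget */

	while (!busywait_stop(deadline, cpu))
		;	/* poll the completion condition here */
	return 0;
}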
-
-/**
- * __i915_wait_request - wait until execution of request has finished
- * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- * @rps: RPS client
- *
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
- *
- * Returns 0 if the request was found within the allotted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-int __i915_wait_request(struct drm_i915_gem_request *req,
- bool interruptible,
- s64 *timeout,
- struct intel_rps_client *rps)
-{
- int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- DEFINE_WAIT(reset);
- struct intel_wait wait;
- unsigned long timeout_remain;
- s64 before = 0; /* Only to silence a compiler warning. */
- int ret = 0;
-
- might_sleep();
-
- if (list_empty(&req->list))
- return 0;
-
- if (i915_gem_request_completed(req))
- return 0;
-
- timeout_remain = MAX_SCHEDULE_TIMEOUT;
- if (timeout) {
- if (WARN_ON(*timeout < 0))
- return -EINVAL;
-
- if (*timeout == 0)
- return -ETIME;
-
- timeout_remain = nsecs_to_jiffies_timeout(*timeout);
-
- /*
- * Record current time in case interrupted by signal, or wedged.
- */
- before = ktime_get_raw_ns();
- }
-
- trace_i915_gem_request_wait_begin(req);
-
- /* This client is about to stall waiting for the GPU. In many cases
- * this is undesirable and limits the throughput of the system, as
- * many clients cannot continue processing user input/output whilst
- * blocked. RPS autotuning may take tens of milliseconds to respond
- * to the GPU load and thus incurs additional latency for the client.
- * We can circumvent that by promoting the GPU frequency to maximum
- * before we wait. This makes the GPU throttle up much more quickly
- * (good for benchmarks and user experience, e.g. window animations),
- * but at a cost of spending more power processing the workload
- * (bad for battery). Not all clients even want their results
- * immediately and for them we should just let the GPU select its own
- * frequency to maximise efficiency. To prevent a single client from
- * forcing the clocks too high for the whole system, we only allow
- * each client to waitboost once in a busy period.
- */
- if (INTEL_INFO(req->i915)->gen >= 6)
- gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
-
- /* Optimistic spin for the next ~jiffie before touching IRQs */
- if (i915_spin_request(req, state, 5))
- goto complete;
-
- set_current_state(state);
- add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
-
- intel_wait_init(&wait, req->seqno);
- if (intel_engine_add_wait(req->engine, &wait))
- /* In order to check that we haven't missed the interrupt
- * as we enabled it, we need to kick ourselves to do a
- * coherent check on the seqno before we sleep.
- */
- goto wakeup;
-
- for (;;) {
- if (signal_pending_state(state, current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- timeout_remain = io_schedule_timeout(timeout_remain);
- if (timeout_remain == 0) {
- ret = -ETIME;
- break;
- }
-
- if (intel_wait_complete(&wait))
- break;
-
- set_current_state(state);
-
-wakeup:
- /* Carefully check if the request is complete, giving time
- * for the seqno to be visible following the interrupt.
- * We also have to check in case we are kicked by the GPU
- * reset in order to drop the struct_mutex.
- */
- if (__i915_request_irq_complete(req))
- break;
-
- /* Only spin if we know the GPU is processing this request */
- if (i915_spin_request(req, state, 2))
- break;
- }
- remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
-
- intel_engine_remove_wait(req->engine, &wait);
- __set_current_state(TASK_RUNNING);
-complete:
- trace_i915_gem_request_wait_end(req);
-
- if (timeout) {
- s64 tres = *timeout - (ktime_get_raw_ns() - before);
-
- *timeout = tres < 0 ? 0 : tres;
-
- /*
- * Apparently ktime isn't accurate enough and occasionally has a
- * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
- * things up to make the test happy. We allow up to 1 jiffy.
- *
- * This is a regression from the timespec->ktime conversion.
- */
- if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
- *timeout = 0;
- }
-
- if (rps && req->seqno == req->engine->last_submitted_seqno) {
- /* The GPU is now idle and this client has stalled.
- * Since no other client has submitted a request in the
- * meantime, assume that this client is the only one
- * supplying work to the GPU but is unable to keep that
- * work supplied because it is waiting. Since the GPU is
- * then never kept fully busy, RPS autoclocking will
- * keep the clocks relatively low, causing further delays.
- * Compensate by giving the synchronous client credit for
- * a waitboost next time.
- */
- spin_lock(&req->i915->rps.client_lock);
- list_del_init(&rps->link);
- spin_unlock(&req->i915->rps.client_lock);
- }
-
- return ret;
-}
-
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
- struct drm_file *file)
-{
- struct drm_i915_file_private *file_priv;
-
- WARN_ON(!req || !file || req->file_priv);
-
- if (!req || !file)
- return -EINVAL;
-
- if (req->file_priv)
- return -EINVAL;
-
- file_priv = file->driver_priv;
-
- spin_lock(&file_priv->mm.lock);
- req->file_priv = file_priv;
- list_add_tail(&req->client_list, &file_priv->mm.request_list);
- spin_unlock(&file_priv->mm.lock);
-
- req->pid = get_pid(task_pid(current));
-
- return 0;
-}
-
-static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
-{
- struct drm_i915_file_private *file_priv = request->file_priv;
-
- if (!file_priv)
- return;
-
- spin_lock(&file_priv->mm.lock);
- list_del(&request->client_list);
- request->file_priv = NULL;
- spin_unlock(&file_priv->mm.lock);
-
- put_pid(request->pid);
- request->pid = NULL;
-}
-
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
-{
- trace_i915_gem_request_retire(request);
-
- /* We know the GPU must have read the request to have
- * sent us the seqno + interrupt, so use the position
- * of tail of the request to update the last known position
- * of the GPU head.
- *
- * Note this requires that we are always called in request
- * completion order.
- */
- request->ringbuf->last_retired_head = request->postfix;
-
- list_del_init(&request->list);
- i915_gem_request_remove_from_client(request);
-
- if (request->previous_context) {
- if (i915.enable_execlists)
- intel_lr_context_unpin(request->previous_context,
- request->engine);
- }
-
- i915_gem_context_unreference(request->ctx);
- i915_gem_request_unreference(request);
-}
-
-static void
-__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_gem_request *tmp;
-
- lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
- if (list_empty(&req->list))
- return;
-
- do {
- tmp = list_first_entry(&engine->request_list,
- typeof(*tmp), list);
-
- i915_gem_request_retire(tmp);
- } while (tmp != req);
-
- WARN_ON(i915_verify_lists(engine->dev));
-}
-
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- * @req: request to wait on
- */
-int
-i915_wait_request(struct drm_i915_gem_request *req)
-{
- struct drm_i915_private *dev_priv = req->i915;
- bool interruptible;
- int ret;
-
- interruptible = dev_priv->mm.interruptible;
-
- BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
-
- ret = __i915_wait_request(req, interruptible, NULL, NULL);
- if (ret)
- return ret;
-
- /* If the GPU hung, we want to keep the requests to find the guilty. */
- if (!i915_reset_in_progress(&dev_priv->gpu_error))
- __i915_gem_request_retire__upto(req);
-
- return 0;
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for read access or write
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly)
-{
- int ret, i;
-
- if (!obj->active)
- return 0;
-
- if (readonly) {
- if (obj->last_write_req != NULL) {
- ret = i915_wait_request(obj->last_write_req);
- if (ret)
- return ret;
-
- i = obj->last_write_req->engine->id;
- if (obj->last_read_req[i] == obj->last_write_req)
- i915_gem_object_retire__read(obj, i);
- else
- i915_gem_object_retire__write(obj);
- }
- } else {
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- if (obj->last_read_req[i] == NULL)
- continue;
-
- ret = i915_wait_request(obj->last_read_req[i]);
- if (ret)
- return ret;
-
- i915_gem_object_retire__read(obj, i);
- }
- GEM_BUG_ON(obj->active);
- }
-
- return 0;
-}
-
-static void
-i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req)
-{
- int ring = req->engine->id;
-
- if (obj->last_read_req[ring] == req)
- i915_gem_object_retire__read(obj, ring);
- else if (obj->last_write_req == req)
- i915_gem_object_retire__write(obj);
-
- if (!i915_reset_in_progress(&req->i915->gpu_error))
- __i915_gem_request_retire__upto(req);
-}
-
-/* A nonblocking variant of the above wait. This is a highly dangerous routine
- * as the object state may change during this call.
- */
-static __must_check int
-i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
- struct intel_rps_client *rps,
- bool readonly)
-{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
- int ret, i, n = 0;
-
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(!dev_priv->mm.interruptible);
-
- if (!obj->active)
- return 0;
-
- if (readonly) {
- struct drm_i915_gem_request *req;
-
- req = obj->last_write_req;
- if (req == NULL)
- return 0;
-
- requests[n++] = i915_gem_request_reference(req);
- } else {
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct drm_i915_gem_request *req;
-
- req = obj->last_read_req[i];
- if (req == NULL)
- continue;
-
- requests[n++] = i915_gem_request_reference(req);
- }
- }
-
- mutex_unlock(&dev->struct_mutex);
- ret = 0;
- for (i = 0; ret == 0 && i < n; i++)
- ret = __i915_wait_request(requests[i], true, NULL, rps);
- mutex_lock(&dev->struct_mutex);
-
- for (i = 0; i < n; i++) {
- if (ret == 0)
- i915_gem_object_retire_request(obj, requests[i]);
- i915_gem_request_unreference(requests[i]);
- }
+err_rpm:
+ intel_runtime_pm_put(dev_priv);
+err:
+ i915_gem_object_put_unlocked(obj);
return ret;
}
-static struct intel_rps_client *to_rps_client(struct drm_file *file)
-{
- struct drm_i915_file_private *fpriv = file->driver_priv;
- return &fpriv->rps;
-}
-
-static enum fb_op_origin
+static inline enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
- return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
- ORIGIN_GTT : ORIGIN_CPU;
+ return (domain == I915_GEM_DOMAIN_GTT ?
+ obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
/**
@@ -1831,10 +1520,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
int ret;
/* Only handle setting domains to types used by the CPU. */
- if (write_domain & I915_GEM_GPU_DOMAINS)
- return -EINVAL;
-
- if (read_domains & I915_GEM_GPU_DOMAINS)
+ if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
return -EINVAL;
/* Having something in the write domain implies it's in the read
@@ -1843,25 +1529,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (write_domain != 0 && read_domains != write_domain)
return -EINVAL;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
/* Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
- ret = i915_gem_object_wait_rendering__nonblocking(obj,
- to_rps_client(file),
- !write_domain);
+ ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
if (ret)
- goto unref;
+ goto err;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto err;
if (read_domains & I915_GEM_DOMAIN_GTT)
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
@@ -1871,11 +1553,13 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (write_domain != 0)
intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
-unref:
- drm_gem_object_unreference(&obj->base);
-unlock:
+ i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
+
+err:
+ i915_gem_object_put_unlocked(obj);
+ return ret;
}
/**
@@ -1890,26 +1574,23 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_sw_finish *args = data;
struct drm_i915_gem_object *obj;
- int ret = 0;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ int err = 0;
- obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
/* Pinned buffers may be scanout, so flush the cache */
- if (obj->pin_display)
- i915_gem_object_flush_cpu_write_domain(obj);
+ if (READ_ONCE(obj->pin_display)) {
+ err = i915_mutex_lock_interruptible(dev);
+ if (!err) {
+ i915_gem_object_flush_cpu_write_domain(obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+ }
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ i915_gem_object_put_unlocked(obj);
+ return err;
}
/**
@@ -1937,7 +1618,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_mmap *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
unsigned long addr;
if (args->flags & ~(I915_MMAP_WC))
@@ -1946,19 +1627,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
return -ENODEV;
- obj = drm_gem_object_lookup(file, args->handle);
- if (obj == NULL)
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
return -ENOENT;
/* prime objects have no backing filp to GEM mmap
* pages from.
*/
- if (!obj->filp) {
- drm_gem_object_unreference_unlocked(obj);
+ if (!obj->base.filp) {
+ i915_gem_object_put_unlocked(obj);
return -EINVAL;
}
- addr = vm_mmap(obj->filp, 0, args->size,
+ addr = vm_mmap(obj->base.filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
if (args->flags & I915_MMAP_WC) {
@@ -1966,7 +1647,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct vm_area_struct *vma;
if (down_write_killable(&mm->mmap_sem)) {
- drm_gem_object_unreference_unlocked(obj);
+ i915_gem_object_put_unlocked(obj);
return -EINTR;
}
vma = find_vma(mm, addr);
@@ -1978,9 +1659,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
up_write(&mm->mmap_sem);
/* This may race, but that's ok, it only gets set */
- WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
+ WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
- drm_gem_object_unreference_unlocked(obj);
+ i915_gem_object_put_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1989,9 +1670,69 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
+{
+ u64 size;
+
+ size = i915_gem_object_get_stride(obj);
+ size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
+
+ return size >> PAGE_SHIFT;
+}
+
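tile_row_pages() converts one tile row into pages: a Y-tile spans 32 scanlines, an X-tile 8, so a row covers stride * rows bytes. Worked example assuming 4 KiB pages (PAGE_SHIFT == 12): a 4096-byte stride with Y tiling gives 4096 * 32 = 128 KiB, i.e. 32 pages, so the partial-view chunks below get rounded up to that multiple. As a sketch:

#include <stdio.h>

int main(void)
{
	unsigned long stride = 4096;	/* bytes per scanline */
	unsigned long rows = 32;	/* Y tiling; X tiling uses 8 */

	printf("%lu pages per tile row\n", stride * rows >> 12);	/* -> 32 */
	return 0;
}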
+/**
+ * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
+ *
+ * A history of the GTT mmap interface:
+ *
* 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
+ * aligned and suitable for fencing, and still fit into the available
+ * mappable space left by the pinned display objects. A classic problem
+ * we called the page-fault-of-doom where we would ping-pong between
+ * two objects that could not fit inside the GTT and so the memcpy
+ * would page one object in at the expense of the other between every
+ * single byte.
+ *
+ * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
+ * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
+ * object is too large for the available space (or simply too large
+ * for the mappable aperture!), a view is created instead and faulted
+ * into userspace. (This view is aligned and sized appropriately for
+ * fenced access.)
+ *
+ * Restrictions:
+ *
+ * * snoopable objects cannot be accessed via the GTT. It can cause machine
+ * hangs on some architectures, corruption on others. An attempt to service
+ * a GTT page fault from a snoopable object will generate a SIGBUS.
+ *
+ * * the object must be able to fit into RAM (physical memory, though not
+ * limited to the mappable aperture).
+ *
+ *
+ * Caveats:
+ *
+ * * a new GTT page fault will synchronize rendering from the GPU and flush
+ * all data to system memory. Subsequent access will not be synchronized.
+ *
+ * * all mappings are revoked on runtime device suspend.
+ *
+ * * there are only 8, 16 or 32 fence registers to share between all users
+ * (older machines require a fence register for display and blitter access
+ * as well). Contention of the fence registers will cause the previous users
+ * to be unmapped and any new access will generate new page faults.
+ *
+ * * running out of memory while servicing a fault may generate a SIGBUS,
+ * rather than the expected SIGSEGV.
+ */
+int i915_gem_mmap_gtt_version(void)
+{
+ return 1;
+}
+
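Userspace can ask which of the behaviours documented above it will get. A minimal sketch against libdrm (assumes headers new enough to define I915_PARAM_MMAP_GTT_VERSION, which this series introduces; the render-node path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* illustrative node */
	int version = 0;	/* kernels without the parameter return an error */
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &version,
	};

	if (fd >= 0 && drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("GTT mmap version: %d\n", version);
	if (fd >= 0)
		close(fd);
	return 0;
}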
/**
* i915_gem_fault - fault a page into the GTT
- * @vma: VMA in question
+ * @area: CPU VMA in question
* @vmf: fault info
*
* The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
@@ -2004,122 +1745,120 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* from the GTT and/or fence registers to make room. So performance may
* suffer if the GTT working set is large or there are few fence registers
* left.
+ *
+ * The current feature set supported by i915_gem_fault() and thus GTT mmaps
+ * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
*/
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
- struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+ struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_ggtt_view view = i915_ggtt_view_normal;
- pgoff_t page_offset;
- unsigned long pfn;
- int ret = 0;
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
-
- intel_runtime_pm_get(dev_priv);
+ struct i915_vma *vma;
+ pgoff_t page_offset;
+ unsigned int flags;
+ int ret;
/* We don't use vmf->pgoff since that has the fake offset */
- page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+ page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
PAGE_SHIFT;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto out;
-
trace_i915_gem_object_fault(obj, page_offset, true, write);
/* Try to flush the object off the GPU first without holding the lock.
- * Upon reacquiring the lock, we will perform our sanity checks and then
+ * Upon acquiring the lock, we will perform our sanity checks and then
* repeat the flush holding the lock in the normal manner to catch cases
* where we are gazumped.
*/
- ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
+ ret = __unsafe_wait_rendering(obj, NULL, !write);
if (ret)
- goto unlock;
+ goto err;
+
+ intel_runtime_pm_get(dev_priv);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto err_rpm;
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
ret = -EFAULT;
- goto unlock;
+ goto err_unlock;
}
- /* Use a partial view if the object is bigger than the aperture. */
- if (obj->base.size >= ggtt->mappable_end &&
- obj->tiling_mode == I915_TILING_NONE) {
- static const unsigned int chunk_size = 256; // 1 MiB
+ /* If the object is smaller than a couple of partial vma, it is
+ * not worth only creating a single partial vma - we may as well
+ * clear enough space for the full object.
+ */
+ flags = PIN_MAPPABLE;
+ if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
+ flags |= PIN_NONBLOCK | PIN_NONFAULT;
+
+ /* Now pin it into the GTT as needed */
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+ if (IS_ERR(vma)) {
+ struct i915_ggtt_view view;
+ unsigned int chunk_size;
+
+ /* Use a partial view if it is bigger than available space */
+ chunk_size = MIN_CHUNK_PAGES;
+ if (i915_gem_object_is_tiled(obj))
+ chunk_size = roundup(chunk_size, tile_row_pages(obj));
memset(&view, 0, sizeof(view));
view.type = I915_GGTT_VIEW_PARTIAL;
view.params.partial.offset = rounddown(page_offset, chunk_size);
view.params.partial.size =
- min_t(unsigned int,
- chunk_size,
- (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+ min_t(unsigned int, chunk_size,
+ (area->vm_end - area->vm_start) / PAGE_SIZE -
view.params.partial.offset);
- }
- /* Now pin it into the GTT if needed */
- ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
- if (ret)
- goto unlock;
+ /* If the partial covers the entire object, just create a
+ * normal VMA.
+ */
+ if (chunk_size >= obj->base.size >> PAGE_SHIFT)
+ view.type = I915_GGTT_VIEW_NORMAL;
+
+ /* Userspace is now writing through an untracked VMA, abandon
+ * all hope that the hardware is able to track future writes.
+ */
+ obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ }
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unlock;
+ }
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
- goto unpin;
+ goto err_unpin;
- ret = i915_gem_object_get_fence(obj);
+ ret = i915_vma_get_fence(vma);
if (ret)
- goto unpin;
+ goto err_unpin;
/* Finally, remap it using the new GTT offset */
- pfn = ggtt->mappable_base +
- i915_gem_obj_ggtt_offset_view(obj, &view);
- pfn >>= PAGE_SHIFT;
-
- if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
- /* Overriding existing pages in partial view does not cause
- * us any trouble as TLBs are still valid because the fault
- * is due to userspace losing part of the mapping or never
- * having accessed it before (at this partial's range).
- */
- unsigned long base = vma->vm_start +
- (view.params.partial.offset << PAGE_SHIFT);
- unsigned int i;
-
- for (i = 0; i < view.params.partial.size; i++) {
- ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
- if (ret)
- break;
- }
-
- obj->fault_mappable = true;
- } else {
- if (!obj->fault_mappable) {
- unsigned long size = min_t(unsigned long,
- vma->vm_end - vma->vm_start,
- obj->base.size);
- int i;
-
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- ret = vm_insert_pfn(vma,
- (unsigned long)vma->vm_start + i * PAGE_SIZE,
- pfn + i);
- if (ret)
- break;
- }
+ ret = remap_io_mapping(area,
+ area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
+ (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+ min_t(u64, vma->size, area->vm_end - area->vm_start),
+ &ggtt->mappable);
+ if (ret)
+ goto err_unpin;
- obj->fault_mappable = true;
- } else
- ret = vm_insert_pfn(vma,
- (unsigned long)vmf->virtual_address,
- pfn + page_offset);
- }
-unpin:
- i915_gem_object_ggtt_unpin_view(obj, &view);
-unlock:
+ obj->fault_mappable = true;
+err_unpin:
+ __i915_vma_unpin(vma);
+err_unlock:
mutex_unlock(&dev->struct_mutex);
-out:
+err_rpm:
+ intel_runtime_pm_put(dev_priv);
+err:
switch (ret) {
case -EIO:
/*
@@ -2160,8 +1899,6 @@ out:
ret = VM_FAULT_SIGBUS;
break;
}
-
- intel_runtime_pm_put(dev_priv);
return ret;
}
@@ -2215,46 +1952,58 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
i915_gem_release_mmap(obj);
}
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+/**
+ * i915_gem_get_ggtt_size - return required global GTT size for an object
+ * @dev_priv: i915 device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ *
+ * Return the required global GTT size for an object, taking into account
+ * potential fence register mapping.
+ */
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
+ u64 size, int tiling_mode)
{
- uint32_t gtt_size;
+ u64 ggtt_size;
+
+ GEM_BUG_ON(size == 0);
- if (INTEL_INFO(dev)->gen >= 4 ||
+ if (INTEL_GEN(dev_priv) >= 4 ||
tiling_mode == I915_TILING_NONE)
return size;
/* Previous chips need a power-of-two fence region when tiling */
- if (IS_GEN3(dev))
- gtt_size = 1024*1024;
+ if (IS_GEN3(dev_priv))
+ ggtt_size = 1024*1024;
else
- gtt_size = 512*1024;
+ ggtt_size = 512*1024;
- while (gtt_size < size)
- gtt_size <<= 1;
+ while (ggtt_size < size)
+ ggtt_size <<= 1;
- return gtt_size;
+ return ggtt_size;
}
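Worked example of the sizing rule above (a sketch mirroring the loop; pre-gen4 fence regions must be power-of-two sized): a 700 KiB tiled object starts from 1 MiB on gen3 and already fits, while on gen2 it starts from 512 KiB and is doubled once to 1 MiB.

#include <stdio.h>

static unsigned long long fence_size(unsigned long long base,
				     unsigned long long size)
{
	while (base < size)	/* grow to the next power of two that fits */
		base <<= 1;
	return base;
}

int main(void)
{
	/* gen2 base of 512 KiB, 700 KiB object -> 1 MiB fence region */
	printf("%llu\n", fence_size(512ULL << 10, 700ULL << 10));
	return 0;
}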
/**
- * i915_gem_get_gtt_alignment - return required GTT alignment for an object
- * @dev: drm device
+ * i915_gem_get_ggtt_alignment - return required global GTT alignment
+ * @dev_priv: i915 device
* @size: object size
* @tiling_mode: tiling mode
- * @fenced: is fenced alignemned required or not
+ * @fenced: is fenced alignment required or not
*
- * Return the required GTT alignment for an object, taking into account
+ * Return the required global GTT alignment for an object, taking into account
* potential fence register mapping.
*/
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
- int tiling_mode, bool fenced)
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
+ int tiling_mode, bool fenced)
{
+ GEM_BUG_ON(size == 0);
+
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+ if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
tiling_mode == I915_TILING_NONE)
return 4096;
@@ -2262,42 +2011,34 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- return i915_gem_get_gtt_size(dev, size, tiling_mode);
+ return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- int ret;
-
- dev_priv->mm.shrinker_no_lock_stealing = true;
+ int err;
- ret = drm_gem_create_mmap_offset(&obj->base);
- if (ret != -ENOSPC)
- goto out;
+ err = drm_gem_create_mmap_offset(&obj->base);
+ if (!err)
+ return 0;
- /* Badly fragmented mmap space? The only way we can recover
- * space is by destroying unwanted objects. We can't randomly release
- * mmap_offsets as userspace expects them to be persistent for the
- * lifetime of the objects. The closest we can do is to release the
- * offsets on purgeable objects by truncating it and marking it purged,
- * which prevents userspace from ever using that object again.
+ /* We can idle the GPU locklessly to flush stale objects, but in order
+ * to claim that space for ourselves, we need to take the big
+ * struct_mutex to free the requests+objects and allocate our slot.
*/
- i915_gem_shrink(dev_priv,
- obj->base.size >> PAGE_SHIFT,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
- ret = drm_gem_create_mmap_offset(&obj->base);
- if (ret != -ENOSPC)
- goto out;
+ err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+ if (err)
+ return err;
- i915_gem_shrink_all(dev_priv);
- ret = drm_gem_create_mmap_offset(&obj->base);
-out:
- dev_priv->mm.shrinker_no_lock_stealing = false;
+ err = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (!err) {
+ i915_gem_retire_requests(dev_priv);
+ err = drm_gem_create_mmap_offset(&obj->base);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ }
- return ret;
+ return err;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2314,32 +2055,15 @@ i915_gem_mmap_gtt(struct drm_file *file,
struct drm_i915_gem_object *obj;
int ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(file, handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
-
- if (obj->madv != I915_MADV_WILLNEED) {
- DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
- ret = -EFAULT;
- goto out;
- }
+ obj = i915_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
ret = i915_gem_object_create_mmap_offset(obj);
- if (ret)
- goto out;
-
- *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+ if (ret == 0)
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
-out:
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_object_put_unlocked(obj);
return ret;
}
@@ -2457,7 +2181,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages_pin_count)
return -EBUSY;
- BUG_ON(i915_gem_obj_bound_any(obj));
+ GEM_BUG_ON(obj->bind_count);
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
@@ -2465,10 +2189,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
list_del(&obj->global_list);
if (obj->mapping) {
- if (is_vmalloc_addr(obj->mapping))
- vunmap(obj->mapping);
+ void *ptr;
+
+ ptr = ptr_mask_bits(obj->mapping);
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
else
- kunmap(kmap_to_page(obj->mapping));
+ kunmap(kmap_to_page(ptr));
+
obj->mapping = NULL;
}
@@ -2577,7 +2305,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
- if (obj->tiling_mode != I915_TILING_NONE &&
+ if (i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
i915_gem_object_pin_pages(obj);
@@ -2641,7 +2369,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->pages;
@@ -2650,10 +2379,11 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
struct page *stack_pages[32];
struct page **pages = stack_pages;
unsigned long i = 0;
+ pgprot_t pgprot;
void *addr;
/* A single page can always be kmapped */
- if (n_pages == 1)
+ if (n_pages == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
if (n_pages > ARRAY_SIZE(stack_pages)) {
@@ -2669,7 +2399,15 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
/* Check that we have the expected number of pages */
GEM_BUG_ON(i != n_pages);
- addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+ switch (type) {
+ case I915_MAP_WB:
+ pgprot = PAGE_KERNEL;
+ break;
+ case I915_MAP_WC:
+ pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
+ break;
+ }
+ addr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack_pages)
drm_free_large(pages);
@@ -2678,276 +2416,89 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
}
/* get, pin, and map the pages of the object into kernel space */
-void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
{
+ enum i915_map_type has_type;
+ bool pinned;
+ void *ptr;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
ret = i915_gem_object_get_pages(obj);
if (ret)
return ERR_PTR(ret);
i915_gem_object_pin_pages(obj);
+ pinned = obj->pages_pin_count > 1;
- if (!obj->mapping) {
- obj->mapping = i915_gem_object_map(obj);
- if (!obj->mapping) {
- i915_gem_object_unpin_pages(obj);
- return ERR_PTR(-ENOMEM);
+ ptr = ptr_unpack_bits(obj->mapping, has_type);
+ if (ptr && has_type != type) {
+ if (pinned) {
+ ret = -EBUSY;
+ goto err;
}
- }
- return obj->mapping;
-}
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- struct intel_engine_cs *engine;
+ ptr = obj->mapping = NULL;
+ }
- engine = i915_gem_request_get_engine(req);
+ if (!ptr) {
+ ptr = i915_gem_object_map(obj, type);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto err;
+ }
- /* Add a reference if we're newly entering the active list. */
- if (obj->active == 0)
- drm_gem_object_reference(&obj->base);
- obj->active |= intel_engine_flag(engine);
+ obj->mapping = ptr_pack_bits(ptr, type);
+ }
- list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
- i915_gem_request_assign(&obj->last_read_req[engine->id], req);
+ return ptr;
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
+err:
+ i915_gem_object_unpin_pages(obj);
+ return ERR_PTR(ret);
}
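i915_gem_object_pin_map() now caches the kernel mapping together with its map type in the low pointer bits (via ptr_pack_bits()); requesting a different type remaps the object unless another pin is outstanding, in which case it fails with -EBUSY. An illustrative kernel-style fragment, not compilable standalone (assumes struct_mutex is held, as the lockdep assertion requires, and that i915_gem_object_unpin_map() is the matching release):

/* Sketch: map an object write-back, read the first dword, release. */
static int read_first_dword(struct drm_i915_gem_object *obj, u32 *out)
{
	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	*out = *(u32 *)vaddr;

	/* A later pin_map(obj, I915_MAP_WC) would remap the object,
	 * or return -EBUSY while this pin is still outstanding. */
	i915_gem_object_unpin_map(obj);
	return 0;
}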
static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
+i915_gem_object_retire__write(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
{
- GEM_BUG_ON(obj->last_write_req == NULL);
- GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
+ struct drm_i915_gem_object *obj =
+ container_of(active, struct drm_i915_gem_object, last_write);
- i915_gem_request_assign(&obj->last_write_req, NULL);
intel_fb_obj_flush(obj, true, ORIGIN_CS);
}
static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
+i915_gem_object_retire__read(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
{
- struct i915_vma *vma;
-
- GEM_BUG_ON(obj->last_read_req[ring] == NULL);
- GEM_BUG_ON(!(obj->active & (1 << ring)));
+ int idx = request->engine->id;
+ struct drm_i915_gem_object *obj =
+ container_of(active, struct drm_i915_gem_object, last_read[idx]);
- list_del_init(&obj->engine_list[ring]);
- i915_gem_request_assign(&obj->last_read_req[ring], NULL);
+ GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
- if (obj->last_write_req && obj->last_write_req->engine->id == ring)
- i915_gem_object_retire__write(obj);
-
- obj->active &= ~(1 << ring);
- if (obj->active)
+ i915_gem_object_clear_active(obj, idx);
+ if (i915_gem_object_is_active(obj))
return;
/* Bump our place on the bound list to keep it roughly in LRU order
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
- list_move_tail(&obj->global_list,
- &to_i915(obj->base.dev)->mm.bound_list);
-
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!list_empty(&vma->vm_link))
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
- }
-
- i915_gem_request_assign(&obj->last_fenced_req, NULL);
- drm_gem_object_unreference(&obj->base);
-}
-
-static int
-i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
-{
- struct intel_engine_cs *engine;
- int ret;
-
- /* Carefully retire all requests without writing to the rings */
- for_each_engine(engine, dev_priv) {
- ret = intel_engine_idle(engine);
- if (ret)
- return ret;
- }
- i915_gem_retire_requests(dev_priv);
-
- /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
- while (intel_kick_waiters(dev_priv) ||
- intel_kick_signalers(dev_priv))
- yield();
- }
-
- /* Finally reset hw state */
- for_each_engine(engine, dev_priv)
- intel_ring_init_seqno(engine, seqno);
-
- return 0;
-}
-
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
-
- if (seqno == 0)
- return -EINVAL;
-
- /* HWS page needs to be set less than what we
- * will inject to ring
- */
- ret = i915_gem_init_seqno(dev_priv, seqno - 1);
- if (ret)
- return ret;
-
- /* Carefully set the last_seqno value so that wrap
- * detection still works
- */
- dev_priv->next_seqno = seqno;
- dev_priv->last_seqno = seqno - 1;
- if (dev_priv->last_seqno == 0)
- dev_priv->last_seqno--;
+ if (obj->bind_count)
+ list_move_tail(&obj->global_list,
+ &request->i915->mm.bound_list);
- return 0;
-}
-
-int
-i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
-{
- /* reserve 0 for non-seqno */
- if (dev_priv->next_seqno == 0) {
- int ret = i915_gem_init_seqno(dev_priv, 0);
- if (ret)
- return ret;
-
- dev_priv->next_seqno = 1;
- }
-
- *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
- return 0;
-}
-
-static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- dev_priv->gt.active_engines |= intel_engine_flag(engine);
- if (dev_priv->gt.awake)
- return;
-
- intel_runtime_pm_get_noresume(dev_priv);
- dev_priv->gt.awake = true;
-
- i915_update_gfx_val(dev_priv);
- if (INTEL_GEN(dev_priv) >= 6)
- gen6_rps_busy(dev_priv);
-
- queue_delayed_work(dev_priv->wq,
- &dev_priv->gt.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
-/*
- * NB: This function is not allowed to fail. Doing so would mean the
- * request is not being tracked for completion but the work itself is
- * going to happen on the hardware. This would be a Bad Thing(tm).
- */
-void __i915_add_request(struct drm_i915_gem_request *request,
- struct drm_i915_gem_object *obj,
- bool flush_caches)
-{
- struct intel_engine_cs *engine;
- struct intel_ringbuffer *ringbuf;
- u32 request_start;
- u32 reserved_tail;
- int ret;
-
- if (WARN_ON(request == NULL))
- return;
-
- engine = request->engine;
- ringbuf = request->ringbuf;
-
- /*
- * To ensure that this call will not fail, space for its emissions
- * should already have been reserved in the ring buffer. Let the ring
- * know that it is time to use that space up.
- */
- request_start = intel_ring_get_tail(ringbuf);
- reserved_tail = request->reserved_space;
- request->reserved_space = 0;
-
- /*
- * Emit any outstanding flushes - execbuf can fail to emit the flush
- * after having emitted the batchbuffer command. Hence we need to fix
- * things up similar to emitting the lazy request. The difference here
- * is that the flush _must_ happen before the next request, no matter
- * what.
- */
- if (flush_caches) {
- if (i915.enable_execlists)
- ret = logical_ring_flush_all_caches(request);
- else
- ret = intel_ring_flush_all_caches(request);
- /* Not allowed to fail! */
- WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
- }
-
- trace_i915_gem_request_add(request);
-
- request->head = request_start;
-
- /* Whilst this request exists, batch_obj will be on the
- * active_list, and so will hold the active reference. Only when this
- * request is retired will the batch_obj be moved onto the
- * inactive_list and lose its active reference. Hence we do not need
- * to explicitly hold another reference here.
- */
- request->batch_obj = obj;
-
- /* Seal the request and mark it as pending execution. Note that
- * we may inspect this state, without holding any locks, during
- * hangcheck. Hence we apply the barrier to ensure that we do not
- * see a more recent value in the hws than we are tracking.
- */
- request->emitted_jiffies = jiffies;
- request->previous_seqno = engine->last_submitted_seqno;
- smp_store_mb(engine->last_submitted_seqno, request->seqno);
- list_add_tail(&request->list, &engine->request_list);
-
- /* Record the position of the start of the request so that
- * should we detect the updated seqno part-way through the
- * GPU processing the request, we never over-estimate the
- * position of the head.
- */
- request->postfix = intel_ring_get_tail(ringbuf);
-
- if (i915.enable_execlists)
- ret = engine->emit_request(request);
- else {
- ret = engine->add_request(request);
-
- request->tail = intel_ring_get_tail(ringbuf);
- }
- /* Not allowed to fail! */
- WARN(ret, "emit|add_request failed: %d!\n", ret);
- /* Sanity check that the reserved size was large enough. */
- ret = intel_ring_get_tail(ringbuf) - request_start;
- if (ret < 0)
- ret += ringbuf->size;
- WARN_ONCE(ret > reserved_tail,
- "Not enough space reserved (%d bytes) "
- "for adding the request (%d bytes)\n",
- reserved_tail, ret);
-
- i915_gem_mark_busy(engine);
+ i915_gem_object_put(obj);
}
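/* For context: these retirement callbacks are wired up once per object
 * when its activity trackers are initialised. A minimal sketch of that
 * hookup (the helper name init_request_active() and its home in
 * i915_gem_object_init() are assumptions, shown only for illustration):
 *
 * int i;
 *
 * for (i = 0; i < I915_NUM_ENGINES; i++)
 * init_request_active(&obj->last_read[i],
 * i915_gem_object_retire__read);
 * init_request_active(&obj->last_write,
 * i915_gem_object_retire__write);
 *
 * container_of() then recovers the object from the embedded
 * i915_gem_active when a tracked request retires.
 */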
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
@@ -2981,101 +2532,6 @@ static void i915_set_reset_status(struct i915_gem_context *ctx,
}
}
-void i915_gem_request_free(struct kref *req_ref)
-{
- struct drm_i915_gem_request *req = container_of(req_ref,
- typeof(*req), ref);
- kmem_cache_free(req->i915->requests, req);
-}
-
-static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- struct drm_i915_gem_request **req_out)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
- struct drm_i915_gem_request *req;
- int ret;
-
- if (!req_out)
- return -EINVAL;
-
- *req_out = NULL;
-
- /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
- * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
- * and restart.
- */
- ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
- if (ret)
- return ret;
-
- req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
- if (req == NULL)
- return -ENOMEM;
-
- ret = i915_gem_get_seqno(engine->i915, &req->seqno);
- if (ret)
- goto err;
-
- kref_init(&req->ref);
- req->i915 = dev_priv;
- req->engine = engine;
- req->ctx = ctx;
- i915_gem_context_reference(req->ctx);
-
- /*
- * Reserve space in the ring buffer for all the commands required to
- * eventually emit this request. This is to guarantee that the
- * i915_add_request() call can't fail. Note that the reserve may need
- * to be redone if the request is not actually submitted straight
- * away, e.g. because a GPU scheduler has deferred it.
- */
- req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-
- if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req);
- else
- ret = intel_ring_alloc_request_extras(req);
- if (ret)
- goto err_ctx;
-
- *req_out = req;
- return 0;
-
-err_ctx:
- i915_gem_context_unreference(ctx);
-err:
- kmem_cache_free(dev_priv->requests, req);
- return ret;
-}
-
-/**
- * i915_gem_request_alloc - allocate a request structure
- *
- * @engine: engine that we wish to issue the request on.
- * @ctx: context that the request will be associated with.
- * This can be NULL if the request is not directly related to
- * any specific user context, in which case this function will
- * choose an appropriate context to use.
- *
- * Returns a pointer to the allocated request if successful,
- * or an error code if not.
- */
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
-{
- struct drm_i915_gem_request *req;
- int err;
-
- if (ctx == NULL)
- ctx = engine->i915->kernel_context;
- err = __i915_gem_request_alloc(engine, ctx, &req);
- return err ? ERR_PTR(err) : req;
-}
-
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
@@ -3089,185 +2545,143 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
- list_for_each_entry(request, &engine->request_list, list) {
+ list_for_each_entry(request, &engine->request_list, link) {
if (i915_gem_request_completed(request))
continue;
+ if (!i915_sw_fence_done(&request->submit))
+ break;
+
return request;
}
return NULL;
}
-static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
+static void reset_request(struct drm_i915_gem_request *request)
+{
+ void *vaddr = request->ring->vaddr;
+ u32 head;
+
+ /* As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ head = request->head;
+ if (request->postfix < head) {
+ memset(vaddr + head, 0, request->ring->size - head);
+ head = 0;
+ }
+ memset(vaddr + head, 0, request->postfix - head);
+}
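+/* A worked example of the clears above: with a 4096 byte ring, a hung
+ * request with head == 3840 and postfix == 256 has wrapped, so we
+ * memset [3840, 4096) and then [0, 256); with head == 256 and
+ * postfix == 512 we memset just [256, 512). Only the breadcrumb
+ * emitted at the postfix survives, preserving the fence notification.
+ */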
+
+static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
+ struct i915_gem_context *incomplete_ctx;
bool ring_hung;
+ /* Ensure the irq handler finishes, and is not run again. */
+ tasklet_kill(&engine->irq_tasklet);
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
request = i915_gem_find_active_request(engine);
- if (request == NULL)
+ if (!request)
return;
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
-
i915_set_reset_status(request->ctx, ring_hung);
- list_for_each_entry_continue(request, &engine->request_list, list)
- i915_set_reset_status(request->ctx, false);
-}
-
-static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
-{
- struct intel_ringbuffer *buffer;
-
- while (!list_empty(&engine->active_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&engine->active_list,
- struct drm_i915_gem_object,
- engine_list[engine->id]);
-
- i915_gem_object_retire__read(obj, engine->id);
- }
-
- /*
- * Clear the execlists queue up before freeing the requests, as those
- * are the ones that keep the context and ringbuffer backing objects
- * pinned in place.
- */
-
- if (i915.enable_execlists) {
- /* Ensure irq handler finishes or is cancelled. */
- tasklet_kill(&engine->irq_tasklet);
-
- intel_execlists_cancel_requests(engine);
- }
-
- /*
- * We must free the requests after all the corresponding objects have
- * been moved off active lists. Which is the same order as the normal
- * retire_requests function does. This is important if object hold
- * implicit references on things like e.g. ppgtt address spaces through
- * the request.
- */
- while (!list_empty(&engine->request_list)) {
- struct drm_i915_gem_request *request;
+ if (!ring_hung)
+ return;
- request = list_first_entry(&engine->request_list,
- struct drm_i915_gem_request,
- list);
+ DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
+ engine->name, request->fence.seqno);
- i915_gem_request_retire(request);
- }
+ /* Setup the CS to resume from the breadcrumb of the hung request */
+ engine->reset_hw(engine, request);
- /* Having flushed all requests from all queues, we know that all
- * ringbuffers must now be empty. However, since we do not reclaim
- * all space when retiring the request (to prevent HEADs colliding
- * with rapid ringbuffer wraparound) the amount of available space
- * upon reset is less than when we start. Do one more pass over
- * all the ringbuffers to reset last_retired_head.
+ /* Users of the default context do not rely on logical state
+ * being preserved between batches. They have to emit full state on
+ * every batch and so it is safe to execute queued requests following
+ * the hang.
+ *
+ * Other contexts preserve state, now corrupt. We want to skip all
+ * queued requests that reference the corrupt context.
*/
- list_for_each_entry(buffer, &engine->buffers, link) {
- buffer->last_retired_head = buffer->tail;
- intel_ring_update_space(buffer);
- }
-
- intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+ incomplete_ctx = request->ctx;
+ if (i915_gem_context_is_default(incomplete_ctx))
+ return;
- engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
+ list_for_each_entry_continue(request, &engine->request_list, link)
+ if (request->ctx == incomplete_ctx)
+ reset_request(request);
}
-void i915_gem_reset(struct drm_device *dev)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
- /*
- * Before we free the objects from the requests, we need to inspect
- * them for finding the guilty party. As the requests only borrow
- * their reference to the objects, the inspection must be done first.
- */
- for_each_engine(engine, dev_priv)
- i915_gem_reset_engine_status(engine);
+ i915_gem_retire_requests(dev_priv);
for_each_engine(engine, dev_priv)
- i915_gem_reset_engine_cleanup(engine);
- mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
+ i915_gem_reset_engine(engine);
- i915_gem_context_reset(dev);
+ i915_gem_restore_fences(&dev_priv->drm);
- i915_gem_restore_fences(dev);
+ if (dev_priv->gt.awake) {
+ intel_sanitize_gt_powersave(dev_priv);
+ intel_enable_gt_powersave(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 6)
+ gen6_rps_busy(dev_priv);
+ }
+}
- WARN_ON(i915_verify_lists(dev));
+static void nop_submit_request(struct drm_i915_gem_request *request)
+{
}
-/**
- * This function clears the request list as sequence numbers are passed.
- * @engine: engine to retire requests on
- */
-void
-i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
+static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
{
- WARN_ON(i915_verify_lists(engine->dev));
+ engine->submit_request = nop_submit_request;
- /* Retire requests first as we use it above for the early return.
- * If we retire requests last, we may use a later seqno and so clear
- * the requests lists without clearing the active list, leading to
- * confusion.
+ /* Mark all pending requests as complete so that any concurrent
+ * (lockless) lookup doesn't try to wait upon the request as we
+ * reset it.
*/
- while (!list_empty(&engine->request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&engine->request_list,
- struct drm_i915_gem_request,
- list);
+ intel_engine_init_seqno(engine, engine->last_submitted_seqno);
- if (!i915_gem_request_completed(request))
- break;
-
- i915_gem_request_retire(request);
- }
-
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate,
- * before we free the context associated with the requests.
+ /*
+ * Clear the execlists queue up before freeing the requests, as those
+ * are the ones that keep the context and ringbuffer backing objects
+ * pinned in place.
*/
- while (!list_empty(&engine->active_list)) {
- struct drm_i915_gem_object *obj;
-
- obj = list_first_entry(&engine->active_list,
- struct drm_i915_gem_object,
- engine_list[engine->id]);
-
- if (!list_empty(&obj->last_read_req[engine->id]->list))
- break;
- i915_gem_object_retire__read(obj, engine->id);
+ if (i915.enable_execlists) {
+ spin_lock(&engine->execlist_lock);
+ INIT_LIST_HEAD(&engine->execlist_queue);
+ i915_gem_request_put(engine->execlist_port[0].request);
+ i915_gem_request_put(engine->execlist_port[1].request);
+ memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+ spin_unlock(&engine->execlist_lock);
}
- WARN_ON(i915_verify_lists(engine->dev));
+ engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
}
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
- if (dev_priv->gt.active_engines == 0)
- return;
-
- GEM_BUG_ON(!dev_priv->gt.awake);
-
- for_each_engine(engine, dev_priv) {
- i915_gem_retire_requests_ring(engine);
- if (list_empty(&engine->request_list))
- dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
- }
+ i915_gem_context_lost(dev_priv);
+ for_each_engine(engine, dev_priv)
+ i915_gem_cleanup_engine(engine);
+ mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
- if (dev_priv->gt.active_engines == 0)
- queue_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
- msecs_to_jiffies(100));
+ i915_gem_retire_requests(dev_priv);
}
static void
@@ -3287,10 +2701,12 @@ i915_gem_retire_work_handler(struct work_struct *work)
* We do not need to do this test under locking as in the worst-case
* we queue the retire worker once too often.
*/
- if (READ_ONCE(dev_priv->gt.awake))
+ if (READ_ONCE(dev_priv->gt.awake)) {
+ i915_queue_hangcheck(dev_priv);
queue_delayed_work(dev_priv->wq,
&dev_priv->gt.retire_work,
round_jiffies_up_relative(HZ));
+ }
}
static void
@@ -3300,7 +2716,6 @@ i915_gem_idle_work_handler(struct work_struct *work)
container_of(work, typeof(*dev_priv), gt.idle_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
- unsigned int stuck_engines;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
@@ -3330,12 +2745,6 @@ i915_gem_idle_work_handler(struct work_struct *work)
dev_priv->gt.awake = false;
rearm_hangcheck = false;
- stuck_engines = intel_kick_waiters(dev_priv);
- if (unlikely(stuck_engines)) {
- DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
- dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
- }
-
if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_idle(dev_priv);
intel_runtime_pm_put(dev_priv);
@@ -3349,32 +2758,17 @@ out_rearm:
}
}
-/**
- * Ensures that an object will eventually get non-busy by flushing any required
- * write domains, emitting any outstanding lazy request and retiring any
- * completed requests.
- * @obj: object to flush
- */
-static int
-i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
- int i;
-
- if (!obj->active)
- return 0;
-
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct drm_i915_gem_request *req;
-
- req = obj->last_read_req[i];
- if (req == NULL)
- continue;
-
- if (i915_gem_request_completed(req))
- i915_gem_object_retire__read(obj, i);
- }
+ struct drm_i915_gem_object *obj = to_intel_bo(gem);
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_vma *vma, *vn;
- return 0;
+ mutex_lock(&obj->base.dev->struct_mutex);
+ list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
+ if (vma->vm->file == fpriv)
+ i915_vma_close(vma);
+ mutex_unlock(&obj->base.dev->struct_mutex);
}
/**
@@ -3405,219 +2799,35 @@ int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_gem_wait *args = data;
+ struct intel_rps_client *rps = to_rps_client(file);
struct drm_i915_gem_object *obj;
- struct drm_i915_gem_request *req[I915_NUM_ENGINES];
- int i, n = 0;
- int ret;
+ unsigned long active;
+ int idx, ret = 0;
if (args->flags != 0)
return -EINVAL;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
- if (&obj->base == NULL) {
- mutex_unlock(&dev->struct_mutex);
+ obj = i915_gem_object_lookup(file, args->bo_handle);
+ if (!obj)
return -ENOENT;
- }
-
- /* Need to make sure the object gets inactive eventually. */
- ret = i915_gem_object_flush_active(obj);
- if (ret)
- goto out;
-
- if (!obj->active)
- goto out;
-
- /* Do this after OLR check to make sure we make forward progress polling
- * on this IOCTL with a timeout == 0 (like busy ioctl)
- */
- if (args->timeout_ns == 0) {
- ret = -ETIME;
- goto out;
- }
-
- drm_gem_object_unreference(&obj->base);
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- if (obj->last_read_req[i] == NULL)
- continue;
-
- req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- for (i = 0; i < n; i++) {
- if (ret == 0)
- ret = __i915_wait_request(req[i], true,
- args->timeout_ns > 0 ? &args->timeout_ns : NULL,
- to_rps_client(file));
- i915_gem_request_unreference(req[i]);
- }
- return ret;
-
-out:
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-static int
-__i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *to,
- struct drm_i915_gem_request *from_req,
- struct drm_i915_gem_request **to_req)
-{
- struct intel_engine_cs *from;
- int ret;
-
- from = i915_gem_request_get_engine(from_req);
- if (to == from)
- return 0;
-
- if (i915_gem_request_completed(from_req))
- return 0;
-
- if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- ret = __i915_wait_request(from_req,
- i915->mm.interruptible,
- NULL,
- &i915->rps.semaphores);
- if (ret)
- return ret;
-
- i915_gem_object_retire_request(obj, from_req);
- } else {
- int idx = intel_ring_sync_index(from, to);
- u32 seqno = i915_gem_request_get_seqno(from_req);
-
- WARN_ON(!to_req);
-
- if (seqno <= from->semaphore.sync_seqno[idx])
- return 0;
-
- if (*to_req == NULL) {
- struct drm_i915_gem_request *req;
-
- req = i915_gem_request_alloc(to, NULL);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- *to_req = req;
- }
-
- trace_i915_gem_ring_sync_to(*to_req, from, from_req);
- ret = to->semaphore.sync_to(*to_req, from, seqno);
- if (ret)
- return ret;
-
- /* We use last_read_req because sync_to()
- * might have just caused seqno wrap under
- * the radar.
- */
- from->semaphore.sync_seqno[idx] =
- i915_gem_request_get_seqno(obj->last_read_req[from->id]);
- }
-
- return 0;
-}
-
-/**
- * i915_gem_object_sync - sync an object to a ring.
- *
- * @obj: object which may be in use on another ring.
- * @to: ring we wish to use the object on. May be NULL.
- * @to_req: request we wish to use the object for. See below.
- * This will be allocated and returned if a request is
- * required but not passed in.
- *
- * This code is meant to abstract object synchronization with the GPU.
- * Calling with NULL implies synchronizing the object with the CPU
- * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow one engine to write
- * into a buffer at any time, but multiple readers. To ensure each has
- * a coherent view of memory, we must:
- *
- * - If there is an outstanding write request to the object, the new
- * request must wait for it to complete (either CPU or in hw, requests
- * on the same ring will be naturally ordered).
- *
- * - If we are a write request (pending_write_domain is set), the new
- * request must wait for outstanding read requests to complete.
- *
- * For CPU synchronisation (NULL to) no request is required. For syncing with
- * rings to_req must be non-NULL. However, a request does not have to be
- * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
- * request will be allocated automatically and returned through *to_req. Note
- * that it is not guaranteed that commands will be emitted (because the system
- * might already be idle). Hence there is no need to create a request that
- * might never have any work submitted. Note further that if a request is
- * returned in *to_req, it is the responsibility of the caller to submit
- * that request (after potentially adding more work to it).
- *
- * Returns 0 if successful, else propagates up the lower layer error.
- */
-int
-i915_gem_object_sync(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *to,
- struct drm_i915_gem_request **to_req)
-{
- const bool readonly = obj->base.pending_write_domain == 0;
- struct drm_i915_gem_request *req[I915_NUM_ENGINES];
- int ret, i, n;
-
- if (!obj->active)
- return 0;
-
- if (to == NULL)
- return i915_gem_object_wait_rendering(obj, readonly);
-
- n = 0;
- if (readonly) {
- if (obj->last_write_req)
- req[n++] = obj->last_write_req;
- } else {
- for (i = 0; i < I915_NUM_ENGINES; i++)
- if (obj->last_read_req[i])
- req[n++] = obj->last_read_req[i];
- }
- for (i = 0; i < n; i++) {
- ret = __i915_gem_object_sync(obj, to, req[i], to_req);
+ active = __I915_BO_ACTIVE(obj);
+ for_each_active(active, idx) {
+ s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
+ ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
+ I915_WAIT_INTERRUPTIBLE,
+ timeout, rps);
if (ret)
- return ret;
+ break;
}
- return 0;
-}
-
-static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
-{
- u32 old_write_domain, old_read_domains;
-
- /* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
-
- if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
- return;
-
- old_read_domains = obj->base.read_domains;
- old_write_domain = obj->base.write_domain;
-
- obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
- obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
+ i915_gem_object_put_unlocked(obj);
+ return ret;
}
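/* The loop above walks a snapshot of the per-engine active mask. A
 * minimal sketch of the iterator (this exact definition is an
 * assumption, shown only to illustrate the semantics):
 *
 * #define for_each_active(mask, idx) \
 * for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
 *
 * idx visits each set bit in turn, so we only wait upon engines that
 * still had outstanding reads when the mask was sampled.
 */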
static void __i915_vma_iounmap(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->pin_count);
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
if (vma->iomap == NULL)
return;
@@ -3626,65 +2836,83 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
vma->iomap = NULL;
}
-static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
+int i915_vma_unbind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ unsigned long active;
int ret;
- if (list_empty(&vma->obj_link))
- return 0;
-
- if (!drm_mm_node_allocated(&vma->node)) {
- i915_gem_vma_destroy(vma);
- return 0;
- }
-
- if (vma->pin_count)
- return -EBUSY;
+ /* First wait upon any activity as retiring the request may
+ * have side-effects such as unpinning or even unbinding this vma.
+ */
+ active = i915_vma_get_active(vma);
+ if (active) {
+ int idx;
+
+ /* When a closed VMA is retired, it is unbound - eek.
+ * In order to prevent it from being recursively closed,
+ * take a pin on the vma so that the second unbind is
+ * aborted.
+ */
+ __i915_vma_pin(vma);
- BUG_ON(obj->pages == NULL);
+ for_each_active(active, idx) {
+ ret = i915_gem_active_retire(&vma->last_read[idx],
+ &vma->vm->dev->struct_mutex);
+ if (ret)
+ break;
+ }
- if (wait) {
- ret = i915_gem_object_wait_rendering(obj, false);
+ __i915_vma_unpin(vma);
if (ret)
return ret;
+
+ GEM_BUG_ON(i915_vma_is_active(vma));
}
- if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
- i915_gem_object_finish_gtt(obj);
+ if (i915_vma_is_pinned(vma))
+ return -EBUSY;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ goto destroy;
+
+ GEM_BUG_ON(obj->bind_count == 0);
+ GEM_BUG_ON(!obj->pages);
+ if (i915_vma_is_map_and_fenceable(vma)) {
/* release the fence reg _after_ flushing */
- ret = i915_gem_object_put_fence(obj);
+ ret = i915_vma_put_fence(vma);
if (ret)
return ret;
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
__i915_vma_iounmap(vma);
+ vma->flags &= ~I915_VMA_CAN_FENCE;
}
- trace_i915_vma_unbind(vma);
-
- vma->vm->unbind_vma(vma);
- vma->bound = 0;
-
- list_del_init(&vma->vm_link);
- if (vma->is_ggtt) {
- if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
- obj->map_and_fenceable = false;
- } else if (vma->ggtt_view.pages) {
- sg_free_table(vma->ggtt_view.pages);
- kfree(vma->ggtt_view.pages);
- }
- vma->ggtt_view.pages = NULL;
+ if (likely(!vma->vm->closed)) {
+ trace_i915_vma_unbind(vma);
+ vma->vm->unbind_vma(vma);
}
+ vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
drm_mm_remove_node(&vma->node);
- i915_gem_vma_destroy(vma);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+ if (vma->pages != obj->pages) {
+ GEM_BUG_ON(!vma->pages);
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
- if (list_empty(&obj->vma_list))
- list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->global_list,
+ &to_i915(obj->base.dev)->mm.unbound_list);
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -3692,36 +2920,28 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
*/
i915_gem_object_unpin_pages(obj);
- return 0;
-}
+destroy:
+ if (unlikely(i915_vma_is_closed(vma)))
+ i915_vma_destroy(vma);
-int i915_vma_unbind(struct i915_vma *vma)
-{
- return __i915_vma_unbind(vma, true);
-}
-
-int __i915_vma_unbind_no_wait(struct i915_vma *vma)
-{
- return __i915_vma_unbind(vma, false);
+ return 0;
}
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+ unsigned int flags)
{
struct intel_engine_cs *engine;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
for_each_engine(engine, dev_priv) {
if (engine->last_context == NULL)
continue;
- ret = intel_engine_idle(engine);
+ ret = intel_engine_idle(engine, flags);
if (ret)
return ret;
}
- WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -3759,128 +2979,87 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
}
/**
- * Finds free space in the GTT aperture and binds the object or a view of it
- * there.
- * @obj: object to bind
- * @vm: address space to bind into
- * @ggtt_view: global gtt view if applicable
- * @alignment: requested alignment
+ * i915_vma_insert - finds a slot for the vma in its address space
+ * @vma: the vma
+ * @size: requested size in bytes (can be larger than the VMA)
+ * @alignment: required alignment
* @flags: mask of PIN_* flags to use
+ *
+ * First we try to allocate some free space that meets the requirements for
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry to make room for the new VMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
*/
-static struct i915_vma *
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *ggtt_view,
- unsigned alignment,
- uint64_t flags)
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- u32 fence_alignment, unfenced_alignment;
- u32 search_flag, alloc_flag;
+ struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+ struct drm_i915_gem_object *obj = vma->obj;
u64 start, end;
- u64 size, fence_size;
- struct i915_vma *vma;
int ret;
- if (i915_is_ggtt(vm)) {
- u32 view_size;
-
- if (WARN_ON(!ggtt_view))
- return ERR_PTR(-EINVAL);
-
- view_size = i915_ggtt_view_size(obj, ggtt_view);
-
- fence_size = i915_gem_get_gtt_size(dev,
- view_size,
- obj->tiling_mode);
- fence_alignment = i915_gem_get_gtt_alignment(dev,
- view_size,
- obj->tiling_mode,
- true);
- unfenced_alignment = i915_gem_get_gtt_alignment(dev,
- view_size,
- obj->tiling_mode,
- false);
- size = flags & PIN_MAPPABLE ? fence_size : view_size;
- } else {
- fence_size = i915_gem_get_gtt_size(dev,
- obj->base.size,
- obj->tiling_mode);
- fence_alignment = i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode,
- true);
- unfenced_alignment =
- i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode,
- false);
- size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
- }
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+ size = max(size, vma->size);
+ if (flags & PIN_MAPPABLE)
+ size = i915_gem_get_ggtt_size(dev_priv, size,
+ i915_gem_object_get_tiling(obj));
+
+ alignment = max(max(alignment, vma->display_alignment),
+ i915_gem_get_ggtt_alignment(dev_priv, size,
+ i915_gem_object_get_tiling(obj),
+ flags & PIN_MAPPABLE));
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
- end = vm->total;
+
+ end = vma->vm->total;
if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, ggtt->mappable_end);
+ end = min_t(u64, end, dev_priv->ggtt.mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
- if (alignment == 0)
- alignment = flags & PIN_MAPPABLE ? fence_alignment :
- unfenced_alignment;
- if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
- DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
- ggtt_view ? ggtt_view->type : 0,
- alignment);
- return ERR_PTR(-EINVAL);
- }
-
/* If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
* attempt to find space.
*/
if (size > end) {
- DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
- ggtt_view ? ggtt_view->type : 0,
- size,
+ DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
+ size, obj->base.size,
flags & PIN_MAPPABLE ? "mappable" : "total",
end);
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
}
ret = i915_gem_object_get_pages(obj);
if (ret)
- return ERR_PTR(ret);
+ return ret;
i915_gem_object_pin_pages(obj);
- vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
- i915_gem_obj_lookup_or_create_vma(obj, vm);
-
- if (IS_ERR(vma))
- goto err_unpin;
-
if (flags & PIN_OFFSET_FIXED) {
- uint64_t offset = flags & PIN_OFFSET_MASK;
-
- if (offset & (alignment - 1) || offset + size > end) {
+ u64 offset = flags & PIN_OFFSET_MASK;
+ if (offset & (alignment - 1) || offset > end - size) {
ret = -EINVAL;
- goto err_free_vma;
+ goto err_unpin;
}
+
vma->node.start = offset;
vma->node.size = size;
vma->node.color = obj->cache_level;
- ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
if (ret) {
ret = i915_gem_evict_for_vma(vma);
if (ret == 0)
- ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+ ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+ if (ret)
+ goto err_unpin;
}
- if (ret)
- goto err_free_vma;
} else {
+ u32 search_flag, alloc_flag;
+
if (flags & PIN_HIGH) {
search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP;
@@ -3889,47 +3068,45 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
alloc_flag = DRM_MM_CREATE_DEFAULT;
}
+ /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+ * so we know that we always have a minimum alignment of 4096.
+ * The drm_mm range manager is optimised to return results
+ * with zero alignment, so where possible use the optimal
+ * path.
+ */
+ if (alignment <= 4096)
+ alignment = 0;
+
search_free:
- ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
+ ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+ &vma->node,
size, alignment,
obj->cache_level,
start, end,
search_flag,
alloc_flag);
if (ret) {
- ret = i915_gem_evict_something(dev, vm, size, alignment,
+ ret = i915_gem_evict_something(vma->vm, size, alignment,
obj->cache_level,
start, end,
flags);
if (ret == 0)
goto search_free;
- goto err_free_vma;
+ goto err_unpin;
}
}
- if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
- ret = -EINVAL;
- goto err_remove_node;
- }
-
- trace_i915_vma_bind(vma, flags);
- ret = i915_vma_bind(vma, obj->cache_level, flags);
- if (ret)
- goto err_remove_node;
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&vma->vm_link, &vm->inactive_list);
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ obj->bind_count++;
- return vma;
+ return 0;
-err_remove_node:
- drm_mm_remove_node(&vma->node);
-err_free_vma:
- i915_gem_vma_destroy(vma);
- vma = ERR_PTR(ret);
err_unpin:
i915_gem_object_unpin_pages(obj);
- return vma;
+ return ret;
}
bool
@@ -3974,51 +3151,72 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
- uint32_t old_write_domain;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
return;
/* No actual flushing is required for the GTT write domain. Writes
- * to it immediately go to main memory as far as we know, so there's
+ * to it "immediately" go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*
* However, we do have to enforce the order so that all writes through
* the GTT land before any writes to the device, such as updates to
* the GATT itself.
+ *
+ * We also have to wait a bit for the writes to land from the GTT.
+ * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+ * timing. This issue has only been observed when switching quickly
+ * between GTT writes and CPU reads from inside the kernel on recent hw,
+ * and it appears to only affect discrete GTT blocks (i.e. on LLC
+ * system agents we cannot reproduce this behaviour).
*/
wmb();
+ if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
+ POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
- old_write_domain = obj->base.write_domain;
- obj->base.write_domain = 0;
-
- intel_fb_obj_flush(obj, false, ORIGIN_GTT);
+ intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
- old_write_domain);
+ I915_GEM_DOMAIN_GTT);
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
- uint32_t old_write_domain;
-
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
if (i915_gem_clflush_object(obj, obj->pin_display))
i915_gem_chipset_flush(to_i915(obj->base.dev));
- old_write_domain = obj->base.write_domain;
- obj->base.write_domain = 0;
-
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
- old_write_domain);
+ I915_GEM_DOMAIN_CPU);
+}
+
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ continue;
+
+ if (i915_vma_is_active(vma))
+ continue;
+
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ }
}
/**
@@ -4032,20 +3230,16 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t old_write_domain, old_read_domains;
- struct i915_vma *vma;
int ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
- return 0;
-
ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
/* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
@@ -4086,10 +3280,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_write_domain);
/* And bump the LRU for this access */
- vma = i915_gem_obj_to_ggtt(obj);
- if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
- list_move_tail(&vma->vm_link,
- &ggtt->base.inactive_list);
+ i915_gem_object_bump_inactive_ggtt(obj);
return 0;
}
@@ -4112,9 +3303,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct drm_device *dev = obj->base.dev;
- struct i915_vma *vma, *next;
- bool bound = false;
+ struct i915_vma *vma;
int ret = 0;
if (obj->cache_level == cache_level)
@@ -4125,21 +3314,28 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* catch the issue of the CS prefetch crossing page boundaries and
* reading an invalid PTE on older architectures.
*/
- list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
- if (vma->pin_count) {
+ if (i915_vma_is_pinned(vma)) {
DRM_DEBUG("can not change the cache level of pinned objects\n");
return -EBUSY;
}
- if (!i915_gem_valid_gtt_space(vma, cache_level)) {
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
- } else
- bound = true;
+ if (i915_gem_valid_gtt_space(vma, cache_level))
+ continue;
+
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ return ret;
+
+ /* As unbinding may affect other elements in the
+ * obj->vma_list (due to side-effects from retiring
+ * an active vma), play safe and restart the iterator.
+ */
+ goto restart;
}
/* We can reuse the existing drm_mm nodes but need to change the
@@ -4149,7 +3345,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* rewrite the PTE in the belief that doing so tramples upon less
* state and so involves less work.
*/
- if (bound) {
+ if (obj->bind_count) {
/* Before we change the PTE, the GPU must not be accessing it.
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
@@ -4158,7 +3354,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+ if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
/* Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmapping to force
@@ -4175,9 +3371,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* dropped the fence as all snoopable access is
* supposed to be linear.
*/
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- return ret;
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ ret = i915_vma_put_fence(vma);
+ if (ret)
+ return ret;
+ }
} else {
/* We either have incoherent backing store and
* so no GTT access or the architecture is fully
@@ -4221,8 +3419,8 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
- if (&obj->base == NULL)
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
return -ENOENT;
switch (obj->cache_level) {
@@ -4240,7 +3438,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
break;
}
- drm_gem_object_unreference_unlocked(&obj->base);
+ i915_gem_object_put_unlocked(obj);
return 0;
}
@@ -4282,15 +3480,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (ret)
goto rpm_put;
- obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
- if (&obj->base == NULL) {
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj) {
ret = -ENOENT;
goto unlock;
}
ret = i915_gem_object_set_cache_level(obj, level);
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
rpm_put:
@@ -4304,11 +3502,12 @@ rpm_put:
* Can be called from an uninterruptible phase (modesetting) and allows
* any flushes to be pipelined (for pageflips).
*/
-int
+struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
const struct i915_ggtt_view *view)
{
+ struct i915_vma *vma;
u32 old_read_domains, old_write_domain;
int ret;
@@ -4328,19 +3527,43 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
*/
ret = i915_gem_object_set_cache_level(obj,
HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
- if (ret)
+ if (ret) {
+ vma = ERR_PTR(ret);
goto err_unpin_display;
+ }
/* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
- * always use map_and_fenceable for all scanout buffers.
+ * always use map_and_fenceable for all scanout buffers. However,
+ * the object may simply be too big to fit into the mappable aperture,
+ * in which case we put it anyway and hope that userspace can cope
+ * (but always first try to preserve the existing ABI).
*/
- ret = i915_gem_object_ggtt_pin(obj, view, alignment,
- view->type == I915_GGTT_VIEW_NORMAL ?
- PIN_MAPPABLE : 0);
- if (ret)
+ vma = ERR_PTR(-ENOSPC);
+ if (view->type == I915_GGTT_VIEW_NORMAL)
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
+ PIN_MAPPABLE | PIN_NONBLOCK);
+ if (IS_ERR(vma)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int flags;
+
+ /* Valleyview is definitely limited to scanning out the first
+ * 512MiB. Lets presume this behaviour was inherited from the
+ * g4x display engine and that all earlier gen are similarly
+ * limited. Testing suggests that it is a little more
+ * complicated than this. For example, Cherryview appears quite
+ * happy to scanout from anywhere within its global aperture.
+ */
+ flags = 0;
+ if (HAS_GMCH_DISPLAY(i915))
+ flags = PIN_MAPPABLE;
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
+ }
+ if (IS_ERR(vma))
goto err_unpin_display;
+ vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
i915_gem_object_flush_cpu_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -4356,23 +3579,27 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
old_read_domains,
old_write_domain);
- return 0;
+ return vma;
err_unpin_display:
obj->pin_display--;
- return ret;
+ return vma;
}
void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
+i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
- if (WARN_ON(obj->pin_display == 0))
+ if (WARN_ON(vma->obj->pin_display == 0))
return;
- i915_gem_object_ggtt_unpin_view(obj, view);
+ if (--vma->obj->pin_display == 0)
+ vma->display_alignment = 0;
- obj->pin_display--;
+ /* Bump the LRU to try to avoid premature eviction whilst flipping */
+ if (!i915_vma_is_active(vma))
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+ i915_vma_unpin(vma);
}
/**
@@ -4389,13 +3616,13 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
- return 0;
-
ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return 0;
+
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -4470,28 +3697,31 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
target = request;
}
if (target)
- i915_gem_request_reference(target);
+ i915_gem_request_get(target);
spin_unlock(&file_priv->mm.lock);
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, true, NULL, NULL);
- i915_gem_request_unreference(target);
+ ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
+ i915_gem_request_put(target);
return ret;
}
static bool
-i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_gem_object *obj = vma->obj;
+ if (!drm_mm_node_allocated(&vma->node))
+ return false;
- if (alignment &&
- vma->node.start & (alignment - 1))
+ if (vma->node.size < size)
return true;
- if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+ if (alignment && vma->node.start & (alignment - 1))
+ return true;
+
+ if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
return true;
if (flags & PIN_OFFSET_BIAS &&
@@ -4508,135 +3738,213 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
bool mappable, fenceable;
u32 fence_size, fence_alignment;
- fence_size = i915_gem_get_gtt_size(obj->base.dev,
- obj->base.size,
- obj->tiling_mode);
- fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
- obj->base.size,
- obj->tiling_mode,
- true);
+ fence_size = i915_gem_get_ggtt_size(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj));
+ fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
+ vma->size,
+ i915_gem_object_get_tiling(obj),
+ true);
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
mappable = (vma->node.start + fence_size <=
- to_i915(obj->base.dev)->ggtt.mappable_end);
+ dev_priv->ggtt.mappable_end);
- obj->map_and_fenceable = mappable && fenceable;
+ /*
+ * Explicitly disable for rotated VMA since the display does not
+ * need the fence and the VMA is not accessible to other users.
+ */
+ if (mappable && fenceable &&
+ vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+ vma->flags |= I915_VMA_CAN_FENCE;
+ else
+ vma->flags &= ~I915_VMA_CAN_FENCE;
}
-static int
-i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *ggtt_view,
- uint32_t alignment,
- uint64_t flags)
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_vma *vma;
- unsigned bound;
+ unsigned int bound = vma->flags;
int ret;
- if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
- return -ENODEV;
+ GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+ GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
- if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
- return -EINVAL;
+ if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+ ret = -EBUSY;
+ goto err;
+ }
- if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
- return -EINVAL;
+ if ((bound & I915_VMA_BIND_MASK) == 0) {
+ ret = i915_vma_insert(vma, size, alignment, flags);
+ if (ret)
+ goto err;
+ }
- if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
- return -EINVAL;
+ ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+ if (ret)
+ goto err;
- vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
- i915_gem_obj_to_vma(obj, vm);
+ if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
+ __i915_vma_set_map_and_fenceable(vma);
- if (vma) {
- if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
- return -EBUSY;
+ GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+ return 0;
- if (i915_vma_misplaced(vma, alignment, flags)) {
- WARN(vma->pin_count,
- "bo is already pinned in %s with incorrect alignment:"
- " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
- " obj->map_and_fenceable=%d\n",
- ggtt_view ? "ggtt" : "ppgtt",
- upper_32_bits(vma->node.start),
- lower_32_bits(vma->node.start),
- alignment,
- !!(flags & PIN_MAPPABLE),
- obj->map_and_fenceable);
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
+err:
+ __i915_vma_unpin(vma);
+ return ret;
+}
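+/* Callers normally reach this through an inline i915_vma_pin() wrapper,
+ * which raises the pin count before falling back to this slow path -
+ * hence "bound" above already includes the new pin, and the error path
+ * drops it again with __i915_vma_unpin(). A minimal sketch (its home in
+ * i915_gem_gtt.h and the exact fast-path test are assumptions, shown
+ * only for illustration):
+ *
+ * static inline int __must_check
+ * i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+ * {
+ * if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+ * return 0;
+ *
+ * return __i915_vma_do_pin(vma, size, alignment, flags);
+ * }
+ */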
- vma = NULL;
- }
- }
+struct i915_vma *
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view,
+ u64 size,
+ u64 alignment,
+ u64 flags)
+{
+ struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
+ struct i915_vma *vma;
+ int ret;
- bound = vma ? vma->bound : 0;
- if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
- flags);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
- } else {
- ret = i915_vma_bind(vma, obj->cache_level, flags);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
+ if (IS_ERR(vma))
+ return vma;
+
+ if (i915_vma_misplaced(vma, size, alignment, flags)) {
+ if (flags & PIN_NONBLOCK &&
+ (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
+ return ERR_PTR(-ENOSPC);
+
+ WARN(i915_vma_is_pinned(vma),
+ "bo is already pinned in ggtt with incorrect alignment:"
+ " offset=%08x, req.alignment=%llx,"
+ " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
+ i915_ggtt_offset(vma), alignment,
+ !!(flags & PIN_MAPPABLE),
+ i915_vma_is_map_and_fenceable(vma));
+ ret = i915_vma_unbind(vma);
if (ret)
- return ret;
+ return ERR_PTR(ret);
}
- if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
- (bound ^ vma->bound) & GLOBAL_BIND) {
- __i915_vma_set_map_and_fenceable(vma);
- WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
- }
+ ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ if (ret)
+ return ERR_PTR(ret);
- vma->pin_count++;
- return 0;
+ return vma;
}
-int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- uint32_t alignment,
- uint64_t flags)
+static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
- return i915_gem_object_do_pin(obj, vm,
- i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
- alignment, flags);
+ /* Note that we could alias engines in the execbuf API, but
+ * that would be very unwise as it would prevent userspace from
+ * exercising fine control over engine selection. Ahem.
+ *
+ * This should be something like EXEC_MAX_ENGINE instead of
+ * I915_NUM_ENGINES.
+ */
+ BUILD_BUG_ON(I915_NUM_ENGINES > 16);
+ return 0x10000 << id;
}
-int
-i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view,
- uint32_t alignment,
- uint64_t flags)
+static __always_inline unsigned int __busy_write_id(unsigned int id)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ /* The uABI guarantees an active writer is also amongst the read
+ * engines. This would be true if we accessed the activity tracking
+ * under the lock, but as we perform the lookup of the object and
+ * its activity locklessly we can not guarantee that the last_write
+ * being active implies that we have set the same engine flag from
+ * last_read - hence we always set both read and write busy for
+ * last_write.
+ */
+ return id | __busy_read_flag(id);
+}
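+/* A worked example of the encoding: engine id 1 yields a read flag of
+ * 0x20000 and a write id of 0x20001; engine id 2 yields 0x40000 and
+ * 0x40002. Reads occupy the high 16 bits of args->busy, while the
+ * single writer's id sits in the low 16 bits alongside its read flag.
+ */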
- BUG_ON(!view);
+static __always_inline unsigned int
+__busy_set_if_active(const struct i915_gem_active *active,
+ unsigned int (*flag)(unsigned int id))
+{
+ struct drm_i915_gem_request *request;
+
+ request = rcu_dereference(active->request);
+ if (!request || i915_gem_request_completed(request))
+ return 0;
- return i915_gem_object_do_pin(obj, &ggtt->base, view,
- alignment, flags | PIN_GLOBAL);
+ /* This is racy. See __i915_gem_active_get_rcu() for a detailed
+ * discussion of how to handle the race correctly, but for reporting
+ * the busy state we err on the side of potentially reporting the
+ * wrong engine as being busy (but we guarantee that the result
+ * is at least self-consistent).
+ *
+ * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
+ * whilst we are inspecting it, even under the RCU read lock as we are.
+ * This means that there is a small window for the engine and/or the
+ * seqno to have been overwritten. The seqno will always be in the
+ * future compared to the intended, and so we know that if that
+ * seqno is idle (on whatever engine) our request is idle and the
+ * return 0 above is correct.
+ *
+ * The issue is that if the engine is switched, it is just as likely
+ * to report that it is busy (but since the switch happened, we know
+ * the request should be idle). So there is a small chance that a busy
+ * result is actually the wrong engine.
+ *
+ * So why don't we care?
+ *
+ * For starters, the busy ioctl is a heuristic that is by definition
+ * racy. Even with perfect serialisation in the driver, the hardware
+ * state is constantly advancing - the state we report to the user
+ * is stale.
+ *
+ * The critical information for the busy-ioctl is whether the object
+ * is idle as userspace relies on that to detect whether its next
+ * access will stall, or if it has missed submitting commands to
+ * the hardware allowing the GPU to stall. We never generate a
+ * false-positive for idleness, thus busy-ioctl is reliable at the
+ * most fundamental level, and we maintain the guarantee that a
+ * busy object left to itself will eventually become idle (and stay
+ * idle!).
+ *
+ * We allow ourselves the leeway of potentially misreporting the busy
+ * state because that is an optimisation heuristic that is constantly
+ * in flux. Being quickly able to detect the busy/idle state is much
+ * more important than accurate logging of exactly which engines were
+ * busy.
+ *
+ * For accuracy in reporting the engine, we could use
+ *
+ * result = 0;
+ * request = __i915_gem_active_get_rcu(active);
+ * if (request) {
+ * if (!i915_gem_request_completed(request))
+ * result = flag(request->engine->exec_id);
+ * i915_gem_request_put(request);
+ * }
+ *
+ * but that still remains susceptible to both hardware and userspace
+ * races. So we accept making the result of that race slightly worse,
+ * given the rarity of the race and its low impact on the result.
+ */
+ return flag(READ_ONCE(request->engine->exec_id));
}
-void
-i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
+static __always_inline unsigned int
+busy_check_reader(const struct i915_gem_active *active)
{
- struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
-
- WARN_ON(vma->pin_count == 0);
- WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
+ return __busy_set_if_active(active, __busy_read_flag);
+}
- --vma->pin_count;
+static __always_inline unsigned int
+busy_check_writer(const struct i915_gem_active *active)
+{
+ return __busy_set_if_active(active, __busy_write_id);
}
int
@@ -4645,47 +3953,64 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- int ret;
+ unsigned long active;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
- obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
+ args->busy = 0;
+ active = __I915_BO_ACTIVE(obj);
+ if (active) {
+ int idx;
- /* Count all active objects as busy, even if they are currently not used
- * by the gpu. Users of this interface expect objects to eventually
- * become non-busy without any further actions, therefore emit any
- * necessary flushes here.
- */
- ret = i915_gem_object_flush_active(obj);
- if (ret)
- goto unref;
+ /* Yes, the lookups are intentionally racy.
+ *
+ * First, we cannot simply rely on __I915_BO_ACTIVE. We have
+ * to regard the value as stale and as our ABI guarantees
+ * forward progress, we confirm the status of each active
+ * request with the hardware.
+ *
+ * Even though we guard the pointer lookup by RCU, that only
+ * guarantees that the pointer and its contents remain
+ * dereferenceable and does *not* mean that the request we
+ * have is the same as the one being tracked by the object.
+ *
+ * Consider that we lookup the request just as it is being
+ * retired and freed. We take a local copy of the pointer,
+ * but before we add its engine into the busy set, the other
+ * thread reallocates it and assigns it to a task on another
+ * engine with a fresh and incomplete seqno. Guarding against
+ * that requires careful serialisation and reference counting,
+ * i.e. using __i915_gem_active_get_rcu(). We don't,
+ * instead we expect that if the result is busy, which engines
+ * are busy is not completely reliable - we only guarantee
+ * that the object was busy.
+ */
+ rcu_read_lock();
- args->busy = 0;
- if (obj->active) {
- int i;
+ for_each_active(active, idx)
+ args->busy |= busy_check_reader(&obj->last_read[idx]);
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct drm_i915_gem_request *req;
+ /* For ABI sanity, we only care that the write engine is in
+ * the set of read engines. This should be ensured by the
+ * ordering of setting last_read/last_write in
+ * i915_vma_move_to_active(), and then in reverse in retire.
+ * However, for good measure, we always report the last_write
+ * request as a busy read as well as being a busy write.
+ *
+ * We don't care that the set of active read/write engines
+ * may change during construction of the result, as it is
+ * equally liable to change before userspace can inspect
+ * the result.
+ */
+ args->busy |= busy_check_writer(&obj->last_write);
- req = obj->last_read_req[i];
- if (req)
- args->busy |= 1 << (16 + req->engine->exec_id);
- }
- if (obj->last_write_req)
- args->busy |= obj->last_write_req->engine->exec_id;
+ rcu_read_unlock();
}
-unref:
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ i915_gem_object_put_unlocked(obj);
+ return 0;
}
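From userspace, the result then decodes as below. This is a hedged sketch: the field layout is inferred from the `1 << (16 + exec_id)` encoding in the deleted lines above rather than quoted from the uAPI headers, and the helper is hypothetical:

	#include <stdbool.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Hedged sketch: decode the busy-ioctl result. Assumed layout:
	 * write engine id in the low 16 bits, one read bit per engine
	 * from bit 16 up. Verify against the uAPI headers before use.
	 */
	static bool object_is_idle(int fd, uint32_t handle)
	{
		struct drm_i915_gem_busy busy = { .handle = handle };

		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
			return false; /* treat an ioctl error as busy */

		return busy.busy == 0; /* low word: writer; high word: readers */
	}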
int
@@ -4716,19 +4041,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
- if (&obj->base == NULL) {
+ obj = i915_gem_object_lookup(file_priv, args->handle);
+ if (!obj) {
ret = -ENOENT;
goto unlock;
}
- if (i915_gem_obj_is_pinned(obj)) {
- ret = -EINVAL;
- goto out;
- }
-
if (obj->pages &&
- obj->tiling_mode != I915_TILING_NONE &&
+ i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->madv == I915_MADV_WILLNEED)
i915_gem_object_unpin_pages(obj);
@@ -4745,8 +4065,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
args->retained = obj->madv != __I915_MADV_PURGED;
-out:
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4759,14 +4078,17 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->global_list);
for (i = 0; i < I915_NUM_ENGINES; i++)
- INIT_LIST_HEAD(&obj->engine_list[i]);
+ init_request_active(&obj->last_read[i],
+ i915_gem_object_retire__read);
+ init_request_active(&obj->last_write,
+ i915_gem_object_retire__write);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
obj->ops = ops;
- obj->fence_reg = I915_FENCE_REG_NONE;
+ obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
obj->madv = I915_MADV_WILLNEED;
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
@@ -4871,33 +4193,31 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
trace_i915_gem_object_destroy(obj);
+ /* All file-owned VMA should have been released by this point through
+ * i915_gem_close_object(), or earlier by i915_gem_context_close().
+ * However, the object may also be bound into the global GTT (e.g.
+ * older GPUs without per-process support, or for direct access through
+ * the GTT either for the user or for scanout). Those VMA still need
+ * to be unbound now.
+ */
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
- int ret;
-
- vma->pin_count = 0;
- ret = i915_vma_unbind(vma);
- if (WARN_ON(ret == -ERESTARTSYS)) {
- bool was_interruptible;
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- WARN_ON(i915_vma_unbind(vma));
-
- dev_priv->mm.interruptible = was_interruptible;
- }
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ vma->flags &= ~I915_VMA_PIN_MASK;
+ i915_vma_close(vma);
}
+ GEM_BUG_ON(obj->bind_count);
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
- WARN_ON(obj->frontbuffer_bits);
+ WARN_ON(atomic_read(&obj->frontbuffer_bits));
if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
- obj->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_is_tiled(obj))
i915_gem_object_unpin_pages(obj);
if (WARN_ON(obj->pages_pin_count))
@@ -4905,7 +4225,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (discard_backing_storage(obj))
obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
- i915_gem_object_free_mmap_offset(obj);
BUG_ON(obj->pages);
@@ -4924,71 +4243,35 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
intel_runtime_pm_put(dev_priv);
}
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
- vma->vm == vm)
- return vma;
- }
- return NULL;
-}
-
-struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
-
- GEM_BUG_ON(!view);
-
- list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
- return vma;
- return NULL;
-}
-
-void i915_gem_vma_destroy(struct i915_vma *vma)
-{
- WARN_ON(vma->node.allocated);
-
- /* Keep the vma as a placeholder in the execbuffer reservation lists */
- if (!list_empty(&vma->exec_list))
- return;
-
- if (!vma->is_ggtt)
- i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
- list_del(&vma->obj_link);
-
- kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
-static void
-i915_gem_stop_engines(struct drm_device *dev)
+int i915_gem_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_engine_cs *engine;
+ int ret;
- for_each_engine(engine, dev_priv)
- dev_priv->gt.stop_engine(engine);
-}
-
-int
-i915_gem_suspend(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret = 0;
+ intel_suspend_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_wait_for_idle(dev_priv);
+
+ /* We have to flush all the executing contexts to main memory so
+ * that they can be saved in the hibernation image. To ensure the last
+ * context image is coherent, we have to switch away from it. That
+ * leaves the dev_priv->kernel_context still active when
+ * we actually suspend, and its image in memory may not match the GPU
+ * state. Fortunately, the kernel_context is disposable and we do
+ * not rely on its state.
+ */
+ ret = i915_gem_switch_to_kernel_context(dev_priv);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
if (ret)
goto err;
i915_gem_retire_requests(dev_priv);
- i915_gem_stop_engines(dev);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -5008,6 +4291,22 @@ err:
return ret;
}
+void i915_gem_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+
+ /* As we didn't flush the kernel context before suspend, we cannot
+ * guarantee that the context image is complete. So let's just reset
+ * it and start again.
+ */
+ dev_priv->gt.resume(dev_priv);
+
+ mutex_unlock(&dev->struct_mutex);
+}
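The intended pairing of the two entry points, as a minimal sketch; the surrounding suspend/resume callbacks are hypothetical stand-ins for the driver's real PM hooks, which are not part of this hunk:

	/* Hypothetical PM hooks, illustrating the expected call order. */
	static int example_driver_suspend(struct drm_device *dev)
	{
		int ret;

		ret = i915_gem_suspend(dev); /* flush contexts, idle the GPU */
		if (ret)
			return ret;

		/* ... power the device down ... */
		return 0;
	}

	static void example_driver_resume(struct drm_device *dev)
	{
		/* ... power the device back up ... */
		i915_gem_resume(dev); /* restore GTT mappings, reset submission */
	}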
+
void i915_gem_init_swizzling(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5060,53 +4359,6 @@ static void init_unused_rings(struct drm_device *dev)
}
}
-int i915_gem_init_engines(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
-
- ret = intel_init_render_ring_buffer(dev);
- if (ret)
- return ret;
-
- if (HAS_BSD(dev)) {
- ret = intel_init_bsd_ring_buffer(dev);
- if (ret)
- goto cleanup_render_ring;
- }
-
- if (HAS_BLT(dev)) {
- ret = intel_init_blt_ring_buffer(dev);
- if (ret)
- goto cleanup_bsd_ring;
- }
-
- if (HAS_VEBOX(dev)) {
- ret = intel_init_vebox_ring_buffer(dev);
- if (ret)
- goto cleanup_blt_ring;
- }
-
- if (HAS_BSD2(dev)) {
- ret = intel_init_bsd2_ring_buffer(dev);
- if (ret)
- goto cleanup_vebox_ring;
- }
-
- return 0;
-
-cleanup_vebox_ring:
- intel_cleanup_engine(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
- intel_cleanup_engine(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
- intel_cleanup_engine(&dev_priv->engine[VCS]);
-cleanup_render_ring:
- intel_cleanup_engine(&dev_priv->engine[RCS]);
-
- return ret;
-}
-
int
i915_gem_init_hw(struct drm_device *dev)
{
@@ -5173,6 +4425,27 @@ out:
return ret;
}
+bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
+{
+ if (INTEL_INFO(dev_priv)->gen < 6)
+ return false;
+
+ /* TODO: make semaphores and Execlists play nicely together */
+ if (i915.enable_execlists)
+ return false;
+
+ if (value >= 0)
+ return value;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
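A plausible call site for the new helper, folding the tristate module parameter (-1 = auto) into a final boolean once during load; the exact location is an assumption, as it is not shown in this patch:

	/* Assumed call site, not part of this hunk: sanitize the user's
	 * tristate i915.semaphores parameter early in driver load,
	 * before anything consults it.
	 */
	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);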
+
int i915_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5181,15 +4454,11 @@ int i915_gem_init(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
if (!i915.enable_execlists) {
- dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
- dev_priv->gt.init_engines = i915_gem_init_engines;
- dev_priv->gt.cleanup_engine = intel_cleanup_engine;
- dev_priv->gt.stop_engine = intel_stop_engine;
+ dev_priv->gt.resume = intel_legacy_submission_resume;
+ dev_priv->gt.cleanup_engine = intel_engine_cleanup;
} else {
- dev_priv->gt.execbuf_submit = intel_execlists_submission;
- dev_priv->gt.init_engines = intel_logical_rings_init;
+ dev_priv->gt.resume = intel_lr_context_resume;
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
- dev_priv->gt.stop_engine = intel_logical_ring_stop;
}
/* This is just a security blanket to placate dragons.
@@ -5201,24 +4470,27 @@ int i915_gem_init(struct drm_device *dev)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
i915_gem_init_userptr(dev_priv);
- i915_gem_init_ggtt(dev);
+
+ ret = i915_gem_init_ggtt(dev_priv);
+ if (ret)
+ goto out_unlock;
ret = i915_gem_context_init(dev);
if (ret)
goto out_unlock;
- ret = dev_priv->gt.init_engines(dev);
+ ret = intel_engines_init(dev);
if (ret)
goto out_unlock;
ret = i915_gem_init_hw(dev);
if (ret == -EIO) {
- /* Allow ring initialisation to fail by marking the GPU as
+ /* Allow engine initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
- atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ i915_gem_set_wedged(dev_priv);
ret = 0;
}
@@ -5242,7 +4514,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
static void
init_engine_lists(struct intel_engine_cs *engine)
{
- INIT_LIST_HEAD(&engine->active_list);
INIT_LIST_HEAD(&engine->request_list);
}
@@ -5250,6 +4521,7 @@ void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
+ int i;
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
@@ -5265,6 +4537,13 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
I915_READ(vgtif_reg(avail_rs.fence_num));
/* Initialize fence registers to zero */
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+
+ fence->i915 = dev_priv;
+ fence->id = i;
+ list_add_tail(&fence->link, &dev_priv->mm.fence_list);
+ }
i915_gem_restore_fences(dev);
i915_gem_detect_bit_6_swizzle(dev);
@@ -5289,18 +4568,17 @@ i915_gem_load_init(struct drm_device *dev)
dev_priv->requests =
kmem_cache_create("i915_gem_request",
sizeof(struct drm_i915_gem_request), 0,
- SLAB_HWCACHE_ALIGN,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_DESTROY_BY_RCU,
NULL);
- INIT_LIST_HEAD(&dev_priv->vm_list);
INIT_LIST_HEAD(&dev_priv->context_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
for (i = 0; i < I915_NUM_ENGINES; i++)
init_engine_lists(&dev_priv->engine[i]);
- for (i = 0; i < I915_MAX_NUM_FENCES; i++)
- INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -5310,13 +4588,13 @@ i915_gem_load_init(struct drm_device *dev)
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
- INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-
init_waitqueue_head(&dev_priv->pending_flip_queue);
dev_priv->mm.interruptible = true;
- mutex_init(&dev_priv->fb_tracking.lock);
+ atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
+
+ spin_lock_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_load_cleanup(struct drm_device *dev)
@@ -5326,11 +4604,32 @@ void i915_gem_load_cleanup(struct drm_device *dev)
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
+
+ /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+ rcu_barrier();
+}
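With the request cache now marked SLAB_DESTROY_BY_RCU, any lockless consumer must follow the revalidate-after-reference pattern alluded to in the busy-ioctl comments above. A generic sketch, with request_get_unless_zero()/request_put() as hypothetical stand-ins for the driver's reference helpers:

	/* Generic sketch of the lookup pattern SLAB_DESTROY_BY_RCU demands:
	 * the memory stays type-stable under rcu_read_lock(), but the object
	 * may be freed and recycled at any time, so a reference taken from
	 * an RCU-protected slot must be revalidated against that slot.
	 */
	rcu_read_lock();
	for (;;) {
		request = rcu_dereference(active->request);
		if (!request)
			break; /* idle */

		if (!request_get_unless_zero(request))
			continue; /* caught mid-free; re-read the slot */

		/* The slab may already have recycled this request for a new
		 * owner; only a re-check after taking our own reference tells
		 * us whether we hold the request we were looking for.
		 */
		if (request == rcu_access_pointer(active->request))
			break; /* success */

		request_put(request);
	}
	rcu_read_unlock();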
+
+int i915_gem_freeze(struct drm_i915_private *dev_priv)
+{
+ intel_runtime_pm_get(dev_priv);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_shrink_all(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
}
int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
+ struct list_head *phases[] = {
+ &dev_priv->mm.unbound_list,
+ &dev_priv->mm.bound_list,
+ NULL
+ }, **p;
/* Called just before we write the hibernation image.
*
@@ -5341,17 +4640,21 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
*
* To make sure the hibernation image contains the latest state,
* we update that state just before writing out the image.
+ *
+ * To try to reduce the size of the hibernation image, we also
+ * manually shrink the objects.
*/
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- }
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ for (p = phases; *p; p++) {
+ list_for_each_entry(obj, *p, global_list) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
}
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
}
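The NULL-terminated array of list heads used above (and again in i915_ppgtt_close() and the eviction rewrite later in this series) is a small idiom for running one loop body over several lists; a generic sketch with hypothetical names:

	/* Generic sketch of the phase-list idiom: iterate the same body
	 * over several lists without duplicating the loop. The lists and
	 * element type are hypothetical.
	 */
	struct list_head *phases[] = {
		&first_list,
		&second_list,
		NULL,
	}, **p;

	for (p = phases; *p; p++) {
		struct item *it;

		list_for_each_entry(it, *p, link)
			process(it);
	}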
@@ -5359,21 +4662,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_request *request;
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
*/
spin_lock(&file_priv->mm.lock);
- while (!list_empty(&file_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&file_priv->mm.request_list,
- struct drm_i915_gem_request,
- client_list);
- list_del(&request->client_list);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_list)
request->file_priv = NULL;
- }
spin_unlock(&file_priv->mm.lock);
if (!list_empty(&file_priv->rps.link)) {
@@ -5402,7 +4699,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
- file_priv->bsd_ring = -1;
+ file_priv->bsd_engine = -1;
ret = i915_gem_context_open(dev, file);
if (ret)
@@ -5424,118 +4721,24 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits)
{
+ /* Control of individual bits within the mask is guarded by
+ * the owning plane->mutex, i.e. we can never see concurrent
+ * manipulation of individual bits. But since the bitfield as a whole
+ * is updated using RMW, we need to use atomics in order to update
+ * the bits.
+ */
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+ sizeof(atomic_t) * BITS_PER_BYTE);
+
if (old) {
- WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
- WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
- old->frontbuffer_bits &= ~frontbuffer_bits;
+ WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
+ atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
}
if (new) {
- WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
- WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
- new->frontbuffer_bits |= frontbuffer_bits;
- }
-}
-
-/* All the new VM stuff */
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
-{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_vma *vma;
-
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
- list_for_each_entry(vma, &o->vma_list, obj_link) {
- if (vma->is_ggtt &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
- if (vma->vm == vm)
- return vma->node.start;
- }
-
- WARN(1, "%s vma for this object not found.\n",
- i915_is_ggtt(vm) ? "global" : "ppgtt");
- return -1;
-}
-
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
- return vma->node.start;
-
- WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
- return -1;
-}
-
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &o->vma_list, obj_link) {
- if (vma->is_ggtt &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
- if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
- return true;
+ WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
+ atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
}
-
- return false;
-}
-
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->is_ggtt &&
- i915_ggtt_view_equal(&vma->ggtt_view, view) &&
- drm_mm_node_allocated(&vma->node))
- return true;
-
- return false;
-}
-
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &o->vma_list, obj_link)
- if (drm_mm_node_allocated(&vma->node))
- return true;
-
- return false;
-}
-
-unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
-{
- struct i915_vma *vma;
-
- GEM_BUG_ON(list_empty(&o->vma_list));
-
- list_for_each_entry(vma, &o->vma_list, obj_link) {
- if (vma->is_ggtt &&
- vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- return vma->node.size;
- }
-
- return 0;
-}
-
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->pin_count > 0)
- return true;
-
- return false;
}
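To illustrate why the plain bitfield had to become an atomic_t: distinct planes own disjoint bits of the same word, so with only the per-plane mutexes the word itself is no longer protected by a single lock. A sketch of the lost-update hazard and its atomic fix, using the same calls as the hunk above:

	/* Sketch of the hazard the atomics close. Two planes updating
	 * disjoint bits of a plain bitfield race on the shared word:
	 *
	 *   plane A: tmp = bits; tmp |= BIT_A; bits = tmp;
	 *   plane B: tmp = bits; tmp |= BIT_B; bits = tmp; // may undo BIT_A
	 *
	 * With a single atomic RMW per update, no bits can be clobbered:
	 */
	atomic_or(frontbuffer_bits, &obj->frontbuffer_bits);     /* set */
	atomic_andnot(frontbuffer_bits, &obj->frontbuffer_bits); /* clear */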
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
@@ -5590,6 +4793,6 @@ i915_gem_object_create_from_data(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 3752d5daa4b2..ed989596d9a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -41,15 +41,15 @@
/**
* i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @dev: the drm device
+ * @engine: the associated request submission engine
* @pool: the batch buffer pool
*/
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
struct i915_gem_batch_pool *pool)
{
int n;
- pool->dev = dev;
+ pool->engine = engine;
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
INIT_LIST_HEAD(&pool->cache_list[n]);
@@ -65,18 +65,17 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
int n;
- WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+ lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
- while (!list_empty(&pool->cache_list[n])) {
- struct drm_i915_gem_object *obj =
- list_first_entry(&pool->cache_list[n],
- struct drm_i915_gem_object,
- batch_pool_link);
-
- list_del(&obj->batch_pool_link);
- drm_gem_object_unreference(&obj->base);
- }
+ struct drm_i915_gem_object *obj, *next;
+
+ list_for_each_entry_safe(obj, next,
+ &pool->cache_list[n],
+ batch_pool_link)
+ i915_gem_object_put(obj);
+
+ INIT_LIST_HEAD(&pool->cache_list[n]);
}
}
@@ -102,7 +101,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
struct list_head *list;
int n;
- WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+ lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
/* Compute a power-of-two bucket, but throw everything greater than
* 16KiB into the same bucket: i.e. the buckets hold objects of
@@ -115,13 +114,14 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
- if (tmp->active)
+ if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
+ &tmp->base.dev->struct_mutex))
break;
/* While we're looping, do some clean up */
if (tmp->madv == __I915_MADV_PURGED) {
list_del(&tmp->batch_pool_link);
- drm_gem_object_unreference(&tmp->base);
+ i915_gem_object_put(tmp);
continue;
}
@@ -134,7 +134,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (obj == NULL) {
int ret;
- obj = i915_gem_object_create(pool->dev, size);
+ obj = i915_gem_object_create(&pool->engine->i915->drm, size);
if (IS_ERR(obj))
return obj;
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
index 848e90703eed..10d5ac4c00d3 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -27,13 +27,15 @@
#include "i915_drv.h"
+struct intel_engine_cs;
+
struct i915_gem_batch_pool {
- struct drm_device *dev;
+ struct intel_engine_cs *engine;
struct list_head cache_list[4];
};
/* i915_gem_batch_pool.c */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
struct i915_gem_batch_pool *pool);
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
struct drm_i915_gem_object*
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3c97f0e7a003..df10f4e95736 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -134,21 +134,6 @@ static int get_context_size(struct drm_i915_private *dev_priv)
return ret;
}
-static void i915_gem_context_clean(struct i915_gem_context *ctx)
-{
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
- struct i915_vma *vma, *next;
-
- if (!ppgtt)
- return;
-
- list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
- vm_link) {
- if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
- break;
- }
-}
-
void i915_gem_context_free(struct kref *ctx_ref)
{
struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -156,13 +141,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
trace_i915_context_free(ctx);
-
- /*
- * This context is going away and we need to remove all VMAs still
- * around. This is to handle imported shared objects for which
- * destructor did not run when their handles were closed.
- */
- i915_gem_context_clean(ctx);
+ GEM_BUG_ON(!ctx->closed);
i915_ppgtt_put(ctx->ppgtt);
@@ -173,12 +152,13 @@ void i915_gem_context_free(struct kref *ctx_ref)
continue;
WARN_ON(ce->pin_count);
- if (ce->ringbuf)
- intel_ringbuffer_free(ce->ringbuf);
+ if (ce->ring)
+ intel_ring_free(ce->ring);
- drm_gem_object_unreference(&ce->state->base);
+ i915_vma_put(ce->state);
}
+ put_pid(ctx->pid);
list_del(&ctx->link);
ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
@@ -216,7 +196,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
return ERR_PTR(ret);
}
}
@@ -224,6 +204,37 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj;
}
+static void i915_ppgtt_close(struct i915_address_space *vm)
+{
+ struct list_head *phases[] = {
+ &vm->active_list,
+ &vm->inactive_list,
+ &vm->unbound_list,
+ NULL,
+ }, **phase;
+
+ GEM_BUG_ON(vm->closed);
+ vm->closed = true;
+
+ for (phase = phases; *phase; phase++) {
+ struct i915_vma *vma, *vn;
+
+ list_for_each_entry_safe(vma, vn, *phase, vm_link)
+ if (!i915_vma_is_closed(vma))
+ i915_vma_close(vma);
+ }
+}
+
+static void context_close(struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(ctx->closed);
+ ctx->closed = true;
+ if (ctx->ppgtt)
+ i915_ppgtt_close(&ctx->ppgtt->base);
+ ctx->file_priv = ERR_PTR(-EBADF);
+ i915_gem_context_put(ctx);
+}
+
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
int ret;
@@ -271,13 +282,24 @@ __create_hw_context(struct drm_device *dev,
ctx->ggtt_alignment = get_context_alignment(dev_priv);
if (dev_priv->hw_context_size) {
- struct drm_i915_gem_object *obj =
- i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_alloc_context_obj(dev,
+ dev_priv->hw_context_size);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_out;
}
- ctx->engine[RCS].state = obj;
+
+ vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ ret = PTR_ERR(vma);
+ goto err_out;
+ }
+
+ ctx->engine[RCS].state = vma;
}
/* Default context will never have a file_priv */
@@ -290,6 +312,9 @@ __create_hw_context(struct drm_device *dev,
ret = DEFAULT_CONTEXT_HANDLE;
ctx->file_priv = file_priv;
+ if (file_priv)
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+
ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@@ -305,7 +330,7 @@ __create_hw_context(struct drm_device *dev,
return ctx;
err_out:
- i915_gem_context_unreference(ctx);
+ context_close(ctx);
return ERR_PTR(ret);
}
@@ -327,13 +352,14 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
if (USES_FULL_PPGTT(dev)) {
- struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
+ struct i915_hw_ppgtt *ppgtt =
+ i915_ppgtt_create(to_i915(dev), file_priv);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
idr_remove(&file_priv->context_idr, ctx->user_handle);
- i915_gem_context_unreference(ctx);
+ context_close(ctx);
return ERR_CAST(ppgtt);
}
@@ -388,28 +414,12 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
struct intel_context *ce = &ctx->engine[engine->id];
if (ce->state)
- i915_gem_object_ggtt_unpin(ce->state);
+ i915_vma_unpin(ce->state);
- i915_gem_context_unreference(ctx);
+ i915_gem_context_put(ctx);
}
}
-void i915_gem_context_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- lockdep_assert_held(&dev->struct_mutex);
-
- if (i915.enable_execlists) {
- struct i915_gem_context *ctx;
-
- list_for_each_entry(ctx, &dev_priv->context_list, link)
- intel_lr_context_reset(dev_priv, ctx);
- }
-
- i915_gem_context_lost(dev_priv);
-}
-
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -504,7 +514,7 @@ void i915_gem_context_fini(struct drm_device *dev)
lockdep_assert_held(&dev->struct_mutex);
- i915_gem_context_unreference(dctx);
+ context_close(dctx);
dev_priv->kernel_context = NULL;
ida_destroy(&dev_priv->context_hw_ida);
@@ -514,8 +524,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
{
struct i915_gem_context *ctx = p;
- ctx->file_priv = ERR_PTR(-EBADF);
- i915_gem_context_unreference(ctx);
+ context_close(ctx);
return 0;
}
@@ -552,12 +561,13 @@ static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct drm_i915_private *dev_priv = req->i915;
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
- i915_semaphore_is_enabled(dev_priv) ?
- hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
+ i915.semaphores ?
+ INTEL_INFO(dev_priv)->num_rings - 1 :
0;
int len, ret;
@@ -567,7 +577,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
* itlb_before_ctx_switch.
*/
if (IS_GEN6(dev_priv)) {
- ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+ ret = engine->emit_flush(req, EMIT_INVALIDATE);
if (ret)
return ret;
}
@@ -589,64 +599,64 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_GEN(dev_priv) >= 7) {
- intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
- intel_ring_emit(engine,
+ intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
- intel_ring_emit_reg(engine,
+ intel_ring_emit_reg(ring,
RING_PSMI_CTL(signaller->mmio_base));
- intel_ring_emit(engine,
+ intel_ring_emit(ring,
_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
}
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_emit(engine, MI_SET_CONTEXT);
- intel_ring_emit(engine,
- i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
- flags);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring,
+ i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
- intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
- intel_ring_emit(engine,
+ intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
last_reg = RING_PSMI_CTL(signaller->mmio_base);
- intel_ring_emit_reg(engine, last_reg);
- intel_ring_emit(engine,
+ intel_ring_emit_reg(ring, last_reg);
+ intel_ring_emit(ring,
_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
/* Insert a delay before the next switch! */
- intel_ring_emit(engine,
+ intel_ring_emit(ring,
MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(engine, last_reg);
- intel_ring_emit(engine, engine->scratch.gtt_offset);
- intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit_reg(ring, last_reg);
+ intel_ring_emit(ring,
+ i915_ggtt_offset(engine->scratch));
+ intel_ring_emit(ring, MI_NOOP);
}
- intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
}
- intel_ring_advance(engine);
+ intel_ring_advance(ring);
return ret;
}
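The emission helpers above follow the usual begin/emit/advance contract; a minimal sketch of emitting into the request's ring (the dword count and commands are illustrative):

	/* Minimal sketch of the ring-emission contract used throughout
	 * mi_set_context(): reserve a dword budget, emit exactly that
	 * many dwords, then advance the ring's tail.
	 */
	ret = intel_ring_begin(req, 2); /* reserve space for 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);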
@@ -654,7 +664,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
u32 *remap_info = req->i915->l3_parity.remap_info[slice];
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
int i, ret;
if (!remap_info)
@@ -669,13 +679,13 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice)
* here because no other code should access these registers other than
* at initialization time.
*/
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
- intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
- intel_ring_emit(engine, remap_info[i]);
+ intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+ intel_ring_emit(ring, remap_info[i]);
}
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
@@ -744,6 +754,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+ struct i915_vma *vma = to->engine[RCS].state;
struct i915_gem_context *from;
u32 hw_flags;
int ret, i;
@@ -751,10 +762,15 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
+ /* Clear this page out of any CPU caches for coherent swap-in/out. */
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ if (ret)
+ return ret;
+ }
+
/* Trying to pin first makes error handling easier. */
- ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
- to->ggtt_alignment,
- 0);
+ ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
if (ret)
return ret;
@@ -767,18 +783,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
*/
from = engine->last_context;
- /*
- * Clear this page out of any CPU caches for coherent swap-in/out. Note
- * that thanks to write = false in this call and us not setting any gpu
- * write domains when putting a context object onto the active list
- * (when switching away from it), this won't block.
- *
- * XXX: We need a real interface to do this instead of trickery.
- */
- ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
- if (ret)
- goto unpin_out;
-
if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
@@ -787,7 +791,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
trace_switch_mm(engine, to);
ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
- goto unpin_out;
+ goto err;
}
if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
@@ -804,7 +808,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags);
if (ret)
- goto unpin_out;
+ goto err;
}
/* The backing object for the context is done after switching to the
@@ -814,8 +818,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
/* As long as MI_SET_CONTEXT is serializing, i.e. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -823,14 +825,12 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->engine[RCS].state->dirty = 1;
-
- /* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->engine[RCS].state);
- i915_gem_context_unreference(from);
+ i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+ /* state is kept alive until the next request */
+ i915_vma_unpin(from->engine[RCS].state);
+ i915_gem_context_put(from);
}
- i915_gem_context_reference(to);
- engine->last_context = to;
+ engine->last_context = i915_gem_context_get(to);
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
@@ -872,8 +872,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
return 0;
-unpin_out:
- i915_gem_object_ggtt_unpin(to->engine[RCS].state);
+err:
+ i915_vma_unpin(vma);
return ret;
}
@@ -894,8 +894,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- WARN_ON(i915.enable_execlists);
lockdep_assert_held(&req->i915->drm.struct_mutex);
+ if (i915.enable_execlists)
+ return 0;
if (!req->ctx->engine[engine->id].state) {
struct i915_gem_context *to = req->ctx;
@@ -914,10 +915,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
}
if (to != engine->last_context) {
- i915_gem_context_reference(to);
if (engine->last_context)
- i915_gem_context_unreference(engine->last_context);
- engine->last_context = to;
+ i915_gem_context_put(engine->last_context);
+ engine->last_context = i915_gem_context_get(to);
}
return 0;
@@ -926,6 +926,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
return do_rcs_switch(req);
}
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+
+ for_each_engine(engine, dev_priv) {
+ struct drm_i915_gem_request *req;
+ int ret;
+
+ if (engine->last_context == NULL)
+ continue;
+
+ if (engine->last_context == dev_priv->kernel_context)
+ continue;
+
+ req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = i915_switch_context(req);
+ i915_add_request_no_flush(req);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static bool contexts_enabled(struct drm_device *dev)
{
return i915.enable_execlists || to_i915(dev)->hw_context_size;
@@ -985,7 +1012,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
}
idr_remove(&file_priv->context_idr, ctx->user_handle);
- i915_gem_context_unreference(ctx);
+ context_close(ctx);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
deleted file mode 100644
index a56516482394..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Keith Packard <keithp@keithp.com>
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-
-#if WATCH_LISTS
-int
-i915_verify_lists(struct drm_device *dev)
-{
- static int warned;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- int err = 0;
-
- if (warned)
- return 0;
-
- for_each_engine(engine, dev_priv) {
- list_for_each_entry(obj, &engine->active_list,
- engine_list[engine->id]) {
- if (obj->base.dev != dev ||
- !atomic_read(&obj->base.refcount.refcount)) {
- DRM_ERROR("%s: freed active obj %p\n",
- engine->name, obj);
- err++;
- break;
- } else if (!obj->active ||
- obj->last_read_req[engine->id] == NULL) {
- DRM_ERROR("%s: invalid active obj %p\n",
- engine->name, obj);
- err++;
- } else if (obj->base.write_domain) {
- DRM_ERROR("%s: invalid write obj %p (w %x)\n",
- engine->name,
- obj, obj->base.write_domain);
- err++;
- }
- }
- }
-
- return warned = err;
-}
-#endif /* WATCH_LIST */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 80bbe43a2e92..97c9d68b45df 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -23,9 +23,13 @@
* Authors:
* Dave Airlie <airlied@redhat.com>
*/
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
#include <drm/drmP.h>
+
#include "i915_drv.h"
-#include <linux/dma-buf.h>
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
@@ -115,7 +119,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
if (ret)
return ERR_PTR(ret);
- addr = i915_gem_object_pin_map(obj);
+ addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
mutex_unlock(&dev->struct_mutex);
return addr;
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
+static void export_fences(struct drm_i915_gem_object *obj,
+ struct dma_buf *dma_buf)
+{
+ struct reservation_object *resv = dma_buf->resv;
+ struct drm_i915_gem_request *req;
+ unsigned long active;
+ int idx;
+
+ active = __I915_BO_ACTIVE(obj);
+ if (!active)
+ return;
+
+ /* Serialise with execbuf to prevent concurrent fence-loops */
+ mutex_lock(&obj->base.dev->struct_mutex);
+
+ /* Mark the object for future fences before racily adding old fences */
+ obj->base.dma_buf = dma_buf;
+
+ ww_mutex_lock(&resv->lock, NULL);
+
+ for_each_active(active, idx) {
+ req = i915_gem_active_get(&obj->last_read[idx],
+ &obj->base.dev->struct_mutex);
+ if (!req)
+ continue;
+
+ if (reservation_object_reserve_shared(resv) == 0)
+ reservation_object_add_shared_fence(resv, &req->fence);
+
+ i915_gem_request_put(req);
+ }
+
+ req = i915_gem_active_get(&obj->last_write,
+ &obj->base.dev->struct_mutex);
+ if (req) {
+ reservation_object_add_excl_fence(resv, &req->fence);
+ i915_gem_request_put(req);
+ }
+
+ ww_mutex_unlock(&resv->lock);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+}
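The payoff of export_fences() is on the importing side, where a consumer can honour those fences through the shared reservation object; a hedged sketch using the reservation API of this kernel era:

	/* Hedged sketch of an importer honouring the fences attached
	 * above: wait for the exclusive writer and, with wait_all set,
	 * all shared readers too.
	 */
	long timeout;

	timeout = reservation_object_wait_timeout_rcu(dma_buf->resv,
						      true, /* wait_all */
						      true, /* interruptible */
						      MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout; /* e.g. -ERESTARTSYS */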
+
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dma_buf;
exp_info.ops = &i915_dmabuf_ops;
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
-
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
if (ret)
return ERR_PTR(ret);
}
- return dma_buf_export(&exp_info);
+ dma_buf = drm_gem_dmabuf_export(dev, &exp_info);
+ if (IS_ERR(dma_buf))
+ return dma_buf;
+
+ export_fences(obj, dma_buf);
+ return dma_buf;
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -278,8 +330,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
* Importing dmabuf exported from our own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
- drm_gem_object_reference(&obj->base);
- return &obj->base;
+ return &i915_gem_object_get(obj)->base;
}
}
@@ -300,6 +351,16 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
+ /* We use GTT as shorthand for a coherent domain, one that is
+ * neither in the GPU cache nor in the CPU cache, where all
+ * writes are immediately visible in memory. (That's not strictly
+ * true, but it's close! There are internal buffers such as the
+ * write-combined buffer or a delay through the chipset for GTT
+ * writes that do require us to treat GTT as a separate cache domain.)
+ */
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = 0;
+
return &obj->base;
fail_detach:
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3c1280ec7ff6..5b6f81c1dbca 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,53 +33,37 @@
#include "intel_drv.h"
#include "i915_trace.h"
-static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+static bool
+gpu_is_idle(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- if (i915.enable_execlists)
- return 0;
-
for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_request *req;
- int ret;
-
- if (engine->last_context == NULL)
- continue;
-
- if (engine->last_context == dev_priv->kernel_context)
- continue;
-
- req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- ret = i915_switch_context(req);
- i915_add_request_no_flush(req);
- if (ret)
- return ret;
+ if (intel_engine_is_active(engine))
+ return false;
}
- return 0;
+ return true;
}
-
static bool
-mark_free(struct i915_vma *vma, struct list_head *unwind)
+mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
{
- if (vma->pin_count)
+ if (i915_vma_is_pinned(vma))
return false;
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
+ if (flags & PIN_NONFAULT && vma->obj->fault_mappable)
+ return false;
+
list_add(&vma->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
/**
* i915_gem_evict_something - Evict vmas to make room for binding a new one
- * @dev: drm_device
* @vm: address space to evict from
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
@@ -102,42 +86,37 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
* memory in e.g. the shrinker.
*/
int
-i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
- int min_size, unsigned alignment, unsigned cache_level,
- unsigned long start, unsigned long end,
+i915_gem_evict_something(struct i915_address_space *vm,
+ u64 min_size, u64 alignment,
+ unsigned cache_level,
+ u64 start, u64 end,
unsigned flags)
{
- struct list_head eviction_list, unwind_list;
- struct i915_vma *vma;
- int ret = 0;
- int pass = 0;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct list_head eviction_list;
+ struct list_head *phases[] = {
+ &vm->inactive_list,
+ &vm->active_list,
+ NULL,
+ }, **phase;
+ struct i915_vma *vma, *next;
+ int ret;
- trace_i915_gem_evict(dev, min_size, alignment, flags);
+ trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
- * retirement order. The next objects to retire are those on the (per
- * ring) active list that do not have an outstanding flush. Once the
- * hardware reports completion (the seqno is updated after the
- * batchbuffer has been finished) the clean buffer objects would
- * be retired to the inactive list. Any dirty objects would be added
- * to the tail of the flushing list. So after processing the clean
- * active objects we need to emit a MI_FLUSH to retire the flushing
- * list, hence the retirement order of the flushing list is in
- * advance of the dirty objects on the active lists.
+ * retirement order. The next objects to retire are those in flight,
+ * on the active list, again in retirement order.
*
* The retirement sequence is thus:
* 1. Inactive objects (already retired)
- * 2. Clean active objects
- * 3. Flushing list
- * 4. Dirty active objects.
+ * 2. Active objects (will stall on unbinding)
*
* On each list, the oldest objects lie at the HEAD with the freshest
* object on the TAIL.
*/
-
- INIT_LIST_HEAD(&unwind_list);
if (start != 0 || end != vm->total) {
drm_mm_init_scan_with_range(&vm->mm, min_size,
alignment, cache_level,
@@ -145,96 +124,86 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
-search_again:
- /* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(vma, &vm->inactive_list, vm_link) {
- if (mark_free(vma, &unwind_list))
- goto found;
- }
-
if (flags & PIN_NONBLOCK)
- goto none;
+ phases[1] = NULL;
- /* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(vma, &vm->active_list, vm_link) {
- if (mark_free(vma, &unwind_list))
- goto found;
- }
+search_again:
+ INIT_LIST_HEAD(&eviction_list);
+ phase = phases;
+ do {
+ list_for_each_entry(vma, *phase, vm_link)
+ if (mark_free(vma, flags, &eviction_list))
+ goto found;
+ } while (*++phase);
-none:
/* Nothing found, clean up and bail out! */
- while (!list_empty(&unwind_list)) {
- vma = list_first_entry(&unwind_list,
- struct i915_vma,
- exec_list);
+ list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
- list_del_init(&vma->exec_list);
+ INIT_LIST_HEAD(&vma->exec_list);
}
/* Can we unpin some objects such as idle hw contexts,
- * or pending flips?
+ * or pending flips? But since only the GGTT has global entries
+ * such as scanouts, ringbuffers and contexts, we can skip the
+ * purge when inspecting per-process local address spaces.
*/
- if (flags & PIN_NONBLOCK)
+ if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
return -ENOSPC;
- /* Only idle the GPU and repeat the search once */
- if (pass++ == 0) {
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (i915_is_ggtt(vm)) {
- ret = switch_to_pinned_context(dev_priv);
- if (ret)
- return ret;
- }
-
- ret = i915_gem_wait_for_idle(dev_priv);
- if (ret)
- return ret;
-
- i915_gem_retire_requests(dev_priv);
- goto search_again;
+ if (gpu_is_idle(dev_priv)) {
+ /* If we still have pending pageflip completions, drop
+ * back to userspace to give our workqueues time to
+ * acquire our locks and unpin the old scanouts.
+ */
+ return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
}
- /* If we still have pending pageflip completions, drop
- * back to userspace to give our workqueues time to
- * acquire our locks and unpin the old scanouts.
+ /* Not everything in the GGTT is tracked via vma (otherwise we
+ * could evict as required with minimal stalling) so we are forced
+ * to idle the GPU and explicitly retire outstanding requests in
+ * the hopes that we can then remove contexts and the like only
+ * bound by their active reference.
*/
- return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
+ ret = i915_gem_switch_to_kernel_context(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev_priv);
+ goto search_again;
found:
/* drm_mm doesn't allow any other operations while
- * scanning, therefore store to be evicted objects on a
- * temporary list. */
- INIT_LIST_HEAD(&eviction_list);
- while (!list_empty(&unwind_list)) {
- vma = list_first_entry(&unwind_list,
- struct i915_vma,
- exec_list);
- if (drm_mm_scan_remove_block(&vma->node)) {
- list_move(&vma->exec_list, &eviction_list);
- drm_gem_object_reference(&vma->obj->base);
- continue;
- }
- list_del_init(&vma->exec_list);
+ * scanning, therefore store to-be-evicted objects on a
+ * temporary list and take a reference for all before
+ * calling unbind (which may remove the active reference
+ * of any of our objects, thus corrupting the list).
+ */
+ list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+ if (drm_mm_scan_remove_block(&vma->node))
+ __i915_vma_pin(vma);
+ else
+ list_del_init(&vma->exec_list);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
- struct drm_gem_object *obj;
vma = list_first_entry(&eviction_list,
struct i915_vma,
exec_list);
- obj = &vma->obj->base;
list_del_init(&vma->exec_list);
+ __i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
-
- drm_gem_object_unreference(obj);
}
-
return ret;
}
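The loop above is built on the drm_mm scan API, whose contract is easy to lose in the diff; a condensed sketch mirroring the code above (the contract: every node added to a scan must be removed again before any other drm_mm operation):

	/* Condensed sketch of the drm_mm scan contract relied on above:
	 * add candidates until a hole is found, then remove every node;
	 * only nodes whose removal returns true make up the hole and
	 * are the ones to unbind afterwards.
	 */
	drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
		list_add(&vma->exec_list, &eviction_list);
		if (drm_mm_scan_add_block(&vma->node))
			break; /* a large enough hole exists */
	}

	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&vma->node))
			__i915_vma_pin(vma); /* part of the hole: evict later */
		else
			list_del_init(&vma->exec_list); /* not needed */
	}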
@@ -256,8 +225,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
vma = container_of(node, typeof(*vma), node);
- if (vma->pin_count) {
- if (!vma->exec_entry || (vma->pin_count > 1))
+ if (i915_vma_is_pinned(vma)) {
+ if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
/* Object is pinned for some other use */
return -EBUSY;
@@ -303,22 +272,23 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct drm_i915_private *dev_priv = to_i915(vm->dev);
if (i915_is_ggtt(vm)) {
- ret = switch_to_pinned_context(dev_priv);
+ ret = i915_gem_switch_to_kernel_context(dev_priv);
if (ret)
return ret;
}
- ret = i915_gem_wait_for_idle(dev_priv);
+ ret = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
if (ret)
return ret;
i915_gem_retire_requests(dev_priv);
-
WARN_ON(!list_empty(&vm->active_list));
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
- if (vma->pin_count == 0)
+ if (!i915_vma_is_pinned(vma))
WARN_ON(i915_vma_unbind(vma));
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b35e5b6475b2..a218c2e395e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,22 +26,42 @@
*
*/
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
#include <drm/drmP.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
+#include "intel_frontbuffer.h"
+
+#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
-#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
+#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+#define __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
#define BATCH_OFFSET_BIAS (256*1024)
+struct i915_execbuffer_params {
+ struct drm_device *dev;
+ struct drm_file *file;
+ struct i915_vma *batch;
+ u32 dispatch_flags;
+ u32 args_batch_start_offset;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct drm_i915_gem_request *request;
+};
+
struct eb_vmas {
+ struct drm_i915_private *i915;
struct list_head vmas;
int and;
union {
@@ -51,7 +71,8 @@ struct eb_vmas {
};
static struct eb_vmas *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+eb_create(struct drm_i915_private *i915,
+ struct drm_i915_gem_execbuffer2 *args)
{
struct eb_vmas *eb = NULL;
@@ -78,6 +99,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
} else
eb->and = -args->buffer_count;
+ eb->i915 = i915;
INIT_LIST_HEAD(&eb->vmas);
return eb;
}
@@ -89,6 +111,26 @@ eb_reset(struct eb_vmas *eb)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
+static struct i915_vma *
+eb_get_batch(struct eb_vmas *eb)
+{
+ struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+ /*
+ * SNA is doing fancy tricks with compressing batch buffers, which leads
+ * to negative relocation deltas. Usually that works out ok since the
+ * relocate address is still positive, except when the batch is placed
+ * very low in the GTT. Ensure this doesn't happen.
+ *
+ * Note that actual hangs have only been observed on gen7, but for
+ * paranoia do it everywhere.
+ */
+ if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
+ vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+ return vma;
+}
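
The bias is easiest to see with concrete numbers: a negative relocation delta against a batch pinned at the very bottom of the GTT wraps in unsigned arithmetic, while the 256k bias keeps the resolved address positive. A minimal userspace sketch of just that arithmetic (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define BATCH_OFFSET_BIAS (256 * 1024)

int main(void)
{
	int32_t delta = -4096;       /* negative delta from a packed batch */
	uint64_t unbiased = 0;       /* batch placed at GTT offset 0 */
	uint64_t biased = BATCH_OFFSET_BIAS;

	/* 0 + (-4096) wraps to 0xfffffffffffff000 in unsigned math */
	printf("unbiased: %#llx\n", (unsigned long long)(unbiased + delta));
	/* 0x40000 + (-4096) stays a valid, positive GTT address */
	printf("biased:   %#llx\n", (unsigned long long)(biased + delta));
	return 0;
}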
+
static int
eb_lookup_vmas(struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec,
@@ -122,7 +164,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
goto err;
}
- drm_gem_object_reference(&obj->base);
+ i915_gem_object_get(obj);
list_add_tail(&obj->obj_exec_link, &objects);
}
spin_unlock(&file->table_lock);
@@ -143,8 +185,8 @@ eb_lookup_vmas(struct eb_vmas *eb,
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
- if (IS_ERR(vma)) {
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+ if (unlikely(IS_ERR(vma))) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
goto err;
@@ -175,7 +217,7 @@ err:
struct drm_i915_gem_object,
obj_exec_link);
list_del_init(&obj->obj_exec_link);
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
}
/*
* Objects already transferred to the vmas list will be unreferenced by
@@ -208,7 +250,6 @@ static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry;
- struct drm_i915_gem_object *obj = vma->obj;
if (!drm_mm_node_allocated(&vma->node))
return;
@@ -216,10 +257,10 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
entry = vma->exec_entry;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
- i915_gem_object_unpin_fence(obj);
+ i915_vma_unpin_fence(vma);
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
- vma->pin_count--;
+ __i915_vma_unpin(vma);
entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
@@ -234,13 +275,19 @@ static void eb_destroy(struct eb_vmas *eb)
exec_list);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
- drm_gem_object_unreference(&vma->obj->base);
+ i915_vma_put(vma);
}
kfree(eb);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
+ if (!i915_gem_object_has_struct_page(obj))
+ return false;
+
+ if (DBG_USE_CPU_RELOC)
+ return DBG_USE_CPU_RELOC > 0;
+
return (HAS_LLC(obj->base.dev) ||
obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
obj->cache_level != I915_CACHE_NONE);
@@ -265,144 +312,265 @@ static inline uint64_t gen8_noncanonical_addr(uint64_t address)
}
static inline uint64_t
-relocation_target(struct drm_i915_gem_relocation_entry *reloc,
+relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
uint64_t target_offset)
{
return gen8_canonical_addr((int)reloc->delta + target_offset);
}
-static int
-relocate_entry_cpu(struct drm_i915_gem_object *obj,
- struct drm_i915_gem_relocation_entry *reloc,
- uint64_t target_offset)
+struct reloc_cache {
+ struct drm_i915_private *i915;
+ struct drm_mm_node node;
+ unsigned long vaddr;
+ unsigned int page;
+ bool use_64bit_reloc;
+};
+
+static void reloc_cache_init(struct reloc_cache *cache,
+ struct drm_i915_private *i915)
{
- struct drm_device *dev = obj->base.dev;
- uint32_t page_offset = offset_in_page(reloc->offset);
- uint64_t delta = relocation_target(reloc, target_offset);
- char *vaddr;
- int ret;
+ cache->page = -1;
+ cache->vaddr = 0;
+ cache->i915 = i915;
+ cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
+ cache->node.allocated = false;
+}
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret)
- return ret;
+static inline void *unmask_page(unsigned long p)
+{
+ return (void *)(uintptr_t)(p & PAGE_MASK);
+}
+
+static inline unsigned int unmask_flags(unsigned long p)
+{
+ return p & ~PAGE_MASK;
+}
- vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
- reloc->offset >> PAGE_SHIFT));
- *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
+#define KMAP 0x4 /* after CLFLUSH_FLAGS */
+
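unmask_page()/unmask_flags() work because kmap_atomic() and io_mapping_map_atomic_wc() return page-aligned addresses, leaving the low bits of cache->vaddr free to carry KMAP and the clflush flags. A standalone sketch of the packing trick (userspace; the 0x1 flush-flag value is an assumption for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	void *page;
	unsigned long packed;

	/* page-aligned allocation stands in for kmap_atomic() */
	if (posix_memalign(&page, PAGE_SIZE, PAGE_SIZE))
		return 1;

	packed = (unsigned long)page | 0x4 | 0x1;     /* KMAP | a flush flag */
	assert((void *)(packed & PAGE_MASK) == page); /* unmask_page() */
	assert((packed & ~PAGE_MASK) == 0x5);         /* unmask_flags() */

	free(page);
	return 0;
}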
+static void reloc_cache_fini(struct reloc_cache *cache)
+{
+ void *vaddr;
+
+ if (!cache->vaddr)
+ return;
- if (INTEL_INFO(dev)->gen >= 8) {
- page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+ vaddr = unmask_page(cache->vaddr);
+ if (cache->vaddr & KMAP) {
+ if (cache->vaddr & CLFLUSH_AFTER)
+ mb();
- if (page_offset == 0) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
- (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+ kunmap_atomic(vaddr);
+ i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
+ } else {
+ wmb();
+ io_mapping_unmap_atomic((void __iomem *)vaddr);
+ if (cache->node.allocated) {
+ struct i915_ggtt *ggtt = &cache->i915->ggtt;
+
+ ggtt->base.clear_range(&ggtt->base,
+ cache->node.start,
+ cache->node.size,
+ true);
+ drm_mm_remove_node(&cache->node);
+ } else {
+ i915_vma_unpin((struct i915_vma *)cache->node.mm);
}
+ }
+}
+
+static void *reloc_kmap(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ int page)
+{
+ void *vaddr;
+
+ if (cache->vaddr) {
+ kunmap_atomic(unmask_page(cache->vaddr));
+ } else {
+ unsigned int flushes;
+ int ret;
- *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
+ ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
+ if (ret)
+ return ERR_PTR(ret);
+
+ BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
+ BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
+
+ cache->vaddr = flushes | KMAP;
+ cache->node.mm = (void *)obj;
+ if (flushes)
+ mb();
}
- kunmap_atomic(vaddr);
+ vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
+ cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
+ cache->page = page;
- return 0;
+ return vaddr;
}
-static int
-relocate_entry_gtt(struct drm_i915_gem_object *obj,
- struct drm_i915_gem_relocation_entry *reloc,
- uint64_t target_offset)
+static void *reloc_iomap(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ int page)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- uint64_t delta = relocation_target(reloc, target_offset);
- uint64_t offset;
- void __iomem *reloc_page;
- int ret;
+ struct i915_ggtt *ggtt = &cache->i915->ggtt;
+ unsigned long offset;
+ void *vaddr;
+
+ if (cache->node.allocated) {
+ wmb();
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, page),
+ cache->node.start, I915_CACHE_NONE, 0);
+ cache->page = page;
+ return unmask_page(cache->vaddr);
+ }
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
+ if (cache->vaddr) {
+ io_mapping_unmap_atomic(unmask_page(cache->vaddr));
+ } else {
+ struct i915_vma *vma;
+ int ret;
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- return ret;
+ if (use_cpu_reloc(obj))
+ return NULL;
- /* Map the page containing the relocation we're going to perform. */
- offset = i915_gem_obj_ggtt_offset(obj);
- offset += reloc->offset;
- reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
- offset & PAGE_MASK);
- iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
-
- if (INTEL_INFO(dev)->gen >= 8) {
- offset += sizeof(uint32_t);
-
- if (offset_in_page(offset) == 0) {
- io_mapping_unmap_atomic(reloc_page);
- reloc_page =
- io_mapping_map_atomic_wc(ggtt->mappable,
- offset);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE | PIN_NONBLOCK);
+ if (IS_ERR(vma)) {
+ memset(&cache->node, 0, sizeof(cache->node));
+ ret = drm_mm_insert_node_in_range_generic
+ (&ggtt->base.mm, &cache->node,
+ 4096, 0, 0,
+ 0, ggtt->mappable_end,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ if (ret) /* no inactive aperture space, use cpu reloc */
+ return NULL;
+ } else {
+ ret = i915_vma_put_fence(vma);
+ if (ret) {
+ i915_vma_unpin(vma);
+ return ERR_PTR(ret);
+ }
+
+ cache->node.start = vma->node.start;
+ cache->node.mm = (void *)vma;
}
+ }
- iowrite32(upper_32_bits(delta),
- reloc_page + offset_in_page(offset));
+ offset = cache->node.start;
+ if (cache->node.allocated) {
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
+ } else {
+ offset += page << PAGE_SHIFT;
}
- io_mapping_unmap_atomic(reloc_page);
+ vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
+ cache->page = page;
+ cache->vaddr = (unsigned long)vaddr;
- return 0;
+ return vaddr;
}
-static void
-clflush_write32(void *addr, uint32_t value)
+static void *reloc_vaddr(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ int page)
{
- /* This is not a fast path, so KISS. */
- drm_clflush_virt_range(addr, sizeof(uint32_t));
- *(uint32_t *)addr = value;
- drm_clflush_virt_range(addr, sizeof(uint32_t));
+ void *vaddr;
+
+ if (cache->page == page) {
+ vaddr = unmask_page(cache->vaddr);
+ } else {
+ vaddr = NULL;
+ if ((cache->vaddr & KMAP) == 0)
+ vaddr = reloc_iomap(obj, cache, page);
+ if (!vaddr)
+ vaddr = reloc_kmap(obj, cache, page);
+ }
+
+ return vaddr;
}
-static int
-relocate_entry_clflush(struct drm_i915_gem_object *obj,
- struct drm_i915_gem_relocation_entry *reloc,
- uint64_t target_offset)
+static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
- struct drm_device *dev = obj->base.dev;
- uint32_t page_offset = offset_in_page(reloc->offset);
- uint64_t delta = relocation_target(reloc, target_offset);
- char *vaddr;
- int ret;
+ if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
+ if (flushes & CLFLUSH_BEFORE) {
+ clflushopt(addr);
+ mb();
+ }
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
+ *addr = value;
- vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
- reloc->offset >> PAGE_SHIFT));
- clflush_write32(vaddr + page_offset, lower_32_bits(delta));
+ /* Writes to the same cacheline are serialised by the CPU
+ * (including clflush). On the write path, we only require
+ * that it hits memory in an orderly fashion, so we place
+ * mb barriers at the start and end of the relocation phase
+ * to ensure ordering of clflush wrt the system.
+ */
+ if (flushes & CLFLUSH_AFTER)
+ clflushopt(addr);
+ } else
+ *addr = value;
+}
- if (INTEL_INFO(dev)->gen >= 8) {
- page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+static int
+relocate_entry(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_relocation_entry *reloc,
+ struct reloc_cache *cache,
+ u64 target_offset)
+{
+ u64 offset = reloc->offset;
+ bool wide = cache->use_64bit_reloc;
+ void *vaddr;
+
+ target_offset = relocation_target(reloc, target_offset);
+repeat:
+ vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ clflush_write32(vaddr + offset_in_page(offset),
+ lower_32_bits(target_offset),
+ cache->vaddr);
+
+ if (wide) {
+ offset += sizeof(u32);
+ target_offset >>= 32;
+ wide = false;
+ goto repeat;
+ }
- if (page_offset == 0) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
- (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
- }
+ return 0;
+}
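
relocate_entry() collapses the old CPU/GTT/clflush paths into one loop: on gen8+ (use_64bit_reloc) the value is written as two 32-bit halves, and re-entering through the repeat label lets reloc_vaddr() remap when the second half lands on the next page. A userspace sketch of just that control flow (map_page() is a stand-in for reloc_vaddr()):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static uint32_t backing[2 * PAGE_SIZE / sizeof(uint32_t)]; /* two fake pages */

static void *map_page(unsigned int page)
{
	return (uint8_t *)backing + (size_t)page * PAGE_SIZE;
}

static void write_reloc(uint64_t offset, uint64_t value, int wide)
{
	uint8_t *vaddr;
repeat:
	vaddr = map_page((unsigned int)(offset / PAGE_SIZE));
	*(uint32_t *)(vaddr + (offset & (PAGE_SIZE - 1))) = (uint32_t)value;
	if (wide) {              /* second half may fall on the next page */
		offset += sizeof(uint32_t);
		value >>= 32;
		wide = 0;
		goto repeat;
	}
}

int main(void)
{
	/* 64-bit relocation whose halves straddle a page boundary */
	write_reloc(PAGE_SIZE - 4, 0x1122334455667788ull, 1);
	printf("%08x %08x\n", backing[PAGE_SIZE / 4 - 1], backing[PAGE_SIZE / 4]);
	return 0;
}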
- clflush_write32(vaddr + page_offset, upper_32_bits(delta));
- }
+static bool object_is_idle(struct drm_i915_gem_object *obj)
+{
+ unsigned long active = i915_gem_object_get_active(obj);
+ int idx;
- kunmap_atomic(vaddr);
+ for_each_active(active, idx) {
+ if (!i915_gem_active_is_idle(&obj->last_read[idx],
+ &obj->base.dev->struct_mutex))
+ return false;
+ }
- return 0;
+ return true;
}
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_vmas *eb,
- struct drm_i915_gem_relocation_entry *reloc)
+ struct drm_i915_gem_relocation_entry *reloc,
+ struct reloc_cache *cache)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
@@ -465,7 +633,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset >
- obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
+ obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
@@ -482,26 +650,15 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
/* We can't wait for rendering with pagefaults disabled */
- if (obj->active && pagefault_disabled())
+ if (pagefault_disabled() && !object_is_idle(obj))
return -EFAULT;
- if (use_cpu_reloc(obj))
- ret = relocate_entry_cpu(obj, reloc, target_offset);
- else if (obj->map_and_fenceable)
- ret = relocate_entry_gtt(obj, reloc, target_offset);
- else if (static_cpu_has(X86_FEATURE_CLFLUSH))
- ret = relocate_entry_clflush(obj, reloc, target_offset);
- else {
- WARN_ONCE(1, "Impossible case in relocation handling\n");
- ret = -ENODEV;
- }
-
+ ret = relocate_entry(obj, reloc, cache, target_offset);
if (ret)
return ret;
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
-
return 0;
}
@@ -513,9 +670,11 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- int remain, ret;
+ struct reloc_cache cache;
+ int remain, ret = 0;
user_relocs = u64_to_user_ptr(entry->relocs_ptr);
+ reloc_cache_init(&cache, eb->i915);
remain = entry->relocation_count;
while (remain) {
@@ -525,19 +684,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
count = ARRAY_SIZE(stack_reloc);
remain -= count;
- if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
- return -EFAULT;
+ if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
+ ret = -EFAULT;
+ goto out;
+ }
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
if (ret)
- return ret;
+ goto out;
if (r->presumed_offset != offset &&
- __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
- return -EFAULT;
+ __put_user(r->presumed_offset,
+ &user_relocs->presumed_offset)) {
+ ret = -EFAULT;
+ goto out;
}
user_relocs++;
@@ -545,7 +708,9 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
} while (--count);
}
- return 0;
+out:
+ reloc_cache_fini(&cache);
+ return ret;
#undef N_RELOC
}
@@ -555,15 +720,18 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
struct drm_i915_gem_relocation_entry *relocs)
{
const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- int i, ret;
+ struct reloc_cache cache;
+ int i, ret = 0;
+ reloc_cache_init(&cache, eb->i915);
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
+ ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
if (ret)
- return ret;
+ break;
}
+ reloc_cache_fini(&cache);
- return 0;
+ return ret;
}
static int
@@ -626,23 +794,27 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
flags |= PIN_HIGH;
}
- ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
- if ((ret == -ENOSPC || ret == -E2BIG) &&
+ ret = i915_vma_pin(vma,
+ entry->pad_to_size,
+ entry->alignment,
+ flags);
+ if ((ret == -ENOSPC || ret == -E2BIG) &&
only_mappable_for_reloc(entry->flags))
- ret = i915_gem_object_pin(obj, vma->vm,
- entry->alignment,
- flags & ~PIN_MAPPABLE);
+ ret = i915_vma_pin(vma,
+ entry->pad_to_size,
+ entry->alignment,
+ flags & ~PIN_MAPPABLE);
if (ret)
return ret;
entry->flags |= __EXEC_OBJECT_HAS_PIN;
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- ret = i915_gem_object_get_fence(obj);
+ ret = i915_vma_get_fence(vma);
if (ret)
return ret;
- if (i915_gem_object_pin_fence(obj))
+ if (i915_vma_pin_fence(vma))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
}
@@ -667,7 +839,7 @@ need_reloc_mappable(struct i915_vma *vma)
if (entry->relocation_count == 0)
return false;
- if (!vma->is_ggtt)
+ if (!i915_vma_is_ggtt(vma))
return false;
/* See also use_cpu_reloc() */
@@ -684,14 +856,17 @@ static bool
eb_vma_misplaced(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- struct drm_i915_gem_object *obj = vma->obj;
- WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
+ WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+ !i915_vma_is_ggtt(vma));
if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
return true;
+ if (vma->node.size < entry->pad_to_size)
+ return true;
+
if (entry->flags & EXEC_OBJECT_PINNED &&
vma->node.start != entry->offset)
return true;
@@ -701,7 +876,8 @@ eb_vma_misplaced(struct i915_vma *vma)
return true;
/* avoid costly ping-pong once a batch bo ended up non-mappable */
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+ if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+ !i915_vma_is_map_and_fenceable(vma))
return !only_mappable_for_reloc(entry->flags);
if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
@@ -725,8 +901,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
int retry;
- i915_gem_retire_requests_ring(engine);
-
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
INIT_LIST_HEAD(&ordered_vmas);
@@ -746,7 +920,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
+ i915_gem_object_is_tiled(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
if (entry->flags & EXEC_OBJECT_PINNED)
@@ -843,7 +1017,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
- drm_gem_object_unreference(&vma->obj->base);
+ i915_vma_put(vma);
}
mutex_unlock(&dev->struct_mutex);
@@ -937,23 +1111,45 @@ err:
return ret;
}
+static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
+{
+ unsigned int mask;
+
+ mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
+ mask <<= I915_BO_ACTIVE_SHIFT;
+
+ return mask;
+}
+
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned other_rings = ~intel_engine_flag(req->engine);
+ const unsigned int other_rings = eb_other_engines(req);
struct i915_vma *vma;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
+ struct reservation_object *resv;
- if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, req->engine, &req);
+ if (obj->flags & other_rings) {
+ ret = i915_gem_request_await_object
+ (req, obj, obj->base.pending_write_domain);
if (ret)
return ret;
}
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (resv) {
+ ret = i915_sw_fence_await_reservation
+ (&req->submit, resv, &i915_fence_ops,
+ obj->base.pending_write_domain, 10*HZ,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (ret < 0)
+ return ret;
+ }
+
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj, false);
}
@@ -961,10 +1157,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
/* Unconditionally flush any chipset caches (for streaming writes). */
i915_gem_chipset_flush(req->engine->i915);
- /* Unconditionally invalidate gpu caches and ensure that we do flush
- * any residual writes from the previous batch.
- */
- return intel_ring_invalidate_all_caches(req);
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ return req->engine->emit_flush(req, EMIT_INVALIDATE);
}
static bool
@@ -1000,6 +1194,9 @@ validate_exec_list(struct drm_device *dev,
unsigned invalid_flags;
int i;
+ /* INTERNAL flags must not overlap with external ones */
+ BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
+
invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
if (USES_FULL_PPGTT(dev))
invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -1029,6 +1226,14 @@ validate_exec_list(struct drm_device *dev,
if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
return -EINVAL;
+ /* pad_to_size was once a reserved field, so sanitize it */
+ if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
+ if (offset_in_page(exec[i].pad_to_size))
+ return -EINVAL;
+ } else {
+ exec[i].pad_to_size = 0;
+ }
+
/* First check for malicious input causing overflow in
* the worst case where we need to allocate the entire
* relocation tree as a single array.
@@ -1048,7 +1253,7 @@ validate_exec_list(struct drm_device *dev,
return -EFAULT;
if (likely(!i915.prefault_disable)) {
- if (fault_in_multipages_readable(ptr, length))
+ if (fault_in_pages_readable(ptr, length))
return -EFAULT;
}
}
@@ -1060,12 +1265,9 @@ static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *engine, const u32 ctx_id)
{
- struct i915_gem_context *ctx = NULL;
+ struct i915_gem_context *ctx;
struct i915_ctx_hang_stats *hs;
- if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
- return ERR_PTR(-EINVAL);
-
ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;
@@ -1079,66 +1281,107 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ctx;
}
-void
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct drm_i915_gem_request *req,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ const unsigned int idx = req->engine->id;
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+ obj->dirty = 1; /* be paranoid */
+
+ /* Add a reference if we're newly entering the active list.
+ * The order in which we add operations to the retirement queue is
+ * vital here: mark_active adds to the start of the callback list,
+ * such that subsequent callbacks are called first. Therefore we
+ * add the active reference first and queue for it to be dropped
+ * *last*.
+ */
+ if (!i915_gem_object_is_active(obj))
+ i915_gem_object_get(obj);
+ i915_gem_object_set_active(obj, idx);
+ i915_gem_active_set(&obj->last_read[idx], req);
+
+ if (flags & EXEC_OBJECT_WRITE) {
+ i915_gem_active_set(&obj->last_write, req);
+
+ intel_fb_obj_invalidate(obj, ORIGIN_CS);
+
+ /* update for the implicit flush after a batch */
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
+ }
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ i915_gem_active_set(&vma->last_fence, req);
+
+ i915_vma_set_active(vma, idx);
+ i915_gem_active_set(&vma->last_read[idx], req);
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
+}
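
The ordering argument in the comment hinges on the retirement queue being LIFO: callbacks are added to the front of the list and run front to back, so the hook registered first fires last. A toy model of that invariant (illustrative types, not the driver's):

#include <stdio.h>

struct cb { void (*fn)(void); struct cb *next; };
static struct cb *head;

static void mark_active(struct cb *c)  /* adds to the start of the list */
{
	c->next = head;
	head = c;
}

static void retire(void)               /* callbacks run front to back */
{
	for (struct cb *c = head; c; c = c->next)
		c->fn();
}

static void drop_ref(void)    { puts("drop object reference (runs last)"); }
static void engine_hook(void) { puts("per-engine retire (runs first)"); }

int main(void)
{
	struct cb ref = { drop_ref, 0 }, eng = { engine_hook, 0 };

	mark_active(&ref);  /* registered first ... */
	mark_active(&eng);  /* ... later entries go in front of it */
	retire();           /* reference is therefore dropped *last* */
	return 0;
}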
+
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_request *req,
+ unsigned int flags)
+{
+ struct reservation_object *resv;
+
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (!resv)
+ return;
+
+ /* Ignore errors from failing to allocate the new fence; we can't
+ * handle an error right now. Worst case should be missed
+ * synchronisation leading to rendering corruption.
+ */
+ ww_mutex_lock(&resv->lock, NULL);
+ if (flags & EXEC_OBJECT_WRITE)
+ reservation_object_add_excl_fence(resv, &req->fence);
+ else if (reservation_object_reserve_shared(resv) == 0)
+ reservation_object_add_shared_fence(resv, &req->fence);
+ ww_mutex_unlock(&resv->lock);
+}
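
eb_export_fence() follows the usual reservation-object discipline: a write installs the exclusive fence, a read reserves a slot and adds a shared fence. A conceptual sketch of that rule with invented types, not the kernel's reservation_object API:

struct fence { unsigned int seqno; };

struct resv {
	struct fence *excl;      /* at most one writer */
	struct fence *shared[8]; /* concurrent readers */
	unsigned int shared_count;
};

static void resv_add(struct resv *r, struct fence *f, int is_write)
{
	if (is_write) {
		r->excl = f;         /* a write supersedes all readers */
		r->shared_count = 0;
	} else if (r->shared_count < 8) {
		r->shared[r->shared_count++] = f; /* reads order after excl only */
	}
}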
+
+static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
- obj->dirty = 1; /* be paranoid */
obj->base.write_domain = obj->base.pending_write_domain;
- if (obj->base.write_domain == 0)
+ if (obj->base.write_domain)
+ vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
+ else
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
- i915_vma_move_to_active(vma, req);
- if (obj->base.write_domain) {
- i915_gem_request_assign(&obj->last_write_req, req);
-
- intel_fb_obj_invalidate(obj, ORIGIN_CS);
-
- /* update for the implicit flush after a batch */
- obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
- }
- if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- i915_gem_request_assign(&obj->last_fenced_req, req);
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = engine->i915;
- list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
- &dev_priv->mm.fence_list);
- }
- }
-
+ i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+ eb_export_fence(obj, req, vma->exec_entry->flags);
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
-static void
-i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
-{
- /* Unconditionally force add_request to emit a full flush. */
- params->engine->gpu_caches_dirty = true;
-
- /* Add a breadcrumb for the completion of the batch buffer */
- __i915_add_request(params->request, params->batch_obj, true);
-}
-
static int
-i915_reset_gen7_sol_offsets(struct drm_device *dev,
- struct drm_i915_gem_request *req)
+i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_ring *ring = req->ring;
int ret, i;
- if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
+ if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -1148,21 +1391,21 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return ret;
for (i = 0; i < 4; i++) {
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
- intel_ring_emit(engine, 0);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(ring, 0);
}
- intel_ring_advance(engine);
+ intel_ring_advance(ring);
return 0;
}
-static struct drm_i915_gem_object*
+static struct i915_vma *
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
- struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
+ struct eb_vmas *eb,
u32 batch_start_offset,
u32 batch_len,
bool is_master)
@@ -1174,51 +1417,44 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
- return shadow_batch_obj;
-
- ret = i915_parse_cmds(engine,
- batch_obj,
- shadow_batch_obj,
- batch_start_offset,
- batch_len,
- is_master);
- if (ret)
- goto err;
-
- ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
- if (ret)
- goto err;
+ return ERR_CAST(shadow_batch_obj);
+
+ ret = intel_engine_cmd_parser(engine,
+ batch_obj,
+ shadow_batch_obj,
+ batch_start_offset,
+ batch_len,
+ is_master);
+ if (ret) {
+ if (ret == -EACCES) /* unhandled chained batch */
+ vma = NULL;
+ else
+ vma = ERR_PTR(ret);
+ goto out;
+ }
- i915_gem_object_unpin_pages(shadow_batch_obj);
+ vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma))
+ goto out;
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
- vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
vma->exec_entry = shadow_exec_entry;
vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
- drm_gem_object_reference(&shadow_batch_obj->base);
+ i915_gem_object_get(shadow_batch_obj);
list_add_tail(&vma->exec_list, &eb->vmas);
- shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
-
- return shadow_batch_obj;
-
-err:
+out:
i915_gem_object_unpin_pages(shadow_batch_obj);
- if (ret == -EACCES) /* unhandled chained batch */
- return batch_obj;
- else
- return ERR_PTR(ret);
+ return vma;
}
-int
-i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
- struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas)
+static int
+execbuf_submit(struct i915_execbuffer_params *params,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas)
{
- struct drm_device *dev = params->dev;
- struct intel_engine_cs *engine = params->engine;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = params->request->i915;
u64 exec_start, exec_len;
int instp_mode;
u32 instp_mask;
@@ -1232,34 +1468,31 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (ret)
return ret;
- WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
- "%s didn't clear reload\n", engine->name);
-
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
+ if (instp_mode != 0 && params->engine->id != RCS) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
if (instp_mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_INFO(dev_priv)->gen < 4) {
DRM_DEBUG("no rel constants on pre-gen4\n");
return -EINVAL;
}
- if (INTEL_INFO(dev)->gen > 5 &&
+ if (INTEL_INFO(dev_priv)->gen > 5 &&
instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
return -EINVAL;
}
/* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_INFO(dev_priv)->gen >= 6)
instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
@@ -1268,37 +1501,39 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
return -EINVAL;
}
- if (engine == &dev_priv->engine[RCS] &&
+ if (params->engine->id == RCS &&
instp_mode != dev_priv->relative_constants_mode) {
+ struct intel_ring *ring = params->request->ring;
+
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(engine, INSTPM);
- intel_ring_emit(engine, instp_mask << 16 | instp_mode);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(ring, INSTPM);
+ intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+ intel_ring_advance(ring);
dev_priv->relative_constants_mode = instp_mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- ret = i915_reset_gen7_sol_offsets(dev, params->request);
+ ret = i915_reset_gen7_sol_offsets(params->request);
if (ret)
return ret;
}
exec_len = args->batch_len;
- exec_start = params->batch_obj_vm_offset +
+ exec_start = params->batch->node.start +
params->args_batch_start_offset;
if (exec_len == 0)
- exec_len = params->batch_obj->base.size;
+ exec_len = params->batch->size - params->args_batch_start_offset;
- ret = engine->dispatch_execbuffer(params->request,
- exec_start, exec_len,
- params->dispatch_flags);
+ ret = params->engine->emit_bb_start(params->request,
+ exec_start, exec_len,
+ params->dispatch_flags);
if (ret)
return ret;
@@ -1311,43 +1546,20 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
/**
* Find one BSD ring to dispatch the corresponding BSD command.
- * The ring index is returned.
+ * The engine index is returned.
*/
static unsigned int
-gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
+gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+ struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
/* Check whether the file_priv has already selected one ring. */
- if ((int)file_priv->bsd_ring < 0) {
- /* If not, use the ping-pong mechanism to select one. */
- mutex_lock(&dev_priv->drm.struct_mutex);
- file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
- dev_priv->mm.bsd_ring_dispatch_index ^= 1;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
-
- return file_priv->bsd_ring;
-}
-
-static struct drm_i915_gem_object *
-eb_get_batch(struct eb_vmas *eb)
-{
- struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+ if ((int)file_priv->bsd_engine < 0)
+ file_priv->bsd_engine = atomic_fetch_xor(1,
+ &dev_priv->mm.bsd_engine_dispatch_index);
- /*
- * SNA is doing fancy tricks with compressing batch buffers, which leads
- * to negative relocation deltas. Usually that works out ok since the
- * relocate address is still positive, except when the batch is placed
- * very low in the GTT. Ensure this doesn't happen.
- *
- * Note that actual hangs have only been observed on gen7, but for
- * paranoia do it everywhere.
- */
- if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
- vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
-
- return vma->obj;
+ return file_priv->bsd_engine;
}
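
The rewritten gen8_dispatch_bsd_engine() replaces the mutex-guarded ping-pong with one atomic_fetch_xor(), which reads the current index and flips it for the next caller in a single step; each file then keeps the first value it drew. The same scheme in C11 (note the kernel's atomic_fetch_xor() takes (value, ptr), the reverse argument order of C11):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dispatch_index;     /* shared across all clients */

static unsigned int select_bsd_engine(int *cached) /* per-file state */
{
	if (*cached < 0) /* first call: draw a value and flip the index */
		*cached = (int)(atomic_fetch_xor(&dispatch_index, 1) & 1);
	return (unsigned int)*cached;
}

int main(void)
{
	int file_a = -1, file_b = -1;

	printf("A -> VCS%u\n", select_bsd_engine(&file_a) + 1); /* VCS1 */
	printf("B -> VCS%u\n", select_bsd_engine(&file_b) + 1); /* VCS2 */
	printf("A -> VCS%u\n", select_bsd_engine(&file_a) + 1); /* cached */
	return 0;
}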
#define I915_USER_RINGS (4)
@@ -1360,31 +1572,31 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_VEBOX] = VECS
};
-static int
-eb_select_ring(struct drm_i915_private *dev_priv,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args,
- struct intel_engine_cs **ring)
+static struct intel_engine_cs *
+eb_select_engine(struct drm_i915_private *dev_priv,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args)
{
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+ struct intel_engine_cs *engine;
if (user_ring_id > I915_USER_RINGS) {
DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
- return -EINVAL;
+ return NULL;
}
if ((user_ring_id != I915_EXEC_BSD) &&
((args->flags & I915_EXEC_BSD_MASK) != 0)) {
DRM_DEBUG("execbuf with non bsd ring but with invalid "
"bsd dispatch flags: %d\n", (int)(args->flags));
- return -EINVAL;
+ return NULL;
}
if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
- bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+ bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
bsd_idx <= I915_EXEC_BSD_RING2) {
bsd_idx >>= I915_EXEC_BSD_SHIFT;
@@ -1392,20 +1604,20 @@ eb_select_ring(struct drm_i915_private *dev_priv,
} else {
DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
bsd_idx);
- return -EINVAL;
+ return NULL;
}
- *ring = &dev_priv->engine[_VCS(bsd_idx)];
+ engine = &dev_priv->engine[_VCS(bsd_idx)];
} else {
- *ring = &dev_priv->engine[user_ring_map[user_ring_id]];
+ engine = &dev_priv->engine[user_ring_map[user_ring_id]];
}
- if (!intel_engine_initialized(*ring)) {
+ if (!intel_engine_initialized(engine)) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
- return -EINVAL;
+ return NULL;
}
- return 0;
+ return engine;
}
static int
@@ -1416,9 +1628,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
- struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
@@ -1447,9 +1657,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
- ret = eb_select_ring(dev_priv, file, args, &engine);
- if (ret)
- return ret;
+ engine = eb_select_engine(dev_priv, file, args);
+ if (!engine)
+ return -EINVAL;
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@@ -1489,7 +1699,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
- i915_gem_context_reference(ctx);
+ i915_gem_context_get(ctx);
if (ctx->ppgtt)
vm = &ctx->ppgtt->base;
@@ -1498,9 +1708,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
memset(&params_master, 0x00, sizeof(params_master));
- eb = eb_create(args);
+ eb = eb_create(dev_priv, args);
if (eb == NULL) {
- i915_gem_context_unreference(ctx);
+ i915_gem_context_put(ctx);
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
goto pre_mutex_err;
@@ -1512,7 +1722,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
/* take note of the batch buffer before we might reorder the lists */
- batch_obj = eb_get_batch(eb);
+ params->batch = eb_get_batch(eb);
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1536,34 +1746,34 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Set the pending read domains for the batch buffer to COMMAND */
- if (batch_obj->base.pending_write_domain) {
+ if (params->batch->obj->base.pending_write_domain) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
+ if (args->batch_start_offset > params->batch->size ||
+ args->batch_len > params->batch->size - args->batch_start_offset) {
+ DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+ ret = -EINVAL;
+ goto err;
+ }
params->args_batch_start_offset = args->batch_start_offset;
- if (i915_needs_cmd_parser(engine) && args->batch_len) {
- struct drm_i915_gem_object *parsed_batch_obj;
-
- parsed_batch_obj = i915_gem_execbuffer_parse(engine,
- &shadow_exec_entry,
- eb,
- batch_obj,
- args->batch_start_offset,
- args->batch_len,
- drm_is_current_master(file));
- if (IS_ERR(parsed_batch_obj)) {
- ret = PTR_ERR(parsed_batch_obj);
+ if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
+ struct i915_vma *vma;
+
+ vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
+ params->batch->obj,
+ eb,
+ args->batch_start_offset,
+ args->batch_len,
+ drm_is_current_master(file));
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto err;
}
- /*
- * parsed_batch_obj == batch_obj means batch not fully parsed:
- * Accept, but don't promote to secure.
- */
-
- if (parsed_batch_obj != batch_obj) {
+ if (vma) {
/*
* Batch parsed and accepted:
*
@@ -1575,16 +1785,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
dispatch_flags |= I915_DISPATCH_SECURE;
params->args_batch_start_offset = 0;
- batch_obj = parsed_batch_obj;
+ params->batch = vma;
}
}
- batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+ params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
if (dispatch_flags & I915_DISPATCH_SECURE) {
+ struct drm_i915_gem_object *obj = params->batch->obj;
+ struct i915_vma *vma;
+
/*
* So on first glance it looks freaky that we pin the batch here
* outside of the reservation loop. But:
@@ -1595,22 +1808,31 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* fitting due to fragmentation.
* So this is actually safe.
*/
- ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
- if (ret)
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto err;
+ }
- params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
- } else
- params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
+ params->batch = vma;
+ }
/* Allocate a request for this batch buffer nice and early. */
- req = i915_gem_request_alloc(engine, ctx);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
+ params->request = i915_gem_request_alloc(engine, ctx);
+ if (IS_ERR(params->request)) {
+ ret = PTR_ERR(params->request);
goto err_batch_unpin;
}
- ret = i915_gem_request_add_to_client(req, file);
+ /* Whilst this request exists, batch_obj will be on the
+ * active_list, and so will hold the active reference. Only when this
+ * request is retired will the batch_obj be moved onto the
+ * inactive_list and lose its active reference. Hence we do not need
+ * to explicitly hold another reference here.
+ */
+ params->request->batch = params->batch;
+
+ ret = i915_gem_request_add_to_client(params->request, file);
if (ret)
goto err_request;
@@ -1624,13 +1846,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->file = file;
params->engine = engine;
params->dispatch_flags = dispatch_flags;
- params->batch_obj = batch_obj;
params->ctx = ctx;
- params->request = req;
- ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+ ret = execbuf_submit(params, args, &eb->vmas);
err_request:
- i915_gem_execbuffer_retire_commands(params);
+ __i915_add_request(params->request, ret == 0);
err_batch_unpin:
/*
@@ -1640,11 +1860,10 @@ err_batch_unpin:
* active.
*/
if (dispatch_flags & I915_DISPATCH_SECURE)
- i915_gem_object_ggtt_unpin(batch_obj);
-
+ i915_vma_unpin(params->batch);
err:
/* the request owns the ref now */
- i915_gem_context_unreference(ctx);
+ i915_gem_context_put(ctx);
eb_destroy(eb);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 251d7a95af89..2c7ba0ee127c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -55,226 +55,228 @@
* CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
*/
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
+#define pipelined 0
+
+static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
+ struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift;
+ u64 val;
- if (INTEL_INFO(dev)->gen >= 6) {
- fence_reg_lo = FENCE_REG_GEN6_LO(reg);
- fence_reg_hi = FENCE_REG_GEN6_HI(reg);
+ if (INTEL_INFO(fence->i915)->gen >= 6) {
+ fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
+ fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
+
} else {
- fence_reg_lo = FENCE_REG_965_LO(reg);
- fence_reg_hi = FENCE_REG_965_HI(reg);
+ fence_reg_lo = FENCE_REG_965_LO(fence->id);
+ fence_reg_hi = FENCE_REG_965_HI(fence->id);
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
- /* To w/a incoherency with non-atomic 64-bit register updates,
- * we split the 64-bit update into two 32-bit writes. In order
- * for a partial fence not to be evaluated between writes, we
- * precede the update with write to turn off the fence register,
- * and only enable the fence as the last step.
- *
- * For extra levels of paranoia, we make sure each step lands
- * before applying the next step.
- */
- I915_WRITE(fence_reg_lo, 0);
- POSTING_READ(fence_reg_lo);
-
- if (obj) {
- u32 size = i915_gem_obj_ggtt_size(obj);
- uint64_t val;
-
- /* Adjust fence size to match tiled area */
- if (obj->tiling_mode != I915_TILING_NONE) {
- uint32_t row_size = obj->stride *
- (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
- size = (size / row_size) * row_size;
- }
-
- val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
- 0xfffff000) << 32;
- val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val = 0;
+ if (vma) {
+ unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+ bool is_y_tiled = tiling == I915_TILING_Y;
+ unsigned int stride = i915_gem_object_get_stride(vma->obj);
+ u32 row_size = stride * (is_y_tiled ? 32 : 8);
+ u32 size = rounddown((u32)vma->node.size, row_size);
+
+ val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
+ val |= vma->node.start & 0xfffff000;
+ val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
+ if (is_y_tiled)
+ val |= BIT(I965_FENCE_TILING_Y_SHIFT);
val |= I965_FENCE_REG_VALID;
+ }
- I915_WRITE(fence_reg_hi, val >> 32);
- POSTING_READ(fence_reg_hi);
+ if (!pipelined) {
+ struct drm_i915_private *dev_priv = fence->i915;
- I915_WRITE(fence_reg_lo, val);
+ /* To w/a incoherency with non-atomic 64-bit register updates,
+ * we split the 64-bit update into two 32-bit writes. In order
+ * for a partial fence not to be evaluated between writes, we
+ * precede the update with write to turn off the fence register,
+ * and only enable the fence as the last step.
+ *
+ * For extra levels of paranoia, we make sure each step lands
+ * before applying the next step.
+ */
+ I915_WRITE(fence_reg_lo, 0);
+ POSTING_READ(fence_reg_lo);
+
+ I915_WRITE(fence_reg_hi, upper_32_bits(val));
+ I915_WRITE(fence_reg_lo, lower_32_bits(val));
POSTING_READ(fence_reg_lo);
- } else {
- I915_WRITE(fence_reg_hi, 0);
- POSTING_READ(fence_reg_hi);
}
}
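
The work-around above reduces to a fixed write order for the split 64-bit update: turn the fence off through the low dword (which carries I965_FENCE_REG_VALID), let that land, write the high dword while the fence is invalid, then re-arm via the low dword. A sketch of that ordering, with volatile globals standing in for the MMIO registers and posting reads:

#include <stdint.h>

static volatile uint32_t fence_lo; /* low dword holds the VALID bit */
static volatile uint32_t fence_hi;

static void posting_read(volatile uint32_t *reg) { (void)*reg; }

static void write_fence64(uint64_t val)
{
	fence_lo = 0;             /* 1: invalidate the old fence */
	posting_read(&fence_lo);  /*    and make sure that lands first */

	fence_hi = val >> 32;     /* 2: safe while the fence is off */
	fence_lo = (uint32_t)val; /* 3: VALID bit re-arms the fence */
	posting_read(&fence_lo);
}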
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
+static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
+ struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
- if (obj) {
- u32 size = i915_gem_obj_ggtt_size(obj);
+ val = 0;
+ if (vma) {
+ unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+ bool is_y_tiled = tiling == I915_TILING_Y;
+ unsigned int stride = i915_gem_object_get_stride(vma->obj);
int pitch_val;
int tile_width;
- WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
- (size & -size) != size ||
- (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
- "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+ WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
+ !is_power_of_2(vma->node.size) ||
+ (vma->node.start & (vma->node.size - 1)),
+ "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
+ vma->node.start,
+ i915_vma_is_map_and_fenceable(vma),
+ vma->node.size);
- if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
tile_width = 128;
else
tile_width = 512;
/* Note: pitch better be a power of two tile widths */
- pitch_val = obj->stride / tile_width;
+ pitch_val = stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- val = i915_gem_obj_ggtt_offset(obj);
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(size);
+ val = vma->node.start;
+ if (is_y_tiled)
+ val |= BIT(I830_FENCE_TILING_Y_SHIFT);
+ val |= I915_FENCE_SIZE_BITS(vma->node.size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- } else
- val = 0;
+ }
+
+ if (!pipelined) {
+ struct drm_i915_private *dev_priv = fence->i915;
+ i915_reg_t reg = FENCE_REG(fence->id);
- I915_WRITE(FENCE_REG(reg), val);
- POSTING_READ(FENCE_REG(reg));
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
}
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
+static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
+ struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t val;
+ u32 val;
- if (obj) {
- u32 size = i915_gem_obj_ggtt_size(obj);
- uint32_t pitch_val;
+ val = 0;
+ if (vma) {
+ unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+ bool is_y_tiled = tiling == I915_TILING_Y;
+ unsigned int stride = i915_gem_object_get_stride(vma->obj);
+ u32 pitch_val;
- WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
- (size & -size) != size ||
- (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
- "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
- i915_gem_obj_ggtt_offset(obj), size);
+ WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
+ !is_power_of_2(vma->node.size) ||
+ (vma->node.start & (vma->node.size - 1)),
+ "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
+ vma->node.start, vma->node.size);
- pitch_val = obj->stride / 128;
+ pitch_val = stride / 128;
pitch_val = ffs(pitch_val) - 1;
- val = i915_gem_obj_ggtt_offset(obj);
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I830_FENCE_SIZE_BITS(size);
+ val = vma->node.start;
+ if (is_y_tiled)
+ val |= BIT(I830_FENCE_TILING_Y_SHIFT);
+ val |= I830_FENCE_SIZE_BITS(vma->node.size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- } else
- val = 0;
+ }
- I915_WRITE(FENCE_REG(reg), val);
- POSTING_READ(FENCE_REG(reg));
-}
+ if (!pipelined) {
+ struct drm_i915_private *dev_priv = fence->i915;
+ i915_reg_t reg = FENCE_REG(fence->id);
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
- return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
}
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
- struct drm_i915_gem_object *obj)
+static void fence_write(struct drm_i915_fence_reg *fence,
+ struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /* Ensure that all CPU reads are completed before installing a fence
- * and all writes before removing the fence.
+ /* Previous access through the fence register is marshalled by
+ * the mb() inside the fault handlers (i915_gem_release_mmaps)
+ * and explicitly managed for internal users.
*/
- if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
- mb();
-
- WARN(obj && (!obj->stride || !obj->tiling_mode),
- "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
- obj->stride, obj->tiling_mode);
-
- if (IS_GEN2(dev))
- i830_write_fence_reg(dev, reg, obj);
- else if (IS_GEN3(dev))
- i915_write_fence_reg(dev, reg, obj);
- else if (INTEL_INFO(dev)->gen >= 4)
- i965_write_fence_reg(dev, reg, obj);
-
- /* And similarly be paranoid that no direct access to this region
- * is reordered to before the fence is installed.
+
+ if (IS_GEN2(fence->i915))
+ i830_write_fence_reg(fence, vma);
+ else if (IS_GEN3(fence->i915))
+ i915_write_fence_reg(fence, vma);
+ else
+ i965_write_fence_reg(fence, vma);
+
+ /* Access through the fenced region afterwards is
+ * ordered by the posting reads whilst writing the registers.
*/
- if (i915_gem_object_needs_mb(obj))
- mb();
-}
-static inline int fence_number(struct drm_i915_private *dev_priv,
- struct drm_i915_fence_reg *fence)
-{
- return fence - dev_priv->fence_regs;
+ fence->dirty = false;
}
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
- struct drm_i915_fence_reg *fence,
- bool enable)
+static int fence_update(struct drm_i915_fence_reg *fence,
+ struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- int reg = fence_number(dev_priv, fence);
-
- i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+ int ret;
- if (enable) {
- obj->fence_reg = reg;
- fence->obj = obj;
- list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
- } else {
- obj->fence_reg = I915_FENCE_REG_NONE;
- fence->obj = NULL;
- list_del_init(&fence->lru_list);
- }
- obj->fence_dirty = false;
-}
+ if (vma) {
+ if (!i915_vma_is_map_and_fenceable(vma))
+ return -EINVAL;
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
+ if (WARN(!i915_gem_object_get_stride(vma->obj) ||
+ !i915_gem_object_get_tiling(vma->obj),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ i915_gem_object_get_stride(vma->obj),
+ i915_gem_object_get_tiling(vma->obj)))
+ return -EINVAL;
- /* As we do not have an associated fence register, we will force
- * a tiling change if we ever need to acquire one.
- */
- obj->fence_dirty = false;
- obj->fence_reg = I915_FENCE_REG_NONE;
-}
+ ret = i915_gem_active_retire(&vma->last_fence,
+ &vma->obj->base.dev->struct_mutex);
+ if (ret)
+ return ret;
+ }
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->last_fenced_req) {
- int ret = i915_wait_request(obj->last_fenced_req);
+ if (fence->vma) {
+ ret = i915_gem_active_retire(&fence->vma->last_fence,
+ &fence->vma->obj->base.dev->struct_mutex);
if (ret)
return ret;
+ }
+
+ if (fence->vma && fence->vma != vma) {
+ /* Ensure that all userspace CPU access is completed before
+ * stealing the fence.
+ */
+ i915_gem_release_mmap(fence->vma->obj);
+
+ fence->vma->fence = NULL;
+ fence->vma = NULL;
+
+ list_move(&fence->link, &fence->i915->mm.fence_list);
+ }
+
+ fence_write(fence, vma);
+
+ if (vma) {
+ if (fence->vma != vma) {
+ vma->fence = fence;
+ fence->vma = vma;
+ }
- i915_gem_request_assign(&obj->last_fenced_req, NULL);
+ list_move_tail(&fence->link, &fence->i915->mm.fence_list);
}
return 0;
}
/**
- * i915_gem_object_put_fence - force-remove fence for an object
- * @obj: object to map through a fence reg
+ * i915_vma_put_fence - force-remove fence for a VMA
+ * @vma: vma to map linearly (not through a fence reg)
*
* This function force-removes any fence from the given object, which is useful
* if the kernel wants to do untiled GTT access.
@@ -284,70 +286,42 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
* 0 on success, negative error code on failure.
*/
int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+i915_vma_put_fence(struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct drm_i915_fence_reg *fence;
- int ret;
+ struct drm_i915_fence_reg *fence = vma->fence;
- ret = i915_gem_object_wait_fence(obj);
- if (ret)
- return ret;
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
- if (obj->fence_reg == I915_FENCE_REG_NONE)
+ if (!fence)
return 0;
- fence = &dev_priv->fence_regs[obj->fence_reg];
-
- if (WARN_ON(fence->pin_count))
+ if (fence->pin_count)
return -EBUSY;
- i915_gem_object_fence_lost(obj);
- i915_gem_object_update_fence(obj, fence, false);
-
- return 0;
+ return fence_update(fence, NULL);
}
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
+static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_fence_reg *reg, *avail;
- int i;
-
- /* First try to find a free reg */
- avail = NULL;
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- return reg;
-
- if (!reg->pin_count)
- avail = reg;
- }
-
- if (avail == NULL)
- goto deadlock;
+ struct drm_i915_fence_reg *fence;
- /* None available, try to steal one or wait for a user to finish */
- list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
- if (reg->pin_count)
+ list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+ if (fence->pin_count)
continue;
- return reg;
+ return fence;
}
-deadlock:
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(dev))
+ if (intel_has_pending_fb_unpin(&dev_priv->drm))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
}
/**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
+ * i915_vma_get_fence - set up fencing for a vma
+ * @vma: vma to map through a fence reg
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
@@ -364,103 +338,29 @@ deadlock:
* 0 on success, negative error code on failure.
*/
int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+i915_vma_get_fence(struct i915_vma *vma)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- bool enable = obj->tiling_mode != I915_TILING_NONE;
- struct drm_i915_fence_reg *reg;
- int ret;
+ struct drm_i915_fence_reg *fence;
+ struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
- /* Have we updated the tiling parameters upon the object and so
- * will need to serialise the write to the associated fence register?
- */
- if (obj->fence_dirty) {
- ret = i915_gem_object_wait_fence(obj);
- if (ret)
- return ret;
- }
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
/* Just update our place in the LRU if our fence is getting reused. */
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- reg = &dev_priv->fence_regs[obj->fence_reg];
- if (!obj->fence_dirty) {
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
+ if (vma->fence) {
+ fence = vma->fence;
+ if (!fence->dirty) {
+ list_move_tail(&fence->link,
+ &fence->i915->mm.fence_list);
return 0;
}
- } else if (enable) {
- if (WARN_ON(!obj->map_and_fenceable))
- return -EINVAL;
-
- reg = i915_find_fence_reg(dev);
- if (IS_ERR(reg))
- return PTR_ERR(reg);
-
- if (reg->obj) {
- struct drm_i915_gem_object *old = reg->obj;
-
- ret = i915_gem_object_wait_fence(old);
- if (ret)
- return ret;
-
- i915_gem_object_fence_lost(old);
- }
+ } else if (set) {
+ fence = fence_find(to_i915(vma->vm->dev));
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
} else
return 0;
- i915_gem_object_update_fence(obj, reg, enable);
-
- return 0;
-}
-
-/**
- * i915_gem_object_pin_fence - pin fencing state
- * @obj: object to pin fencing for
- *
- * This pins the fencing state (whether tiled or untiled) to make sure the
- * object is ready to be used as a scanout target. Fencing status must be
- * synchronized first by calling i915_gem_object_get_fence().
- *
- * The resulting fence pin reference must be released again with
- * i915_gem_object_unpin_fence().
- *
- * Returns:
- *
- * True if the object has a fence, false otherwise.
- */
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
- WARN_ON(!ggtt_vma ||
- dev_priv->fence_regs[obj->fence_reg].pin_count >
- ggtt_vma->pin_count);
- dev_priv->fence_regs[obj->fence_reg].pin_count++;
- return true;
- } else
- return false;
-}
-
-/**
- * i915_gem_object_unpin_fence - unpin fencing state
- * @obj: object to unpin fencing for
- *
- * This releases the fence pin reference acquired through
- * i915_gem_object_pin_fence. It will handle both objects with and without an
- * attached fence correctly; callers do not need to distinguish this.
- */
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
- dev_priv->fence_regs[obj->fence_reg].pin_count--;
- }
+ return fence_update(fence, set);
}
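
For a tiled VMA this either refreshes the LRU slot of a clean fence or steals one through fence_find() and rewrites it via fence_update(). A sketch of a GTT-fault-style caller, assuming struct_mutex and an rpm wakeref are held as the assert above requires:

ret = i915_vma_get_fence(vma);
if (ret)
	return ret;

/* Keep the register stable across the access; vma->fence stays
 * NULL for untiled objects, which need no fence at all.
 */
if (vma->fence)
	vma->fence->pin_count++;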
/**
@@ -475,19 +375,31 @@ void i915_gem_restore_fences(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int i;
+ /* Note that this may be called outside of struct_mutex, by
+ * runtime suspend/resume. The barrier we require is enforced by
+ * rpm itself - all access to fences/GTT is only within an rpm
+ * wakeref, and to acquire that wakeref you must pass through here.
+ */
+
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ struct i915_vma *vma = reg->vma;
/*
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
- if (reg->obj) {
- i915_gem_object_update_fence(reg->obj, reg,
- reg->obj->tiling_mode);
- } else {
- i915_gem_write_fence(dev, i, NULL);
+ if (vma && !i915_gem_object_is_tiled(vma->obj)) {
+ GEM_BUG_ON(!reg->dirty);
+ GEM_BUG_ON(vma->obj->fault_mappable);
+
+ list_move(&reg->link, &dev_priv->mm.fence_list);
+ vma->fence = NULL;
+ vma = NULL;
}
+
+ fence_write(reg, vma);
+ reg->vma = vma;
}
}
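
A hedged sketch of the ordering the comment above relies on; the real callers are the system and runtime resume handlers, which already hold the wakeref:

/* illustrative only - an external caller would need its own wakeref */
intel_runtime_pm_get(dev_priv);
i915_gem_restore_fences(&dev_priv->drm);
intel_runtime_pm_put(dev_priv);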
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f38ceffd82c3..0bb4232f66bc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -32,6 +32,8 @@
#include "i915_trace.h"
#include "intel_drv.h"
+#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+
/**
* DOC: Global GTT views
*
@@ -173,11 +175,13 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
{
u32 pte_flags = 0;
+ vma->pages = vma->obj->pages;
+
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
- vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+ vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
cache_level, pte_flags);
return 0;
@@ -187,7 +191,7 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
{
vma->vm->clear_range(vma->vm,
vma->node.start,
- vma->obj->base.size,
+ vma->size,
true);
}
@@ -327,16 +331,16 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
static int __setup_page_dma(struct drm_device *dev,
struct i915_page_dma *p, gfp_t flags)
{
- struct device *device = &dev->pdev->dev;
+ struct device *kdev = &dev->pdev->dev;
p->page = alloc_page(flags);
if (!p->page)
return -ENOMEM;
- p->daddr = dma_map_page(device,
+ p->daddr = dma_map_page(kdev,
p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
- if (dma_mapping_error(device, p->daddr)) {
+ if (dma_mapping_error(kdev, p->daddr)) {
__free_page(p->page);
return -EINVAL;
}
@@ -346,15 +350,17 @@ static int __setup_page_dma(struct drm_device *dev,
static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
- return __setup_page_dma(dev, p, GFP_KERNEL);
+ return __setup_page_dma(dev, p, I915_GFP_DMA);
}
static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
+ struct pci_dev *pdev = dev->pdev;
+
if (WARN_ON(!p->page))
return;
- dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
__free_page(p->page);
memset(p, 0, sizeof(*p));
}
@@ -408,33 +414,18 @@ static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
fill_page_dma(dev, p, v);
}
-static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+static int
+setup_scratch_page(struct drm_device *dev,
+ struct i915_page_dma *scratch,
+ gfp_t gfp)
{
- struct i915_page_scratch *sp;
- int ret;
-
- sp = kzalloc(sizeof(*sp), GFP_KERNEL);
- if (sp == NULL)
- return ERR_PTR(-ENOMEM);
-
- ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
- if (ret) {
- kfree(sp);
- return ERR_PTR(ret);
- }
-
- set_pages_uc(px_page(sp), 1);
-
- return sp;
+ return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
}
-static void free_scratch_page(struct drm_device *dev,
- struct i915_page_scratch *sp)
+static void cleanup_scratch_page(struct drm_device *dev,
+ struct i915_page_dma *scratch)
{
- set_pages_wb(px_page(sp), 1);
-
- cleanup_px(dev, sp);
- kfree(sp);
+ cleanup_page_dma(dev, scratch);
}
static struct i915_page_table *alloc_pt(struct drm_device *dev)
@@ -480,7 +471,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
{
gen8_pte_t scratch_pte;
- scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+ scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true);
fill_px(vm->dev, pt, scratch_pte);
@@ -491,9 +482,9 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
{
gen6_pte_t scratch_pte;
- WARN_ON(px_dma(vm->scratch_page) == 0);
+ WARN_ON(vm->scratch_page.daddr == 0);
- scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true, 0);
fill32_px(vm->dev, pt, scratch_pte);
@@ -672,6 +663,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
@@ -681,13 +673,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
- intel_ring_emit(engine, upper_32_bits(addr));
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
- intel_ring_emit(engine, lower_32_bits(addr));
- intel_ring_advance(engine);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
+ intel_ring_emit(ring, upper_32_bits(addr));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
+ intel_ring_emit(ring, lower_32_bits(addr));
+ intel_ring_advance(ring);
return 0;
}
@@ -776,7 +768,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
bool use_scratch)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+ gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, use_scratch);
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -882,9 +874,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
struct drm_device *dev = vm->dev;
int ret;
- vm->scratch_page = alloc_scratch_page(dev);
- if (IS_ERR(vm->scratch_page))
- return PTR_ERR(vm->scratch_page);
+ ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ if (ret)
+ return ret;
vm->scratch_pt = alloc_pt(dev);
if (IS_ERR(vm->scratch_pt)) {
@@ -918,7 +910,7 @@ free_pd:
free_pt:
free_pt(dev, vm->scratch_pt);
free_scratch_page:
- free_scratch_page(dev, vm->scratch_page);
+ cleanup_scratch_page(dev, &vm->scratch_page);
return ret;
}
@@ -962,7 +954,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
free_pdp(dev, vm->scratch_pdp);
free_pd(dev, vm->scratch_pd);
free_pt(dev, vm->scratch_pt);
- free_scratch_page(dev, vm->scratch_page);
+ cleanup_scratch_page(dev, &vm->scratch_page);
}
static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
@@ -1459,7 +1451,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
struct i915_address_space *vm = &ppgtt->base;
uint64_t start = ppgtt->base.start;
uint64_t length = ppgtt->base.total;
- gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+ gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true);
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
@@ -1576,7 +1568,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t pte, pde;
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
- scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true, 0);
gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
@@ -1663,11 +1655,12 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
if (ret)
return ret;
@@ -1675,13 +1668,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
- intel_ring_emit(engine, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
- intel_ring_emit(engine, get_pd_offset(ppgtt));
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
+ intel_ring_emit(ring, PP_DIR_DCLV_2G);
+ intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
+ intel_ring_emit(ring, get_pd_offset(ppgtt));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
@@ -1689,11 +1682,12 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
if (ret)
return ret;
@@ -1701,17 +1695,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
- intel_ring_emit(engine, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
- intel_ring_emit(engine, get_pd_offset(ppgtt));
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
+ intel_ring_emit(ring, PP_DIR_DCLV_2G);
+ intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
+ intel_ring_emit(ring, get_pd_offset(ppgtt));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (engine->id != RCS) {
- ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
if (ret)
return ret;
}
@@ -1799,7 +1793,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % GEN6_PTES;
unsigned last_pte, i;
- scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, true, 0);
while (num_entries) {
@@ -1945,14 +1939,15 @@ unwind_out:
static int gen6_init_scratch(struct i915_address_space *vm)
{
struct drm_device *dev = vm->dev;
+ int ret;
- vm->scratch_page = alloc_scratch_page(dev);
- if (IS_ERR(vm->scratch_page))
- return PTR_ERR(vm->scratch_page);
+ ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
+ if (ret)
+ return ret;
vm->scratch_pt = alloc_pt(dev);
if (IS_ERR(vm->scratch_pt)) {
- free_scratch_page(dev, vm->scratch_page);
+ cleanup_scratch_page(dev, &vm->scratch_page);
return PTR_ERR(vm->scratch_pt);
}
@@ -1966,7 +1961,7 @@ static void gen6_free_scratch(struct i915_address_space *vm)
struct drm_device *dev = vm->dev;
free_pt(dev, vm->scratch_pt);
- free_scratch_page(dev, vm->scratch_page);
+ cleanup_scratch_page(dev, &vm->scratch_page);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -2012,7 +2007,7 @@ alloc:
0, ggtt->base.total,
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
- ret = i915_gem_evict_something(dev, &ggtt->base,
+ ret = i915_gem_evict_something(&ggtt->base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_CACHE_NONE,
0, ggtt->base.total,
@@ -2104,11 +2099,12 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
return 0;
}
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_private *dev_priv)
{
- ppgtt->base.dev = dev;
+ ppgtt->base.dev = &dev_priv->drm;
- if (INTEL_INFO(dev)->gen < 8)
+ if (INTEL_INFO(dev_priv)->gen < 8)
return gen6_ppgtt_init(ppgtt);
else
return gen8_ppgtt_init(ppgtt);
@@ -2118,9 +2114,9 @@ static void i915_address_space_init(struct i915_address_space *vm,
struct drm_i915_private *dev_priv)
{
drm_mm_init(&vm->mm, vm->start, vm->total);
- vm->dev = &dev_priv->drm;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->unbound_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
@@ -2143,15 +2139,17 @@ static void gtt_write_workarounds(struct drm_device *dev)
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
-static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_private *dev_priv,
+ struct drm_i915_file_private *file_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret = 0;
+ int ret;
- ret = __hw_ppgtt_init(dev, ppgtt);
+ ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret == 0) {
kref_init(&ppgtt->ref);
i915_address_space_init(&ppgtt->base, dev_priv);
+ ppgtt->base.file = file_priv;
}
return ret;
@@ -2183,7 +2181,8 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
}
struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
+i915_ppgtt_create(struct drm_i915_private *dev_priv,
+ struct drm_i915_file_private *fpriv)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
@@ -2192,14 +2191,12 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ret = i915_ppgtt_init(dev, ppgtt);
+ ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
}
- ppgtt->file_priv = fpriv;
-
trace_i915_ppgtt_create(&ppgtt->base);
return ppgtt;
@@ -2212,9 +2209,10 @@ void i915_ppgtt_release(struct kref *kref)
trace_i915_ppgtt_release(&ppgtt->base);
- /* vmas should already be unbound */
+ /* vmas should already be unbound and destroyed */
WARN_ON(!list_empty(&ppgtt->base.active_list));
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+ WARN_ON(!list_empty(&ppgtt->base.unbound_list));
list_del(&ppgtt->base.global_link);
drm_mm_takedown(&ppgtt->base.mm);
@@ -2223,47 +2221,21 @@ void i915_ppgtt_release(struct kref *kref)
kfree(ppgtt);
}
-extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
-static bool needs_idle_maps(struct drm_device *dev)
+static bool needs_idle_maps(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+ if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
return true;
#endif
return false;
}
-static bool do_idling(struct drm_i915_private *dev_priv)
-{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool ret = dev_priv->mm.interruptible;
-
- if (unlikely(ggtt->do_idle_maps)) {
- dev_priv->mm.interruptible = false;
- if (i915_gem_wait_for_idle(dev_priv)) {
- DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
- /* Wait a bit, in hopes it avoids the hang */
- udelay(10);
- }
- }
-
- return ret;
-}
-
-static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
-{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- if (unlikely(ggtt->do_idle_maps))
- dev_priv->mm.interruptible = interruptible;
-}
-
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@@ -2332,12 +2304,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
-#ifdef writeq
writeq(pte, addr);
-#else
- iowrite32((u32)pte, addr);
- iowrite32(pte >> 32, addr + 4);
-#endif
}
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
@@ -2530,7 +2497,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+ scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC,
use_scratch);
for (i = 0; i < num_entries; i++)
@@ -2562,7 +2529,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+ scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
@@ -2641,8 +2608,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
- vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
- vma->node.start,
+ vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
cache_level, pte_flags);
/*
@@ -2650,7 +2616,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
- vma->bound |= GLOBAL_BIND | LOCAL_BIND;
+ vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
return 0;
}
@@ -2672,19 +2638,17 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
- if (flags & GLOBAL_BIND) {
+ if (flags & I915_VMA_GLOBAL_BIND) {
vma->vm->insert_entries(vma->vm,
- vma->ggtt_view.pages,
- vma->node.start,
+ vma->pages, vma->node.start,
cache_level, pte_flags);
}
- if (flags & LOCAL_BIND) {
+ if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt =
to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
- vma->ggtt_view.pages,
- vma->node.start,
+ vma->pages, vma->node.start,
cache_level, pte_flags);
}
@@ -2693,42 +2657,36 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct drm_device *dev = vma->vm->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = vma->obj;
- const uint64_t size = min_t(uint64_t,
- obj->base.size,
- vma->node.size);
+ struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ const u64 size = min(vma->size, vma->node.size);
- if (vma->bound & GLOBAL_BIND) {
+ if (vma->flags & I915_VMA_GLOBAL_BIND)
vma->vm->clear_range(vma->vm,
- vma->node.start,
- size,
+ vma->node.start, size,
true);
- }
-
- if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+ if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base,
- vma->node.start,
- size,
+ vma->node.start, size,
true);
- }
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- bool interruptible;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct device *kdev = &dev_priv->drm.pdev->dev;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
- interruptible = do_idling(dev_priv);
+ if (unlikely(ggtt->do_idle_maps)) {
+ if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+ DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
+ /* Wait a bit, in hopes it avoids the hang */
+ udelay(10);
+ }
+ }
- dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+ dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL);
-
- undo_idling(dev_priv, interruptible);
}
static void i915_gtt_color_adjust(struct drm_mm_node *node,
@@ -2739,19 +2697,14 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
if (node->color != color)
*start += 4096;
- if (!list_empty(&node->node_list)) {
- node = list_entry(node->node_list.next,
- struct drm_mm_node,
- node_list);
- if (node->allocated && node->color != color)
- *end -= 4096;
- }
+ node = list_first_entry_or_null(&node->node_list,
+ struct drm_mm_node,
+ node_list);
+ if (node && node->allocated && node->color != color)
+ *end -= 4096;
}
-static int i915_gem_setup_global_gtt(struct drm_device *dev,
- u64 start,
- u64 mappable_end,
- u64 end)
+int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
{
/* Let GEM manage all of the aperture.
*
@@ -2762,48 +2715,15 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_mm_node *entry;
- struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
+ struct drm_mm_node *entry;
int ret;
- BUG_ON(mappable_end > end);
-
- ggtt->base.start = start;
-
- /* Subtract the guard page before address space initialization to
- * shrink the range used by drm_mm */
- ggtt->base.total = end - start - PAGE_SIZE;
- i915_address_space_init(&ggtt->base, dev_priv);
- ggtt->base.total += PAGE_SIZE;
-
ret = intel_vgt_balloon(dev_priv);
if (ret)
return ret;
- if (!HAS_LLC(dev))
- ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
-
- /* Mark any preallocated objects as occupied */
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
-
- DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
- i915_gem_obj_ggtt_offset(obj), obj->base.size);
-
- WARN_ON(i915_gem_obj_ggtt_bound(obj));
- ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
- if (ret) {
- DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
- return ret;
- }
- vma->bound |= GLOBAL_BIND;
- __i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
- }
-
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -2813,18 +2733,19 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
}
/* And finally clear the reserved guard page */
- ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
+ ggtt->base.clear_range(&ggtt->base,
+ ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
+ true);
- if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return -ENOMEM;
- ret = __hw_ppgtt_init(dev, ppgtt);
+ ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret) {
- ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
return ret;
}
@@ -2852,34 +2773,20 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
}
/**
- * i915_gem_init_ggtt - Initialize GEM for Global GTT
- * @dev: DRM device
- */
-void i915_gem_init_ggtt(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
-}
-
-/**
* i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
- * @dev: DRM device
+ * @dev_priv: i915 device
*/
-void i915_ggtt_cleanup_hw(struct drm_device *dev)
+void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
}
- i915_gem_cleanup_stolen(dev);
+ i915_gem_cleanup_stolen(&dev_priv->drm);
if (drm_mm_initialized(&ggtt->base.mm)) {
intel_vgt_deballoon(dev_priv);
@@ -2889,6 +2796,9 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
}
ggtt->base.cleanup(&ggtt->base);
+
+ arch_phys_wc_del(ggtt->mtrr);
+ io_mapping_fini(&ggtt->mappable);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2969,17 +2879,14 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}
-static int ggtt_probe_common(struct drm_device *dev,
- size_t gtt_size)
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_page_scratch *scratch_page;
- phys_addr_t ggtt_phys_addr;
+ struct pci_dev *pdev = ggtt->base.dev->pdev;
+ phys_addr_t phys_addr;
+ int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
- ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
- (pci_resource_len(dev->pdev, 0) / 2);
+ phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
/*
* On BXT, writes larger than 64 bits to the GTT pagetable range will be
@@ -2988,25 +2895,25 @@ static int ggtt_probe_common(struct drm_device *dev,
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(dev))
- ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
+ if (IS_BROXTON(ggtt->base.dev))
+ ggtt->gsm = ioremap_nocache(phys_addr, size);
else
- ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
+ ggtt->gsm = ioremap_wc(phys_addr, size);
if (!ggtt->gsm) {
- DRM_ERROR("Failed to map the gtt page table\n");
+ DRM_ERROR("Failed to map the ggtt page table\n");
return -ENOMEM;
}
- scratch_page = alloc_scratch_page(dev);
- if (IS_ERR(scratch_page)) {
+ ret = setup_scratch_page(ggtt->base.dev,
+ &ggtt->base.scratch_page,
+ GFP_DMA32);
+ if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(ggtt->gsm);
- return PTR_ERR(scratch_page);
+ return ret;
}
- ggtt->base.scratch_page = scratch_page;
-
return 0;
}
@@ -3083,42 +2990,49 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ iounmap(ggtt->gsm);
+ cleanup_scratch_page(vm->dev, &vm->scratch_page);
+}
+
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_device *dev = ggtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ unsigned int size;
u16 snb_gmch_ctl;
- int ret;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
- ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
+ ggtt->mappable_base = pci_resource_start(pdev, 2);
+ ggtt->mappable_end = pci_resource_len(pdev, 2);
- if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
- pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
- ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
- } else if (IS_CHERRYVIEW(dev)) {
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ } else if (IS_CHERRYVIEW(dev_priv)) {
ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
- ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
+ size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
- ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
- ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
- ret = ggtt_probe_common(dev, ggtt->size);
-
+ ggtt->base.cleanup = gen6_gmch_remove;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
ggtt->base.insert_page = gen8_ggtt_insert_page;
@@ -3130,57 +3044,65 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
if (IS_CHERRYVIEW(dev_priv))
ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
- return ret;
+ return ggtt_probe_common(ggtt, size);
}
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_device *dev = ggtt->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ unsigned int size;
u16 snb_gmch_ctl;
- int ret;
- ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
- ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
+ ggtt->mappable_base = pci_resource_start(pdev, 2);
+ ggtt->mappable_end = pci_resource_len(pdev, 2);
/* 64/512MB is the current min/max we actually know of, but this is just
* a coarse sanity check.
*/
- if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
+ if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
return -ENXIO;
}
- if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
- pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
- pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
- ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
- ret = ggtt_probe_common(dev, ggtt->size);
+ size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
ggtt->base.clear_range = gen6_ggtt_clear_range;
ggtt->base.insert_page = gen6_ggtt_insert_page;
ggtt->base.insert_entries = gen6_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.cleanup = gen6_gmch_remove;
+
+ if (HAS_EDRAM(dev_priv))
+ ggtt->base.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(dev_priv))
+ ggtt->base.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(dev_priv))
+ ggtt->base.pte_encode = byt_pte_encode;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ ggtt->base.pte_encode = ivb_pte_encode;
+ else
+ ggtt->base.pte_encode = snb_pte_encode;
- return ret;
+ return ggtt_probe_common(ggtt, size);
}
-static void gen6_gmch_remove(struct i915_address_space *vm)
+static void i915_gmch_remove(struct i915_address_space *vm)
{
- struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);
-
- iounmap(ggtt->gsm);
- free_scratch_page(vm->dev, vm->scratch_page);
+ intel_gmch_remove();
}
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_device *dev = ggtt->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3192,12 +3114,13 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
&ggtt->mappable_base, &ggtt->mappable_end);
- ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
+ ggtt->do_idle_maps = needs_idle_maps(dev_priv);
ggtt->base.insert_page = i915_ggtt_insert_page;
ggtt->base.insert_entries = i915_ggtt_insert_entries;
ggtt->base.clear_range = i915_ggtt_clear_range;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.cleanup = i915_gmch_remove;
if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3205,65 +3128,40 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
return 0;
}
-static void i915_gmch_remove(struct i915_address_space *vm)
-{
- intel_gmch_remove();
-}
-
/**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev: DRM device
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @dev_priv: i915 device
*/
-int i915_ggtt_init_hw(struct drm_device *dev)
+int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- if (INTEL_INFO(dev)->gen <= 5) {
- ggtt->probe = i915_gmch_probe;
- ggtt->base.cleanup = i915_gmch_remove;
- } else if (INTEL_INFO(dev)->gen < 8) {
- ggtt->probe = gen6_gmch_probe;
- ggtt->base.cleanup = gen6_gmch_remove;
-
- if (HAS_EDRAM(dev))
- ggtt->base.pte_encode = iris_pte_encode;
- else if (IS_HASWELL(dev))
- ggtt->base.pte_encode = hsw_pte_encode;
- else if (IS_VALLEYVIEW(dev))
- ggtt->base.pte_encode = byt_pte_encode;
- else if (INTEL_INFO(dev)->gen >= 7)
- ggtt->base.pte_encode = ivb_pte_encode;
- else
- ggtt->base.pte_encode = snb_pte_encode;
- } else {
- ggtt->probe = gen8_gmch_probe;
- ggtt->base.cleanup = gen6_gmch_remove;
- }
-
- ggtt->base.dev = dev;
- ggtt->base.is_ggtt = true;
+ ggtt->base.dev = &dev_priv->drm;
- ret = ggtt->probe(ggtt);
+ if (INTEL_GEN(dev_priv) <= 5)
+ ret = i915_gmch_probe(ggtt);
+ else if (INTEL_GEN(dev_priv) < 8)
+ ret = gen6_gmch_probe(ggtt);
+ else
+ ret = gen8_gmch_probe(ggtt);
if (ret)
return ret;
if ((ggtt->base.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
- "of address space! Found %lldM!\n",
+ " of address space! Found %lldM!\n",
ggtt->base.total >> 20);
ggtt->base.total = 1ULL << 32;
ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
}
- /*
- * Initialise stolen early so that we may reserve preallocated
- * objects for the BIOS to KMS transition.
- */
- ret = i915_gem_init_stolen(dev);
- if (ret)
- goto out_gtt_cleanup;
+ if (ggtt->mappable_end > ggtt->base.total) {
+ DRM_ERROR("mappable aperture extends past end of GGTT,"
+ " aperture=%llx, total=%llx\n",
+ ggtt->mappable_end, ggtt->base.total);
+ ggtt->mappable_end = ggtt->base.total;
+ }
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
@@ -3276,16 +3174,55 @@ int i915_ggtt_init_hw(struct drm_device *dev)
#endif
return 0;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev_priv: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ int ret;
+
+ INIT_LIST_HEAD(&dev_priv->vm_list);
+
+ /* Subtract the guard page before address space initialization to
+ * shrink the range used by drm_mm.
+ */
+ ggtt->base.total -= PAGE_SIZE;
+ i915_address_space_init(&ggtt->base, dev_priv);
+ ggtt->base.total += PAGE_SIZE;
+ if (!HAS_LLC(dev_priv))
+ ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+
+ if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
+ dev_priv->ggtt.mappable_base,
+ dev_priv->ggtt.mappable_end)) {
+ ret = -EIO;
+ goto out_gtt_cleanup;
+ }
+
+ ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+
+ /*
+ * Initialise stolen early so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ ret = i915_gem_init_stolen(&dev_priv->drm);
+ if (ret)
+ goto out_gtt_cleanup;
+
+ return 0;
out_gtt_cleanup:
ggtt->base.cleanup(&ggtt->base);
-
return ret;
}
-int i915_ggtt_enable_hw(struct drm_device *dev)
+int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
return -EIO;
return 0;
@@ -3295,8 +3232,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
+ struct drm_i915_gem_object *obj, *on;
i915_check_and_clear_faults(dev_priv);
@@ -3304,20 +3240,32 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
true);
- /* Cache flush objects bound into GGTT and rebind them. */
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+
+ /* clflush objects bound into the GGTT and rebind them. */
+ list_for_each_entry_safe(obj, on,
+ &dev_priv->mm.bound_list, global_list) {
+ bool ggtt_bound = false;
+ struct i915_vma *vma;
+
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != &ggtt->base)
continue;
+ if (!i915_vma_unbind(vma))
+ continue;
+
WARN_ON(i915_vma_bind(vma, obj->cache_level,
PIN_UPDATE));
+ ggtt_bound = true;
}
- if (obj->pin_display)
+ if (ggtt_bound)
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
+ ggtt->base.closed = false;
+
if (INTEL_INFO(dev)->gen >= 8) {
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
chv_setup_private_ppat(dev_priv);
@@ -3335,7 +3283,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt;
- if (vm->is_ggtt)
+ if (i915_is_ggtt(vm))
ppgtt = dev_priv->mm.aliasing_ppgtt;
else
ppgtt = i915_vm_to_ppgtt(vm);
@@ -3348,65 +3296,155 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv);
}
+static void
+i915_vma_retire(struct i915_gem_active *active,
+ struct drm_i915_gem_request *rq)
+{
+ const unsigned int idx = rq->engine->id;
+ struct i915_vma *vma =
+ container_of(active, struct i915_vma, last_read[idx]);
+
+ GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
+
+ i915_vma_clear_active(vma, idx);
+ if (i915_vma_is_active(vma))
+ return;
+
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
+ WARN_ON(i915_vma_unbind(vma));
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->node.allocated);
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ GEM_BUG_ON(!i915_vma_is_closed(vma));
+ GEM_BUG_ON(vma->fence);
+
+ list_del(&vma->vm_link);
+ if (!i915_vma_is_ggtt(vma))
+ i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+ kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+ GEM_BUG_ON(i915_vma_is_closed(vma));
+ vma->flags |= I915_VMA_CLOSED;
+
+ list_del_init(&vma->obj_link);
+ if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
+ WARN_ON(i915_vma_unbind(vma));
+}
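
close/retire/destroy now give each VMA an explicit lifecycle: close unbinds immediately if idle, retirement of the last request unbinds a closed VMA, and destroy frees it. A minimal teardown sketch under struct_mutex — example_close_object_vmas() is illustrative only:

static void example_close_object_vmas(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;

	/* _safe iteration: i915_vma_close() unlinks vma->obj_link */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
		i915_vma_close(vma);
}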
+
static struct i915_vma *
-__i915_gem_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *ggtt_view)
+__i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
+ int i;
- if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
- return ERR_PTR(-EINVAL);
+ GEM_BUG_ON(vm->closed);
vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&vma->vm_link);
- INIT_LIST_HEAD(&vma->obj_link);
INIT_LIST_HEAD(&vma->exec_list);
+ for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+ init_request_active(&vma->last_read[i], i915_vma_retire);
+ init_request_active(&vma->last_fence, NULL);
+ list_add(&vma->vm_link, &vm->unbound_list);
vma->vm = vm;
vma->obj = obj;
- vma->is_ggtt = i915_is_ggtt(vm);
+ vma->size = obj->base.size;
+
+ if (view) {
+ vma->ggtt_view = *view;
+ if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ vma->size = view->params.partial.size;
+ vma->size <<= PAGE_SHIFT;
+ } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ vma->size =
+ intel_rotation_info_size(&view->params.rotated);
+ vma->size <<= PAGE_SHIFT;
+ }
+ }
- if (i915_is_ggtt(vm))
- vma->ggtt_view = *ggtt_view;
- else
+ if (i915_is_ggtt(vm)) {
+ vma->flags |= I915_VMA_GGTT;
+ } else {
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+ }
list_add_tail(&vma->obj_link, &obj->vma_list);
-
return vma;
}
+static inline bool vma_matches(struct i915_vma *vma,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ if (vma->vm != vm)
+ return false;
+
+ if (!i915_vma_is_ggtt(vma))
+ return true;
+
+ if (!view)
+ return vma->ggtt_view.type == 0;
+
+ if (vma->ggtt_view.type != view->type)
+ return false;
+
+ return memcmp(&vma->ggtt_view.params,
+ &view->params,
+ sizeof(view->params)) == 0;
+}
+
struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm)
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+
+ return __i915_vma_create(obj, vm, view);
+}
+
+struct i915_vma *
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
- vma = i915_gem_obj_to_vma(obj, vm);
- if (!vma)
- vma = __i915_gem_vma_create(obj, vm,
- i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
+ list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
+ if (vma_matches(vma, vm, view))
+ return vma;
- return vma;
+ return NULL;
}
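
Lookups now key on the (vm, view) pair, with a NULL view standing in for the normal GGTT view (type 0). An illustrative wrapper, assuming ggtt names the global address space:

static struct i915_vma *
example_lookup_normal_view(struct drm_i915_gem_object *obj,
			   struct i915_ggtt *ggtt)
{
	/* NULL view matches I915_GGTT_VIEW_NORMAL per vma_matches() */
	return i915_gem_obj_to_vma(obj, &ggtt->base, NULL);
}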
struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(view && !i915_is_ggtt(vm));
+ vma = i915_gem_obj_to_vma(obj, vm, view);
if (!vma)
- vma = __i915_gem_vma_create(obj, &ggtt->base, view);
+ vma = __i915_vma_create(obj, vm, view);
+ GEM_BUG_ON(i915_vma_is_closed(vma));
return vma;
-
}
static struct scatterlist *
@@ -3438,18 +3476,16 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
}
static struct sg_table *
-intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
+intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
const size_t n_pages = obj->base.size / PAGE_SIZE;
- unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
- unsigned int size_pages_uv;
+ unsigned int size = intel_rotation_info_size(rot_info);
struct sgt_iter sgt_iter;
dma_addr_t dma_addr;
unsigned long i;
dma_addr_t *page_addr_list;
struct sg_table *st;
- unsigned int uv_start_page;
struct scatterlist *sg;
int ret = -ENOMEM;
@@ -3460,18 +3496,12 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
if (!page_addr_list)
return ERR_PTR(ret);
- /* Account for UV plane with NV12. */
- if (rot_info->pixel_format == DRM_FORMAT_NV12)
- size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
- else
- size_pages_uv = 0;
-
/* Allocate target SG list. */
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto err_st_alloc;
- ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
+ ret = sg_alloc_table(st, size, GFP_KERNEL);
if (ret)
goto err_sg_alloc;
@@ -3484,32 +3514,14 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
st->nents = 0;
sg = st->sgl;
- /* Rotate the pages. */
- sg = rotate_pages(page_addr_list, 0,
- rot_info->plane[0].width, rot_info->plane[0].height,
- rot_info->plane[0].width,
- st, sg);
-
- /* Append the UV plane if NV12. */
- if (rot_info->pixel_format == DRM_FORMAT_NV12) {
- uv_start_page = size_pages;
-
- /* Check for tile-row un-alignment. */
- if (offset_in_page(rot_info->uv_offset))
- uv_start_page--;
-
- rot_info->uv_start_page = uv_start_page;
-
- sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
- rot_info->plane[1].width, rot_info->plane[1].height,
- rot_info->plane[1].width,
- st, sg);
+ for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) {
+ sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
+ rot_info->plane[i].width, rot_info->plane[i].height,
+ rot_info->plane[i].stride, st, sg);
}
- DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
- obj->base.size, rot_info->plane[0].width,
- rot_info->plane[0].height, size_pages + size_pages_uv,
- size_pages);
+ DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
drm_free_large(page_addr_list);
@@ -3520,10 +3532,9 @@ err_sg_alloc:
err_st_alloc:
drm_free_large(page_addr_list);
- DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
- obj->base.size, ret, rot_info->plane[0].width,
- rot_info->plane[0].height, size_pages + size_pages_uv,
- size_pages);
+ DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+
return ERR_PTR(ret);
}
@@ -3573,28 +3584,27 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
int ret = 0;
- if (vma->ggtt_view.pages)
+ if (vma->pages)
return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- vma->ggtt_view.pages = vma->obj->pages;
+ vma->pages = vma->obj->pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
- vma->ggtt_view.pages =
+ vma->pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
- vma->ggtt_view.pages =
- intel_partial_pages(&vma->ggtt_view, vma->obj);
+ vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
else
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
- if (!vma->ggtt_view.pages) {
+ if (!vma->pages) {
DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
vma->ggtt_view.type);
ret = -EINVAL;
- } else if (IS_ERR(vma->ggtt_view.pages)) {
- ret = PTR_ERR(vma->ggtt_view.pages);
- vma->ggtt_view.pages = NULL;
+ } else if (IS_ERR(vma->pages)) {
+ ret = PTR_ERR(vma->pages);
+ vma->pages = NULL;
DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
vma->ggtt_view.type, ret);
}
@@ -3615,34 +3625,32 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
- int ret;
u32 bind_flags;
+ u32 vma_flags;
+ int ret;
if (WARN_ON(flags == 0))
return -EINVAL;
bind_flags = 0;
if (flags & PIN_GLOBAL)
- bind_flags |= GLOBAL_BIND;
+ bind_flags |= I915_VMA_GLOBAL_BIND;
if (flags & PIN_USER)
- bind_flags |= LOCAL_BIND;
+ bind_flags |= I915_VMA_LOCAL_BIND;
+ vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
if (flags & PIN_UPDATE)
- bind_flags |= vma->bound;
+ bind_flags |= vma_flags;
else
- bind_flags &= ~vma->bound;
-
+ bind_flags &= ~vma_flags;
if (bind_flags == 0)
return 0;
- if (vma->bound == 0 && vma->vm->allocate_va_range) {
- /* XXX: i915_vma_pin() will fix this +- hack */
- vma->pin_count++;
+ if (vma_flags == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma);
ret = vma->vm->allocate_va_range(vma->vm,
vma->node.start,
vma->node.size);
- vma->pin_count--;
if (ret)
return ret;
}
@@ -3651,56 +3659,47 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (ret)
return ret;
- vma->bound |= bind_flags;
-
+ vma->flags |= bind_flags;
return 0;
}
-/**
- * i915_ggtt_view_size - Get the size of a GGTT view.
- * @obj: Object the view is of.
- * @view: The view in question.
- *
- * @return The size of the GGTT view in bytes.
- */
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
-{
- if (view->type == I915_GGTT_VIEW_NORMAL) {
- return obj->base.size;
- } else if (view->type == I915_GGTT_VIEW_ROTATED) {
- return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
- } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
- return view->params.partial.size << PAGE_SHIFT;
- } else {
- WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
- return obj->base.size;
- }
-}
-
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
void __iomem *ptr;
+ /* Access through the GTT requires the device to be awake. */
+ assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+
lockdep_assert_held(&vma->vm->dev->struct_mutex);
- if (WARN_ON(!vma->obj->map_and_fenceable))
- return ERR_PTR(-ENODEV);
+ if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
+ return IO_ERR_PTR(-ENODEV);
- GEM_BUG_ON(!vma->is_ggtt);
- GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
ptr = vma->iomap;
if (ptr == NULL) {
- ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
vma->node.start,
vma->node.size);
if (ptr == NULL)
- return ERR_PTR(-ENOMEM);
+ return IO_ERR_PTR(-ENOMEM);
vma->iomap = ptr;
}
- vma->pin_count++;
+ __i915_vma_pin(vma);
return ptr;
}
+
+void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+{
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(p_vma);
+ if (!vma)
+ return;
+
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+}
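
This pairs fetch_and_zero() with unpin and put so an owner can drop a pinned reference and poison its pointer in one step. Hedged usage sketch:

/* owner: any struct i915_vma * field holding a pinned reference */
i915_vma_unpin_and_release(&owner);	/* safe on NULL; owner ends up NULL */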
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index aa5f31d1c2ed..ec78be2f8c77 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -36,7 +36,15 @@
#include <linux/io-mapping.h>
+#include "i915_gem_request.h"
+
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
struct drm_i915_file_private;
+struct drm_i915_fence_reg;
typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
@@ -137,12 +145,9 @@ enum i915_ggtt_view_type {
};
struct intel_rotation_info {
- unsigned int uv_offset;
- uint32_t pixel_format;
- unsigned int uv_start_page;
struct {
/* tiles */
- unsigned int width, height;
+ unsigned int width, height, stride, offset;
} plane[2];
};
@@ -156,8 +161,6 @@ struct i915_ggtt_view {
} partial;
struct intel_rotation_info rotated;
} params;
-
- struct sg_table *pages;
};
extern const struct i915_ggtt_view i915_ggtt_view_normal;
@@ -177,13 +180,38 @@ struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ struct drm_i915_fence_reg *fence;
+ struct sg_table *pages;
void __iomem *iomap;
+ u64 size;
+ u64 display_alignment;
+
+ unsigned int flags;
+ /**
+ * How many users have pinned this object in GTT space. The following
+ * users can each hold at most one reference: pwrite/pread, execbuffer
+ * (objects are not allowed multiple times for the same batchbuffer),
+ * and the framebuffer code. When switching/pageflipping, the
+ * framebuffer code has at most two buffers pinned per crtc.
+ *
+ * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+ * bits with absolutely no headroom. So use 4 bits.
+ */
+#define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW BIT(5)
/** Flags and address space this VMA is bound to */
-#define GLOBAL_BIND (1<<0)
-#define LOCAL_BIND (1<<1)
- unsigned int bound : 4;
- bool is_ggtt : 1;
+#define I915_VMA_GLOBAL_BIND BIT(6)
+#define I915_VMA_LOCAL_BIND BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+
+#define I915_VMA_GGTT BIT(8)
+#define I915_VMA_CAN_FENCE BIT(9)
+#define I915_VMA_CLOSED BIT(10)
+
+ unsigned int active;
+ struct i915_gem_active last_read[I915_NUM_ENGINES];
+ struct i915_gem_active last_fence;
/**
* Support different GGTT views into the same object.
@@ -208,20 +236,66 @@ struct i915_vma {
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
-
- /**
- * How many users have pinned this object in GTT space. The following
- * users can each hold at most one reference: pwrite/pread, execbuffer
- * (objects are not allowed multiple times for the same batchbuffer),
- * and the framebuffer code. When switching/pageflipping, the
- * framebuffer code has at most two buffers pinned per crtc.
- *
- * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
- * bits with absolutely no headroom. So use 4 bits. */
- unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};
+struct i915_vma *
+i915_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
+void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CAN_FENCE;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_CLOSED;
+}
+
+static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+{
+ return vma->active;
+}
+
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+ return i915_vma_get_active(vma);
+}
+
+static inline void i915_vma_set_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active |= BIT(engine);
+}
+
+static inline void i915_vma_clear_active(struct i915_vma *vma,
+ unsigned int engine)
+{
+ vma->active &= ~BIT(engine);
+}
+
+static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
+ unsigned int engine)
+{
+ return vma->active & BIT(engine);
+}
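
These helpers keep a per-engine bitmask in vma->active: submission sets the engine's bit, and i915_vma_retire() clears it and parks the VMA once no engine is reading it. Illustrative pairing, with rq a request being submitted:

/* on submission */
i915_vma_set_active(vma, rq->engine->id);

/* on retirement, mirroring i915_vma_retire() */
i915_vma_clear_active(vma, rq->engine->id);
if (!i915_vma_is_active(vma))
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);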
+
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(upper_32_bits(vma->node.start));
+ GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
+ return lower_32_bits(vma->node.start);
+}
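
i915_ggtt_offset() encodes the invariant that addresses handed to 32-bit hardware registers must lie below 4 GiB; the GEM_BUG_ONs trap misuse in debug builds. Hedged usage sketch (the register chosen here is illustrative):

u32 offset = i915_ggtt_offset(vma);

/* safe: the helper guarantees the node fits in 32 bits */
I915_WRITE(RING_START(engine->mmio_base), offset);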
+
struct i915_page_dma {
struct page *page;
union {
@@ -238,10 +312,6 @@ struct i915_page_dma {
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)
-struct i915_page_scratch {
- struct i915_page_dma base;
-};
-
struct i915_page_table {
struct i915_page_dma base;
@@ -272,13 +342,22 @@ struct i915_pml4 {
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
+ /* Every address space belongs to a struct file - except for the global
+ * GTT that is owned by the driver (and so @file is set to NULL). In
+ * principle, no information should leak from one context to another
+ * (or between files/processes etc) unless explicitly shared by the
+ * owner. Tracking the owner is important in order to free up per-file
+ * objects along with the file, to aid resource tracking, and to
+ * assign blame.
+ */
+ struct drm_i915_file_private *file;
struct list_head global_link;
u64 start; /* Start offset always 0 for dri2 */
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
- bool is_ggtt;
+ bool closed;
- struct i915_page_scratch *scratch_page;
+ struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
@@ -306,6 +385,13 @@ struct i915_address_space {
*/
struct list_head inactive_list;
+ /**
+ * List of vma that have been unbound.
+ *
+ * A reference is not held on the buffer while on this list.
+ */
+ struct list_head unbound_list;
+
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
@@ -338,7 +424,7 @@ struct i915_address_space {
u32 flags);
};
-#define i915_is_ggtt(V) ((V)->is_ggtt)
+#define i915_is_ggtt(V) (!(V)->file)
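
Since the global GTT is the only address space without an owning file, the macro above reduces the old is_ggtt flag to a NULL test. A trivial sketch (hypothetical helper) of the distinction:

	/* Sketch: only per-process address spaces have an owner to charge. */
	static struct drm_i915_file_private *
	example_vm_owner(struct i915_address_space *vm)
	{
		return i915_is_ggtt(vm) ? NULL : vm->file;
	}
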
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
@@ -349,14 +435,13 @@ struct i915_address_space {
*/
struct i915_ggtt {
struct i915_address_space base;
+ struct io_mapping mappable; /* Mapping to our CPU mappable region */
size_t stolen_size; /* Total size of stolen memory */
size_t stolen_usable_size; /* Total size minus BIOS reserved */
size_t stolen_reserved_base;
size_t stolen_reserved_size;
- size_t size; /* Total size of Global GTT */
u64 mappable_end; /* End offset that we can CPU map */
- struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
/** "Graphics Stolen Memory" holds the global PTEs */
@@ -365,8 +450,6 @@ struct i915_ggtt {
bool do_idle_maps;
int mtrr;
-
- int (*probe)(struct i915_ggtt *ggtt);
};
struct i915_hw_ppgtt {
@@ -380,8 +463,6 @@ struct i915_hw_ppgtt {
struct i915_page_directory pd; /* GEN6-7 */
};
- struct drm_i915_file_private *file_priv;
-
gen6_pte_t __iomem *pd_addr;
int (*enable)(struct i915_hw_ppgtt *ppgtt);
@@ -521,14 +602,15 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd);
}
-int i915_ggtt_init_hw(struct drm_device *dev);
-int i915_ggtt_enable_hw(struct drm_device *dev);
-void i915_gem_init_ggtt(struct drm_device *dev);
-void i915_ggtt_cleanup_hw(struct drm_device *dev);
+int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
+int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
+int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
+void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
@@ -548,23 +630,67 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-static inline bool
-i915_ggtt_view_equal(const struct i915_ggtt_view *a,
- const struct i915_ggtt_view *b)
+/* Flags used by pin/bind&friends. */
+#define PIN_NONBLOCK BIT(0)
+#define PIN_MAPPABLE BIT(1)
+#define PIN_ZONE_4G BIT(2)
+#define PIN_NONFAULT BIT(3)
+
+#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE BIT(8)
+
+#define PIN_HIGH BIT(9)
+#define PIN_OFFSET_BIAS BIT(10)
+#define PIN_OFFSET_FIXED BIT(11)
+#define PIN_OFFSET_MASK (~4095)
+
+int __i915_vma_do_pin(struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- if (WARN_ON(!a || !b))
- return false;
-
- if (a->type != b->type)
- return false;
- if (a->type != I915_GGTT_VIEW_NORMAL)
- return !memcmp(&a->params, &b->params, sizeof(a->params));
- return true;
+ BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ /* Pin early to prevent the shrinker/eviction logic from destroying
+ * our vma as we insert and bind.
+ */
+ if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+ return 0;
+
+ return __i915_vma_do_pin(vma, size, alignment, flags);
}
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view);
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_PIN_MASK;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+ return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+ vma->flags++;
+ GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ vma->flags--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ __i915_vma_unpin(vma);
+}
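
The pin count lives in the low bits of vma->flags, so i915_vma_pin() above can take the fast path (a single increment) whenever the vma is already bound the way the caller asked. A minimal usage sketch (hypothetical caller), pairing the pin with its unpin:

	/* Sketch: hold a GGTT binding steady while using its offset. */
	static int example_use_ggtt_offset(struct i915_vma *vma)
	{
		int ret;

		ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
		if (ret)
			return ret;

		/* vma->node.start is stable until the unpin below */

		i915_vma_unpin(vma);
		return 0;
	}
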
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
@@ -580,6 +706,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))
/**
* i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
@@ -593,9 +720,14 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->dev->struct_mutex);
- GEM_BUG_ON(vma->pin_count == 0);
GEM_BUG_ON(vma->iomap == NULL);
- vma->pin_count--;
+ i915_vma_unpin(vma);
+}
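
As a usage sketch for the iomap pair above (hypothetical caller; struct_mutex must be held, as the lockdep assertion enforces):

	/* Sketch: write one dword through the GGTT aperture mapping. */
	static int example_poke_through_aperture(struct i915_vma *vma, u32 v)
	{
		void __iomem *map;

		map = i915_vma_pin_iomap(vma); /* pins the vma on success */
		if (IS_ERR(map))
			return PTR_ERR(map);

		writel(v, map);
		i915_vma_unpin_iomap(vma); /* drops the pin taken above */
		return 0;
	}
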
+
+static inline struct page *i915_vma_first_page(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+ return sg_page(vma->pages->sgl);
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index f75bbd67a13a..95b7e9afd5f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,10 +28,17 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
+struct render_state {
+ const struct intel_renderstate_rodata *rodata;
+ struct i915_vma *vma;
+ u32 aux_batch_size;
+ u32 aux_batch_offset;
+};
+
static const struct intel_renderstate_rodata *
-render_state_get_rodata(const int gen)
+render_state_get_rodata(const struct drm_i915_gem_request *req)
{
- switch (gen) {
+ switch (INTEL_GEN(req->i915)) {
case 6:
return &gen6_null_state;
case 7:
@@ -45,35 +52,6 @@ render_state_get_rodata(const int gen)
return NULL;
}
-static int render_state_init(struct render_state *so,
- struct drm_i915_private *dev_priv)
-{
- int ret;
-
- so->gen = INTEL_GEN(dev_priv);
- so->rodata = render_state_get_rodata(so->gen);
- if (so->rodata == NULL)
- return 0;
-
- if (so->rodata->batch_items * 4 > 4096)
- return -EINVAL;
-
- so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
- if (IS_ERR(so->obj))
- return PTR_ERR(so->obj);
-
- ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
- if (ret)
- goto free_gem;
-
- so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
- return 0;
-
-free_gem:
- drm_gem_object_unreference(&so->obj->base);
- return ret;
-}
-
/*
* Macro to add commands to auxiliary batch.
* This macro only checks for page overflow before inserting the commands,
@@ -94,27 +72,28 @@ free_gem:
static int render_state_setup(struct render_state *so)
{
- struct drm_device *dev = so->obj->base.dev;
+ struct drm_device *dev = so->vma->vm->dev;
const struct intel_renderstate_rodata *rodata = so->rodata;
+ const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
unsigned int i = 0, reloc_index = 0;
struct page *page;
u32 *d;
int ret;
- ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
+ ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
if (ret)
return ret;
- page = i915_gem_object_get_dirty_page(so->obj, 0);
+ page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
d = kmap(page);
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
if (i * 4 == rodata->reloc[reloc_index]) {
- u64 r = s + so->ggtt_offset;
+ u64 r = s + so->vma->node.start;
s = lower_32_bits(r);
- if (so->gen >= 8) {
+ if (has_64bit_reloc) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0) {
ret = -EINVAL;
@@ -174,7 +153,7 @@ static int render_state_setup(struct render_state *so)
kunmap(page);
- ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
if (ret)
return ret;
@@ -192,67 +171,60 @@ err_out:
#undef OUT_BATCH
-void i915_gem_render_state_fini(struct render_state *so)
-{
- i915_gem_object_ggtt_unpin(so->obj);
- drm_gem_object_unreference(&so->obj->base);
-}
-
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
- struct render_state *so)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
+ struct render_state so;
+ struct drm_i915_gem_object *obj;
int ret;
- if (WARN_ON(engine->id != RCS))
+ if (WARN_ON(req->engine->id != RCS))
return -ENOENT;
- ret = render_state_init(so, engine->i915);
- if (ret)
- return ret;
-
- if (so->rodata == NULL)
+ so.rodata = render_state_get_rodata(req);
+ if (!so.rodata)
return 0;
- ret = render_state_setup(so);
- if (ret) {
- i915_gem_render_state_fini(so);
- return ret;
- }
+ if (so.rodata->batch_items * 4 > 4096)
+ return -EINVAL;
- return 0;
-}
+ obj = i915_gem_object_create(&req->i915->drm, 4096);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
-{
- struct render_state so;
- int ret;
+ so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
+ if (IS_ERR(so.vma)) {
+ ret = PTR_ERR(so.vma);
+ goto err_obj;
+ }
- ret = i915_gem_render_state_prepare(req->engine, &so);
+ ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
if (ret)
- return ret;
+ goto err_obj;
- if (so.rodata == NULL)
- return 0;
+ ret = render_state_setup(&so);
+ if (ret)
+ goto err_unpin;
- ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
- so.rodata->batch_items * 4,
- I915_DISPATCH_SECURE);
+ ret = req->engine->emit_bb_start(req, so.vma->node.start,
+ so.rodata->batch_items * 4,
+ I915_DISPATCH_SECURE);
if (ret)
- goto out;
+ goto err_unpin;
if (so.aux_batch_size > 8) {
- ret = req->engine->dispatch_execbuffer(req,
- (so.ggtt_offset +
- so.aux_batch_offset),
- so.aux_batch_size,
- I915_DISPATCH_SECURE);
+ ret = req->engine->emit_bb_start(req,
+ (so.vma->node.start +
+ so.aux_batch_offset),
+ so.aux_batch_size,
+ I915_DISPATCH_SECURE);
if (ret)
- goto out;
+ goto err_unpin;
}
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-
-out:
- i915_gem_render_state_fini(&so);
+ i915_vma_move_to_active(so.vma, req, 0);
+err_unpin:
+ i915_vma_unpin(so.vma);
+err_obj:
+ i915_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 6aaa3a10a630..18cce3f06e9c 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -24,26 +24,8 @@
#ifndef _I915_GEM_RENDER_STATE_H_
#define _I915_GEM_RENDER_STATE_H_
-#include <linux/types.h>
-
-struct intel_renderstate_rodata {
- const u32 *reloc;
- const u32 *batch;
- const u32 batch_items;
-};
-
-struct render_state {
- const struct intel_renderstate_rodata *rodata;
- struct drm_i915_gem_object *obj;
- u64 ggtt_offset;
- int gen;
- u32 aux_batch_size;
- u32 aux_batch_offset;
-};
+struct drm_i915_gem_request;
int i915_gem_render_state_init(struct drm_i915_gem_request *req);
-void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
- struct render_state *so);
#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
new file mode 100644
index 000000000000..8832f8ec1583
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -0,0 +1,947 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prefetch.h>
+
+#include "i915_drv.h"
+
+static const char *i915_fence_get_driver_name(struct fence *fence)
+{
+ return "i915";
+}
+
+static const char *i915_fence_get_timeline_name(struct fence *fence)
+{
+ /* Timelines are bound by eviction to a VM. However, since
+ * we only have a global seqno at the moment, we only have
+ * a single timeline. Note that each timeline will have
+ * multiple execution contexts (fence contexts) as we allow
+ * engines within a single timeline to execute in parallel.
+ */
+ return "global";
+}
+
+static bool i915_fence_signaled(struct fence *fence)
+{
+ return i915_gem_request_completed(to_request(fence));
+}
+
+static bool i915_fence_enable_signaling(struct fence *fence)
+{
+ if (i915_fence_signaled(fence))
+ return false;
+
+ intel_engine_enable_signaling(to_request(fence));
+ return true;
+}
+
+static signed long i915_fence_wait(struct fence *fence,
+ bool interruptible,
+ signed long timeout_jiffies)
+{
+ s64 timeout_ns, *timeout;
+ int ret;
+
+ if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
+ timeout_ns = jiffies_to_nsecs(timeout_jiffies);
+ timeout = &timeout_ns;
+ } else {
+ timeout = NULL;
+ }
+
+ ret = i915_wait_request(to_request(fence),
+ interruptible, timeout,
+ NO_WAITBOOST);
+ if (ret == -ETIME)
+ return 0;
+
+ if (ret < 0)
+ return ret;
+
+ if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
+ timeout_jiffies = nsecs_to_jiffies(timeout_ns);
+
+ return timeout_jiffies;
+}
+
+static void i915_fence_value_str(struct fence *fence, char *str, int size)
+{
+ snprintf(str, size, "%u", fence->seqno);
+}
+
+static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+ int size)
+{
+ snprintf(str, size, "%u",
+ intel_engine_get_seqno(to_request(fence)->engine));
+}
+
+static void i915_fence_release(struct fence *fence)
+{
+ struct drm_i915_gem_request *req = to_request(fence);
+
+ kmem_cache_free(req->i915->requests, req);
+}
+
+const struct fence_ops i915_fence_ops = {
+ .get_driver_name = i915_fence_get_driver_name,
+ .get_timeline_name = i915_fence_get_timeline_name,
+ .enable_signaling = i915_fence_enable_signaling,
+ .signaled = i915_fence_signaled,
+ .wait = i915_fence_wait,
+ .release = i915_fence_release,
+ .fence_value_str = i915_fence_value_str,
+ .timeline_value_str = i915_fence_timeline_value_str,
+};
+
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_private;
+ struct drm_i915_file_private *file_priv;
+
+ WARN_ON(!req || !file || req->file_priv);
+
+ if (!req || !file)
+ return -EINVAL;
+
+ if (req->file_priv)
+ return -EINVAL;
+
+ dev_private = req->i915;
+ file_priv = file->driver_priv;
+
+ spin_lock(&file_priv->mm.lock);
+ req->file_priv = file_priv;
+ list_add_tail(&req->client_list, &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
+
+ return 0;
+}
+
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_file_private *file_priv = request->file_priv;
+
+ if (!file_priv)
+ return;
+
+ spin_lock(&file_priv->mm.lock);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
+}
+
+void i915_gem_retire_noop(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
+{
+ /* Space left intentionally blank */
+}
+
+static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+{
+ struct i915_gem_active *active, *next;
+
+ trace_i915_gem_request_retire(request);
+ list_del(&request->link);
+
+ /* We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of tail of the request to update the last known position
+ * of the GPU head.
+ *
+ * Note this requires that we are always called in request
+ * completion order.
+ */
+ list_del(&request->ring_link);
+ request->ring->last_retired_head = request->postfix;
+
+ /* Walk through the active list, calling retire on each. This allows
+ * objects to track their GPU activity and mark themselves as idle
+ * when their *last* active request is completed (updating state
+ * tracking lists for eviction, active references for GEM, etc).
+ *
+ * As the ->retire() may free the node, we decouple it first and
+ * pass along the auxiliary information (to avoid dereferencing
+ * the node after the callback).
+ */
+ list_for_each_entry_safe(active, next, &request->active_list, link) {
+ /* In microbenchmarks or focusing upon time inside the kernel,
+ * we may spend an inordinate amount of time simply handling
+ * the retirement of requests and processing their callbacks.
+ * Of which, this loop itself is particularly hot due to the
+ * cache misses when jumping around the list of i915_gem_active.
+ * So we try to keep this loop as streamlined as possible and
+ * also prefetch the next i915_gem_active to try and hide
+ * the likely cache miss.
+ */
+ prefetchw(next);
+
+ INIT_LIST_HEAD(&active->link);
+ RCU_INIT_POINTER(active->request, NULL);
+
+ active->retire(active, request);
+ }
+
+ i915_gem_request_remove_from_client(request);
+
+ if (request->previous_context) {
+ if (i915.enable_execlists)
+ intel_lr_context_unpin(request->previous_context,
+ request->engine);
+ }
+
+ i915_gem_context_put(request->ctx);
+ i915_gem_request_put(request);
+}
+
+void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
+{
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_i915_gem_request *tmp;
+
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
+ GEM_BUG_ON(list_empty(&req->link));
+
+ do {
+ tmp = list_first_entry(&engine->request_list,
+ typeof(*tmp), link);
+
+ i915_gem_request_retire(tmp);
+ } while (tmp != req);
+}
+
+static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+{
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
+
+ if (i915_terminally_wedged(error))
+ return -EIO;
+
+ if (i915_reset_in_progress(error)) {
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these.
+ */
+ if (!dev_priv->mm.interruptible)
+ return -EIO;
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+{
+ struct intel_engine_cs *engine;
+ int ret;
+
+ /* Carefully retire all requests without writing to the rings */
+ for_each_engine(engine, dev_priv) {
+ ret = intel_engine_idle(engine,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
+ if (ret)
+ return ret;
+ }
+ i915_gem_retire_requests(dev_priv);
+
+ /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+ if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+ while (intel_kick_waiters(dev_priv) ||
+ intel_kick_signalers(dev_priv))
+ yield();
+ }
+
+ /* Finally reset hw state */
+ for_each_engine(engine, dev_priv)
+ intel_engine_init_seqno(engine, seqno);
+
+ return 0;
+}
+
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+
+ if (seqno == 0)
+ return -EINVAL;
+
+ /* The HWS page needs to be set to a value less than what we
+ * will inject into the ring
+ */
+ ret = i915_gem_init_seqno(dev_priv, seqno - 1);
+ if (ret)
+ return ret;
+
+ dev_priv->next_seqno = seqno;
+ return 0;
+}
+
+static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+{
+ /* reserve 0 for non-seqno */
+ if (unlikely(dev_priv->next_seqno == 0)) {
+ int ret;
+
+ ret = i915_gem_init_seqno(dev_priv, 0);
+ if (ret)
+ return ret;
+
+ dev_priv->next_seqno = 1;
+ }
+
+ *seqno = dev_priv->next_seqno++;
+ return 0;
+}
+
+static int __i915_sw_fence_call
+submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct drm_i915_gem_request *request =
+ container_of(fence, typeof(*request), submit);
+
+ /* Will be called from irq-context when using foreign DMA fences */
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ request->engine->last_submitted_seqno = request->fence.seqno;
+ request->engine->submit_request(request);
+ break;
+
+ case FENCE_FREE:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ * This can be NULL if the request is not directly related to
+ * any specific user context, in which case this function will
+ * choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_gem_request *req;
+ u32 seqno;
+ int ret;
+
+ /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+ * and restart.
+ */
+ ret = i915_gem_check_wedge(dev_priv);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Move the oldest request to the slab-cache (if not in use!) */
+ req = list_first_entry_or_null(&engine->request_list,
+ typeof(*req), link);
+ if (req && i915_gem_request_completed(req))
+ i915_gem_request_retire(req);
+
+ /* Beware: Dragons be flying overhead.
+ *
+ * We use RCU to look up requests in flight. The lookups may
+ * race with the request being allocated from the slab freelist.
+ * That is the request we are writing to here, may be in the process
+ * of being read by __i915_gem_active_get_rcu(). As such,
+ * we have to be very careful when overwriting the contents. During
+ * the RCU lookup, we chase the request->engine pointer,
+ * read the request->fence.seqno and increment the reference count.
+ *
+ * The reference count is incremented atomically. If it is zero,
+ * the lookup knows the request is unallocated and complete. Otherwise,
+ * it is either still in use, or has been reallocated and reset
+ * with fence_init(). This increment is safe for release as we check
+ * that the request we have a reference to matches the active
+ * request.
+ *
+ * Before we increment the refcount, we chase the request->engine
+ * pointer. We must not call kmem_cache_zalloc() or else we set
+ * that pointer to NULL and cause a crash during the lookup. If
+ * we see the request is completed (based on the value of the
+ * old engine and seqno), the lookup is complete and reports NULL.
+ * If we decide the request is not completed (new engine or seqno),
+ * then we grab a reference and double check that it is still the
+ * active request - if it no longer is, we restart the lookup.
+ *
+ * Do not use kmem_cache_zalloc() here!
+ */
+ req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ ret = i915_gem_get_seqno(dev_priv, &seqno);
+ if (ret)
+ goto err;
+
+ spin_lock_init(&req->lock);
+ fence_init(&req->fence,
+ &i915_fence_ops,
+ &req->lock,
+ engine->fence_context,
+ seqno);
+
+ i915_sw_fence_init(&req->submit, submit_notify);
+
+ INIT_LIST_HEAD(&req->active_list);
+ req->i915 = dev_priv;
+ req->engine = engine;
+ req->ctx = i915_gem_context_get(ctx);
+
+ /* No zalloc, must clear what we need by hand */
+ req->previous_context = NULL;
+ req->file_priv = NULL;
+ req->batch = NULL;
+
+ /*
+ * Reserve space in the ring buffer for all the commands required to
+ * eventually emit this request. This is to guarantee that the
+ * i915_add_request() call can't fail. Note that the reserve may need
+ * to be redone if the request is not actually submitted straight
+ * away, e.g. because a GPU scheduler has deferred it.
+ */
+ req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
+ if (i915.enable_execlists)
+ ret = intel_logical_ring_alloc_request_extras(req);
+ else
+ ret = intel_ring_alloc_request_extras(req);
+ if (ret)
+ goto err_ctx;
+
+ /* Record the position of the start of the request so that
+ * should we detect the updated seqno part-way through the
+ * GPU processing the request, we never over-estimate the
+ * position of the head.
+ */
+ req->head = req->ring->tail;
+
+ return req;
+
+err_ctx:
+ i915_gem_context_put(ctx);
+err:
+ kmem_cache_free(dev_priv->requests, req);
+ return ERR_PTR(ret);
+}
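
Putting the allocator together with the submission helpers later in this file, the basic request lifecycle looks roughly like the sketch below (hypothetical caller; dispatch flags and error handling pared down to the minimum):

	/* Sketch: allocate a request, emit a batch, then submit it. */
	static int example_submit(struct intel_engine_cs *engine,
				  struct i915_gem_context *ctx,
				  struct i915_vma *batch)
	{
		struct drm_i915_gem_request *req;
		int ret;

		req = i915_gem_request_alloc(engine, ctx);
		if (IS_ERR(req))
			return PTR_ERR(req);

		ret = engine->emit_bb_start(req, batch->node.start,
					    batch->obj->base.size, 0);

		/* Submit even on error so the reserved ring space and
		 * bookkeeping set up by the allocator are released.
		 */
		i915_add_request(req);
		return ret;
	}
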
+
+static int
+i915_gem_request_await_request(struct drm_i915_gem_request *to,
+ struct drm_i915_gem_request *from)
+{
+ int idx, ret;
+
+ GEM_BUG_ON(to == from);
+
+ if (to->engine == from->engine)
+ return 0;
+
+ idx = intel_engine_sync_index(from->engine, to->engine);
+ if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+ return 0;
+
+ trace_i915_gem_ring_sync_to(to, from);
+ if (!i915.semaphores) {
+ if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
+ ret = i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ ret = to->engine->semaphore.sync_to(to, from);
+ if (ret)
+ return ret;
+ }
+
+ from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+ return 0;
+}
+
+/**
+ * i915_gem_request_await_object - set this request to (async) wait upon a bo
+ *
+ * @to: request we are wishing to use
+ * @obj: object which may be in use on another ring.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Conceptually we serialise writes between engines inside the GPU.
+ * We only allow one engine to write into a buffer at any time, but
+ * multiple readers. To ensure each has a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ * request must wait for it to complete (either CPU or in hw, requests
+ * on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ * request must wait for outstanding read requests to complete.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+ struct drm_i915_gem_object *obj,
+ bool write)
+{
+ struct i915_gem_active *active;
+ unsigned long active_mask;
+ int idx;
+
+ if (write) {
+ active_mask = i915_gem_object_get_active(obj);
+ active = obj->last_read;
+ } else {
+ active_mask = 1;
+ active = &obj->last_write;
+ }
+
+ for_each_active(active_mask, idx) {
+ struct drm_i915_gem_request *request;
+ int ret;
+
+ request = i915_gem_active_peek(&active[idx],
+ &obj->base.dev->struct_mutex);
+ if (!request)
+ continue;
+
+ ret = i915_gem_request_await_request(to, request);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
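
A short sketch of how a caller orders access before emitting commands (hypothetical helper; the write flag selects between waiting on the last writer only, or on all prior access as described above):

	/* Sketch: serialise req's access to obj before touching it. */
	static int example_order_access(struct drm_i915_gem_request *req,
					struct drm_i915_gem_object *obj,
					bool will_write)
	{
		int ret;

		ret = i915_gem_request_await_object(req, obj, will_write);
		if (ret)
			return ret;

		/* ... now emit commands into req that access obj ... */
		return 0;
	}
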
+
+static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ dev_priv->gt.active_engines |= intel_engine_flag(engine);
+ if (dev_priv->gt.awake)
+ return;
+
+ intel_runtime_pm_get_noresume(dev_priv);
+ dev_priv->gt.awake = true;
+
+ intel_enable_gt_powersave(dev_priv);
+ i915_update_gfx_val(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 6)
+ gen6_rps_busy(dev_priv);
+
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->gt.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+/*
+ * NB: This function is not allowed to fail. Doing so would mean that the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_ring *ring = request->ring;
+ struct drm_i915_gem_request *prev;
+ u32 request_start;
+ u32 reserved_tail;
+ int ret;
+
+ trace_i915_gem_request_add(request);
+
+ /*
+ * To ensure that this call will not fail, space for its emissions
+ * should already have been reserved in the ring buffer. Let the ring
+ * know that it is time to use that space up.
+ */
+ request_start = ring->tail;
+ reserved_tail = request->reserved_space;
+ request->reserved_space = 0;
+
+ /*
+ * Emit any outstanding flushes - execbuf can fail to emit the flush
+ * after having emitted the batchbuffer command. Hence we need to fix
+ * things up, similarly to emitting the lazy request. The difference here
+ * is that the flush _must_ happen before the next request, no matter
+ * what.
+ */
+ if (flush_caches) {
+ ret = engine->emit_flush(request, EMIT_FLUSH);
+
+ /* Not allowed to fail! */
+ WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
+ }
+
+ /* Record the position of the start of the breadcrumb so that
+ * should we detect the updated seqno part-way through the
+ * GPU processing the request, we never over-estimate the
+ * position of the ring's HEAD.
+ */
+ request->postfix = ring->tail;
+
+ /* Not allowed to fail! */
+ ret = engine->emit_request(request);
+ WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
+
+ /* Sanity check that the reserved size was large enough. */
+ ret = ring->tail - request_start;
+ if (ret < 0)
+ ret += ring->size;
+ WARN_ONCE(ret > reserved_tail,
+ "Not enough space reserved (%d bytes) "
+ "for adding the request (%d bytes)\n",
+ reserved_tail, ret);
+
+ /* Seal the request and mark it as pending execution. Note that
+ * we may inspect this state, without holding any locks, during
+ * hangcheck. Hence we apply the barrier to ensure that we do not
+ * see a more recent value in the hws than we are tracking.
+ */
+
+ prev = i915_gem_active_raw(&engine->last_request,
+ &request->i915->drm.struct_mutex);
+ if (prev)
+ i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
+ &request->submitq);
+
+ request->emitted_jiffies = jiffies;
+ request->previous_seqno = engine->last_pending_seqno;
+ engine->last_pending_seqno = request->fence.seqno;
+ i915_gem_active_set(&engine->last_request, request);
+ list_add_tail(&request->link, &engine->request_list);
+ list_add_tail(&request->ring_link, &ring->request_list);
+
+ i915_gem_mark_busy(engine);
+
+ local_bh_disable();
+ i915_sw_fence_commit(&request->submit);
+ local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+}
+
+static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (list_empty(&wait->task_list))
+ __add_wait_queue(q, wait);
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+static unsigned long local_clock_us(unsigned int *cpu)
+{
+ unsigned long t;
+
+ /* Cheaply and approximately convert from nanoseconds to microseconds.
+ * The result and subsequent calculations are also defined in the same
+ * approximate microseconds units. The principal source of timing
+ * error here is from the simple truncation (the shift by 10 divides
+ * by 1024 rather than by 1000, a ~2% error).
+ *
+ * Note that local_clock() is only defined wrt to the current CPU;
+ * the comparisons are no longer valid if we switch CPUs. Instead of
+ * blocking preemption for the entire busywait, we can detect the CPU
+ * switch and use that as indicator of system load and a reason to
+ * stop busywaiting, see busywait_stop().
+ */
+ *cpu = get_cpu();
+ t = local_clock() >> 10;
+ put_cpu();
+
+ return t;
+}
+
+static bool busywait_stop(unsigned long timeout, unsigned int cpu)
+{
+ unsigned int this_cpu;
+
+ if (time_after(local_clock_us(&this_cpu), timeout))
+ return true;
+
+ return this_cpu != cpu;
+}
+
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+ int state, unsigned long timeout_us)
+{
+ unsigned int cpu;
+
+ /* When waiting for high frequency requests, e.g. during synchronous
+ * rendering split between the CPU and GPU, the finite amount of time
+ * required to set up the irq and wait upon it limits the response
+ * rate. By busywaiting on the request completion for a short while we
+ * can service the high frequency waits as quick as possible. However,
+ * if it is a slow request, we want to sleep as quickly as possible.
+ * The tradeoff between waiting and sleeping is roughly the time it
+ * takes to sleep on a request, on the order of a microsecond.
+ */
+
+ timeout_us += local_clock_us(&cpu);
+ do {
+ if (i915_gem_request_completed(req))
+ return true;
+
+ if (signal_pending_state(state, current))
+ break;
+
+ if (busywait_stop(timeout_us, cpu))
+ break;
+
+ cpu_relax_lowlatency();
+ } while (!need_resched());
+
+ return false;
+}
+
+/**
+ * i915_wait_request - wait until execution of request has finished
+ * @req: the request to wait upon
+ * @flags: how to wait
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ * @rps: client to charge for RPS boosting
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an SMP-safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
+ * Returns 0 if the request completed within the allotted time; otherwise
+ * returns the errno, with the remaining time filled into the timeout argument.
+ */
+int i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ s64 *timeout,
+ struct intel_rps_client *rps)
+{
+ const int state = flags & I915_WAIT_INTERRUPTIBLE ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ DEFINE_WAIT(reset);
+ struct intel_wait wait;
+ unsigned long timeout_remain;
+ int ret = 0;
+
+ might_sleep();
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
+ !!(flags & I915_WAIT_LOCKED));
+#endif
+
+ if (i915_gem_request_completed(req))
+ return 0;
+
+ timeout_remain = MAX_SCHEDULE_TIMEOUT;
+ if (timeout) {
+ if (WARN_ON(*timeout < 0))
+ return -EINVAL;
+
+ if (*timeout == 0)
+ return -ETIME;
+
+ /* Record current time in case interrupted, or wedged */
+ timeout_remain = nsecs_to_jiffies_timeout(*timeout);
+ *timeout += ktime_get_raw_ns();
+ }
+
+ trace_i915_gem_request_wait_begin(req);
+
+ /* This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we wait. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery). Not all clients even want their results
+ * immediately and for them we should just let the GPU select its own
+ * frequency to maximise efficiency. To prevent a single client from
+ * forcing the clocks too high for the whole system, we only allow
+ * each client to waitboost once in a busy period.
+ */
+ if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
+ gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+
+ /* Optimistic short spin before touching IRQs */
+ if (i915_spin_request(req, state, 5))
+ goto complete;
+
+ set_current_state(state);
+ if (flags & I915_WAIT_LOCKED)
+ add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+
+ intel_wait_init(&wait, req->fence.seqno);
+ if (intel_engine_add_wait(req->engine, &wait))
+ /* In order to check that we haven't missed the interrupt
+ * as we enabled it, we need to kick ourselves to do a
+ * coherent check on the seqno before we sleep.
+ */
+ goto wakeup;
+
+ for (;;) {
+ if (signal_pending_state(state, current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ timeout_remain = io_schedule_timeout(timeout_remain);
+ if (timeout_remain == 0) {
+ ret = -ETIME;
+ break;
+ }
+
+ if (intel_wait_complete(&wait))
+ break;
+
+ set_current_state(state);
+
+wakeup:
+ /* Carefully check if the request is complete, giving time
+ * for the seqno to be visible following the interrupt.
+ * We also have to check in case we are kicked by the GPU
+ * reset in order to drop the struct_mutex.
+ */
+ if (__i915_request_irq_complete(req))
+ break;
+
+ /* If the GPU is hung, and we hold the lock, reset the GPU
+ * and then check for completion. On a full reset, the engine's
+ * HW seqno will be advanced past us and we are complete.
+ * If we do a partial reset, we have to wait for the GPU to
+ * resume and update the breadcrumb.
+ *
+ * If we don't hold the mutex, we can just wait for the worker
+ * to come along and update the breadcrumb (either directly
+ * itself, or indirectly by recovering the GPU).
+ */
+ if (flags & I915_WAIT_LOCKED &&
+ i915_reset_in_progress(&req->i915->gpu_error)) {
+ __set_current_state(TASK_RUNNING);
+ i915_reset(req->i915);
+ reset_wait_queue(&req->i915->gpu_error.wait_queue,
+ &reset);
+ continue;
+ }
+
+ /* Only spin if we know the GPU is processing this request */
+ if (i915_spin_request(req, state, 2))
+ break;
+ }
+
+ intel_engine_remove_wait(req->engine, &wait);
+ if (flags & I915_WAIT_LOCKED)
+ remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
+ __set_current_state(TASK_RUNNING);
+
+complete:
+ trace_i915_gem_request_wait_end(req);
+
+ if (timeout) {
+ *timeout -= ktime_get_raw_ns();
+ if (*timeout < 0)
+ *timeout = 0;
+
+ /*
+ * Apparently ktime isn't accurate enough and occasionally has a
+ * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+ * things up to make the test happy. We allow up to 1 jiffy.
+ *
+ * This is a regression from the timespec->ktime conversion.
+ */
+ if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+ *timeout = 0;
+ }
+
+ if (IS_RPS_USER(rps) &&
+ req->fence.seqno == req->engine->last_submitted_seqno) {
+ /* The GPU is now idle and this client has stalled.
+ * Since no other client has submitted a request in the
+ * meantime, assume that this client is the only one
+ * supplying work to the GPU but is unable to keep that
+ * work supplied because it is waiting. Since the GPU is
+ * then never kept fully busy, RPS autoclocking will
+ * keep the clocks relatively low, causing further delays.
+ * Compensate by giving the synchronous client credit for
+ * a waitboost next time.
+ */
+ spin_lock(&req->i915->rps.client_lock);
+ list_del_init(&rps->link);
+ spin_unlock(&req->i915->rps.client_lock);
+ }
+
+ return ret;
+}
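
For example, a bounded interruptible wait without holding struct_mutex might look like this (hypothetical caller; the timeout is expressed in nanoseconds and updated in place):

	/* Sketch: give the request a 500ms budget to complete. */
	static int example_bounded_wait(struct drm_i915_gem_request *req)
	{
		s64 timeout = 500 * NSEC_PER_MSEC;
		int ret;

		ret = i915_wait_request(req, I915_WAIT_INTERRUPTIBLE,
					&timeout, NO_WAITBOOST);
		if (ret == -ETIME)
			return -EBUSY; /* budget exhausted, still busy */

		return ret; /* 0, or -ERESTARTSYS if interrupted */
	}
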
+
+static bool engine_retire_requests(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *request, *next;
+
+ list_for_each_entry_safe(request, next, &engine->request_list, link) {
+ if (!i915_gem_request_completed(request))
+ return false;
+
+ i915_gem_request_retire(request);
+ }
+
+ return true;
+}
+
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (dev_priv->gt.active_engines == 0)
+ return;
+
+ GEM_BUG_ON(!dev_priv->gt.awake);
+
+ for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
+ if (engine_retire_requests(engine))
+ dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
+
+ if (dev_priv->gt.active_engines == 0)
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->gt.idle_work,
+ msecs_to_jiffies(100));
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
new file mode 100644
index 000000000000..974bd7bcc801
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -0,0 +1,689 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_REQUEST_H
+#define I915_GEM_REQUEST_H
+
+#include <linux/fence.h>
+
+#include "i915_gem.h"
+#include "i915_sw_fence.h"
+
+struct intel_wait {
+ struct rb_node node;
+ struct task_struct *tsk;
+ u32 seqno;
+};
+
+struct intel_signal_node {
+ struct rb_node node;
+ struct intel_wait wait;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
+ *
+ * When modifying this structure be very aware that we perform a lockless
+ * RCU lookup of it that may race against reallocation of the struct
+ * from the slab freelist. We intentionally do not zero the structure on
+ * allocation so that the lookup can use the dangling pointers (and is
+ * cognisant that those pointers may be wrong). Instead, everything that
+ * needs to be initialised must be initialised explicitly.
+ *
+ * The requests are reference counted.
+ */
+struct drm_i915_gem_request {
+ struct fence fence;
+ spinlock_t lock;
+
+ /** The i915 device on which this request was generated */
+ struct drm_i915_private *i915;
+
+ /**
+ * Context and ring buffer related to this request
+ * Contexts are refcounted, so when this request is associated with a
+ * context, we must increment the context's refcount, to guarantee that
+ * it persists while any request is linked to it. Requests themselves
+ * are also refcounted, so the request will only be freed when the last
+ * reference to it is dismissed, and the code in
+ * i915_gem_request_free() will then decrement the refcount on the
+ * context.
+ */
+ struct i915_gem_context *ctx;
+ struct intel_engine_cs *engine;
+ struct intel_ring *ring;
+ struct intel_signal_node signaling;
+
+ struct i915_sw_fence submit;
+ wait_queue_t submitq;
+
+ /** GEM sequence number associated with the previous request;
+ * once the HWS breadcrumb has passed this value, the GPU is
+ * processing this request.
+ */
+ u32 previous_seqno;
+
+ /** Position in the ring of the start of the request */
+ u32 head;
+
+ /**
+ * Position in the ring of the start of the postfix.
+ * This is required to calculate the maximum available ring space
+ * without overwriting the postfix.
+ */
+ u32 postfix;
+
+ /** Position in the ring of the end of the whole request */
+ u32 tail;
+
+ /** Position in the ring of the end of any workarounds after the tail */
+ u32 wa_tail;
+
+ /** Preallocated space in the ring for emitting this request */
+ u32 reserved_space;
+
+ /**
+ * Context related to the previous request.
+ * As the contexts are accessed by the hardware until the switch is
+ * completed to a new context, the hardware may still be writing
+ * to the context object after the breadcrumb is visible. We must
+ * not unpin/unbind/prune that object whilst still active and so
+ * we keep the previous context pinned until the following (this)
+ * request is retired.
+ */
+ struct i915_gem_context *previous_context;
+
+ /** Batch buffer related to this request if any (used for
+ * error state dump only).
+ */
+ struct i915_vma *batch;
+ struct list_head active_list;
+
+ /** Time at which this request was emitted, in jiffies. */
+ unsigned long emitted_jiffies;
+
+ /** engine->request_list entry for this request */
+ struct list_head link;
+
+ /** ring->request_list entry for this request */
+ struct list_head ring_link;
+
+ struct drm_i915_file_private *file_priv;
+ /** file_priv list entry for this request */
+ struct list_head client_list;
+
+ /** Link in the execlist submission queue, guarded by execlist_lock. */
+ struct list_head execlist_link;
+};
+
+extern const struct fence_ops i915_fence_ops;
+
+static inline bool fence_is_i915(struct fence *fence)
+{
+ return fence->ops == &i915_fence_ops;
+}
+
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+ struct drm_file *file);
+void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
+
+static inline u32
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+ return req ? req->fence.seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
+{
+ return req ? req->engine : NULL;
+}
+
+static inline struct drm_i915_gem_request *
+to_request(struct fence *fence)
+{
+ /* We assume that NULL fence/request are interoperable */
+ BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
+ GEM_BUG_ON(fence && !fence_is_i915(fence));
+ return container_of(fence, struct drm_i915_gem_request, fence);
+}
+
+static inline struct drm_i915_gem_request *
+i915_gem_request_get(struct drm_i915_gem_request *req)
+{
+ return to_request(fence_get(&req->fence));
+}
+
+static inline struct drm_i915_gem_request *
+i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+{
+ return to_request(fence_get_rcu(&req->fence));
+}
+
+static inline void
+i915_gem_request_put(struct drm_i915_gem_request *req)
+{
+ fence_put(&req->fence);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+ struct drm_i915_gem_request *src)
+{
+ if (src)
+ i915_gem_request_get(src);
+
+ if (*pdst)
+ i915_gem_request_put(*pdst);
+
+ *pdst = src;
+}
+
+int
+i915_gem_request_await_object(struct drm_i915_gem_request *to,
+ struct drm_i915_gem_object *obj,
+ bool write);
+
+void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
+#define i915_add_request(req) \
+ __i915_add_request(req, true)
+#define i915_add_request_no_flush(req) \
+ __i915_add_request(req, false)
+
+struct intel_rps_client;
+#define NO_WAITBOOST ERR_PTR(-1)
+#define IS_RPS_CLIENT(p) (!IS_ERR(p))
+#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
+
+int i915_wait_request(struct drm_i915_gem_request *req,
+ unsigned int flags,
+ s64 *timeout,
+ struct intel_rps_client *rps)
+ __attribute__((nonnull(1)));
+#define I915_WAIT_INTERRUPTIBLE BIT(0)
+#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
+
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
+{
+ return (s32)(seq1 - seq2) >= 0;
+}
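
The signed subtraction makes the comparison safe across u32 wraparound, provided the two seqnos are within 2^31 of each other. A worked example:

	/*
	 * i915_seqno_passed(5, 3):          (s32)(5 - 3) == 2  -> true
	 * i915_seqno_passed(3, 5):          (s32)(3 - 5) == -2 -> false
	 * i915_seqno_passed(2, 0xffffffff): (s32)(2 - 0xffffffff) == 3
	 *                                   -> true (2 is "after" the wrap)
	 */
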
+
+static inline bool
+i915_gem_request_started(const struct drm_i915_gem_request *req)
+{
+ return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+ req->previous_seqno);
+}
+
+static inline bool
+i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+ return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+ req->fence.seqno);
+}
+
+bool __i915_spin_request(const struct drm_i915_gem_request *request,
+ int state, unsigned long timeout_us);
+static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
+ int state, unsigned long timeout_us)
+{
+ return (i915_gem_request_started(request) &&
+ __i915_spin_request(request, state, timeout_us));
+}
+
+/* We treat requests as fences. These are not to be confused with our
+ * "fence registers"; rather, they are pipeline synchronisation objects
+ * akin to GL_ARB_sync.
+ * We use the fences to synchronize access from the CPU with activity on the
+ * GPU, for example, we should not rewrite an object's PTE whilst the GPU
+ * is reading them. We also track fences at a higher level to provide
+ * implicit synchronisation around GEM objects, e.g. set-domain will wait
+ * for outstanding GPU rendering before marking the object ready for CPU
+ * access, or a pageflip will wait until the GPU is complete before showing
+ * the frame on the scanout.
+ *
+ * In order to use a fence, the object must track the fence it needs to
+ * serialise with. For example, GEM objects want to track both read and
+ * write access so that we can perform concurrent read operations between
+ * the CPU and GPU engines, as well as waiting for all rendering to
+ * complete, or waiting for the last GPU user of a "fence register". The
+ * object then embeds a #i915_gem_active to track the most recent (in
+ * retirement order) request relevant for the desired mode of access.
+ * The #i915_gem_active is updated with i915_gem_active_set() to track the
+ * most recent fence request, typically this is done as part of
+ * i915_vma_move_to_active().
+ *
+ * When the #i915_gem_active completes (is retired), it will
+ * signal its completion to the owner through a callback as well as mark
+ * itself as idle (i915_gem_active.request == NULL). The owner
+ * can then perform any action, such as delayed freeing of an active
+ * resource including itself.
+ */
+struct i915_gem_active;
+
+typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
+ struct drm_i915_gem_request *);
+
+struct i915_gem_active {
+ struct drm_i915_gem_request __rcu *request;
+ struct list_head link;
+ i915_gem_retire_fn retire;
+};
+
+void i915_gem_retire_noop(struct i915_gem_active *,
+ struct drm_i915_gem_request *request);
+
+/**
+ * init_request_active - prepares the activity tracker for use
+ * @active - the active tracker
+ * @retire - a callback invoked when the tracker is retired (becomes idle);
+ * can be NULL
+ *
+ * init_request_active() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active request
+ * associated with it. When the last request is retired after completion
+ * (i.e. the tracker becomes idle), the optional callback @retire is invoked.
+ */
+static inline void
+init_request_active(struct i915_gem_active *active,
+ i915_gem_retire_fn retire)
+{
+ INIT_LIST_HEAD(&active->link);
+ active->retire = retire ?: i915_gem_retire_noop;
+}
+
+/**
+ * i915_gem_active_set - updates the tracker to watch the current request
+ * @active - the active tracker
+ * @request - the request to watch
+ *
+ * i915_gem_active_set() watches the given @request for completion. Whilst
+ * that @request is busy, the @active reports busy. When that @request is
+ * retired, the @active tracker is updated to report idle.
+ */
+static inline void
+i915_gem_active_set(struct i915_gem_active *active,
+ struct drm_i915_gem_request *request)
+{
+ list_move(&active->link, &request->active_list);
+ rcu_assign_pointer(active->request, request);
+}
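
Tying the two helpers above together, an owner embeds the tracker and receives the retirement callback (a hypothetical example struct; i915_gem_active_set() must be called under struct_mutex as it manipulates the request's active_list):

	struct example_tracker {
		struct i915_gem_active active;
		bool idle;
	};

	static void example_retire(struct i915_gem_active *active,
				   struct drm_i915_gem_request *request)
	{
		container_of(active, struct example_tracker, active)->idle = true;
	}

	static void example_watch(struct example_tracker *t,
				  struct drm_i915_gem_request *req)
	{
		init_request_active(&t->active, example_retire);
		i915_gem_active_set(&t->active, req);
		/* t->idle becomes true when req is retired */
	}
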
+
+static inline struct drm_i915_gem_request *
+__i915_gem_active_peek(const struct i915_gem_active *active)
+{
+ /* Inside the error capture (running with the driver in an unknown
+ * state), we want to bend the rules slightly (a lot).
+ *
+ * Work is in progress to make it safer, in the meantime this keeps
+ * the known issue from spamming the logs.
+ */
+ return rcu_dereference_protected(active->request, 1);
+}
+
+/**
+ * i915_gem_active_raw - return the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_raw() returns the current request being tracked, or NULL.
+ * It does not obtain a reference on the request for the caller, so the caller
+ * must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
+{
+ return rcu_dereference_protected(active->request,
+ lockdep_is_held(mutex));
+}
+
+/**
+ * i915_gem_active_peek - report the active request being monitored
+ * @active - the active tracker
+ *
+ * i915_gem_active_peek() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, so the caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
+{
+ struct drm_i915_gem_request *request;
+
+ request = i915_gem_active_raw(active, mutex);
+ if (!request || i915_gem_request_completed(request))
+ return NULL;
+
+ return request;
+}
+
+/**
+ * i915_gem_active_get - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
+{
+ return i915_gem_request_get(i915_gem_active_peek(active, mutex));
+}
+
+/**
+ * __i915_gem_active_get_rcu - return a reference to the active request
+ * @active - the active tracker
+ *
+ * __i915_gem_active_get_rcu() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold the RCU read lock, but
+ * the returned pointer is safe to use outside of RCU.
+ */
+static inline struct drm_i915_gem_request *
+__i915_gem_active_get_rcu(const struct i915_gem_active *active)
+{
+ /* Performing a lockless retrieval of the active request is super
+ * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+ * slab of request objects will not be freed whilst we hold the
+ * RCU read lock. It does not guarantee that the request itself
+ * will not be freed and then *reused*. Viz,
+ *
+ * Thread A Thread B
+ *
+ * req = active.request
+ * retire(req) -> free(req);
+ * (req is now first on the slab freelist)
+ * active.request = NULL
+ *
+ * req = new submission on a new object
+ * ref(req)
+ *
+ * To prevent the request from being reused whilst the caller
+ * uses it, we take a reference like normal. Whilst acquiring
+ * the reference we check that it is not in a destroyed state
+ * (refcnt == 0). That prevents the request being reallocated
+ * whilst the caller holds on to it. To check that the request
+ * was not reallocated as we acquired the reference we have to
+ * check that our request remains the active request across
+ * the lookup, in the same manner as a seqlock. The visibility
+ * of the pointer versus the reference counting is controlled
+ * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
+ *
+ * In the middle of all that, we inspect whether the request is
+ * complete. Retiring is lazy so the request may be completed long
+ * before the active tracker is updated. Querying whether the
+ * request is complete is far cheaper (as it involves no locked
+ * instructions setting cachelines to exclusive) than acquiring
+ * the reference, so we do it first. The RCU read lock ensures the
+ * pointer dereference is valid, but does not ensure that the
+ * seqno nor HWS is the right one! However, if the request was
+ * reallocated, that means the active tracker's request was complete.
+ * If the new request is also complete, then both are and we can
+ * just report the active tracker is idle. If the new request is
+ * incomplete, then we acquire a reference on it and check that
+ * it remained the active request.
+ *
+ * It is then imperative that we do not zero the request on
+ * reallocation, so that we can chase the dangling pointers!
+ * See i915_gem_request_alloc().
+ */
+ do {
+ struct drm_i915_gem_request *request;
+
+ request = rcu_dereference(active->request);
+ if (!request || i915_gem_request_completed(request))
+ return NULL;
+
+ /* An especially silly compiler could decide to recompute the
+ * result of i915_gem_request_completed, more specifically
+ * re-emit the load for request->fence.seqno. A race would catch
+ * a later seqno value, which could flip the result from true to
+ * false. That would mean some of the instructions below are not
+ * executed, while later ones still are. Due to
+ * barriers within the refcounting the inconsistency can't reach
+ * past the call to i915_gem_request_get_rcu, but not executing
+ * that while still executing i915_gem_request_put() creates
+ * havoc enough. Prevent this with a compiler barrier.
+ */
+ barrier();
+
+ request = i915_gem_request_get_rcu(request);
+
+ /* What stops the following rcu_access_pointer() from occurring
+ * before the above i915_gem_request_get_rcu()? If we were
+ * to read the value before pausing to get the reference to
+ * the request, we may not notice a change in the active
+ * tracker.
+ *
+ * The rcu_access_pointer() is a mere compiler barrier, which
+ * means both the CPU and compiler are free to perform the
+ * memory read without constraint. The compiler only has to
+ * ensure that any operations after the rcu_access_pointer()
+ * occur afterwards in program order. This means the read may
+ * be performed earlier by an out-of-order CPU, or an adventurous
+ * compiler.
+ *
+ * The atomic operation at the heart of
+ * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+ * atomic_inc_not_zero() which is only a full memory barrier
+ * when successful. That is, if i915_gem_request_get_rcu()
+ * returns the request (and so with the reference counted
+ * incremented) then the following read for rcu_access_pointer()
+ * must occur after the atomic operation and so confirm
+ * that this request is the one currently being tracked.
+ *
+ * The corresponding write barrier is part of
+ * rcu_assign_pointer().
+ */
+ if (!request || request == rcu_access_pointer(active->request))
+ return rcu_pointer_handoff(request);
+
+ i915_gem_request_put(request);
+ } while (1);
+}
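
In short, __i915_gem_active_get_rcu() is the classic lockless-lookup idiom: dereference the slot under RCU, try to take a reference that fails once the refcount has already dropped to zero, then re-check that the slot still points at the same object. A stripped-down sketch of that idiom follows; struct obj, its refcnt field and put_ref() are hypothetical stand-ins for the request/fence machinery.

	struct obj *lockless_get(struct obj __rcu **slot)
	{
		struct obj *ptr;

		rcu_read_lock();
		do {
			ptr = rcu_dereference(*slot);
			if (!ptr)
				break;

			/* Fails iff the refcount already hit zero (object dying). */
			if (!atomic_inc_not_zero(&ptr->refcnt))
				continue;

			/* Re-check: did the slot move on while we took the ref? */
			if (ptr == rcu_access_pointer(*slot))
				break;	/* success: ptr is pinned by our reference */

			put_ref(ptr);	/* raced with reallocation, retry */
		} while (1);
		rcu_read_unlock();

		return ptr;
	}
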
+
+/**
+ * i915_gem_active_get_unlocked - return a reference to the active request
+ * @active: the active tracker
+ *
+ * i915_gem_active_get_unlocked() returns a reference to the active request,
+ * or NULL if the active tracker is idle. The reference is obtained under RCU,
+ * so no locking is required by the caller.
+ *
+ * The reference should be freed with i915_gem_request_put().
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get_unlocked(const struct i915_gem_active *active)
+{
+ struct drm_i915_gem_request *request;
+
+ rcu_read_lock();
+ request = __i915_gem_active_get_rcu(active);
+ rcu_read_unlock();
+
+ return request;
+}
+
+/**
+ * i915_gem_active_isset - report whether the active tracker is assigned
+ * @active: the active tracker
+ *
+ * i915_gem_active_isset() returns true if the active tracker is currently
+ * assigned to a request. Due to the lazy retiring, that request may be idle
+ * and this may report stale information.
+ */
+static inline bool
+i915_gem_active_isset(const struct i915_gem_active *active)
+{
+ return rcu_access_pointer(active->request);
+}
+
+/**
+ * i915_gem_active_is_idle - report whether the active tracker is idle
+ * @active: the active tracker
+ *
+ * i915_gem_active_is_idle() returns true if the active tracker is currently
+ * unassigned or if the request is complete (but not yet retired). Requires
+ * the caller to hold struct_mutex (but that can be relaxed if desired).
+ */
+static inline bool
+i915_gem_active_is_idle(const struct i915_gem_active *active,
+ struct mutex *mutex)
+{
+ return !i915_gem_active_peek(active, mutex);
+}
+
+/**
+ * i915_gem_active_wait - waits until the request is completed
+ * @active: the active request on which to wait
+ *
+ * i915_gem_active_wait() waits until the request is completed before
+ * returning. Note that it does not guarantee that the request is
+ * retired first; see i915_gem_active_retire().
+ *
+ * i915_gem_active_wait() returns immediately if the active
+ * request is already complete.
+ */
+static inline int __must_check
+i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
+{
+ struct drm_i915_gem_request *request;
+
+ request = i915_gem_active_peek(active, mutex);
+ if (!request)
+ return 0;
+
+ return i915_wait_request(request,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ NULL, NULL);
+}
+
+/**
+ * i915_gem_active_wait_unlocked - waits until the request is completed
+ * @active: the active request on which to wait
+ * @flags: how to wait
+ * @timeout: how long to wait at most
+ * @rps: userspace client to charge for a waitboost
+ *
+ * i915_gem_active_wait_unlocked() waits until the request is completed before
+ * returning, without requiring any locks to be held. Note that it does not
+ * retire any requests before returning.
+ *
+ * This function relies on RCU in order to acquire the reference to the active
+ * request without holding any locks. See __i915_gem_active_get_rcu() for the
+ * gory details on how that is managed. Once the reference is acquired, we
+ * can then wait upon the request, and afterwards release our reference,
+ * free of any locking.
+ *
+ * This function wraps i915_wait_request(), see it for the full details on
+ * the arguments.
+ *
+ * Returns 0 if successful, or a negative error code.
+ */
+static inline int
+i915_gem_active_wait_unlocked(const struct i915_gem_active *active,
+ unsigned int flags,
+ s64 *timeout,
+ struct intel_rps_client *rps)
+{
+ struct drm_i915_gem_request *request;
+ int ret = 0;
+
+ request = i915_gem_active_get_unlocked(active);
+ if (request) {
+ ret = i915_wait_request(request, flags, timeout, rps);
+ i915_gem_request_put(request);
+ }
+
+ return ret;
+}
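
A typical caller then needs no locks at all. A minimal usage sketch (obj->last_write is the object's write tracker, as used elsewhere in this series):

	int err;

	err = i915_gem_active_wait_unlocked(&obj->last_write,
					    I915_WAIT_INTERRUPTIBLE,
					    NULL, NULL);
	if (err)
		return err;
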
+
+/**
+ * i915_gem_active_retire - waits until the request is retired
+ * @active: the active request on which to wait
+ *
+ * i915_gem_active_retire() waits until the request is completed,
+ * and then ensures that at least the retirement handler for this
+ * @active tracker is called before returning. If the @active
+ * tracker is idle, the function returns immediately.
+ */
+static inline int __must_check
+i915_gem_active_retire(struct i915_gem_active *active,
+ struct mutex *mutex)
+{
+ struct drm_i915_gem_request *request;
+ int ret;
+
+ request = i915_gem_active_raw(active, mutex);
+ if (!request)
+ return 0;
+
+ ret = i915_wait_request(request,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ NULL, NULL);
+ if (ret)
+ return ret;
+
+ list_del_init(&active->link);
+ RCU_INIT_POINTER(active->request, NULL);
+
+ active->retire(active, request);
+
+ return 0;
+}
+
+/* Convenience functions for peeking at state inside active's request whilst
+ * guarded by the struct_mutex.
+ */
+
+static inline uint32_t
+i915_gem_active_get_seqno(const struct i915_gem_active *active,
+ struct mutex *mutex)
+{
+ return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
+}
+
+static inline struct intel_engine_cs *
+i915_gem_active_get_engine(const struct i915_gem_active *active,
+ struct mutex *mutex)
+{
+ return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
+}
+
+#define for_each_active(mask, idx) \
+ for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
+
+#endif /* I915_GEM_REQUEST_H */
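
For reference, for_each_active() walks the set bits of a mask, visiting each index once and clearing the bit as it goes, so the mask is consumed by the loop. A small sketch with a made-up mask value:

	unsigned long mask = 0xb;	/* bits 0, 1 and 3 set */
	int idx;

	for_each_active(mask, idx)
		pr_info("engine %d has an active request\n", idx);
	/* visits idx = 0, 1, 3; mask is zero afterwards */
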
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 6f10b421487b..1c237d02f30b 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,19 +48,15 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- int count = 0;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (drm_mm_node_allocated(&vma->node))
- count++;
- if (vma->pin_count)
- count++;
- }
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
+ if (i915_vma_is_pinned(vma))
+ return true;
- return count;
+ return false;
}
static bool swap_available(void)
@@ -82,7 +78,10 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
- if (obj->pages_pin_count != num_vma_bound(obj))
+ if (obj->pages_pin_count > obj->bind_count)
+ return false;
+
+ if (any_vma_pinned(obj))
return false;
/* We can only return physical pages to the system if we can either
@@ -163,17 +162,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
*/
for (phase = phases; phase->list; phase++) {
struct list_head still_in_list;
+ struct drm_i915_gem_object *obj;
if ((flags & phase->bit) == 0)
continue;
INIT_LIST_HEAD(&still_in_list);
- while (count < target && !list_empty(phase->list)) {
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma, *v;
-
- obj = list_first_entry(phase->list,
- typeof(*obj), global_list);
+ while (count < target &&
+ (obj = list_first_entry_or_null(phase->list,
+ typeof(*obj),
+ global_list))) {
list_move_tail(&obj->global_list, &still_in_list);
if (flags & I915_SHRINK_PURGEABLE &&
@@ -184,24 +182,21 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
!is_vmalloc_addr(obj->mapping))
continue;
- if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+ if ((flags & I915_SHRINK_ACTIVE) == 0 &&
+ i915_gem_object_is_active(obj))
continue;
if (!can_release_pages(obj))
continue;
- drm_gem_object_reference(&obj->base);
+ i915_gem_object_get(obj);
/* For the unbound phase, this should be a no-op! */
- list_for_each_entry_safe(vma, v,
- &obj->vma_list, obj_link)
- if (i915_vma_unbind(vma))
- break;
-
+ i915_gem_object_unbind(obj);
if (i915_gem_object_put_pages(obj) == 0)
count += obj->base.size >> PAGE_SHIFT;
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
}
list_splice(&still_in_list, phase->list);
}
@@ -210,6 +205,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
+ /* expedite the RCU grace period to free some request slabs */
+ synchronize_rcu_expedited();
return count;
}
@@ -230,10 +227,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
*/
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
- return i915_gem_shrink(dev_priv, -1UL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE);
+ unsigned long freed;
+
+ freed = i915_gem_shrink(dev_priv, -1UL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE);
+ rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+
+ return freed;
}
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
@@ -242,9 +244,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return false;
- if (to_i915(dev)->mm.shrinker_no_lock_stealing)
- return false;
-
*unlock = false;
} else
*unlock = true;
@@ -273,7 +272,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->active && can_release_pages(obj))
+ if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
}
@@ -321,17 +320,22 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
struct shrinker_lock_uninterruptible *slu,
int timeout_ms)
{
- unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
+ unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
+
+ do {
+ if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
+ i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
+ break;
- while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return false;
- if (--timeout == 0) {
+
+ if (time_after(jiffies, timeout)) {
pr_err("Unable to lock GPU to purge memory.\n");
return false;
}
- }
+ } while (1);
slu->was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
@@ -410,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gem_wait_for_idle(dev_priv);
+ ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 66be299a1486..59989e8ee5dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -92,6 +92,7 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
u32 base;
@@ -111,33 +112,44 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 3) {
u32 bsm;
- pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
+ pci_read_config_dword(pdev, INTEL_BSM, &bsm);
base = bsm & INTEL_BSM_MASK;
} else if (IS_I865G(dev)) {
+ u32 tseg_size = 0;
u16 toud = 0;
+ u8 tmp;
- /*
- * FIXME is the graphics stolen memory region
- * always at TOUD? Ie. is it always the last
- * one to be allocated by the BIOS?
- */
- pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
+ pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
+ I845_ESMRAMC, &tmp);
+
+ if (tmp & TSEG_ENABLE) {
+ switch (tmp & I845_TSEG_SIZE_MASK) {
+ case I845_TSEG_SIZE_512K:
+ tseg_size = KB(512);
+ break;
+ case I845_TSEG_SIZE_1M:
+ tseg_size = MB(1);
+ break;
+ }
+ }
+
+ pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
I865_TOUD, &toud);
- base = toud << 16;
+ base = (toud << 16) + tseg_size;
} else if (IS_I85X(dev)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
- pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+ pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
I85X_ESMRAMC, &tmp);
if (tmp & TSEG_ENABLE)
tseg_size = MB(1);
- pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
+ pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
I85X_DRB3, &tmp);
tom = tmp * MB(32);
@@ -147,7 +159,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
u32 tom;
u8 tmp;
- pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+ pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
I845_ESMRAMC, &tmp);
if (tmp & TSEG_ENABLE) {
@@ -161,7 +173,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
}
}
- pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+ pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
I830_DRB3, &tmp);
tom = tmp * MB(32);
@@ -171,7 +183,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
u32 tom;
u8 tmp;
- pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+ pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
I830_ESMRAMC, &tmp);
if (tmp & TSEG_ENABLE) {
@@ -181,7 +193,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
tseg_size = KB(512);
}
- pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+ pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
I830_DRB3, &tmp);
tom = tmp * MB(32);
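
A worked example of the new I865G arithmetic above, with hypothetical register values: if TOUD reads 0x0800, usable DRAM tops out at 0x0800 << 16 = 0x08000000 (128 MiB); with a 512K TSEG enabled, the stolen region then starts above the TSEG rather than at TOUD itself:

	u16 toud = 0x0800;			/* hypothetical register value */
	u32 base = (toud << 16) + KB(512);	/* 0x08000000 + 0x80000 = 0x08080000 */
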
@@ -685,7 +697,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
- vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -698,24 +710,25 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
*/
vma->node.start = gtt_offset;
vma->node.size = size;
- if (drm_mm_initialized(&ggtt->base.mm)) {
- ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
- if (ret) {
- DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- goto err;
- }
- vma->bound |= GLOBAL_BIND;
- __i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+ goto err;
}
+ vma->pages = obj->pages;
+ vma->flags |= I915_VMA_GLOBAL_BIND;
+ __i915_vma_set_map_and_fenceable(vma);
+ list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+ obj->bind_count++;
+
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
i915_gem_object_pin_pages(obj);
return obj;
err:
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
return NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 8030199731db..a14b1e3d4c78 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -68,6 +68,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (tiling_mode == I915_TILING_NONE)
return true;
+ if (tiling_mode > I915_TILING_LAST)
+ return false;
+
if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
@@ -113,36 +116,58 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return true;
}
-/* Is the current GTT allocation valid for the change in tiling? */
-static bool
-i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
+static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
{
+ struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
u32 size;
- if (tiling_mode == I915_TILING_NONE)
- return true;
-
- if (INTEL_INFO(obj->base.dev)->gen >= 4)
+ if (!i915_vma_is_map_and_fenceable(vma))
return true;
- if (IS_GEN3(obj->base.dev)) {
- if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
+ if (INTEL_GEN(dev_priv) == 3) {
+ if (vma->node.start & ~I915_FENCE_START_MASK)
return false;
} else {
- if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
+ if (vma->node.start & ~I830_FENCE_START_MASK)
return false;
}
- size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
- if (i915_gem_obj_ggtt_size(obj) != size)
+ size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
+ if (vma->node.size < size)
return false;
- if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
+ if (vma->node.start & (size - 1))
return false;
return true;
}
+/* Make the current GTT allocation valid for the change in tiling. */
+static int
+i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+ int ret;
+
+ if (tiling_mode == I915_TILING_NONE)
+ return 0;
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ return 0;
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (i915_vma_fence_prepare(vma, tiling_mode))
+ continue;
+
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* i915_gem_set_tiling - IOCTL handler to set tiling mode
* @dev: DRM device
@@ -164,15 +189,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- int ret = 0;
+ int err = 0;
+
+ /* Make sure we don't cross-contaminate obj->tiling_and_stride */
+ BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
- obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
- if (&obj->base == NULL)
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
return -ENOENT;
if (!i915_tiling_ok(dev,
args->stride, obj->base.size, args->tiling_mode)) {
- drm_gem_object_unreference_unlocked(&obj->base);
+ i915_gem_object_put_unlocked(obj);
return -EINVAL;
}
@@ -180,7 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) {
- ret = -EBUSY;
+ err = -EBUSY;
goto err;
}
@@ -213,8 +241,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
- if (args->tiling_mode != obj->tiling_mode ||
- args->stride != obj->stride) {
+ if (args->tiling_mode != i915_gem_object_get_tiling(obj) ||
+ args->stride != i915_gem_object_get_stride(obj)) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
@@ -227,34 +255,36 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
* has to also include the unfenced register the GPU uses
* whilst executing a fenced command for an untiled object.
*/
- if (obj->map_and_fenceable &&
- !i915_gem_object_fence_ok(obj, args->tiling_mode))
- ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
- if (ret == 0) {
+ err = i915_gem_object_fence_prepare(obj, args->tiling_mode);
+ if (!err) {
+ struct i915_vma *vma;
+
if (obj->pages &&
obj->madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (args->tiling_mode == I915_TILING_NONE)
i915_gem_object_unpin_pages(obj);
- if (obj->tiling_mode == I915_TILING_NONE)
+ if (!i915_gem_object_is_tiled(obj))
i915_gem_object_pin_pages(obj);
}
- obj->fence_dirty =
- obj->last_fenced_req ||
- obj->fence_reg != I915_FENCE_REG_NONE;
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!vma->fence)
+ continue;
- obj->tiling_mode = args->tiling_mode;
- obj->stride = args->stride;
+ vma->fence->dirty = true;
+ }
+ obj->tiling_and_stride =
+ args->stride | args->tiling_mode;
/* Force the fence to be reacquired for GTT access */
i915_gem_release_mmap(obj);
}
}
/* we have to maintain this existing ABI... */
- args->stride = obj->stride;
- args->tiling_mode = obj->tiling_mode;
+ args->stride = i915_gem_object_get_stride(obj);
+ args->tiling_mode = i915_gem_object_get_tiling(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -268,12 +298,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
err:
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
intel_runtime_pm_put(dev_priv);
- return ret;
+ return err;
}
/**
@@ -297,14 +327,12 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
- if (&obj->base == NULL)
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
return -ENOENT;
- mutex_lock(&dev->struct_mutex);
-
- args->tiling_mode = obj->tiling_mode;
- switch (obj->tiling_mode) {
+ args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
+ switch (args->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
@@ -328,8 +356,6 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&dev->struct_mutex);
-
+ i915_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 2314c88323e3..c6f780f5abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -63,33 +63,12 @@ struct i915_mmu_object {
static void wait_rendering(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
- int i, n;
-
- if (!obj->active)
- return;
-
- n = 0;
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct drm_i915_gem_request *req;
-
- req = obj->last_read_req[i];
- if (req == NULL)
- continue;
-
- requests[n++] = i915_gem_request_reference(req);
- }
+ unsigned long active = __I915_BO_ACTIVE(obj);
+ int idx;
- mutex_unlock(&dev->struct_mutex);
-
- for (i = 0; i < n; i++)
- __i915_wait_request(requests[i], false, NULL, NULL);
-
- mutex_lock(&dev->struct_mutex);
-
- for (i = 0; i < n; i++)
- i915_gem_request_unreference(requests[i]);
+ for_each_active(active, idx)
+ i915_gem_active_wait_unlocked(&obj->last_read[idx],
+ 0, NULL, NULL);
}
static void cancel_userptr(struct work_struct *work)
@@ -98,28 +77,19 @@ static void cancel_userptr(struct work_struct *work)
struct drm_i915_gem_object *obj = mo->obj;
struct drm_device *dev = obj->base.dev;
+ wait_rendering(obj);
+
mutex_lock(&dev->struct_mutex);
/* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL;
if (obj->pages != NULL) {
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_vma *vma, *tmp;
- bool was_interruptible;
-
- wait_rendering(obj);
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
- WARN_ON(i915_vma_unbind(vma));
+ /* We are inside a kthread context and can't be interrupted */
+ WARN_ON(i915_gem_object_unbind(obj));
WARN_ON(i915_gem_object_put_pages(obj));
-
- dev_priv->mm.interruptible = was_interruptible;
}
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
}
@@ -538,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
+ unsigned int flags = 0;
+
+ if (!obj->userptr.read_only)
+ flags |= FOLL_WRITE;
ret = -EFAULT;
if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -547,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
- !obj->userptr.read_only, 0,
+ flags,
pvec + pinned, NULL);
if (ret < 0)
break;
@@ -572,12 +546,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
}
obj->userptr.work = ERR_PTR(ret);
- if (ret)
- __i915_gem_userptr_set_active(obj, false);
}
obj->userptr.workers--;
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
release_pages(pvec, pinned, 0);
@@ -622,8 +594,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
obj->userptr.work = &work->work;
obj->userptr.workers++;
- work->obj = obj;
- drm_gem_object_reference(&obj->base);
+ work->obj = i915_gem_object_get(obj);
work->task = current;
get_task_struct(work->task);
@@ -659,15 +630,14 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
* to the vma (discard or cloning) which should prevent the more
* egregious cases from causing harm.
*/
- if (IS_ERR(obj->userptr.work)) {
- /* active flag will have been dropped already by the worker */
- ret = PTR_ERR(obj->userptr.work);
- obj->userptr.work = NULL;
- return ret;
- }
- if (obj->userptr.work)
+
+ if (obj->userptr.work) {
/* active flag should still be held for the pending work */
- return -EAGAIN;
+ if (IS_ERR(obj->userptr.work))
+ return PTR_ERR(obj->userptr.work);
+ else
+ return -EAGAIN;
+ }
/* Let the mmu-notifier know that we have begun and need cancellation */
ret = __i915_gem_userptr_set_active(obj, true);
@@ -846,7 +816,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&obj->base);
+ i915_gem_object_put_unlocked(obj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9d73d2216adc..334f15df7c8d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -30,9 +30,9 @@
#include <generated/utsrelease.h>
#include "i915_drv.h"
-static const char *ring_str(int ring)
+static const char *engine_str(int engine)
{
- switch (ring) {
+ switch (engine) {
case RCS: return "render";
case VCS: return "bsd";
case BCS: return "blt";
@@ -42,16 +42,6 @@ static const char *ring_str(int ring)
}
}
-static const char *pin_flag(int pinned)
-{
- if (pinned > 0)
- return " P";
- else if (pinned < 0)
- return " p";
- else
- return "";
-}
-
static const char *tiling_flag(int tiling)
{
switch (tiling) {
@@ -189,7 +179,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
{
int i;
- err_printf(m, " %s [%d]:\n", name, count);
+ err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
err_printf(m, " %08x_%08x %8u %02x %02x [ ",
@@ -202,13 +192,12 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err_printf(m, "%02x ", err->rseqno[i]);
err_printf(m, "] %02x", err->wseqno);
- err_puts(m, pin_flag(err->pinned));
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
err_puts(m, err->userptr ? " userptr" : "");
- err_puts(m, err->ring != -1 ? " " : "");
- err_puts(m, ring_str(err->ring));
+ err_puts(m, err->engine != -1 ? " " : "");
+ err_puts(m, engine_str(err->engine));
err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
if (err->name)
@@ -221,7 +210,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
}
-static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
{
switch (a) {
case HANGCHECK_IDLE:
@@ -239,70 +228,74 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
return "unknown";
}
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
- struct drm_device *dev,
- struct drm_i915_error_state *error,
- int ring_idx)
+static void error_print_engine(struct drm_i915_error_state_buf *m,
+ struct drm_i915_error_engine *ee)
{
- struct drm_i915_error_ring *ring = &error->ring[ring_idx];
-
- if (!ring->valid)
- return;
-
- err_printf(m, "%s command stream:\n", ring_str(ring_idx));
- err_printf(m, " START: 0x%08x\n", ring->start);
- err_printf(m, " HEAD: 0x%08x\n", ring->head);
- err_printf(m, " TAIL: 0x%08x\n", ring->tail);
- err_printf(m, " CTL: 0x%08x\n", ring->ctl);
- err_printf(m, " HWS: 0x%08x\n", ring->hws);
- err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
- err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
- err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
- err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
- if (INTEL_INFO(dev)->gen >= 4) {
- err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
- err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
- err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
+ err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
+ err_printf(m, " START: 0x%08x\n", ee->start);
+ err_printf(m, " HEAD: 0x%08x\n", ee->head);
+ err_printf(m, " TAIL: 0x%08x\n", ee->tail);
+ err_printf(m, " CTL: 0x%08x\n", ee->ctl);
+ err_printf(m, " MODE: 0x%08x\n", ee->mode);
+ err_printf(m, " HWS: 0x%08x\n", ee->hws);
+ err_printf(m, " ACTHD: 0x%08x %08x\n",
+ (u32)(ee->acthd>>32), (u32)ee->acthd);
+ err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
+ err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
+ err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone);
+ if (ee->batchbuffer) {
+ u64 start = ee->batchbuffer->gtt_offset;
+ u64 end = start + ee->batchbuffer->gtt_size;
+
+ err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
+ upper_32_bits(start), lower_32_bits(start),
+ upper_32_bits(end), lower_32_bits(end));
}
- err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
- err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
- lower_32_bits(ring->faddr));
- if (INTEL_INFO(dev)->gen >= 6) {
- err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
- err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
+ if (INTEL_GEN(m->i915) >= 4) {
+ err_printf(m, " BBADDR: 0x%08x_%08x\n",
+ (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
+ err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
+ err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
+ }
+ err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
+ err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
+ lower_32_bits(ee->faddr));
+ if (INTEL_GEN(m->i915) >= 6) {
+ err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
+ err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- ring->semaphore_mboxes[0],
- ring->semaphore_seqno[0]);
+ ee->semaphore_mboxes[0],
+ ee->semaphore_seqno[0]);
err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- ring->semaphore_mboxes[1],
- ring->semaphore_seqno[1]);
- if (HAS_VEBOX(dev)) {
+ ee->semaphore_mboxes[1],
+ ee->semaphore_seqno[1]);
+ if (HAS_VEBOX(m->i915)) {
err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
- ring->semaphore_mboxes[2],
- ring->semaphore_seqno[2]);
+ ee->semaphore_mboxes[2],
+ ee->semaphore_seqno[2]);
}
}
- if (USES_PPGTT(dev)) {
- err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+ if (USES_PPGTT(m->i915)) {
+ err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(m->i915) >= 8) {
int i;
for (i = 0; i < 4; i++)
err_printf(m, " PDP%d: 0x%016llx\n",
- i, ring->vm_info.pdp[i]);
+ i, ee->vm_info.pdp[i]);
} else {
err_printf(m, " PP_DIR_BASE: 0x%08x\n",
- ring->vm_info.pp_dir_base);
+ ee->vm_info.pp_dir_base);
}
}
- err_printf(m, " seqno: 0x%08x\n", ring->seqno);
- err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno);
- err_printf(m, " waiting: %s\n", yesno(ring->waiting));
- err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
- err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
+ err_printf(m, " seqno: 0x%08x\n", ee->seqno);
+ err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
+ err_printf(m, " waiting: %s\n", yesno(ee->waiting));
+ err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
+ err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
err_printf(m, " hangcheck: %s [%d]\n",
- hangcheck_action_to_str(ring->hangcheck_action),
- ring->hangcheck_score);
+ hangcheck_action_to_str(ee->hangcheck_action),
+ ee->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -328,11 +321,22 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
}
}
+static void err_print_capabilities(struct drm_i915_error_state_buf *m,
+ const struct intel_device_info *info)
+{
+#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
+#define SEP_SEMICOLON ;
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+#undef PRINT_FLAG
+#undef SEP_SEMICOLON
+}
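
err_print_capabilities() relies on an X-macro: DEV_INFO_FOR_EACH_FLAG() expands a caller-supplied macro once per device-info flag, so a single flag list can generate printers, comparisons and so on. A self-contained toy version of the idiom (the flag names and the list macro here are invented):

	#include <stdbool.h>
	#include <stdio.h>

	#define FOR_EACH_FLAG(F, SEP) F(has_llc) SEP F(has_pooled_eu) SEP F(has_csr)

	struct info { bool has_llc, has_pooled_eu, has_csr; };

	static void print_caps(const struct info *info)
	{
	#define PRINT_FLAG(x) printf(#x ": %s\n", info->x ? "yes" : "no")
	#define SEP_SEMICOLON ;
		FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
	#undef PRINT_FLAG
	#undef SEP_SEMICOLON
	}
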
+
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
struct drm_device *dev = error_priv->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
int i, j, offset, elt;
@@ -347,27 +351,28 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_print_capabilities(m, &error->device_info);
max_hangcheck_score = 0;
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- if (error->ring[i].hangcheck_score > max_hangcheck_score)
- max_hangcheck_score = error->ring[i].hangcheck_score;
+ for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+ if (error->engine[i].hangcheck_score > max_hangcheck_score)
+ max_hangcheck_score = error->engine[i].hangcheck_score;
}
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- if (error->ring[i].hangcheck_score == max_hangcheck_score &&
- error->ring[i].pid != -1) {
+ for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+ if (error->engine[i].hangcheck_score == max_hangcheck_score &&
+ error->engine[i].pid != -1) {
err_printf(m, "Active process (on ring %s): %s [%d]\n",
- ring_str(i),
- error->ring[i].comm,
- error->ring[i].pid);
+ engine_str(i),
+ error->engine[i].comm,
+ error->engine[i].pid);
}
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
- err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
- err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+ err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
+ err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
err_printf(m, "PCI Subsystem: %04x:%04x\n",
- dev->pdev->subsystem_vendor,
- dev->pdev->subsystem_device);
+ pdev->subsystem_vendor,
+ pdev->subsystem_device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
if (HAS_CSR(dev)) {
@@ -414,36 +419,55 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (IS_GEN7(dev))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for (i = 0; i < ARRAY_SIZE(error->ring); i++)
- i915_ring_error_state(m, dev, error, i);
+ for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+ if (error->engine[i].engine_id != -1)
+ error_print_engine(m, &error->engine[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
+ char buf[128];
+ int len, first = 1;
+
+ if (!error->active_vm[i])
+ break;
- for (i = 0; i < error->vm_count; i++) {
- err_printf(m, "vm[%d]\n", i);
+ len = scnprintf(buf, sizeof(buf), "Active (");
+ for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
+ if (error->engine[j].vm != error->active_vm[i])
+ continue;
- print_error_buffers(m, "Active",
+ len += scnprintf(buf + len, sizeof(buf) - len, "%s%s",
+ first ? "" : ", ",
+ dev_priv->engine[j].name);
+ first = 0;
+ }
+ scnprintf(buf + len, sizeof(buf) - len, ")");
+ print_error_buffers(m, buf,
error->active_bo[i],
error->active_bo_count[i]);
-
- print_error_buffers(m, "Pinned",
- error->pinned_bo[i],
- error->pinned_bo_count[i]);
}
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- obj = error->ring[i].batchbuffer;
+ print_error_buffers(m, "Pinned (global)",
+ error->pinned_bo,
+ error->pinned_bo_count);
+
+ for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+ struct drm_i915_error_engine *ee = &error->engine[i];
+
+ obj = ee->batchbuffer;
if (obj) {
err_puts(m, dev_priv->engine[i].name);
- if (error->ring[i].pid != -1)
+ if (ee->pid != -1)
err_printf(m, " (submitted by %s [%d])",
- error->ring[i].comm,
- error->ring[i].pid);
+ ee->comm,
+ ee->pid);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
- obj = error->ring[i].wa_batchbuffer;
+ obj = ee->wa_batchbuffer;
if (obj) {
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
dev_priv->engine[i].name,
@@ -451,38 +475,43 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
print_error_obj(m, obj);
}
- if (error->ring[i].num_requests) {
+ if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
dev_priv->engine[i].name,
- error->ring[i].num_requests);
- for (j = 0; j < error->ring[i].num_requests; j++) {
- err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
- error->ring[i].requests[j].seqno,
- error->ring[i].requests[j].jiffies,
- error->ring[i].requests[j].tail);
+ ee->num_requests);
+ for (j = 0; j < ee->num_requests; j++) {
+ err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
+ ee->requests[j].pid,
+ ee->requests[j].seqno,
+ ee->requests[j].jiffies,
+ ee->requests[j].head,
+ ee->requests[j].tail);
}
}
- if (error->ring[i].num_waiters) {
+ if (IS_ERR(ee->waiters)) {
+ err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
+ dev_priv->engine[i].name);
+ } else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
dev_priv->engine[i].name,
- error->ring[i].num_waiters);
- for (j = 0; j < error->ring[i].num_waiters; j++) {
+ ee->num_waiters);
+ for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
- error->ring[i].waiters[j].seqno,
- error->ring[i].waiters[j].comm,
- error->ring[i].waiters[j].pid);
+ ee->waiters[j].seqno,
+ ee->waiters[j].comm,
+ ee->waiters[j].pid);
}
}
- if ((obj = error->ring[i].ringbuffer)) {
+ if ((obj = ee->ringbuffer)) {
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
- if ((obj = error->ring[i].hws_page)) {
+ if ((obj = ee->hws_page)) {
u64 hws_offset = obj->gtt_offset;
u32 *hws_page = &obj->pages[0][0];
@@ -504,7 +533,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- obj = error->ring[i].wa_ctx;
+ obj = ee->wa_ctx;
if (obj) {
u64 wa_ctx_offset = obj->gtt_offset;
u32 *wa_ctx_page = &obj->pages[0][0];
@@ -526,7 +555,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- if ((obj = error->ring[i].ctx)) {
+ if ((obj = ee->ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
@@ -534,7 +563,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- if ((obj = error->semaphore_obj)) {
+ if ((obj = error->semaphore)) {
err_printf(m, "Semaphore page = 0x%08x\n",
lower_32_bits(obj->gtt_offset));
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
@@ -611,26 +640,27 @@ static void i915_error_state_free(struct kref *error_ref)
typeof(*error), ref);
int i;
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- i915_error_object_free(error->ring[i].batchbuffer);
- i915_error_object_free(error->ring[i].wa_batchbuffer);
- i915_error_object_free(error->ring[i].ringbuffer);
- i915_error_object_free(error->ring[i].hws_page);
- i915_error_object_free(error->ring[i].ctx);
- i915_error_object_free(error->ring[i].wa_ctx);
- kfree(error->ring[i].requests);
- kfree(error->ring[i].waiters);
+ for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+ struct drm_i915_error_engine *ee = &error->engine[i];
+
+ i915_error_object_free(ee->batchbuffer);
+ i915_error_object_free(ee->wa_batchbuffer);
+ i915_error_object_free(ee->ringbuffer);
+ i915_error_object_free(ee->hws_page);
+ i915_error_object_free(ee->ctx);
+ i915_error_object_free(ee->wa_ctx);
+
+ kfree(ee->requests);
+ if (!IS_ERR_OR_NULL(ee->waiters))
+ kfree(ee->waiters);
}
- i915_error_object_free(error->semaphore_obj);
+ i915_error_object_free(error->semaphore);
- for (i = 0; i < error->vm_count; i++)
+ for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
-
- kfree(error->active_bo);
- kfree(error->active_bo_count);
kfree(error->pinned_bo);
- kfree(error->pinned_bo_count);
+
kfree(error->overlay);
kfree(error->display);
kfree(error);
@@ -638,46 +668,45 @@ static void i915_error_state_free(struct kref *error_ref)
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *src,
- struct i915_address_space *vm)
+ struct i915_vma *vma)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_gem_object *src;
struct drm_i915_error_object *dst;
- struct i915_vma *vma = NULL;
int num_pages;
bool use_ggtt;
int i = 0;
u64 reloc_offset;
- if (src == NULL || src->pages == NULL)
+ if (!vma)
+ return NULL;
+
+ src = vma->obj;
+ if (!src->pages)
return NULL;
num_pages = src->base.size >> PAGE_SHIFT;
dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
- if (dst == NULL)
+ if (!dst)
return NULL;
- if (i915_gem_obj_bound(src, vm))
- dst->gtt_offset = i915_gem_obj_offset(src, vm);
- else
- dst->gtt_offset = -1;
+ dst->gtt_offset = vma->node.start;
+ dst->gtt_size = vma->node.size;
reloc_offset = dst->gtt_offset;
- if (i915_is_ggtt(vm))
- vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
- vma && (vma->bound & GLOBAL_BIND) &&
+ (vma->flags & I915_VMA_GLOBAL_BIND) &&
reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
use_ggtt = true;
- if (!(vma && vma->bound & GLOBAL_BIND))
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND))
goto unwind;
- reloc_offset = i915_gem_obj_ggtt_offset(src);
+ reloc_offset = vma->node.start;
if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
goto unwind;
}
@@ -705,7 +734,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/
- s = io_mapping_map_atomic_wc(ggtt->mappable,
+ s = io_mapping_map_atomic_wc(&ggtt->mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
@@ -737,8 +766,24 @@ unwind:
kfree(dst);
return NULL;
}
-#define i915_error_ggtt_object_create(dev_priv, src) \
- i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
+
+/* The error capture is special as it tries to run underneath the normal
+ * locking rules - so we use the raw version of the i915_gem_active lookup.
+ */
+static inline uint32_t
+__active_get_seqno(struct i915_gem_active *active)
+{
+ return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+}
+
+static inline int
+__active_get_engine_id(struct i915_gem_active *active)
+{
+ struct intel_engine_cs *engine;
+
+ engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
+ return engine ? engine->id : -1;
+}
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
@@ -748,32 +793,34 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
+
for (i = 0; i < I915_NUM_ENGINES; i++)
- err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
- err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
+ err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
+ err->wseqno = __active_get_seqno(&obj->last_write);
+ err->engine = __active_get_engine_id(&obj->last_write);
+
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
- err->fence_reg = obj->fence_reg;
- err->pinned = 0;
- if (i915_gem_obj_is_pinned(obj))
- err->pinned = 1;
- err->tiling = obj->tiling_mode;
+ err->fence_reg = vma->fence ? vma->fence->id : -1;
+ err->tiling = i915_gem_object_get_tiling(obj);
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
- err->ring = obj->last_write_req ?
- i915_gem_request_get_engine(obj->last_write_req)->id : -1;
err->cache_level = obj->cache_level;
}
-static u32 capture_active_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head)
+static u32 capture_error_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head,
+ bool pinned_only)
{
struct i915_vma *vma;
int i = 0;
list_for_each_entry(vma, head, vm_link) {
+ if (pinned_only && !i915_vma_is_pinned(vma))
+ continue;
+
capture_bo(err++, vma);
if (++i == count)
break;
@@ -782,28 +829,6 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
return i;
}
-static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head,
- struct i915_address_space *vm)
-{
- struct drm_i915_gem_object *obj;
- struct drm_i915_error_buffer * const first = err;
- struct drm_i915_error_buffer * const last = err + count;
-
- list_for_each_entry(obj, head, global_list) {
- struct i915_vma *vma;
-
- if (err == last)
- break;
-
- list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->vm == vm && vma->pin_count > 0)
- capture_bo(err++, vma);
- }
-
- return err - first;
-}
-
/* Generate a semi-unique error code. The code is not meant to have meaning; the
* code's only purpose is to try to prevent false duplicated bug reports by
* grossly estimating a GPU error state.
@@ -815,7 +840,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
*/
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
- int *ring_id)
+ int *engine_id)
{
uint32_t error_code = 0;
int i;
@@ -826,11 +851,11 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
* strictly a client bug. Use instdone to differentiate those some.
*/
for (i = 0; i < I915_NUM_ENGINES; i++) {
- if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
- if (ring_id)
- *ring_id = i;
+ if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
+ if (engine_id)
+ *engine_id = i;
- return error->ring[i].ipehr ^ error->ring[i].instdone;
+ return error->engine[i].ipehr ^ error->engine[i].instdone;
}
}
@@ -855,22 +880,17 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
}
-static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error,
+static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
- struct drm_i915_error_ring *ering)
+ struct drm_i915_error_engine *ee)
{
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *to;
enum intel_engine_id id;
- if (!i915_semaphore_is_enabled(dev_priv))
+ if (!error->semaphore)
return;
- if (!error->semaphore_obj)
- error->semaphore_obj =
- i915_error_ggtt_object_create(dev_priv,
- dev_priv->semaphore_obj);
-
for_each_engine_id(to, dev_priv, id) {
int idx;
u16 signal_offset;
@@ -879,44 +899,52 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
if (engine == to)
continue;
- signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
- / 4;
- tmp = error->semaphore_obj->pages[0];
- idx = intel_ring_sync_index(engine, to);
+ signal_offset =
+ (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
+ tmp = error->semaphore->pages[0];
+ idx = intel_engine_sync_index(engine, to);
- ering->semaphore_mboxes[idx] = tmp[signal_offset];
- ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
+ ee->semaphore_mboxes[idx] = tmp[signal_offset];
+ ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
}
}
-static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine,
- struct drm_i915_error_ring *ering)
+static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
+ struct drm_i915_error_engine *ee)
{
- ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
- ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
- ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
- ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+ ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+ ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+ ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
if (HAS_VEBOX(dev_priv)) {
- ering->semaphore_mboxes[2] =
+ ee->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(engine->mmio_base));
- ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
+ ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
}
}
-static void engine_record_waiters(struct intel_engine_cs *engine,
- struct drm_i915_error_ring *ering)
+static void error_record_engine_waiters(struct intel_engine_cs *engine,
+ struct drm_i915_error_engine *ee)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct drm_i915_error_waiter *waiter;
struct rb_node *rb;
int count;
- ering->num_waiters = 0;
- ering->waiters = NULL;
+ ee->num_waiters = 0;
+ ee->waiters = NULL;
+
+ if (RB_EMPTY_ROOT(&b->waiters))
+ return;
+
+ if (!spin_trylock(&b->lock)) {
+ ee->waiters = ERR_PTR(-EDEADLK);
+ return;
+ }
- spin_lock(&b->lock);
count = 0;
for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
count++;
@@ -930,9 +958,13 @@ static void engine_record_waiters(struct intel_engine_cs *engine,
if (!waiter)
return;
- ering->waiters = waiter;
+ if (!spin_trylock(&b->lock)) {
+ kfree(waiter);
+ ee->waiters = ERR_PTR(-EDEADLK);
+ return;
+ }
- spin_lock(&b->lock);
+ ee->waiters = waiter;
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = container_of(rb, typeof(*w), node);
@@ -941,57 +973,59 @@ static void engine_record_waiters(struct intel_engine_cs *engine,
waiter->seqno = w->seqno;
waiter++;
- if (++ering->num_waiters == count)
+ if (++ee->num_waiters == count)
break;
}
spin_unlock(&b->lock);
}
-static void i915_record_ring_state(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error,
- struct intel_engine_cs *engine,
- struct drm_i915_error_ring *ering)
+static void error_record_engine_registers(struct drm_i915_error_state *error,
+ struct intel_engine_cs *engine,
+ struct drm_i915_error_engine *ee)
{
+ struct drm_i915_private *dev_priv = engine->i915;
+
if (INTEL_GEN(dev_priv) >= 6) {
- ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
- ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
+ ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+ ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
if (INTEL_GEN(dev_priv) >= 8)
- gen8_record_semaphore_state(dev_priv, error, engine,
- ering);
+ gen8_record_semaphore_state(error, engine, ee);
else
- gen6_record_semaphore_state(dev_priv, engine, ering);
+ gen6_record_semaphore_state(engine, ee);
}
if (INTEL_GEN(dev_priv) >= 4) {
- ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
- ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
- ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
- ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
- ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+ ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+ ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+ ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+ ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+ ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_GEN(dev_priv) >= 8) {
- ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
- ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
+ ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+ ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
}
- ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
+ ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
} else {
- ering->faddr = I915_READ(DMA_FADD_I8XX);
- ering->ipeir = I915_READ(IPEIR);
- ering->ipehr = I915_READ(IPEHR);
- ering->instdone = I915_READ(GEN2_INSTDONE);
+ ee->faddr = I915_READ(DMA_FADD_I8XX);
+ ee->ipeir = I915_READ(IPEIR);
+ ee->ipehr = I915_READ(IPEHR);
+ ee->instdone = I915_READ(GEN2_INSTDONE);
}
- ering->waiting = intel_engine_has_waiter(engine);
- ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
- ering->acthd = intel_ring_get_active_head(engine);
- ering->seqno = intel_engine_get_seqno(engine);
- ering->last_seqno = engine->last_submitted_seqno;
- ering->start = I915_READ_START(engine);
- ering->head = I915_READ_HEAD(engine);
- ering->tail = I915_READ_TAIL(engine);
- ering->ctl = I915_READ_CTL(engine);
-
- if (I915_NEED_GFX_HWS(dev_priv)) {
+ ee->waiting = intel_engine_has_waiter(engine);
+ ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+ ee->acthd = intel_engine_get_active_head(engine);
+ ee->seqno = intel_engine_get_seqno(engine);
+ ee->last_seqno = engine->last_submitted_seqno;
+ ee->start = I915_READ_START(engine);
+ ee->head = I915_READ_HEAD(engine);
+ ee->tail = I915_READ_TAIL(engine);
+ ee->ctl = I915_READ_CTL(engine);
+ if (INTEL_GEN(dev_priv) > 2)
+ ee->mode = I915_READ_MODE(engine);
+
+ if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
i915_reg_t mmio;
if (IS_GEN7(dev_priv)) {
@@ -1017,107 +1051,150 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
mmio = RING_HWS_PGA(engine->mmio_base);
}
- ering->hws = I915_READ(mmio);
+ ee->hws = I915_READ(mmio);
}
- ering->hangcheck_score = engine->hangcheck.score;
- ering->hangcheck_action = engine->hangcheck.action;
+ ee->hangcheck_score = engine->hangcheck.score;
+ ee->hangcheck_action = engine->hangcheck.action;
if (USES_PPGTT(dev_priv)) {
int i;
- ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
+ ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
if (IS_GEN6(dev_priv))
- ering->vm_info.pp_dir_base =
+ ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine));
else if (IS_GEN7(dev_priv))
- ering->vm_info.pp_dir_base =
+ ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine));
else if (INTEL_GEN(dev_priv) >= 8)
for (i = 0; i < 4; i++) {
- ering->vm_info.pdp[i] =
+ ee->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(engine, i));
- ering->vm_info.pdp[i] <<= 32;
- ering->vm_info.pdp[i] |=
+ ee->vm_info.pdp[i] <<= 32;
+ ee->vm_info.pdp[i] |=
I915_READ(GEN8_RING_PDP_LDW(engine, i));
}
}
}
-
-static void i915_gem_record_active_context(struct intel_engine_cs *engine,
- struct drm_i915_error_state *error,
- struct drm_i915_error_ring *ering)
+static void engine_record_requests(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *first,
+ struct drm_i915_error_engine *ee)
{
- struct drm_i915_private *dev_priv = engine->i915;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_request *request;
+ int count;
- /* Currently render ring is the only HW context user */
- if (engine->id != RCS || !error->ccid)
+ count = 0;
+ request = first;
+ list_for_each_entry_from(request, &engine->request_list, link)
+ count++;
+ if (!count)
return;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!i915_gem_obj_ggtt_bound(obj))
- continue;
+ ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
+ if (!ee->requests)
+ return;
- if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
- ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
+ ee->num_requests = count;
+
+ count = 0;
+ request = first;
+ list_for_each_entry_from(request, &engine->request_list, link) {
+ struct drm_i915_error_request *erq;
+
+ if (count >= ee->num_requests) {
+ /*
+ * If the ring request list was changed between the
+ * point where the error request list was created and
+ * dimensioned and this point, just exit early to
+ * avoid crashes.
+ *
+ * There is no need to flag that the request list
+ * changed during capture, or that the resulting error
+ * state is slightly stale, since we are typically
+ * only interested in the request list as it stood at
+ * the point of error, not in any changes happening
+ * during the capture.
+ */
break;
}
+
+ erq = &ee->requests[count++];
+ erq->seqno = request->fence.seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->head = request->head;
+ erq->tail = request->tail;
+
+ rcu_read_lock();
+ erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+ rcu_read_unlock();
}
+ ee->num_requests = count;
}
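The count-then-fill structure above is a defensive snapshot pattern: size the array in one pass, copy in a second, and bail out early if the list grew in between, settling for a slightly short capture rather than overrunning the allocation. A minimal userspace sketch of the same idea (the node/snapshot types and snapshot_list() are illustrative, not driver code):

#include <stdlib.h>

struct node { int seqno; struct node *next; };
struct snapshot { int *seqnos; int count; };

/* Two-pass capture: count, allocate, then copy with an overrun guard
 * in case the list was extended between the two passes. */
static int snapshot_list(const struct node *head, struct snapshot *snap)
{
	const struct node *n;
	int count = 0, i = 0;

	for (n = head; n; n = n->next)
		count++;
	if (!count)
		return 0;

	snap->seqnos = calloc(count, sizeof(*snap->seqnos));
	if (!snap->seqnos)
		return -1;

	for (n = head; n; n = n->next) {
		if (i == count)
			break; /* list grew mid-capture; keep what we have */
		snap->seqnos[i++] = n->seqno;
	}
	snap->count = i; /* record how many entries were actually captured */
	return 0;
}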
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct drm_i915_gem_request *request;
- int i, count;
+ int i;
+
+ error->semaphore =
+ i915_error_object_create(dev_priv, dev_priv->semaphore);
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
+ struct drm_i915_error_engine *ee = &error->engine[i];
+ struct drm_i915_gem_request *request;
- error->ring[i].pid = -1;
+ ee->pid = -1;
+ ee->engine_id = -1;
if (!intel_engine_initialized(engine))
continue;
- error->ring[i].valid = true;
+ ee->engine_id = i;
- i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
- engine_record_waiters(engine, &error->ring[i]);
+ error_record_engine_registers(error, engine, ee);
+ error_record_engine_waiters(engine, ee);
request = i915_gem_find_active_request(engine);
if (request) {
- struct i915_address_space *vm;
- struct intel_ringbuffer *rb;
+ struct intel_ring *ring;
+ struct pid *pid;
- vm = request->ctx->ppgtt ?
+ ee->vm = request->ctx->ppgtt ?
&request->ctx->ppgtt->base : &ggtt->base;
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
* by userspace.
*/
- error->ring[i].batchbuffer =
+ ee->batchbuffer =
i915_error_object_create(dev_priv,
- request->batch_obj,
- vm);
+ request->batch);
if (HAS_BROKEN_CS_TLB(dev_priv))
- error->ring[i].wa_batchbuffer =
- i915_error_ggtt_object_create(dev_priv,
- engine->scratch.obj);
+ ee->wa_batchbuffer =
+ i915_error_object_create(dev_priv,
+ engine->scratch);
+
+ ee->ctx =
+ i915_error_object_create(dev_priv,
+ request->ctx->engine[i].state);
- if (request->pid) {
+ pid = request->ctx->pid;
+ if (pid) {
struct task_struct *task;
rcu_read_lock();
- task = pid_task(request->pid, PIDTYPE_PID);
+ task = pid_task(pid, PIDTYPE_PID);
if (task) {
- strcpy(error->ring[i].comm, task->comm);
- error->ring[i].pid = task->pid;
+ strcpy(ee->comm, task->comm);
+ ee->pid = task->pid;
}
rcu_read_unlock();
}
@@ -1125,153 +1202,106 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
error->simulated |=
request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
- rb = request->ringbuf;
- error->ring[i].cpu_ring_head = rb->head;
- error->ring[i].cpu_ring_tail = rb->tail;
- error->ring[i].ringbuffer =
- i915_error_ggtt_object_create(dev_priv,
- rb->obj);
- }
-
- error->ring[i].hws_page =
- i915_error_ggtt_object_create(dev_priv,
- engine->status_page.obj);
+ ring = request->ring;
+ ee->cpu_ring_head = ring->head;
+ ee->cpu_ring_tail = ring->tail;
+ ee->ringbuffer =
+ i915_error_object_create(dev_priv, ring->vma);
- if (engine->wa_ctx.obj) {
- error->ring[i].wa_ctx =
- i915_error_ggtt_object_create(dev_priv,
- engine->wa_ctx.obj);
+ engine_record_requests(engine, request, ee);
}
- i915_gem_record_active_context(engine, error, &error->ring[i]);
-
- count = 0;
- list_for_each_entry(request, &engine->request_list, list)
- count++;
-
- error->ring[i].num_requests = count;
- error->ring[i].requests =
- kcalloc(count, sizeof(*error->ring[i].requests),
- GFP_ATOMIC);
- if (error->ring[i].requests == NULL) {
- error->ring[i].num_requests = 0;
- continue;
- }
-
- count = 0;
- list_for_each_entry(request, &engine->request_list, list) {
- struct drm_i915_error_request *erq;
-
- if (count >= error->ring[i].num_requests) {
- /*
- * If the ring request list was changed in
- * between the point where the error request
- * list was created and dimensioned and this
- * point then just exit early to avoid crashes.
- *
- * We don't need to communicate that the
- * request list changed state during error
- * state capture and that the error state is
- * slightly incorrect as a consequence since we
- * are typically only interested in the request
- * list state at the point of error state
- * capture, not in any changes happening during
- * the capture.
- */
- break;
- }
+ ee->hws_page =
+ i915_error_object_create(dev_priv,
+ engine->status_page.vma);
- erq = &error->ring[i].requests[count++];
- erq->seqno = request->seqno;
- erq->jiffies = request->emitted_jiffies;
- erq->tail = request->postfix;
- }
+ ee->wa_ctx =
+ i915_error_object_create(dev_priv, engine->wa_ctx.vma);
}
}
-/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
- * VM.
- */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct i915_address_space *vm,
- const int ndx)
+ int idx)
{
- struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_error_buffer *active_bo;
struct i915_vma *vma;
- int i;
+ int count;
- i = 0;
+ count = 0;
list_for_each_entry(vma, &vm->active_list, vm_link)
- i++;
- error->active_bo_count[ndx] = i;
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->vm == vm && vma->pin_count > 0)
- i++;
- }
- error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
-
- if (i) {
- active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
- if (active_bo)
- pinned_bo = active_bo + error->active_bo_count[ndx];
- }
+ count++;
+ active_bo = NULL;
+ if (count)
+ active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
if (active_bo)
- error->active_bo_count[ndx] =
- capture_active_bo(active_bo,
- error->active_bo_count[ndx],
- &vm->active_list);
-
- if (pinned_bo)
- error->pinned_bo_count[ndx] =
- capture_pinned_bo(pinned_bo,
- error->pinned_bo_count[ndx],
- &dev_priv->mm.bound_list, vm);
- error->active_bo[ndx] = active_bo;
- error->pinned_bo[ndx] = pinned_bo;
+ count = capture_error_bo(active_bo, count, &vm->active_list, false);
+ else
+ count = 0;
+
+ error->active_vm[idx] = vm;
+ error->active_bo[idx] = active_bo;
+ error->active_bo_count[idx] = count;
}
-static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
{
- struct i915_address_space *vm;
- int cnt = 0, i = 0;
-
- list_for_each_entry(vm, &dev_priv->vm_list, global_link)
- cnt++;
-
- error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
- error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
- error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
- GFP_ATOMIC);
- error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
- GFP_ATOMIC);
-
- if (error->active_bo == NULL ||
- error->pinned_bo == NULL ||
- error->active_bo_count == NULL ||
- error->pinned_bo_count == NULL) {
- kfree(error->active_bo);
- kfree(error->active_bo_count);
- kfree(error->pinned_bo);
- kfree(error->pinned_bo_count);
-
- error->active_bo = NULL;
- error->active_bo_count = NULL;
- error->pinned_bo = NULL;
- error->pinned_bo_count = NULL;
- } else {
- list_for_each_entry(vm, &dev_priv->vm_list, global_link)
- i915_gem_capture_vm(dev_priv, error, vm, i++);
+ int cnt = 0, i, j;
- error->vm_count = cnt;
+ BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
+ BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
+ BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
+
+ /* Scan each engine looking for unique active contexts/vm */
+ for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+ struct drm_i915_error_engine *ee = &error->engine[i];
+ bool found;
+
+ if (!ee->vm)
+ continue;
+
+ found = false;
+ for (j = 0; j < i && !found; j++)
+ found = error->engine[j].vm == ee->vm;
+ if (!found)
+ i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
}
}
+static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
+ struct drm_i915_error_buffer *bo;
+ struct i915_vma *vma;
+ int count_inactive, count_active;
+
+ count_active = 0;
+ list_for_each_entry(vma, &vm->active_list, vm_link)
+ count_active++;
+
+ count_inactive = 0;
+ list_for_each_entry(vma, &vm->inactive_list, vm_link)
+ count_inactive++;
+
+ bo = NULL;
+ if (count_inactive + count_active)
+ bo = kcalloc(count_inactive + count_active,
+ sizeof(*bo), GFP_ATOMIC);
+ if (!bo)
+ return;
+
+ count_active = capture_error_bo(bo, count_active,
+ &vm->active_list, true);
+ count_inactive = capture_error_bo(bo + count_active, count_inactive,
+ &vm->inactive_list, true);
+ error->pinned_bo_count = count_inactive + count_active;
+ error->pinned_bo = bo;
+}
+
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
@@ -1352,20 +1382,20 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
const char *error_msg)
{
u32 ecode;
- int ring_id = -1, len;
+ int engine_id = -1, len;
- ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+ ecode = i915_error_generate_code(dev_priv, error, &engine_id);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%d:0x%08x",
- INTEL_GEN(dev_priv), ring_id, ecode);
+ INTEL_GEN(dev_priv), engine_id, ecode);
- if (ring_id != -1 && error->ring[ring_id].pid != -1)
+ if (engine_id != -1 && error->engine[engine_id].pid != -1)
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
- error->ring[ring_id].comm,
- error->ring[ring_id].pid);
+ error->engine[engine_id].comm,
+ error->engine[engine_id].pid);
scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
", reason: %s, action: %s",
@@ -1382,6 +1412,10 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
#endif
error->reset_count = i915_reset_count(&dev_priv->gpu_error);
error->suspend_count = dev_priv->suspend_count;
+
+ memcpy(&error->device_info,
+ INTEL_INFO(dev_priv),
+ sizeof(error->device_info));
}
/**
@@ -1415,9 +1449,10 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
- i915_gem_capture_buffers(dev_priv, error);
i915_gem_record_fences(dev_priv, error);
i915_gem_record_rings(dev_priv, error);
+ i915_capture_active_buffers(dev_priv, error);
+ i915_capture_pinned_buffers(dev_priv, error);
do_gettimeofday(&error->time);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index cf5a65be4fe0..a47e1e4aec03 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -103,9 +103,6 @@
#define HOST2GUC_INTERRUPT _MMIO(0xc4c8)
#define HOST2GUC_TRIGGER (1<<0)
-#define DRBMISC1 0x1984
-#define DOORBELL_ENABLE (1<<0)
-
#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0)
#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 2112e029db6a..3106dcc06fe9 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -59,7 +59,7 @@
* WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
* represents in-order queue. The kernel driver packs ring tail pointer and an
* ELSP context descriptor dword into Work Item.
- * See guc_add_workqueue_item()
+ * See guc_wq_item_append()
*
*/
@@ -114,10 +114,8 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
if (ret != -ETIMEDOUT)
ret = -EIO;
- DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
- "status=0x%08X response=0x%08X\n",
- data[0], ret, status,
- I915_READ(SOFT_SCRATCH(15)));
+ DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n",
+ data[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
dev_priv->guc.action_fail += 1;
dev_priv->guc.action_err = ret;
@@ -183,7 +181,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
struct i915_guc_client *client,
u16 new_id)
{
- struct sg_table *sg = guc->ctx_pool_obj->pages;
+ struct sg_table *sg = guc->ctx_pool_vma->pages;
void *doorbell_bitmap = guc->doorbell_bitmap;
struct guc_doorbell_info *doorbell;
struct guc_context_desc desc;
@@ -290,7 +288,7 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
-static void guc_init_proc_desc(struct intel_guc *guc,
+static void guc_proc_desc_init(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_process_desc *desc;
@@ -322,15 +320,15 @@ static void guc_init_proc_desc(struct intel_guc *guc,
* write queue, etc).
*/
-static void guc_init_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_init(struct intel_guc *guc,
struct i915_guc_client *client)
{
- struct drm_i915_gem_object *client_obj = client->client_obj;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
+ unsigned int tmp;
u32 gfx_addr;
memset(&desc, 0, sizeof(desc));
@@ -340,10 +338,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for_each_engine(engine, dev_priv) {
+ for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
struct intel_context *ce = &ctx->engine[engine->id];
- struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
- struct drm_i915_gem_object *obj;
+ uint32_t guc_engine_id = engine->guc_id;
+ struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
/* TODO: We have a design issue to be solved here. Only when we
* receive the first batch, we know which engine is used by the
@@ -358,30 +356,29 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
- gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
- lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
+ lrc->ring_lcra =
+ i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
- (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
-
- obj = ce->ringbuf->obj;
- gfx_addr = i915_gem_obj_ggtt_offset(obj);
+ (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
- lrc->ring_begin = gfx_addr;
- lrc->ring_end = gfx_addr + obj->base.size - 1;
- lrc->ring_next_free_location = gfx_addr;
+ lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
+ lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
+ lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << engine->guc_id);
+ desc.engines_used |= (1 << guc_engine_id);
}
+ DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
+ client->engines, desc.engines_used);
WARN_ON(desc.engines_used == 0);
/*
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
- gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
- desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
+ gfx_addr = i915_ggtt_offset(client->vma);
+ desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
desc.db_trigger_cpu = (uintptr_t)client->client_base +
client->doorbell_offset;
@@ -397,12 +394,12 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.desc_private = (uintptr_t)client;
/* Pool context is pinned already */
- sg = guc->ctx_pool_obj->pages;
+ sg = guc->ctx_pool_vma->pages;
sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
sizeof(desc) * client->ctx_index);
}
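Both paths here funnel through the same scatterlist copy: a fixed-size descriptor is written into its slot in the pinned context pool, and (as guc_ctx_desc_fini() shows next) tearing a context down is just writing a zeroed descriptor over the same slot. A hedged sketch of that shared shape (pool_write_desc() is hypothetical; sg_pcopy_from_buffer() is the real scatterlist helper used above):

/* Sketch: copy one fixed-size descriptor into slot 'index' of a pinned
 * pool through its sg_table; passing a zeroed descriptor clears the slot. */
static void pool_write_desc(struct sg_table *sg,
			    struct guc_context_desc *desc,
			    u32 index)
{
	sg_pcopy_from_buffer(sg->sgl, sg->nents, desc, sizeof(*desc),
			     sizeof(*desc) * index);
}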
-static void guc_fini_ctx_desc(struct intel_guc *guc,
+static void guc_ctx_desc_fini(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_context_desc desc;
@@ -410,13 +407,13 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
memset(&desc, 0, sizeof(desc));
- sg = guc->ctx_pool_obj->pages;
+ sg = guc->ctx_pool_vma->pages;
sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
sizeof(desc) * client->ctx_index);
}
/**
- * i915_guc_wq_check_space() - check that the GuC can accept a request
+ * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
* @request: request associated with the commands
*
* Return: 0 if space is available
@@ -424,39 +421,56 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
*
* This function must be called (and must return 0) before a request
* is submitted to the GuC via i915_guc_submit() below. Once a result
- * of 0 has been returned, it remains valid until (but only until)
- * the next call to submit().
+ * of 0 has been returned, it must be balanced by a corresponding
+ * call to submit().
*
- * This precheck allows the caller to determine in advance that space
+ * Reservation allows the caller to determine in advance that space
* will be available for the next submission before committing resources
* to it, and helps avoid late failures with complicated recovery paths.
*/
-int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
+int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *gc = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc;
+ struct guc_process_desc *desc = gc->client_base + gc->proc_desc_offset;
u32 freespace;
+ int ret;
- GEM_BUG_ON(gc == NULL);
+ spin_lock(&gc->wq_lock);
+ freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+ freespace -= gc->wq_rsvd;
+ if (likely(freespace >= wqi_size)) {
+ gc->wq_rsvd += wqi_size;
+ ret = 0;
+ } else {
+ gc->no_wq_space++;
+ ret = -EAGAIN;
+ }
+ spin_unlock(&gc->wq_lock);
- desc = gc->client_base + gc->proc_desc_offset;
+ return ret;
+}
- freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
- if (likely(freespace >= wqi_size))
- return 0;
+void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
+{
+ const size_t wqi_size = sizeof(struct guc_wq_item);
+ struct i915_guc_client *gc = request->i915->guc.execbuf_client;
- gc->no_wq_space += 1;
+ GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size);
- return -EAGAIN;
+ spin_lock(&gc->wq_lock);
+ gc->wq_rsvd -= wqi_size;
+ spin_unlock(&gc->wq_lock);
}
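The reserve/unreserve pair implements pessimistic accounting on the circular workqueue: free space comes from CIRC_SPACE(), outstanding reservations are subtracted before granting more, and the append path later consumes what it reserved. A simplified sketch of the reserve step under an assumed wq_state struct (illustrative only; CIRC_SPACE() is the real macro from <linux/circ_buf.h>):

#include <linux/circ_buf.h>
#include <linux/spinlock.h>

/* tail is the producer index, head the consumer's; rsvd tracks space
 * promised to requests that have not yet appended their work item. */
struct wq_state {
	spinlock_t lock;
	u32 head, tail, size, rsvd;
};

static int wq_reserve(struct wq_state *wq, u32 item_size)
{
	u32 space;
	int ret = -EAGAIN;

	spin_lock(&wq->lock);
	space = CIRC_SPACE(wq->tail, wq->head, wq->size);
	if (space >= wq->rsvd + item_size) {
		wq->rsvd += item_size; /* paid back on append/unreserve */
		ret = 0;
	}
	spin_unlock(&wq->lock);
	return ret;
}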
-static void guc_add_workqueue_item(struct i915_guc_client *gc,
- struct drm_i915_gem_request *rq)
+/* Construct a Work Item and append it to the GuC's Work Queue */
+static void guc_wq_item_append(struct i915_guc_client *gc,
+ struct drm_i915_gem_request *rq)
{
/* wqi_len is in DWords, and does not include the one-word header */
const size_t wqi_size = sizeof(struct guc_wq_item);
const u32 wqi_len = wqi_size/sizeof(u32) - 1;
+ struct intel_engine_cs *engine = rq->engine;
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
void *base;
@@ -464,7 +478,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
desc = gc->client_base + gc->proc_desc_offset;
- /* Free space is guaranteed, see i915_guc_wq_check_space() above */
+ /* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
GEM_BUG_ON(freespace < wqi_size);
@@ -482,31 +496,32 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
* workqueue buffer dw by dw.
*/
BUILD_BUG_ON(wqi_size != 16);
+ GEM_BUG_ON(gc->wq_rsvd < wqi_size);
/* postincrement WQ tail for next time */
wq_off = gc->wq_tail;
+ GEM_BUG_ON(wq_off & (wqi_size - 1));
gc->wq_tail += wqi_size;
gc->wq_tail &= gc->wq_size - 1;
- GEM_BUG_ON(wq_off & (wqi_size - 1));
+ gc->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
wq_off &= PAGE_SIZE - 1;
- base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
+ base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page));
wqi = (struct guc_wq_item *)((char *)base + wq_off);
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
(wqi_len << WQ_LEN_SHIFT) |
- (rq->engine->guc_id << WQ_TARGET_SHIFT) |
+ (engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
/* The GuC wants only the low-order word of the context descriptor */
- wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
- rq->engine);
+ wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = rq->seqno;
+ wqi->fence_id = rq->fence.seqno;
kunmap_atomic(base);
}
@@ -553,8 +568,8 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
if (db_ret.db_status == GUC_DOORBELL_DISABLED)
break;
- DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
- db_cmp.cookie, db_ret.cookie);
+ DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
+ db_cmp.cookie, db_ret.cookie);
/* update the cookie to newly read cookie from GuC */
db_cmp.cookie = db_ret.cookie;
@@ -573,26 +588,26 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
* Return: 0 on success, otherwise an errno.
* (Note: nonzero really shouldn't happen!)
*
- * The caller must have already called i915_guc_wq_check_space() above
- * with a result of 0 (success) since the last request submission. This
- * guarantees that there is space in the work queue for the new request,
- * so enqueuing the item cannot fail.
+ * The caller must have already called i915_guc_wq_reserve() above with
+ * a result of 0 (success), guaranteeing that there is space in the work
+ * queue for the new request, so enqueuing the item cannot fail.
*
* Bad Things Will Happen if the caller violates this protocol e.g. calls
- * submit() when check() says there's no space, or calls submit() multiple
- * times with no intervening check().
+ * submit() when _reserve() says there's no space, or calls _submit()
+ * a different number of times from (successful) calls to _reserve().
*
* The only error here arises if the doorbell hardware isn't functioning
* as expected, which really shouldn't happen.
*/
-int i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
unsigned int engine_id = rq->engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
int b_ret;
- guc_add_workqueue_item(client, rq);
+ spin_lock(&client->wq_lock);
+ guc_wq_item_append(client, rq);
b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
@@ -601,9 +616,8 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
client->b_fail += 1;
guc->submissions[engine_id] += 1;
- guc->last_seqno[engine_id] = rq->seqno;
-
- return b_ret;
+ guc->last_seqno[engine_id] = rq->fence.seqno;
+ spin_unlock(&client->wq_lock);
}
/*
@@ -613,55 +627,48 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
*/
/**
- * gem_allocate_guc_obj() - Allocate gem object for GuC usage
- * @dev_priv: driver private data structure
- * @size: size of object
+ * guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * @guc: the guc
+ * @size: size of area to allocate (both virtual space and memory)
*
- * This is a wrapper to create a gem obj. In order to use it inside GuC, the
- * object needs to be pinned lifetime. Also we must pin it to gtt space other
- * than [0, GUC_WOPCM_TOP) because this range is reserved inside GuC.
+ * This is a wrapper to create an object for use with the GuC. In order to
+ * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
+ * both some backing storage and a range inside the Global GTT. We must pin
+ * it in the GGTT somewhere other than [0, GUC_WOPCM_TOP) because that
+ * range is reserved inside GuC.
*
- * Return: A drm_i915_gem_object if successful, otherwise NULL.
+ * Return: A i915_vma if successful, otherwise an ERR_PTR.
*/
-static struct drm_i915_gem_object *
-gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
+static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
obj = i915_gem_object_create(&dev_priv->drm, size);
if (IS_ERR(obj))
- return NULL;
+ return ERR_CAST(obj);
- if (i915_gem_object_get_pages(obj)) {
- drm_gem_object_unreference(&obj->base);
- return NULL;
- }
+ vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ goto err;
- if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
- drm_gem_object_unreference(&obj->base);
- return NULL;
+ ret = i915_vma_pin(vma, 0, PAGE_SIZE,
+ PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
}
/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- return obj;
-}
+ return vma;
-/**
- * gem_release_guc_obj() - Release gem object allocated for GuC usage
- * @obj: gem obj to be released
- */
-static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
-{
- if (!obj)
- return;
-
- if (i915_gem_obj_is_pinned(obj))
- i915_gem_object_ggtt_unpin(obj);
-
- drm_gem_object_unreference(&obj->base);
+err:
+ i915_gem_object_put(obj);
+ return vma;
}
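Note the switch from NULL-on-failure to the kernel's ERR_PTR convention, which lets guc_allocate_vma() hand the real errno back through the pointer. A minimal sketch of the idiom (struct thing and both helpers are hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

struct thing { int payload; }; /* hypothetical */

static struct thing *alloc_thing(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	return t ? t : ERR_PTR(-ENOMEM); /* encode errno in the pointer */
}

static int use_thing(void)
{
	struct thing *t = alloc_thing();

	if (IS_ERR(t))
		return PTR_ERR(t); /* recover the errno */
	kfree(t);
	return 0;
}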
static void
@@ -688,61 +695,74 @@ guc_client_free(struct drm_i915_private *dev_priv,
kunmap(kmap_to_page(client->client_base));
}
- gem_release_guc_obj(client->client_obj);
+ i915_vma_unpin_and_release(&client->vma);
if (client->ctx_index != GUC_INVALID_CTX_ID) {
- guc_fini_ctx_desc(guc, client);
+ guc_ctx_desc_fini(guc, client);
ida_simple_remove(&guc->ctx_ids, client->ctx_index);
}
kfree(client);
}
+/* Check that a doorbell register is in the expected state */
+static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ i915_reg_t drbreg = GEN8_DRBREGL(db_id);
+ uint32_t value = I915_READ(drbreg);
+ bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
+ bool expected = test_bit(db_id, guc->doorbell_bitmap);
+
+ if (enabled == expected)
+ return true;
+
+ DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
+ db_id, drbreg.reg, value,
+ expected ? "active" : "inactive");
+
+ return false;
+}
+
/*
- * Borrow the first client to set up & tear down every doorbell
+ * Borrow the first client to set up & tear down each unused doorbell
* in turn, to ensure that all doorbell h/w is (re)initialised.
*/
static void guc_init_doorbell_hw(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct i915_guc_client *client = guc->execbuf_client;
- uint16_t db_id, i;
- int err;
+ uint16_t db_id;
+ int i, err;
+ /* Save client's original doorbell selection */
db_id = client->doorbell_id;
for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
- i915_reg_t drbreg = GEN8_DRBREGL(i);
- u32 value = I915_READ(drbreg);
+ /* Skip if doorbell is OK */
+ if (guc_doorbell_check(guc, i))
+ continue;
err = guc_update_doorbell_id(guc, client, i);
-
- /* Report update failure or unexpectedly active doorbell */
- if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED)))
- DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n",
- i, drbreg.reg, value, err);
+ if (err)
+ DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
+ i, err);
}
/* Restore to original value */
err = guc_update_doorbell_id(guc, client, db_id);
if (err)
- DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
- db_id, err);
+ DRM_WARN("Failed to restore doorbell to %d, err %d\n",
+ db_id, err);
- for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
- i915_reg_t drbreg = GEN8_DRBREGL(i);
- u32 value = I915_READ(drbreg);
-
- if (i != db_id && (value & GUC_DOORBELL_ENABLED))
- DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n",
- i, drbreg.reg, value);
-
- }
+ /* Read back & verify all doorbell registers */
+ for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
+ (void)guc_doorbell_check(guc, i);
}
/**
* guc_client_alloc() - Allocate an i915_guc_client
* @dev_priv: driver private data structure
+ * @engines: The set of engines to enable for this client
* @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
* The kernel client to replace ExecList submission is created with
* NORMAL priority. Priority of a client for scheduler can be HIGH,
@@ -754,22 +774,24 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
*/
static struct i915_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
+ uint32_t engines,
uint32_t priority,
struct i915_gem_context *ctx)
{
struct i915_guc_client *client;
struct intel_guc *guc = &dev_priv->guc;
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
uint16_t db_id;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return NULL;
- client->doorbell_id = GUC_INVALID_DOORBELL_ID;
- client->priority = priority;
client->owner = ctx;
client->guc = guc;
+ client->engines = engines;
+ client->priority = priority;
+ client->doorbell_id = GUC_INVALID_DOORBELL_ID;
client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
@@ -779,13 +801,15 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
}
/* The first page is doorbell/proc_desc. Two followed pages are wq. */
- obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
- if (!obj)
+ vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
+ if (IS_ERR(vma))
goto err;
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
- client->client_obj = obj;
- client->client_base = kmap(i915_gem_object_get_page(obj, 0));
+ client->vma = vma;
+ client->client_base = kmap(i915_vma_first_page(vma));
+
+ spin_lock_init(&client->wq_lock);
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
@@ -806,29 +830,26 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
- guc_init_proc_desc(guc, client);
- guc_init_ctx_desc(guc, client);
+ guc_proc_desc_init(guc, client);
+ guc_ctx_desc_init(guc, client);
if (guc_init_doorbell(guc, client, db_id))
goto err;
- DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n",
- priority, client, client->ctx_index);
+ DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
+ priority, client, client->engines, client->ctx_index);
DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
client->doorbell_id, client->doorbell_offset);
return client;
err:
- DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
-
guc_client_free(dev_priv, client);
return NULL;
}
-static void guc_create_log(struct intel_guc *guc)
+static void guc_log_create(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
unsigned long offset;
uint32_t size, flags;
@@ -844,16 +865,16 @@ static void guc_create_log(struct intel_guc *guc)
GUC_LOG_ISR_PAGES + 1 +
GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
- obj = guc->log_obj;
- if (!obj) {
- obj = gem_allocate_guc_obj(dev_priv, size);
- if (!obj) {
+ vma = guc->log_vma;
+ if (!vma) {
+ vma = guc_allocate_vma(guc, size);
+ if (IS_ERR(vma)) {
/* logging will be off */
i915.guc_log_level = -1;
return;
}
- guc->log_obj = obj;
+ guc->log_vma = vma;
}
/* each allocated unit is a page */
@@ -862,11 +883,11 @@ static void guc_create_log(struct intel_guc *guc)
(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
- offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
+ offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}
-static void init_guc_policies(struct guc_policies *policies)
+static void guc_policies_init(struct guc_policies *policies)
{
struct guc_policy *policy;
u32 p, i;
@@ -888,10 +909,10 @@ static void init_guc_policies(struct guc_policies *policies)
policies->is_valid = 1;
}
-static void guc_create_ads(struct intel_guc *guc)
+static void guc_addon_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
@@ -904,16 +925,16 @@ static void guc_create_ads(struct intel_guc *guc)
sizeof(struct guc_mmio_reg_state) +
GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
- obj = guc->ads_obj;
- if (!obj) {
- obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size));
- if (!obj)
+ vma = guc->ads_vma;
+ if (!vma) {
+ vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
+ if (IS_ERR(vma))
return;
- guc->ads_obj = obj;
+ guc->ads_vma = vma;
}
- page = i915_gem_object_get_page(obj, 0);
+ page = i915_vma_first_page(vma);
ads = kmap(page);
/*
@@ -924,17 +945,17 @@ static void guc_create_ads(struct intel_guc *guc)
* to find it.
*/
engine = &dev_priv->engine[RCS];
- ads->golden_context_lrca = engine->status_page.gfx_addr;
+ ads->golden_context_lrca = engine->status_page.ggtt_offset;
for_each_engine(engine, dev_priv)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
- init_guc_policies(policies);
+ guc_policies_init(policies);
- ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
- sizeof(struct guc_ads);
+ ads->scheduler_policies =
+ i915_ggtt_offset(vma) + sizeof(struct guc_ads);
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
@@ -966,6 +987,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
const size_t gemsize = round_up(poolsize, PAGE_SIZE);
struct intel_guc *guc = &dev_priv->guc;
+ struct i915_vma *vma;
/* Wipe bitmap & delete client in case of reinitialisation */
bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
@@ -974,16 +996,17 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
if (!i915.enable_guc_submission)
return 0; /* not enabled */
- if (guc->ctx_pool_obj)
+ if (guc->ctx_pool_vma)
return 0; /* already allocated */
- guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize);
- if (!guc->ctx_pool_obj)
- return -ENOMEM;
+ vma = guc_allocate_vma(guc, gemsize);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+ guc->ctx_pool_vma = vma;
ida_init(&guc->ctx_ids);
- guc_create_log(guc);
- guc_create_ads(guc);
+ guc_log_create(guc);
+ guc_addon_create(guc);
return 0;
}
@@ -992,13 +1015,16 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct i915_guc_client *client;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_request *request;
/* client for execbuf submission */
client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
GUC_CTX_PRIORITY_KMD_NORMAL,
dev_priv->kernel_context);
if (!client) {
- DRM_ERROR("Failed to create execbuf guc_client\n");
+ DRM_ERROR("Failed to create normal GuC client!\n");
return -ENOMEM;
}
@@ -1006,6 +1032,18 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
host2guc_sample_forcewake(guc, client);
guc_init_doorbell_hw(guc);
+ /* Take over from manual control of ELSP (execlists) */
+ for_each_engine(engine, dev_priv) {
+ engine->submit_request = i915_guc_submit;
+
+ /* Replay the current set of previously submitted requests */
+ list_for_each_entry(request, &engine->request_list, link) {
+ client->wq_rsvd += sizeof(struct guc_wq_item);
+ if (i915_sw_fence_done(&request->submit))
+ i915_guc_submit(request);
+ }
+ }
+
return 0;
}
@@ -1013,6 +1051,12 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ if (!guc->execbuf_client)
+ return;
+
+ /* Revert back to manual ELSP submission */
+ intel_execlists_enable_submission(dev_priv);
+
guc_client_free(dev_priv, guc->execbuf_client);
guc->execbuf_client = NULL;
}
@@ -1021,16 +1065,12 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- gem_release_guc_obj(dev_priv->guc.ads_obj);
- guc->ads_obj = NULL;
-
- gem_release_guc_obj(dev_priv->guc.log_obj);
- guc->log_obj = NULL;
+ i915_vma_unpin_and_release(&guc->ads_vma);
+ i915_vma_unpin_and_release(&guc->log_vma);
- if (guc->ctx_pool_obj)
+ if (guc->ctx_pool_vma)
ida_destroy(&guc->ctx_ids);
- gem_release_guc_obj(guc->ctx_pool_obj);
- guc->ctx_pool_obj = NULL;
+ i915_vma_unpin_and_release(&guc->ctx_pool_vma);
}
/**
@@ -1053,7 +1093,7 @@ int intel_guc_suspend(struct drm_device *dev)
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
/* first page is shared data with GuC */
- data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+ data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
@@ -1078,7 +1118,7 @@ int intel_guc_resume(struct drm_device *dev)
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
/* first page is shared data with GuC */
- data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
+ data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
return host2guc_action(guc, data, ARRAY_SIZE(data));
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1c2aec392412..3fc286cd1157 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,6 +350,9 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
+ if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+ return;
+
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON_ONCE(dev_priv->rps.pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
@@ -368,10 +371,13 @@ u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
+ if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+ return;
+
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.interrupts_enabled = false;
- I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
+ I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
@@ -656,12 +662,6 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
* of horizontal active on the first line of vertical active
*/
-static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- /* Gen2 doesn't have a hardware frame counter */
- return 0;
-}
-
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
@@ -978,10 +978,8 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
smp_store_mb(engine->breadcrumbs.irq_posted, true);
- if (intel_engine_wakeup(engine)) {
+ if (intel_engine_wakeup(engine))
trace_i915_gem_request_notify(engine);
- engine->breadcrumbs.irq_wakeups++;
- }
}
static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1105,9 +1103,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay = dev_priv->rps.cur_freq;
min = dev_priv->rps.min_freq_softlimit;
max = dev_priv->rps.max_freq_softlimit;
-
- if (client_boost) {
- new_delay = dev_priv->rps.max_freq_softlimit;
+ if (client_boost || any_waiters(dev_priv))
+ max = dev_priv->rps.max_freq;
+ if (client_boost && new_delay < dev_priv->rps.boost_freq) {
+ new_delay = dev_priv->rps.boost_freq;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
@@ -1122,7 +1121,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay = dev_priv->rps.efficient_freq;
adj = 0;
}
- } else if (any_waiters(dev_priv)) {
+ } else if (client_boost || any_waiters(dev_priv)) {
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
@@ -2504,57 +2503,52 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
- int ret;
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
+ DRM_DEBUG_DRIVER("resetting chip\n");
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+
/*
- * Note that there's only one work item which does gpu resets, so we
- * need not worry about concurrent gpu resets potentially incrementing
- * error->reset_counter twice. We only need to take care of another
- * racing irq/hangcheck declaring the gpu dead for a second time. A
- * quick check for that is good enough: schedule_work ensures the
- * correct ordering between hang detection and this work item, and since
- * the reset in-progress bit is only ever set by code outside of this
- * work we don't need to worry about any other races.
+ * In most cases it's guaranteed that we get here with an RPM
+ * reference held, for example because there is a pending GPU
+ * request that won't finish until the reset is done. This
+ * isn't the case at least when we get here by doing a
+ * simulated reset via debugfs, so get an RPM reference.
*/
- if (i915_reset_in_progress(&dev_priv->gpu_error)) {
- DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
-
- /*
- * In most cases it's guaranteed that we get here with an RPM
- * reference held, for example because there is a pending GPU
- * request that won't finish until the reset is done. This
- * isn't the case at least when we get here by doing a
- * simulated reset via debugfs, so get an RPM reference.
- */
- intel_runtime_pm_get(dev_priv);
-
- intel_prepare_reset(dev_priv);
+ intel_runtime_pm_get(dev_priv);
+ intel_prepare_reset(dev_priv);
+ do {
/*
* All state reset _must_ be completed before we update the
* reset counter, for otherwise waiters might miss the reset
* pending state and not properly drop locks, resulting in
* deadlocks with the reset work.
*/
- ret = i915_reset(dev_priv);
+ if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+ i915_reset(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ }
- intel_finish_reset(dev_priv);
+ /* We need to wait for anyone holding the lock to wake up */
+ } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+ I915_RESET_IN_PROGRESS,
+ TASK_UNINTERRUPTIBLE,
+ HZ));
- intel_runtime_pm_put(dev_priv);
+ intel_finish_reset(dev_priv);
+ intel_runtime_pm_put(dev_priv);
- if (ret == 0)
- kobject_uevent_env(kobj,
- KOBJ_CHANGE, reset_done_event);
+ if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+ kobject_uevent_env(kobj,
+ KOBJ_CHANGE, reset_done_event);
- /*
- * Note: The wake_up also serves as a memory barrier so that
- * waiters see the update value of the reset counter atomic_t.
- */
- wake_up_all(&dev_priv->gpu_error.reset_queue);
- }
+ /*
+ * Note: The wake_up also serves as a memory barrier so that
+ * waiters see the updated value of dev_priv->gpu_error.flags.
+ */
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
}
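The rewritten reset path above replaces the single guarded call with a trylock-plus-wait loop: anyone else holding struct_mutex is given up to a second to wake and drop it, and the loop exits once the in-progress bit is cleared. A sketch of the bare pattern (names are hypothetical; wait_on_bit_timeout() returns non-zero while the bit is still set when the timeout expires):

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Keep retrying: do the work if we can take the lock, otherwise sleep
 * until either the flag bit clears (done) or the timeout lapses (retry). */
static void run_guarded_work(struct mutex *lock, unsigned long *flags,
			     int bit, void (*work)(void))
{
	do {
		if (mutex_trylock(lock)) {
			work(); /* expected to clear 'bit' on completion */
			mutex_unlock(lock);
		}
	} while (wait_on_bit_timeout(flags, bit,
				     TASK_UNINTERRUPTIBLE, HZ));
}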
static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
@@ -2673,25 +2667,26 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
i915_capture_error_state(dev_priv, engine_mask, error_msg);
i915_report_and_clear_eir(dev_priv);
- if (engine_mask) {
- atomic_or(I915_RESET_IN_PROGRESS_FLAG,
- &dev_priv->gpu_error.reset_counter);
+ if (!engine_mask)
+ return;
- /*
- * Wakeup waiting processes so that the reset function
- * i915_reset_and_wakeup doesn't deadlock trying to grab
- * various locks. By bumping the reset counter first, the woken
- * processes will see a reset in progress and back off,
- * releasing their locks and then wait for the reset completion.
- * We must do this for _all_ gpu waiters that might hold locks
- * that the reset work needs to acquire.
- *
- * Note: The wake_up serves as the required memory barrier to
- * ensure that the waiters see the updated value of the reset
- * counter atomic_t.
- */
- i915_error_wake_up(dev_priv);
- }
+ if (test_and_set_bit(I915_RESET_IN_PROGRESS,
+ &dev_priv->gpu_error.flags))
+ return;
+
+ /*
+ * Wakeup waiting processes so that the reset function
+ * i915_reset_and_wakeup doesn't deadlock trying to grab
+ * various locks. By bumping the reset counter first, the woken
+ * processes will see a reset in progress and back off,
+ * releasing their locks and then wait for the reset completion.
+ * We must do this for _all_ gpu waiters that might hold locks
+ * that the reset work needs to acquire.
+ *
+ * Note: The wake_up also provides a memory barrier to ensure that the
+ * waiters see the updated value of the reset flags.
+ */
+ i915_error_wake_up(dev_priv);
i915_reset_and_wakeup(dev_priv);
}
@@ -2804,13 +2799,6 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
}
static bool
-ring_idle(struct intel_engine_cs *engine, u32 seqno)
-{
- return i915_seqno_passed(seqno,
- READ_ONCE(engine->last_submitted_seqno));
-}
-
-static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
if (INTEL_GEN(engine->i915) >= 8) {
@@ -2834,7 +2822,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
if (engine == signaller)
continue;
- if (offset == signaller->semaphore.signal_ggtt[engine->id])
+ if (offset == signaller->semaphore.signal_ggtt[engine->hw_id])
return signaller;
}
} else {
@@ -2844,21 +2832,22 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
if(engine == signaller)
continue;
- if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
+ if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
return signaller;
}
}
- DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
- engine->id, ipehr, offset);
+ DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x, offset 0x%016llx\n",
+ engine->name, ipehr, offset);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
+ void __iomem *vaddr;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
@@ -2897,6 +2886,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
*/
head = I915_READ_HEAD(engine) & HEAD_ADDR;
backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
+ vaddr = (void __iomem *)engine->buffer->vaddr;
for (i = backwards; i; --i) {
/*
@@ -2907,7 +2897,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
head &= engine->buffer->size - 1;
/* This here seems to blow up */
- cmd = ioread32(engine->buffer->virtual_start + head);
+ cmd = ioread32(vaddr + head);
if (cmd == ipehr)
break;
@@ -2917,11 +2907,11 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
if (!i)
return NULL;
- *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
+ *seqno = ioread32(vaddr + head + 4) + 1;
if (INTEL_GEN(dev_priv) >= 8) {
- offset = ioread32(engine->buffer->virtual_start + head + 12);
+ offset = ioread32(vaddr + head + 12);
offset <<= 32;
- offset = ioread32(engine->buffer->virtual_start + head + 8);
+ offset |= ioread32(vaddr + head + 8);
}
return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
@@ -2938,6 +2928,9 @@ static int semaphore_passed(struct intel_engine_cs *engine)
if (signaller == NULL)
return -1;
+ if (IS_ERR(signaller))
+ return 0;
+
/* Prevent pathological recursion due to driver bugs */
if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;
@@ -2990,7 +2983,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
return stuck;
}
-static enum intel_ring_hangcheck_action
+static enum intel_engine_hangcheck_action
head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
if (acthd != engine->hangcheck.acthd) {
@@ -3008,11 +3001,11 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
return HANGCHECK_HUNG;
}
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *engine, u64 acthd)
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
{
struct drm_i915_private *dev_priv = engine->i915;
- enum intel_ring_hangcheck_action ha;
+ enum intel_engine_hangcheck_action ha;
u32 tmp;
ha = head_stuck(engine, acthd);
@@ -3054,22 +3047,6 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
return HANGCHECK_HUNG;
}
-static unsigned long kick_waiters(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
- unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
-
- if (engine->hangcheck.user_interrupts == irq_count &&
- !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
- if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
- DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
- engine->name);
-
- intel_engine_enable_fake_irq(engine);
- }
-
- return irq_count;
-}
/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track per ring seqno progress and
@@ -3107,7 +3084,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
bool busy = intel_engine_has_waiter(engine);
u64 acthd;
u32 seqno;
- unsigned user_interrupts;
+ u32 submit;
semaphore_clear_deadlocks(dev_priv);
@@ -3121,29 +3098,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
- acthd = intel_ring_get_active_head(engine);
+ acthd = intel_engine_get_active_head(engine);
seqno = intel_engine_get_seqno(engine);
-
- /* Reset stuck interrupts between batch advances */
- user_interrupts = 0;
+ submit = READ_ONCE(engine->last_submitted_seqno);
if (engine->hangcheck.seqno == seqno) {
- if (ring_idle(engine, seqno)) {
+ if (i915_seqno_passed(seqno, submit)) {
engine->hangcheck.action = HANGCHECK_IDLE;
- if (busy) {
- /* Safeguard against driver failure */
- user_interrupts = kick_waiters(engine);
- engine->hangcheck.score += BUSY;
- }
} else {
/* We always increment the hangcheck score
- * if the ring is busy and still processing
+ * if the engine is busy and still processing
* the same request, so that no single request
* can run indefinitely (such as a chain of
* batches). The only time we do not increment
* the hangcheck score on this ring, if this
- * ring is in a legitimate wait for another
- * ring. In that case the waiting ring is a
+ * engine is in a legitimate wait for another
+ * engine. In that case the waiting engine is a
* victim and we want to be sure we catch the
* right culprit. Then every time we do kick
* the ring, add a small increment to the
@@ -3151,8 +3121,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
- engine->hangcheck.action = ring_stuck(engine,
- acthd);
+ engine->hangcheck.action =
+ engine_stuck(engine, acthd);
switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
@@ -3195,12 +3165,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
engine->hangcheck.seqno = seqno;
engine->hangcheck.acthd = acthd;
- engine->hangcheck.user_interrupts = user_interrupts;
busy_count += busy;
}
if (hung) {
char msg[80];
+ unsigned int tmp;
int len;
/* If some rings hung but others were still busy, only
@@ -3210,7 +3180,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
hung &= ~stuck;
len = scnprintf(msg, sizeof(msg),
"%s on ", stuck == hung ? "No progress" : "Hang");
- for_each_engine_masked(engine, dev_priv, hung)
+ for_each_engine_masked(engine, dev_priv, hung, tmp)
len += scnprintf(msg + len, sizeof(msg) - len,
"%s, ", engine->name);
msg[len-2] = '\0';
@@ -4536,14 +4506,15 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
if (INTEL_INFO(dev_priv)->gen >= 8)
- dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+ dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
if (IS_GEN2(dev_priv)) {
+ /* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
- dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+ dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = g4x_get_vblank_counter;
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
new file mode 100644
index 000000000000..49a079494b68
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <asm/fpu/api.h>
+
+#include "i915_drv.h"
+
+static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
+
+#ifdef CONFIG_AS_MOVNTDQA
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
+{
+ kernel_fpu_begin();
+
+ len >>= 4;
+ while (len >= 4) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movntdqa 16(%0), %%xmm1\n"
+ "movntdqa 32(%0), %%xmm2\n"
+ "movntdqa 48(%0), %%xmm3\n"
+ "movaps %%xmm0, (%1)\n"
+ "movaps %%xmm1, 16(%1)\n"
+ "movaps %%xmm2, 32(%1)\n"
+ "movaps %%xmm3, 48(%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 64;
+ dst += 64;
+ len -= 4;
+ }
+ while (len--) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movaps %%xmm0, (%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 16;
+ dst += 16;
+ }
+
+ kernel_fpu_end();
+}
+#endif
+
+/**
+ * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC
+ * @dst: destination pointer
+ * @src: source pointer
+ * @len: how many bytes to copy
+ *
+ * i915_memcpy_from_wc copies @len bytes from @src to @dst using
+ * non-temporal instructions where available. Note that all arguments
+ * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
+ * of 16.
+ *
+ * To test whether accelerated reads from WC are supported, use
+ * i915_memcpy_from_wc(NULL, NULL, 0);
+ *
+ * Returns true if the copy was successful, false if the preconditions
+ * are not met.
+ */
+bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
+{
+ if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
+ return false;
+
+#ifdef CONFIG_AS_MOVNTDQA
+ if (static_branch_likely(&has_movntdqa)) {
+ if (likely(len))
+ __memcpy_ntdqa(dst, src, len);
+ return true;
+ }
+#endif
+
+ return false;
+}
+
+void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
+{
+ if (static_cpu_has(X86_FEATURE_XMM4_1))
+ static_branch_enable(&has_movntdqa);
+}
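
A minimal sketch of how a caller might consume this helper, relying on the false return (or the documented i915_memcpy_from_wc(NULL, NULL, 0) probe) to fall back to an ordinary uncached read; read_batch() and its arguments are illustrative, not part of the patch:

static void read_batch(void *dst, const void __iomem *src, unsigned long len)
{
	/* Fast path: non-temporal SSE4.1 copy, needs 16-byte alignment */
	if (i915_memcpy_from_wc(dst, (const void __force *)src, len))
		return;

	/* Slow path: plain (UC-speed) read out of the WC mapping */
	memcpy_fromio(dst, src, len);
}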
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
new file mode 100644
index 000000000000..e4935dd1fd37
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/io-mapping.h>
+
+#include <asm/pgtable.h>
+
+#include "i915_drv.h"
+
+struct remap_pfn {
+ struct mm_struct *mm;
+ unsigned long pfn;
+ pgprot_t prot;
+};
+
+static int remap_pfn(pte_t *pte, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ /* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+ r->pfn++;
+
+ return 0;
+}
+
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: page frame number of the kernel memory to map
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, unsigned long size,
+ struct io_mapping *iomap)
+{
+ struct remap_pfn r;
+ int err;
+
+ GEM_BUG_ON((vma->vm_flags &
+ (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) !=
+ (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP));
+
+ /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+ r.mm = vma->vm_mm;
+ r.pfn = pfn;
+ r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+ (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+ err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
+}
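
For context, a fault handler can use this to prefault an entire object in one pass instead of inserting PTEs page by page; a hedged sketch, where vma, iomap and page_offset are assumed to be supplied by a hypothetical caller and the mm semaphore is held per the note above:

	/* pfn of the first page of the io_mapping, plus the fault offset */
	err = remap_io_mapping(vma,
			       vma->vm_start,
			       (iomap->base >> PAGE_SHIFT) + page_offset,
			       vma->vm_end - vma->vm_start,
			       iomap);
	if (err)
		return err;	/* any partial PTEs were already zapped */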
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index b6e404c91eed..768ad89d9cd4 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -45,6 +45,7 @@ struct i915_params i915 __read_mostly = {
.fastboot = 0,
.prefault_disable = 0,
.load_detect_test = 0,
+ .force_reset_modeset_test = 0,
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
@@ -161,6 +162,11 @@ MODULE_PARM_DESC(load_detect_test,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
+module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600);
+MODULE_PARM_DESC(force_reset_modeset_test,
+ "Force a modeset during gpu reset for testing (default:false). "
+ "For developers only.");
+
module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness,
"Invert backlight brightness "
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 0ad020b4a925..3a0dd78ddb38 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -57,6 +57,7 @@ struct i915_params {
bool fastboot;
bool prefault_disable;
bool load_detect_test;
+ bool force_reset_modeset_test;
bool reset;
bool disable_display;
bool verbose_state_checks;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 949c01686a66..31e6edd08dd0 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -54,207 +54,216 @@
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+#define GEN2_FEATURES \
+ .gen = 2, .num_pipes = 1, \
+ .has_overlay = 1, .overlay_needs_physical = 1, \
+ .has_gmch_display = 1, \
+ .hws_needs_physical = 1, \
+ .ring_mask = RENDER_RING, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ CURSOR_OFFSETS
+
static const struct intel_device_info intel_i830_info = {
- .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ GEN2_FEATURES,
+ .is_mobile = 1, .cursor_needs_physical = 1,
+ .num_pipes = 2, /* legal, last one wins */
};
static const struct intel_device_info intel_845g_info = {
- .gen = 2, .num_pipes = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ GEN2_FEATURES,
};
static const struct intel_device_info intel_i85x_info = {
- .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+ GEN2_FEATURES,
+ .is_i85x = 1, .is_mobile = 1,
+ .num_pipes = 2, /* legal, last one wins */
.cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
.has_fbc = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i865g_info = {
- .gen = 2, .num_pipes = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ GEN2_FEATURES,
};
+#define GEN3_FEATURES \
+ .gen = 3, .num_pipes = 2, \
+ .has_gmch_display = 1, \
+ .ring_mask = RENDER_RING, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ CURSOR_OFFSETS
+
static const struct intel_device_info intel_i915g_info = {
- .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ GEN3_FEATURES,
+ .is_i915g = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ .hws_needs_physical = 1,
};
static const struct intel_device_info intel_i915gm_info = {
- .gen = 3, .is_mobile = 1, .num_pipes = 2,
+ GEN3_FEATURES,
+ .is_mobile = 1,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
.has_fbc = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ .hws_needs_physical = 1,
};
static const struct intel_device_info intel_i945g_info = {
- .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ GEN3_FEATURES,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ .hws_needs_physical = 1,
};
static const struct intel_device_info intel_i945gm_info = {
- .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+ GEN3_FEATURES,
+ .is_i945gm = 1, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
.has_fbc = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ .hws_needs_physical = 1,
};
+#define GEN4_FEATURES \
+ .gen = 4, .num_pipes = 2, \
+ .has_hotplug = 1, \
+ .has_gmch_display = 1, \
+ .ring_mask = RENDER_RING, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ CURSOR_OFFSETS
+
static const struct intel_device_info intel_i965g_info = {
- .gen = 4, .is_broadwater = 1, .num_pipes = 2,
- .has_hotplug = 1,
+ GEN4_FEATURES,
+ .is_broadwater = 1,
.has_overlay = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ .hws_needs_physical = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .gen = 4, .is_crestline = 1, .num_pipes = 2,
- .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+ GEN4_FEATURES,
+ .is_crestline = 1,
+ .is_mobile = 1, .has_fbc = 1,
.has_overlay = 1,
.supports_tv = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ .hws_needs_physical = 1,
};
static const struct intel_device_info intel_g33_info = {
- .gen = 3, .is_g33 = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ GEN3_FEATURES,
+ .is_g33 = 1,
+ .has_hotplug = 1,
.has_overlay = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
};
static const struct intel_device_info intel_g45_info = {
- .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
- .has_pipe_cxsr = 1, .has_hotplug = 1,
+ GEN4_FEATURES,
+ .is_g4x = 1,
+ .has_pipe_cxsr = 1,
.ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
};
static const struct intel_device_info intel_gm45_info = {
- .gen = 4, .is_g4x = 1, .num_pipes = 2,
- .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
- .has_pipe_cxsr = 1, .has_hotplug = 1,
+ GEN4_FEATURES,
+ .is_g4x = 1,
+ .is_mobile = 1, .has_fbc = 1,
+ .has_pipe_cxsr = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
};
static const struct intel_device_info intel_pineview_info = {
- .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ GEN3_FEATURES,
+ .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
+ .has_hotplug = 1,
.has_overlay = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
};
+#define GEN5_FEATURES \
+ .gen = 5, .num_pipes = 2, \
+ .has_hotplug = 1, \
+ .has_gmbus_irq = 1, \
+ .ring_mask = RENDER_RING | BSD_RING, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ CURSOR_OFFSETS
+
static const struct intel_device_info intel_ironlake_d_info = {
- .gen = 5, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ GEN5_FEATURES,
};
static const struct intel_device_info intel_ironlake_m_info = {
- .gen = 5, .is_mobile = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ GEN5_FEATURES,
+ .is_mobile = 1,
};
+#define GEN6_FEATURES \
+ .gen = 6, .num_pipes = 2, \
+ .has_hotplug = 1, \
+ .has_fbc = 1, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .has_llc = 1, \
+ .has_rc6 = 1, \
+ .has_rc6p = 1, \
+ .has_gmbus_irq = 1, \
+ .has_hw_contexts = 1, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ CURSOR_OFFSETS
+
static const struct intel_device_info intel_sandybridge_d_info = {
- .gen = 6, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
- .has_llc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ GEN6_FEATURES,
};
static const struct intel_device_info intel_sandybridge_m_info = {
- .gen = 6, .is_mobile = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
- .has_llc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ GEN6_FEATURES,
+ .is_mobile = 1,
};
#define GEN7_FEATURES \
.gen = 7, .num_pipes = 3, \
- .need_gfx_hws = 1, .has_hotplug = 1, \
+ .has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_llc = 1, \
+ .has_rc6 = 1, \
+ .has_rc6p = 1, \
+ .has_gmbus_irq = 1, \
+ .has_hw_contexts = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
+ .has_l3_dpf = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.is_mobile = 1,
+ .has_l3_dpf = 1,
};
static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
.is_ivybridge = 1,
.num_pipes = 0, /* legal, last one wins */
+ .has_l3_dpf = 1,
};
#define VLV_FEATURES \
.gen = 7, .num_pipes = 2, \
- .need_gfx_hws = 1, .has_hotplug = 1, \
+ .has_psr = 1, \
+ .has_runtime_pm = 1, \
+ .has_rc6 = 1, \
+ .has_gmbus_irq = 1, \
+ .has_hw_contexts = 1, \
+ .has_gmch_display = 1, \
+ .has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.display_mmio_offset = VLV_DISPLAY_BASE, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_valleyview_m_info = {
- VLV_FEATURES,
- .is_valleyview = 1,
- .is_mobile = 1,
-};
-
-static const struct intel_device_info intel_valleyview_d_info = {
+static const struct intel_device_info intel_valleyview_info = {
VLV_FEATURES,
.is_valleyview = 1,
};
@@ -263,54 +272,50 @@ static const struct intel_device_info intel_valleyview_d_info = {
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.has_ddi = 1, \
- .has_fpga_dbg = 1
-
-static const struct intel_device_info intel_haswell_d_info = {
- HSW_FEATURES,
- .is_haswell = 1,
-};
-
-static const struct intel_device_info intel_haswell_m_info = {
+ .has_fpga_dbg = 1, \
+ .has_psr = 1, \
+ .has_resource_streamer = 1, \
+ .has_dp_mst = 1, \
+ .has_rc6p = 0 /* RC6p removed by HSW */, \
+ .has_runtime_pm = 1
+
+static const struct intel_device_info intel_haswell_info = {
HSW_FEATURES,
.is_haswell = 1,
- .is_mobile = 1,
+ .has_l3_dpf = 1,
};
#define BDW_FEATURES \
HSW_FEATURES, \
- BDW_COLORS
+ BDW_COLORS, \
+ .has_logical_ring_contexts = 1
-static const struct intel_device_info intel_broadwell_d_info = {
+static const struct intel_device_info intel_broadwell_info = {
BDW_FEATURES,
.gen = 8,
.is_broadwell = 1,
};
-static const struct intel_device_info intel_broadwell_m_info = {
- BDW_FEATURES,
- .gen = 8, .is_mobile = 1,
- .is_broadwell = 1,
-};
-
-static const struct intel_device_info intel_broadwell_gt3d_info = {
+static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_FEATURES,
.gen = 8,
.is_broadwell = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
-static const struct intel_device_info intel_broadwell_gt3m_info = {
- BDW_FEATURES,
- .gen = 8, .is_mobile = 1,
- .is_broadwell = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
-
static const struct intel_device_info intel_cherryview_info = {
.gen = 8, .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.is_cherryview = 1,
+ .has_psr = 1,
+ .has_runtime_pm = 1,
+ .has_resource_streamer = 1,
+ .has_rc6 = 1,
+ .has_gmbus_irq = 1,
+ .has_hw_contexts = 1,
+ .has_logical_ring_contexts = 1,
+ .has_gmch_display = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
@@ -321,25 +326,41 @@ static const struct intel_device_info intel_skylake_info = {
BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
+ .has_csr = 1,
+ .has_guc = 1,
+ .ddb_size = 896,
};
static const struct intel_device_info intel_skylake_gt3_info = {
BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
+ .has_csr = 1,
+ .has_guc = 1,
+ .ddb_size = 896,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_broxton_info = {
.is_broxton = 1,
.gen = 9,
- .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.num_pipes = 3,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
+ .has_runtime_pm = 1,
.has_pooled_eu = 0,
+ .has_csr = 1,
+ .has_resource_streamer = 1,
+ .has_rc6 = 1,
+ .has_dp_mst = 1,
+ .has_gmbus_irq = 1,
+ .has_hw_contexts = 1,
+ .has_logical_ring_contexts = 1,
+ .has_guc = 1,
+ .ddb_size = 512,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
BDW_COLORS,
@@ -349,12 +370,18 @@ static const struct intel_device_info intel_kabylake_info = {
BDW_FEATURES,
.is_kabylake = 1,
.gen = 9,
+ .has_csr = 1,
+ .has_guc = 1,
+ .ddb_size = 896,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
BDW_FEATURES,
.is_kabylake = 1,
.gen = 9,
+ .has_csr = 1,
+ .has_guc = 1,
+ .ddb_size = 896,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
@@ -386,14 +413,10 @@ static const struct pci_device_id pciidlist[] = {
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
- INTEL_HSW_D_IDS(&intel_haswell_d_info),
- INTEL_HSW_M_IDS(&intel_haswell_m_info),
- INTEL_VLV_M_IDS(&intel_valleyview_m_info),
- INTEL_VLV_D_IDS(&intel_valleyview_d_info),
- INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
- INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
- INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
- INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+ INTEL_HSW_IDS(&intel_haswell_info),
+ INTEL_VLV_IDS(&intel_valleyview_info),
+ INTEL_BDW_GT12_IDS(&intel_broadwell_info),
+ INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
INTEL_CHV_IDS(&intel_cherryview_info),
INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info),
@@ -408,9 +431,6 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-extern int i915_driver_load(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
@@ -440,8 +460,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return i915_driver_load(pdev, ent);
}
-extern void i915_driver_unload(struct drm_device *dev);
-
static void i915_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -450,8 +468,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
drm_dev_unref(dev);
}
-extern const struct dev_pm_ops i915_pm_ops;
-
static struct pci_driver i915_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index bf2cad3f9e1f..70d96162def6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,13 +186,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN9_GRDOM_GUC (1 << 5)
#define GEN8_GRDOM_MEDIA2 (1 << 7)
-#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(ring) _MMIO((ring)->mmio_base+0x220)
+#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base+0x220)
#define PP_DIR_DCLV_2G 0xffffffff
-#define GEN8_RING_PDP_UDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8)
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
@@ -1648,7 +1648,7 @@ enum skl_disp_power_wells {
#define ARB_MODE_BWGTLB_DISABLE (1<<9)
#define ARB_MODE_SWIZZLE_BDW (1<<1)
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
-#define RING_FAULT_REG(ring) _MMIO(0x4094 + 0x100*(ring)->id)
+#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
#define RING_FAULT_GTTSEL_MASK (1<<11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
@@ -1846,7 +1846,7 @@ enum skl_disp_power_wells {
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
-#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c)
+#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_INTERRUPT_STEERING (1<<14)
#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
@@ -3660,8 +3660,17 @@ enum {
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
-#define PP_STATUS _MMIO(0x61200)
-#define PP_ON (1 << 31)
+#define PPS_BASE 0x61200
+#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
+#define PCH_PPS_BASE 0xC7200
+
+#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->pps_mmio_base - \
+ PPS_BASE + (reg) + \
+ (pps_idx) * 0x100)
+
+#define _PP_STATUS 0x61200
+#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
+#define PP_ON (1 << 31)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -3669,14 +3678,14 @@ enum {
* - pipe enabled
* - LVDS/DVOB/DVOC on
*/
-#define PP_READY (1 << 30)
-#define PP_SEQUENCE_NONE (0 << 28)
-#define PP_SEQUENCE_POWER_UP (1 << 28)
-#define PP_SEQUENCE_POWER_DOWN (2 << 28)
-#define PP_SEQUENCE_MASK (3 << 28)
-#define PP_SEQUENCE_SHIFT 28
-#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
-#define PP_SEQUENCE_STATE_MASK 0x0000000f
+#define PP_READY (1 << 30)
+#define PP_SEQUENCE_NONE (0 << 28)
+#define PP_SEQUENCE_POWER_UP (1 << 28)
+#define PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define PP_SEQUENCE_MASK (3 << 28)
+#define PP_SEQUENCE_SHIFT 28
+#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
+#define PP_SEQUENCE_STATE_MASK 0x0000000f
#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
@@ -3686,11 +3695,46 @@ enum {
#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
#define PP_SEQUENCE_STATE_RESET (0xf << 0)
-#define PP_CONTROL _MMIO(0x61204)
-#define POWER_TARGET_ON (1 << 0)
-#define PP_ON_DELAYS _MMIO(0x61208)
-#define PP_OFF_DELAYS _MMIO(0x6120c)
-#define PP_DIVISOR _MMIO(0x61210)
+
+#define _PP_CONTROL 0x61204
+#define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL)
+#define PANEL_UNLOCK_REGS (0xabcd << 16)
+#define PANEL_UNLOCK_MASK (0xffff << 16)
+#define BXT_POWER_CYCLE_DELAY_MASK 0x1f0
+#define BXT_POWER_CYCLE_DELAY_SHIFT 4
+#define EDP_FORCE_VDD (1 << 3)
+#define EDP_BLC_ENABLE (1 << 2)
+#define PANEL_POWER_RESET (1 << 1)
+#define PANEL_POWER_OFF (0 << 0)
+#define PANEL_POWER_ON (1 << 0)
+
+#define _PP_ON_DELAYS 0x61208
+#define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS)
+#define PANEL_PORT_SELECT_SHIFT 30
+#define PANEL_PORT_SELECT_MASK (3 << 30)
+#define PANEL_PORT_SELECT_LVDS (0 << 30)
+#define PANEL_PORT_SELECT_DPA (1 << 30)
+#define PANEL_PORT_SELECT_DPC (2 << 30)
+#define PANEL_PORT_SELECT_DPD (3 << 30)
+#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
+#define PANEL_POWER_UP_DELAY_MASK 0x1fff0000
+#define PANEL_POWER_UP_DELAY_SHIFT 16
+#define PANEL_LIGHT_ON_DELAY_MASK 0x1fff
+#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+
+#define _PP_OFF_DELAYS 0x6120C
+#define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
+#define PANEL_POWER_DOWN_DELAY_MASK 0x1fff0000
+#define PANEL_POWER_DOWN_DELAY_SHIFT 16
+#define PANEL_LIGHT_OFF_DELAY_MASK 0x1fff
+#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+
+#define _PP_DIVISOR 0x61210
+#define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR)
+#define PP_REFERENCE_DIVIDER_MASK 0xffffff00
+#define PP_REFERENCE_DIVIDER_SHIFT 8
+#define PANEL_POWER_CYCLE_DELAY_MASK 0x1f
+#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
/* Panel fitting */
#define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
@@ -6133,6 +6177,7 @@ enum {
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
@@ -6749,77 +6794,6 @@ enum {
#define PCH_LVDS _MMIO(0xe1180)
#define LVDS_DETECTED (1 << 1)
-/* vlv has 2 sets of panel control regs. */
-#define _PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
-#define _PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
-#define _PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
-#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
-#define _PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
-#define _PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
-
-#define _PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
-#define _PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
-#define _PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
-#define _PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
-#define _PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
-
-#define VLV_PIPE_PP_STATUS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_STATUS, _PIPEB_PP_STATUS)
-#define VLV_PIPE_PP_CONTROL(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_CONTROL, _PIPEB_PP_CONTROL)
-#define VLV_PIPE_PP_ON_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_ON_DELAYS, _PIPEB_PP_ON_DELAYS)
-#define VLV_PIPE_PP_OFF_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_OFF_DELAYS, _PIPEB_PP_OFF_DELAYS)
-#define VLV_PIPE_PP_DIVISOR(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_DIVISOR, _PIPEB_PP_DIVISOR)
-
-#define _PCH_PP_STATUS 0xc7200
-#define _PCH_PP_CONTROL 0xc7204
-#define PANEL_UNLOCK_REGS (0xabcd << 16)
-#define PANEL_UNLOCK_MASK (0xffff << 16)
-#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0)
-#define BXT_POWER_CYCLE_DELAY_SHIFT 4
-#define EDP_FORCE_VDD (1 << 3)
-#define EDP_BLC_ENABLE (1 << 2)
-#define PANEL_POWER_RESET (1 << 1)
-#define PANEL_POWER_OFF (0 << 0)
-#define PANEL_POWER_ON (1 << 0)
-#define _PCH_PP_ON_DELAYS 0xc7208
-#define PANEL_PORT_SELECT_MASK (3 << 30)
-#define PANEL_PORT_SELECT_LVDS (0 << 30)
-#define PANEL_PORT_SELECT_DPA (1 << 30)
-#define PANEL_PORT_SELECT_DPC (2 << 30)
-#define PANEL_PORT_SELECT_DPD (3 << 30)
-#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
-#define PANEL_POWER_UP_DELAY_SHIFT 16
-#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
-#define PANEL_LIGHT_ON_DELAY_SHIFT 0
-
-#define _PCH_PP_OFF_DELAYS 0xc720c
-#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
-#define PANEL_POWER_DOWN_DELAY_SHIFT 16
-#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
-#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
-
-#define _PCH_PP_DIVISOR 0xc7210
-#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
-#define PP_REFERENCE_DIVIDER_SHIFT 8
-#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
-#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
-
-#define PCH_PP_STATUS _MMIO(_PCH_PP_STATUS)
-#define PCH_PP_CONTROL _MMIO(_PCH_PP_CONTROL)
-#define PCH_PP_ON_DELAYS _MMIO(_PCH_PP_ON_DELAYS)
-#define PCH_PP_OFF_DELAYS _MMIO(_PCH_PP_OFF_DELAYS)
-#define PCH_PP_DIVISOR _MMIO(_PCH_PP_DIVISOR)
-
-/* BXT PPS changes - 2nd set of PPS registers */
-#define _BXT_PP_STATUS2 0xc7300
-#define _BXT_PP_CONTROL2 0xc7304
-#define _BXT_PP_ON_DELAYS2 0xc7308
-#define _BXT_PP_OFF_DELAYS2 0xc730c
-
-#define BXT_PP_STATUS(n) _MMIO_PIPE(n, _PCH_PP_STATUS, _BXT_PP_STATUS2)
-#define BXT_PP_CONTROL(n) _MMIO_PIPE(n, _PCH_PP_CONTROL, _BXT_PP_CONTROL2)
-#define BXT_PP_ON_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2)
-#define BXT_PP_OFF_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2)
-
#define _PCH_DP_B 0xe4100
#define PCH_DP_B _MMIO(_PCH_DP_B)
#define _PCH_DPB_AUX_CH_CTL 0xe4110
@@ -6959,6 +6933,9 @@ enum {
#define ECOBUS _MMIO(0xa180)
#define FORCEWAKE_MT_ENABLE (1<<5)
#define VLV_SPAREG2H _MMIO(0xA194)
+#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0)
+#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
+#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
@@ -7059,12 +7036,13 @@ enum {
#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
-#define GEN6_CURICONT_MASK 0xffffff
+#define GEN6_RP_EI_MASK 0xffffff
+#define GEN6_CURICONT_MASK GEN6_RP_EI_MASK
#define GEN6_RP_CUR_UP _MMIO(0xA054)
-#define GEN6_CURBSYTAVG_MASK 0xffffff
+#define GEN6_CURBSYTAVG_MASK GEN6_RP_EI_MASK
#define GEN6_RP_PREV_UP _MMIO(0xA058)
#define GEN6_RP_CUR_DOWN_EI _MMIO(0xA05C)
-#define GEN6_CURIAVG_MASK 0xffffff
+#define GEN6_CURIAVG_MASK GEN6_RP_EI_MASK
#define GEN6_RP_CUR_DOWN _MMIO(0xA060)
#define GEN6_RP_PREV_DOWN _MMIO(0xA064)
#define GEN6_RP_UP_EI _MMIO(0xA068)
@@ -7089,7 +7067,7 @@ enum {
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
-#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
+#define GEN8_PMINTR_REDIRECT_TO_GUC (1<<31)
#define GEN8_MISC_CTRL0 _MMIO(0xA180)
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
@@ -7499,6 +7477,7 @@ enum {
#define _DDI_BUF_TRANS_A 0x64E00
#define _DDI_BUF_TRANS_B 0x64E60
#define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8)
+#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31)
#define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4)
/* Sideband Interface (SBI) is programmed indirectly, via
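
As a cross-check against the per-pipe macros removed above, _MMIO_PPS() reduces to pps_mmio_base + (reg - PPS_BASE) + pps_idx * 0x100, so the indexed lookups land on the old fixed addresses; a worked sketch, assuming pps_mmio_base was initialised to one of the bases defined above:

/* VLV/CHV: pps_mmio_base == VLV_PPS_BASE */
PP_STATUS(0);	/* VLV_DISPLAY_BASE + 0x61200, the old _PIPEA_PP_STATUS */
PP_STATUS(1);	/* VLV_DISPLAY_BASE + 0x61300, the old _PIPEB_PP_STATUS */

/* PCH/BXT: pps_mmio_base == PCH_PPS_BASE (0xc7200) */
PP_STATUS(0);	/* 0xc7200, the old _PCH_PP_STATUS */
PP_STATUS(1);	/* 0xc7300, the old _BXT_PP_STATUS2 */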
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5cfe4c7716b4..a0af170062b1 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -37,25 +37,6 @@ static void i915_save_display(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
- /* LVDS state */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
- else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
- dev_priv->regfile.saveLVDS = I915_READ(LVDS);
-
- /* Panel power sequencer */
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
- dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
- dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
- dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
- } else if (INTEL_INFO(dev)->gen <= 4) {
- dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
- }
-
/* save FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
@@ -64,33 +45,11 @@ static void i915_save_display(struct drm_device *dev)
static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 mask = 0xffffffff;
/* Display arbitration */
if (INTEL_INFO(dev)->gen <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
- mask = ~LVDS_PORT_EN;
-
- /* LVDS state */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
- else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
- I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
-
- /* Panel power sequencer */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
- I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
- I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
- I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
- } else if (INTEL_INFO(dev)->gen <= 4) {
- I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
- }
-
/* Only restore FBC info on platforms that support FBC */
intel_fbc_global_disable(dev_priv);
@@ -104,6 +63,7 @@ static void i915_restore_display(struct drm_device *dev)
int i915_save_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
mutex_lock(&dev->struct_mutex);
@@ -111,7 +71,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
if (IS_GEN4(dev))
- pci_read_config_word(dev->pdev, GCDGMBUS,
+ pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
/* Cache mode state */
@@ -149,6 +109,7 @@ int i915_save_state(struct drm_device *dev)
int i915_restore_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
mutex_lock(&dev->struct_mutex);
@@ -156,7 +117,7 @@ int i915_restore_state(struct drm_device *dev)
i915_gem_restore_fences(dev);
if (IS_GEN4(dev))
- pci_write_config_word(dev->pdev, GCDGMBUS,
+ pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
new file mode 100644
index 000000000000..1e5cbc585ca2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -0,0 +1,362 @@
+/*
+ * (C) Copyright 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/slab.h>
+#include <linux/fence.h>
+#include <linux/reservation.h>
+
+#include "i915_sw_fence.h"
+
+static DEFINE_SPINLOCK(i915_sw_fence_lock);
+
+static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify state)
+{
+ i915_sw_fence_notify_t fn;
+
+ fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
+ return fn(fence, state);
+}
+
+static void i915_sw_fence_free(struct kref *kref)
+{
+ struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);
+
+ WARN_ON(atomic_read(&fence->pending) > 0);
+
+ if (fence->flags & I915_SW_FENCE_MASK)
+ __i915_sw_fence_notify(fence, FENCE_FREE);
+ else
+ kfree(fence);
+}
+
+static void i915_sw_fence_put(struct i915_sw_fence *fence)
+{
+ kref_put(&fence->kref, i915_sw_fence_free);
+}
+
+static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
+{
+ kref_get(&fence->kref);
+ return fence;
+}
+
+static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
+ struct list_head *continuation)
+{
+ wait_queue_head_t *x = &fence->wait;
+ wait_queue_t *pos, *next;
+ unsigned long flags;
+
+ atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */
+
+ /*
+ * To prevent unbounded recursion as we traverse the graph of
+ * i915_sw_fences, we move the task_list from this, the next ready
+ * fence, to the tail of the original fence's task_list
+ * (and so added to the list to be woken).
+ */
+
+ spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
+ if (continuation) {
+ list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
+ if (pos->func == autoremove_wake_function)
+ pos->func(pos, TASK_NORMAL, 0, continuation);
+ else
+ list_move_tail(&pos->task_list, continuation);
+ }
+ } else {
+ LIST_HEAD(extra);
+
+ do {
+ list_for_each_entry_safe(pos, next,
+ &x->task_list, task_list)
+ pos->func(pos, TASK_NORMAL, 0, &extra);
+
+ if (list_empty(&extra))
+ break;
+
+ list_splice_tail_init(&extra, &x->task_list);
+ } while (1);
+ }
+ spin_unlock_irqrestore(&x->lock, flags);
+}
+
+static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
+ struct list_head *continuation)
+{
+ if (!atomic_dec_and_test(&fence->pending))
+ return;
+
+ if (fence->flags & I915_SW_FENCE_MASK &&
+ __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
+ return;
+
+ __i915_sw_fence_wake_up_all(fence, continuation);
+}
+
+static void i915_sw_fence_complete(struct i915_sw_fence *fence)
+{
+ if (WARN_ON(i915_sw_fence_done(fence)))
+ return;
+
+ __i915_sw_fence_complete(fence, NULL);
+}
+
+static void i915_sw_fence_await(struct i915_sw_fence *fence)
+{
+ WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+}
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
+{
+ BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
+
+ init_waitqueue_head(&fence->wait);
+ kref_init(&fence->kref);
+ atomic_set(&fence->pending, 1);
+ fence->flags = (unsigned long)fn;
+}
+
+void i915_sw_fence_commit(struct i915_sw_fence *fence)
+{
+ i915_sw_fence_complete(fence);
+ i915_sw_fence_put(fence);
+}
+
+static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
+{
+ list_del(&wq->task_list);
+ __i915_sw_fence_complete(wq->private, key);
+ i915_sw_fence_put(wq->private);
+ return 0;
+}
+
+static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+ const struct i915_sw_fence * const signaler)
+{
+ wait_queue_t *wq;
+
+ if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+ return false;
+
+ if (fence == signaler)
+ return true;
+
+ list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+ if (wq->func != i915_sw_fence_wake)
+ continue;
+
+ if (__i915_sw_fence_check_if_after(wq->private, signaler))
+ return true;
+ }
+
+ return false;
+}
+
+static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
+{
+ wait_queue_t *wq;
+
+ if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
+ return;
+
+ list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+ if (wq->func != i915_sw_fence_wake)
+ continue;
+
+ __i915_sw_fence_clear_checked_bit(wq->private);
+ }
+}
+
+static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
+ const struct i915_sw_fence * const signaler)
+{
+ unsigned long flags;
+ bool err;
+
+ if (!IS_ENABLED(CONFIG_I915_SW_FENCE_CHECK_DAG))
+ return false;
+
+ spin_lock_irqsave(&i915_sw_fence_lock, flags);
+ err = __i915_sw_fence_check_if_after(fence, signaler);
+ __i915_sw_fence_clear_checked_bit(fence);
+ spin_unlock_irqrestore(&i915_sw_fence_lock, flags);
+
+ return err;
+}
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *signaler,
+ wait_queue_t *wq)
+{
+ unsigned long flags;
+ int pending;
+
+ if (i915_sw_fence_done(signaler))
+ return 0;
+
+ /* The dependency graph must be acyclic. */
+ if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&wq->task_list);
+ wq->flags = 0;
+ wq->func = i915_sw_fence_wake;
+ wq->private = i915_sw_fence_get(fence);
+
+ i915_sw_fence_await(fence);
+
+ spin_lock_irqsave(&signaler->wait.lock, flags);
+ if (likely(!i915_sw_fence_done(signaler))) {
+ __add_wait_queue_tail(&signaler->wait, wq);
+ pending = 1;
+ } else {
+ i915_sw_fence_wake(wq, 0, 0, NULL);
+ pending = 0;
+ }
+ spin_unlock_irqrestore(&signaler->wait.lock, flags);
+
+ return pending;
+}
+
+struct dma_fence_cb {
+ struct fence_cb base;
+ struct i915_sw_fence *fence;
+ struct fence *dma;
+ struct timer_list timer;
+};
+
+static void timer_i915_sw_fence_wake(unsigned long data)
+{
+ struct dma_fence_cb *cb = (struct dma_fence_cb *)data;
+
+ printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
+ cb->dma->ops->get_driver_name(cb->dma),
+ cb->dma->ops->get_timeline_name(cb->dma),
+ cb->dma->seqno);
+ fence_put(cb->dma);
+ cb->dma = NULL;
+
+ i915_sw_fence_commit(cb->fence);
+ cb->timer.function = NULL;
+}
+
+static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
+{
+ struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+ del_timer_sync(&cb->timer);
+ if (cb->timer.function)
+ i915_sw_fence_commit(cb->fence);
+ fence_put(cb->dma);
+
+ kfree(cb);
+}
+
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+ struct fence *dma,
+ unsigned long timeout,
+ gfp_t gfp)
+{
+ struct dma_fence_cb *cb;
+ int ret;
+
+ if (fence_is_signaled(dma))
+ return 0;
+
+ cb = kmalloc(sizeof(*cb), gfp);
+ if (!cb) {
+ if (!gfpflags_allow_blocking(gfp))
+ return -ENOMEM;
+
+ return fence_wait(dma, false);
+ }
+
+ cb->fence = i915_sw_fence_get(fence);
+ i915_sw_fence_await(fence);
+
+ cb->dma = NULL;
+ __setup_timer(&cb->timer,
+ timer_i915_sw_fence_wake, (unsigned long)cb,
+ TIMER_IRQSAFE);
+ if (timeout) {
+ cb->dma = fence_get(dma);
+ mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
+ }
+
+ ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
+ if (ret == 0) {
+ ret = 1;
+ } else {
+ dma_i915_sw_fence_wake(dma, &cb->base);
+ if (ret == -ENOENT) /* fence already signaled */
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+ struct reservation_object *resv,
+ const struct fence_ops *exclude,
+ bool write,
+ unsigned long timeout,
+ gfp_t gfp)
+{
+ struct fence *excl;
+ int ret = 0, pending;
+
+ if (write) {
+ struct fence **shared;
+ unsigned int count, i;
+
+ ret = reservation_object_get_fences_rcu(resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (shared[i]->ops == exclude)
+ continue;
+
+ pending = i915_sw_fence_await_dma_fence(fence,
+ shared[i],
+ timeout,
+ gfp);
+ if (pending < 0) {
+ ret = pending;
+ break;
+ }
+
+ ret |= pending;
+ }
+
+ for (i = 0; i < count; i++)
+ fence_put(shared[i]);
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(resv);
+ }
+
+ if (ret >= 0 && excl && excl->ops != exclude) {
+ pending = i915_sw_fence_await_dma_fence(fence,
+ excl,
+ timeout,
+ gfp);
+ if (pending < 0)
+ ret = pending;
+ else
+ ret |= pending;
+ }
+
+ fence_put(excl);
+
+ return ret;
+}
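
The dma-fence await returns 1 if a callback (and optional timeout timer) was installed, 0 if the fence had already signaled, and a negative error otherwise; note the GFP_KERNEL fallback to a synchronous fence_wait() when the callback allocation fails. A hedged usage sketch, with req and excl_fence as hypothetical stand-ins:

	pending = i915_sw_fence_await_dma_fence(&req->submit, excl_fence,
						10 * HZ, GFP_KERNEL);
	if (pending < 0)
		return pending;	/* 0 or 1: nothing more to do here */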
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
new file mode 100644
index 000000000000..373141602ca4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -0,0 +1,65 @@
+/*
+ * i915_sw_fence.h - library routines for N:M synchronisation points
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _I915_SW_FENCE_H_
+#define _I915_SW_FENCE_H_
+
+#include <linux/gfp.h>
+#include <linux/kref.h>
+#include <linux/notifier.h> /* for NOTIFY_DONE */
+#include <linux/wait.h>
+
+struct completion;
+struct fence;
+struct fence_ops;
+struct reservation_object;
+
+struct i915_sw_fence {
+ wait_queue_head_t wait;
+ unsigned long flags;
+ struct kref kref;
+ atomic_t pending;
+};
+
+#define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */
+#define I915_SW_FENCE_PRIVATE_BIT 1 /* available for use by owner */
+#define I915_SW_FENCE_MASK (~3)
+
+enum i915_sw_fence_notify {
+ FENCE_COMPLETE,
+ FENCE_FREE
+};
+
+typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *,
+ enum i915_sw_fence_notify state);
+#define __i915_sw_fence_call __aligned(4)
+
+void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn);
+void i915_sw_fence_commit(struct i915_sw_fence *fence);
+
+int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
+ struct i915_sw_fence *after,
+ wait_queue_t *wq);
+int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+ struct fence *dma,
+ unsigned long timeout,
+ gfp_t gfp);
+int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
+ struct reservation_object *resv,
+ const struct fence_ops *exclude,
+ bool write,
+ unsigned long timeout,
+ gfp_t gfp);
+
+static inline bool i915_sw_fence_done(const struct i915_sw_fence *fence)
+{
+ return atomic_read(&fence->pending) < 0;
+}
+
+#endif /* _I915_SW_FENCE_H_ */
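
A minimal sketch of the intended API usage: embed an i915_sw_fence in a container, give it a notify callback (which must be 4-byte aligned, hence __i915_sw_fence_call, so its low bits stay clear of the flag bits), chain it behind an earlier fence, then commit to drop the initial pending count. The container and field names (foo, prev, wq) are illustrative:

static int __i915_sw_fence_call
demo_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	switch (state) {
	case FENCE_COMPLETE:	/* all awaited signalers have fired */
		break;
	case FENCE_FREE:	/* last reference gone; free the container */
		break;
	}
	return NOTIFY_DONE;
}

	i915_sw_fence_init(&foo->submit, demo_notify);
	err = i915_sw_fence_await_sw_fence(&foo->submit, &prev->submit,
					   &foo->wq);
	/* err: -EINVAL would create a cycle, 0 already done, 1 now waiting */
	i915_sw_fence_commit(&foo->submit);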
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d61829e54f93..1012eeea1324 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,13 +32,16 @@
#include "intel_drv.h"
#include "i915_drv.h"
-#define dev_to_drm_minor(d) dev_get_drvdata((d))
+static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
+{
+ struct drm_minor *minor = dev_get_drvdata(kdev);
+ return to_i915(minor->dev);
+}
#ifdef CONFIG_PM
-static u32 calc_residency(struct drm_device *dev,
+static u32 calc_residency(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u64 raw_time; /* 32b value may overflow during fixed point math */
u64 units = 128ULL, div = 100000ULL;
u32 ret;
@@ -49,13 +52,13 @@ static u32 calc_residency(struct drm_device *dev,
intel_runtime_pm_get(dev_priv);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
units = 1;
div = dev_priv->czclk_freq;
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
units = 1;
div = 1200; /* 833.33ns */
}
@@ -76,32 +79,32 @@ show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = dev_get_drvdata(kdev);
- u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = dev_to_drm_minor(kdev);
- u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = dev_to_drm_minor(kdev);
- u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = dev_get_drvdata(kdev);
- u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
@@ -144,9 +147,9 @@ static struct attribute_group media_rc6_attr_group = {
};
#endif
-static int l3_access_valid(struct drm_device *dev, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
{
- if (!HAS_L3_DPF(dev))
+ if (!HAS_L3_DPF(dev_priv))
return -EPERM;
if (offset % 4 != 0)
@@ -163,22 +166,21 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
- struct drm_minor *dminor = dev_to_drm_minor(dev);
- struct drm_device *drm_dev = dminor->dev;
- struct drm_i915_private *dev_priv = to_i915(drm_dev);
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct drm_device *dev = &dev_priv->drm;
int slice = (int)(uintptr_t)attr->private;
int ret;
count = round_down(count, 4);
- ret = l3_access_valid(drm_dev, offset);
+ ret = l3_access_valid(dev_priv, offset);
if (ret)
return ret;
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
- ret = i915_mutex_lock_interruptible(drm_dev);
+ ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -189,7 +191,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
else
memset(buf, 0, count);
- mutex_unlock(&drm_dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
return count;
}
@@ -199,30 +201,29 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
- struct drm_minor *dminor = dev_to_drm_minor(dev);
- struct drm_device *drm_dev = dminor->dev;
- struct drm_i915_private *dev_priv = to_i915(drm_dev);
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct drm_device *dev = &dev_priv->drm;
struct i915_gem_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;
- if (!HAS_HW_CONTEXTS(drm_dev))
+ if (!HAS_HW_CONTEXTS(dev_priv))
return -ENXIO;
- ret = l3_access_valid(drm_dev, offset);
+ ret = l3_access_valid(dev_priv, offset);
if (ret)
return ret;
- ret = i915_mutex_lock_interruptible(drm_dev);
+ ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
if (!dev_priv->l3_parity.remap_info[slice]) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
- mutex_unlock(&drm_dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
return -ENOMEM;
}
}
@@ -240,7 +241,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
list_for_each_entry(ctx, &dev_priv->context_list, link)
ctx->remap_slice |= (1<<slice);
- mutex_unlock(&drm_dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
return count;
}
@@ -266,13 +267,9 @@ static struct bin_attribute dpf_attrs_1 = {
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
int ret;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
@@ -300,59 +297,70 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ intel_gpu_freq(dev_priv,
+ dev_priv->rps.cur_freq));
+}
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- intel_runtime_pm_get(dev_priv);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ intel_gpu_freq(dev_priv,
+ dev_priv->rps.boost_freq));
+}
+
+static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ u32 val;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ /* Validate against (static) hardware limits */
+ val = intel_freq_opcode(dev_priv, val);
+ if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
+ return -EINVAL;
mutex_lock(&dev_priv->rps.hw_lock);
- ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
+ dev_priv->rps.boost_freq = val;
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_runtime_pm_put(dev_priv);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return count;
}
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- return snprintf(buf, PAGE_SIZE,
- "%d\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ intel_gpu_freq(dev_priv,
+ dev_priv->rps.efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ intel_gpu_freq(dev_priv,
+ dev_priv->rps.max_freq_softlimit));
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
u32 val;
ssize_t ret;
@@ -360,8 +368,6 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
@@ -400,27 +406,18 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
-
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ intel_gpu_freq(dev_priv,
+ dev_priv->rps.min_freq_softlimit));
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
u32 val;
ssize_t ret;
@@ -428,8 +425,6 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
@@ -465,6 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
@@ -478,9 +474,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
@@ -498,6 +492,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
static const struct attribute *gen6_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_boost_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
&dev_attr_gt_RP0_freq_mhz.attr,
@@ -509,6 +504,7 @@ static const struct attribute *gen6_attrs[] = {
static const struct attribute *vlv_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
+ &dev_attr_gt_boost_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
&dev_attr_gt_RP0_freq_mhz.attr,
@@ -524,8 +520,8 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct drm_device *dev = &dev_priv->drm;
struct i915_error_state_file_priv error_priv;
struct drm_i915_error_state_buf error_str;
ssize_t ret_count = 0;
@@ -559,18 +555,10 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
loff_t off, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_minor *minor = dev_to_drm_minor(kdev);
- struct drm_device *dev = minor->dev;
- int ret;
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
DRM_DEBUG_DRIVER("Resetting error state\n");
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- i915_destroy_error_state(dev);
- mutex_unlock(&dev->struct_mutex);
+ i915_destroy_error_state(&dev_priv->drm);
return count;
}
@@ -583,37 +571,38 @@ static struct bin_attribute error_state_attr = {
.write = error_state_write,
};
-void i915_setup_sysfs(struct drm_device *dev)
+void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
+ struct device *kdev = dev_priv->drm.primary->kdev;
int ret;
#ifdef CONFIG_PM
- if (HAS_RC6(dev)) {
- ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ if (HAS_RC6(dev_priv)) {
+ ret = sysfs_merge_group(&kdev->kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
- if (HAS_RC6p(dev)) {
- ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ if (HAS_RC6p(dev_priv)) {
+ ret = sysfs_merge_group(&kdev->kobj,
&rc6p_attr_group);
if (ret)
DRM_ERROR("RC6p residency sysfs setup failed\n");
}
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ ret = sysfs_merge_group(&kdev->kobj,
&media_rc6_attr_group);
if (ret)
DRM_ERROR("Media RC6 residency sysfs setup failed\n");
}
#endif
- if (HAS_L3_DPF(dev)) {
- ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
+ if (HAS_L3_DPF(dev_priv)) {
+ ret = device_create_bin_file(kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
- if (NUM_L3_SLICES(dev) > 1) {
- ret = device_create_bin_file(dev->primary->kdev,
+ if (NUM_L3_SLICES(dev_priv) > 1) {
+ ret = device_create_bin_file(kdev,
&dpf_attrs_1);
if (ret)
DRM_ERROR("l3 parity slice 1 setup failed\n");
@@ -621,30 +610,32 @@ void i915_setup_sysfs(struct drm_device *dev)
}
ret = 0;
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
- else if (INTEL_INFO(dev)->gen >= 6)
- ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
- ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
+ ret = sysfs_create_bin_file(&kdev->kobj,
&error_state_attr);
if (ret)
DRM_ERROR("error_state sysfs setup failed\n");
}
-void i915_teardown_sysfs(struct drm_device *dev)
+void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
- sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
+ struct device *kdev = dev_priv->drm.primary->kdev;
+
+ sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ sysfs_remove_files(&kdev->kobj, vlv_attrs);
else
- sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
- device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
- device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
+ sysfs_remove_files(&kdev->kobj, gen6_attrs);
+ device_remove_bin_file(kdev, &dpf_attrs_1);
+ device_remove_bin_file(kdev, &dpf_attrs);
#ifdef CONFIG_PM
- sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
- sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
+ sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
+ sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
}
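
The sysfs hunks above replace the repeated kdev -> minor -> drm_device -> i915 dance with one helper. A minimal sketch of that helper, assuming (as drm_sysfs does) that the drm_minor is stored as the device's driver data:

static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
	/* drm_sysfs stores the drm_minor as drvdata on the sysfs device */
	struct drm_minor *minor = dev_get_drvdata(kdev);

	return to_i915(minor->dev);
}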
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 534154e05fbe..178798002a73 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -394,25 +394,27 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
);
TRACE_EVENT(i915_gem_evict,
- TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
- TP_ARGS(dev, size, align, flags),
+ TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
+ TP_ARGS(vm, size, align, flags),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(struct i915_address_space *, vm)
__field(u32, size)
__field(u32, align)
- __field(unsigned, flags)
+ __field(unsigned int, flags)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = vm->dev->primary->index;
+ __entry->vm = vm;
__entry->size = size;
__entry->align = align;
__entry->flags = flags;
),
- TP_printk("dev=%d, size=%d, align=%d %s",
- __entry->dev, __entry->size, __entry->align,
+ TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
+ __entry->dev, __entry->vm, __entry->size, __entry->align,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);
@@ -449,10 +451,9 @@ TRACE_EVENT(i915_gem_evict_vm,
);
TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct drm_i915_gem_request *to_req,
- struct intel_engine_cs *from,
- struct drm_i915_gem_request *req),
- TP_ARGS(to_req, from, req),
+ TP_PROTO(struct drm_i915_gem_request *to,
+ struct drm_i915_gem_request *from),
+ TP_ARGS(to, from),
TP_STRUCT__entry(
__field(u32, dev)
@@ -463,9 +464,9 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_fast_assign(
__entry->dev = from->i915->drm.primary->index;
- __entry->sync_from = from->id;
- __entry->sync_to = to_req->engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->sync_from = from->engine->id;
+ __entry->sync_to = to->engine->id;
+ __entry->seqno = from->fence.seqno;
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -488,9 +489,9 @@ TRACE_EVENT(i915_gem_ring_dispatch,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->seqno;
+ __entry->seqno = req->fence.seqno;
__entry->flags = flags;
- intel_engine_enable_signaling(req);
+ fence_enable_sw_signaling(&req->fence);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -533,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->seqno;
+ __entry->seqno = req->fence.seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -595,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->seqno;
+ __entry->seqno = req->fence.seqno;
__entry->blocking =
mutex_is_locked(&req->i915->drm.struct_mutex);
),
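
Note how the dispatch tracepoint now arms signaling through the generic fence API instead of calling the engine helper directly; this relies on the request's fence ops routing .enable_signaling back into the breadcrumbs code. A sketch of that glue under the struct fence API of this kernel generation (the hook names are assumed, not quoted from the patch):

static bool i915_fence_enable_signaling(struct fence *fence)
{
	/* fence_enable_sw_signaling() invokes this with fence->lock held */
	struct drm_i915_gem_request *req =
		container_of(fence, struct drm_i915_gem_request, fence);

	intel_engine_enable_signaling(req);
	return true;
}

static const struct fence_ops i915_fence_ops = {
	/* get_driver_name/get_timeline_name/wait/release omitted */
	.enable_signaling = i915_fence_enable_signaling,
};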
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index b81cfb3b22ec..dae340cfc6c7 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -94,6 +94,7 @@ static struct _balloon_info_ bl_info;
/**
* intel_vgt_deballoon - deballoon reserved graphics address trunks
+ * @dev_priv: i915 device private data
*
* This function is called to deallocate the ballooned-out graphic memory, when
* driver is unloaded or when ballooning fails.
@@ -135,7 +136,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
/**
* intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev: drm device
+ * @dev_priv: i915 device private data
*
* This function is called at the initialization stage, to balloon out the
* graphic address space allocated to other vGPUs, by marking these spaces as
@@ -152,27 +153,27 @@ static int vgt_balloon_space(struct drm_mm *mm,
* host point of view, the graphic address space is partitioned by multiple
* vGPUs in different VMs. ::
*
- * vGPU1 view Host view
- * 0 ------> +-----------+ +-----------+
- * ^ |###########| | vGPU3 |
- * | |###########| +-----------+
- * | |###########| | vGPU2 |
- * | +-----------+ +-----------+
- * mappable GM | available | ==> | vGPU1 |
- * | +-----------+ +-----------+
- * | |###########| | |
- * v |###########| | Host |
- * +=======+===========+ +===========+
- * ^ |###########| | vGPU3 |
- * | |###########| +-----------+
- * | |###########| | vGPU2 |
- * | +-----------+ +-----------+
- * unmappable GM | available | ==> | vGPU1 |
- * | +-----------+ +-----------+
- * | |###########| | |
- * | |###########| | Host |
- * v |###########| | |
- * total GM size ------> +-----------+ +-----------+
+ * vGPU1 view Host view
+ * 0 ------> +-----------+ +-----------+
+ * ^ |###########| | vGPU3 |
+ * | |###########| +-----------+
+ * | |###########| | vGPU2 |
+ * | +-----------+ +-----------+
+ * mappable GM | available | ==> | vGPU1 |
+ * | +-----------+ +-----------+
+ * | |###########| | |
+ * v |###########| | Host |
+ * +=======+===========+ +===========+
+ * ^ |###########| | vGPU3 |
+ * | |###########| +-----------+
+ * | |###########| | vGPU2 |
+ * | +-----------+ +-----------+
+ * unmappable GM | available | ==> | vGPU1 |
+ * | +-----------+ +-----------+
+ * | |###########| | |
+ * | |###########| | Host |
+ * v |###########| | |
+ * total GM size ------> +-----------+ +-----------+
*
* Returns:
* zero on success, non-zero if configuration invalid or ballooning failed
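
Ballooning amounts to reserving the other VMs' ranges in the GGTT's drm_mm so ordinary allocations can never land there. A minimal sketch of claiming one such range, with a hypothetical helper name and assuming the caller owns the node storage:

static int balloon_one_range(struct drm_mm *mm, struct drm_mm_node *node,
			     u64 start, u64 end)
{
	if (start >= end)
		return -EINVAL;

	memset(node, 0, sizeof(*node));
	node->start = start;
	node->size = end - start;

	/* mark [start, end) allocated; released again on deballoon */
	return drm_mm_reserve_node(mm, node);
}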
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 7de7721f65bc..b82de3072d4f 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -55,7 +55,7 @@ intel_create_plane_state(struct drm_plane *plane)
return NULL;
state->base.plane = plane;
- state->base.rotation = BIT(DRM_ROTATE_0);
+ state->base.rotation = DRM_ROTATE_0;
state->ckey.flags = I915_SET_COLORKEY_NONE;
return state;
@@ -134,20 +134,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
crtc_state = to_intel_crtc_state(drm_crtc_state);
- /*
- * The original src/dest coordinates are stored in state->base, but
- * we want to keep another copy internal to our driver that we can
- * clip/modify ourselves.
- */
- intel_state->src.x1 = state->src_x;
- intel_state->src.y1 = state->src_y;
- intel_state->src.x2 = state->src_x + state->src_w;
- intel_state->src.y2 = state->src_y + state->src_h;
- intel_state->dst.x1 = state->crtc_x;
- intel_state->dst.y1 = state->crtc_y;
- intel_state->dst.x2 = state->crtc_x + state->crtc_w;
- intel_state->dst.y2 = state->crtc_y + state->crtc_h;
-
/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
@@ -157,6 +143,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+ char *format_name;
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
@@ -171,8 +158,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
switch (state->fb->pixel_format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
- DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(state->fb->pixel_format));
+ format_name = drm_get_format_name(state->fb->pixel_format);
+ DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", format_name);
+ kfree(format_name);
return -EINVAL;
default:
@@ -180,7 +168,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
}
}
- intel_state->visible = false;
+ intel_state->base.visible = false;
ret = intel_plane->check_plane(plane, crtc_state, intel_state);
if (ret)
return ret;
@@ -196,7 +184,7 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
to_intel_plane_state(plane->state);
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
- if (intel_state->visible)
+ if (intel_state->base.visible)
intel_plane->update_plane(plane,
to_intel_crtc_state(crtc->state),
intel_state);
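
One detail behind the format-name hunk: in this kernel drm_get_format_name() returns a kmalloc()ed string, so logging it inline leaks the buffer. The pattern every call site needs, sketched with a hypothetical fb:

char *format_name = drm_get_format_name(fb->pixel_format);

DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
kfree(format_name);	/* the returned string is allocated, not static */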
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d32f586f9c05..6c70a5bfd7d8 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -51,10 +51,10 @@
* related registers. (The notable exception is the power management, not
* covered here.)
*
- * The struct i915_audio_component is used to interact between the graphics
- * and audio drivers. The struct i915_audio_component_ops *ops in it is
+ * The struct &i915_audio_component is used to interact between the graphics
+ * and audio drivers. The struct &i915_audio_component_ops @ops in it is
* defined in graphics driver and called in audio driver. The
- * struct i915_audio_component_audio_ops *audio_ops is called from i915 driver.
+ * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver.
*/
static const struct {
@@ -359,9 +359,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
- enum port port = intel_dig_port->port;
+ enum port port = enc_to_dig_port(&encoder->base)->port;
enum pipe pipe = intel_crtc->pipe;
uint32_t tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
@@ -407,13 +405,10 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
- enum port port = intel_dig_port->port;
+ enum port port = enc_to_dig_port(&encoder->base)->port;
enum pipe pipe = intel_crtc->pipe;
uint8_t *eld = connector->eld;
- uint32_t eldv;
- uint32_t tmp;
+ uint32_t tmp, eldv;
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
@@ -581,26 +576,26 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
}
}
-static void i915_audio_component_get_power(struct device *dev)
+static void i915_audio_component_get_power(struct device *kdev)
{
- intel_display_power_get(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+ intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
}
-static void i915_audio_component_put_power(struct device *dev)
+static void i915_audio_component_put_power(struct device *kdev)
{
- intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
+ intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
}
-static void i915_audio_component_codec_wake_override(struct device *dev,
+static void i915_audio_component_codec_wake_override(struct device *kdev,
bool enable)
{
- struct drm_i915_private *dev_priv = dev_to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
u32 tmp;
if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
return;
- i915_audio_component_get_power(dev);
+ i915_audio_component_get_power(kdev);
/*
* Enable/disable generating the codec wake signal, overriding the
@@ -618,13 +613,13 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
usleep_range(1000, 1500);
}
- i915_audio_component_put_power(dev);
+ i915_audio_component_put_power(kdev);
}
/* Get CDCLK in kHz */
-static int i915_audio_component_get_cdclk_freq(struct device *dev)
+static int i915_audio_component_get_cdclk_freq(struct device *kdev)
{
- struct drm_i915_private *dev_priv = dev_to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV;
@@ -632,10 +627,10 @@ static int i915_audio_component_get_cdclk_freq(struct device *dev)
return dev_priv->cdclk_freq;
}
-static int i915_audio_component_sync_audio_rate(struct device *dev,
+static int i915_audio_component_sync_audio_rate(struct device *kdev,
int port, int rate)
{
- struct drm_i915_private *dev_priv = dev_to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
struct intel_crtc *crtc;
struct drm_display_mode *mode;
@@ -652,7 +647,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
!IS_HASWELL(dev_priv))
return 0;
- i915_audio_component_get_power(dev);
+ i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
intel_encoder = dev_priv->dig_port_map[port];
@@ -703,15 +698,15 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
unlock:
mutex_unlock(&dev_priv->av_mutex);
- i915_audio_component_put_power(dev);
+ i915_audio_component_put_power(kdev);
return err;
}
-static int i915_audio_component_get_eld(struct device *dev, int port,
+static int i915_audio_component_get_eld(struct device *kdev, int port,
bool *enabled,
unsigned char *buf, int max_bytes)
{
- struct drm_i915_private *dev_priv = dev_to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
const u8 *eld;
@@ -745,11 +740,11 @@ static const struct i915_audio_component_ops i915_audio_component_ops = {
.get_eld = i915_audio_component_get_eld,
};
-static int i915_audio_component_bind(struct device *i915_dev,
- struct device *hda_dev, void *data)
+static int i915_audio_component_bind(struct device *i915_kdev,
+ struct device *hda_kdev, void *data)
{
struct i915_audio_component *acomp = data;
- struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
int i;
if (WARN_ON(acomp->ops || acomp->dev))
@@ -757,7 +752,7 @@ static int i915_audio_component_bind(struct device *i915_dev,
drm_modeset_lock_all(&dev_priv->drm);
acomp->ops = &i915_audio_component_ops;
- acomp->dev = i915_dev;
+ acomp->dev = i915_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
@@ -767,11 +762,11 @@ static int i915_audio_component_bind(struct device *i915_dev,
return 0;
}
-static void i915_audio_component_unbind(struct device *i915_dev,
- struct device *hda_dev, void *data)
+static void i915_audio_component_unbind(struct device *i915_kdev,
+ struct device *hda_kdev, void *data)
{
struct i915_audio_component *acomp = data;
- struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_modeset_lock_all(&dev_priv->drm);
acomp->ops = NULL;
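
The bind/unbind pair above is driven by the kernel component framework: the HDA driver acts as the aggregate master and i915 registers itself as a component of its own device. A sketch of the i915-side registration, assuming the stock component_add() API (the init function shown is illustrative, not quoted from the patch):

static const struct component_ops i915_audio_component_bind_ops = {
	.bind	= i915_audio_component_bind,
	.unbind	= i915_audio_component_unbind,
};

/* called once during driver load */
void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
	if (component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops))
		DRM_ERROR("failed to add audio component\n");
}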
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c6e69e4cfa83..cf2560708e03 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1031,6 +1031,77 @@ static u8 translate_iboost(u8 val)
return mapping[val];
}
+static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port p;
+
+ if (!info->alternate_ddc_pin)
+ return;
+
+ for_each_port_masked(p, (1 << port) - 1) {
+ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+ if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+ continue;
+
+ DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(p), i->alternate_ddc_pin,
+ port_name(port), port_name(p));
+
+ /*
+		 * If we have multiple ports supposedly sharing the
+		 * pin, then DVI/HDMI couldn't exist on the shared
+		 * port: they would share the same DDC pin and the
+		 * system couldn't communicate with them separately.
+ *
+ * Due to parsing the ports in alphabetical order,
+ * a higher port will always clobber a lower one.
+ */
+ i->supports_dvi = false;
+ i->supports_hdmi = false;
+ i->alternate_ddc_pin = 0;
+ }
+}
+
+static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port p;
+
+ if (!info->alternate_aux_channel)
+ return;
+
+ for_each_port_masked(p, (1 << port) - 1) {
+ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+ if (info->alternate_aux_channel != i->alternate_aux_channel)
+ continue;
+
+ DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(p), i->alternate_aux_channel,
+ port_name(port), port_name(p));
+
+ /*
+		 * If we have multiple ports supposedly sharing the
+		 * AUX channel, then DP couldn't exist on the shared
+		 * port: they would share the same AUX channel and
+		 * the system couldn't communicate with them separately.
+ *
+ * Due to parsing the ports in alphabetical order,
+ * a higher port will always clobber a lower one.
+ */
+ i->supports_dp = false;
+ i->alternate_aux_channel = 0;
+ }
+}
+
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb)
{
@@ -1072,7 +1143,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
- aux_channel = child->raw[25];
+ aux_channel = child->common.aux_channel;
ddc_pin = child->common.ddc_pin;
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
@@ -1105,54 +1176,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
- if (port == PORT_E) {
- info->alternate_ddc_pin = ddc_pin;
- /* if DDIE share ddc pin with other port, then
- * dvi/hdmi couldn't exist on the shared port.
- * Otherwise they share the same ddc bin and system
- * couldn't communicate with them seperately. */
- if (ddc_pin == DDC_PIN_B) {
- dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
- } else if (ddc_pin == DDC_PIN_C) {
- dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
- } else if (ddc_pin == DDC_PIN_D) {
- dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
- }
- } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
- DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
- else if (ddc_pin == DDC_PIN_C && port != PORT_C)
- DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
- else if (ddc_pin == DDC_PIN_D && port != PORT_D)
- DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+ info->alternate_ddc_pin = ddc_pin;
+
+ sanitize_ddc_pin(dev_priv, port);
}
if (is_dp) {
- if (port == PORT_E) {
- info->alternate_aux_channel = aux_channel;
- /* if DDIE share aux channel with other port, then
- * DP couldn't exist on the shared port. Otherwise
- * they share the same aux channel and system
- * couldn't communicate with them seperately. */
- if (aux_channel == DP_AUX_A)
- dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
- else if (aux_channel == DP_AUX_B)
- dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
- else if (aux_channel == DP_AUX_C)
- dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
- else if (aux_channel == DP_AUX_D)
- dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
- }
- else if (aux_channel == DP_AUX_A && port != PORT_A)
- DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
- else if (aux_channel == DP_AUX_B && port != PORT_B)
- DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
- else if (aux_channel == DP_AUX_C && port != PORT_C)
- DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
- else if (aux_channel == DP_AUX_D && port != PORT_D)
- DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+ info->alternate_aux_channel = aux_channel;
+
+ sanitize_aux_ch(dev_priv, port);
}
if (bdb->version >= 158) {
@@ -1641,7 +1673,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
-bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
+ enum port port)
{
static const struct {
u16 dp, hdmi;
@@ -1655,22 +1688,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
};
- int i;
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
return false;
- if (!dev_priv->vbt.child_dev_num)
+ if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
+ if (p_child->common.dvo_port == port_mapping[port].dp)
+ return true;
+
+ /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
+ if (p_child->common.dvo_port == port_mapping[port].hdmi &&
+ p_child->common.aux_channel != 0)
+ return true;
+
+ return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
- if ((p_child->common.dvo_port == port_mapping[port].dp ||
- p_child->common.dvo_port == port_mapping[port].hdmi) &&
- (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
- (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ if (child_dev_is_dp_dual_mode(p_child, port))
return true;
}
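
The trick in sanitize_ddc_pin()/sanitize_aux_ch() is the mask: (1 << port) - 1 sets a bit for every port below the one being parsed, so each sanitizer only compares against already-parsed ports and, since ports are parsed alphabetically, the higher port always wins. A sketch of the iterator these helpers assume, modeled on i915's usual for_each_if style:

/* visit each port whose bit is set in __ports_mask */
#define for_each_port_masked(__port, __ports_mask)			\
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)	\
		for_each_if((__ports_mask) & BIT(__port))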
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index b074f3d6d127..495611b7068d 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -26,6 +26,40 @@
#include "i915_drv.h"
+static void intel_breadcrumbs_hangcheck(unsigned long data)
+{
+ struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ if (!b->irq_enabled)
+ return;
+
+ if (time_before(jiffies, b->timeout)) {
+ mod_timer(&b->hangcheck, b->timeout);
+ return;
+ }
+
+ DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
+ set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+ mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+
+ /* Ensure that even if the GPU hangs, we get woken up.
+ *
+ * However, note that if no one is waiting, we never notice
+ * a gpu hang. Eventually, we will have to wait for a resource
+ * held by the GPU and so trigger a hangcheck. In the most
+ * pathological case, this will be upon memory starvation! To
+ * prevent this, we also queue the hangcheck from the retire
+ * worker.
+ */
+ i915_queue_hangcheck(engine->i915);
+}
+
+static unsigned long wait_timeout(void)
+{
+ return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
+}
+
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
@@ -37,10 +71,8 @@ static void intel_breadcrumbs_fake_irq(unsigned long data)
* every jiffie in order to kick the oldest waiter to do the
* coherent seqno check.
*/
- rcu_read_lock();
if (intel_engine_wakeup(engine))
mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
- rcu_read_unlock();
}
static void irq_enable(struct intel_engine_cs *engine)
@@ -91,17 +123,13 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
}
if (!b->irq_enabled ||
- test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
+ test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
mod_timer(&b->fake_irq, jiffies + 1);
-
- /* Ensure that even if the GPU hangs, we get woken up.
- *
- * However, note that if no one is waiting, we never notice
- * a gpu hang. Eventually, we will have to wait for a resource
- * held by the GPU and so trigger a hangcheck. In the most
- * pathological case, this will be upon memory starvation!
- */
- i915_queue_hangcheck(i915);
+ } else {
+ /* Ensure we never sleep indefinitely */
+ GEM_BUG_ON(!time_after(b->timeout, jiffies));
+ mod_timer(&b->hangcheck, b->timeout);
+ }
}
static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
@@ -204,7 +232,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
}
rb_link_node(&wait->node, parent, p);
rb_insert_color(&wait->node, &b->waiters);
- GEM_BUG_ON(!first && !b->irq_seqno_bh);
+ GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));
if (completed) {
struct rb_node *next = rb_next(completed);
@@ -212,8 +240,9 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(!next && !first);
if (next && next != &wait->node) {
GEM_BUG_ON(first);
+ b->timeout = wait_timeout();
b->first_wait = to_wait(next);
- smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+ rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
/* As there is a delay between reading the current
* seqno, processing the completed tasks and selecting
* the next waiter, we may have missed the interrupt
@@ -238,8 +267,9 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
if (first) {
GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
+ b->timeout = wait_timeout();
b->first_wait = wait;
- smp_store_mb(b->irq_seqno_bh, wait->tsk);
+ rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
/* After assigning ourselves as the new bottom-half, we must
* perform a cursory check to prevent a missed interrupt.
* Either we miss the interrupt whilst programming the hardware,
@@ -250,7 +280,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
*/
__intel_breadcrumbs_enable_irq(b);
}
- GEM_BUG_ON(!b->irq_seqno_bh);
+ GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
GEM_BUG_ON(!b->first_wait);
GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
@@ -270,11 +300,6 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
return first;
}
-void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
-{
- mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
-}
-
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
return rb && to_wait(rb)->tsk->prio <= priority;
@@ -310,7 +335,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
const int priority = wakeup_priority(b, wait->tsk);
struct rb_node *next;
- GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
+ GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);
/* We are the current bottom-half. Find the next candidate,
* the first waiter in the queue on the remaining oldest
@@ -352,14 +377,15 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
* the interrupt, or if we have to handle an
* exception rather than a seqno completion.
*/
+ b->timeout = wait_timeout();
b->first_wait = to_wait(next);
- smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+ rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
if (b->first_wait->seqno != wait->seqno)
__intel_breadcrumbs_enable_irq(b);
- wake_up_process(b->irq_seqno_bh);
+ wake_up_process(b->first_wait->tsk);
} else {
b->first_wait = NULL;
- WRITE_ONCE(b->irq_seqno_bh, NULL);
+ rcu_assign_pointer(b->irq_seqno_bh, NULL);
__intel_breadcrumbs_disable_irq(b);
}
} else {
@@ -373,7 +399,7 @@ out_unlock:
GEM_BUG_ON(b->first_wait == wait);
GEM_BUG_ON(rb_first(&b->waiters) !=
(b->first_wait ? &b->first_wait->node : NULL));
- GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
+ GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
spin_unlock(&b->lock);
}
@@ -437,6 +463,10 @@ static int intel_breadcrumbs_signaler(void *arg)
intel_engine_remove_wait(engine,
&request->signaling.wait);
+ local_bh_disable();
+ fence_signal(&request->fence);
+ local_bh_enable(); /* kick start the tasklets */
+
/* Find the next oldest signal. Note that as we have
* not been holding the lock, another client may
* have installed an even older signal than the one
@@ -452,7 +482,7 @@ static int intel_breadcrumbs_signaler(void *arg)
rb_erase(&request->signaling.node, &b->signals);
spin_unlock(&b->lock);
- i915_gem_request_unreference(request);
+ i915_gem_request_put(request);
} else {
if (kthread_should_stop())
break;
@@ -472,18 +502,14 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct rb_node *parent, **p;
bool first, wakeup;
- if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
- return;
-
- spin_lock(&b->lock);
- if (unlikely(request->signaling.wait.tsk)) {
- wakeup = false;
- goto unlock;
- }
+ /* locked by fence_enable_sw_signaling() */
+ assert_spin_locked(&request->lock);
request->signaling.wait.tsk = b->signaler;
- request->signaling.wait.seqno = request->seqno;
- i915_gem_request_reference(request);
+ request->signaling.wait.seqno = request->fence.seqno;
+ i915_gem_request_get(request);
+
+ spin_lock(&b->lock);
/* First add ourselves into the list of waiters, but register our
* bottom-half as the signaller thread. As per usual, only the oldest
@@ -504,8 +530,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
p = &b->signals.rb_node;
while (*p) {
parent = *p;
- if (i915_seqno_passed(request->seqno,
- to_signaler(parent)->seqno)) {
+ if (i915_seqno_passed(request->fence.seqno,
+ to_signaler(parent)->fence.seqno)) {
p = &parent->rb_right;
first = false;
} else {
@@ -517,7 +543,6 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
if (first)
smp_store_mb(b->first_signal, request);
-unlock:
spin_unlock(&b->lock);
if (wakeup)
@@ -533,6 +558,9 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
setup_timer(&b->fake_irq,
intel_breadcrumbs_fake_irq,
(unsigned long)engine);
+ setup_timer(&b->hangcheck,
+ intel_breadcrumbs_hangcheck,
+ (unsigned long)engine);
/* Spawn a thread to provide a common bottom-half for all signals.
* As this is an asynchronous interface we cannot steal the current
@@ -550,6 +578,36 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
return 0;
}
+static void cancel_fake_irq(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ del_timer_sync(&b->hangcheck);
+ del_timer_sync(&b->fake_irq);
+ clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+}
+
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ cancel_fake_irq(engine);
+ spin_lock(&b->lock);
+
+ __intel_breadcrumbs_disable_irq(b);
+ if (intel_engine_has_waiter(engine)) {
+ b->timeout = wait_timeout();
+ __intel_breadcrumbs_enable_irq(b);
+ if (READ_ONCE(b->irq_posted))
+ wake_up_process(b->first_wait->tsk);
+ } else {
+ /* sanitize the IMR and unmask any auxiliary interrupts */
+ irq_disable(engine);
+ }
+
+ spin_unlock(&b->lock);
+}
+
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
@@ -557,7 +615,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
if (!IS_ERR_OR_NULL(b->signaler))
kthread_stop(b->signaler);
- del_timer_sync(&b->fake_irq);
+ cancel_fake_irq(engine);
}
unsigned int intel_kick_waiters(struct drm_i915_private *i915)
@@ -570,11 +628,9 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915)
* RCU lock, i.e. as we call wake_up_process() we must be holding the
* rcu_read_lock().
*/
- rcu_read_lock();
for_each_engine(engine, i915)
if (unlikely(intel_engine_wakeup(engine)))
mask |= intel_engine_flag(engine);
- rcu_read_unlock();
return mask;
}
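
Both rcu_read_lock()/rcu_read_unlock() removals above work because irq_seqno_bh is now an RCU-protected pointer and the wakeup helper takes the read lock itself. A sketch of what intel_engine_wakeup() looks like under that scheme, assuming the field is __rcu annotated:

static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct task_struct *tsk;
	bool wakeup = false;

	rcu_read_lock();
	tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
	if (tsk)
		wakeup = wake_up_process(tsk);
	rcu_read_unlock();

	return wakeup;
}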
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index bc0fef3d3335..95a72771eea6 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -100,13 +100,14 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
+ struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
if (crtc_state->ctm) {
struct drm_color_ctm *ctm =
(struct drm_color_ctm *)crtc_state->ctm->data;
uint64_t input[9] = { 0, };
- if (intel_crtc->config->limited_color_range) {
+ if (intel_crtc_state->limited_color_range) {
ctm_mult_by_limited(input, ctm->matrix);
} else {
for (i = 0; i < ARRAY_SIZE(input); i++)
@@ -158,7 +159,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
* into consideration.
*/
for (i = 0; i < 3; i++) {
- if (intel_crtc->config->limited_color_range)
+ if (intel_crtc_state->limited_color_range)
coeffs[i * 3 + i] =
I9XX_CSC_COEFF_LIMITED_RANGE;
else
@@ -182,7 +183,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
if (INTEL_INFO(dev)->gen > 6) {
uint16_t postoff = 0;
- if (intel_crtc->config->limited_color_range)
+ if (intel_crtc_state->limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
@@ -193,7 +194,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
- if (intel_crtc->config->limited_color_range)
+ if (intel_crtc_state->limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
@@ -263,7 +264,8 @@ void intel_color_set_csc(struct drm_crtc_state *crtc_state)
/* Loads the legacy palette/gamma unit for the CRTC. */
static void i9xx_load_luts_internal(struct drm_crtc *crtc,
- struct drm_property_blob *blob)
+ struct drm_property_blob *blob,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -272,7 +274,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
int i;
if (HAS_GMCH_DISPLAY(dev)) {
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -305,7 +307,8 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
{
- i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
+ i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
+ to_intel_crtc_state(crtc_state));
}
/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
@@ -323,7 +326,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
+ if (IS_HASWELL(dev) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
reenable_ips = true;
@@ -436,7 +439,8 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
/* Turn off degamma/gamma on CGM block. */
I915_WRITE(CGM_PIPE_MODE(pipe),
(state->ctm ? CGM_PIPE_MODE_CSC : 0));
- i9xx_load_luts_internal(crtc, state->gamma_lut);
+ i9xx_load_luts_internal(crtc, state->gamma_lut,
+ to_intel_crtc_state(state));
return;
}
@@ -479,7 +483,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
* Also program a linear LUT in the legacy block (behind the
* CGM block).
*/
- i9xx_load_luts_internal(crtc, NULL);
+ i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
}
void intel_color_load_luts(struct drm_crtc_state *crtc_state)
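
The intel_color.c hunks stop peeking at intel_crtc->config (the legacy current-state pointer) and instead upcast the drm_crtc_state they were handed, i.e. the state actually being committed. The upcasts assumed throughout are the usual container_of wrappers, sketched here:

#define to_intel_crtc(x) \
	container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) \
	container_of(x, struct intel_crtc_state, base)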
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 827b6ef4e9ae..dfbcf16b41df 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -143,13 +143,15 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
-static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+static void intel_crt_set_dpms(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int mode)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
u32 adpa;
if (INTEL_INFO(dev)->gen >= 5)
@@ -193,23 +195,45 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
I915_WRITE(crt->adpa_reg, adpa);
}
-static void intel_disable_crt(struct intel_encoder *encoder)
+static void intel_disable_crt(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
- intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+ intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
}
-static void pch_disable_crt(struct intel_encoder *encoder)
+static void pch_disable_crt(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
}
-static void pch_post_disable_crt(struct intel_encoder *encoder)
+static void pch_post_disable_crt(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
- intel_disable_crt(encoder);
+ intel_disable_crt(encoder, old_crtc_state, old_conn_state);
}
-static void intel_enable_crt(struct intel_encoder *encoder)
+static void hsw_post_disable_crt(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
- intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
+
+ lpt_disable_pch_transcoder(dev_priv);
+ lpt_disable_iclkip(dev_priv);
+
+ intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
}
static enum drm_mode_status
@@ -253,7 +277,8 @@ intel_crt_mode_valid(struct drm_connector *connector,
}
static bool intel_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
@@ -894,6 +919,7 @@ void intel_crt_init(struct drm_device *dev)
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
+ crt->base.post_disable = hsw_post_disable_crt;
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index c3b33a10c15c..1ea0e1f43397 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -32,13 +32,6 @@
* onwards to drive newly added DMC (Display microcontroller) in display
* engine to save and restore the state of display engine when it enter into
* low-power state and comes back to normal.
- *
- * Firmware loading status will be one of the below states: FW_UNINITIALIZED,
- * FW_LOADED, FW_FAILED.
- *
- * Once the firmware is written into the registers status will be moved from
- * FW_UNINITIALIZED to FW_LOADED and for any erroneous condition status will
- * be moved to FW_FAILED.
*/
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 1a7efac65fd5..15d47c87def6 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,45 +301,34 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
{ 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
};
-static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type);
-
-static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
- struct intel_digital_port **dig_port,
- enum port *port)
+enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder)
{
- struct drm_encoder *encoder = &intel_encoder->base;
-
- switch (intel_encoder->type) {
+ switch (encoder->type) {
case INTEL_OUTPUT_DP_MST:
- *dig_port = enc_to_mst(encoder)->primary;
- *port = (*dig_port)->port;
- break;
- default:
- WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
- /* fallthrough and treat as unknown */
+ return enc_to_mst(&encoder->base)->primary->port;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_UNKNOWN:
- *dig_port = enc_to_dig_port(encoder);
- *port = (*dig_port)->port;
- break;
+ return enc_to_dig_port(&encoder->base)->port;
case INTEL_OUTPUT_ANALOG:
- *dig_port = NULL;
- *port = PORT_E;
- break;
+ return PORT_E;
+ default:
+ MISSING_CASE(encoder->type);
+ return PORT_A;
}
}
-enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+static const struct ddi_buf_trans *
+bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
- struct intel_digital_port *dig_port;
- enum port port;
-
- ddi_get_encoder_port(intel_encoder, &dig_port, &port);
-
- return port;
+ if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+ return bdw_ddi_translations_edp;
+ } else {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ return bdw_ddi_translations_dp;
+ }
}
static const struct ddi_buf_trans *
@@ -424,37 +413,22 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
- * values in advance. The buffer values are different for FDI and DP modes,
- * but the HDMI/DVI fields are shared among those. So we program the DDI
- * in either FDI or DP modes only, as HDMI connections will work with both
- * of those
+ * values in advance. This function programs the correct values for
+ * DP/eDP/FDI use cases.
*/
-void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
- int i, n_hdmi_entries, n_dp_entries, n_edp_entries,
- size;
- int hdmi_level;
- enum port port;
+ int i, n_dp_entries, n_edp_entries, size;
+ enum port port = intel_ddi_get_encoder_port(encoder);
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
const struct ddi_buf_trans *ddi_translations_edp;
- const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
- port = intel_ddi_get_encoder_port(encoder);
- hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
-
- if (IS_BROXTON(dev_priv)) {
- if (encoder->type != INTEL_OUTPUT_HDMI)
- return;
-
- /* Vswing programming for HDMI */
- bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
- INTEL_OUTPUT_HDMI);
+ if (IS_BROXTON(dev_priv))
return;
- }
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ddi_translations_fdi = NULL;
@@ -462,12 +436,10 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
ddi_translations_edp =
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
- ddi_translations_hdmi =
- skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+
/* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
- dev_priv->vbt.ddi_port_info[port].dp_boost_level)
- iboost_bit = 1<<31;
+ if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
port != PORT_A && port != PORT_E &&
@@ -476,35 +448,20 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
-
- if (dev_priv->vbt.edp.low_vswing) {
- ddi_translations_edp = bdw_ddi_translations_edp;
- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
- } else {
- ddi_translations_edp = bdw_ddi_translations_dp;
- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- }
-
- ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-
+ ddi_translations_edp = bdw_get_buf_trans_edp(dev_priv, &n_edp_entries);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
} else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
- ddi_translations_hdmi = hsw_ddi_translations_hdmi;
n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
- n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
- ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
}
switch (encoder->type) {
@@ -513,7 +470,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
size = n_edp_entries;
break;
case INTEL_OUTPUT_DP:
- case INTEL_OUTPUT_HDMI:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
break;
@@ -531,14 +487,48 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
I915_WRITE(DDI_BUF_TRANS_HI(port, i),
ddi_translations[i].trans2);
}
+}
+
+/*
+ * Starting with Haswell, DDI port buffers must be programmed with correct
+ * values in advance. This function programs the correct values for
+ * HDMI/DVI use cases.
+ */
+static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 iboost_bit = 0;
+ int n_hdmi_entries, hdmi_level;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ const struct ddi_buf_trans *ddi_translations_hdmi;
- if (encoder->type != INTEL_OUTPUT_HDMI)
+ if (IS_BROXTON(dev_priv))
return;
+ hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+ } else if (IS_BROADWELL(dev_priv)) {
+ ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ } else if (IS_HASWELL(dev_priv)) {
+ ddi_translations_hdmi = hsw_ddi_translations_hdmi;
+ n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+ } else {
+ WARN(1, "ddi translation table missing\n");
+ ddi_translations_hdmi = bdw_ddi_translations_hdmi;
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ }
+
/* Entry 9 is for HDMI: */
- I915_WRITE(DDI_BUF_TRANS_LO(port, i),
+ I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
- I915_WRITE(DDI_BUF_TRANS_HI(port, i),
+ I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
ddi_translations_hdmi[hdmi_level].trans2);
}
@@ -556,6 +546,27 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
}
+static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+{
+ switch (pll->id) {
+ case DPLL_ID_WRPLL1:
+ return PORT_CLK_SEL_WRPLL1;
+ case DPLL_ID_WRPLL2:
+ return PORT_CLK_SEL_WRPLL2;
+ case DPLL_ID_SPLL:
+ return PORT_CLK_SEL_SPLL;
+ case DPLL_ID_LCPLL_810:
+ return PORT_CLK_SEL_LCPLL_810;
+ case DPLL_ID_LCPLL_1350:
+ return PORT_CLK_SEL_LCPLL_1350;
+ case DPLL_ID_LCPLL_2700:
+ return PORT_CLK_SEL_LCPLL_2700;
+ default:
+ MISSING_CASE(pll->id);
+ return PORT_CLK_SEL_NONE;
+ }
+}
+
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
* both the DDI port and PCH receiver for the desired DDI buffer settings.
@@ -571,11 +582,11 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- u32 temp, i, rx_ctl_val;
+ u32 temp, i, rx_ctl_val, ddi_pll_sel;
for_each_encoder_on_crtc(dev, crtc, encoder) {
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
- intel_prepare_ddi_buffer(encoder);
+ intel_prepare_dp_ddi_buffers(encoder);
}
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
@@ -602,8 +613,9 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
- I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
- WARN_ON(intel_crtc->config->ddi_pll_sel != PORT_CLK_SEL_SPLL);
+ ddi_pll_sel = hsw_pll_to_ddi_pll_sel(intel_crtc->config->shared_dpll);
+ I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
+ WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
@@ -880,7 +892,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
int link_clock = 0;
uint32_t dpll_ctl1, dpll;
- dpll = pipe_config->ddi_pll_sel;
+ dpll = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
dpll_ctl1 = I915_READ(DPLL_CTRL1);
@@ -928,7 +940,7 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
int link_clock = 0;
u32 val, pll;
- val = pipe_config->ddi_pll_sel;
+ val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
switch (val & PORT_CLK_SEL_MASK) {
case PORT_CLK_SEL_LCPLL_810:
link_clock = 81000;
@@ -1136,7 +1148,6 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_crtc->pipe;
@@ -1202,29 +1213,15 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
-
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
-
} else if (type == INTEL_OUTPUT_DP ||
type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (intel_dp->is_mst) {
- temp |= TRANS_DDI_MODE_SELECT_DP_MST;
- } else
- temp |= TRANS_DDI_MODE_SELECT_DP_SST;
-
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
} else if (type == INTEL_OUTPUT_DP_MST) {
- struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
-
- if (intel_dp->is_mst) {
- temp |= TRANS_DDI_MODE_SELECT_DP_MST;
- } else
- temp |= TRANS_DDI_MODE_SELECT_DP_SST;
-
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
@@ -1611,13 +1608,15 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
}
void intel_ddi_clk_select(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config)
+ struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
+ if (WARN_ON(!pll))
+ return;
+
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- uint32_t dpll = pipe_config->ddi_pll_sel;
uint32_t val;
/* DDI -> PLL mapping */
@@ -1625,65 +1624,91 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
- val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+ val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
I915_WRITE(DPLL_CTRL2, val);
} else if (INTEL_INFO(dev_priv)->gen < 9) {
- WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
+ I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
}
-static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ int link_rate, uint32_t lane_count,
+ struct intel_shared_dpll *pll,
+ bool link_mst)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- int type = intel_encoder->type;
-
- if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-
- intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
- }
-
- intel_prepare_ddi_buffer(intel_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
- if (type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_dp_set_link_params(intel_dp, link_rate, lane_count,
+ link_mst);
+ if (encoder->type == INTEL_OUTPUT_EDP)
intel_edp_panel_on(intel_dp);
- }
-
- intel_ddi_clk_select(intel_encoder, crtc->config);
- if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_ddi_clk_select(encoder, pll);
+ intel_prepare_dp_ddi_buffers(encoder);
+ intel_ddi_init_dp_buf_reg(encoder);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_start_link_train(intel_dp);
+ if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+ intel_dp_stop_link_train(intel_dp);
+}
- intel_dp_set_link_params(intel_dp, crtc->config);
+static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+ bool has_hdmi_sink,
+ struct drm_display_mode *adjusted_mode,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_encoder *drm_encoder = &encoder->base;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ int level = intel_ddi_hdmi_level(dev_priv, port);
- intel_ddi_init_dp_buf_reg(intel_encoder);
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+ intel_ddi_clk_select(encoder, pll);
+ intel_prepare_hdmi_ddi_buffers(encoder);
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ skl_ddi_set_iboost(encoder, level);
+ else if (IS_BROXTON(dev_priv))
+ bxt_ddi_vswing_sequence(dev_priv, level, port,
+ INTEL_OUTPUT_HDMI);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
- intel_dp_start_link_train(intel_dp);
- if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
- intel_dp_stop_link_train(intel_dp);
- } else if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- int level = intel_ddi_hdmi_level(dev_priv, port);
+ intel_hdmi->set_infoframes(drm_encoder,
+ has_hdmi_sink,
+ adjusted_mode);
+}
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- skl_ddi_set_iboost(intel_encoder, level);
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ int type = intel_encoder->type;
- intel_hdmi->set_infoframes(encoder,
- crtc->config->has_hdmi_sink,
- &crtc->config->base.adjusted_mode);
+ if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
+ intel_ddi_pre_enable_dp(intel_encoder,
+ crtc->config->port_clock,
+ crtc->config->lane_count,
+ crtc->config->shared_dpll,
+ intel_crtc_has_type(crtc->config,
+ INTEL_OUTPUT_DP_MST));
+ }
+ if (type == INTEL_OUTPUT_HDMI) {
+ intel_ddi_pre_enable_hdmi(intel_encoder,
+ crtc->config->has_hdmi_sink,
+ &crtc->config->base.adjusted_mode,
+ crtc->config->shared_dpll);
}
}
-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
@@ -1693,6 +1718,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
uint32_t val;
bool wait = false;
+ /* old_crtc_state and old_conn_state are NULL when called from DP_MST */
+
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
@@ -1728,7 +1755,42 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
}
}
-static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ uint32_t val;
+
+ /*
+ * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+ * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+ * step 13 is the correct place for it. Step 18 is where it was
+ * originally before the BUN.
+ */
+ val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val &= ~FDI_RX_ENABLE;
+ I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+ intel_ddi_post_disable(intel_encoder, old_crtc_state, old_conn_state);
+
+ val = I915_READ(FDI_RX_MISC(PIPE_A));
+ val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+ val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+ I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+
+ val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val &= ~FDI_PCDCLK;
+ I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+
+ val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
@@ -1757,7 +1819,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
intel_edp_backlight_on(intel_dp);
intel_psr_enable(intel_dp);
- intel_edp_drrs_enable(intel_dp);
+ intel_edp_drrs_enable(intel_dp, pipe_config);
}
if (intel_crtc->config->has_audio) {
@@ -1766,7 +1828,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
}
}
-static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+static void intel_disable_ddi(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
@@ -1783,7 +1847,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- intel_edp_drrs_disable(intel_dp);
+ intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
@@ -2072,7 +2136,9 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
}
}
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
+static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
@@ -2144,7 +2210,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
val = DP_TP_CTL_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
- if (intel_dp->is_mst)
+ if (intel_dp->link_mst)
val |= DP_TP_CTL_MODE_MST;
else {
val |= DP_TP_CTL_MODE_SST;
@@ -2161,38 +2227,6 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600);
}
-void intel_ddi_fdi_disable(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- uint32_t val;
-
- /*
- * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
- * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
- * step 13 is the correct place for it. Step 18 is where it was
- * originally before the BUN.
- */
- val = I915_READ(FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
- intel_ddi_post_disable(intel_encoder);
-
- val = I915_READ(FDI_RX_MISC(PIPE_A));
- val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- I915_WRITE(FDI_RX_MISC(PIPE_A), val);
-
- val = I915_READ(FDI_RX_CTL(PIPE_A));
- val &= ~FDI_PCDCLK;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-
- val = I915_READ(FDI_RX_CTL(PIPE_A));
- val &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
-}
-
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2292,7 +2326,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
}
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int type = encoder->type;
@@ -2305,9 +2340,9 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (type == INTEL_OUTPUT_HDMI)
- ret = intel_hdmi_compute_config(encoder, pipe_config);
+ ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
else
- ret = intel_dp_compute_config(encoder, pipe_config);
+ ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
if (IS_BROXTON(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
@@ -2358,6 +2393,45 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
return connector;
}
+struct intel_shared_dpll *
+intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
+{
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = connector->encoder;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_shared_dpll *pll = NULL;
+ struct intel_shared_dpll_config tmp_pll_config;
+ enum intel_dpll_id dpll_id;
+
+ if (IS_BROXTON(dev_priv)) {
+ dpll_id = (enum intel_dpll_id)dig_port->port;
+ /*
+ * Select the required PLL. This works for platforms where
+ * there is no shared DPLL.
+ */
+ pll = &dev_priv->shared_dplls[dpll_id];
+ if (WARN_ON(pll->active_mask)) {
+ DRM_ERROR("Shared DPLL in use. active_mask:%x\n",
+ pll->active_mask);
+ return NULL;
+ }
+ tmp_pll_config = pll->config;
+ if (!bxt_ddi_dp_set_dpll_hw_state(clock,
+ &pll->config.hw_state)) {
+ DRM_ERROR("Could not setup DPLL\n");
+ pll->config = tmp_pll_config;
+ return NULL;
+ }
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ pll = skl_find_link_pll(dev_priv, clock);
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ pll = hsw_ddi_dp_get_dpll(encoder, clock);
+ }
+ return pll;
+}
+
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index cba137f9ad3e..1b20e160bc1f 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -46,71 +46,70 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
u32 fuse, eu_dis;
fuse = I915_READ(CHV_FUSE_GT);
- info->slice_total = 1;
+ sseu->slice_mask = BIT(0);
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
- info->subslice_per_slice++;
+ sseu->subslice_mask |= BIT(0);
eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
CHV_FGT_EU_DIS_SS0_R1_MASK);
- info->eu_total += 8 - hweight32(eu_dis);
+ sseu->eu_total += 8 - hweight32(eu_dis);
}
if (!(fuse & CHV_FGT_DISABLE_SS1)) {
- info->subslice_per_slice++;
+ sseu->subslice_mask |= BIT(1);
eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
CHV_FGT_EU_DIS_SS1_R1_MASK);
- info->eu_total += 8 - hweight32(eu_dis);
+ sseu->eu_total += 8 - hweight32(eu_dis);
}
- info->subslice_total = info->subslice_per_slice;
/*
* CHV expected to always have a uniform distribution of EU
* across subslices.
*/
- info->eu_per_subslice = info->subslice_total ?
- info->eu_total / info->subslice_total :
+ sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_total / sseu_subslice_total(sseu) :
0;
/*
* CHV supports subslice power gating on devices with more than
* one subslice, and supports EU power gating on devices with
* more than one EU pair per subslice.
*/
- info->has_slice_pg = 0;
- info->has_subslice_pg = (info->subslice_total > 1);
- info->has_eu_pg = (info->eu_per_subslice > 2);
+ sseu->has_slice_pg = 0;
+ sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
+ sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
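
sseu_subslice_total() is defined outside this hunk; a minimal sketch consistent with its uses here (enabled slices times enabled subslices per slice) would be:

static inline unsigned int
sseu_subslice_total(const struct sseu_dev_info *sseu)
{
	/* Each enabled slice carries the same subslice mask. */
	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}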
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct sseu_dev_info *sseu = &info->sseu;
int s_max = 3, ss_max = 4, eu_max = 8;
int s, ss;
- u32 fuse2, s_enable, ss_disable, eu_disable;
+ u32 fuse2, eu_disable;
u8 eu_mask = 0xff;
fuse2 = I915_READ(GEN8_FUSE2);
- s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+ sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- info->slice_total = hweight32(s_enable);
/*
* The subslice disable field is global, i.e. it applies
* to each of the enabled slices.
*/
- info->subslice_per_slice = ss_max - hweight32(ss_disable);
- info->subslice_total = info->slice_total * info->subslice_per_slice;
+ sseu->subslice_mask = (1 << ss_max) - 1;
+ sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT);
/*
* Iterate through enabled slices and subslices to
* count the total enabled EU.
*/
for (s = 0; s < s_max; s++) {
- if (!(s_enable & BIT(s)))
+ if (!(sseu->slice_mask & BIT(s)))
/* skip disabled slice */
continue;
@@ -118,7 +117,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
for (ss = 0; ss < ss_max; ss++) {
int eu_per_ss;
- if (ss_disable & BIT(ss))
+ if (!(sseu->subslice_mask & BIT(ss)))
/* skip disabled subslice */
continue;
@@ -131,9 +130,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
* subslices if they are unbalanced.
*/
if (eu_per_ss == 7)
- info->subslice_7eu[s] |= BIT(ss);
+ sseu->subslice_7eu[s] |= BIT(ss);
- info->eu_total += eu_per_ss;
+ sseu->eu_total += eu_per_ss;
}
}
@@ -144,9 +143,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
* recovery. BXT is expected to be perfectly uniform in EU
* distribution.
*/
- info->eu_per_subslice = info->subslice_total ?
- DIV_ROUND_UP(info->eu_total,
- info->subslice_total) : 0;
+ sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total,
+ sseu_subslice_total(sseu)) : 0;
/*
* SKL supports slice power gating on devices with more than
* one slice, and supports EU power gating on devices with
@@ -155,15 +154,15 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
* supports EU power gating on devices with more than one EU
* pair per subslice.
*/
- info->has_slice_pg =
+ sseu->has_slice_pg =
(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
- info->slice_total > 1;
- info->has_subslice_pg =
- IS_BROXTON(dev_priv) && info->subslice_total > 1;
- info->has_eu_pg = info->eu_per_subslice > 2;
+ hweight8(sseu->slice_mask) > 1;
+ sseu->has_subslice_pg =
+ IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
+ sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_BROXTON(dev_priv)) {
-#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & BIT(ss))
+#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask & BIT(ss)))
/*
* There is a HW issue in 2x6 fused down parts that requires
* Pooled EU to be enabled as a WA. The pool configuration
@@ -171,19 +170,18 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
* doesn't affect if the device has all 3 subslices enabled.
*/
/* WaEnablePooledEuFor2x6:bxt */
- info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
- (info->subslice_per_slice == 2 &&
+ info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
+ (hweight8(sseu->subslice_mask) == 2 &&
INTEL_REVID(dev_priv) < BXT_REVID_C0));
- info->min_eu_in_pool = 0;
+ sseu->min_eu_in_pool = 0;
if (info->has_pooled_eu) {
- if (IS_SS_DISABLED(ss_disable, 0) ||
- IS_SS_DISABLED(ss_disable, 2))
- info->min_eu_in_pool = 3;
- else if (IS_SS_DISABLED(ss_disable, 1))
- info->min_eu_in_pool = 6;
+ if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
+ sseu->min_eu_in_pool = 3;
+ else if (IS_SS_DISABLED(1))
+ sseu->min_eu_in_pool = 6;
else
- info->min_eu_in_pool = 9;
+ sseu->min_eu_in_pool = 9;
}
#undef IS_SS_DISABLED
}
@@ -191,14 +189,20 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
const int s_max = 3, ss_max = 3, eu_max = 8;
int s, ss;
- u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+ u32 fuse2, eu_disable[3]; /* s_max */
fuse2 = I915_READ(GEN8_FUSE2);
- s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+ sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ sseu->subslice_mask = BIT(ss_max) - 1;
+ sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
+ GEN8_F2_SS_DIS_SHIFT);
eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
@@ -208,28 +212,19 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
(32 - GEN8_EU_DIS1_S2_SHIFT));
- info->slice_total = hweight32(s_enable);
-
- /*
- * The subslice disable field is global, i.e. it applies
- * to each of the enabled slices.
- */
- info->subslice_per_slice = ss_max - hweight32(ss_disable);
- info->subslice_total = info->slice_total * info->subslice_per_slice;
-
/*
* Iterate through enabled slices and subslices to
* count the total enabled EU.
*/
for (s = 0; s < s_max; s++) {
- if (!(s_enable & (0x1 << s)))
+ if (!(sseu->slice_mask & BIT(s)))
/* skip disabled slice */
continue;
for (ss = 0; ss < ss_max; ss++) {
u32 n_disabled;
- if (ss_disable & (0x1 << ss))
+ if (!(sseu->subslice_mask & BIT(ss)))
/* skip disabled subslice */
continue;
@@ -239,9 +234,9 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* Record which subslices have 7 EUs.
*/
if (eu_max - n_disabled == 7)
- info->subslice_7eu[s] |= 1 << ss;
+ sseu->subslice_7eu[s] |= 1 << ss;
- info->eu_total += eu_max - n_disabled;
+ sseu->eu_total += eu_max - n_disabled;
}
}
@@ -250,16 +245,17 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* subslices with the exception that any one EU in any one subslice may
* be fused off for die recovery.
*/
- info->eu_per_subslice = info->subslice_total ?
- DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+ sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total,
+ sseu_subslice_total(sseu)) : 0;
/*
* BDW supports slice power gating on devices with more than
* one slice.
*/
- info->has_slice_pg = (info->slice_total > 1);
- info->has_subslice_pg = 0;
- info->has_eu_pg = 0;
+ sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
+ sseu->has_subslice_pg = 0;
+ sseu->has_eu_pg = 0;
}
/*
@@ -374,15 +370,19 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
info->has_snoop = false;
- DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
- DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
- DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
- DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
- DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+ DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
+ DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
+ DRM_DEBUG_DRIVER("subslice total: %u\n",
+ sseu_subslice_total(&info->sseu));
+ DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
+ DRM_DEBUG_DRIVER("subslice per slice: %u\n",
+ hweight8(info->sseu.subslice_mask));
+ DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
+ DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
DRM_DEBUG_DRIVER("has slice power gating: %s\n",
- info->has_slice_pg ? "y" : "n");
+ info->sseu.has_slice_pg ? "y" : "n");
DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
- info->has_subslice_pg ? "y" : "n");
+ info->sseu.has_subslice_pg ? "y" : "n");
DRM_DEBUG_DRIVER("has EU power gating: %s\n",
- info->has_eu_pg ? "y" : "n");
+ info->sseu.has_eu_pg ? "y" : "n");
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 175595fc3e45..81c11499bcf0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -34,6 +34,7 @@
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
@@ -1201,8 +1202,8 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
if (HAS_PCH_SPLIT(dev)) {
u32 port_sel;
- pp_reg = PCH_PP_CONTROL;
- port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
+ pp_reg = PP_CONTROL(0);
+ port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
if (port_sel == PANEL_PORT_SELECT_LVDS &&
I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
@@ -1210,10 +1211,10 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
/* XXX: else fix for eDP */
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/* presumably write lock depends on pipe, not port select */
- pp_reg = VLV_PIPE_PP_CONTROL(pipe);
+ pp_reg = PP_CONTROL(pipe);
panel_pipe = pipe;
} else {
- pp_reg = PP_CONTROL;
+ pp_reg = PP_CONTROL(0);
if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
}
@@ -1906,7 +1907,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
}
}
-static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -1958,12 +1959,12 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
- if (HAS_GMCH_DISPLAY(dev_priv))
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
- else {
+ } else {
if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
@@ -2146,33 +2147,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
}
}
-static void
-intel_fill_fb_info(struct drm_i915_private *dev_priv,
- struct drm_framebuffer *fb)
-{
- struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
- unsigned int tile_size, tile_width, tile_height, cpp;
-
- tile_size = intel_tile_size(dev_priv);
-
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier[0], cpp);
-
- info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
- info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
-
- if (info->pixel_format == DRM_FORMAT_NV12) {
- cpp = drm_format_plane_cpp(fb->pixel_format, 1);
- intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier[1], cpp);
-
- info->uv_offset = fb->offsets[1];
- info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
- info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
- }
-}
-
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2205,16 +2179,15 @@ static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv
}
}
-int
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- unsigned int rotation)
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
+ struct i915_vma *vma;
u32 alignment;
- int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -2239,75 +2212,112 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
*/
intel_runtime_pm_get(dev_priv);
- ret = i915_gem_object_pin_to_display_plane(obj, alignment,
- &view);
- if (ret)
- goto err_pm;
-
- /* Install a fence for tiled scan-out. Pre-i965 always needs a
- * fence, whereas 965+ only requires a fence if using
- * framebuffer compression. For simplicity, we always install
- * a fence as the cost is not that onerous.
- */
- if (view.type == I915_GGTT_VIEW_NORMAL) {
- ret = i915_gem_object_get_fence(obj);
- if (ret == -EDEADLK) {
- /*
- * -EDEADLK means there are no free fences
- * no pending flips.
- *
- * This is propagated to atomic, but it uses
- * -EDEADLK to force a locking recovery, so
- * change the returned error to -EBUSY.
- */
- ret = -EBUSY;
- goto err_unpin;
- } else if (ret)
- goto err_unpin;
+ vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
+ if (IS_ERR(vma))
+ goto err;
- i915_gem_object_pin_fence(obj);
+ if (i915_vma_is_map_and_fenceable(vma)) {
+ /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always, when
+ * possible, install a fence as the cost is not that onerous.
+ *
+ * If we fail to fence the tiled scanout, then either the
+ * modeset will reject the change (which is highly unlikely as
+ * the affected systems, all but one, do not have unmappable
+ * space) or we will not be able to enable full powersaving
+ * techniques (also likely not to apply due to various limits
+ * FBC and the like impose on the size of the buffer, which
+ * presumably we violated anyway with this unmappable buffer).
+ * Anyway, it is presumably better to stumble onwards with
+ * something and try to run the system in a "less than optimal"
+ * mode that matches the user configuration.
+ */
+ if (i915_vma_get_fence(vma) == 0)
+ i915_vma_pin_fence(vma);
}
+err:
intel_runtime_pm_put(dev_priv);
- return 0;
-
-err_unpin:
- i915_gem_object_unpin_from_display_plane(obj, &view);
-err_pm:
- intel_runtime_pm_put(dev_priv);
- return ret;
+ return vma;
}
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
+ struct i915_vma *vma;
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
intel_fill_fb_ggtt_view(&view, fb, rotation);
+ vma = i915_gem_object_to_ggtt(obj, &view);
- if (view.type == I915_GGTT_VIEW_NORMAL)
- i915_gem_object_unpin_fence(obj);
+ i915_vma_unpin_fence(vma);
+ i915_gem_object_unpin_from_display_plane(vma);
+}
- i915_gem_object_unpin_from_display_plane(obj, &view);
+static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
+ unsigned int rotation)
+{
+ if (intel_rotation_90_or_270(rotation))
+ return to_intel_framebuffer(fb)->rotated[plane].pitch;
+ else
+ return fb->pitches[plane];
+}
+
+/*
+ * Convert the x/y offsets into a linear offset.
+ * Only valid with 0/180 degree rotation, which is fine since linear
+ * offset is only used with linear buffers on pre-hsw and tiled buffers
+ * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
+ */
+u32 intel_fb_xy_to_linear(int x, int y,
+ const struct intel_plane_state *state,
+ int plane)
+{
+ const struct drm_framebuffer *fb = state->base.fb;
+ unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int pitch = fb->pitches[plane];
+
+ return y * pitch + x * cpp;
+}
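
A worked example of the arithmetic above, with illustrative values:

/*
 * cpp = 4 (32 bpp) and fb->pitches[plane] = 8192:
 * (x, y) = (100, 2) -> 2 * 8192 + 100 * 4 = 16784 bytes
 * from the start of the GTT mapping.
 */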
+
+/*
+ * Add the x/y offsets derived from fb->offsets[] to the user
+ * specified plane src x/y offsets. The resulting x/y offsets
+ * specify the start of scanout from the beginning of the gtt mapping.
+ */
+void intel_add_fb_offsets(int *x, int *y,
+ const struct intel_plane_state *state,
+ int plane)
+{
+ const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
+ unsigned int rotation = state->base.rotation;
+
+ if (intel_rotation_90_or_270(rotation)) {
+ *x += intel_fb->rotated[plane].x;
+ *y += intel_fb->rotated[plane].y;
+ } else {
+ *x += intel_fb->normal[plane].x;
+ *y += intel_fb->normal[plane].y;
+ }
}
/*
- * Adjust the tile offset by moving the difference into
- * the x/y offsets.
- *
* Input tile dimensions and pitch must already be
* rotated to match x and y, and in pixel units.
*/
-static u32 intel_adjust_tile_offset(int *x, int *y,
- unsigned int tile_width,
- unsigned int tile_height,
- unsigned int tile_size,
- unsigned int pitch_tiles,
- u32 old_offset,
- u32 new_offset)
-{
+static u32 _intel_adjust_tile_offset(int *x, int *y,
+ unsigned int tile_width,
+ unsigned int tile_height,
+ unsigned int tile_size,
+ unsigned int pitch_tiles,
+ u32 old_offset,
+ u32 new_offset)
+{
+ unsigned int pitch_pixels = pitch_tiles * tile_width;
unsigned int tiles;
WARN_ON(old_offset & (tile_size - 1));
@@ -2319,6 +2329,54 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
*y += tiles / pitch_tiles * tile_height;
*x += tiles % pitch_tiles * tile_width;
+ /* minimize x in case it got needlessly big */
+ *y += *x / pitch_pixels * tile_height;
+ *x %= pitch_pixels;
+
+ return new_offset;
+}
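
To make the tile walk above concrete, with illustrative X-tile numbers:

/*
 * X-tiled, cpp = 4: tile_width = 128 px, tile_height = 8 rows,
 * tile_size = 4096 bytes, pitch_tiles = 20 (so pitch_pixels = 2560).
 * For old_offset - new_offset = 3 * 4096, tiles = 3, so the walk
 * adds 3 / 20 * 8 = 0 rows and 3 % 20 * 128 = 384 px; x stays
 * below pitch_pixels, so the final fold-up leaves y unchanged.
 */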
+
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ */
+static u32 intel_adjust_tile_offset(int *x, int *y,
+ const struct intel_plane_state *state, int plane,
+ u32 old_offset, u32 new_offset)
+{
+ const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
+ const struct drm_framebuffer *fb = state->base.fb;
+ unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int rotation = state->base.rotation;
+ unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
+
+ WARN_ON(new_offset > old_offset);
+
+ if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) {
+ unsigned int tile_size, tile_width, tile_height;
+ unsigned int pitch_tiles;
+
+ tile_size = intel_tile_size(dev_priv);
+ intel_tile_dims(dev_priv, &tile_width, &tile_height,
+ fb->modifier[plane], cpp);
+
+ if (intel_rotation_90_or_270(rotation)) {
+ pitch_tiles = pitch / tile_height;
+ swap(tile_width, tile_height);
+ } else {
+ pitch_tiles = pitch / (tile_width * cpp);
+ }
+
+ _intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ old_offset, new_offset);
+ } else {
+ old_offset += *y * pitch + *x * cpp;
+
+ *y = (old_offset - new_offset) / pitch;
+ *x = ((old_offset - new_offset) - *y * pitch) / cpp;
+ }
+
return new_offset;
}
@@ -2329,18 +2387,24 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
* In the 90/270 rotated case, x and y are assumed
* to be already rotated to match the rotated GTT view, and
* pitch is the tile_height aligned framebuffer height.
+ *
+ * This function is used when computing the derived information
+ * under intel_framebuffer, so using any of that information
+ * here is not allowed. Anything under drm_framebuffer can be
+ * used. This is why the user has to pass in the pitch since it
+ * is specified in the rotated orientation.
*/
-u32 intel_compute_tile_offset(int *x, int *y,
- const struct drm_framebuffer *fb, int plane,
- unsigned int pitch,
- unsigned int rotation)
+static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
+ int *x, int *y,
+ const struct drm_framebuffer *fb, int plane,
+ unsigned int pitch,
+ unsigned int rotation,
+ u32 alignment)
{
- const struct drm_i915_private *dev_priv = to_i915(fb->dev);
uint64_t fb_modifier = fb->modifier[plane];
unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
- u32 offset, offset_aligned, alignment;
+ u32 offset, offset_aligned;
- alignment = intel_surf_alignment(dev_priv, fb_modifier);
if (alignment)
alignment--;
@@ -2368,9 +2432,9 @@ u32 intel_compute_tile_offset(int *x, int *y,
offset = (tile_rows * pitch_tiles + tiles) * tile_size;
offset_aligned = offset & ~alignment;
- intel_adjust_tile_offset(x, y, tile_width, tile_height,
- tile_size, pitch_tiles,
- offset, offset_aligned);
+ _intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ offset, offset_aligned);
} else {
offset = *y * pitch + *x * cpp;
offset_aligned = offset & ~alignment;
@@ -2382,6 +2446,177 @@ u32 intel_compute_tile_offset(int *x, int *y,
return offset_aligned;
}
+u32 intel_compute_tile_offset(int *x, int *y,
+ const struct intel_plane_state *state,
+ int plane)
+{
+ const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
+ const struct drm_framebuffer *fb = state->base.fb;
+ unsigned int rotation = state->base.rotation;
+ int pitch = intel_fb_pitch(fb, plane, rotation);
+ u32 alignment;
+
+ /* AUX_DIST needs only 4K alignment */
+ if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
+ alignment = 4096;
+ else
+ alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]);
+
+ return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
+ rotation, alignment);
+}
+
+/* Convert the fb->offset[] linear offset into x/y offsets */
+static void intel_fb_offset_to_xy(int *x, int *y,
+ const struct drm_framebuffer *fb, int plane)
+{
+ unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int pitch = fb->pitches[plane];
+ u32 linear_offset = fb->offsets[plane];
+
+ *y = linear_offset / pitch;
+ *x = linear_offset % pitch / cpp;
+}
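
This is the inverse of intel_fb_xy_to_linear(); reusing the illustrative numbers from above:

/*
 * pitch = 8192, cpp = 4, fb->offsets[plane] = 16784:
 * y = 16784 / 8192 = 2, x = (16784 % 8192) / 4 = 100.
 */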
+
+static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
+{
+ switch (fb_modifier) {
+ case I915_FORMAT_MOD_X_TILED:
+ return I915_TILING_X;
+ case I915_FORMAT_MOD_Y_TILED:
+ return I915_TILING_Y;
+ default:
+ return I915_TILING_NONE;
+ }
+}
+
+static int
+intel_fill_fb_info(struct drm_i915_private *dev_priv,
+ struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+ u32 gtt_offset_rotated = 0;
+ unsigned int max_size = 0;
+ uint32_t format = fb->pixel_format;
+ int i, num_planes = drm_format_num_planes(format);
+ unsigned int tile_size = intel_tile_size(dev_priv);
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width, height;
+ unsigned int cpp, size;
+ u32 offset;
+ int x, y;
+
+ cpp = drm_format_plane_cpp(format, i);
+ width = drm_format_plane_width(fb->width, format, i);
+ height = drm_format_plane_height(fb->height, format, i);
+
+ intel_fb_offset_to_xy(&x, &y, fb, i);
+
+ /*
+ * The fence (if used) is aligned to the start of the object
+ * so having the framebuffer wrap around across the edge of the
+ * fenced region doesn't really work. We have no API to configure
+ * the fence start offset within the object (nor could we probably
+	 * on gen2/3). So it's easier if we just require that the
+ * fb layout agrees with the fence layout. We already check that the
+ * fb stride matches the fence stride elsewhere.
+ */
+ if (i915_gem_object_is_tiled(intel_fb->obj) &&
+ (x + width) * cpp > fb->pitches[i]) {
+ DRM_DEBUG("bad fb plane %d offset: 0x%x\n",
+ i, fb->offsets[i]);
+ return -EINVAL;
+ }
+
+ /*
+ * First pixel of the framebuffer from
+ * the start of the normal gtt mapping.
+ */
+ intel_fb->normal[i].x = x;
+ intel_fb->normal[i].y = y;
+
+ offset = _intel_compute_tile_offset(dev_priv, &x, &y,
+ fb, 0, fb->pitches[i],
+ DRM_ROTATE_0, tile_size);
+ offset /= tile_size;
+
+ if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) {
+ unsigned int tile_width, tile_height;
+ unsigned int pitch_tiles;
+ struct drm_rect r;
+
+ intel_tile_dims(dev_priv, &tile_width, &tile_height,
+ fb->modifier[i], cpp);
+
+ rot_info->plane[i].offset = offset;
+ rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
+ rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
+ rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
+
+ intel_fb->rotated[i].pitch =
+ rot_info->plane[i].height * tile_height;
+
+ /* how many tiles does this plane need */
+ size = rot_info->plane[i].stride * rot_info->plane[i].height;
+ /*
+ * If the plane isn't horizontally tile aligned,
+ * we need one more tile.
+ */
+ if (x != 0)
+ size++;
+
+ /* rotate the x/y offsets to match the GTT view */
+ r.x1 = x;
+ r.y1 = y;
+ r.x2 = x + width;
+ r.y2 = y + height;
+ drm_rect_rotate(&r,
+ rot_info->plane[i].width * tile_width,
+ rot_info->plane[i].height * tile_height,
+ DRM_ROTATE_270);
+ x = r.x1;
+ y = r.y1;
+
+ /* rotate the tile dimensions to match the GTT view */
+ pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
+ swap(tile_width, tile_height);
+
+ /*
+ * We only keep the x/y offsets, so push all of the
+ * gtt offset into the x/y offsets.
+ */
+			_intel_adjust_tile_offset(&x, &y, tile_width,
+				tile_height, tile_size, pitch_tiles,
+				gtt_offset_rotated * tile_size, 0);
+
+ gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
+
+ /*
+ * First pixel of the framebuffer from
+ * the start of the rotated gtt mapping.
+ */
+ intel_fb->rotated[i].x = x;
+ intel_fb->rotated[i].y = y;
+ } else {
+ size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
+ x * cpp, tile_size);
+ }
+
+ /* how many tiles in total needed in the bo */
+ max_size = max(max_size, offset + size);
+ }
+
+ if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) {
+ DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n",
+ max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
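
For a feel of the per-plane bookkeeping above, assume an X-tiled 1920x1080 fb with cpp = 4 (128x8 px tiles, 4096-byte tiles) and pitches[0] = 7680:

/*
 * rot_info->plane[0].stride = DIV_ROUND_UP(7680, 128 * 4) = 15 tiles
 * rot_info->plane[0].width  = DIV_ROUND_UP(0 + 1920, 128) = 15 tiles
 * rot_info->plane[0].height = DIV_ROUND_UP(0 + 1080, 8)   = 135 tiles
 * rotated[0].pitch = 135 * 8 = 1080 px; the plane occupies
 * 15 * 135 = 2025 tiles of the rotated GTT view.
 */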
+
static int i9xx_format_to_fourcc(int format)
{
switch (format) {
@@ -2465,9 +2700,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
}
- obj->tiling_mode = plane_config->tiling;
- if (obj->tiling_mode == I915_TILING_X)
- obj->stride = fb->pitches[0];
+ if (plane_config->tiling == I915_TILING_X)
+ obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
mode_cmd.pixel_format = fb->pixel_format;
mode_cmd.width = fb->width;
@@ -2488,7 +2722,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return true;
out_unref_obj:
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex);
return false;
}
@@ -2552,7 +2786,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
continue;
obj = intel_fb_obj(fb);
- if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+ if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
drm_framebuffer_reference(fb);
goto valid_fb;
}
@@ -2565,7 +2799,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
- to_intel_plane_state(plane_state)->visible = false;
+ to_intel_plane_state(plane_state)->base.visible = false;
crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
intel_pre_disable_primary_noatomic(&intel_crtc->base);
intel_plane->disable_plane(primary, &intel_crtc->base);
@@ -2583,24 +2817,189 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
- intel_state->src.x1 = plane_state->src_x;
- intel_state->src.y1 = plane_state->src_y;
- intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
- intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
- intel_state->dst.x1 = plane_state->crtc_x;
- intel_state->dst.y1 = plane_state->crtc_y;
- intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
- intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+ intel_state->base.src.x1 = plane_state->src_x;
+ intel_state->base.src.y1 = plane_state->src_y;
+ intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w;
+ intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h;
+ intel_state->base.dst.x1 = plane_state->crtc_x;
+ intel_state->base.dst.y1 = plane_state->crtc_y;
+ intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
+ intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
obj = intel_fb_obj(fb);
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true;
drm_framebuffer_reference(fb);
primary->fb = primary->state->fb = fb;
primary->crtc = primary->state->crtc = &intel_crtc->base;
intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
- obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
+ atomic_or(to_intel_plane(primary)->frontbuffer_bit,
+ &obj->frontbuffer_bits);
+}
+
+static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
+ unsigned int rotation)
+{
+ int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+
+ switch (fb->modifier[plane]) {
+ case DRM_FORMAT_MOD_NONE:
+ case I915_FORMAT_MOD_X_TILED:
+ switch (cpp) {
+ case 8:
+ return 4096;
+ case 4:
+ case 2:
+ case 1:
+ return 8192;
+ default:
+ MISSING_CASE(cpp);
+ break;
+ }
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ switch (cpp) {
+ case 8:
+ return 2048;
+ case 4:
+ return 4096;
+ case 2:
+ case 1:
+ return 8192;
+ default:
+ MISSING_CASE(cpp);
+ break;
+ }
+ break;
+ default:
+ MISSING_CASE(fb->modifier[plane]);
+ }
+
+ return 2048;
+}
+
+static int skl_check_main_surface(struct intel_plane_state *plane_state)
+{
+ const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ int x = plane_state->base.src.x1 >> 16;
+ int y = plane_state->base.src.y1 >> 16;
+ int w = drm_rect_width(&plane_state->base.src) >> 16;
+ int h = drm_rect_height(&plane_state->base.src) >> 16;
+ int max_width = skl_max_plane_width(fb, 0, rotation);
+ int max_height = 4096;
+ u32 alignment, offset, aux_offset = plane_state->aux.offset;
+
+ if (w > max_width || h > max_height) {
+ DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
+ return -EINVAL;
+ }
+
+ intel_add_fb_offsets(&x, &y, plane_state, 0);
+ offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
+
+ alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
+
+ /*
+ * AUX surface offset is specified as the distance from the
+ * main surface offset, and it must be non-negative. Make
+ * sure that is what we will get.
+ */
+ if (offset > aux_offset)
+ offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
+ offset, aux_offset & ~(alignment - 1));
+
+ /*
+ * When using an X-tiled surface, the plane blows up
+	 * if the x offset + width exceeds the stride.
+	 *
+	 * TODO: linear and Y-tiled seem fine, Yf untested.
+ */
+ if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) {
+ int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ while ((x + w) * cpp > fb->pitches[0]) {
+ if (offset == 0) {
+ DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
+ return -EINVAL;
+ }
+
+ offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
+ offset, offset - alignment);
+ }
+ }
+
+ plane_state->main.offset = offset;
+ plane_state->main.x = x;
+ plane_state->main.y = y;
+
+ return 0;
+}
+
+static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ int max_width = skl_max_plane_width(fb, 1, rotation);
+ int max_height = 4096;
+ int x = plane_state->base.src.x1 >> 17;
+ int y = plane_state->base.src.y1 >> 17;
+ int w = drm_rect_width(&plane_state->base.src) >> 17;
+ int h = drm_rect_height(&plane_state->base.src) >> 17;
+ u32 offset;
+
+ intel_add_fb_offsets(&x, &y, plane_state, 1);
+ offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
+
+ /* FIXME not quite sure how/if these apply to the chroma plane */
+ if (w > max_width || h > max_height) {
+ DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
+ return -EINVAL;
+ }
+
+ plane_state->aux.offset = offset;
+ plane_state->aux.x = x;
+ plane_state->aux.y = y;
+
+ return 0;
+}
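
The >> 17 shifts above fold two steps into one: .src coordinates are 16.16 fixed point (>> 16 for whole pixels), and the NV12 CbCr plane is subsampled 2x2 (a further >> 1):

/* e.g. src.x1 = 256 << 16 (luma pixel 256) -> chroma x = 256 / 2 = 128 */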
+
+int skl_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ int ret;
+
+ /* Rotate src coordinates to match rotated GTT view */
+ if (intel_rotation_90_or_270(rotation))
+ drm_rect_rotate(&plane_state->base.src,
+ fb->width << 16, fb->height << 16,
+ DRM_ROTATE_270);
+
+ /*
+ * Handle the AUX surface first since
+ * the main surface setup depends on it.
+ */
+ if (fb->pixel_format == DRM_FORMAT_NV12) {
+ ret = skl_check_nv12_aux_surface(plane_state);
+ if (ret)
+ return ret;
+ } else {
+ plane_state->aux.offset = ~0xfff;
+ plane_state->aux.x = 0;
+ plane_state->aux.y = 0;
+ }
+
+ ret = skl_check_main_surface(plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
}
static void i9xx_update_primary_plane(struct drm_plane *primary,
@@ -2617,9 +3016,8 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
unsigned int rotation = plane_state->base.rotation;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- int x = plane_state->src.x1 >> 16;
- int y = plane_state->src.y1 >> 16;
+ int x = plane_state->base.src.x1 >> 16;
+ int y = plane_state->base.src.y1 >> 16;
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -2670,37 +3068,31 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
BUG();
}
- if (INTEL_INFO(dev)->gen >= 4 &&
- obj->tiling_mode != I915_TILING_NONE)
+ if (INTEL_GEN(dev_priv) >= 4 &&
+ fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- linear_offset = y * fb->pitches[0] + x * cpp;
+ intel_add_fb_offsets(&x, &y, plane_state, 0);
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_INFO(dev)->gen >= 4)
intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(&x, &y, fb, 0,
- fb->pitches[0], rotation);
- linear_offset -= intel_crtc->dspaddr_offset;
- } else {
- intel_crtc->dspaddr_offset = linear_offset;
- }
+ intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == DRM_ROTATE_180) {
dspcntr |= DISPPLANE_ROTATE_180;
x += (crtc_state->pipe_src_w - 1);
y += (crtc_state->pipe_src_h - 1);
-
- /* Finding the last pixel of the last line of the display
- data and adding to linear_offset*/
- linear_offset +=
- (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
- (crtc_state->pipe_src_w - 1) * cpp;
}
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
+ if (INTEL_INFO(dev)->gen < 4)
+ intel_crtc->dspaddr_offset = linear_offset;
+
intel_crtc->adjusted_x = x;
intel_crtc->adjusted_y = y;
@@ -2709,11 +3101,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane),
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_fb_gtt_offset(fb, rotation) +
+ intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
- I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
+ I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset);
POSTING_READ(reg);
}
@@ -2741,15 +3134,13 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
unsigned int rotation = plane_state->base.rotation;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- int x = plane_state->src.x1 >> 16;
- int y = plane_state->src.y1 >> 16;
+ int x = plane_state->base.src.x1 >> 16;
+ int y = plane_state->base.src.y1 >> 16;
dspcntr = DISPPLANE_GAMMA_ENABLE;
dspcntr |= DISPLAY_PLANE_ENABLE;
@@ -2780,32 +3171,28 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
BUG();
}
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
- linear_offset = y * fb->pitches[0] + x * cpp;
+ intel_add_fb_offsets(&x, &y, plane_state, 0);
+
intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(&x, &y, fb, 0,
- fb->pitches[0], rotation);
- linear_offset -= intel_crtc->dspaddr_offset;
- if (rotation == BIT(DRM_ROTATE_180)) {
+ intel_compute_tile_offset(&x, &y, plane_state, 0);
+
+ if (rotation == DRM_ROTATE_180) {
dspcntr |= DISPPLANE_ROTATE_180;
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
x += (crtc_state->pipe_src_w - 1);
y += (crtc_state->pipe_src_h - 1);
-
- /* Finding the last pixel of the last line of the display
- data and adding to linear_offset*/
- linear_offset +=
- (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
- (crtc_state->pipe_src_w - 1) * cpp;
}
}
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
intel_crtc->adjusted_x = x;
intel_crtc->adjusted_y = y;
@@ -2813,7 +3200,8 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
- i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
+ intel_fb_gtt_offset(fb, rotation) +
+ intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
@@ -2835,32 +3223,21 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
}
}
-u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
- struct drm_i915_gem_object *obj,
- unsigned int plane)
+u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
+ unsigned int rotation)
{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
struct i915_vma *vma;
- u64 offset;
- intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
- intel_plane->base.state->rotation);
+ intel_fill_fb_ggtt_view(&view, fb, rotation);
- vma = i915_gem_obj_to_ggtt_view(obj, &view);
+ vma = i915_gem_object_to_ggtt(obj, &view);
if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
- view.type))
+ view.type))
return -1;
- offset = vma->node.start;
-
- if (plane == 1) {
- offset += vma->ggtt_view.params.rotated.uv_start_page *
- PAGE_SIZE;
- }
-
- WARN_ON(upper_32_bits(offset));
-
- return lower_32_bits(offset);
+ return i915_ggtt_offset(vma);
}
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -2890,6 +3267,28 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
}
}
+u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
+ unsigned int rotation)
+{
+ const struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ u32 stride = intel_fb_pitch(fb, plane, rotation);
+
+ /*
+	 * The stride is either expressed in chunks of 64 bytes for linear
+	 * buffers, or as a number of tiles for tiled buffers.
+ */
+ if (intel_rotation_90_or_270(rotation)) {
+ int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+
+ stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
+ } else {
+ stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+ fb->pixel_format);
+ }
+
+ return stride;
+}
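
Illustrative numbers for the two stride encodings above:

/*
 * Linear, pitches[0] = 7680: stride = 7680 / 64 = 120 chunks.
 * 90/270 rotated, X-tiled (tile_height = 8): the rotated pitch is
 * the tile-height-aligned fb height, so stride = pitch / 8 tiles.
 */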
+
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
switch (pixel_format) {
@@ -2952,17 +3351,17 @@ u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
u32 skl_plane_ctl_rotation(unsigned int rotation)
{
switch (rotation) {
- case BIT(DRM_ROTATE_0):
+ case DRM_ROTATE_0:
break;
/*
	 * DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
*/
- case BIT(DRM_ROTATE_90):
+ case DRM_ROTATE_90:
return PLANE_CTL_ROTATE_270;
- case BIT(DRM_ROTATE_180):
+ case DRM_ROTATE_180:
return PLANE_CTL_ROTATE_180;
- case BIT(DRM_ROTATE_270):
+ case DRM_ROTATE_270:
return PLANE_CTL_ROTATE_90;
default:
MISSING_CASE(rotation);
@@ -2979,22 +3378,21 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
- u32 plane_ctl, stride_div, stride;
- u32 tile_height, plane_offset, plane_size;
+ u32 plane_ctl;
unsigned int rotation = plane_state->base.rotation;
- int x_offset, y_offset;
- u32 surf_addr;
+ u32 stride = skl_plane_stride(fb, 0, rotation);
+ u32 surf_addr = plane_state->main.offset;
int scaler_id = plane_state->scaler_id;
- int src_x = plane_state->src.x1 >> 16;
- int src_y = plane_state->src.y1 >> 16;
- int src_w = drm_rect_width(&plane_state->src) >> 16;
- int src_h = drm_rect_height(&plane_state->src) >> 16;
- int dst_x = plane_state->dst.x1;
- int dst_y = plane_state->dst.y1;
- int dst_w = drm_rect_width(&plane_state->dst);
- int dst_h = drm_rect_height(&plane_state->dst);
+ int src_x = plane_state->main.x;
+ int src_y = plane_state->main.y;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ int dst_x = plane_state->base.dst.x1;
+ int dst_y = plane_state->base.dst.y1;
+ int dst_w = drm_rect_width(&plane_state->base.dst);
+ int dst_h = drm_rect_height(&plane_state->base.dst);
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -3005,36 +3403,24 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
plane_ctl |= skl_plane_ctl_rotation(rotation);
- stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
- fb->pixel_format);
- surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ dst_w--;
+ dst_h--;
- WARN_ON(drm_rect_width(&plane_state->src) == 0);
+ intel_crtc->dspaddr_offset = surf_addr;
- if (intel_rotation_90_or_270(rotation)) {
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ intel_crtc->adjusted_x = src_x;
+ intel_crtc->adjusted_y = src_y;
- /* stride = Surface height in tiles */
- tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
- stride = DIV_ROUND_UP(fb->height, tile_height);
- x_offset = stride * tile_height - src_y - src_h;
- y_offset = src_x;
- plane_size = (src_w - 1) << 16 | (src_h - 1);
- } else {
- stride = fb->pitches[0] / stride_div;
- x_offset = src_x;
- y_offset = src_y;
- plane_size = (src_h - 1) << 16 | (src_w - 1);
- }
- plane_offset = y_offset << 16 | x_offset;
-
- intel_crtc->adjusted_x = x_offset;
- intel_crtc->adjusted_y = y_offset;
+ if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
+ skl_write_plane_wm(intel_crtc, wm, 0);
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
- I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
- I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
+ I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+ I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w);
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;
@@ -3051,7 +3437,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
}
- I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
+ I915_WRITE(PLANE_SURF(pipe, 0),
+ intel_fb_gtt_offset(fb, rotation) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
}
@@ -3061,7 +3448,15 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ /*
+ * We only populate skl_results on watermark updates, and if the
+	 * plane's visibility isn't actually changing, neither are its watermarks.
+ */
+ if (!crtc->primary->state->visible)
+ skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
@@ -3096,7 +3491,7 @@ static void intel_update_primary_planes(struct drm_device *dev)
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- if (plane_state->visible)
+ if (plane_state->base.visible)
plane->update_plane(&plane->base,
to_intel_crtc_state(crtc->state),
plane_state);
@@ -3135,6 +3530,12 @@ __intel_display_resume(struct drm_device *dev,
return ret;
}
+static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+{
+ return intel_has_gpu_reset(dev_priv) &&
+ INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
+}
+
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -3142,10 +3543,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
struct drm_atomic_state *state;
int ret;
- /* no reset support for gen2 */
- if (IS_GEN2(dev_priv))
- return;
-
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
@@ -3161,7 +3558,8 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
}
/* reset doesn't touch the display, but flips might get nuked anyway, */
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ if (!i915.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
return;
/*
@@ -3204,24 +3602,28 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
*/
intel_complete_page_flips(dev_priv);
- /* no reset support for gen2 */
- if (IS_GEN2(dev_priv))
- return;
dev_priv->modeset_restore_state = NULL;
/* reset doesn't touch the display */
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
- /*
- * Flips in the rings have been nuked by the reset,
- * so update the base address of all primary
- * planes to the the last fb to make sure we're
- * showing the correct fb after a reset.
- *
- * FIXME: Atomic will make this obsolete since we won't schedule
- * CS-based flips (which might get lost in gpu resets) any more.
- */
- intel_update_primary_planes(dev);
+ if (!gpu_reset_clobbers_display(dev_priv)) {
+ if (!state) {
+ /*
+ * Flips in the rings have been nuked by the reset,
+ * so update the base address of all primary
+			 * planes to the last fb to make sure we're
+ * showing the correct fb after a reset.
+ *
+ * FIXME: Atomic will make this obsolete since we won't schedule
+ * CS-based flips (which might get lost in gpu resets) any more.
+ */
+ intel_update_primary_planes(dev);
+ } else {
+ ret = __intel_display_resume(dev, state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ }
} else {
/*
* The display has been reset as well,
@@ -3230,6 +3632,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_runtime_pm_disable_interrupts(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_pps_unlock_regs_wa(dev_priv);
intel_modeset_init_hw(dev);
spin_lock_irq(&dev_priv->irq_lock);
@@ -3249,15 +3652,26 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev->mode_config.mutex);
}
+static bool abort_flip_on_reset(struct intel_crtc *crtc)
+{
+ struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
+
+ if (i915_reset_in_progress(error))
+ return true;
+
+ if (crtc->reset_count != i915_reset_count(error))
+ return true;
+
+ return false;
+}
+
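
abort_flip_on_reset() folds the old reset_counter comparison into one helper: a flip records the reset count when it is queued, and is treated as lost if a reset has started or completed since. A self-contained sketch of the idiom (the error-tracker struct is a stand-in):

    #include <stdbool.h>
    #include <stdio.h>

    struct gpu_error {
        unsigned int reset_count;   /* bumped on every completed reset */
        bool reset_in_progress;
    };

    static bool flip_aborted(const struct gpu_error *e, unsigned int queued_count)
    {
        return e->reset_in_progress || e->reset_count != queued_count;
    }

    int main(void)
    {
        struct gpu_error err = { .reset_count = 3, .reset_in_progress = false };
        unsigned int queued = err.reset_count;   /* captured at flip time */

        err.reset_count++;                       /* a reset happened since */
        printf("aborted: %d\n", flip_aborted(&err, queued)); /* 1 */
        return 0;
    }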
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned reset_counter;
bool pending;
- reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
- if (intel_crtc->reset_counter != reset_counter)
+ if (abort_flip_on_reset(intel_crtc))
return false;
spin_lock_irq(&dev->event_lock);
@@ -3900,7 +4314,7 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
return 0;
}
-static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;
@@ -4323,7 +4737,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
intel_crtc->pipe, SKL_CRTC_INDEX);
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
+ &state->scaler_state.scaler_id, DRM_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
@@ -4348,7 +4762,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
- bool force_detach = !fb || !plane_state->visible;
+ bool force_detach = !fb || !plane_state->base.visible;
DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
intel_plane->base.base.id, intel_plane->base.name,
@@ -4358,10 +4772,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
plane_state->base.rotation,
- drm_rect_width(&plane_state->src) >> 16,
- drm_rect_height(&plane_state->src) >> 16,
- drm_rect_width(&plane_state->dst),
- drm_rect_height(&plane_state->dst));
+ drm_rect_width(&plane_state->base.src) >> 16,
+ drm_rect_height(&plane_state->base.src) >> 16,
+ drm_rect_width(&plane_state->base.dst),
+ drm_rect_height(&plane_state->base.dst));
if (ret || plane_state->scaler_id < 0)
return ret;
@@ -4639,12 +5053,11 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
- struct drm_device *dev = crtc->base.dev;
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
- intel_frontbuffer_flip(dev, pipe_config->fb_bits);
+ intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
crtc->wm.cxsr_allowed = true;
@@ -4659,9 +5072,9 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
intel_fbc_post_update(crtc);
- if (primary_state->visible &&
+ if (primary_state->base.visible &&
(needs_modeset(&pipe_config->base) ||
- !old_primary_state->visible))
+ !old_primary_state->base.visible))
intel_post_enable_primary(&crtc->base);
}
}
@@ -4687,8 +5100,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
intel_fbc_pre_update(crtc, pipe_config, primary_state);
- if (old_primary_state->visible &&
- (modeset || !primary_state->visible))
+ if (old_primary_state->base.visible &&
+ (modeset || !primary_state->base.visible))
intel_pre_disable_primary(&crtc->base);
}
@@ -4767,18 +5180,140 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
* to compute the mask of flip planes precisely. For the time being
* consider this a flip to a NULL plane.
*/
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+ intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
-static void ironlake_crtc_enable(struct drm_crtc *crtc)
+static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct drm_atomic_state *old_state)
{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+ struct drm_connector_state *conn_state = conn->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+ }
+}
+
+static void intel_encoders_pre_enable(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+ struct drm_connector_state *conn_state = conn->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder, crtc_state, conn_state);
+ }
+}
+
+static void intel_encoders_enable(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+ struct drm_connector_state *conn_state = conn->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ encoder->enable(encoder, crtc_state, conn_state);
+ intel_opregion_notify_encoder(encoder, true);
+ }
+}
+
+static void intel_encoders_disable(struct drm_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+
+ if (old_conn_state->crtc != crtc)
+ continue;
+
+ intel_opregion_notify_encoder(encoder, false);
+ encoder->disable(encoder, old_crtc_state, old_conn_state);
+ }
+}
+
+static void intel_encoders_post_disable(struct drm_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+
+ if (old_conn_state->crtc != crtc)
+ continue;
+
+ if (encoder->post_disable)
+ encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+ }
+}
+
+static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+
+ if (old_conn_state->crtc != crtc)
+ continue;
+
+ if (encoder->post_pll_disable)
+ encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+ }
+}
+
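
The six intel_encoders_* helpers above are instances of one pattern: walk the connectors in the atomic state, skip those not bound to the CRTC, and call an optional encoder hook with the right old/new state. A condensed, hypothetical version of the loop with stand-in types (the driver deliberately spells out each stage so the enable/disable ordering stays visible at the call sites):

    #include <stddef.h>
    #include <stdio.h>

    struct crtc { int id; };
    struct encoder { void (*hook)(struct encoder *enc); };
    struct connector { struct crtc *crtc; struct encoder *encoder; };

    /* Invoke an optional hook on every encoder feeding @crtc. */
    static void for_encoders_on_crtc(struct connector *conns, size_t n,
                                     struct crtc *crtc)
    {
        for (size_t i = 0; i < n; i++) {
            if (conns[i].crtc != crtc)
                continue;
            if (conns[i].encoder && conns[i].encoder->hook)
                conns[i].encoder->hook(conns[i].encoder);
        }
    }

    static void say_enable(struct encoder *enc) { (void)enc; puts("enable"); }

    int main(void)
    {
        struct crtc c = { 0 };
        struct encoder e = { say_enable };
        struct connector conns[] = { { &c, &e }, { NULL, &e } };

        for_encoders_on_crtc(conns, 2, &c);  /* prints "enable" once */
        return 0;
    }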
+static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->state);
if (WARN_ON(intel_crtc->active))
return;
@@ -4816,9 +5351,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_enable)
- encoder->pre_enable(encoder);
+ intel_encoders_pre_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -4848,8 +5381,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
+ intel_encoders_enable(crtc, pipe_config, old_state);
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
@@ -4867,16 +5399,15 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}
-static void haswell_crtc_enable(struct drm_crtc *crtc)
+static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
+ struct drm_atomic_state *old_state)
{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *encoder;
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->state);
if (WARN_ON(intel_crtc->active))
return;
@@ -4885,9 +5416,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
+ intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->shared_dpll)
intel_enable_shared_dpll(intel_crtc);
@@ -4925,10 +5454,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
else
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- if (encoder->pre_enable)
- encoder->pre_enable(encoder);
- }
+ intel_encoders_pre_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
@@ -4969,10 +5495,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- encoder->enable(encoder);
- intel_opregion_notify_encoder(encoder, true);
- }
+ intel_encoders_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->has_pch_encoder) {
intel_wait_for_vblank(dev, pipe);
@@ -5006,12 +5529,13 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
}
}
-static void ironlake_crtc_disable(struct drm_crtc *crtc)
+static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
+ struct drm_atomic_state *old_state)
{
+ struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
/*
@@ -5024,8 +5548,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
}
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->disable(encoder);
+ intel_encoders_disable(crtc, old_crtc_state, old_state);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
@@ -5037,9 +5560,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
ironlake_fdi_disable(crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->post_disable)
- encoder->post_disable(encoder);
+ intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (intel_crtc->config->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -5069,22 +5590,20 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-static void haswell_crtc_disable(struct drm_crtc *crtc)
+static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
+ struct drm_atomic_state *old_state)
{
+ struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- intel_opregion_notify_encoder(encoder, false);
- encoder->disable(encoder);
- }
+ intel_encoders_disable(crtc, old_crtc_state, old_state);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
@@ -5107,18 +5626,11 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_pipe_clock(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->post_disable)
- encoder->post_disable(encoder);
-
- if (intel_crtc->config->has_pch_encoder) {
- lpt_disable_pch_transcoder(dev_priv);
- lpt_disable_iclkip(dev_priv);
- intel_ddi_fdi_disable(crtc);
+ intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+ if (old_crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
- }
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -6174,14 +6686,13 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
-static void valleyview_crtc_enable(struct drm_crtc *crtc)
+static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
+ struct drm_atomic_state *old_state)
{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *encoder;
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->state);
int pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
@@ -6206,9 +6717,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
+ intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
if (IS_CHERRYVIEW(dev)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
@@ -6218,9 +6727,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
vlv_enable_pll(intel_crtc, intel_crtc->config);
}
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_enable)
- encoder->pre_enable(encoder);
+ intel_encoders_pre_enable(crtc, pipe_config, old_state);
i9xx_pfit_enable(intel_crtc);
@@ -6232,8 +6739,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
+ intel_encoders_enable(crtc, pipe_config, old_state);
}
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -6245,14 +6751,13 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
-static void i9xx_crtc_enable(struct drm_crtc *crtc)
+static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
+ struct drm_atomic_state *old_state)
{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *encoder;
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->state);
enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
@@ -6273,9 +6778,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_enable)
- encoder->pre_enable(encoder);
+ intel_encoders_pre_enable(crtc, pipe_config, old_state);
i9xx_enable_pll(intel_crtc);
@@ -6289,8 +6792,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
+ intel_encoders_enable(crtc, pipe_config, old_state);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6308,12 +6810,13 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
I915_WRITE(PFIT_CONTROL, 0);
}
-static void i9xx_crtc_disable(struct drm_crtc *crtc)
+static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
+ struct drm_atomic_state *old_state)
{
+ struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
/*
@@ -6323,8 +6826,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_wait_for_vblank(dev, pipe);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->disable(encoder);
+ intel_encoders_disable(crtc, old_crtc_state, old_state);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
@@ -6333,9 +6835,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
i9xx_pfit_disable(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->post_disable)
- encoder->post_disable(encoder);
+ intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev))
@@ -6346,9 +6846,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
i9xx_disable_pll(intel_crtc);
}
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->post_pll_disable)
- encoder->post_pll_disable(encoder);
+ intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -6361,20 +6859,34 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
unsigned long domains;
+ struct drm_atomic_state *state;
+ struct intel_crtc_state *crtc_state;
+ int ret;
if (!intel_crtc->active)
return;
- if (to_intel_plane_state(crtc->primary->state)->visible) {
+ if (to_intel_plane_state(crtc->primary->state)->base.visible) {
WARN_ON(intel_crtc->flip_work);
intel_pre_disable_primary_noatomic(crtc);
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
- to_intel_plane_state(crtc->primary->state)->visible = false;
+ to_intel_plane_state(crtc->primary->state)->base.visible = false;
}
- dev_priv->display.crtc_disable(crtc);
+ state = drm_atomic_state_alloc(crtc->dev);
+ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+
+ /* Everything's already locked, -EDEADLK can't happen. */
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+
+ WARN_ON(IS_ERR(crtc_state) || ret);
+
+ dev_priv->display.crtc_disable(crtc_state, state);
+
+ drm_atomic_state_free(state);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.id, crtc->name);
@@ -6889,9 +7401,10 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
u16 gcfgc = 0;
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+ pci_read_config_word(pdev, GCFGC, &gcfgc);
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_267_MHZ_PNV:
@@ -6913,9 +7426,10 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
u16 gcfgc = 0;
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+ pci_read_config_word(pdev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
return 133333;
@@ -6937,6 +7451,7 @@ static int i865_get_display_clock_speed(struct drm_device *dev)
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
u16 hpllcc = 0;
/*
@@ -6944,10 +7459,10 @@ static int i85x_get_display_clock_speed(struct drm_device *dev)
* encoding is different :(
* FIXME is this the right way to detect 852GM/852GMV?
*/
- if (dev->pdev->revision == 0x1)
+ if (pdev->revision == 0x1)
return 133333;
- pci_bus_read_config_word(dev->pdev->bus,
+ pci_bus_read_config_word(pdev->bus,
PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
/* Assume that the hardware is in the high speed state. This
@@ -7048,10 +7563,11 @@ static unsigned int intel_hpll_vco(struct drm_device *dev)
static int gm45_get_display_clock_speed(struct drm_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
uint16_t tmp = 0;
- pci_read_config_word(dev->pdev, GCFGC, &tmp);
+ pci_read_config_word(pdev, GCFGC, &tmp);
cdclk_sel = (tmp >> 12) & 0x1;
@@ -7070,6 +7586,7 @@ static int gm45_get_display_clock_speed(struct drm_device *dev)
static int i965gm_get_display_clock_speed(struct drm_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
static const uint8_t div_3200[] = { 16, 10, 8 };
static const uint8_t div_4000[] = { 20, 12, 10 };
static const uint8_t div_5333[] = { 24, 16, 14 };
@@ -7077,7 +7594,7 @@ static int i965gm_get_display_clock_speed(struct drm_device *dev)
unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
uint16_t tmp = 0;
- pci_read_config_word(dev->pdev, GCFGC, &tmp);
+ pci_read_config_word(pdev, GCFGC, &tmp);
cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
@@ -7107,6 +7624,7 @@ fail:
static int g33_get_display_clock_speed(struct drm_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -7115,7 +7633,7 @@ static int g33_get_display_clock_speed(struct drm_device *dev)
unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
uint16_t tmp = 0;
- pci_read_config_word(dev->pdev, GCFGC, &tmp);
+ pci_read_config_word(pdev, GCFGC, &tmp);
cdclk_sel = (tmp >> 4) & 0x7;
@@ -8995,6 +9513,24 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_dp_encoder(crtc_state))
dpll |= DPLL_SDVO_HIGH_SPEED;
+ /*
+ * The high speed IO clock is only really required for
+ * SDVO/HDMI/DP, but we also enable it for CRT to make it
+ * possible to share the DPLL between CRT and HDMI. Enabling
+ * the clock needlessly does no real harm, except use up a
+ * bit of power potentially.
+ *
+ * We'll limit this to IVB with 3 pipes, since it has only two
+ * DPLLs and so DPLL sharing is the only way to get three pipes
+ * driving PCH ports at the same time. On SNB we could do this,
+ * and potentially avoid enabling the second DPLL, but it's not
+	 * clear if it's a win or loss power-wise. No point in doing
+ * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+ */
+ if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
/* compute bitmask from p1 value */
dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
@@ -9281,7 +9817,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
return;
error:
- kfree(fb);
+ kfree(intel_fb);
}
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -9487,7 +10023,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
- I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+ I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev))
@@ -9526,7 +10062,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
- DRM_ERROR("Failed to write to D_COMP\n");
+ DRM_DEBUG_KMS("Failed to write to D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
I915_WRITE(D_COMP_BDW, val);
@@ -9707,6 +10243,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
bxt_set_cdclk(to_i915(dev), req_cdclk);
}
+static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+ int pixel_rate)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+ /* BSpec says "Do not use DisplayPort with CDCLK less than
+ * 432 MHz, audio enabled, port width x4, and link rate
+ * HBR2 (5.4 GHz), or else there may be audio corruption or
+ * screen corruption."
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->has_audio &&
+ crtc_state->port_clock >= 540000 &&
+ crtc_state->lane_count == 4)
+ pixel_rate = max(432000, pixel_rate);
+
+ return pixel_rate;
+}
+
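
Worked numbers for the two adjustments in bdw_adjust_min_pipe_pixel_rate(), as a stand-alone sketch (rates in kHz; the flags stand in for the crtc-state checks):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static int adjust_min_pixel_rate(int rate, int bdw_ips, int dp_hbr2_x4_audio)
    {
        /* IPS on BDW: pixel rate may be at most 95% of cdclk, so the
         * cdclk floor is rate * 100 / 95, rounded up. */
        if (bdw_ips)
            rate = DIV_ROUND_UP(rate * 100, 95);

        /* DP + audio + 4 lanes + HBR2: BSpec demands cdclk >= 432 MHz. */
        if (dp_hbr2_x4_audio && rate < 432000)
            rate = 432000;

        return rate;
    }

    int main(void)
    {
        printf("%d\n", adjust_min_pixel_rate(100000, 1, 0)); /* 105264 */
        printf("%d\n", adjust_min_pixel_rate(200000, 0, 1)); /* 432000 */
        return 0;
    }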
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
@@ -9732,9 +10291,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
pixel_rate = ilk_pipe_pixel_rate(crtc_state);
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+ if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+ pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
+ pixel_rate);
intel_state->min_pixclk[i] = pixel_rate;
}
@@ -9934,15 +10493,12 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
switch (port) {
case PORT_A:
- pipe_config->ddi_pll_sel = SKL_DPLL0;
id = DPLL_ID_SKL_DPLL0;
break;
case PORT_B:
- pipe_config->ddi_pll_sel = SKL_DPLL1;
id = DPLL_ID_SKL_DPLL1;
break;
case PORT_C:
- pipe_config->ddi_pll_sel = SKL_DPLL2;
id = DPLL_ID_SKL_DPLL2;
break;
default:
@@ -9961,25 +10517,10 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
u32 temp;
temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
- pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
+ id = temp >> (port * 3 + 1);
- switch (pipe_config->ddi_pll_sel) {
- case SKL_DPLL0:
- id = DPLL_ID_SKL_DPLL0;
- break;
- case SKL_DPLL1:
- id = DPLL_ID_SKL_DPLL1;
- break;
- case SKL_DPLL2:
- id = DPLL_ID_SKL_DPLL2;
- break;
- case SKL_DPLL3:
- id = DPLL_ID_SKL_DPLL3;
- break;
- default:
- MISSING_CASE(pipe_config->ddi_pll_sel);
+ if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
return;
- }
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
@@ -9989,10 +10530,9 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
+ uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
- pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
-
- switch (pipe_config->ddi_pll_sel) {
+ switch (ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
id = DPLL_ID_WRPLL1;
break;
@@ -10012,7 +10552,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
id = DPLL_ID_LCPLL_2700;
break;
default:
- MISSING_CASE(pipe_config->ddi_pll_sel);
+ MISSING_CASE(ddi_pll_sel);
/* fall through */
case PORT_CLK_SEL_NONE:
return;
@@ -10245,7 +10785,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
- if (plane_state && plane_state->visible) {
+ if (plane_state && plane_state->base.visible) {
unsigned int width = plane_state->base.crtc_w;
unsigned int height = plane_state->base.crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
@@ -10306,10 +10846,14 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (plane_state && plane_state->visible) {
+ if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
+ skl_write_cursor_wm(intel_crtc, wm);
+
+ if (plane_state && plane_state->base.visible) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (plane_state->base.crtc_w) {
case 64:
@@ -10330,7 +10874,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
if (HAS_DDI(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
+ if (plane_state->base.rotation == DRM_ROTATE_180)
cntl |= CURSOR_ROTATE_180;
}
@@ -10376,7 +10920,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
- plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ plane_state->base.rotation == DRM_ROTATE_180) {
base += (plane_state->base.crtc_h *
plane_state->base.crtc_w - 1) * 4;
}
@@ -10509,7 +11053,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- drm_gem_object_unreference_unlocked(&obj->base);
+ i915_gem_object_put_unlocked(obj);
return fb;
}
@@ -11020,13 +11564,13 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
- drm_gem_object_unreference(&work->pending_flip_obj->base);
-
- if (work->flip_queued_req)
- i915_gem_request_assign(&work->flip_queued_req, NULL);
+ i915_gem_object_put(work->pending_flip_obj);
mutex_unlock(&dev->struct_mutex);
- intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+ i915_gem_request_put(work->flip_queued_req);
+
+ intel_frontbuffer_flip_complete(to_i915(dev),
+ to_intel_plane(primary)->frontbuffer_bit);
intel_fbc_post_update(crtc);
drm_framebuffer_unreference(work->old_fb);
@@ -11047,10 +11591,8 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned reset_counter;
- reset_counter = i915_reset_counter(&dev_priv->gpu_error);
- if (crtc->reset_counter != reset_counter)
+ if (abort_flip_on_reset(crtc))
return true;
/*
@@ -11191,7 +11733,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
@@ -11207,13 +11749,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_emit(engine, MI_DISPLAY_FLIP |
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(engine, 0); /* aux display base address, unused */
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+ intel_ring_emit(ring, 0); /* aux display base address, unused */
return 0;
}
@@ -11225,7 +11767,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
@@ -11238,13 +11780,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+ intel_ring_emit(ring, MI_NOOP);
return 0;
}
@@ -11256,7 +11798,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
@@ -11270,11 +11812,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- intel_ring_emit(engine, MI_DISPLAY_FLIP |
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
- obj->tiling_mode);
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
+ intel_fb_modifier_to_tiling(fb->modifier[0]));
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -11282,7 +11824,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(engine, pf | pipesrc);
+ intel_ring_emit(ring, pf | pipesrc);
return 0;
}
@@ -11294,7 +11836,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
@@ -11304,10 +11846,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
if (ret)
return ret;
- intel_ring_emit(engine, MI_DISPLAY_FLIP |
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+ intel_ring_emit(ring, fb->pitches[0] |
+ intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -11317,7 +11860,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(engine, pf | pipesrc);
+ intel_ring_emit(ring, pf | pipesrc);
return 0;
}
@@ -11329,7 +11872,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
@@ -11350,7 +11893,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
}
len = 4;
- if (engine->id == RCS) {
+ if (req->engine->id == RCS) {
len += 6;
/*
* On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11388,30 +11931,32 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* for the RCS also doesn't appear to drop events. Setting the DERRMR
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
- if (engine->id == RCS) {
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(engine, DERRMR);
- intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ if (req->engine->id == RCS) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(ring, DERRMR);
+ intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
- intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
- intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(engine, DERRMR);
- intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
+ intel_ring_emit_reg(ring, DERRMR);
+ intel_ring_emit(ring,
+ i915_ggtt_offset(req->engine->scratch) + 256);
if (IS_GEN8(dev)) {
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
}
}
- intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
- intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(engine, (MI_NOOP));
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+ intel_ring_emit(ring, fb->pitches[0] |
+ intel_fb_modifier_to_tiling(fb->modifier[0]));
+ intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+ intel_ring_emit(ring, (MI_NOOP));
return 0;
}
@@ -11446,7 +11991,8 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
if (resv && !reservation_object_test_signaled_rcu(resv, false))
return true;
- return engine != i915_gem_request_get_engine(obj->last_write_req);
+ return engine != i915_gem_active_get_engine(&obj->last_write,
+ &obj->base.dev->struct_mutex);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11457,7 +12003,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
const enum pipe pipe = intel_crtc->pipe;
- u32 ctl, stride, tile_height;
+ u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
@@ -11478,20 +12024,6 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
}
/*
- * The stride is either expressed as a multiple of 64 bytes chunks for
- * linear buffers or in number of tiles for tiled buffers.
- */
- if (intel_rotation_90_or_270(rotation)) {
- /* stride = Surface height in tiles */
- tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
- stride = DIV_ROUND_UP(fb->height, tile_height);
- } else {
- stride = fb->pitches[0] /
- intel_fb_stride_alignment(dev_priv, fb->modifier[0],
- fb->pixel_format);
- }
-
- /*
* Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
* PLANE_SURF updates, the update is then guaranteed to be atomic.
*/
@@ -11507,15 +12039,13 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_framebuffer *intel_fb =
- to_intel_framebuffer(intel_crtc->base.primary->fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
i915_reg_t reg = DSPCNTR(intel_crtc->plane);
u32 dspcntr;
dspcntr = I915_READ(reg);
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
@@ -11538,9 +12068,8 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
struct reservation_object *resv;
if (work->flip_queued_req)
- WARN_ON(__i915_wait_request(work->flip_queued_req,
- false, NULL,
- &dev_priv->rps.mmioflips));
+ WARN_ON(i915_wait_request(work->flip_queued_req,
+ 0, NULL, NO_WAITBOOST));
/* For framebuffer backed by dmabuf, wait for fence */
resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -11651,7 +12180,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_flip_work *work;
struct intel_engine_cs *engine;
bool mmio_flip;
- struct drm_i915_gem_request *request = NULL;
+ struct drm_i915_gem_request *request;
+ struct i915_vma *vma;
int ret;
/*
@@ -11717,22 +12247,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* Reference the objects for the scheduled work. */
drm_framebuffer_reference(work->old_fb);
- drm_gem_object_reference(&obj->base);
crtc->primary->fb = fb;
update_state_fb(crtc->primary);
- intel_fbc_pre_update(intel_crtc, intel_crtc->config,
- to_intel_plane_state(primary->state));
-
- work->pending_flip_obj = obj;
+ work->pending_flip_obj = i915_gem_object_get(obj);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
- intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
- if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+ intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
ret = -EIO;
goto cleanup;
}
@@ -11744,13 +12270,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
engine = &dev_priv->engine[BCS];
- if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
+ if (fb->modifier[0] != old_fb->modifier[0])
/* vlv: DISPLAY_FLIP fails to change tiling */
engine = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
engine = &dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- engine = i915_gem_request_get_engine(obj->last_write_req);
+ engine = i915_gem_active_get_engine(&obj->last_write,
+ &obj->base.dev->struct_mutex);
if (engine == NULL || engine->id != RCS)
engine = &dev_priv->engine[BCS];
} else {
@@ -11759,47 +12286,52 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
mmio_flip = use_mmio_flip(engine, obj);
- /* When using CS flips, we want to emit semaphores between rings.
- * However, when using mmio flips we will create a task to do the
- * synchronisation, so all we want here is to pin the framebuffer
- * into the display plane and skip any waits.
- */
- if (!mmio_flip) {
- ret = i915_gem_object_sync(obj, engine, &request);
- if (!ret && !request) {
- request = i915_gem_request_alloc(engine, NULL);
- ret = PTR_ERR_OR_ZERO(request);
- }
-
- if (ret)
- goto cleanup_pending;
- }
-
- ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
- if (ret)
+ vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto cleanup_pending;
+ }
- work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
- obj, 0);
+ work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
work->gtt_offset += intel_crtc->dspaddr_offset;
work->rotation = crtc->primary->state->rotation;
+ /*
+ * There's the potential that the next frame will not be compatible with
+ * FBC, so we want to call pre_update() before the actual page flip.
+ * The problem is that pre_update() caches some information about the fb
+ * object, so we want to do this only after the object is pinned. Let's
+ * be on the safe side and do this immediately before scheduling the
+ * flip.
+ */
+ intel_fbc_pre_update(intel_crtc, intel_crtc->config,
+ to_intel_plane_state(primary->state));
+
if (mmio_flip) {
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
- i915_gem_request_assign(&work->flip_queued_req,
- obj->last_write_req);
-
+ work->flip_queued_req = i915_gem_active_get(&obj->last_write,
+ &obj->base.dev->struct_mutex);
schedule_work(&work->mmio_work);
} else {
- i915_gem_request_assign(&work->flip_queued_req, request);
+ request = i915_gem_request_alloc(engine, engine->last_context);
+ if (IS_ERR(request)) {
+ ret = PTR_ERR(request);
+ goto cleanup_unpin;
+ }
+
+ ret = i915_gem_request_await_object(request, obj, false);
+ if (ret)
+ goto cleanup_request;
+
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
if (ret)
- goto cleanup_unpin;
+ goto cleanup_request;
intel_mark_page_flip_active(intel_crtc, work);
+ work->flip_queued_req = i915_gem_request_get(request);
i915_add_request_no_flush(request);
}
@@ -11807,25 +12339,25 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
- intel_frontbuffer_flip_prepare(dev,
+ intel_frontbuffer_flip_prepare(to_i915(dev),
to_intel_plane(primary)->frontbuffer_bit);
trace_i915_flip_request(intel_crtc->plane, obj);
return 0;
+cleanup_request:
+ i915_add_request_no_flush(request);
cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
- if (!IS_ERR_OR_NULL(request))
- i915_add_request_no_flush(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
cleanup:
crtc->primary->fb = old_fb;
update_state_fb(crtc->primary);
- drm_gem_object_unreference_unlocked(&obj->base);
+ i915_gem_object_put_unlocked(obj);
drm_framebuffer_unreference(work->old_fb);
spin_lock_irq(&dev->event_lock);
@@ -11893,7 +12425,7 @@ static bool intel_wm_need_update(struct drm_plane *plane,
struct intel_plane_state *cur = to_intel_plane_state(plane->state);
/* Update watermarks on tiling or size changes. */
- if (new->visible != cur->visible)
+ if (new->base.visible != cur->base.visible)
return true;
if (!cur->base.fb || !new->base.fb)
@@ -11901,10 +12433,10 @@ static bool intel_wm_need_update(struct drm_plane *plane,
if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
cur->base.rotation != new->base.rotation ||
- drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
- drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
- drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
- drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
+ drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
+ drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
+ drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
+ drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
return true;
return false;
@@ -11912,10 +12444,10 @@ static bool intel_wm_need_update(struct drm_plane *plane,
static bool needs_scaling(struct intel_plane_state *state)
{
- int src_w = drm_rect_width(&state->src) >> 16;
- int src_h = drm_rect_height(&state->src) >> 16;
- int dst_w = drm_rect_width(&state->dst);
- int dst_h = drm_rect_height(&state->dst);
+ int src_w = drm_rect_width(&state->base.src) >> 16;
+ int src_h = drm_rect_height(&state->base.src) >> 16;
+ int dst_w = drm_rect_width(&state->base.dst);
+ int dst_h = drm_rect_height(&state->base.dst);
return (src_w != dst_w || src_h != dst_h);
}
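
needs_scaling() works because plane source rectangles in the atomic state are 16.16 fixed point while destination rectangles are integer pixels; shifting the source size right by 16 recovers whole pixels for the comparison. A tiny stand-alone illustration:

    #include <stdio.h>

    /* src sizes arrive in 16.16 fixed point; dst sizes are integers. */
    static int needs_scaling(unsigned int src_w_fp, unsigned int src_h_fp,
                             int dst_w, int dst_h)
    {
        int src_w = src_w_fp >> 16;
        int src_h = src_h_fp >> 16;

        return src_w != dst_w || src_h != dst_h;
    }

    int main(void)
    {
        /* a 1920x1080 source scanned out into a 1280x720 window */
        printf("%d\n", needs_scaling(1920u << 16, 1080u << 16, 1280, 720)); /* 1 */
        printf("%d\n", needs_scaling(1280u << 16,  720u << 16, 1280, 720)); /* 0 */
        return 0;
    }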
@@ -11946,8 +12478,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
return ret;
}
- was_visible = old_plane_state->visible;
- visible = to_intel_plane_state(plane_state)->visible;
+ was_visible = old_plane_state->base.visible;
+ visible = to_intel_plane_state(plane_state)->base.visible;
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
@@ -11963,7 +12495,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled)
- to_intel_plane_state(plane_state)->visible = visible = false;
+ to_intel_plane_state(plane_state)->base.visible = visible = false;
if (!was_visible && !visible)
return 0;
@@ -12167,22 +12699,22 @@ static void
connected_sink_compute_bpp(struct intel_connector *connector,
struct intel_crtc_state *pipe_config)
{
+ const struct drm_display_info *info = &connector->base.display_info;
int bpp = pipe_config->pipe_bpp;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
- connector->base.base.id,
- connector->base.name);
+ connector->base.base.id,
+ connector->base.name);
/* Don't use an invalid EDID bpc value */
- if (connector->base.display_info.bpc &&
- connector->base.display_info.bpc * 3 < bpp) {
+ if (info->bpc != 0 && info->bpc * 3 < bpp) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
- bpp, connector->base.display_info.bpc*3);
- pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ bpp, info->bpc * 3);
+ pipe_config->pipe_bpp = info->bpc * 3;
}
/* Clamp bpp to 8 on screens without EDID 1.4 */
- if (connector->base.display_info.bpc == 0 && bpp > 24) {
+ if (info->bpc == 0 && bpp > 24) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
bpp);
pipe_config->pipe_bpp = 24;
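
The two clamps in connected_sink_compute_bpp() reduce to a small pure function; a sketch with made-up inputs:

    #include <stdio.h>

    /* EDID-reported bpc caps the pipe at 3 * bpc; with no bpc reported
     * (pre-EDID-1.4 screens), fall back to a 24 bpp ceiling. */
    static int clamp_pipe_bpp(int bpp, int edid_bpc)
    {
        if (edid_bpc != 0 && edid_bpc * 3 < bpp)
            return edid_bpc * 3;
        if (edid_bpc == 0 && bpp > 24)
            return 24;
        return bpp;
    }

    int main(void)
    {
        printf("%d\n", clamp_pipe_bpp(30, 6)); /* 6 bpc panel: 30 -> 18 */
        printf("%d\n", clamp_pipe_bpp(30, 0)); /* no EDID bpc: 30 -> 24 */
        return 0;
    }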
@@ -12301,10 +12833,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
if (IS_BROXTON(dev)) {
- DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+ DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
"pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
- pipe_config->ddi_pll_sel,
pipe_config->dpll_hw_state.ebb0,
pipe_config->dpll_hw_state.ebb4,
pipe_config->dpll_hw_state.pll0,
@@ -12317,15 +12848,13 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.pll10,
pipe_config->dpll_hw_state.pcsdw12);
} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
- DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
+ DRM_DEBUG_KMS("dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
- pipe_config->ddi_pll_sel,
pipe_config->dpll_hw_state.ctrl1,
pipe_config->dpll_hw_state.cfgcr1,
pipe_config->dpll_hw_state.cfgcr2);
} else if (HAS_DDI(dev)) {
- DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
- pipe_config->ddi_pll_sel,
+ DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
pipe_config->dpll_hw_state.wrpll,
pipe_config->dpll_hw_state.spll);
} else {
@@ -12339,6 +12868,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("planes on this crtc\n");
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ char *format_name;
intel_plane = to_intel_plane(plane);
if (intel_plane->pipe != crtc->pipe)
continue;
@@ -12351,19 +12881,23 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
continue;
}
+ format_name = drm_get_format_name(fb->pixel_format);
+
DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
plane->base.id, plane->name);
DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
- fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->pixel_format));
+ fb->base.id, fb->width, fb->height, format_name);
DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
state->scaler_id,
- state->src.x1 >> 16, state->src.y1 >> 16,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16,
- state->dst.x1, state->dst.y1,
- drm_rect_width(&state->dst),
- drm_rect_height(&state->dst));
+ state->base.src.x1 >> 16,
+ state->base.src.y1 >> 16,
+ drm_rect_width(&state->base.src) >> 16,
+ drm_rect_height(&state->base.src) >> 16,
+ state->base.dst.x1, state->base.dst.y1,
+ drm_rect_width(&state->base.dst),
+ drm_rect_height(&state->base.dst));
+
+ kfree(format_name);
}
}
@@ -12372,6 +12906,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_connector *connector;
unsigned int used_ports = 0;
+ unsigned int used_mst_ports = 0;
/*
* Walk the connector list instead of the encoder
@@ -12408,11 +12943,20 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
return false;
used_ports |= port_mask;
+ break;
+ case INTEL_OUTPUT_DP_MST:
+ used_mst_ports |=
+ 1 << enc_to_mst(&encoder->base)->primary->port;
+ break;
default:
break;
}
}
+ /* can't mix MST and SST/HDMI on the same port */
+ if (used_ports & used_mst_ports)
+ return false;
+
return true;
}
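
The MST/SST conflict test above is a pair of port bitmasks: SST/HDMI sinks set a bit in used_ports, MST sinks set the same port's bit in used_mst_ports, and any intersection means one physical port was claimed both ways. A stand-alone demo:

    #include <stdio.h>

    enum { PORT_A, PORT_B, PORT_C, PORT_D };

    int main(void)
    {
        unsigned int used_ports = 0, used_mst_ports = 0;

        used_ports     |= 1u << PORT_B;  /* SST/HDMI claims port B */
        used_mst_ports |= 1u << PORT_B;  /* MST also claims port B */

        /* common bit set => same physical port claimed both ways */
        printf("conflict: %s\n",
               (used_ports & used_mst_ports) ? "yes" : "no");
        return 0;
    }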
@@ -12423,7 +12967,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
struct intel_crtc_scaler_state scaler_state;
struct intel_dpll_hw_state dpll_hw_state;
struct intel_shared_dpll *shared_dpll;
- uint32_t ddi_pll_sel;
bool force_thru;
/* FIXME: before the switch to atomic started, a new pipe_config was
@@ -12435,7 +12978,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
scaler_state = crtc_state->scaler_state;
shared_dpll = crtc_state->shared_dpll;
dpll_hw_state = crtc_state->dpll_hw_state;
- ddi_pll_sel = crtc_state->ddi_pll_sel;
force_thru = crtc_state->pch_pfit.force_thru;
memset(crtc_state, 0, sizeof *crtc_state);
@@ -12444,7 +12986,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
crtc_state->scaler_state = scaler_state;
crtc_state->shared_dpll = shared_dpll;
crtc_state->dpll_hw_state = dpll_hw_state;
- crtc_state->ddi_pll_sel = ddi_pll_sel;
crtc_state->pch_pfit.force_thru = force_thru;
}
@@ -12532,7 +13073,7 @@ encoder_retry:
encoder = to_intel_encoder(connector_state->best_encoder);
- if (!(encoder->compute_config(encoder, pipe_config))) {
+ if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
}
@@ -12620,12 +13161,6 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
return false;
}
-#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
- list_for_each_entry((intel_crtc), \
- &(dev)->mode_config.crtc_list, \
- base.head) \
- for_each_if (mask & (1 <<(intel_crtc)->pipe))
-
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
unsigned int m2, unsigned int n2,
@@ -12873,8 +13408,6 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(double_wide);
- PIPE_CONF_CHECK_X(ddi_pll_sel);
-
PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
@@ -12956,16 +13489,23 @@ static void verify_wm_state(struct drm_crtc *crtc,
hw_entry->start, hw_entry->end);
}
- /* cursor */
- hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c cursor "
- "(expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe),
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ /*
+ * cursor
+	 * If the cursor plane isn't active, we may not have updated its ddb
+	 * allocation. In that case, since the ddb allocation will be updated
+	 * once the plane becomes visible, we can skip this check.
+ */
+ if (intel_crtc->cursor_addr) {
+ hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+ sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c cursor "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
}
}
@@ -13580,8 +14120,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
if (!intel_plane_state->wait_req)
continue;
- ret = __i915_wait_request(intel_plane_state->wait_req,
- true, NULL, NULL);
+ ret = i915_wait_request(intel_plane_state->wait_req,
+ I915_WAIT_INTERRUPTIBLE,
+ NULL, NULL);
if (ret) {
/* Any hang should be swallowed by the wait */
WARN_ON(ret == -EIO);
@@ -13671,6 +14212,111 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
return false;
}
+static void intel_update_crtc(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *old_crtc_state,
+ unsigned int *crtc_vblank_mask)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
+ bool modeset = needs_modeset(crtc->state);
+
+ if (modeset) {
+ update_scanline_offset(intel_crtc);
+ dev_priv->display.crtc_enable(pipe_config, state);
+ } else {
+ intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+ }
+
+ if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
+ intel_fbc_enable(
+ intel_crtc, pipe_config,
+ to_intel_plane_state(crtc->primary->state));
+ }
+
+ drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+
+ if (needs_vblank_wait(pipe_config))
+ *crtc_vblank_mask |= drm_crtc_mask(crtc);
+}
+
+static void intel_update_crtcs(struct drm_atomic_state *state,
+ unsigned int *crtc_vblank_mask)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if (!crtc->state->active)
+ continue;
+
+ intel_update_crtc(crtc, state, old_crtc_state,
+ crtc_vblank_mask);
+ }
+}
+
+static void skl_update_crtcs(struct drm_atomic_state *state,
+ unsigned int *crtc_vblank_mask)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ unsigned int updated = 0;
+ bool progress;
+ enum pipe pipe;
+
+ /*
+ * Whenever the number of active pipes changes, we need to make sure we
+ * update the pipes in the right order so that their ddb allocations
+ * never overlap with each other in between CRTC updates. Otherwise we'll
+ * cause pipe underruns and other bad stuff.
+ */
+ do {
+ int i;
+ progress = false;
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ bool vbl_wait = false;
+ unsigned int cmask = drm_crtc_mask(crtc);
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ if (updated & cmask || !crtc->state->active)
+ continue;
+ if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
+ pipe))
+ continue;
+
+ updated |= cmask;
+
+ /*
+ * If this is an already active pipe, its DDB changed,
+ * and this isn't the last pipe that needs updating,
+ * then we need to wait for a vblank to pass for the
+ * new ddb allocation to take effect.
+ */
+ if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+ !crtc->state->active_changed &&
+ intel_state->wm_results.dirty_pipes != updated)
+ vbl_wait = true;
+
+ intel_update_crtc(crtc, state, old_crtc_state,
+ crtc_vblank_mask);
+
+ if (vbl_wait)
+ intel_wait_for_vblank(dev, pipe);
+
+ progress = true;
+ }
+ } while (progress);
+}
+
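
The loop above commits pipes in dependency order: a pipe is flushed only once
its new DDB span no longer overlaps the current span of any pipe that has not
been updated yet, repeating until no further progress is possible. A toy model
of that fixed-point ordering, with a hypothetical interval type standing in
for skl_ddb_allocation (not the driver's actual structures):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a per-pipe DDB span (start/end block indices). */
struct span { int start, end; };

static bool overlaps(struct span a, struct span b)
{
	return a.start < b.end && b.start < a.end;
}

/* Commit pipes in an order that keeps old and new spans from overlapping. */
static void update_in_safe_order(struct span *cur, const struct span *new,
				 int n)
{
	unsigned int updated = 0;
	bool progress;

	do {
		progress = false;
		for (int i = 0; i < n; i++) {
			bool blocked = false;

			if (updated & (1u << i))
				continue;
			for (int j = 0; j < n; j++) {
				/* A not-yet-updated pipe still occupies its
				 * old span; refuse to collide with it. */
				if (j != i && !(updated & (1u << j)) &&
				    overlaps(new[i], cur[j]))
					blocked = true;
			}
			if (blocked)
				continue;
			cur[i] = new[i];	/* commit this pipe */
			updated |= 1u << i;
			progress = true;
			printf("updated pipe %d\n", i);
		}
	} while (progress);
}

int main(void)
{
	struct span cur[2] = { { 0, 50 }, { 50, 100 } };
	struct span new[2] = { { 0, 70 }, { 70, 100 } };

	/* Pipe 1 must shrink before pipe 0 may grow into blocks 50..70;
	 * the loop discovers that ordering by itself. */
	update_in_safe_order(cur, new, 2);
	return 0;
}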
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -13688,13 +14334,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
+ to_intel_plane_state(plane->state);
if (!intel_plane_state->wait_req)
continue;
- ret = __i915_wait_request(intel_plane_state->wait_req,
- true, NULL, NULL);
+ ret = i915_wait_request(intel_plane_state->wait_req,
+ 0, NULL, NULL);
/* EIO should be eaten, and we can't get interrupted in the
* worker, and blocking commits have waited already. */
WARN_ON(ret);
@@ -13730,7 +14376,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (old_crtc_state->active) {
intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
- dev_priv->display.crtc_disable(crtc);
+ dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
intel_disable_shared_dpll(intel_crtc);
@@ -13763,23 +14409,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* SKL workaround: bspec recommends we disable the SAGV when we
* have more than one pipe enabled
*/
- if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
- skl_disable_sagv(dev_priv);
+ if (!intel_can_enable_sagv(state))
+ intel_disable_sagv(dev_priv);
intel_modeset_verify_disabled(dev);
}
- /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ /* Complete the events for pipes that have now been disabled */
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool modeset = needs_modeset(crtc->state);
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->state);
-
- if (modeset && crtc->state->active) {
- update_scanline_offset(to_intel_crtc(crtc));
- dev_priv->display.crtc_enable(crtc);
- }
/* Complete events for now disabled pipes here. */
if (modeset && !crtc->state->active && crtc->state->event) {
@@ -13789,21 +14427,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
crtc->state->event = NULL;
}
-
- if (!modeset)
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
-
- if (crtc->state->active &&
- drm_atomic_get_existing_plane_state(state, crtc->primary))
- intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
-
- if (crtc->state->active)
- drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
-
- if (pipe_config->base.active && needs_vblank_wait(pipe_config))
- crtc_vblank_mask |= 1 << i;
}
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
+
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
* fix this:
@@ -13839,9 +14467,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
}
- if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
- skl_can_enable_sagv(state))
- skl_enable_sagv(dev_priv);
+ if (intel_state->modeset && intel_can_enable_sagv(state))
+ intel_enable_sagv(dev_priv);
drm_atomic_helper_commit_hw_done(state);
@@ -13882,19 +14509,12 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state;
struct drm_plane *plane;
- struct drm_i915_gem_object *obj, *old_obj;
- struct intel_plane *intel_plane;
int i;
- mutex_lock(&state->dev->struct_mutex);
- for_each_plane_in_state(state, plane, old_plane_state, i) {
- obj = intel_fb_obj(plane->state->fb);
- old_obj = intel_fb_obj(old_plane_state->fb);
- intel_plane = to_intel_plane(plane);
-
- i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
- }
- mutex_unlock(&state->dev->struct_mutex);
+ for_each_plane_in_state(state, plane, old_plane_state, i)
+ i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
+ intel_fb_obj(plane->state->fb),
+ to_intel_plane(plane)->frontbuffer_bit);
}
/**
@@ -13990,8 +14610,6 @@ out:
drm_atomic_state_free(state);
}
-#undef for_each_intel_crtc_masked
-
/*
* FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
* drm_atomic_helper_legacy_gamma_set() directly.
@@ -14060,7 +14678,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
*/
int
intel_prepare_plane_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state)
+ struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct drm_framebuffer *fb = new_state->fb;
@@ -14119,15 +14737,17 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+ struct i915_vma *vma;
+
+ vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+ if (IS_ERR(vma))
+ ret = PTR_ERR(vma);
}
if (ret == 0) {
- struct intel_plane_state *plane_state =
- to_intel_plane_state(new_state);
-
- i915_gem_request_assign(&plane_state->wait_req,
- obj->last_write_req);
+ to_intel_plane_state(new_state)->wait_req =
+ i915_gem_active_get(&obj->last_write,
+ &obj->base.dev->struct_mutex);
}
return ret;
@@ -14144,10 +14764,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
- const struct drm_plane_state *old_state)
+ struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct intel_plane_state *old_intel_state;
+ struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -14160,6 +14781,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
!INTEL_INFO(dev)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+ i915_gem_request_assign(&intel_state->wait_req, NULL);
i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
@@ -14194,13 +14816,14 @@ intel_check_primary_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_crtc *crtc = state->base.crtc;
- struct drm_framebuffer *fb = state->base.fb;
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
bool can_position = false;
+ int ret;
- if (INTEL_INFO(plane->dev)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
/* use scaler when colorkey is not required */
if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
min_scale = 1;
@@ -14209,22 +14832,35 @@ intel_check_primary_plane(struct drm_plane *plane,
can_position = true;
}
- return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
- &state->dst, &state->clip,
- state->base.rotation,
- min_scale, max_scale,
- can_position, true,
- &state->visible);
+ ret = drm_plane_helper_check_state(&state->base,
+ &state->clip,
+ min_scale, max_scale,
+ can_position, true);
+ if (ret)
+ return ret;
+
+ if (!state->base.fb)
+ return 0;
+
+ if (INTEL_GEN(dev_priv) >= 9) {
+ ret = skl_check_plane_surface(state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *old_intel_state =
to_intel_crtc_state(old_crtc_state);
bool modeset = needs_modeset(crtc->state);
+ enum pipe pipe = intel_crtc->pipe;
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
@@ -14239,8 +14875,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
if (to_intel_crtc_state(crtc->state)->update_pipe)
intel_update_pipe_config(intel_crtc, old_intel_state);
- else if (INTEL_INFO(dev)->gen >= 9)
+ else if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(intel_crtc);
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe),
+ dev_priv->wm.skl_hw.wm_linetime[pipe]);
+ }
}
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -14374,11 +15014,11 @@ fail:
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
{
if (!dev->mode_config.rotation_property) {
- unsigned long flags = BIT(DRM_ROTATE_0) |
- BIT(DRM_ROTATE_180);
+ unsigned long flags = DRM_ROTATE_0 |
+ DRM_ROTATE_180;
if (INTEL_INFO(dev)->gen >= 9)
- flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
+ flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev, flags);
@@ -14394,19 +15034,17 @@ intel_check_cursor_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = to_intel_plane(plane)->pipe;
unsigned stride;
int ret;
- ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
- &state->dst, &state->clip,
- state->base.rotation,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true, &state->visible);
+ ret = drm_plane_helper_check_state(&state->base,
+ &state->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
if (ret)
return ret;
@@ -14443,7 +15081,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
* Refuse to put the cursor into that compromised position.
*/
if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
- state->visible && state->base.crtc_x < 0) {
+ state->base.visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -14475,7 +15113,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev)->cursor_needs_physical)
- addr = i915_gem_obj_ggtt_offset(obj);
+ addr = i915_gem_object_ggtt_offset(obj, NULL);
else
addr = obj->phys_handle->busaddr;
@@ -14521,8 +15159,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
if (!dev->mode_config.rotation_property)
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
- BIT(DRM_ROTATE_0) |
- BIT(DRM_ROTATE_180));
+ DRM_ROTATE_0 |
+ DRM_ROTATE_180);
if (dev->mode_config.rotation_property)
drm_object_attach_property(&cursor->base.base,
dev->mode_config.rotation_property,
@@ -14728,12 +15366,50 @@ static bool intel_crt_present(struct drm_device *dev)
return true;
}
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+{
+ int pps_num;
+ int pps_idx;
+
+ if (HAS_DDI(dev_priv))
+ return;
+ /*
+ * This w/a is needed at least on CPT/PPT, but to be sure apply it
+ * everywhere registers can be write protected.
+ */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_num = 2;
+ else
+ pps_num = 1;
+
+ for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+ u32 val = I915_READ(PP_CONTROL(pps_idx));
+
+ val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
+ I915_WRITE(PP_CONTROL(pps_idx), val);
+ }
+}
+
+static void intel_pps_init(struct drm_i915_private *dev_priv)
+{
+ if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv))
+ dev_priv->pps_mmio_base = PCH_PPS_BASE;
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->pps_mmio_base = VLV_PPS_BASE;
+ else
+ dev_priv->pps_mmio_base = PPS_BASE;
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
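
intel_pps_unlock_regs_wa() above is a read-modify-write over each power
sequencer's PP_CONTROL register: clear the unlock-key field, then install the
magic value that keeps the panel power registers writable. A user-space-style
sketch with MMIO faked by an array; the mask/magic values mirror the driver's
PANEL_UNLOCK_MASK/PANEL_UNLOCK_REGS layout but should be treated as
assumptions here:

#include <stdint.h>
#include <stdio.h>

#define PANEL_UNLOCK_MASK	0xffff0000u	/* key field (assumed layout) */
#define PANEL_UNLOCK_REGS	0xabcd0000u	/* unlock magic (assumed)     */

/* Fake MMIO: one PP_CONTROL word per power sequencer. */
static uint32_t pp_control[2] = { 0x00000003u, 0x12340001u };

static void pps_unlock_wa(int pps_num)
{
	for (int i = 0; i < pps_num; i++) {
		uint32_t val = pp_control[i];

		/* Drop whatever key is present, install the unlock key. */
		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		pp_control[i] = val;
	}
}

int main(void)
{
	pps_unlock_wa(2);	/* two sequencers, as on VLV/CHV */
	for (int i = 0; i < 2; i++)
		printf("PP_CONTROL(%d) = 0x%08x\n", i, pp_control[i]);
	return 0;
}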
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
+ intel_pps_init(dev_priv);
+
/*
* intel_edp_init_connector() depends on this completing first, to
* prevent the registration of both eDP and LVDS and the incorrect
@@ -14921,7 +15597,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
drm_framebuffer_cleanup(fb);
mutex_lock(&dev->struct_mutex);
WARN_ON(!intel_fb->obj->framebuffer_references--);
- drm_gem_object_unreference(&intel_fb->obj->base);
+ i915_gem_object_put(intel_fb->obj);
mutex_unlock(&dev->struct_mutex);
kfree(intel_fb);
}
@@ -15001,24 +15677,27 @@ static int intel_framebuffer_init(struct drm_device *dev,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned int aligned_height;
+ unsigned int tiling = i915_gem_object_get_tiling(obj);
int ret;
u32 pitch_limit, stride_alignment;
+ char *format_name;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
- /* Enforce that fb modifier and tiling mode match, but only for
- * X-tiled. This is needed for FBC. */
- if (!!(obj->tiling_mode == I915_TILING_X) !=
- !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
+ /*
+ * If there's a fence, enforce that
+ * the fb modifier and tiling mode match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
return -EINVAL;
}
} else {
- if (obj->tiling_mode == I915_TILING_X)
+ if (tiling == I915_TILING_X) {
mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
- else if (obj->tiling_mode == I915_TILING_Y) {
+ } else if (tiling == I915_TILING_Y) {
DRM_DEBUG("No Y tiling for legacy addfb\n");
return -EINVAL;
}
@@ -15042,6 +15721,16 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
}
+ /*
+ * gen2/3 display engine uses the fence if present,
+ * so the tiling mode must match the fb modifier exactly.
+ */
+ if (INTEL_INFO(dev_priv)->gen < 4 &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
+ return -EINVAL;
+ }
+
stride_alignment = intel_fb_stride_alignment(dev_priv,
mode_cmd->modifier[0],
mode_cmd->pixel_format);
@@ -15061,10 +15750,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
return -EINVAL;
}
- if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
- mode_cmd->pitches[0] != obj->stride) {
+ /*
+ * If there's a fence, enforce that
+ * the fb pitch and fence stride match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], obj->stride);
+ mode_cmd->pitches[0],
+ i915_gem_object_get_stride(obj));
return -EINVAL;
}
@@ -15077,16 +15771,18 @@ static int intel_framebuffer_init(struct drm_device *dev,
break;
case DRM_FORMAT_XRGB1555:
if (INTEL_INFO(dev)->gen > 3) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format));
+ format_name = drm_get_format_name(mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR8888:
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
INTEL_INFO(dev)->gen < 9) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format));
+ format_name = drm_get_format_name(mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
break;
@@ -15094,15 +15790,17 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
if (INTEL_INFO(dev)->gen < 4) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format));
+ format_name = drm_get_format_name(mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
break;
case DRM_FORMAT_ABGR2101010:
if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format));
+ format_name = drm_get_format_name(mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
break;
@@ -15111,14 +15809,16 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
if (INTEL_INFO(dev)->gen < 5) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format));
+ format_name = drm_get_format_name(mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
break;
default:
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format));
+ format_name = drm_get_format_name(mode_cmd->pixel_format);
+ DRM_DEBUG("unsupported pixel format: %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -15126,17 +15826,12 @@ static int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
- aligned_height = intel_fb_align_height(dev, mode_cmd->height,
- mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
- /* FIXME drm helper for size checks (especially planar formats)? */
- if (obj->base.size < aligned_height * mode_cmd->pitches[0])
- return -EINVAL;
-
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
- intel_fill_fb_info(dev_priv, &intel_fb->base);
+ ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
+ if (ret)
+ return ret;
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
@@ -15158,13 +15853,13 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
- if (&obj->base == NULL)
+ obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
+ if (!obj)
return ERR_PTR(-ENOENT);
fb = intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb))
- drm_gem_object_unreference_unlocked(&obj->base);
+ i915_gem_object_put_unlocked(obj);
return fb;
}
@@ -15347,6 +16042,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
skl_modeset_calc_cdclk;
}
+ if (dev_priv->info.gen >= 9)
+ dev_priv->display.update_crtcs = skl_update_crtcs;
+ else
+ dev_priv->display.update_crtcs = intel_update_crtcs;
+
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
@@ -15548,15 +16248,16 @@ static void intel_init_quirks(struct drm_device *dev)
static void i915_disable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u8 sr1;
i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
- vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
outb(sr1 | 1<<5, VGA_SR_DATA);
- vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
@@ -15572,7 +16273,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
intel_init_clock_gating(dev);
- intel_enable_gt_powersave(dev_priv);
}
/*
@@ -15839,15 +16539,22 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
return false;
}
-static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
+static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_connector *connector;
for_each_connector_on_encoder(dev, &encoder->base, connector)
- return true;
+ return connector;
- return false;
+ return NULL;
+}
+
+static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
+ enum transcoder pch_transcoder)
+{
+ return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
+ (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}
static void intel_sanitize_crtc(struct intel_crtc *crtc)
@@ -15893,7 +16600,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* Temporarily change the plane mapping and disable everything
* ... */
plane = crtc->plane;
- to_intel_plane_state(crtc->base.primary->state)->visible = true;
+ to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
crtc->plane = !plane;
intel_crtc_disable_noatomic(&crtc->base);
crtc->plane = plane;
@@ -15928,14 +16635,23 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* worst a fifo underrun happens which also sets this to false.
*/
crtc->cpu_fifo_underrun_disabled = true;
- crtc->pch_fifo_underrun_disabled = true;
+ /*
+ * We track the PCH transcoder underrun reporting state
+ * within the crtc: the crtc for pipe A houses the underrun
+ * reporting state for PCH transcoder A, the crtc for pipe B
+ * houses it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
+ * and marking underrun reporting as disabled for the non-existing
+ * PCH transcoders B and C would prevent enabling the south
+ * error interrupt (see cpt_can_enable_serr_int()).
+ */
+ if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
+ crtc->pch_fifo_underrun_disabled = true;
}
}
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_connector *connector;
- struct drm_device *dev = encoder->base.dev;
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
@@ -15943,7 +16659,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
bool has_active_crtc = encoder->base.crtc &&
to_intel_crtc(encoder->base.crtc)->active;
- if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
+ connector = intel_encoder_find_connector(encoder);
+ if (connector && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
encoder->base.name);
@@ -15952,12 +16669,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* fallout from our resume register restoring. Disable
* the encoder manually again. */
if (encoder->base.crtc) {
+ struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
- encoder->disable(encoder);
+ encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
if (encoder->post_disable)
- encoder->post_disable(encoder);
+ encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
}
encoder->base.crtc = NULL;
@@ -15965,12 +16684,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* a bug in one of the get_hw_state functions. Or someplace else
* in our code, like the register restore mess on resume. Clamp
* things to off as a safer default. */
- for_each_intel_connector(dev, connector) {
- if (connector->encoder != encoder)
- continue;
- connector->base.dpms = DRM_MODE_DPMS_OFF;
- connector->base.encoder = NULL;
- }
+
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
}
/* Enabled encoders without active connectors will be fixed in
* the crtc fixup. */
@@ -16020,10 +16736,10 @@ static void readout_plane_state(struct intel_crtc *crtc)
struct intel_plane_state *plane_state =
to_intel_plane_state(primary->state);
- plane_state->visible = crtc->active &&
+ plane_state->base.visible = crtc->active &&
primary_get_hw_state(to_intel_plane(primary));
- if (plane_state->visible)
+ if (plane_state->base.visible)
crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}
@@ -16282,7 +16998,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
- int ret;
intel_init_gt_powersave(dev_priv);
@@ -16296,15 +17011,17 @@ void intel_modeset_gem_init(struct drm_device *dev)
* for this.
*/
for_each_crtc(dev, c) {
+ struct i915_vma *vma;
+
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(c->primary->fb,
+ vma = intel_pin_and_fence_fb_obj(c->primary->fb,
c->primary->state->rotation);
mutex_unlock(&dev->struct_mutex);
- if (ret) {
+ if (IS_ERR(vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 21b04c3eda41..bf344d08356a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -190,6 +190,29 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
return (max_link_clock * max_lanes * 8) / 10;
}
+static int
+intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &intel_dig_port->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int max_dotclk = dev_priv->max_dotclk_freq;
+ int ds_max_dotclk;
+
+ int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+
+ if (type != DP_DS_PORT_TYPE_VGA)
+ return max_dotclk;
+
+ ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+
+ if (ds_max_dotclk != 0)
+ max_dotclk = min(max_dotclk, ds_max_dotclk);
+
+ return max_dotclk;
+}
+
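
The helper's policy reduces to a min(): start from the source's maximum
dotclock and, when the branch device reports a DP-to-VGA converter with its
own pixel clock cap, clamp to that. A standalone sketch with invented
frequencies:

#include <stdio.h>

/* Clamp the source dotclock by a downstream converter's limit (0 = none). */
static int max_dotclock(int source_max_khz, int ds_max_khz)
{
	if (ds_max_khz != 0 && ds_max_khz < source_max_khz)
		return ds_max_khz;
	return source_max_khz;
}

int main(void)
{
	/* e.g. a 540 MHz source behind a 180 MHz DP-to-VGA dongle */
	printf("%d kHz\n", max_dotclock(540000, 180000));	/* -> 180000 */
	printf("%d kHz\n", max_dotclock(540000, 0));		/* -> 540000 */
	return 0;
}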
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -199,7 +222,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int max_dotclk;
+
+ max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
if (is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
@@ -256,6 +281,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp);
+static void
+intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
@@ -463,13 +490,13 @@ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
+ return I915_READ(PP_STATUS(pipe)) & PP_ON;
}
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+ return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
@@ -486,7 +513,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe;
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+ u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel != PANEL_PORT_SELECT_VLV(port))
@@ -583,30 +610,21 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
struct intel_dp *intel_dp,
struct pps_registers *regs)
{
+ int pps_idx = 0;
+
memset(regs, 0, sizeof(*regs));
- if (IS_BROXTON(dev_priv)) {
- int idx = bxt_power_sequencer_idx(intel_dp);
-
- regs->pp_ctrl = BXT_PP_CONTROL(idx);
- regs->pp_stat = BXT_PP_STATUS(idx);
- regs->pp_on = BXT_PP_ON_DELAYS(idx);
- regs->pp_off = BXT_PP_OFF_DELAYS(idx);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- regs->pp_ctrl = PCH_PP_CONTROL;
- regs->pp_stat = PCH_PP_STATUS;
- regs->pp_on = PCH_PP_ON_DELAYS;
- regs->pp_off = PCH_PP_OFF_DELAYS;
- regs->pp_div = PCH_PP_DIVISOR;
- } else {
- enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+ if (IS_BROXTON(dev_priv))
+ pps_idx = bxt_power_sequencer_idx(intel_dp);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pps_idx = vlv_power_sequencer_pipe(intel_dp);
- regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe);
- regs->pp_stat = VLV_PIPE_PP_STATUS(pipe);
- regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe);
- regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe);
- regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe);
- }
+ regs->pp_ctrl = PP_CONTROL(pps_idx);
+ regs->pp_stat = PP_STATUS(pps_idx);
+ regs->pp_on = PP_ON_DELAYS(pps_idx);
+ regs->pp_off = PP_OFF_DELAYS(pps_idx);
+ if (!IS_BROXTON(dev_priv))
+ regs->pp_div = PP_DIVISOR(pps_idx);
}
static i915_reg_t
@@ -651,8 +669,8 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
i915_reg_t pp_ctrl_reg, pp_div_reg;
u32 pp_div;
- pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
- pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
+ pp_ctrl_reg = PP_CONTROL(pipe);
+ pp_div_reg = PP_DIVISOR(pipe);
pp_div = I915_READ(pp_div_reg);
pp_div &= PP_REFERENCE_DIVIDER_MASK;
@@ -1041,10 +1059,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (WARN_ON(txsize > 20))
return -E2BIG;
+ WARN_ON(!msg->buffer != !msg->size);
+
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
- else
- WARN_ON(msg->size);
ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
if (ret > 0) {
@@ -1090,6 +1108,44 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
+static enum port intel_aux_port(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port aux_port;
+
+ if (!info->alternate_aux_channel) {
+ DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+ port_name(port), port_name(port));
+ return port;
+ }
+
+ switch (info->alternate_aux_channel) {
+ case DP_AUX_A:
+ aux_port = PORT_A;
+ break;
+ case DP_AUX_B:
+ aux_port = PORT_B;
+ break;
+ case DP_AUX_C:
+ aux_port = PORT_C;
+ break;
+ case DP_AUX_D:
+ aux_port = PORT_D;
+ break;
+ default:
+ MISSING_CASE(info->alternate_aux_channel);
+ aux_port = PORT_A;
+ break;
+ }
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+ port_name(aux_port), port_name(port));
+
+ return aux_port;
+}
+
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -1150,36 +1206,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
}
}
-/*
- * On SKL we don't have Aux for port E so we rely
- * on VBT to set a proper alternate aux channel.
- */
-static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
-{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[PORT_E];
-
- switch (info->alternate_aux_channel) {
- case DP_AUX_A:
- return PORT_A;
- case DP_AUX_B:
- return PORT_B;
- case DP_AUX_C:
- return PORT_C;
- case DP_AUX_D:
- return PORT_D;
- default:
- MISSING_CASE(info->alternate_aux_channel);
- return PORT_A;
- }
-}
-
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
- if (port == PORT_E)
- port = skl_porte_aux_port(dev_priv);
-
switch (port) {
case PORT_A:
case PORT_B:
@@ -1195,9 +1224,6 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
- if (port == PORT_E)
- port = skl_porte_aux_port(dev_priv);
-
switch (port) {
case PORT_A:
case PORT_B:
@@ -1235,7 +1261,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = intel_aux_port(dev_priv,
+ dp_to_dig_port(intel_dp)->port);
int i;
intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
@@ -1250,7 +1277,7 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
}
static void
-intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
+intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
@@ -1426,6 +1453,44 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("common rates: %s\n", str);
}
+static void intel_dp_print_hw_revision(struct intel_dp *intel_dp)
+{
+ uint8_t rev;
+ int len;
+
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT))
+ return;
+
+ len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_HW_REV, &rev, 1);
+ if (len < 0)
+ return;
+
+ DRM_DEBUG_KMS("sink hw revision: %d.%d\n", (rev & 0xf0) >> 4, rev & 0xf);
+}
+
+static void intel_dp_print_sw_revision(struct intel_dp *intel_dp)
+{
+ uint8_t rev[2];
+ int len;
+
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT))
+ return;
+
+ len = drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_SW_REV, &rev, 2);
+ if (len < 0)
+ return;
+
+ DRM_DEBUG_KMS("sink sw revision: %d.%d\n", rev[0], rev[1]);
+}
+
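
Both printers decode simple fixed-point revisions: the hardware revision
packs major.minor into one byte as high/low nibbles, while the software
revision is two bytes, major then minor. A sketch of the decoding with
invented sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t hw_rev = 0x12;			/* one byte: major<<4 | minor */
	uint8_t sw_rev[2] = { 1, 4 };		/* two bytes: major, minor */

	printf("hw %d.%d\n", (hw_rev & 0xf0) >> 4, hw_rev & 0x0f);	/* 1.2 */
	printf("sw %d.%d\n", sw_rev[0], sw_rev[1]);			/* 1.4 */
	return 0;
}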
static int rate_to_index(int find, const int *rates)
{
int i = 0;
@@ -1447,7 +1512,7 @@ intel_dp_max_link_rate(struct intel_dp *intel_dp)
if (WARN_ON(len <= 0))
return 162000;
- return rates[rate_to_index(0, rates) - 1];
+ return rates[len - 1];
}
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
@@ -1468,9 +1533,24 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
}
}
+static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config)
+{
+ int bpp, bpc;
+
+ bpp = pipe_config->pipe_bpp;
+ bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
+
+ if (bpc > 0)
+ bpp = min(bpp, 3*bpc);
+
+ return bpp;
+}
+
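
intel_dp_compute_bpp() caps the pipe bpp at three times the downstream
maximum bits per component, one factor per RGB channel: a 30 bpp pipe behind
an 8 bpc branch device comes out at min(30, 3*8) = 24. A minimal sketch of
the clamp:

#include <stdio.h>

/* bpp clamp: three components times the downstream max bits-per-component. */
static int compute_bpp(int pipe_bpp, int ds_max_bpc)
{
	if (ds_max_bpc > 0 && 3 * ds_max_bpc < pipe_bpp)
		return 3 * ds_max_bpc;
	return pipe_bpp;
}

int main(void)
{
	printf("%d\n", compute_bpp(30, 8));	/* -> 24 */
	printf("%d\n", compute_bpp(24, 0));	/* -> 24, no downstream limit */
	return 0;
}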
bool
intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1533,7 +1613,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
- bpp = pipe_config->pipe_bpp;
+ bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
if (is_edp(intel_dp)) {
/* Get bpp from vbt only for panels that dont have bpp in edid */
@@ -1647,22 +1727,28 @@ found:
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
+ int link_rate, uint8_t lane_count,
+ bool link_mst)
{
- intel_dp->link_rate = pipe_config->port_clock;
- intel_dp->lane_count = pipe_config->lane_count;
+ intel_dp->link_rate = link_rate;
+ intel_dp->lane_count = lane_count;
+ intel_dp->link_mst = link_mst;
}
-static void intel_dp_prepare(struct intel_encoder *encoder)
+static void intel_dp_prepare(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- intel_dp_set_link_params(intel_dp, crtc->config);
+ intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
+ pipe_config->lane_count,
+ intel_crtc_has_type(pipe_config,
+ INTEL_OUTPUT_DP_MST));
/*
* There are four kinds of DP registers:
@@ -1688,7 +1774,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
+ intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
/* Split out the IBX/CPU vs CPT settings */
@@ -1716,7 +1802,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
- !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
+ !IS_CHERRYVIEW(dev) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1835,7 +1921,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
control = I915_READ(_pp_ctrl_reg(intel_dp));
- if (!IS_BROXTON(dev)) {
+ if (WARN_ON(!HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
}
@@ -1956,7 +2043,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
- if ((pp & POWER_TARGET_ON) == 0)
+ if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
power_domain = intel_display_port_aux_power_domain(intel_encoder);
@@ -2043,7 +2130,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
POSTING_READ(pp_ctrl_reg);
}
- pp |= POWER_TARGET_ON;
+ pp |= PANEL_POWER_ON;
if (!IS_GEN5(dev))
pp |= PANEL_POWER_RESET;
@@ -2095,7 +2182,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
+ pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
EDP_BLC_ENABLE);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -2254,10 +2341,10 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
-static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -2265,11 +2352,11 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
assert_edp_pll_disabled(dev_priv);
DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
- crtc->config->port_clock);
+ pipe_config->port_clock);
intel_dp->DP &= ~DP_PLL_FREQ_MASK;
- if (crtc->config->port_clock == 162000)
+ if (pipe_config->port_clock == 162000)
intel_dp->DP |= DP_PLL_FREQ_162MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
@@ -2478,16 +2565,17 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static void intel_disable_dp(struct intel_encoder *encoder)
+static void intel_disable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
- if (HAS_PSR(dev) && !HAS_DDI(dev))
+ if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
intel_psr_disable(intel_dp);
/* Make sure the panel is off before trying to change the mode. But also
@@ -2498,11 +2586,13 @@ static void intel_disable_dp(struct intel_encoder *encoder)
intel_edp_panel_off(intel_dp);
/* disable the port before the pipe on g4x */
- if (INTEL_INFO(dev)->gen < 5)
+ if (INTEL_GEN(dev_priv) < 5)
intel_dp_link_down(intel_dp);
}
-static void ilk_post_disable_dp(struct intel_encoder *encoder)
+static void ilk_post_disable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -2514,14 +2604,18 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder)
ironlake_edp_pll_off(intel_dp);
}
-static void vlv_post_disable_dp(struct intel_encoder *encoder)
+static void vlv_post_disable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_dp_link_down(intel_dp);
}
-static void chv_post_disable_dp(struct intel_encoder *encoder)
+static void chv_post_disable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2547,6 +2641,10 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
+ if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+ DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
+ dp_train_pat & DP_TRAINING_PATTERN_MASK);
+
if (HAS_DDI(dev)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2588,7 +2686,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
- DRM_ERROR("DP training pattern 3 not supported\n");
+ DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
@@ -2613,7 +2711,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
if (IS_CHERRYVIEW(dev)) {
*DP |= DP_LINK_TRAIN_PAT_3_CHV;
} else {
- DRM_ERROR("DP training pattern 3 not supported\n");
+ DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2;
}
break;
@@ -2621,19 +2719,15 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
}
-static void intel_dp_enable_port(struct intel_dp *intel_dp)
+static void intel_dp_enable_port(struct intel_dp *intel_dp,
+ struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc =
- to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
/* enable with pattern 1 (as per spec) */
- _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
- DP_TRAINING_PATTERN_1);
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
/*
* Magic for VLV/CHV. We _must_ first set up the register
@@ -2642,14 +2736,15 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
* fail when the power sequencer is freshly used for this port.
*/
intel_dp->DP |= DP_PORT_EN;
- if (crtc->config->has_audio)
+ if (old_crtc_state->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
}
-static void intel_enable_dp(struct intel_encoder *encoder)
+static void intel_enable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2666,7 +2761,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_init_panel_power_sequencer(intel_dp);
- intel_dp_enable_port(intel_dp);
+ intel_dp_enable_port(intel_dp, pipe_config);
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
@@ -2678,7 +2773,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
unsigned int lane_mask = 0x0;
if (IS_CHERRYVIEW(dev))
- lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
+ lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
lane_mask);
@@ -2688,22 +2783,26 @@ static void intel_enable_dp(struct intel_encoder *encoder)
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
- if (crtc->config->has_audio) {
+ if (pipe_config->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(pipe));
intel_audio_codec_enable(encoder);
}
}
-static void g4x_enable_dp(struct intel_encoder *encoder)
+static void g4x_enable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_enable_dp(encoder);
+ intel_enable_dp(encoder, pipe_config);
intel_edp_backlight_on(intel_dp);
}
-static void vlv_enable_dp(struct intel_encoder *encoder)
+static void vlv_enable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2711,16 +2810,18 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
intel_psr_enable(intel_dp);
}
-static void g4x_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
- intel_dp_prepare(encoder);
+ intel_dp_prepare(encoder, pipe_config);
/* Only ilk+ has port A */
if (port == PORT_A)
- ironlake_edp_pll_on(intel_dp);
+ ironlake_edp_pll_on(intel_dp, pipe_config);
}
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
@@ -2728,7 +2829,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
- i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+ i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
edp_panel_vdd_off_sync(intel_dp);
@@ -2826,38 +2927,48 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
-static void vlv_pre_enable_dp(struct intel_encoder *encoder)
+static void vlv_pre_enable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
vlv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder);
+ intel_enable_dp(encoder, pipe_config);
}
-static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
- intel_dp_prepare(encoder);
+ intel_dp_prepare(encoder, pipe_config);
vlv_phy_pre_pll_enable(encoder);
}
-static void chv_pre_enable_dp(struct intel_encoder *encoder)
+static void chv_pre_enable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
chv_phy_pre_encoder_enable(encoder);
- intel_enable_dp(encoder);
+ intel_enable_dp(encoder, pipe_config);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
}
-static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
- intel_dp_prepare(encoder);
+ intel_dp_prepare(encoder, pipe_config);
chv_phy_pre_pll_enable(encoder);
}
-static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
+static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
chv_phy_post_pll_disable(encoder);
}
@@ -3395,84 +3506,67 @@ intel_dp_link_down(struct intel_dp *intel_dp)
}
static bool
-intel_dp_get_dpcd(struct intel_dp *intel_dp)
+intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
- if (intel_dp->dpcd[DP_DPCD_REV] == 0)
- return false; /* DPCD not present */
+ return intel_dp->dpcd[DP_DPCD_REV] != 0;
+}
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
- &intel_dp->sink_count, 1) < 0)
- return false;
+static bool
+intel_edp_init_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
- /*
- * Sink count can change between short pulse hpd hence
- * a member variable in intel_dp will track any changes
- * between short pulse interrupts.
- */
- intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
+ /* this function is meant to be called only once */
+ WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
- /*
- * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
- * a dongle is present but no display. Unless we require to know
- * if a dongle is present or not, we don't need to update
- * downstream port information. So, an early return here saves
- * time from performing other operations which are not required.
- */
- if (!is_edp(intel_dp) && !intel_dp->sink_count)
+ if (!intel_dp_read_dpcd(intel_dp))
return false;
- /* Check if the panel supports PSR */
- memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
- if (is_edp(intel_dp)) {
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
- intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
- if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
- dev_priv->psr.sink_support = true;
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
- }
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+ dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+ DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
- if (INTEL_INFO(dev)->gen >= 9 &&
- (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
- uint8_t frame_sync_cap;
-
- dev_priv->psr.sink_support = true;
- drm_dp_dpcd_read(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
- &frame_sync_cap, 1);
- dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
- /* PSR2 needs frame sync as well */
- dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
- DRM_DEBUG_KMS("PSR2 %s on sink",
- dev_priv->psr.psr2_support ? "supported" : "not supported");
- }
-
- /* Read the eDP Display control capabilities registers */
- memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
- if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
- intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
- sizeof(intel_dp->edp_dpcd)))
- DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
- intel_dp->edp_dpcd);
- }
-
- DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
- yesno(intel_dp_source_supports_hbr2(intel_dp)),
- yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
+ /* Check if the panel supports PSR */
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+ dev_priv->psr.sink_support = true;
+ DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+ }
+
+ if (INTEL_GEN(dev_priv) >= 9 &&
+ (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
+ uint8_t frame_sync_cap;
+
+ dev_priv->psr.sink_support = true;
+ drm_dp_dpcd_read(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+ &frame_sync_cap, 1);
+ dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
+ /* PSR2 needs frame sync as well */
+ dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+ DRM_DEBUG_KMS("PSR2 %s on sink",
+ dev_priv->psr.psr2_support ? "supported" : "not supported");
+ }
+
+ /* Read the eDP Display control capabilities registers */
+ if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+ drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ sizeof(intel_dp->edp_dpcd))
+ DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+ intel_dp->edp_dpcd);
/* Intermediate frequency support */
- if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */
+ if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDP v1.4 or higher */
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
@@ -3491,7 +3585,36 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp->num_sink_rates = i;
}
- intel_dp_print_rates(intel_dp);
+ return true;
+}
+
+
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+ if (!intel_dp_read_dpcd(intel_dp))
+ return false;
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
+ &intel_dp->sink_count, 1) < 0)
+ return false;
+
+ /*
+ * Sink count can change between short pulse HPD events, hence
+ * a member variable in intel_dp tracks any changes
+ * between short pulse interrupts.
+ */
+ intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
+
+ /*
+ * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+ * a dongle is present but no display. Unless we need to know
+ * whether a dongle is present, we don't need to update
+ * downstream port information, so an early return here saves
+ * the time of performing operations which are not required.
+ */
+ if (!is_edp(intel_dp) && !intel_dp->sink_count)
+ return false;
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
@@ -3526,7 +3649,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
}
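
The DP_GET_SINK_COUNT() decode above folds the split SINK_COUNT field back together; as I read drm_dp_helper.h, bits 5:0 hold the low count bits, bit 6 is CP_READY, and bit 7 is the count's most-significant bit. A self-contained sketch with a few sanity checks:

    #include <assert.h>
    #include <stdint.h>

    /* mirrors DP_GET_SINK_COUNT(): fold bit 7 down to bit 6, drop CP_READY */
    static uint8_t dp_get_sink_count(uint8_t raw)
    {
        return ((raw & 0x80) >> 1) | (raw & 0x3f);
    }

    int main(void)
    {
        assert(dp_get_sink_count(0x01) == 1);  /* one sink */
        assert(dp_get_sink_count(0x80) == 64); /* bit 7 contributes 64 */
        assert(dp_get_sink_count(0x40) == 0);  /* CP_READY alone is no sink */
        return 0;
    }
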
static bool
-intel_dp_probe_mst(struct intel_dp *intel_dp)
+intel_dp_can_mst(struct intel_dp *intel_dp)
{
u8 buf[1];
@@ -3539,18 +3662,30 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
- if (buf[0] & DP_MST_CAP) {
- DRM_DEBUG_KMS("Sink is MST capable\n");
- intel_dp->is_mst = true;
- } else {
- DRM_DEBUG_KMS("Sink is not MST capable\n");
- intel_dp->is_mst = false;
- }
- }
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
+ return false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
- return intel_dp->is_mst;
+ return buf[0] & DP_MST_CAP;
+}
+
+static void
+intel_dp_configure_mst(struct intel_dp *intel_dp)
+{
+ if (!i915.enable_dp_mst)
+ return;
+
+ if (!intel_dp->can_mst)
+ return;
+
+ intel_dp->is_mst = intel_dp_can_mst(intel_dp);
+
+ if (intel_dp->is_mst)
+ DRM_DEBUG_KMS("Sink is MST capable\n");
+ else
+ DRM_DEBUG_KMS("Sink is not MST capable\n");
+
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
}
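
intel_dp_configure_mst() now separates "may we use MST" from "does the sink support it": the module parameter and the source capability gate the DPCD query, and only then is the topology manager flipped. A minimal user-space sketch of that gating order; the names here are stand-ins for the driver's, not its actual API:

    #include <stdbool.h>
    #include <stdio.h>

    struct dp {
        bool can_mst; /* source capability */
        bool is_mst;  /* resolved mode */
    };

    /* stand-in for the i915.enable_dp_mst module parameter */
    static bool enable_dp_mst = true;

    /* stand-in for the DP_MSTM_CAP DPCD read in intel_dp_can_mst() */
    static bool sink_can_mst(void) { return true; }

    static void configure_mst(struct dp *dp)
    {
        if (!enable_dp_mst || !dp->can_mst)
            return; /* leave is_mst untouched, as the driver does */

        dp->is_mst = sink_can_mst();
        printf("Sink is %sMST capable\n", dp->is_mst ? "" : "not ");
        /* the driver then calls drm_dp_mst_topology_mgr_set_mst() */
    }

    int main(void)
    {
        struct dp dp = { .can_mst = true, .is_mst = false };

        configure_mst(&dp);
        return 0;
    }
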
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
@@ -3909,7 +4044,7 @@ static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- u8 sink_irq_vector;
+ u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@@ -3936,7 +4071,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
/* Try to read the source of the interrupt */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+ intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
+ sink_irq_vector != 0) {
/* Clear interrupt source */
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR,
@@ -3980,6 +4116,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
connector_status_connected : connector_status_disconnected;
}
+ if (intel_dp_can_mst(intel_dp))
+ return connector_status_connected;
+
/* If no HPD, poke DDC gently */
if (drm_probe_ddc(&intel_dp->aux.ddc))
return connector_status_connected;
@@ -4148,7 +4287,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
*
* Return %true if @port is connected, %false otherwise.
*/
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
if (HAS_PCH_IBX(dev_priv))
@@ -4207,7 +4346,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = false;
}
-static void
+static enum drm_connector_status
intel_dp_long_pulse(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
@@ -4217,8 +4356,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
- bool ret;
- u8 sink_irq_vector;
+ u8 sink_irq_vector = 0;
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(to_i915(dev), power_domain);
@@ -4232,7 +4370,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
else
status = connector_status_disconnected;
- if (status != connector_status_connected) {
+ if (status == connector_status_disconnected) {
intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
@@ -4252,10 +4390,20 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DP;
+ DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
+ yesno(intel_dp_source_supports_hbr2(intel_dp)),
+ yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
+
+ intel_dp_print_rates(intel_dp);
+
intel_dp_probe_oui(intel_dp);
- ret = intel_dp_probe_mst(intel_dp);
- if (ret) {
+ intel_dp_print_hw_revision(intel_dp);
+ intel_dp_print_sw_revision(intel_dp);
+
+ intel_dp_configure_mst(intel_dp);
+
+ if (intel_dp->is_mst) {
/*
* If we are in MST mode then this connector
* won't appear connected or have anything
@@ -4284,13 +4432,14 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
-
- status = connector_status_connected;
+ if (is_edp(intel_dp) || intel_connector->detect_edid)
+ status = connector_status_connected;
intel_dp->detect_done = true;
/* Try to read the source of the interrupt */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+ intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
+ sink_irq_vector != 0) {
/* Clear interrupt source */
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR,
@@ -4303,43 +4452,29 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
}
out:
- if ((status != connector_status_connected) &&
- (intel_dp->is_mst == false))
+ if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
intel_display_power_put(to_i915(dev), power_domain);
- return;
+ return status;
}
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ enum drm_connector_status status = connector->status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (intel_dp->is_mst) {
- /* MST devices are disconnected from a monitor POV */
- intel_dp_unset_edid(intel_dp);
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DP;
- return connector_status_disconnected;
- }
-
	/* If a full detect has not been performed yet, do one now */
if (!intel_dp->detect_done)
- intel_dp_long_pulse(intel_dp->attached_connector);
+ status = intel_dp_long_pulse(intel_dp->attached_connector);
intel_dp->detect_done = false;
- if (is_edp(intel_dp) || intel_connector->detect_edid)
- return connector_status_connected;
- else
- return connector_status_disconnected;
+ return status;
}
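
The detect_done flag turns the long-pulse probe into a cached result: intel_dp_detect() only runs the full probe when nobody has done it yet, and then clears the flag so the next detect probes afresh. A toy model of that handshake, with illustrative names rather than the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    struct dp {
        bool detect_done;
        int status; /* 1 = connected, 0 = disconnected */
    };

    static int long_pulse(struct dp *dp)
    {
        dp->detect_done = true; /* full probe performed */
        return 1;               /* pretend a sink was found */
    }

    static int detect(struct dp *dp)
    {
        if (!dp->detect_done)
            dp->status = long_pulse(dp);
        dp->detect_done = false; /* next detect() probes afresh */
        return dp->status;
    }

    int main(void)
    {
        struct dp dp = { false, 0 };

        printf("%d\n", detect(&dp)); /* probes and reports connected */
        dp.detect_done = true;       /* e.g. hotplug work already probed */
        printf("%d\n", detect(&dp)); /* reuses the cached status */
        return 0;
    }
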
static void
@@ -4630,13 +4765,8 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
pps_lock(intel_dp);
- /*
- * Read out the current power sequencer assignment,
- * in case the BIOS did something with it.
- */
- if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
- vlv_initial_power_sequencer_setup(intel_dp);
-
+ /* Reinit the power sequencer, in case BIOS did something with it. */
+ intel_dp_pps_init(encoder->dev, intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
pps_unlock(intel_dp);
@@ -4696,36 +4826,34 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
port_name(intel_dig_port->port),
long_hpd ? "long" : "short");
+ if (long_hpd) {
+ intel_dp->detect_done = false;
+ return IRQ_NONE;
+ }
+
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- if (long_hpd) {
- intel_dp_long_pulse(intel_dp->attached_connector);
- if (intel_dp->is_mst)
- ret = IRQ_HANDLED;
- goto put_power;
-
- } else {
- if (intel_dp->is_mst) {
- if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
- /*
- * If we were in MST mode, and device is not
- * there, get out of MST mode
- */
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
- goto put_power;
- }
+ if (intel_dp->is_mst) {
+ if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+ /*
+			 * If we were in MST mode and the device is no
+			 * longer there, get out of MST mode
+ */
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
+ intel_dp->detect_done = false;
+ goto put_power;
}
+ }
- if (!intel_dp->is_mst) {
- if (!intel_dp_short_pulse(intel_dp)) {
- intel_dp_long_pulse(intel_dp->attached_connector);
- goto put_power;
- }
+ if (!intel_dp->is_mst) {
+ if (!intel_dp_short_pulse(intel_dp)) {
+ intel_dp->detect_done = false;
+ goto put_power;
}
}
@@ -4984,9 +5112,21 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_READ(regs.pp_div));
}
+static void intel_dp_pps_init(struct drm_device *dev,
+ struct intel_dp *intel_dp)
+{
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ vlv_initial_power_sequencer_setup(intel_dp);
+ } else {
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ }
+}
+
/**
* intel_dp_set_drrs_state - program registers for RR switch to take effect
- * @dev: DRM device
+ * @dev_priv: i915 device
+ * @crtc_state: a pointer to the active intel_crtc_state
* @refresh_rate: RR to be programmed
*
* This function gets called when refresh rate (RR) has to be changed from
@@ -4996,14 +5136,14 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
*
* The caller of this function needs to take a lock on dev_priv->drrs.
*/
-static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
+static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state,
+ int refresh_rate)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
struct intel_digital_port *dig_port = NULL;
struct intel_dp *intel_dp = dev_priv->drrs.dp;
- struct intel_crtc_state *config = NULL;
- struct intel_crtc *intel_crtc = NULL;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
@@ -5030,8 +5170,6 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
- config = intel_crtc->config;
-
if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
return;
@@ -5047,12 +5185,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
- if (!intel_crtc->active) {
+ if (!crtc_state->base.active) {
DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
return;
}
- if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
+ if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
switch (index) {
case DRRS_HIGH_RR:
intel_dp_set_m_n(intel_crtc, M1_N1);
@@ -5064,18 +5202,18 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
default:
DRM_ERROR("Unsupported refreshrate type\n");
}
- } else if (INTEL_INFO(dev)->gen > 6) {
- i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
+ } else if (INTEL_GEN(dev_priv) > 6) {
+ i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
u32 val;
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
else
val |= PIPECONF_EDP_RR_MODE_SWITCH;
} else {
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
else
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
@@ -5091,18 +5229,17 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
/**
* intel_edp_drrs_enable - init drrs struct if supported
* @intel_dp: DP struct
+ * @crtc_state: A pointer to the active crtc state.
*
* Initializes frontbuffer_bits and drrs.dp
*/
-void intel_edp_drrs_enable(struct intel_dp *intel_dp)
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->config->has_drrs) {
+ if (!crtc_state->has_drrs) {
DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
return;
}
@@ -5124,17 +5261,16 @@ unlock:
/**
* intel_edp_drrs_disable - Disable DRRS
* @intel_dp: DP struct
+ * @old_crtc_state: Pointer to old crtc_state.
*
*/
-void intel_edp_drrs_disable(struct intel_dp *intel_dp)
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+ struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc->config->has_drrs)
+ if (!old_crtc_state->has_drrs)
return;
mutex_lock(&dev_priv->drrs.mutex);
@@ -5144,9 +5280,8 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
}
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(&dev_priv->drm,
- intel_dp->attached_connector->panel.
- fixed_mode->vrefresh);
+ intel_dp_set_drrs_state(dev_priv, old_crtc_state,
+ intel_dp->attached_connector->panel.fixed_mode->vrefresh);
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);
@@ -5175,10 +5310,12 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
if (dev_priv->drrs.busy_frontbuffer_bits)
goto unlock;
- if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
- intel_dp_set_drrs_state(&dev_priv->drm,
- intel_dp->attached_connector->panel.
- downclock_mode->vrefresh);
+ if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
+ struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+
+ intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+ intel_dp->attached_connector->panel.downclock_mode->vrefresh);
+ }
unlock:
mutex_unlock(&dev_priv->drrs.mutex);
@@ -5186,7 +5323,7 @@ unlock:
/**
* intel_edp_drrs_invalidate - Disable Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
 * This function gets called every time rendering on the given planes starts.
@@ -5194,10 +5331,9 @@ unlock:
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
-void intel_edp_drrs_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits)
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -5220,16 +5356,15 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
/* invalidate means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(&dev_priv->drm,
- dev_priv->drrs.dp->attached_connector->panel.
- fixed_mode->vrefresh);
+ intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+ dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
mutex_unlock(&dev_priv->drrs.mutex);
}
/**
* intel_edp_drrs_flush - Restart Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called every time rendering on the given planes has
@@ -5239,10 +5374,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
*
* Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
*/
-void intel_edp_drrs_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -5265,9 +5399,8 @@ void intel_edp_drrs_flush(struct drm_device *dev,
/* flush means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(&dev_priv->drm,
- dev_priv->drrs.dp->attached_connector->panel.
- fixed_mode->vrefresh);
+ intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+ dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
/*
* flush also means no more activity hence schedule downclock, if all
@@ -5400,27 +5533,15 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pps_lock(intel_dp);
intel_dp_init_panel_power_timestamps(intel_dp);
-
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- vlv_initial_power_sequencer_setup(intel_dp);
- } else {
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
- }
-
+ intel_dp_pps_init(dev, intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
pps_unlock(intel_dp);
/* Cache DPCD and EDID for edp. */
- has_dpcd = intel_dp_get_dpcd(intel_dp);
+ has_dpcd = intel_edp_init_dpcd(intel_dp);
- if (has_dpcd) {
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
- dev_priv->no_aux_handshake =
- intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
- DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
- } else {
+ if (!has_dpcd) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
goto out_vdd_off;
@@ -5576,7 +5697,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- intel_dp_aux_init(intel_dp, intel_connector);
+ intel_dp_aux_init(intel_dp);
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
edp_panel_vdd_work);
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 60fb39cd220b..c438b02184cb 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -24,6 +24,15 @@
#include "intel_drv.h"
static void
+intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
+ link_status[0], link_status[1], link_status[2],
+ link_status[3], link_status[4], link_status[5]);
+}
+
+static void
intel_get_adjust_train(struct intel_dp *intel_dp,
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
@@ -103,13 +112,24 @@ intel_dp_update_link_train(struct intel_dp *intel_dp)
return ret == intel_dp->lane_count;
}
+static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
+{
+ int lane;
+
+ for (lane = 0; lane < intel_dp->lane_count; lane++)
+ if ((intel_dp->train_set[lane] &
+ DP_TRAIN_MAX_SWING_REACHED) == 0)
+ return false;
+
+ return true;
+}
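
intel_dp_link_max_vswing_reached() succeeds only when every active lane reports DP_TRAIN_MAX_SWING_REACHED in its training byte. A stand-alone version, assuming the standard training-lane byte layout where that flag is bit 2:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DP_TRAIN_MAX_SWING_REACHED 0x04 /* bit 2 of each lane's byte */

    static bool max_vswing_reached(const uint8_t *train_set, int lane_count)
    {
        int lane;

        for (lane = 0; lane < lane_count; lane++)
            if (!(train_set[lane] & DP_TRAIN_MAX_SWING_REACHED))
                return false;
        return true;
    }

    int main(void)
    {
        uint8_t train_set[4] = { 0x04, 0x04, 0x04, 0x00 };

        printf("%d\n", max_vswing_reached(train_set, 4)); /* 0: lane 3 has headroom */
        printf("%d\n", max_vswing_reached(train_set, 3)); /* 1: all lanes maxed */
        return 0;
    }
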
+
/* Enable corresponding port and start training pattern 1 */
-static void
+static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
- int i;
uint8_t voltage;
- int voltage_tries, loop_tries;
+ int voltage_tries, max_vswing_tries;
uint8_t link_config[2];
uint8_t link_bw, rate_select;
@@ -125,6 +145,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+
if (intel_dp->num_sink_rates)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
@@ -140,60 +161,54 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
- return;
+ return false;
}
- voltage = 0xff;
- voltage_tries = 0;
- loop_tries = 0;
+ voltage_tries = 1;
+ max_vswing_tries = 0;
for (;;) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
- break;
+ return false;
}
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
- break;
+ return true;
}
- /* Check to see if we've tried the max voltage */
- for (i = 0; i < intel_dp->lane_count; i++)
- if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
- break;
- if (i == intel_dp->lane_count) {
- ++loop_tries;
- if (loop_tries == 5) {
- DRM_ERROR("too many full retries, give up\n");
- break;
- }
- intel_dp_reset_link_train(intel_dp,
- DP_TRAINING_PATTERN_1 |
- DP_LINK_SCRAMBLING_DISABLE);
- voltage_tries = 0;
- continue;
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+ return false;
+ }
+
+ if (max_vswing_tries == 1) {
+ DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+ return false;
}
- /* Check to see if we've tried the same voltage 5 times */
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++voltage_tries;
- if (voltage_tries == 5) {
- DRM_ERROR("too many voltage retries, give up\n");
- break;
- }
- } else
- voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
DRM_ERROR("failed to update link training\n");
- break;
+ return false;
}
+
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
+ voltage)
+ ++voltage_tries;
+ else
+ voltage_tries = 1;
+
+ if (intel_dp_link_max_vswing_reached(intel_dp))
+ ++max_vswing_tries;
}
}
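
The rewritten clock-recovery loop replaces the old loop_tries/voltage_tries pair with two simple exit counters: give up after the same voltage swing has been in the training set five times in a row, or one adjustment round after every lane hits max swing. A toy run of the voltage counter against a canned adjustment sequence (the sequence itself is invented, not a real link-status trace):

    #include <stdint.h>
    #include <stdio.h>

    #define DP_TRAIN_VOLTAGE_SWING_MASK 0x03

    int main(void)
    {
        /* canned per-iteration voltage levels "requested" by the sink */
        const uint8_t requested[] = { 1, 2, 2, 2, 2, 2, 2 };
        uint8_t train_set = 0;
        int voltage_tries = 1;
        size_t i;

        for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
            uint8_t voltage = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;

            train_set = requested[i]; /* intel_get_adjust_train() stand-in */

            if ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage)
                ++voltage_tries;
            else
                voltage_tries = 1;

            if (voltage_tries == 5) {
                printf("Same voltage tried 5 times, giving up\n");
                return 1;
            }
        }
        printf("clock recovery would continue\n");
        return 0;
    }
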
@@ -229,12 +244,12 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
return training_pattern;
}
-static void
+static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
- bool channel_eq = false;
- int tries, cr_tries;
+ int tries;
u32 training_pattern;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
training_pattern = intel_dp_training_pattern(intel_dp);
@@ -243,19 +258,11 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
training_pattern |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to start channel equalization\n");
- return;
+ return false;
}
- tries = 0;
- cr_tries = 0;
- channel_eq = false;
- for (;;) {
- uint8_t link_status[DP_LINK_STATUS_SIZE];
-
- if (cr_tries > 5) {
- DRM_ERROR("failed to train DP, aborting\n");
- break;
- }
+ intel_dp->channel_eq_status = false;
+ for (tries = 0; tries < 5; tries++) {
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -266,44 +273,38 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
- intel_dp_link_training_clock_recovery(intel_dp);
- intel_dp_set_link_train(intel_dp,
- training_pattern |
- DP_LINK_SCRAMBLING_DISABLE);
- cr_tries++;
- continue;
+ intel_dp_dump_link_status(link_status);
+ DRM_DEBUG_KMS("Clock recovery check failed, cannot "
+ "continue channel equalization\n");
+ break;
}
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
- channel_eq = true;
+ intel_dp->channel_eq_status = true;
+ DRM_DEBUG_KMS("Channel EQ done. DP Training "
+ "successful\n");
break;
}
- /* Try 5 times, then try clock recovery if that fails */
- if (tries > 5) {
- intel_dp_link_training_clock_recovery(intel_dp);
- intel_dp_set_link_train(intel_dp,
- training_pattern |
- DP_LINK_SCRAMBLING_DISABLE);
- tries = 0;
- cr_tries++;
- continue;
- }
-
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
DRM_ERROR("failed to update link training\n");
break;
}
- ++tries;
+ }
+
+ /* Try 5 times, else fail and try at lower BW */
+ if (tries == 5) {
+ intel_dp_dump_link_status(link_status);
+ DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
}
intel_dp_set_idle_link_train(intel_dp);
- if (channel_eq)
- DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+ return intel_dp->channel_eq_status;
}
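
Equalization success above is judged by drm_dp_channel_eq_ok(). A simplified re-implementation of that test, assuming the standard DPCD lane-status layout (two lanes per status byte, CR_DONE/CHANNEL_EQ_DONE/SYMBOL_LOCKED in bits 0-2 and 4-6, interlane-align in the third byte):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* simplified drm_dp_channel_eq_ok(): every lane needs CR_DONE,
     * CHANNEL_EQ_DONE and SYMBOL_LOCKED, plus interlane alignment */
    static bool channel_eq_ok(const uint8_t status[3], int lane_count)
    {
        int lane;

        if (!(status[2] & 0x01)) /* INTERLANE_ALIGN_DONE */
            return false;

        for (lane = 0; lane < lane_count; lane++) {
            uint8_t s = status[lane / 2] >> (4 * (lane & 1));

            if ((s & 0x07) != 0x07) /* CR | EQ | SYMBOL_LOCKED */
                return false;
        }
        return true;
    }

    int main(void)
    {
        uint8_t good[3] = { 0x77, 0x77, 0x01 };
        uint8_t bad[3]  = { 0x77, 0x37, 0x01 }; /* lane 3 lost symbol lock */

        printf("%d %d\n", channel_eq_ok(good, 4), channel_eq_ok(bad, 4));
        return 0;
    }
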
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 68a005d729e9..54a9d7610d8f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -31,18 +31,16 @@
#include <drm/drm_edid.h>
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_atomic_state *state;
- int bpp, i;
+ int bpp;
int lane_count, slots;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct drm_connector *drm_connector;
- struct intel_connector *connector, *found = NULL;
- struct drm_connector_state *connector_state;
int mst_pbn;
pipe_config->dp_encoder_is_mst = true;
@@ -54,7 +52,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
-
pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = 24;
@@ -62,20 +59,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
state = pipe_config->base.state;
- for_each_connector_in_state(state, drm_connector, connector_state, i) {
- connector = to_intel_connector(drm_connector);
-
- if (connector_state->best_encoder == &encoder->base) {
- found = connector;
- break;
- }
- }
-
- if (!found) {
- DRM_ERROR("can't find connector\n");
- return false;
- }
-
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn;
@@ -92,16 +75,20 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
}
-static void intel_mst_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_disable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_connector *connector =
+ to_intel_connector(old_conn_state->connector);
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
- drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->connector->port);
+ drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
@@ -109,11 +96,15 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
}
}
-static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_connector *connector =
+ to_intel_connector(old_conn_state->connector);
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@@ -122,59 +113,51 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
/* and this can also fail */
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
- drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->connector->port);
+ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
intel_dp->active_mst_links--;
intel_mst->connector = NULL;
if (intel_dp->active_mst_links == 0) {
- intel_dig_port->base.post_disable(&intel_dig_port->base);
+ intel_dig_port->base.post_disable(&intel_dig_port->base,
+ NULL, NULL);
+
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
}
-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_dig_port->port;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
int ret;
uint32_t temp;
- struct intel_connector *found = NULL, *connector;
int slots;
- struct drm_crtc *crtc = encoder->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- for_each_intel_connector(dev, connector) {
- if (connector->base.state->best_encoder == &encoder->base) {
- found = connector;
- break;
- }
- }
-
- if (!found) {
- DRM_ERROR("can't find connector\n");
- return;
- }
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.
*/
- found->encoder = encoder;
+ connector->encoder = encoder;
+ intel_mst->connector = connector;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
- intel_mst->connector = found;
-
if (intel_dp->active_mst_links == 0) {
- intel_prepare_ddi_buffer(&intel_dig_port->base);
-
- intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
+ intel_ddi_clk_select(&intel_dig_port->base,
+ pipe_config->shared_dpll);
- intel_dp_set_link_params(intel_dp, intel_crtc->config);
+ intel_prepare_dp_ddi_buffers(&intel_dig_port->base);
+ intel_dp_set_link_params(intel_dp,
+ pipe_config->port_clock,
+ pipe_config->lane_count,
+ true);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
@@ -185,8 +168,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
}
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
- intel_mst->connector->port,
- intel_crtc->config->pbn, &slots);
+ connector->port,
+ pipe_config->pbn, &slots);
if (ret == false) {
DRM_ERROR("failed to allocate vcpi\n");
return;
@@ -200,13 +183,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
}
-static void intel_mst_enable_dp(struct intel_encoder *encoder)
+static void intel_mst_enable_dp(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_dig_port->port;
int ret;
@@ -239,9 +223,8 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 5c1f2d235ffa..1c59ca50c430 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -24,6 +24,44 @@
#include "intel_drv.h"
struct intel_shared_dpll *
+skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
+{
+ struct intel_shared_dpll *pll = NULL;
+ struct intel_dpll_hw_state dpll_hw_state;
+ enum intel_dpll_id i;
+ bool found = false;
+
+ if (!skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+ return pll;
+
+ for (i = DPLL_ID_SKL_DPLL1; i <= DPLL_ID_SKL_DPLL3; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+		/* Check already-enabled DPLLs for matching timings first */
+ if (pll->config.crtc_mask == 0)
+ continue;
+
+ if (memcmp(&dpll_hw_state, &pll->config.hw_state,
+ sizeof(pll->config.hw_state)) == 0) {
+ found = true;
+ break;
+ }
+ }
+
+ /* Ok no matching timings, maybe there's a free one? */
+ for (i = DPLL_ID_SKL_DPLL1;
+	     !found && i <= DPLL_ID_SKL_DPLL3; i++) {
+ pll = &dev_priv->shared_dplls[i];
+ if (pll->config.crtc_mask == 0) {
+ pll->config.hw_state = dpll_hw_state;
+ break;
+ }
+ }
+
+ return pll;
+}
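
skl_find_link_pll() is a two-pass search: first an already-enabled DPLL whose programmed state matches the wanted link clock, then any free slot, which gets the new state written into it. A user-space sketch of the same shape; this version returns NULL when neither pass hits, whereas the kernel code falls through holding the last candidate:

    #include <stdio.h>

    struct pll {
        unsigned int crtc_mask; /* 0 means the DPLL is free */
        int hw_state;           /* stand-in for intel_dpll_hw_state */
    };

    static struct pll *find_link_pll(struct pll *plls, int n, int wanted)
    {
        int i;

        for (i = 0; i < n; i++) /* pass 1: enabled and timings match */
            if (plls[i].crtc_mask && plls[i].hw_state == wanted)
                return &plls[i];

        for (i = 0; i < n; i++) /* pass 2: claim any free slot */
            if (!plls[i].crtc_mask) {
                plls[i].hw_state = wanted;
                return &plls[i];
            }

        return NULL;
    }

    int main(void)
    {
        struct pll plls[3] = { { 1, 270000 }, { 0, 0 }, { 2, 135000 } };
        struct pll *pll = find_link_pll(plls, 3, 135000);

        printf("picked pll %ld\n", (long)(pll - plls)); /* 2: timings match */
        return 0;
    }
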
+
+struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
@@ -452,26 +490,6 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
return val & SPLL_PLL_ENABLE;
}
-static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
-{
- switch (pll->id) {
- case DPLL_ID_WRPLL1:
- return PORT_CLK_SEL_WRPLL1;
- case DPLL_ID_WRPLL2:
- return PORT_CLK_SEL_WRPLL2;
- case DPLL_ID_SPLL:
- return PORT_CLK_SEL_SPLL;
- case DPLL_ID_LCPLL_810:
- return PORT_CLK_SEL_LCPLL_810;
- case DPLL_ID_LCPLL_1350:
- return PORT_CLK_SEL_LCPLL_1350;
- case DPLL_ID_LCPLL_2700:
- return PORT_CLK_SEL_LCPLL_2700;
- default:
- return PORT_CLK_SEL_NONE;
- }
-}
-
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
@@ -687,11 +705,65 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_shared_dpll *pll;
+ uint32_t val;
+ unsigned int p, n2, r2;
+
+ hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ crtc_state->dpll_hw_state.wrpll = val;
+
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+
+ if (!pll)
+ return NULL;
+
+ return pll;
+}
+
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+ int clock)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id pll_id;
+
+ switch (clock / 2) {
+ case 81000:
+ pll_id = DPLL_ID_LCPLL_810;
+ break;
+ case 135000:
+ pll_id = DPLL_ID_LCPLL_1350;
+ break;
+ case 270000:
+ pll_id = DPLL_ID_LCPLL_2700;
+ break;
+ default:
+ DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+ return NULL;
+ }
+
+ pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+ if (!pll)
+ return NULL;
+
+ return pll;
+}
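
For HSW DP the PLL is not computed at all; the link clock (port_clock, in kHz) maps directly onto one of three fixed LCPLL outputs. The factored-out helper is essentially this table lookup:

    #include <stdio.h>

    enum pll_id { LCPLL_810, LCPLL_1350, LCPLL_2700, PLL_NONE };

    static enum pll_id hsw_dp_pll_for_clock(int port_clock /* kHz */)
    {
        switch (port_clock / 2) {
        case 81000:  return LCPLL_810;  /* RBR, 1.62 GHz link */
        case 135000: return LCPLL_1350; /* HBR, 2.7 GHz link */
        case 270000: return LCPLL_2700; /* HBR2, 5.4 GHz link */
        default:     return PLL_NONE;
        }
    }

    int main(void)
    {
        printf("%d\n", hsw_dp_pll_for_clock(270000)); /* HBR -> LCPLL_1350 (1) */
        return 0;
    }
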
+
static struct intel_shared_dpll *
hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
int clock = crtc_state->port_clock;
@@ -699,41 +771,12 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
sizeof(crtc_state->dpll_hw_state));
if (encoder->type == INTEL_OUTPUT_HDMI) {
- uint32_t val;
- unsigned p, n2, r2;
-
- hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
-
- crtc_state->dpll_hw_state.wrpll = val;
-
- pll = intel_find_shared_dpll(crtc, crtc_state,
- DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+ pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
} else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
- enum intel_dpll_id pll_id;
-
- switch (clock / 2) {
- case 81000:
- pll_id = DPLL_ID_LCPLL_810;
- break;
- case 135000:
- pll_id = DPLL_ID_LCPLL_1350;
- break;
- case 270000:
- pll_id = DPLL_ID_LCPLL_2700;
- break;
- default:
- DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
- return NULL;
- }
-
- pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+ pll = hsw_ddi_dp_get_dpll(encoder, clock);
} else if (encoder->type == INTEL_OUTPUT_ANALOG) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
@@ -751,14 +794,11 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
if (!pll)
return NULL;
- crtc_state->ddi_pll_sel = hsw_pll_to_ddi_pll_sel(pll);
-
intel_reference_shared_dpll(pll, crtc_state);
return pll;
}
-
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
@@ -1194,75 +1234,110 @@ skip_remaining_dividers:
return true;
}
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ int clock)
{
- struct intel_shared_dpll *pll;
uint32_t ctrl1, cfgcr1, cfgcr2;
- int clock = crtc_state->port_clock;
+ struct skl_wrpll_params wrpll_params = { 0, };
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
* as the DPLL id in this function.
*/
-
ctrl1 = DPLL_CTRL1_OVERRIDE(0);
- if (encoder->type == INTEL_OUTPUT_HDMI) {
- struct skl_wrpll_params wrpll_params = { 0, };
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+ return false;
- ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
- if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
- return NULL;
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+ return true;
+}
+
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+ struct intel_dpll_hw_state *dpll_hw_state)
+{
+ uint32_t ctrl1;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+ switch (clock / 2) {
+ case 81000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+ break;
+ case 135000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+ break;
+ case 270000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+ break;
+ /* eDP 1.4 rates */
+ case 162000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+ break;
+ case 108000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+ break;
+ case 216000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+ break;
+ }
+
+ dpll_hw_state->ctrl1 = ctrl1;
+ return true;
+}
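
DPLL_CTRL1 packs one small field per DPLL: an override bit, a 3-bit link-rate code, and SSC/HDMI-mode bits, and skl_ddi_dp_set_dpll_hw_state() always encodes for DPLL id 0. A sketch of the encoding; the shift layout and rate codes are reproduced from i915_reg.h from memory and should be treated as illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* 3-bit link-rate codes, per DPLL_CTRL1_LINK_RATE_* (from memory) */
    enum { RATE_2700, RATE_1350, RATE_810, RATE_1620, RATE_1080, RATE_2160 };

    /* each DPLL owns 6 bits: override, 3-bit rate, SSC, HDMI mode */
    #define CTRL1_OVERRIDE(id)     (1u << ((id) * 6))
    #define CTRL1_LINK_RATE(v, id) ((uint32_t)(v) << ((id) * 6 + 1))

    int main(void)
    {
        uint32_t ctrl1 = CTRL1_OVERRIDE(0) | CTRL1_LINK_RATE(RATE_1350, 0);

        printf("ctrl1 = 0x%x\n", ctrl1); /* 0x3: HBR on DPLL id 0 */
        return 0;
    }
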
+
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_shared_dpll *pll;
+ int clock = crtc_state->port_clock;
+ bool bret;
+ struct intel_dpll_hw_state dpll_hw_state;
- cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
- DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
- wrpll_params.dco_integer;
+ memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
- DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
- DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
- DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
- wrpll_params.central_freq;
+ if (encoder->type == INTEL_OUTPUT_HDMI) {
+ bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+ if (!bret) {
+ DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+ return NULL;
+ }
} else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
- switch (crtc_state->port_clock / 2) {
- case 81000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
- break;
- case 135000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
- break;
- case 270000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
- break;
- /* eDP 1.4 rates */
- case 162000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
- break;
- case 108000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
- break;
- case 216000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
- break;
+ bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+ if (!bret) {
+ DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+ return NULL;
}
-
- cfgcr1 = cfgcr2 = 0;
+ crtc_state->dpll_hw_state = dpll_hw_state;
} else {
return NULL;
}
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
if (encoder->type == INTEL_OUTPUT_EDP)
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_SKL_DPLL0,
@@ -1274,8 +1349,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
if (!pll)
return NULL;
- crtc_state->ddi_pll_sel = pll->id;
-
intel_reference_shared_dpll(pll, crtc_state);
return pll;
@@ -1484,6 +1557,8 @@ struct bxt_clk_div {
uint32_t m2_frac;
bool m2_frac_en;
uint32_t n;
+
+ int vco;
};
/* pre-calculated values for DP linkrates */
@@ -1497,57 +1572,60 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
{432000, 3, 1, 32, 1677722, 1, 1}
};
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state, int clock,
+ struct bxt_clk_div *clk_div)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_shared_dpll *pll;
- enum intel_dpll_id i;
- struct intel_digital_port *intel_dig_port;
- struct bxt_clk_div clk_div = {0};
- int vco = 0;
- uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
- uint32_t lanestagger;
- int clock = crtc_state->port_clock;
+ struct dpll best_clock;
- if (encoder->type == INTEL_OUTPUT_HDMI) {
- struct dpll best_clock;
+ /* Calculate HDMI div */
+ /*
+ * FIXME: tie the following calculation into
+ * i9xx_crtc_compute_clock
+ */
+ if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+ DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+ clock, pipe_name(intel_crtc->pipe));
+ return false;
+ }
- /* Calculate HDMI div */
- /*
- * FIXME: tie the following calculation into
- * i9xx_crtc_compute_clock
- */
- if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
- DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
- clock, pipe_name(crtc->pipe));
- return NULL;
- }
+ clk_div->p1 = best_clock.p1;
+ clk_div->p2 = best_clock.p2;
+ WARN_ON(best_clock.m1 != 2);
+ clk_div->n = best_clock.n;
+ clk_div->m2_int = best_clock.m2 >> 22;
+ clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
+ clk_div->m2_frac_en = clk_div->m2_frac != 0;
- clk_div.p1 = best_clock.p1;
- clk_div.p2 = best_clock.p2;
- WARN_ON(best_clock.m1 != 2);
- clk_div.n = best_clock.n;
- clk_div.m2_int = best_clock.m2 >> 22;
- clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
- clk_div.m2_frac_en = clk_div.m2_frac != 0;
+ clk_div->vco = best_clock.vco;
- vco = best_clock.vco;
- } else if (encoder->type == INTEL_OUTPUT_DP ||
- encoder->type == INTEL_OUTPUT_EDP) {
- int i;
+ return true;
+}
- clk_div = bxt_dp_clk_val[0];
- for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
- if (bxt_dp_clk_val[i].clock == clock) {
- clk_div = bxt_dp_clk_val[i];
- break;
- }
+static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
+{
+ int i;
+
+ *clk_div = bxt_dp_clk_val[0];
+ for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+ if (bxt_dp_clk_val[i].clock == clock) {
+ *clk_div = bxt_dp_clk_val[i];
+ break;
}
- vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
}
+ clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
+}
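
For DP the BXT VCO falls straight out of the dividers: vco = clock * 10 / 2 * p1 * p2. Checking the 432000 kHz table entry above (p1 = 3, p2 = 1) lands the VCO in the 6.2-6.7 GHz band that selects prop_coef = 4 in bxt_ddi_set_dpll_hw_state():

    #include <stdio.h>

    int main(void)
    {
        int clock = 432000, p1 = 3, p2 = 1; /* from bxt_dp_clk_val */
        int vco = clock * 10 / 2 * p1 * p2;

        printf("vco = %d kHz\n", vco); /* 6480000: in the 6.2-6.7 GHz band */
        return 0;
    }
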
+
+static bool bxt_ddi_set_dpll_hw_state(int clock,
+ struct bxt_clk_div *clk_div,
+ struct intel_dpll_hw_state *dpll_hw_state)
+{
+ int vco = clk_div->vco;
+ uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
+ uint32_t lanestagger;
+
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
int_coef = 9;
@@ -1566,12 +1644,9 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
targ_cnt = 9;
} else {
DRM_ERROR("Invalid VCO\n");
- return NULL;
+ return false;
}
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
if (clock > 270000)
lanestagger = 0x18;
else if (clock > 135000)
@@ -1583,35 +1658,86 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
else
lanestagger = 0x02;
- crtc_state->dpll_hw_state.ebb0 =
- PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
- crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
- crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
- crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+ dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
+ dpll_hw_state->pll0 = clk_div->m2_int;
+ dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
+ dpll_hw_state->pll2 = clk_div->m2_frac;
- if (clk_div.m2_frac_en)
- crtc_state->dpll_hw_state.pll3 =
- PORT_PLL_M2_FRAC_ENABLE;
+ if (clk_div->m2_frac_en)
+ dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
- crtc_state->dpll_hw_state.pll6 =
- prop_coef | PORT_PLL_INT_COEFF(int_coef);
- crtc_state->dpll_hw_state.pll6 |=
- PORT_PLL_GAIN_CTL(gain_ctl);
+ dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
+ dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
- crtc_state->dpll_hw_state.pll8 = targ_cnt;
+ dpll_hw_state->pll8 = targ_cnt;
- crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+ dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
- crtc_state->dpll_hw_state.pll10 =
+ dpll_hw_state->pll10 =
PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
| PORT_PLL_DCO_AMP_OVR_EN_H;
- crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+ dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+
+ dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
- crtc_state->dpll_hw_state.pcsdw12 =
- LANESTAGGER_STRAP_OVRD | lanestagger;
+ return true;
+}
- intel_dig_port = enc_to_dig_port(&encoder->base);
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+ struct intel_dpll_hw_state *dpll_hw_state)
+{
+ struct bxt_clk_div clk_div = {0};
+
+ bxt_ddi_dp_pll_dividers(clock, &clk_div);
+
+ return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+}
+
+static bool
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state, int clock,
+ struct intel_dpll_hw_state *dpll_hw_state)
+{
+ struct bxt_clk_div clk_div = { };
+
+ bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
+
+ return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+}
+
+static struct intel_shared_dpll *
+bxt_get_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_dpll_hw_state dpll_hw_state = { };
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_digital_port *intel_dig_port;
+ struct intel_shared_dpll *pll;
+ int i, clock = crtc_state->port_clock;
+
+ if (encoder->type == INTEL_OUTPUT_HDMI &&
+ !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
+ &dpll_hw_state))
+ return NULL;
+
+ if ((encoder->type == INTEL_OUTPUT_DP ||
+ encoder->type == INTEL_OUTPUT_EDP) &&
+ !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+ return NULL;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state = dpll_hw_state;
+
+ if (encoder->type == INTEL_OUTPUT_DP_MST) {
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+
+ intel_dig_port = intel_mst->primary;
+	} else {
+		intel_dig_port = enc_to_dig_port(&encoder->base);
+	}
/* 1:1 mapping between ports and PLLs */
i = (enum intel_dpll_id) intel_dig_port->port;
@@ -1622,9 +1748,6 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
intel_reference_shared_dpll(pll, crtc_state);
- /* shared DPLL id 0 is DPLL A */
- crtc_state->ddi_pll_sel = pll->id;
-
return pll;
}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 89c5ada1a315..f4385353bc11 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -160,5 +160,20 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc);
void intel_shared_dpll_commit(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+/* BXT dpll related functions */
+bool bxt_ddi_dp_set_dpll_hw_state(int clock,
+ struct intel_dpll_hw_state *dpll_hw_state);
+
+/* SKL dpll related functions */
+bool skl_ddi_dp_set_dpll_hw_state(int clock,
+ struct intel_dpll_hw_state *dpll_hw_state);
+struct intel_shared_dpll *skl_find_link_pll(struct drm_i915_private *dev_priv,
+ int clock);
+
+/* HSW dpll related functions */
+struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
+ int clock);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ff399b9a5c1f..a19ec06f9e42 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -52,11 +52,15 @@
*/
#define _wait_for(COND, US, W) ({ \
unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- if (!(COND)) \
- ret__ = -ETIMEDOUT; \
+ int ret__; \
+ for (;;) { \
+ bool expired__ = time_after(jiffies, timeout__); \
+ if (COND) { \
+ ret__ = 0; \
+ break; \
+ } \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
break; \
} \
if ((W) && drm_can_sleep()) { \
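
The reworked _wait_for() samples the deadline before evaluating COND, so a condition that became true while the caller slept still wins over the timeout instead of reporting -ETIMEDOUT. A user-space model of the same loop, with clock_gettime() standing in for jiffies and -1 for -ETIMEDOUT:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static long now_ms(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* the kernel macro also sleeps between polls when allowed */
    static int wait_for(bool (*cond)(void), long timeout_ms)
    {
        long deadline = now_ms() + timeout_ms;

        for (;;) {
            bool expired = now_ms() > deadline; /* sampled before COND */

            if (cond())
                return 0;
            if (expired)
                return -1;
        }
    }

    static bool always_true(void) { return true; }

    int main(void)
    {
        printf("%d\n", wait_for(always_true, 10)); /* 0: condition won */
        return 0;
    }
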
@@ -178,11 +182,22 @@ struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_i915_gem_object *obj;
struct intel_rotation_info rot_info;
+
+ /* for each plane in the normal GTT view */
+ struct {
+ unsigned int x, y;
+ } normal[2];
+ /* for each plane in the rotated GTT view */
+ struct {
+ unsigned int x, y;
+ unsigned int pitch; /* pixels */
+ } rotated[2];
};
struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer *fb;
+ struct i915_vma *vma;
async_cookie_t cookie;
int preferred_bpp;
};
@@ -194,14 +209,26 @@ struct intel_encoder {
unsigned int cloneable;
void (*hot_plug)(struct intel_encoder *);
bool (*compute_config)(struct intel_encoder *,
- struct intel_crtc_state *);
- void (*pre_pll_enable)(struct intel_encoder *);
- void (*pre_enable)(struct intel_encoder *);
- void (*enable)(struct intel_encoder *);
- void (*mode_set)(struct intel_encoder *intel_encoder);
- void (*disable)(struct intel_encoder *);
- void (*post_disable)(struct intel_encoder *);
- void (*post_pll_disable)(struct intel_encoder *);
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
+ void (*pre_pll_enable)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
+ void (*pre_enable)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
+ void (*enable)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
+ void (*disable)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
+ void (*post_disable)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
+ void (*post_pll_disable)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
@@ -236,6 +263,7 @@ struct intel_panel {
bool enabled;
bool combination_mode; /* gen 2/4 only */
bool active_low_pwm;
+ bool alternate_pwm_increment; /* lpt+ */
/* PWM chip */
bool util_pin_active_low; /* bxt+ */
@@ -338,10 +366,16 @@ struct intel_atomic_state {
struct intel_plane_state {
struct drm_plane_state base;
- struct drm_rect src;
- struct drm_rect dst;
struct drm_rect clip;
- bool visible;
+
+ struct {
+ u32 offset;
+ int x, y;
+ } main;
+ struct {
+ u32 offset;
+ int x, y;
+ } aux;
/*
* scaler_id
@@ -561,12 +595,6 @@ struct intel_crtc_state {
/* Selected dpll when shared or NULL. */
struct intel_shared_dpll *shared_dpll;
- /*
- * - PORT_CLK_SEL for DDI ports on HSW/BDW.
- * - enum skl_dpll on SKL
- */
- uint32_t ddi_pll_sel;
-
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
@@ -683,8 +711,8 @@ struct intel_crtc {
struct intel_crtc_state *config;
- /* reset counter value when the last flip was submitted */
- unsigned int reset_counter;
+ /* global reset count when the last flip was submitted */
+ unsigned int reset_count;
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
@@ -852,8 +880,10 @@ struct intel_dp {
int link_rate;
uint8_t lane_count;
uint8_t sink_count;
+ bool link_mst;
bool has_audio;
bool detect_done;
+ bool channel_eq_status;
enum hdmi_force_audio force_audio;
bool limited_color_range;
bool color_range_auto;
@@ -1106,8 +1136,11 @@ void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config);
-void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
+ struct intel_shared_dpll *pll);
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state);
+void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
@@ -1122,7 +1155,6 @@ bool intel_ddi_pll_select(struct intel_crtc *crtc,
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
struct intel_encoder *
@@ -1133,22 +1165,12 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
-
-/* intel_frontbuffer.c */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin);
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
- unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
- unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_device *dev,
- unsigned frontbuffer_bits);
+struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp,
+ int clock);
unsigned int intel_fb_align_height(struct drm_device *dev,
unsigned int height,
uint32_t pixel_format,
uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
- enum fb_op_origin origin);
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format);
@@ -1164,14 +1186,22 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
extern const struct drm_plane_funcs intel_plane_funcs;
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_fb_xy_to_linear(int x, int y,
+ const struct intel_plane_state *state,
+ int plane);
+void intel_add_fb_offsets(int *x, int *y,
+ const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
void intel_mark_busy(struct drm_i915_private *dev_priv);
void intel_mark_idle(struct drm_i915_private *dev_priv);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
int intel_display_suspend(struct drm_device *dev);
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
@@ -1227,8 +1257,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
-int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- unsigned int rotation);
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
@@ -1238,9 +1268,9 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state);
+ struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
- const struct drm_plane_state *old_state);
+ struct drm_plane_state *old_state);
int intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
@@ -1258,7 +1288,7 @@ unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
static inline bool
intel_rotation_90_or_270(unsigned int rotation)
{
- return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+ return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
}
void intel_create_rotation_property(struct drm_device *dev,
@@ -1290,9 +1320,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
u32 intel_compute_tile_offset(int *x, int *y,
- const struct drm_framebuffer *fb, int plane,
- unsigned int pitch,
- unsigned int rotation);
+ const struct intel_plane_state *state, int plane);
void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@@ -1335,13 +1363,14 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
-u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
- struct drm_i915_gem_object *obj,
- unsigned int plane);
+u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
u32 skl_plane_ctl_rotation(unsigned int rotation);
+u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
+ unsigned int rotation);
+int skl_check_plane_surface(struct intel_plane_state *plane_state);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
@@ -1355,7 +1384,8 @@ bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config);
+ int link_rate, uint8_t lane_count,
+ bool link_mst);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
@@ -1364,7 +1394,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
@@ -1382,13 +1413,14 @@ void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp);
-void intel_edp_drrs_invalidate(struct drm_device *dev,
- unsigned frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port);
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1488,7 +1520,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
@@ -1561,13 +1594,13 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
/* intel_psr.c */
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
/* intel_runtime_pm.c */
@@ -1667,13 +1700,6 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
atomic_dec(&dev_priv->pm.wakeref_count);
}
-/* TODO: convert users of these to rely instead on proper RPM refcounting */
-#define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv) \
- disable_rpm_wakeref_asserts(dev_priv)
-
-#define ENABLE_RPM_WAKEREF_ASSERTS(dev_priv) \
- enable_rpm_wakeref_asserts(dev_priv)
-
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
@@ -1699,11 +1725,11 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
@@ -1716,9 +1742,21 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
-bool skl_can_enable_sagv(struct drm_atomic_state *state);
-int skl_enable_sagv(struct drm_i915_private *dev_priv);
-int skl_disable_sagv(struct drm_i915_private *dev_priv);
+bool intel_can_enable_sagv(struct drm_atomic_state *state);
+int intel_enable_sagv(struct drm_i915_private *dev_priv);
+int intel_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe);
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+ const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe);
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+ const struct skl_wm_values *wm);
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+ const struct skl_wm_values *wm,
+ int plane);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index de8e9fb51595..b2e3d3a334f7 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -312,7 +312,8 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
}
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
@@ -533,14 +534,15 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
intel_panel_enable_backlight(intel_dsi->attached_connector);
}
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *pipe_config);
-static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port;
DRM_DEBUG_KMS("\n");
@@ -550,9 +552,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
* lock. It needs to be fully powered down to fix it.
*/
intel_disable_dsi_pll(encoder);
- intel_enable_dsi_pll(encoder, crtc->config);
+ intel_enable_dsi_pll(encoder, pipe_config);
- intel_dsi_prepare(encoder);
+ intel_dsi_prepare(encoder, pipe_config);
/* Panel Enable over CRC PMIC */
if (intel_dsi->gpio_panel)
@@ -582,7 +584,9 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
intel_dsi_enable(encoder);
}
-static void intel_dsi_enable_nop(struct intel_encoder *encoder)
+static void intel_dsi_enable_nop(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
DRM_DEBUG_KMS("\n");
@@ -592,7 +596,9 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder)
*/
}
-static void intel_dsi_pre_disable(struct intel_encoder *encoder)
+static void intel_dsi_pre_disable(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -694,7 +700,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
intel_disable_dsi_pll(encoder);
}
-static void intel_dsi_post_disable(struct intel_encoder *encoder)
+static void intel_dsi_post_disable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -819,6 +827,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
crtc_hblank_start_sw, crtc_hblank_end_sw;
+ /* FIXME: hw readout should not depend on SW state */
intel_crtc = to_intel_crtc(encoder->base.crtc);
adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
@@ -1104,14 +1113,15 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
}
}
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
@@ -1348,7 +1358,7 @@ static int intel_dsi_set_property(struct drm_connector *connector,
intel_connector->panel.fitting_mode = val;
}
- crtc = intel_attached_encoder(connector)->base.crtc;
+ crtc = connector->state->crtc;
if (crtc && crtc->state->enable) {
/*
* If the CRTC is enabled, the display will be changed
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index b9e5a63a7c9e..2e452c505e7e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -174,7 +174,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
-static void intel_disable_dvo(struct intel_encoder *encoder)
+static void intel_disable_dvo(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -186,17 +188,18 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
I915_READ(dvo_reg);
}
-static void intel_enable_dvo(struct intel_encoder *encoder)
+static void intel_enable_dvo(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
- &crtc->config->base.mode,
- &crtc->config->base.adjusted_mode);
+ &pipe_config->base.mode,
+ &pipe_config->base.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
@@ -235,7 +238,8 @@ intel_dvo_mode_valid(struct drm_connector *connector,
}
static bool intel_dvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
const struct drm_display_mode *fixed_mode =
@@ -253,12 +257,13 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
return true;
}
-static void intel_dvo_pre_enable(struct intel_encoder *encoder)
+static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
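
A recurring pattern across the encoder hunks above (intel_dsi.c, intel_dvo.c, and the intel_ddi.c/intel_dp.c prototypes earlier): the enable/disable hooks now receive the atomic state being committed instead of dereferencing encoder->base.crtc. A minimal sketch of a converted hook — the foo_* names are hypothetical stand-ins, not part of the patch:

static void foo_pre_enable(struct intel_encoder *encoder,
                           struct intel_crtc_state *pipe_config,
                           struct drm_connector_state *conn_state)
{
        /* Derive everything from the state being committed... */
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
        const struct drm_display_mode *adjusted_mode =
                &pipe_config->base.adjusted_mode;

        /* ...never from encoder->base.crtc->config, which may be stale
         * or not yet assigned at pre_enable time. */
        foo_program_timings(crtc, adjusted_mode);
}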
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
new file mode 100644
index 000000000000..025e232a4205
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+#include "intel_lrc.h"
+
+static const struct engine_info {
+ const char *name;
+ unsigned exec_id;
+ enum intel_engine_hw_id hw_id;
+ u32 mmio_base;
+ unsigned irq_shift;
+ int (*init_legacy)(struct intel_engine_cs *engine);
+ int (*init_execlists)(struct intel_engine_cs *engine);
+} intel_engines[] = {
+ [RCS] = {
+ .name = "render ring",
+ .exec_id = I915_EXEC_RENDER,
+ .hw_id = RCS_HW,
+ .mmio_base = RENDER_RING_BASE,
+ .irq_shift = GEN8_RCS_IRQ_SHIFT,
+ .init_execlists = logical_render_ring_init,
+ .init_legacy = intel_init_render_ring_buffer,
+ },
+ [BCS] = {
+ .name = "blitter ring",
+ .exec_id = I915_EXEC_BLT,
+ .hw_id = BCS_HW,
+ .mmio_base = BLT_RING_BASE,
+ .irq_shift = GEN8_BCS_IRQ_SHIFT,
+ .init_execlists = logical_xcs_ring_init,
+ .init_legacy = intel_init_blt_ring_buffer,
+ },
+ [VCS] = {
+ .name = "bsd ring",
+ .exec_id = I915_EXEC_BSD,
+ .hw_id = VCS_HW,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .irq_shift = GEN8_VCS1_IRQ_SHIFT,
+ .init_execlists = logical_xcs_ring_init,
+ .init_legacy = intel_init_bsd_ring_buffer,
+ },
+ [VCS2] = {
+ .name = "bsd2 ring",
+ .exec_id = I915_EXEC_BSD,
+ .hw_id = VCS2_HW,
+ .mmio_base = GEN8_BSD2_RING_BASE,
+ .irq_shift = GEN8_VCS2_IRQ_SHIFT,
+ .init_execlists = logical_xcs_ring_init,
+ .init_legacy = intel_init_bsd2_ring_buffer,
+ },
+ [VECS] = {
+ .name = "video enhancement ring",
+ .exec_id = I915_EXEC_VEBOX,
+ .hw_id = VECS_HW,
+ .mmio_base = VEBOX_RING_BASE,
+ .irq_shift = GEN8_VECS_IRQ_SHIFT,
+ .init_execlists = logical_xcs_ring_init,
+ .init_legacy = intel_init_vebox_ring_buffer,
+ },
+};
+
+static struct intel_engine_cs *
+intel_engine_setup(struct drm_i915_private *dev_priv,
+ enum intel_engine_id id)
+{
+ const struct engine_info *info = &intel_engines[id];
+ struct intel_engine_cs *engine = &dev_priv->engine[id];
+
+ engine->id = id;
+ engine->i915 = dev_priv;
+ engine->name = info->name;
+ engine->exec_id = info->exec_id;
+ engine->hw_id = engine->guc_id = info->hw_id;
+ engine->mmio_base = info->mmio_base;
+ engine->irq_shift = info->irq_shift;
+
+ return engine;
+}
+
+/**
+ * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * @dev: DRM device.
+ *
+ * Return: non-zero if the initialization failed.
+ */
+int intel_engines_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+ unsigned int mask = 0;
+ int (*init)(struct intel_engine_cs *engine);
+ unsigned int i;
+ int ret;
+
+ WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0);
+ WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+ GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
+
+ for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
+ if (!HAS_ENGINE(dev_priv, i))
+ continue;
+
+ if (i915.enable_execlists)
+ init = intel_engines[i].init_execlists;
+ else
+ init = intel_engines[i].init_legacy;
+
+ if (!init)
+ continue;
+
+ ret = init(intel_engine_setup(dev_priv, i));
+ if (ret)
+ goto cleanup;
+
+ mask |= ENGINE_MASK(i);
+ }
+
+ /*
+	 * Catch failures to update the intel_engines table when new engines
+	 * are added to the driver: warn and disable the forgotten engines.
+ */
+ if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+ device_info->ring_mask = mask;
+
+ device_info->num_rings = hweight32(mask);
+
+ return 0;
+
+cleanup:
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ if (i915.enable_execlists)
+ intel_logical_ring_cleanup(&dev_priv->engine[i]);
+ else
+ intel_engine_cleanup(&dev_priv->engine[i]);
+ }
+
+ return ret;
+}
+
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ /* Our semaphore implementation is strictly monotonic (i.e. we proceed
+ * so long as the semaphore value in the register/page is greater
+ * than the sync value), so whenever we reset the seqno,
+ * so long as we reset the tracking semaphore value to 0, it will
+ * always be before the next request's seqno. If we don't reset
+ * the semaphore value, then when the seqno moves backwards all
+ * future waits will complete instantly (causing rendering corruption).
+ */
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
+ I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+ if (HAS_VEBOX(dev_priv))
+ I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+ }
+ if (dev_priv->semaphore) {
+ struct page *page = i915_vma_first_page(dev_priv->semaphore);
+ void *semaphores;
+
+ /* Semaphores are in noncoherent memory, flush to be safe */
+ semaphores = kmap(page);
+ memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+ 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+ drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+ I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+ kunmap(page);
+ }
+ memset(engine->semaphore.sync_seqno, 0,
+ sizeof(engine->semaphore.sync_seqno));
+
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+ engine->last_submitted_seqno = seqno;
+
+ engine->hangcheck.seqno = seqno;
+
+ /* After manually advancing the seqno, fake the interrupt in case
+ * there are any waiters for that seqno.
+ */
+ intel_engine_wakeup(engine);
+}
+
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
+{
+ memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+static void intel_engine_init_requests(struct intel_engine_cs *engine)
+{
+ init_request_active(&engine->last_request, NULL);
+ INIT_LIST_HEAD(&engine->request_list);
+}
+
+/**
+ * intel_engine_setup_common - setup engine state not requiring hw access
+ * @engine: Engine to setup.
+ *
+ * Initializes @engine structure members shared between legacy and execlists
+ * submission modes which do not require hardware access.
+ *
+ * Typically done early in the submission mode specific engine setup stage.
+ */
+void intel_engine_setup_common(struct intel_engine_cs *engine)
+{
+ INIT_LIST_HEAD(&engine->execlist_queue);
+ spin_lock_init(&engine->execlist_lock);
+
+ engine->fence_context = fence_context_alloc(1);
+
+ intel_engine_init_requests(engine);
+ intel_engine_init_hangcheck(engine);
+ i915_gem_batch_pool_init(engine, &engine->batch_pool);
+
+ intel_engine_init_cmd_parser(engine);
+}
+
+int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ WARN_ON(engine->scratch);
+
+ obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+ if (!obj)
+ obj = i915_gem_object_create(&engine->i915->drm, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ engine->scratch = vma;
+ DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+ engine->name, i915_ggtt_offset(vma));
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
+{
+ i915_vma_unpin_and_release(&engine->scratch);
+}
+
+/**
+ * intel_engine_init_common - initialize engine state which might require hw access
+ * @engine: Engine to initialize.
+ *
+ * Initializes @engine structure members shared between legacy and execlists
+ * submission modes which do require hardware access.
+ *
+ * Typically done at later stages of submission mode specific engine setup.
+ *
+ * Returns zero on success or an error code on failure.
+ */
+int intel_engine_init_common(struct intel_engine_cs *engine)
+{
+ int ret;
+
+ ret = intel_engine_init_breadcrumbs(engine);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * intel_engine_cleanup_common - cleans up the engine state created by
+ * the common initializers.
+ * @engine: Engine to cleanup.
+ *
+ * This cleans up everything created by the common helpers.
+ */
+void intel_engine_cleanup_common(struct intel_engine_cs *engine)
+{
+ intel_engine_cleanup_scratch(engine);
+
+ intel_engine_fini_breadcrumbs(engine);
+ intel_engine_cleanup_cmd_parser(engine);
+ i915_gem_batch_pool_fini(&engine->batch_pool);
+}
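
A quick worked example of the mask bookkeeping in intel_engines_init() above (illustrative, not part of the patch): on a part whose ring_mask exposes RCS, BCS and VCS, the loop accumulates

        mask = ENGINE_MASK(RCS) | ENGINE_MASK(BCS) | ENGINE_MASK(VCS);
        device_info->num_rings = hweight32(mask);       /* == 3 */

and if ring_mask advertises an engine that intel_engines[] has no init hook for, the WARN_ON fires and ring_mask is trimmed down to mask, so the rest of the driver never sees a half-initialized engine.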
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 3836a1c79714..c43dd9abce79 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) == 7)
lines = min(lines, 2048);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ lines = min(lines, 2560);
/* Hardware needs the full buffer stride, not just the active area. */
return lines * cache->fb.stride;
@@ -190,9 +192,13 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
- I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+ if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+ I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+ } else {
+ I915_WRITE(DPFC_FENCE_YOFF, 0);
+ }
/* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -244,21 +250,29 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
break;
}
- dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_GEN5(dev_priv))
- dpfc_ctl |= params->fb.fence_reg;
+
+ if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN;
+ if (IS_GEN5(dev_priv))
+ dpfc_ctl |= params->fb.fence_reg;
+ if (IS_GEN6(dev_priv)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET,
+ params->crtc.fence_y_offset);
+ }
+ } else {
+ if (IS_GEN6(dev_priv)) {
+ I915_WRITE(SNB_DPFC_CTL_SA, 0);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+ }
+ }
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- if (IS_GEN6(dev_priv)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
- }
-
intel_fbc_recompress(dev_priv);
}
@@ -305,7 +319,15 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+ if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+ dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
+ } else {
+		I915_WRITE(SNB_DPFC_CTL_SA, 0);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+ }
if (dev_priv->fbc.false_color)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
@@ -324,10 +346,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
-
intel_fbc_recompress(dev_priv);
}
@@ -494,7 +512,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc,
if (!no_fbc_on_multiple_pipes(dev_priv))
return true;
- if (plane_state->visible)
+ if (plane_state->base.visible)
fbc->visible_pipes_mask |= (1 << pipe);
else
fbc->visible_pipes_mask &= ~(1 << pipe);
@@ -709,6 +727,14 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
+/* XXX replace me when we have VMA tracking for intel_plane_state */
+static int get_fence_id(struct drm_framebuffer *fb)
+{
+ struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
+
+ return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
+}
+
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -725,9 +751,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
ilk_pipe_pixel_rate(crtc_state);
cache->plane.rotation = plane_state->base.rotation;
- cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
- cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
- cache->plane.visible = plane_state->visible;
+ cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ cache->plane.visible = plane_state->base.visible;
if (!cache->plane.visible)
return;
@@ -737,11 +763,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
/* FIXME: We lack the proper locking here, so only run this on the
	 * platforms that need it. */
if (IS_GEN(dev_priv, 5, 6))
- cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
+ cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
- cache->fb.fence_reg = obj->fence_reg;
- cache->fb.tiling_mode = obj->tiling_mode;
+ cache->fb.fence_reg = get_fence_id(fb);
+ cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -768,6 +794,10 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
+ *
+	 * Note that it is possible for a tiled surface to be unmappable (and
+	 * so have no fence associated with it) due to aperture constraints
+ * at the time of pinning.
*/
if (cache->fb.tiling_mode != I915_TILING_X ||
cache->fb.fence_reg == I915_FENCE_REG_NONE) {
@@ -775,7 +805,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
- cache->plane.rotation != BIT(DRM_ROTATE_0)) {
+ cache->plane.rotation != DRM_ROTATE_0) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
}
@@ -1050,7 +1080,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
- if (!intel_plane_state->visible)
+ if (!intel_plane_state->base.visible)
continue;
for_each_crtc_in_state(state, crtc, crtc_state, j) {
@@ -1075,6 +1105,8 @@ out:
/**
* intel_fbc_enable: tries to enable FBC on the CRTC
* @crtc: the CRTC
+ * @crtc_state: corresponding &drm_crtc_state for @crtc
+ * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
*
* This function checks if the given CRTC was chosen for FBC, then enables it if
* possible. Notice that it doesn't activate FBC. It is valid to call
@@ -1163,11 +1195,8 @@ void intel_fbc_disable(struct intel_crtc *crtc)
return;
mutex_lock(&fbc->lock);
- if (fbc->crtc == crtc) {
- WARN_ON(!fbc->enabled);
- WARN_ON(fbc->active);
+ if (fbc->crtc == crtc)
__intel_fbc_disable(dev_priv);
- }
mutex_unlock(&fbc->lock);
cancel_work_sync(&fbc->work.work);
@@ -1212,7 +1241,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
for_each_intel_crtc(&dev_priv->drm, crtc)
if (intel_crtc_active(&crtc->base) &&
- to_intel_plane_state(crtc->base.primary->state)->visible)
+ to_intel_plane_state(crtc->base.primary->state)->base.visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 3e3632c18733..b7098f98bb67 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -34,7 +34,6 @@
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/init.h>
#include <linux/vga_switcheroo.h>
@@ -42,6 +41,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -159,7 +159,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
ret = PTR_ERR(fb);
goto out;
}
@@ -183,13 +183,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
bool prealloc = false;
- void *vaddr;
+ void __iomem *vaddr;
int ret;
if (intel_fb &&
@@ -215,17 +215,17 @@ static int intelfb_create(struct drm_fb_helper *helper,
sizes->fb_height = intel_fb->base.height;
}
- obj = intel_fb->obj;
-
mutex_lock(&dev->struct_mutex);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
- if (ret)
+ vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto out_unlock;
+ }
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
@@ -245,13 +245,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
- vma = i915_gem_obj_to_ggtt(obj);
-
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = ggtt->mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+ info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
info->fix.smem_len = vma->node.size;
vaddr = i915_vma_pin_iomap(vma);
@@ -273,23 +271,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (ifbdev->fb->obj->stolen && !prealloc)
+ if (intel_fb->obj->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
- fb->width, fb->height,
- i915_gem_obj_ggtt_offset(obj), obj);
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
+ fb->width, fb->height, i915_ggtt_offset(vma));
+ ifbdev->vma = vma;
mutex_unlock(&dev->struct_mutex);
- vga_switcheroo_client_fb_set(dev->pdev, info);
+ vga_switcheroo_client_fb_set(pdev, info);
return 0;
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
- intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+ intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -554,7 +552,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->fb) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
- intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+ intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
drm_framebuffer_remove(&ifbdev->fb->base);
@@ -768,7 +766,7 @@ void intel_fbdev_fini(struct drm_device *dev)
if (!ifbdev)
return;
- flush_work(&dev_priv->fbdev_suspend_work);
+ cancel_work_sync(&dev_priv->fbdev_suspend_work);
if (!current_is_async())
intel_fbdev_sync(ifbdev);
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index ac85357010b4..966de4c7c7a2 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -63,47 +63,30 @@
#include <drm/drmP.h>
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
#include "i915_drv.h"
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @origin: which operation caused the invalidation
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (!obj->frontbuffer_bits)
- return;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
if (origin == ORIGIN_CS) {
- mutex_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.busy_bits
- |= obj->frontbuffer_bits;
- dev_priv->fb_tracking.flip_bits
- &= ~obj->frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&dev_priv->fb_tracking.lock);
}
- intel_psr_invalidate(dev, obj->frontbuffer_bits);
- intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
- intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
+ intel_psr_invalidate(dev_priv, frontbuffer_bits);
+ intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
+ intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
}
/**
* intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
+ * @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -113,64 +96,45 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-static void intel_frontbuffer_flush(struct drm_device *dev,
+static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Delay flushing when rings are still busy.*/
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
if (!frontbuffer_bits)
return;
- intel_edp_drrs_flush(dev, frontbuffer_bits);
- intel_psr_flush(dev, frontbuffer_bits, origin);
+ intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
+ intel_psr_flush(dev_priv, frontbuffer_bits, origin);
intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
}
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned frontbuffer_bits;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (!obj->frontbuffer_bits)
- return;
-
- frontbuffer_bits = obj->frontbuffer_bits;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
if (retire) {
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
/* Filter out new bits since rendering started. */
frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
}
- intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+ if (frontbuffer_bits)
+ intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
}
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. The actual
@@ -180,23 +144,21 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
- intel_psr_single_frame_update(dev, frontbuffer_bits);
+ intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
}
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
@@ -204,23 +166,23 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
/* Mask any cancelled flips. */
frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
- intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+ if (frontbuffer_bits)
+ intel_frontbuffer_flush(dev_priv,
+ frontbuffer_bits, ORIGIN_FLIP);
}
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
@@ -229,15 +191,13 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip(struct drm_device *dev,
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- mutex_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&dev_priv->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- mutex_unlock(&dev_priv->fb_tracking.lock);
+ spin_unlock(&dev_priv->fb_tracking.lock);
- intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+ intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
new file mode 100644
index 000000000000..76ceb539f9f0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __INTEL_FRONTBUFFER_H__
+#define __INTEL_FRONTBUFFER_H__
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ unsigned int frontbuffer_bits;
+
+ frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!frontbuffer_bits)
+ return;
+
+ __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire,
+ enum fb_op_origin origin)
+{
+ unsigned int frontbuffer_bits;
+
+ frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!frontbuffer_bits)
+ return;
+
+ __intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
+}
+
+#endif /* __INTEL_FRONTBUFFER_H__ */
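
A hypothetical caller of the new static inline wrappers (assumed usage, not part of the patch; ORIGIN_CPU is one of the existing fb_op_origin values): a CPU write to a potential frontbuffer brackets the access so that FBC/PSR/DRRS caching is dropped and later re-armed, while the atomic_read() fast path keeps non-frontbuffer objects cheap:

static void example_cpu_write(struct drm_i915_gem_object *obj)
{
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);       /* drop caching */

        /* ... CPU writes through the object's mapping go here ... */

        intel_fb_obj_flush(obj, false, ORIGIN_CPU);     /* re-arm caching */
}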
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 3e3e743740c0..5cdf7aa75be5 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -63,26 +63,27 @@ struct drm_i915_gem_request;
* retcode: errno from last guc_submit()
*/
struct i915_guc_client {
- struct drm_i915_gem_object *client_obj;
+ struct i915_vma *vma;
void *client_base; /* first page (only) of above */
struct i915_gem_context *owner;
struct intel_guc *guc;
+
+ uint32_t engines; /* bitmap of (host) engine ids */
uint32_t priority;
uint32_t ctx_index;
-
uint32_t proc_desc_offset;
+
uint32_t doorbell_offset;
uint32_t cookie;
uint16_t doorbell_id;
- uint16_t padding; /* Maintain alignment */
+ uint16_t padding[3]; /* Maintain alignment */
+ spinlock_t wq_lock;
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
- uint32_t unused; /* Was 'wq_head' */
-
+ uint32_t wq_rsvd;
uint32_t no_wq_space;
- uint32_t q_fail; /* No longer used */
uint32_t b_fail;
int retcode;
@@ -125,11 +126,10 @@ struct intel_guc_fw {
struct intel_guc {
struct intel_guc_fw guc_fw;
uint32_t log_flags;
- struct drm_i915_gem_object *log_obj;
-
- struct drm_i915_gem_object *ads_obj;
+ struct i915_vma *log_vma;
- struct drm_i915_gem_object *ctx_pool_obj;
+ struct i915_vma *ads_vma;
+ struct i915_vma *ctx_pool_vma;
struct ida ctx_ids;
struct i915_guc_client *execbuf_client;
@@ -159,8 +159,8 @@ extern int intel_guc_resume(struct drm_device *dev);
/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
-int i915_guc_submit(struct drm_i915_gem_request *rq);
+int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
+void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 944786d7075b..e40db2d2ae99 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -155,6 +155,7 @@
*
* +-------------------------------+
* | guc_css_header |
+ * | |
* | contains major/minor version |
* +-------------------------------+
* | uCode |
@@ -176,10 +177,10 @@
*
* 1. Header, uCode and RSA are must-have components.
 * 2. All firmware components, if present, are in the sequence illustrated
- * in the layout table above.
+ * in the layout table above.
* 3. Length info of each component can be found in header, in dwords.
* 4. Modulus and exponent key are not required by driver. They may not appear
- * in fw. So driver will load a truncated firmware in this case.
+ * in fw. So driver will load a truncated firmware in this case.
*/
struct guc_css_header {
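
The layout rules above imply simple offset arithmetic when parsing the blob (illustrative sketch; the field names follow the intel_guc_fw usage visible in guc_ucode_xfer_dma() below):

        /* components are packed back to back, lengths come from the header */
        ucode_offset = guc_fw->header_offset + guc_fw->header_size;
        rsa_offset   = ucode_offset + guc_fw->ucode_size;

which is why the DMA transfer below can copy header + uCode as one contiguous range starting at header_offset, while the RSA key is programmed separately via MMIO.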
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 605c69658d2c..6fd39efb7894 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -59,13 +59,25 @@
*
*/
-#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
+#define SKL_FW_MAJOR 6
+#define SKL_FW_MINOR 1
+
+#define BXT_FW_MAJOR 8
+#define BXT_FW_MINOR 7
+
+#define KBL_FW_MAJOR 9
+#define KBL_FW_MINOR 14
+
+#define GUC_FW_PATH(platform, major, minor) \
+ "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
+
+#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
-#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
+#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR)
MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
-#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
+#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
/* User-friendly representation of an enum */
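
Sanity-checking the new helper by hand (illustrative): __stringify() expands its argument before stringizing, so

        GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
          => "i915/" "skl" "_guc_ver" "6" "_" "1" ".bin"
          => "i915/skl_guc_ver6_1.bin"

reproducing exactly the literal the old #define spelled out by hand, so the MODULE_FIRMWARE() tags are unchanged.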
@@ -85,7 +97,7 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
}
};
-static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
int irqs;
@@ -102,7 +114,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_WD_VECS_IER, 0);
}
-static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
+static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
int irqs;
@@ -122,13 +134,28 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_WD_VECS_IER, ~irqs);
/*
- * If GuC has routed PM interrupts to itself, don't keep it.
- * and keep other interrupts those are unmasked by GuC.
- */
+ * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
+ * (unmasked) PM interrupts to the GuC. All other bits of this
+ * register *disable* generation of a specific interrupt.
+ *
+ * 'pm_intr_keep' indicates bits that are NOT to be set when
+ * writing to the PM interrupt mask register, i.e. interrupts
+ * that must not be disabled.
+ *
+ * If the GuC is handling these interrupts, then we must not let
+ * the PM code disable ANY interrupt that the GuC is expecting.
+ * So for each ENABLED (0) bit in this register, we must SET the
+ * bit in pm_intr_keep so that it's left enabled for the GuC.
+ *
+ * OTOH the REDIRECT_TO_GUC bit is initially SET in pm_intr_keep
+ * (so interrupts go to the DISPLAY unit at first); but here we
+ * need to CLEAR that bit, which will result in the register bit
+ * being left SET!
+ */
tmp = I915_READ(GEN6_PMINTRMSK);
- if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
- dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
- dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+ if (tmp & GEN8_PMINTR_REDIRECT_TO_GUC) {
+ dev_priv->rps.pm_intr_keep |= ~tmp;
+ dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
}
}
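
A worked example of the inverted-sense mask handling above (illustrative, not part of the patch): suppose the register reads back as tmp with GEN8_PMINTR_REDIRECT_TO_GUC set and some interrupts masked (1 = disabled). Then ~tmp has a 1 exactly for each interrupt the GuC left enabled, so

        dev_priv->rps.pm_intr_keep |= ~tmp;
                /* those interrupts may never be masked by the PM code */
        dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
                /* mask writes no longer force the redirect bit clear,
                 * so it stays SET and PM interrupts keep going to the GuC */

matching the comment block's description step for step.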
@@ -140,17 +167,24 @@ static u32 get_gttype(struct drm_i915_private *dev_priv)
static u32 get_core_family(struct drm_i915_private *dev_priv)
{
- switch (INTEL_INFO(dev_priv)->gen) {
+ u32 gen = INTEL_GEN(dev_priv);
+
+ switch (gen) {
case 9:
return GFXCORE_FAMILY_GEN9;
default:
- DRM_ERROR("GUC: unsupported core family\n");
+ WARN(1, "GEN%d does not support GuC operation!\n", gen);
return GFXCORE_FAMILY_UNKNOWN;
}
}
-static void set_guc_init_params(struct drm_i915_private *dev_priv)
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+static void guc_params_init(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
u32 params[GUC_CTL_MAX_DWORDS];
@@ -181,16 +215,15 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
}
- if (guc->ads_obj) {
- u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
- >> PAGE_SHIFT;
+ if (guc->ads_vma) {
+ u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
}
/* If GuC submission is enabled, set up additional parameters here */
if (i915.enable_guc_submission) {
- u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
+ u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
pgs >>= PAGE_SHIFT;
@@ -238,12 +271,12 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
* Note that GuC needs the CSS header plus uKernel code to be copied by the
* DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
*/
-static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
+static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
+ struct i915_vma *vma)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
unsigned long offset;
- struct sg_table *sg = fw_obj->pages;
+ struct sg_table *sg = vma->pages;
u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
int i, ret = 0;
@@ -260,7 +293,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
- offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset;
+ offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
@@ -315,6 +348,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
struct drm_device *dev = &dev_priv->drm;
+ struct i915_vma *vma;
int ret;
ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -323,10 +357,10 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
return ret;
}
- ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("pin failed %d\n", ret);
- return ret;
+ vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
+ return PTR_ERR(vma);
}
/* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
@@ -349,7 +383,9 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
}
/* WaC6DisallowByGfxPause*/
- I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
+ if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) ||
+ IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
if (IS_BROXTON(dev))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
@@ -361,13 +397,13 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
- /* allows for 5us before GT can go to RC6 */
+ /* allows for 5us (in 10ns units) before GT can go to RC6 */
I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
}
- set_guc_init_params(dev_priv);
+ guc_params_init(dev_priv);
- ret = guc_ucode_xfer_dma(dev_priv);
+ ret = guc_ucode_xfer_dma(dev_priv, vma);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -375,12 +411,12 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
* We keep the object pages for reuse during resume. But we can unpin it
* now that DMA has completed, so it doesn't continue to take up space.
*/
- i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
+ i915_vma_unpin(vma);
return ret;
}
-static int i915_reset_guc(struct drm_i915_private *dev_priv)
+static int guc_hw_reset(struct drm_i915_private *dev_priv)
{
int ret;
u32 guc_status;
@@ -433,7 +469,7 @@ int intel_guc_setup(struct drm_device *dev)
goto fail;
} else if (*fw_path == '\0') {
/* Device has a GuC but we don't know what f/w to load? */
- DRM_INFO("No GuC firmware known for this platform\n");
+ WARN(1, "No GuC firmware known for this platform!\n");
err = -ENODEV;
goto fail;
}
@@ -447,7 +483,7 @@ int intel_guc_setup(struct drm_device *dev)
goto fail;
}
- direct_interrupts_to_host(dev_priv);
+ guc_interrupts_release(dev_priv);
guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
@@ -470,11 +506,9 @@ int intel_guc_setup(struct drm_device *dev)
* Always reset the GuC just before (re)loading, so
* that the state and timing are fairly predictable
*/
- err = i915_reset_guc(dev_priv);
- if (err) {
- DRM_ERROR("GuC reset failed: %d\n", err);
+ err = guc_hw_reset(dev_priv);
+ if (err)
goto fail;
- }
err = guc_ucode_xfer(dev_priv);
if (!err)
@@ -497,7 +531,7 @@ int intel_guc_setup(struct drm_device *dev)
err = i915_guc_submission_enable(dev_priv);
if (err)
goto fail;
- direct_interrupts_to_guc(dev_priv);
+ guc_interrupts_capture(dev_priv);
}
return 0;
@@ -506,7 +540,7 @@ fail:
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
- direct_interrupts_to_host(dev_priv);
+ guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
@@ -532,15 +566,15 @@ fail:
else if (err == 0)
DRM_INFO("GuC firmware load skipped\n");
else if (ret != -EIO)
- DRM_INFO("GuC firmware load failed: %d\n", err);
+ DRM_NOTE("GuC firmware load failed: %d\n", err);
else
- DRM_ERROR("GuC firmware load failed: %d\n", err);
+ DRM_WARN("GuC firmware load failed: %d\n", err);
if (i915.enable_guc_submission) {
if (fw_path == NULL)
DRM_INFO("GuC submission without firmware not supported\n");
if (ret == 0)
- DRM_INFO("Falling back from GuC submission to execlist mode\n");
+ DRM_NOTE("Falling back from GuC submission to execlist mode\n");
else
DRM_ERROR("GuC init failed: %d\n", ret);
}
@@ -551,6 +585,7 @@ fail:
static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
{
+ struct pci_dev *pdev = dev->pdev;
struct drm_i915_gem_object *obj;
const struct firmware *fw;
struct guc_css_header *css;
@@ -560,7 +595,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
- err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
+ err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
if (err)
goto fail;
if (!fw)
@@ -571,7 +606,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
/* Check the size of the blob before examining buffer contents */
if (fw->size < sizeof(struct guc_css_header)) {
- DRM_ERROR("Firmware header is missing\n");
+ DRM_NOTE("Firmware header is missing\n");
goto fail;
}
@@ -583,7 +618,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
if (guc_fw->header_size != sizeof(struct guc_css_header)) {
- DRM_ERROR("CSS header definition mismatch\n");
+ DRM_NOTE("CSS header definition mismatch\n");
goto fail;
}
@@ -593,7 +628,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
/* now RSA */
if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
- DRM_ERROR("RSA key size is bad\n");
+ DRM_NOTE("RSA key size is bad\n");
goto fail;
}
guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
@@ -602,14 +637,14 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
/* At minimum, the blob must contain the header, uCode and RSA; check the size of all three. */
size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
if (fw->size < size) {
- DRM_ERROR("Missing firmware components\n");
+ DRM_NOTE("Missing firmware components\n");
goto fail;
}
/* Header and uCode will be loaded to WOPCM; check the combined size of the two. */
size = guc_fw->header_size + guc_fw->ucode_size;
if (size > guc_wopcm_size(to_i915(dev))) {
- DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+ DRM_NOTE("Firmware is too large to fit in WOPCM\n");
goto fail;
}
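
A userspace sketch of the size checks above, assuming a [header][uCode][RSA] blob layout; all sizes below are made up:

#include <stddef.h>
#include <stdio.h>

/* returns 0 when the blob passes the same three size checks */
static int check_blob(size_t blob_size, size_t header, size_t ucode,
                      size_t rsa, size_t wopcm)
{
        if (blob_size < header)
                return -1;      /* firmware header is missing */
        if (blob_size < header + ucode + rsa)
                return -2;      /* missing firmware components */
        if (header + ucode > wopcm)
                return -3;      /* too large to fit in WOPCM */
        return 0;
}

int main(void)
{
        /* invented sizes in bytes; pretend WOPCM offers 256 KiB */
        printf("%d\n", check_blob(190000, 128, 180000, 1024, 262144));
        return 0;
}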
@@ -624,7 +659,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
- DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
+ DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
err = -ENOEXEC;
@@ -654,15 +689,15 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
return;
fail:
+ DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
+ guc_fw->guc_fw_path, err);
DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
err, fw, guc_fw->guc_fw_obj);
- DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
- guc_fw->guc_fw_path, err);
mutex_lock(&dev->struct_mutex);
obj = guc_fw->guc_fw_obj;
if (obj)
- drm_gem_object_unreference(&obj->base);
+ i915_gem_object_put(obj);
guc_fw->guc_fw_obj = NULL;
mutex_unlock(&dev->struct_mutex);
@@ -695,16 +730,16 @@ void intel_guc_init(struct drm_device *dev)
fw_path = NULL;
} else if (IS_SKYLAKE(dev)) {
fw_path = I915_SKL_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = 6;
- guc_fw->guc_fw_minor_wanted = 1;
+ guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
+ guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev)) {
fw_path = I915_BXT_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = 8;
- guc_fw->guc_fw_minor_wanted = 7;
+ guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
+ guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev)) {
fw_path = I915_KBL_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = 9;
- guc_fw->guc_fw_minor_wanted = 14;
+ guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
+ guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
} else {
fw_path = ""; /* unknown device */
}
@@ -738,12 +773,12 @@ void intel_guc_fini(struct drm_device *dev)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
mutex_lock(&dev->struct_mutex);
- direct_interrupts_to_host(dev_priv);
+ guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
if (guc_fw->guc_fw_obj)
- drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
+ i915_gem_object_put(guc_fw->guc_fw_obj);
guc_fw->guc_fw_obj = NULL;
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4df9f384910c..13c306173f27 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -985,7 +985,9 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
intel_audio_codec_enable(encoder);
}
-static void g4x_enable_hdmi(struct intel_encoder *encoder)
+static void g4x_enable_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1006,7 +1008,9 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
intel_enable_hdmi_audio(encoder);
}
-static void ibx_enable_hdmi(struct intel_encoder *encoder)
+static void ibx_enable_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1055,7 +1059,9 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
intel_enable_hdmi_audio(encoder);
}
-static void cpt_enable_hdmi(struct intel_encoder *encoder)
+static void cpt_enable_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1108,11 +1114,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder)
intel_enable_hdmi_audio(encoder);
}
-static void vlv_enable_hdmi(struct intel_encoder *encoder)
+static void vlv_enable_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
}
-static void intel_disable_hdmi(struct intel_encoder *encoder)
+static void intel_disable_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1164,17 +1174,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
-static void g4x_disable_hdmi(struct intel_encoder *encoder)
+static void g4x_disable_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
- intel_disable_hdmi(encoder);
+ intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
-static void pch_disable_hdmi(struct intel_encoder *encoder)
+static void pch_disable_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -1182,9 +1196,11 @@ static void pch_disable_hdmi(struct intel_encoder *encoder)
intel_audio_codec_disable(encoder);
}
-static void pch_post_disable_hdmi(struct intel_encoder *encoder)
+static void pch_post_disable_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
- intel_disable_hdmi(encoder);
+ intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
@@ -1204,10 +1220,17 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
if (respect_downstream_limits) {
+ struct intel_connector *connector = hdmi->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
+
if (hdmi->dp_dual_mode.max_tmds_clock)
max_tmds_clock = min(max_tmds_clock,
hdmi->dp_dual_mode.max_tmds_clock);
- if (!hdmi->has_hdmi_sink)
+
+ if (info->max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock,
+ info->max_tmds_clock);
+ else if (!hdmi->has_hdmi_sink)
max_tmds_clock = min(max_tmds_clock, 165000);
}
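
The downstream-limit handling above reduces to a chain of min() clamps; a toy computation with invented values (kHz), where a sink with no EDID TMDS limit and no HDMI capability falls back to the 165 MHz single-link DVI cap:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int max_tmds = 300000;          /* source limit */
        int dual_mode_limit = 225000;   /* DP dual-mode adaptor, 0 if none */
        int edid_limit = 0;             /* sink's EDID limit, 0 if absent */
        int has_hdmi_sink = 0;          /* DVI-only sink */

        if (dual_mode_limit)
                max_tmds = min_int(max_tmds, dual_mode_limit);
        if (edid_limit)
                max_tmds = min_int(max_tmds, edid_limit);
        else if (!has_hdmi_sink)
                max_tmds = min_int(max_tmds, 165000);   /* DVI cap */

        printf("effective TMDS limit: %d kHz\n", max_tmds);     /* 165000 */
        return 0;
}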
@@ -1285,7 +1308,8 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
}
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -1422,24 +1446,22 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
}
static bool
-intel_hdmi_set_edid(struct drm_connector *connector, bool force)
+intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
- struct edid *edid = NULL;
+ struct edid *edid;
bool connected = false;
- if (force) {
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv,
- intel_hdmi->ddc_bus));
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
- intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+ intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
- }
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1465,37 +1487,16 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- bool live_status = false;
- unsigned int try;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- for (try = 0; !live_status && try < 9; try++) {
- if (try)
- msleep(10);
- live_status = intel_digital_port_connected(dev_priv,
- hdmi_to_dig_port(intel_hdmi));
- }
-
- if (!live_status) {
- DRM_DEBUG_KMS("HDMI live status down\n");
- /*
- * Live status register is not reliable on all intel platforms.
- * So consider live_status only for certain platforms, for
- * others, read EDID to determine presence of sink.
- */
- if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
- live_status = true;
- }
-
intel_hdmi_unset_edid(connector);
- if (intel_hdmi_set_edid(connector, live_status)) {
+ if (intel_hdmi_set_edid(connector)) {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@@ -1521,7 +1522,7 @@ intel_hdmi_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
- intel_hdmi_set_edid(connector, true);
+ intel_hdmi_set_edid(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
@@ -1638,7 +1639,9 @@ done:
return 0;
}
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1651,7 +1654,9 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
adjusted_mode);
}
-static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1671,37 +1676,47 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_crtc->config->has_hdmi_sink,
adjusted_mode);
- g4x_enable_hdmi(encoder);
+ g4x_enable_hdmi(encoder, pipe_config, conn_state);
vlv_wait_port_ready(dev_priv, dport, 0x0);
}
-static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
intel_hdmi_prepare(encoder);
vlv_phy_pre_pll_enable(encoder);
}
-static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
intel_hdmi_prepare(encoder);
chv_phy_pre_pll_enable(encoder);
}
-static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
chv_phy_post_pll_disable(encoder);
}
-static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
vlv_phy_reset_lanes(encoder);
}
-static void chv_hdmi_post_disable(struct intel_encoder *encoder)
+static void chv_hdmi_post_disable(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1714,7 +1729,9 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
-static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
+static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
@@ -1734,7 +1751,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_crtc->config->has_hdmi_sink,
adjusted_mode);
- g4x_enable_hdmi(encoder);
+ g4x_enable_hdmi(encoder, pipe_config, conn_state);
vlv_wait_port_ready(dev_priv, dport, 0x0);
@@ -1782,6 +1799,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ u8 ddc_pin;
+
+ if (info->alternate_ddc_pin) {
+ DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+ info->alternate_ddc_pin, port_name(port));
+ return info->alternate_ddc_pin;
+ }
+
+ switch (port) {
+ case PORT_B:
+ if (IS_BROXTON(dev_priv))
+ ddc_pin = GMBUS_PIN_1_BXT;
+ else
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ case PORT_C:
+ if (IS_BROXTON(dev_priv))
+ ddc_pin = GMBUS_PIN_2_BXT;
+ else
+ ddc_pin = GMBUS_PIN_DPC;
+ break;
+ case PORT_D:
+ if (IS_CHERRYVIEW(dev_priv))
+ ddc_pin = GMBUS_PIN_DPD_CHV;
+ else
+ ddc_pin = GMBUS_PIN_DPD;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ }
+
+ DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+ ddc_pin, port_name(port));
+
+ return ddc_pin;
+}
+
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
@@ -1791,7 +1852,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
- uint8_t alternate_ddc_pin;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
@@ -1809,12 +1869,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+
switch (port) {
case PORT_B:
- if (IS_BROXTON(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
@@ -1825,46 +1883,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
- if (IS_BROXTON(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
- if (WARN_ON(IS_BROXTON(dev_priv)))
- intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
- else if (IS_CHERRYVIEW(dev_priv))
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
- else
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_E:
- /* On SKL PORT E doesn't have seperate GMBUS pin
- * We rely on VBT to set a proper alternate GMBUS pin. */
- alternate_ddc_pin =
- dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
- switch (alternate_ddc_pin) {
- case DDC_PIN_B:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
- break;
- case DDC_PIN_C:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
- break;
- case DDC_PIN_D:
- intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
- break;
- default:
- MISSING_CASE(alternate_ddc_pin);
- }
intel_encoder->hpd_pin = HPD_PORT_E;
break;
- case PORT_A:
- intel_encoder->hpd_pin = HPD_PORT_A;
- /* Internal port only for eDP. */
default:
- BUG();
+ MISSING_CASE(port);
+ return;
}
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index f48957ea100d..334d47b5811a 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -477,7 +477,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-void i915_hpd_poll_init_work(struct work_struct *work) {
+static void i915_hpd_poll_init_work(struct work_struct *work)
+{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
hotplug.poll_init_work);
@@ -525,7 +526,6 @@ void i915_hpd_poll_init_work(struct work_struct *work) {
/**
* intel_hpd_poll_init - enables/disables polling for connectors with hpd
* @dev_priv: i915 device instance
- * @enabled: Whether to enable or disable polling
*
* This function enables polling for all connectors, regardless of whether or
* not they support hotplug detection. Under certain conditions HPD may not be
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1f266d7df2ec..79aab9ad6faa 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -255,67 +255,59 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
algo->data = bus;
}
-static int
-gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
- u32 gmbus2_status,
- u32 gmbus4_irq_en)
+static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
{
- int i;
- u32 gmbus2 = 0;
DEFINE_WAIT(wait);
-
- if (!HAS_GMBUS_IRQ(dev_priv))
- gmbus4_irq_en = 0;
+ u32 gmbus2;
+ int ret;
/* Important: The hw handles only the first bit, so set only one! Since
* we also need to check for NAKs besides the hw ready/idle signal, we
- * need to wake up periodically and check that ourselves. */
- I915_WRITE(GMBUS4, gmbus4_irq_en);
-
- for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
- prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
+ * need to wake up periodically and check that ourselves.
+ */
+ if (!HAS_GMBUS_IRQ(dev_priv))
+ irq_en = 0;
- gmbus2 = I915_READ_NOTRACE(GMBUS2);
- if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
- break;
+ add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+ I915_WRITE_FW(GMBUS4, irq_en);
- schedule_timeout(1);
- }
- finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+ status |= GMBUS_SATOER;
+ ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2);
+ if (ret)
+ ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
- I915_WRITE(GMBUS4, 0);
+ I915_WRITE_FW(GMBUS4, 0);
+ remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
- if (gmbus2 & gmbus2_status)
- return 0;
- return -ETIMEDOUT;
+
+ return ret;
}
static int
gmbus_wait_idle(struct drm_i915_private *dev_priv)
{
+ DEFINE_WAIT(wait);
+ u32 irq_enable;
int ret;
- if (!HAS_GMBUS_IRQ(dev_priv))
- return intel_wait_for_register(dev_priv,
- GMBUS2, GMBUS_ACTIVE, 0,
- 10);
-
/* Important: The hw handles only the first bit, so set only one! */
- I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
+ irq_enable = 0;
+ if (HAS_GMBUS_IRQ(dev_priv))
+ irq_enable = GMBUS_IDLE_EN;
- ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
- (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
- msecs_to_jiffies_timeout(10));
+ add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+ I915_WRITE_FW(GMBUS4, irq_enable);
- I915_WRITE(GMBUS4, 0);
+ ret = intel_wait_for_register_fw(dev_priv,
+ GMBUS2, GMBUS_ACTIVE, 0,
+ 10);
- if (ret)
- return 0;
- else
- return -ETIMEDOUT;
+ I915_WRITE_FW(GMBUS4, 0);
+ remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+
+ return ret;
}
static int
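
The rewritten gmbus_wait() above is a two-phase wait: a roughly 2 us busy spin for the common fast case, then a sleeping poll against a 50 ms deadline. A hedged userspace approximation, with the register modelled as a volatile variable and an assumed HW_RDY bit position:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static volatile uint32_t fake_gmbus2;   /* stands in for GMBUS2 */

static uint64_t now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static int wait_status(uint32_t mask, unsigned int spin_us, unsigned int sleep_ms)
{
        uint64_t deadline = now_ns() + (uint64_t)spin_us * 1000;

        /* phase 1: short busy spin, cheap when the hardware is already done */
        do {
                if (fake_gmbus2 & mask)
                        return 0;
        } while (now_ns() < deadline);

        /* phase 2: sleeping poll at ~1 ms granularity up to the big timeout */
        deadline = now_ns() + (uint64_t)sleep_ms * 1000000;
        while (now_ns() < deadline) {
                struct timespec ts = { 0, 1000000 };    /* 1 ms */
                nanosleep(&ts, NULL);
                if (fake_gmbus2 & mask)
                        return 0;
        }
        return -1;      /* the driver returns -ETIMEDOUT here */
}

int main(void)
{
        fake_gmbus2 = 1u << 11;         /* pretend HW_RDY came up instantly */
        printf("wait_status: %d\n", wait_status(1u << 11, 2, 50));
        return 0;
}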
@@ -323,22 +315,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus1_index)
{
- I915_WRITE(GMBUS1,
- gmbus1_index |
- GMBUS_CYCLE_WAIT |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
- (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ I915_WRITE_FW(GMBUS1,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
- ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
- GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
- val = I915_READ(GMBUS3);
+ val = I915_READ_FW(GMBUS3);
do {
*buf++ = val & 0xff;
val >>= 8;
@@ -385,12 +376,12 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
len -= 1;
}
- I915_WRITE(GMBUS3, val);
- I915_WRITE(GMBUS1,
- GMBUS_CYCLE_WAIT |
- (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
- (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ I915_WRITE_FW(GMBUS3, val);
+ I915_WRITE_FW(GMBUS1,
+ GMBUS_CYCLE_WAIT |
+ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -399,10 +390,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- I915_WRITE(GMBUS3, val);
+ I915_WRITE_FW(GMBUS3, val);
- ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
- GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
}
@@ -460,13 +450,13 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
/* GMBUS5 holds 16-bit index */
if (gmbus5)
- I915_WRITE(GMBUS5, gmbus5);
+ I915_WRITE_FW(GMBUS5, gmbus5);
ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
- I915_WRITE(GMBUS5, 0);
+ I915_WRITE_FW(GMBUS5, 0);
return ret;
}
@@ -478,11 +468,15 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
+ const unsigned int fw =
+ intel_uncore_forcewake_for_reg(dev_priv, GMBUS0,
+ FW_REG_READ | FW_REG_WRITE);
int i = 0, inc, try = 0;
int ret = 0;
+ intel_uncore_forcewake_get(dev_priv, fw);
retry:
- I915_WRITE(GMBUS0, bus->reg0);
+ I915_WRITE_FW(GMBUS0, bus->reg0);
for (; i < num; i += inc) {
inc = 1;
@@ -496,8 +490,8 @@ retry:
}
if (!ret)
- ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
- GMBUS_HW_WAIT_EN);
+ ret = gmbus_wait(dev_priv,
+ GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
if (ret == -ETIMEDOUT)
goto timeout;
else if (ret)
@@ -508,7 +502,7 @@ retry:
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
- I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+ I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
@@ -519,7 +513,7 @@ retry:
adapter->name);
ret = -ETIMEDOUT;
}
- I915_WRITE(GMBUS0, 0);
+ I915_WRITE_FW(GMBUS0, 0);
ret = ret ?: i;
goto out;
@@ -548,9 +542,9 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
- I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT);
- I915_WRITE(GMBUS1, 0);
- I915_WRITE(GMBUS0, 0);
+ I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT);
+ I915_WRITE_FW(GMBUS1, 0);
+ I915_WRITE_FW(GMBUS0, 0);
DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
adapter->name, msgs[i].addr,
@@ -573,7 +567,7 @@ clear_err:
timeout:
DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
- I915_WRITE(GMBUS0, 0);
+ I915_WRITE_FW(GMBUS0, 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -582,6 +576,7 @@ timeout:
ret = -EAGAIN;
out:
+ intel_uncore_forcewake_put(dev_priv, fw);
return ret;
}
@@ -633,6 +628,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
int intel_setup_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_gmbus *bus;
unsigned int pin;
int ret;
@@ -663,7 +659,7 @@ int intel_setup_gmbus(struct drm_device *dev)
"i915 gmbus %s",
get_gmbus_pin(dev_priv, pin)->name);
- bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.dev.parent = &pdev->dev;
bus->dev_priv = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 414ddda43922..0adb879833ff 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -156,6 +156,11 @@
#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
+#define GEN8_CTX_STATUS_COMPLETED_MASK \
+ (GEN8_CTX_STATUS_ACTIVE_IDLE | \
+ GEN8_CTX_STATUS_PREEMPTED | \
+ GEN8_CTX_STATUS_ELEMENT_SWITCH)
+
#define CTX_LRI_HEADER_0 0x01
#define CTX_CONTEXT_CONTROL 0x02
#define CTX_RING_HEAD 0x04
@@ -221,10 +226,16 @@ enum {
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+#define WA_TAIL_DWORDS 2
+
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
+static void execlists_init_reg_state(u32 *reg_state,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_ring *ring);
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -263,12 +274,10 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
- engine->idle_lite_restore_wa = ~0;
-
- engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
- (engine->id == VCS || engine->id == VCS2);
+ engine->disable_lite_restore_wa =
+ (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
+ (engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
if (IS_GEN8(dev_priv))
@@ -288,7 +297,6 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 * for a pinned context
- *
* @ctx: Context to work on
* @engine: Engine the descriptor will be used with
*
@@ -297,12 +305,13 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
* expensive to calculate, we'll just do it once and cache the result,
* which remains valid until the context is unpinned.
*
- * This is what a descriptor looks like, from LSB to MSB:
- * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
- * bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-52: ctx ID, a globally unique tag
- * bits 53-54: mbz, reserved for use by hardware
- * bits 55-63: group ID, currently unused and set to 0
+ * This is what a descriptor looks like, from LSB to MSB::
+ *
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
+ * bits 12-31: LRCA, GTT address of (the HWSP of) this context
+ * bits 32-52: ctx ID, a globally unique tag
+ * bits 53-54: mbz, reserved for use by hardware
+ * bits 55-63: group ID, currently unused and set to 0
*/
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
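
A worked example of packing the descriptor fields per the layout above; the flag, LRCA and ctx ID values are invented, only the bit positions follow the comment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t flags = 0x023;         /* bits 0-11, GEN8_CTX_* template */
        uint64_t lrca = 0x0012f000;     /* bits 12-31, page-aligned GGTT address */
        uint64_t ctx_id = 42;           /* bits 32-52, globally unique tag */
        uint64_t desc = 0;

        desc |= flags & 0xfffull;
        desc |= lrca & 0xfffff000ull;   /* LRCA already occupies bits 12-31 */
        desc |= (ctx_id & 0x1fffffull) << 32;

        printf("descriptor: 0x%016llx\n", (unsigned long long)desc);
        return 0;
}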
@@ -315,7 +324,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
desc = ctx->desc_template; /* bits 3-4 */
desc |= engine->ctx_desc_template; /* bits 0-11 */
- desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+ desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
/* bits 12-31 */
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
@@ -328,34 +337,18 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
return ctx->engine[engine->id].lrc_desc;
}
-static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
- struct drm_i915_gem_request *rq1)
+static inline void
+execlists_context_status_change(struct drm_i915_gem_request *rq,
+ unsigned long status)
{
+ /*
+	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
+	 * the compiler should eliminate this function as dead code.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return;
- struct intel_engine_cs *engine = rq0->engine;
- struct drm_i915_private *dev_priv = rq0->i915;
- uint64_t desc[2];
-
- if (rq1) {
- desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
- rq1->elsp_submitted++;
- } else {
- desc[1] = 0;
- }
-
- desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
- rq0->elsp_submitted++;
-
- /* You must always write both descriptors in the order below. */
- I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
- I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
-
- I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
- /* The context is automatically loaded after the following */
- I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
-
- /* ELSP is a wo register, use another nearby reg for posting */
- POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
+ atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
}
static void
@@ -367,13 +360,13 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}
-static void execlists_update_context(struct drm_i915_gem_request *rq)
+static u64 execlists_update_context(struct drm_i915_gem_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
+ struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
- uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
+ u32 *reg_state = ce->lrc_reg_state;
- reg_state[CTX_RING_TAIL+1] = rq->tail;
+ reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
@@ -382,321 +375,236 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
*/
if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
execlists_update_context_pdps(ppgtt, reg_state);
+
+ return ce->lrc_desc;
}
-static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
- struct drm_i915_gem_request *rq1)
+static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = rq0->i915;
- unsigned int fw_domains = rq0->engine->fw_domains;
-
- execlists_update_context(rq0);
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct execlist_port *port = engine->execlist_port;
+ u32 __iomem *elsp =
+ dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+ u64 desc[2];
- if (rq1)
- execlists_update_context(rq1);
+ if (!port[0].count)
+ execlists_context_status_change(port[0].request,
+ INTEL_CONTEXT_SCHEDULE_IN);
+ desc[0] = execlists_update_context(port[0].request);
+ engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
- spin_lock_irq(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+ if (port[1].request) {
+ GEM_BUG_ON(port[1].count);
+ execlists_context_status_change(port[1].request,
+ INTEL_CONTEXT_SCHEDULE_IN);
+ desc[1] = execlists_update_context(port[1].request);
+ port[1].count = 1;
+ } else {
+ desc[1] = 0;
+ }
+ GEM_BUG_ON(desc[0] == desc[1]);
- execlists_elsp_write(rq0, rq1);
+ /* You must always write both descriptors in the order below. */
+ writel(upper_32_bits(desc[1]), elsp);
+ writel(lower_32_bits(desc[1]), elsp);
- intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ writel(upper_32_bits(desc[0]), elsp);
+ /* The context is automatically loaded after the following */
+ writel(lower_32_bits(desc[0]), elsp);
}
-static inline void execlists_context_status_change(
- struct drm_i915_gem_request *rq,
- unsigned long status)
+static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
{
- /*
- * Only used when GVT-g is enabled now. When GVT-g is disabled,
- * The compiler should eliminate this function as dead-code.
- */
- if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
- return;
-
- atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+ return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
+ ctx->execlists_force_single_submission);
}
-static void execlists_context_unqueue(struct intel_engine_cs *engine)
+static bool can_merge_ctx(const struct i915_gem_context *prev,
+ const struct i915_gem_context *next)
{
- struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
- struct drm_i915_gem_request *cursor, *tmp;
+ if (prev != next)
+ return false;
- assert_spin_locked(&engine->execlist_lock);
+ if (ctx_single_port_submission(prev))
+ return false;
- /*
- * If irqs are not active generate a warning as batches that finish
- * without the irqs may get lost and a GPU Hang may occur.
- */
- WARN_ON(!intel_irqs_enabled(engine->i915));
-
- /* Try to read in pairs */
- list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
- execlist_link) {
- if (!req0) {
- req0 = cursor;
- } else if (req0->ctx == cursor->ctx) {
- /* Same ctx: ignore first request, as second request
- * will update tail past first request's workload */
- cursor->elsp_submitted = req0->elsp_submitted;
- list_del(&req0->execlist_link);
- i915_gem_request_unreference(req0);
- req0 = cursor;
- } else {
- if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
- /*
- * req0 (after merged) ctx requires single
- * submission, stop picking
- */
- if (req0->ctx->execlists_force_single_submission)
- break;
- /*
- * req0 ctx doesn't require single submission,
- * but next req ctx requires, stop picking
- */
- if (cursor->ctx->execlists_force_single_submission)
- break;
- }
- req1 = cursor;
- WARN_ON(req1->elsp_submitted);
- break;
- }
- }
+ return true;
+}
- if (unlikely(!req0))
- return;
+static void execlists_dequeue(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *cursor, *last;
+ struct execlist_port *port = engine->execlist_port;
+ bool submit = false;
+
+ last = port->request;
+ if (last)
+ /* WaIdleLiteRestore:bdw,skl
+ * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
+ * as we resubmit the request. See gen8_emit_request()
+ * for where we prepare the padding after the end of the
+ * request.
+ */
+ last->tail = last->wa_tail;
- execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
+ GEM_BUG_ON(port[1].request);
- if (req1)
- execlists_context_status_change(req1,
- INTEL_CONTEXT_SCHEDULE_IN);
+ /* Hardware submission is through 2 ports. Conceptually each port
+ * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
+ * static for a context, and unique to each, so we only execute
+ * requests belonging to a single context from each ring. RING_HEAD
+ * is maintained by the CS in the context image, it marks the place
+ * where it got up to last time, and through RING_TAIL we tell the CS
+ * where we want to execute up to this time.
+ *
+ * In this list the requests are in order of execution. Consecutive
+ * requests from the same context are adjacent in the ringbuffer. We
+ * can combine these requests into a single RING_TAIL update:
+ *
+ * RING_HEAD...req1...req2
+ * ^- RING_TAIL
+ * since to execute req2 the CS must first execute req1.
+ *
+	 * Our goal, then, is to point each port at the end of a consecutive
+	 * sequence of requests, as that is the optimal (fewest wake-ups and
+	 * context switches) submission.
+ */
- if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
- /*
- * WaIdleLiteRestore: make sure we never cause a lite restore
- * with HEAD==TAIL.
+ spin_lock(&engine->execlist_lock);
+ list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) {
+ /* Can we combine this request with the current port? It has to
+ * be the same context/ringbuffer and not have any exceptions
+ * (e.g. GVT saying never to combine contexts).
*
- * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
- * resubmit the request. See gen8_emit_request() for where we
- * prepare the padding after the end of the request.
+ * If we can combine the requests, we can execute both by
+ * updating the RING_TAIL to point to the end of the second
+ * request, and so we never need to tell the hardware about
+ * the first.
*/
- struct intel_ringbuffer *ringbuf;
+ if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
+ /* If we are on the second port and cannot combine
+ * this request with the last, then we are done.
+ */
+ if (port != engine->execlist_port)
+ break;
+
+ /* If GVT overrides us we only ever submit port[0],
+ * leaving port[1] empty. Note that we also have
+ * to be careful that we don't queue the same
+ * context (even though a different request) to
+ * the second port.
+ */
+ if (ctx_single_port_submission(cursor->ctx))
+ break;
+
+ GEM_BUG_ON(last->ctx == cursor->ctx);
+
+ i915_gem_request_assign(&port->request, last);
+ port++;
+ }
+ last = cursor;
+ submit = true;
+ }
+ if (submit) {
+ /* Decouple all the requests submitted from the queue */
+ engine->execlist_queue.next = &cursor->execlist_link;
+ cursor->execlist_link.prev = &engine->execlist_queue;
- ringbuf = req0->ctx->engine[engine->id].ringbuf;
- req0->tail += 8;
- req0->tail &= ringbuf->size - 1;
+ i915_gem_request_assign(&port->request, last);
}
+ spin_unlock(&engine->execlist_lock);
- execlists_submit_requests(req0, req1);
+ if (submit)
+ execlists_submit_ports(engine);
}
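
A toy model of the dequeue policy described above, with plain C in place of the kernel types: walk an ordered queue, coalesce consecutive requests from the same context, and point at most two ports at the tail request of each run:

#include <stdio.h>

struct req { int ctx; int id; };

int main(void)
{
        struct req queue[] = {
                { .ctx = 1, .id = 10 }, { .ctx = 1, .id = 11 },
                { .ctx = 2, .id = 12 }, { .ctx = 2, .id = 13 },
                { .ctx = 3, .id = 14 },         /* left queued for next time */
        };
        const int n = sizeof(queue) / sizeof(queue[0]);
        struct req *port[2] = { NULL, NULL };
        struct req *last = NULL;
        int slot = 0, i;

        for (i = 0; i < n; i++) {
                if (last && queue[i].ctx != last->ctx) {
                        if (slot == 1)          /* both ports spoken for */
                                break;
                        port[slot++] = last;    /* close the first run */
                }
                last = &queue[i];
        }
        port[slot] = last;      /* tail of the final merged run */

        for (i = 0; i <= slot; i++)
                printf("port[%d]: ctx %d, req %d\n", i, port[i]->ctx, port[i]->id);
        return 0;       /* prints ctx 1/req 11 and ctx 2/req 13 */
}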
-static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
+static bool execlists_elsp_idle(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *head_req;
-
- assert_spin_locked(&engine->execlist_lock);
-
- head_req = list_first_entry_or_null(&engine->execlist_queue,
- struct drm_i915_gem_request,
- execlist_link);
-
- if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
- return 0;
-
- WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
-
- if (--head_req->elsp_submitted > 0)
- return 0;
-
- execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
-
- list_del(&head_req->execlist_link);
- i915_gem_request_unreference(head_req);
-
- return 1;
+ return !engine->execlist_port[0].request;
}
-static u32
-get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
- u32 *context_id)
+static bool execlists_elsp_ready(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- u32 status;
+ int port;
- read_pointer %= GEN8_CSB_ENTRIES;
+ port = 1; /* wait for a free slot */
+ if (engine->disable_lite_restore_wa || engine->preempt_wa)
+ port = 0; /* wait for GPU to be idle before continuing */
- status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
-
- if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
- return 0;
-
- *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
- read_pointer));
-
- return status;
+ return !engine->execlist_port[port].request;
}
-/**
- * intel_lrc_irq_handler() - handle Context Switch interrupts
- * @data: tasklet handler passed in unsigned long
- *
+/*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
static void intel_lrc_irq_handler(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct execlist_port *port = engine->execlist_port;
struct drm_i915_private *dev_priv = engine->i915;
- u32 status_pointer;
- unsigned int read_pointer, write_pointer;
- u32 csb[GEN8_CSB_ENTRIES][2];
- unsigned int csb_read = 0, i;
- unsigned int submit_contexts = 0;
intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
- status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
-
- read_pointer = engine->next_context_status_buffer;
- write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
- if (read_pointer > write_pointer)
- write_pointer += GEN8_CSB_ENTRIES;
-
- while (read_pointer < write_pointer) {
- if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
- break;
- csb[csb_read][0] = get_context_status(engine, ++read_pointer,
- &csb[csb_read][1]);
- csb_read++;
- }
-
- engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
- /* Update the read pointer to the old write pointer. Manual ringbuffer
- * management ftw </sarcasm> */
- I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
- _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
- engine->next_context_status_buffer << 8));
-
- intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
-
- spin_lock(&engine->execlist_lock);
+ if (!execlists_elsp_idle(engine)) {
+ u32 __iomem *csb_mmio =
+ dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
+ u32 __iomem *buf =
+ dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
+ unsigned int csb, head, tail;
+
+ csb = readl(csb_mmio);
+ head = GEN8_CSB_READ_PTR(csb);
+ tail = GEN8_CSB_WRITE_PTR(csb);
+ if (tail < head)
+ tail += GEN8_CSB_ENTRIES;
+ while (head < tail) {
+ unsigned int idx = ++head % GEN8_CSB_ENTRIES;
+ unsigned int status = readl(buf + 2 * idx);
+
+ if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+ continue;
+
+ GEM_BUG_ON(port[0].count == 0);
+ if (--port[0].count == 0) {
+ GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+ execlists_context_status_change(port[0].request,
+ INTEL_CONTEXT_SCHEDULE_OUT);
+
+ i915_gem_request_put(port[0].request);
+ port[0] = port[1];
+ memset(&port[1], 0, sizeof(port[1]));
+
+ engine->preempt_wa = false;
+ }
- for (i = 0; i < csb_read; i++) {
- if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
- if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
- if (execlists_check_remove_request(engine, csb[i][1]))
- WARN(1, "Lite Restored request removed from queue\n");
- } else
- WARN(1, "Preemption without Lite Restore\n");
+ GEM_BUG_ON(port[0].count == 0 &&
+ !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
}
- if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
- GEN8_CTX_STATUS_ELEMENT_SWITCH))
- submit_contexts +=
- execlists_check_remove_request(engine, csb[i][1]);
- }
-
- if (submit_contexts) {
- if (!engine->disable_lite_restore_wa ||
- (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
- execlists_context_unqueue(engine);
+ writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+ GEN8_CSB_WRITE_PTR(csb) << 8),
+ csb_mmio);
}
- spin_unlock(&engine->execlist_lock);
+ if (execlists_elsp_ready(engine))
+ execlists_dequeue(engine);
- if (unlikely(submit_contexts > 2))
- DRM_ERROR("More than two context complete events?\n");
+ intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
}
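
The head/tail arithmetic above walks a six-entry CSB ring with wrap-around; a minimal standalone demonstration (read pointer 4, hardware write pointer 1, so three events get consumed):

#include <stdio.h>

#define CSB_ENTRIES 6   /* GEN8_CSB_ENTRIES is 6 on these parts */

int main(void)
{
        unsigned int head = 4, tail = 1;

        if (tail < head)
                tail += CSB_ENTRIES;    /* unwrap: 1 becomes 7 */
        while (head < tail) {
                unsigned int idx = ++head % CSB_ENTRIES;
                printf("consume CSB[%u]\n", idx);       /* 5, then 0, then 1 */
        }
        return 0;
}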
-static void execlists_context_queue(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
- struct drm_i915_gem_request *cursor;
- int num_elements = 0;
-
- spin_lock_bh(&engine->execlist_lock);
+ unsigned long flags;
- list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
- if (++num_elements > 2)
- break;
+ spin_lock_irqsave(&engine->execlist_lock, flags);
- if (num_elements > 2) {
- struct drm_i915_gem_request *tail_req;
-
- tail_req = list_last_entry(&engine->execlist_queue,
- struct drm_i915_gem_request,
- execlist_link);
-
- if (request->ctx == tail_req->ctx) {
- WARN(tail_req->elsp_submitted != 0,
- "More than 2 already-submitted reqs queued\n");
- list_del(&tail_req->execlist_link);
- i915_gem_request_unreference(tail_req);
- }
- }
-
- i915_gem_request_reference(request);
list_add_tail(&request->execlist_link, &engine->execlist_queue);
- request->ctx_hw_id = request->ctx->hw_id;
- if (num_elements == 0)
- execlists_context_unqueue(engine);
+ if (execlists_elsp_idle(engine))
+ tasklet_hi_schedule(&engine->irq_tasklet);
- spin_unlock_bh(&engine->execlist_lock);
-}
-
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- uint32_t flush_domains;
- int ret;
-
- flush_domains = 0;
- if (engine->gpu_caches_dirty)
- flush_domains = I915_GEM_GPU_DOMAINS;
-
- ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
- if (ret)
- return ret;
-
- engine->gpu_caches_dirty = false;
- return 0;
-}
-
-static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
- struct list_head *vmas)
-{
- const unsigned other_rings = ~intel_engine_flag(req->engine);
- struct i915_vma *vma;
- uint32_t flush_domains = 0;
- bool flush_chipset = false;
- int ret;
-
- list_for_each_entry(vma, vmas, exec_list) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, req->engine, &req);
- if (ret)
- return ret;
- }
-
- if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
- flush_chipset |= i915_gem_clflush_object(obj, false);
-
- flush_domains |= obj->base.write_domain;
- }
-
- if (flush_domains & I915_GEM_DOMAIN_GTT)
- wmb();
-
- /* Unconditionally invalidate gpu caches and ensure that we do flush
- * any residual writes from the previous batch.
- */
- return logical_ring_invalidate_all_caches(req);
+ spin_unlock_irqrestore(&engine->execlist_lock, flags);
}
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -717,7 +625,11 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
return ret;
}
- request->ringbuf = ce->ringbuf;
+ request->ring = ce->ring;
+
+ ret = intel_lr_context_pin(request->ctx, engine);
+ if (ret)
+ return ret;
if (i915.enable_guc_submission) {
/*
@@ -725,23 +637,19 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
* going any further, as the i915_add_request() call
* later on mustn't fail ...
*/
- ret = i915_guc_wq_check_space(request);
+ ret = i915_guc_wq_reserve(request);
if (ret)
- return ret;
+ goto err_unpin;
}
- ret = intel_lr_context_pin(request->ctx, engine);
- if (ret)
- return ret;
-
ret = intel_ring_begin(request, 0);
if (ret)
- goto err_unpin;
+ goto err_unreserve;
if (!ce->initialised) {
ret = engine->init_context(request);
if (ret)
- goto err_unpin;
+ goto err_unreserve;
ce->initialised = true;
}
@@ -756,13 +664,16 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
request->reserved_space -= EXECLISTS_REQUEST_SIZE;
return 0;
+err_unreserve:
+ if (i915.enable_guc_submission)
+ i915_guc_wq_unreserve(request);
err_unpin:
intel_lr_context_unpin(request->ctx, engine);
return ret;
}
/*
- * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * intel_logical_ring_advance() - advance the tail and prepare for submission
* @request: Request to advance the logical ringbuffer of.
*
* The tail is updated in our logical ringbuffer struct, not in the actual context. What
@@ -771,13 +682,13 @@ err_unpin:
* point, the tail *inside* the context is updated and the ELSP written to.
*/
static int
-intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
+intel_logical_ring_advance(struct drm_i915_gem_request *request)
{
- struct intel_ringbuffer *ringbuf = request->ringbuf;
+ struct intel_ring *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
- intel_logical_ring_advance(ringbuf);
- request->tail = ringbuf->tail;
+ intel_ring_advance(ring);
+ request->tail = ring->tail;
/*
* Here we add two extra NOOPs as padding to avoid
@@ -785,9 +696,10 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
*
* Caller must reserve WA_TAIL_DWORDS for us!
*/
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ request->wa_tail = ring->tail;
/* We keep the previous context alive until we retire the following
* request. This ensures that the context object is still pinned
@@ -797,168 +709,14 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
*/
request->previous_context = engine->last_context;
engine->last_context = request->ctx;
-
- if (i915.enable_guc_submission)
- i915_guc_submit(request);
- else
- execlists_context_queue(request);
-
- return 0;
-}
-
-/**
- * execlists_submission() - submit a batchbuffer for execution, Execlists style
- * @params: execbuffer call parameters.
- * @args: execbuffer call arguments.
- * @vmas: list of vmas.
- *
- * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
- * away the submission details of the execbuffer ioctl call.
- *
- * Return: non-zero if the submission fails.
- */
-int intel_execlists_submission(struct i915_execbuffer_params *params,
- struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas)
-{
- struct drm_device *dev = params->dev;
- struct intel_engine_cs *engine = params->engine;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
- u64 exec_start;
- int instp_mode;
- u32 instp_mask;
- int ret;
-
- instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- instp_mask = I915_EXEC_CONSTANTS_MASK;
- switch (instp_mode) {
- case I915_EXEC_CONSTANTS_REL_GENERAL:
- case I915_EXEC_CONSTANTS_ABSOLUTE:
- case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
- return -EINVAL;
- }
-
- if (instp_mode != dev_priv->relative_constants_mode) {
- if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
- return -EINVAL;
- }
-
- /* The HW changed the meaning on this bit on gen6 */
- instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
- }
- break;
- default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
- return -EINVAL;
- }
-
- if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- DRM_DEBUG("sol reset is gen7 only\n");
- return -EINVAL;
- }
-
- ret = execlists_move_to_gpu(params->request, vmas);
- if (ret)
- return ret;
-
- if (engine == &dev_priv->engine[RCS] &&
- instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_ring_begin(params->request, 4);
- if (ret)
- return ret;
-
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
- intel_logical_ring_emit_reg(ringbuf, INSTPM);
- intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
- intel_logical_ring_advance(ringbuf);
-
- dev_priv->relative_constants_mode = instp_mode;
- }
-
- exec_start = params->batch_obj_vm_offset +
- args->batch_start_offset;
-
- ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
- if (ret)
- return ret;
-
- trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
-
- i915_gem_execbuffer_move_to_active(vmas, params->request);
-
- return 0;
-}
-
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_request *req, *tmp;
- LIST_HEAD(cancel_list);
-
- WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
-
- spin_lock_bh(&engine->execlist_lock);
- list_replace_init(&engine->execlist_queue, &cancel_list);
- spin_unlock_bh(&engine->execlist_lock);
-
- list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
- list_del(&req->execlist_link);
- i915_gem_request_unreference(req);
- }
-}
-
-void intel_logical_ring_stop(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- if (!intel_engine_initialized(engine))
- return;
-
- ret = intel_engine_idle(engine);
- if (ret)
- DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
- engine->name, ret);
-
- /* TODO: Is this correct with Execlists enabled? */
- I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register(dev_priv,
- RING_MI_MODE(engine->mmio_base),
- MODE_IDLE, MODE_IDLE,
- 1000)) {
- DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
- return;
- }
- I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
-}
-
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
-
- if (!engine->gpu_caches_dirty)
- return 0;
-
- ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
-
- engine->gpu_caches_dirty = false;
return 0;
}
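The comment above explains why intel_logical_ring_advance() records two tails: the real tail that the context image restarts from, and a padded tail (wa_tail) that includes two MI_NOOPs for WaIdleLiteRestore. A minimal sketch of that bookkeeping, assuming an illustrative ring struct rather than the driver's types:

```c
#include <stdint.h>

#define MI_NOOP        0u
#define WA_TAIL_DWORDS 2

struct ring {
	uint32_t *vaddr;   /* ring buffer mapping */
	uint32_t size;     /* power of two, in bytes */
	uint32_t tail;     /* in bytes */
};

static void ring_emit(struct ring *r, uint32_t dw)
{
	r->vaddr[r->tail / 4] = dw;
	r->tail += 4;
}

static void ring_advance(struct ring *r)
{
	r->tail &= r->size - 1;   /* wrap the tail, as intel_ring_advance() does */
}

static void advance_and_pad(struct ring *r, uint32_t *tail, uint32_t *wa_tail)
{
	ring_advance(r);
	*tail = r->tail;          /* tail the context image will restart at */
	ring_emit(r, MI_NOOP);    /* caller reserved WA_TAIL_DWORDS for us */
	ring_emit(r, MI_NOOP);
	ring_advance(r);
	*wa_tail = r->tail;       /* padded tail actually handed to the hardware */
}
```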
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ctx->i915;
struct intel_context *ce = &ctx->engine[engine->id];
void *vaddr;
- u32 *lrc_reg_state;
int ret;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
@@ -966,41 +724,42 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
if (ce->pin_count++)
return 0;
- ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
if (ret)
goto err;
- vaddr = i915_gem_object_pin_map(ce->state);
+ vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
- goto unpin_ctx_obj;
+ goto unpin_vma;
}
- lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
- ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
+ ret = intel_ring_pin(ce->ring);
if (ret)
goto unpin_map;
- i915_gem_context_reference(ctx);
- ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
intel_lr_context_descriptor_update(ctx, engine);
- lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
- ce->lrc_reg_state = lrc_reg_state;
- ce->state->dirty = true;
+ ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
+ i915_ggtt_offset(ce->ring->vma);
+
+ ce->state->obj->dirty = true;
/* Invalidate GuC TLB. */
- if (i915.enable_guc_submission)
+ if (i915.enable_guc_submission) {
+ struct drm_i915_private *dev_priv = ctx->i915;
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+ }
+ i915_gem_context_get(ctx);
return 0;
unpin_map:
- i915_gem_object_unpin_map(ce->state);
-unpin_ctx_obj:
- i915_gem_object_ggtt_unpin(ce->state);
+ i915_gem_object_unpin_map(ce->state->obj);
+unpin_vma:
+ __i915_vma_unpin(ce->state);
err:
ce->pin_count = 0;
return ret;
@@ -1017,30 +776,24 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
if (--ce->pin_count)
return;
- intel_unpin_ringbuffer_obj(ce->ringbuf);
+ intel_ring_unpin(ce->ring);
- i915_gem_object_unpin_map(ce->state);
- i915_gem_object_ggtt_unpin(ce->state);
+ i915_gem_object_unpin_map(ce->state->obj);
+ i915_vma_unpin(ce->state);
- ce->lrc_vma = NULL;
- ce->lrc_desc = 0;
- ce->lrc_reg_state = NULL;
-
- i915_gem_context_unreference(ctx);
+ i915_gem_context_put(ctx);
}
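intel_lr_context_pin/unpin above use a classic pin-count idiom: only the first pin does real work, only the last unpin undoes it, and a failed first pin must leave the count at zero. A small sketch of the shape, with stand-in helper names (backend_pin/backend_unpin are assumptions, not i915 API):

```c
#include <stddef.h>

struct pinned_ctx {
	unsigned int pin_count;
	void *mapping;
};

extern void *backend_pin(void);      /* assumed helpers */
extern void backend_unpin(void *map);

static int ctx_pin(struct pinned_ctx *ce)
{
	if (ce->pin_count++)         /* already pinned: just take a ref */
		return 0;

	ce->mapping = backend_pin();
	if (!ce->mapping) {
		ce->pin_count = 0;   /* undo the optimistic increment */
		return -1;
	}
	return 0;
}

static void ctx_unpin(struct pinned_ctx *ce)
{
	if (--ce->pin_count)         /* still in use elsewhere */
		return;

	backend_unpin(ce->mapping);
	ce->mapping = NULL;
}
```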
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_engine_cs *engine = req->engine;
- struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_ring *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
if (w->count == 0)
return 0;
- engine->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(req);
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
@@ -1048,17 +801,16 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
if (ret)
return ret;
- intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
- intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
- intel_logical_ring_emit(ringbuf, w->reg[i].value);
+ intel_ring_emit_reg(ring, w->reg[i].addr);
+ intel_ring_emit(ring, w->reg[i].value);
}
- intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
- intel_logical_ring_advance(ringbuf);
+ intel_ring_advance(ring);
- engine->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(req);
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
@@ -1094,7 +846,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
* code duplication.
*/
static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
- uint32_t *const batch,
+ uint32_t *batch,
uint32_t index)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1113,7 +865,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1131,7 +883,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
wa_ctx_emit(batch, index, 0);
return index;
@@ -1156,37 +908,24 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
return 0;
}
-/**
- * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
- *
- * @engine: only applicable for RCS
- * @wa_ctx: structure representing wa_ctx
- * offset: specifies start of the batch, should be cache-aligned. This is updated
- * with the offset value received as input.
- * size: size of the batch in DWORDS but HW expects in terms of cachelines
- * @batch: page in which WA are loaded
- * @offset: This field specifies the start of the batch, it should be
- * cache-aligned otherwise it is adjusted accordingly.
- * Typically we only have one indirect_ctx and per_ctx batch buffer which are
- * initialized at the beginning and shared across all contexts but this field
- * helps us to have multiple batches at different offsets and select them based
- * on a criteria. At the moment this batch always start at the beginning of the page
- * and at this point we don't have multiple wa_ctx batch buffers.
- *
- * The number of WA applied are not known at the beginning; we use this field
- * to return the no of DWORDS written.
+/*
+ * Typically we only have one indirect_ctx and one per_ctx batch buffer, which are
+ * initialized at the beginning and shared across all contexts, but this field
+ * helps us to have multiple batches at different offsets and select them based
+ * on a criterion. At the moment this batch always starts at the beginning of the page
+ * and at this point we don't have multiple wa_ctx batch buffers.
*
- * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
- * so it adds NOOPs as padding to make it cacheline aligned.
- * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
- * makes a complete batch buffer.
+ * The number of WAs applied is not known at the beginning; we use this field
+ * to return the number of DWORDS written.
*
- * Return: non-zero if we exceed the PAGE_SIZE limit.
+ * Note that this batch does not contain MI_BATCH_BUFFER_END,
+ * so it adds NOOPs as padding to make it cacheline aligned.
+ * MI_BATCH_BUFFER_END will be added to the perctx batch, and the two together
+ * make a complete batch buffer.
*/
-
static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
- uint32_t *const batch,
+ uint32_t *batch,
uint32_t *offset)
{
uint32_t scratch_addr;
@@ -1205,7 +944,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
/* Actual scratch location is at 128 bytes offset */
- scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+ scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1230,26 +969,18 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}
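As the comment block above says, the indirect_ctx batch carries no MI_BATCH_BUFFER_END and is instead padded with NOOPs up to a cacheline boundary. A minimal sketch of that padding step, assuming an illustrative emit helper and the usual 64-byte cacheline:

```c
#include <stdint.h>

#define MI_NOOP           0u
#define CACHELINE_BYTES   64
#define CACHELINE_DWORDS  (CACHELINE_BYTES / sizeof(uint32_t))

static inline void wa_emit(uint32_t *batch, uint32_t *index, uint32_t dw)
{
	batch[(*index)++] = dw;
}

/* Pad with NOOPs until the next cacheline boundary and return the end. */
static uint32_t wa_ctx_pad(uint32_t *batch, uint32_t index)
{
	while (index % CACHELINE_DWORDS)
		wa_emit(batch, &index, MI_NOOP);
	return index;
}
```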
-/**
- * gen8_init_perctx_bb() - initialize per ctx batch with WA
- *
- * @engine: only applicable for RCS
- * @wa_ctx: structure representing wa_ctx
- * offset: specifies start of the batch, should be cache-aligned.
- * size: size of the batch in DWORDS but HW expects in terms of cachelines
- * @batch: page in which WA are loaded
- * @offset: This field specifies the start of this batch.
- * This batch is started immediately after indirect_ctx batch. Since we ensure
- * that indirect_ctx ends on a cacheline this batch is aligned automatically.
+/*
+ * This batch is started immediately after indirect_ctx batch. Since we ensure
+ * that indirect_ctx ends on a cacheline, this batch is aligned automatically.
*
- * The number of DWORDS written are returned using this field.
+ * The number of DWORDS written is returned using this field.
*
* This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
* to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
*/
static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
- uint32_t *const batch,
+ uint32_t *batch,
uint32_t *offset)
{
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
@@ -1264,7 +995,7 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
- uint32_t *const batch,
+ uint32_t *batch,
uint32_t *offset)
{
int ret;
@@ -1282,11 +1013,18 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
return ret;
index = ret;
+ /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
+ wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+ wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
+ wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
+ GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
+ wa_ctx_emit(batch, index, MI_NOOP);
+
/* WaClearSlmSpaceAtContextSwitch:kbl */
/* Actual scratch location is at 128 bytes offset */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
- uint32_t scratch_addr
- = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+ u32 scratch_addr =
+ i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1332,7 +1070,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
- uint32_t *const batch,
+ uint32_t *batch,
uint32_t *offset)
{
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
@@ -1378,44 +1116,44 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
- int ret;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
- engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
- PAGE_ALIGN(size));
- if (IS_ERR(engine->wa_ctx.obj)) {
- DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
- ret = PTR_ERR(engine->wa_ctx.obj);
- engine->wa_ctx.obj = NULL;
- return ret;
- }
+ obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
- ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
- ret);
- drm_gem_object_unreference(&engine->wa_ctx.obj->base);
- return ret;
+ vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
}
+ err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto err;
+
+ engine->wa_ctx.vma = vma;
return 0;
+
+err:
+ i915_gem_object_put(obj);
+ return err;
}
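The rewritten lrc_setup_wa_ctx_obj() above follows the kernel's goto-unwind pattern: each acquisition either succeeds or jumps to a label that releases everything obtained so far, leaving a single failure path. A sketch of that shape, with stand-in object/vma helpers (not i915 API):

```c
#include <errno.h>

struct obj;
struct vma;

extern struct obj *obj_create(unsigned long size);
extern void obj_put(struct obj *obj);
extern struct vma *vma_create(struct obj *obj);
extern int vma_pin(struct vma *vma);

static int setup_wa_ctx(struct vma **out, unsigned long size)
{
	struct obj *obj;
	struct vma *vma;
	int err;

	obj = obj_create(size);
	if (!obj)
		return -ENOMEM;

	vma = vma_create(obj);
	if (!vma) {
		err = -ENOMEM;
		goto err_obj;
	}

	err = vma_pin(vma);
	if (err)
		goto err_obj;   /* the vma is owned by the object */

	*out = vma;
	return 0;

err_obj:
	obj_put(obj);           /* single unwind point, mirroring the patch */
	return err;
}
```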
static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
{
- if (engine->wa_ctx.obj) {
- i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
- drm_gem_object_unreference(&engine->wa_ctx.obj->base);
- engine->wa_ctx.obj = NULL;
- }
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma);
}
static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
uint32_t *batch;
uint32_t offset;
struct page *page;
- struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
+ int ret;
WARN_ON(engine->id != RCS);
@@ -1427,7 +1165,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
}
/* some WAs perform writes to the scratch page, ensure it is valid */
- if (engine->scratch.obj == NULL) {
+ if (!engine->scratch) {
DRM_ERROR("scratch page not allocated for %s\n", engine->name);
return -EINVAL;
}
@@ -1438,7 +1176,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
- page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
+ page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
batch = kmap_atomic(page);
offset = 0;
@@ -1485,55 +1223,37 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
- (u32)engine->status_page.gfx_addr);
+ engine->status_page.ggtt_offset);
POSTING_READ(RING_HWS_PGA(engine->mmio_base));
}
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned int next_context_status_buffer_hw;
+ int ret;
+
+ ret = intel_mocs_init_engine(engine);
+ if (ret)
+ return ret;
lrc_init_hws(engine);
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ intel_engine_reset_breadcrumbs(engine);
+
I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
- POSTING_READ(RING_MODE_GEN7(engine));
-
- /*
- * Instead of resetting the Context Status Buffer (CSB) read pointer to
- * zero, we need to read the write pointer from hardware and use its
- * value because "this register is power context save restored".
- * Effectively, these states have been observed:
- *
- * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
- * BDW | CSB regs not reset | CSB regs reset |
- * CHT | CSB regs not reset | CSB regs not reset |
- * SKL | ? | ? |
- * BXT | ? | ? |
- */
- next_context_status_buffer_hw =
- GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
-
- /*
- * When the CSB registers are reset (also after power-up / gpu reset),
- * CSB write pointer is set to all 1's, which is not valid, use '5' in
- * this special case, so the first element read is CSB[0].
- */
- if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
- next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
- engine->next_context_status_buffer = next_context_status_buffer_hw;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
intel_engine_init_hangcheck(engine);
- return intel_mocs_init_engine(engine);
+ if (!execlists_elsp_idle(engine))
+ execlists_submit_ports(engine);
+
+ return 0;
}
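The RING_MODE_GEN7 write above uses the hardware's masked-bit convention: the high 16 bits of the written value select which of the low 16 bits actually update, so independent fields can be flipped without a read-modify-write. A sketch of the macros behind _MASKED_BIT_ENABLE/_DISABLE (values illustrative):

```c
#include <stdint.h>

#define MASKED_FIELD(mask, value)  (((mask) << 16) | (value))
#define MASKED_BIT_ENABLE(a)       MASKED_FIELD((a), (a))
#define MASKED_BIT_DISABLE(a)      MASKED_FIELD((a), 0)

/* Example: enable GFX_RUN_LIST_ENABLE while disabling GFX_REPLAY_MODE. */
static uint32_t ring_mode_value(uint32_t replay_mode, uint32_t run_list)
{
	return MASKED_BIT_DISABLE(replay_mode) |
	       MASKED_BIT_ENABLE(run_list);
}
```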
static int gen8_init_render_ring(struct intel_engine_cs *engine)
@@ -1569,11 +1289,57 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
return init_workarounds_ring(engine);
}
+static void reset_common_ring(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *request)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct execlist_port *port = engine->execlist_port;
+ struct intel_context *ce = &request->ctx->engine[engine->id];
+
+ /* We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+ execlists_init_reg_state(ce->lrc_reg_state,
+ request->ctx, engine, ce->ring);
+
+ /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
+ ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
+ i915_ggtt_offset(ce->ring->vma);
+ ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+
+ request->ring->head = request->postfix;
+ request->ring->last_retired_head = -1;
+ intel_ring_update_space(request->ring);
+
+ if (i915.enable_guc_submission)
+ return;
+
+ /* Catch up with any missed context-switch interrupts */
+ I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0));
+ if (request->ctx != port[0].request->ctx) {
+ i915_gem_request_put(port[0].request);
+ port[0] = port[1];
+ memset(&port[1], 0, sizeof(port[1]));
+ }
+
+ /* CS is stopped, and we will resubmit both ports on resume */
+ GEM_BUG_ON(request->ctx != port[0].request->ctx);
+ port[0].count = 0;
+ port[1].count = 0;
+
+ /* Reset WaIdleLiteRestore:bdw,skl as well */
+ request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
+}
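reset_common_ring() above also has to repair the two-slot ELSP port state after a hang: if the hanging request is not the one in port[0], the head slot is dropped and port[1] promoted, then both submission counts are cleared so the ports are resubmitted on resume. A sketch of that shuffle, with stand-in types:

```c
#include <string.h>

struct request { int ctx_id; };

struct port {
	struct request *request;
	unsigned int count;
};

extern void request_put(struct request *rq);   /* assumed refcount drop */

static void reset_ports(struct port port[2], struct request *hung)
{
	if (port[0].request && hung->ctx_id != port[0].request->ctx_id) {
		request_put(port[0].request);
		port[0] = port[1];
		memset(&port[1], 0, sizeof(port[1]));
	}

	port[0].count = 0;   /* force resubmission of both ports on resume */
	port[1].count = 0;
}
```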
+
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
- struct intel_ringbuffer *ringbuf = req->ringbuf;
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
@@ -1581,28 +1347,27 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
if (ret)
return ret;
- intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- intel_logical_ring_emit_reg(ringbuf,
- GEN8_RING_PDP_UDW(engine, i));
- intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
- intel_logical_ring_emit_reg(ringbuf,
- GEN8_RING_PDP_LDW(engine, i));
- intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+ intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
+ intel_ring_emit(ring, upper_32_bits(pd_daddr));
+ intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
+ intel_ring_emit(ring, lower_32_bits(pd_daddr));
}
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
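intel_logical_ring_emit_pdps() emits a single MI_LOAD_REGISTER_IMM whose length covers every (register, value) pair, followed by an MI_NOOP to keep the stream length even. A minimal sketch of building such an LRI stream; the opcode encoding here is a hypothetical placeholder, not the real MI encoding:

```c
#include <stdint.h>

#define MI_NOOP    0u
#define MI_LRI(n)  (0x22u << 23 | (2 * (n) - 1))   /* hypothetical encoding */

static uint32_t *emit_lri(uint32_t *cs, const uint32_t *regs,
			  const uint32_t *values, int n)
{
	int i;

	*cs++ = MI_LRI(n);
	for (i = 0; i < n; i++) {
		*cs++ = regs[i];    /* register offset */
		*cs++ = values[i];  /* immediate value */
	}
	*cs++ = MI_NOOP;            /* keep the command stream qword aligned */
	return cs;
}
```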
static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
- u64 offset, unsigned dispatch_flags)
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_ring *ring = req->ring;
bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
@@ -1629,14 +1394,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
- (ppgtt<<8) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0));
- intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
- intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
+ (ppgtt<<8) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0));
+ intel_ring_emit(ring, lower_32_bits(offset));
+ intel_ring_emit(ring, upper_32_bits(offset));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
@@ -1655,14 +1420,10 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
-static int gen8_emit_flush(struct drm_i915_gem_request *request,
- u32 invalidate_domains,
- u32 unused)
+static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
{
- struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *engine = ringbuf->engine;
- struct drm_i915_private *dev_priv = request->i915;
- uint32_t cmd;
+ struct intel_ring *ring = request->ring;
+ u32 cmd;
int ret;
ret = intel_ring_begin(request, 4);
@@ -1678,30 +1439,30 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
- if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+ if (mode & EMIT_INVALIDATE) {
cmd |= MI_INVALIDATE_TLB;
- if (engine == &dev_priv->engine[VCS])
+ if (request->engine->id == VCS)
cmd |= MI_INVALIDATE_BSD;
}
- intel_logical_ring_emit(ringbuf, cmd);
- intel_logical_ring_emit(ringbuf,
- I915_GEM_HWS_SCRATCH_ADDR |
- MI_FLUSH_DW_USE_GTT);
- intel_logical_ring_emit(ringbuf, 0); /* upper addr */
- intel_logical_ring_emit(ringbuf, 0); /* value */
- intel_logical_ring_advance(ringbuf);
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring,
+ I915_GEM_HWS_SCRATCH_ADDR |
+ MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0); /* upper addr */
+ intel_ring_emit(ring, 0); /* value */
+ intel_ring_advance(ring);
return 0;
}
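The new gen8_emit_flush() replaces the old invalidate/flush domain pair with engine-agnostic EMIT_* mode bits, and only the video engine additionally invalidates the BSD TLB. A sketch of that translation; the MI bit values below are placeholders, not hardware encodings:

```c
#include <stdint.h>

#define EMIT_FLUSH       (1u << 0)
#define EMIT_INVALIDATE  (1u << 1)
#define EMIT_BARRIER     (EMIT_FLUSH | EMIT_INVALIDATE)

#define MI_FLUSH_DW_STOREDW  (1u << 0)   /* placeholder encodings */
#define MI_INVALIDATE_TLB    (1u << 1)
#define MI_INVALIDATE_BSD    (1u << 2)

static uint32_t flush_cmd(uint32_t mode, int is_vcs)
{
	uint32_t cmd = MI_FLUSH_DW_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (is_vcs)          /* only the video engine needs BSD TLBs */
			cmd |= MI_INVALIDATE_BSD;
	}
	return cmd;
}
```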
static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
- u32 invalidate_domains,
- u32 flush_domains)
+ u32 mode)
{
- struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *engine = ringbuf->engine;
- u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ struct intel_ring *ring = request->ring;
+ struct intel_engine_cs *engine = request->engine;
+ u32 scratch_addr =
+ i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 flags = 0;
int ret;
@@ -1709,14 +1470,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
flags |= PIPE_CONTROL_CS_STALL;
- if (flush_domains) {
+ if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
- if (invalidate_domains) {
+ if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -1751,40 +1512,40 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
return ret;
if (vf_flush_wa) {
- intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
}
if (dc_flush_wa) {
- intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
- intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
}
- intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
- intel_logical_ring_emit(ringbuf, flags);
- intel_logical_ring_emit(ringbuf, scratch_addr);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
if (dc_flush_wa) {
- intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
- intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, 0);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
}
- intel_logical_ring_advance(ringbuf);
+ intel_ring_advance(ring);
return 0;
}
@@ -1809,11 +1570,10 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-#define WA_TAIL_DWORDS 2
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
- struct intel_ringbuffer *ringbuf = request->ringbuf;
+ struct intel_ring *ring = request->ring;
int ret;
ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1823,21 +1583,20 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- intel_logical_ring_emit(ringbuf,
- (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
- intel_logical_ring_emit(ringbuf,
- intel_hws_seqno_address(request->engine) |
- MI_FLUSH_DW_USE_GTT);
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, request->seqno);
- intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- return intel_logical_ring_advance_and_submit(request);
+ intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+ intel_ring_emit(ring,
+ intel_hws_seqno_address(request->engine) |
+ MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, request->fence.seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_emit(ring, MI_NOOP);
+ return intel_logical_ring_advance(request);
}
static int gen8_emit_request_render(struct drm_i915_gem_request *request)
{
- struct intel_ringbuffer *ringbuf = request->ringbuf;
+ struct intel_ring *ring = request->ring;
int ret;
ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
@@ -1851,50 +1610,19 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
- intel_logical_ring_emit(ringbuf,
- (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_logical_ring_emit(ringbuf,
- intel_hws_seqno_address(request->engine));
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring,
+ (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, i915_gem_request_get_seqno(request));
/* We're thrashing one dword of HWS. */
- intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- return intel_logical_ring_advance_and_submit(request);
-}
-
-static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
-{
- struct render_state so;
- int ret;
-
- ret = i915_gem_render_state_prepare(req->engine, &so);
- if (ret)
- return ret;
-
- if (so.rodata == NULL)
- return 0;
-
- ret = req->engine->emit_bb_start(req, so.ggtt_offset,
- I915_DISPATCH_SECURE);
- if (ret)
- goto out;
-
- ret = req->engine->emit_bb_start(req,
- (so.ggtt_offset + so.aux_batch_offset),
- I915_DISPATCH_SECURE);
- if (ret)
- goto out;
-
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-
-out:
- i915_gem_render_state_fini(&so);
- return ret;
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_emit(ring, MI_NOOP);
+ return intel_logical_ring_advance(request);
}
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
@@ -1913,14 +1641,12 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
if (ret)
DRM_ERROR("MOCS failed to program: expect performance issues.\n");
- return intel_lr_context_render_state_init(req);
+ return i915_gem_render_state_init(req);
}
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
- *
* @engine: Engine Command Streamer.
- *
*/
void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
@@ -1939,39 +1665,42 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
dev_priv = engine->i915;
if (engine->buffer) {
- intel_logical_ring_stop(engine);
WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
}
if (engine->cleanup)
engine->cleanup(engine);
- i915_cmd_parser_fini_ring(engine);
- i915_gem_batch_pool_fini(&engine->batch_pool);
-
- intel_engine_fini_breadcrumbs(engine);
+ intel_engine_cleanup_common(engine);
- if (engine->status_page.obj) {
- i915_gem_object_unpin_map(engine->status_page.obj);
- engine->status_page.obj = NULL;
+ if (engine->status_page.vma) {
+ i915_gem_object_unpin_map(engine->status_page.vma->obj);
+ engine->status_page.vma = NULL;
}
intel_lr_context_unpin(dev_priv->kernel_context, engine);
- engine->idle_lite_restore_wa = 0;
- engine->disable_lite_restore_wa = false;
- engine->ctx_desc_template = 0;
-
lrc_destroy_wa_ctx_obj(engine);
engine->i915 = NULL;
}
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+
+ for_each_engine(engine, dev_priv)
+ engine->submit_request = execlists_submit_request;
+}
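intel_execlists_enable_submission() works because submission is just a per-engine function pointer: switching between GuC and direct ELSP submission is one vfunc assignment per engine. A sketch of the pattern, with stand-in types and assumed backend names:

```c
struct request;

struct engine {
	void (*submit_request)(struct request *rq);
};

extern void execlists_submit(struct request *rq);   /* assumed backends */
extern void guc_submit(struct request *rq);

static void enable_submission(struct engine *engines, int n, int use_guc)
{
	int i;

	/* Swap the submission backend for every engine in one pass. */
	for (i = 0; i < n; i++)
		engines[i].submit_request = use_guc ? guc_submit
						    : execlists_submit;
}
```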
+
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
engine->init_hw = gen8_init_common_ring;
- engine->emit_request = gen8_emit_request;
+ engine->reset_hw = reset_common_ring;
engine->emit_flush = gen8_emit_flush;
+ engine->emit_request = gen8_emit_request;
+ engine->submit_request = execlists_submit_request;
+
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
engine->emit_bb_start = gen8_emit_bb_start;
@@ -1980,41 +1709,71 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
}
static inline void
-logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
+logical_ring_default_irqs(struct intel_engine_cs *engine)
{
+ unsigned shift = engine->irq_shift;
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
static int
-lrc_setup_hws(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *dctx_obj)
+lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
{
+ const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
void *hws;
/* The HWSP is part of the default context object in LRC mode. */
- engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
- LRC_PPHWSP_PN * PAGE_SIZE;
- hws = i915_gem_object_pin_map(dctx_obj);
+ hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(hws))
return PTR_ERR(hws);
- engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
- engine->status_page.obj = dctx_obj;
+
+ engine->status_page.page_addr = hws + hws_offset;
+ engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
+ engine->status_page.vma = vma;
return 0;
}
+static void
+logical_ring_setup(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ enum forcewake_domains fw_domains;
+
+ intel_engine_setup_common(engine);
+
+ /* Intentionally left blank. */
+ engine->buffer = NULL;
+
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+ RING_ELSP(engine),
+ FW_REG_WRITE);
+
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_PTR(engine),
+ FW_REG_READ | FW_REG_WRITE);
+
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_BUF_BASE(engine),
+ FW_REG_READ);
+
+ engine->fw_domains = fw_domains;
+
+ tasklet_init(&engine->irq_tasklet,
+ intel_lrc_irq_handler, (unsigned long)engine);
+
+ logical_ring_init_platform_invariants(engine);
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine);
+}
+
static int
logical_ring_init(struct intel_engine_cs *engine)
{
struct i915_gem_context *dctx = engine->i915->kernel_context;
int ret;
- ret = intel_engine_init_breadcrumbs(engine);
- if (ret)
- goto error;
-
- ret = i915_cmd_parser_init_ring(engine);
+ ret = intel_engine_init_common(engine);
if (ret)
goto error;
@@ -2044,11 +1803,13 @@ error:
return ret;
}
-static int logical_render_ring_init(struct intel_engine_cs *engine)
+int logical_render_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
+ logical_ring_setup(engine);
+
if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
@@ -2058,11 +1819,10 @@ static int logical_render_ring_init(struct intel_engine_cs *engine)
else
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
- engine->cleanup = intel_fini_pipe_control;
engine->emit_flush = gen8_emit_flush_render;
engine->emit_request = gen8_emit_request_render;
- ret = intel_init_pipe_control(engine, 4096);
+ ret = intel_engine_create_scratch(engine, 4096);
if (ret)
return ret;
@@ -2085,160 +1845,11 @@ static int logical_render_ring_init(struct intel_engine_cs *engine)
return ret;
}
-static const struct logical_ring_info {
- const char *name;
- unsigned exec_id;
- unsigned guc_id;
- u32 mmio_base;
- unsigned irq_shift;
- int (*init)(struct intel_engine_cs *engine);
-} logical_rings[] = {
- [RCS] = {
- .name = "render ring",
- .exec_id = I915_EXEC_RENDER,
- .guc_id = GUC_RENDER_ENGINE,
- .mmio_base = RENDER_RING_BASE,
- .irq_shift = GEN8_RCS_IRQ_SHIFT,
- .init = logical_render_ring_init,
- },
- [BCS] = {
- .name = "blitter ring",
- .exec_id = I915_EXEC_BLT,
- .guc_id = GUC_BLITTER_ENGINE,
- .mmio_base = BLT_RING_BASE,
- .irq_shift = GEN8_BCS_IRQ_SHIFT,
- .init = logical_ring_init,
- },
- [VCS] = {
- .name = "bsd ring",
- .exec_id = I915_EXEC_BSD,
- .guc_id = GUC_VIDEO_ENGINE,
- .mmio_base = GEN6_BSD_RING_BASE,
- .irq_shift = GEN8_VCS1_IRQ_SHIFT,
- .init = logical_ring_init,
- },
- [VCS2] = {
- .name = "bsd2 ring",
- .exec_id = I915_EXEC_BSD,
- .guc_id = GUC_VIDEO_ENGINE2,
- .mmio_base = GEN8_BSD2_RING_BASE,
- .irq_shift = GEN8_VCS2_IRQ_SHIFT,
- .init = logical_ring_init,
- },
- [VECS] = {
- .name = "video enhancement ring",
- .exec_id = I915_EXEC_VEBOX,
- .guc_id = GUC_VIDEOENHANCE_ENGINE,
- .mmio_base = VEBOX_RING_BASE,
- .irq_shift = GEN8_VECS_IRQ_SHIFT,
- .init = logical_ring_init,
- },
-};
-
-static struct intel_engine_cs *
-logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
-{
- const struct logical_ring_info *info = &logical_rings[id];
- struct intel_engine_cs *engine = &dev_priv->engine[id];
- enum forcewake_domains fw_domains;
-
- engine->id = id;
- engine->name = info->name;
- engine->exec_id = info->exec_id;
- engine->guc_id = info->guc_id;
- engine->mmio_base = info->mmio_base;
-
- engine->i915 = dev_priv;
-
- /* Intentionally left blank. */
- engine->buffer = NULL;
-
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
- RING_ELSP(engine),
- FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_PTR(engine),
- FW_REG_READ | FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_BUF_BASE(engine),
- FW_REG_READ);
-
- engine->fw_domains = fw_domains;
-
- INIT_LIST_HEAD(&engine->active_list);
- INIT_LIST_HEAD(&engine->request_list);
- INIT_LIST_HEAD(&engine->buffers);
- INIT_LIST_HEAD(&engine->execlist_queue);
- spin_lock_init(&engine->execlist_lock);
-
- tasklet_init(&engine->irq_tasklet,
- intel_lrc_irq_handler, (unsigned long)engine);
-
- logical_ring_init_platform_invariants(engine);
- logical_ring_default_vfuncs(engine);
- logical_ring_default_irqs(engine, info->irq_shift);
-
- intel_engine_init_hangcheck(engine);
- i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
-
- return engine;
-}
-
-/**
- * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
- * @dev: DRM device.
- *
- * This function inits the engines for an Execlists submission style (the
- * equivalent in the legacy ringbuffer submission world would be
- * i915_gem_init_engines). It does it only for those engines that are present in
- * the hardware.
- *
- * Return: non-zero if the initialization failed.
- */
-int intel_logical_rings_init(struct drm_device *dev)
+int logical_xcs_ring_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned int mask = 0;
- unsigned int i;
- int ret;
-
- WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
- GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
-
- for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
- if (!HAS_ENGINE(dev_priv, i))
- continue;
-
- if (!logical_rings[i].init)
- continue;
+ logical_ring_setup(engine);
- ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
- if (ret)
- goto cleanup;
-
- mask |= ENGINE_MASK(i);
- }
-
- /*
- * Catch failures to update logical_rings table when the new engines
- * are added to the driver by a warning and disabling the forgotten
- * engines.
- */
- if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
- struct intel_device_info *info =
- (struct intel_device_info *)&dev_priv->info;
- info->ring_mask = mask;
- }
-
- return 0;
-
-cleanup:
- for (i = 0; i < I915_NUM_ENGINES; i++)
- intel_logical_ring_cleanup(&dev_priv->engine[i]);
-
- return ret;
+ return logical_ring_init(engine);
}
static u32
@@ -2259,24 +1870,24 @@ make_rpcs(struct drm_i915_private *dev_priv)
* must make an explicit request through RPCS for full
* enablement.
*/
- if (INTEL_INFO(dev_priv)->has_slice_pg) {
+ if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
rpcs |= GEN8_RPCS_S_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev_priv)->slice_total <<
+ rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
GEN8_RPCS_S_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev_priv)->has_subslice_pg) {
+ if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
+ rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev_priv)->has_eu_pg) {
- rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+ if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+ rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
GEN8_RPCS_EU_MIN_SHIFT;
- rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
+ rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
GEN8_RPCS_EU_MAX_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
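The make_rpcs() hunk above switches from stored slice/subslice totals to deriving counts from bitmasks with a population count (hweight8 in the kernel). A sketch of that computation, with illustrative shift values:

```c
#include <stdint.h>

static inline unsigned int popcount8(uint8_t mask)
{
	unsigned int n = 0;

	while (mask) {          /* equivalent of the kernel's hweight8() */
		n += mask & 1;
		mask >>= 1;
	}
	return n;
}

#define RPCS_S_CNT_SHIFT   15   /* placeholder shifts */
#define RPCS_SS_CNT_SHIFT  8

static uint32_t pack_rpcs(uint8_t slice_mask, uint8_t subslice_mask)
{
	return popcount8(slice_mask) << RPCS_S_CNT_SHIFT |
	       popcount8(subslice_mask) << RPCS_SS_CNT_SHIFT;
}
```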
@@ -2305,38 +1916,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
return indirect_ctx_offset;
}
-static int
-populate_lr_context(struct i915_gem_context *ctx,
- struct drm_i915_gem_object *ctx_obj,
- struct intel_engine_cs *engine,
- struct intel_ringbuffer *ringbuf)
+static void execlists_init_reg_state(u32 *reg_state,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_ring *ring)
{
- struct drm_i915_private *dev_priv = ctx->i915;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
- void *vaddr;
- u32 *reg_state;
- int ret;
-
- if (!ppgtt)
- ppgtt = dev_priv->mm.aliasing_ppgtt;
-
- ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
- if (ret) {
- DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
- return ret;
- }
-
- vaddr = i915_gem_object_pin_map(ctx_obj);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
- return ret;
- }
- ctx_obj->dirty = true;
-
- /* The second page of the context object contains some fields which must
- * be set up prior to the first execution. */
- reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
* commands followed by (reg, value) pairs. The values we are setting here are
@@ -2350,19 +1936,16 @@ populate_lr_context(struct i915_gem_context *ctx,
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
(HAS_RESOURCE_STREAMER(dev_priv) ?
- CTX_CTRL_RS_CTX_ENABLE : 0)));
+ CTX_CTRL_RS_CTX_ENABLE : 0)));
ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
0);
ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
0);
- /* Ring buffer start address is not known until the buffer is pinned.
- * It is written to the context image in execlists_update_context()
- */
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
RING_START(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
RING_CTL(engine->mmio_base),
- ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
RING_BBADDR_UDW(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2383,9 +1966,9 @@ populate_lr_context(struct i915_gem_context *ctx,
RING_INDIRECT_CTX(engine->mmio_base), 0);
ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
- if (engine->wa_ctx.obj) {
+ if (engine->wa_ctx.vma) {
struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
- uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+ u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
reg_state[CTX_RCS_INDIRECT_CTX+1] =
(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
@@ -2440,6 +2023,36 @@ populate_lr_context(struct i915_gem_context *ctx,
ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
make_rpcs(dev_priv));
}
+}
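As the surrounding comments note, the context image is effectively a saved MI_LOAD_REGISTER_IMM stream: slot N holds a register offset and slot N+1 its value, which is what the ASSIGN_CTX_REG calls fill in. A sketch of that layout, with placeholder dword indices:

```c
#include <stdint.h>

#define CTX_RING_HEAD  0x04   /* placeholder dword indices */
#define CTX_RING_TAIL  0x06

static void assign_ctx_reg(uint32_t *reg_state, unsigned int pos,
			   uint32_t reg_offset, uint32_t value)
{
	reg_state[pos + 0] = reg_offset;   /* register the LRI targets */
	reg_state[pos + 1] = value;        /* immediate restored on context load */
}

static void init_ring_regs(uint32_t *reg_state, uint32_t head_reg,
			   uint32_t tail_reg)
{
	assign_ctx_reg(reg_state, CTX_RING_HEAD, head_reg, 0);
	assign_ctx_reg(reg_state, CTX_RING_TAIL, tail_reg, 0);
}
```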
+
+static int
+populate_lr_context(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *ctx_obj,
+ struct intel_engine_cs *engine,
+ struct intel_ring *ring)
+{
+ void *vaddr;
+ int ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
+ return ret;
+ }
+
+ vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
+ return ret;
+ }
+ ctx_obj->dirty = true;
+
+ /* The second page of the context object contains some fields which must
+ * be set up prior to the first execution. */
+
+ execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
+ ctx, engine, ring);
i915_gem_object_unpin_map(ctx_obj);
@@ -2484,26 +2097,14 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
return ret;
}
-/**
- * execlists_context_deferred_alloc() - create the LRC specific bits of a context
- * @ctx: LR context to create.
- * @engine: engine to be used with the context.
- *
- * This function can be called more than once, with different engines, if we plan
- * to use the context with them. The context backing objects and the ringbuffers
- * (specially the ringbuffer backing objects) suck a lot of memory up, and that's why
- * the creation is a deferred call: it's better to make sure first that we need to use
- * a given ring with the context.
- *
- * Return: non-zero on error.
- */
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
struct intel_context *ce = &ctx->engine[engine->id];
+ struct i915_vma *vma;
uint32_t context_size;
- struct intel_ringbuffer *ringbuf;
+ struct intel_ring *ring;
int ret;
WARN_ON(ce->state);
@@ -2519,60 +2120,63 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
return PTR_ERR(ctx_obj);
}
- ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
- if (IS_ERR(ringbuf)) {
- ret = PTR_ERR(ringbuf);
+ vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto error_deref_obj;
+ }
+
+ ring = intel_engine_create_ring(engine, ctx->ring_size);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
goto error_deref_obj;
}
- ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
+ ret = populate_lr_context(ctx, ctx_obj, engine, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
- goto error_ringbuf;
+ goto error_ring_free;
}
- ce->ringbuf = ringbuf;
- ce->state = ctx_obj;
+ ce->ring = ring;
+ ce->state = vma;
ce->initialised = engine->init_context == NULL;
return 0;
-error_ringbuf:
- intel_ringbuffer_free(ringbuf);
+error_ring_free:
+ intel_ring_free(ring);
error_deref_obj:
- drm_gem_object_unreference(&ctx_obj->base);
- ce->ringbuf = NULL;
- ce->state = NULL;
+ i915_gem_object_put(ctx_obj);
return ret;
}
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
- struct i915_gem_context *ctx)
+void intel_lr_context_resume(struct drm_i915_private *dev_priv)
{
+ struct i915_gem_context *ctx = dev_priv->kernel_context;
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv) {
struct intel_context *ce = &ctx->engine[engine->id];
- struct drm_i915_gem_object *ctx_obj = ce->state;
void *vaddr;
uint32_t *reg_state;
- if (!ctx_obj)
+ if (!ce->state)
continue;
- vaddr = i915_gem_object_pin_map(ctx_obj);
+ vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
if (WARN_ON(IS_ERR(vaddr)))
continue;
reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
- ctx_obj->dirty = true;
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL+1] = 0;
- i915_gem_object_unpin_map(ctx_obj);
+ ce->state->obj->dirty = true;
+ i915_gem_object_unpin_map(ce->state->obj);
- ce->ringbuf->head = 0;
- ce->ringbuf->tail = 0;
+ ce->ring->head = 0;
+ ce->ring->tail = 0;
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 2b8255c19dcc..4fed8165f98a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -29,17 +29,17 @@
#define GEN8_LR_CONTEXT_ALIGN 4096
/* Execlists regs */
-#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)
-#define RING_EXECLIST_STATUS_LO(ring) _MMIO((ring)->mmio_base + 0x234)
-#define RING_EXECLIST_STATUS_HI(ring) _MMIO((ring)->mmio_base + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(ring) _MMIO((ring)->mmio_base + 0x244)
+#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
+#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234)
+#define RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
-#define RING_CONTEXT_STATUS_BUF_BASE(ring) _MMIO((ring)->mmio_base + 0x370)
-#define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
+#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370)
+#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
+#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
+#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
/* The docs specify that the write pointer wraps around after 5h, "After status
* is written out to the last available status QW at offset 5h, this pointer
@@ -67,35 +67,10 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
-int intel_logical_rings_init(struct drm_device *dev);
+int logical_render_ring_init(struct intel_engine_cs *engine);
+int logical_xcs_ring_init(struct intel_engine_cs *engine);
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-/**
- * intel_logical_ring_advance() - advance the ringbuffer tail
- * @ringbuf: Ringbuffer to advance.
- *
- * The tail is only updated in our logical ringbuffer struct.
- */
-static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
-{
- ringbuf->tail &= ringbuf->size - 1;
-}
-/**
- * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
- * @ringbuf: Ringbuffer to write to.
- * @data: DWORD to write.
- */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
- u32 data)
-{
- iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
- ringbuf->tail += 4;
-}
-static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
- i915_reg_t reg)
-{
- intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
-}
+int intel_engines_init(struct drm_device *dev);
/* Logical Ring Contexts */
@@ -112,19 +87,13 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
struct drm_i915_private;
-void intel_lr_context_reset(struct drm_i915_private *dev_priv,
- struct i915_gem_context *ctx);
+void intel_lr_context_resume(struct drm_i915_private *dev_priv);
uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
-struct i915_execbuffer_params;
-int intel_execlists_submission(struct i915_execbuffer_params *params,
- struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas);
-
-void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 49550470483e..e1d47d51ea47 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -48,6 +48,20 @@ struct intel_lvds_connector {
struct notifier_block lid_notifier;
};
+struct intel_lvds_pps {
+ /* 100us units */
+ int t1_t2;
+ int t3;
+ int t4;
+ int t5;
+ int tx;
+
+ int divider;
+
+ int port;
+ bool powerdown_on_reset;
+};
+
struct intel_lvds_encoder {
struct intel_encoder base;
@@ -55,6 +69,9 @@ struct intel_lvds_encoder {
i915_reg_t reg;
u32 a3_power;
+ struct intel_lvds_pps init_pps;
+ u32 init_lvds_val;
+
struct intel_lvds_connector *attached_connector;
};
@@ -136,28 +153,108 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
-static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_lvds_pps *pps)
+{
+ u32 val;
+
+ pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
+
+ val = I915_READ(PP_ON_DELAYS(0));
+ pps->port = (val & PANEL_PORT_SELECT_MASK) >>
+ PANEL_PORT_SELECT_SHIFT;
+ pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
+ pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
+
+ val = I915_READ(PP_OFF_DELAYS(0));
+ pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
+ pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+ val = I915_READ(PP_DIVISOR(0));
+ pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >>
+ PP_REFERENCE_DIVIDER_SHIFT;
+ val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ PANEL_POWER_CYCLE_DELAY_SHIFT;
+ /*
+ * Remove the BSpec specified +1 (100ms) offset that accounts for a
+ * too short power-cycle delay due to the asynchronous programming of
+ * the register.
+ */
+ if (val)
+ val--;
+ /* Convert from 100ms to 100us units */
+ pps->t4 = val * 1000;
+
+ if (INTEL_INFO(dev_priv)->gen <= 4 &&
+ pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
+ DRM_DEBUG_KMS("Panel power timings uninitialized, "
+ "setting defaults\n");
+ /* Set T2 to 40ms and T5 to 200ms in 100 usec units */
+ pps->t1_t2 = 40 * 10;
+ pps->t5 = 200 * 10;
+ /* Set T3 to 35ms and Tx to 200ms in 100 usec units */
+ pps->t3 = 35 * 10;
+ pps->tx = 200 * 10;
+ }
+
+ DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ "divider %d port %d powerdown_on_reset %d\n",
+ pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+ pps->divider, pps->port, pps->powerdown_on_reset);
+}
+
+static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
+ struct intel_lvds_pps *pps)
+{
+ u32 val;
+
+ val = I915_READ(PP_CONTROL(0));
+ WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
+ if (pps->powerdown_on_reset)
+ val |= PANEL_POWER_RESET;
+ I915_WRITE(PP_CONTROL(0), val);
+
+ I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) |
+ (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) |
+ (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT));
+ I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) |
+ (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT));
+
+ val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT;
+ val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) <<
+ PANEL_POWER_CYCLE_DELAY_SHIFT;
+ I915_WRITE(PP_DIVISOR(0), val);
+}
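The two PPS helpers above form a round trip for the power-cycle delay: the readout strips the BSpec +1 (100 ms) programming offset and converts to 100 us units, and the writeback converts back and re-adds the +1. A sketch of that asymmetry, with a placeholder field layout:

```c
#include <stdint.h>

#define PP_CYCLE_MASK   0x1fu   /* placeholder field layout */
#define PP_CYCLE_SHIFT  0

static int pps_cycle_decode(uint32_t reg)   /* returns 100 us units */
{
	uint32_t val = (reg & PP_CYCLE_MASK) >> PP_CYCLE_SHIFT;

	if (val)
		val--;          /* strip the +1 (100 ms) programming offset */
	return val * 1000;      /* convert 100 ms units to 100 us units */
}

static uint32_t pps_cycle_encode(int t4)    /* t4 in 100 us units */
{
	/* DIV_ROUND_UP(t4, 1000) + 1, as in intel_lvds_pps_init_hw() */
	return (uint32_t)(((t4 + 999) / 1000) + 1) << PP_CYCLE_SHIFT;
}
```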
+
+static void intel_pre_enable_lvds(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int pipe = crtc->pipe;
u32 temp;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
assert_fdi_rx_pll_disabled(dev_priv, pipe);
assert_shared_dpll_disabled(dev_priv,
- crtc->config->shared_dpll);
+ pipe_config->shared_dpll);
} else {
assert_pll_disabled(dev_priv, pipe);
}
- temp = I915_READ(lvds_encoder->reg);
+ intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps);
+
+ temp = lvds_encoder->init_lvds_val;
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
temp &= ~PORT_TRANS_SEL_MASK;
temp |= PORT_TRANS_SEL_CPT(pipe);
} else {
@@ -170,7 +267,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= crtc->config->gmch_pfit.lvds_border_bits;
+ temp |= pipe_config->gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@@ -193,7 +290,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
if (IS_GEN4(dev_priv)) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
- if (crtc->config->dither && crtc->config->pipe_bpp == 18)
+ if (pipe_config->dither && pipe_config->pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -210,57 +307,45 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/**
* Sets the power state for the panel.
*/
-static void intel_enable_lvds(struct intel_encoder *encoder)
+static void intel_enable_lvds(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct drm_i915_private *dev_priv = to_i915(dev);
- i915_reg_t ctl_reg, stat_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- ctl_reg = PCH_PP_CONTROL;
- stat_reg = PCH_PP_STATUS;
- } else {
- ctl_reg = PP_CONTROL;
- stat_reg = PP_STATUS;
- }
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
+ if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(intel_connector);
}
-static void intel_disable_lvds(struct intel_encoder *encoder)
+static void intel_disable_lvds(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
- struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(dev);
- i915_reg_t ctl_reg, stat_reg;
-
- if (HAS_PCH_SPLIT(dev)) {
- ctl_reg = PCH_PP_CONTROL;
- stat_reg = PCH_PP_STATUS;
- } else {
- ctl_reg = PP_CONTROL;
- stat_reg = PP_STATUS;
- }
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
- if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
+ I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
+ if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_encoder->reg);
}
-static void gmch_disable_lvds(struct intel_encoder *encoder)
+static void gmch_disable_lvds(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
@@ -268,10 +353,12 @@ static void gmch_disable_lvds(struct intel_encoder *encoder)
intel_panel_disable_backlight(intel_connector);
- intel_disable_lvds(encoder);
+ intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
}
-static void pch_disable_lvds(struct intel_encoder *encoder)
+static void pch_disable_lvds(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
@@ -280,9 +367,11 @@ static void pch_disable_lvds(struct intel_encoder *encoder)
intel_panel_disable_backlight(intel_connector);
}
-static void pch_post_disable_lvds(struct intel_encoder *encoder)
+static void pch_post_disable_lvds(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
- intel_disable_lvds(encoder);
+ intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
}
static enum drm_mode_status
@@ -304,7 +393,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
}
static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = intel_encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder =
@@ -900,17 +990,6 @@ void intel_lvds_init(struct drm_device *dev)
int pipe;
u8 pin;
- /*
- * Unlock registers and just leave them unlocked. Do this before
- * checking quirk lists to avoid bogus WARNINGs.
- */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_PP_CONTROL,
- I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
- } else if (INTEL_INFO(dev_priv)->gen < 5) {
- I915_WRITE(PP_CONTROL,
- I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
- }
if (!intel_lvds_supported(dev))
return;
@@ -943,18 +1022,6 @@ void intel_lvds_init(struct drm_device *dev)
DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
}
- /* Set the Panel Power On/Off timings if uninitialized. */
- if (INTEL_INFO(dev_priv)->gen < 5 &&
- I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
- /* Set T2 to 40ms and T5 to 200ms */
- I915_WRITE(PP_ON_DELAYS, 0x019007d0);
-
- /* Set T3 to 35ms and Tx to 200ms */
- I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
-
- DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n");
- }
-
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return;
@@ -1020,6 +1087,10 @@ void intel_lvds_init(struct drm_device *dev)
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+
+ intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps);
+ lvds_encoder->init_lvds_val = lvds;
+
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1054,17 +1125,6 @@ void intel_lvds_init(struct drm_device *dev)
}
lvds_connector->base.edid = edid;
- if (IS_ERR_OR_NULL(edid)) {
- /* Didn't get an EDID, so
- * Set wide sync ranges so we get all modes
- * handed to valid_mode for checking
- */
- connector->display_info.min_vfreq = 0;
- connector->display_info.max_vfreq = 200;
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
- }
-
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
DRM_DEBUG_KMS("using preferred mode from EDID: ");
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 927825f5b284..80bb9247ce66 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -97,7 +97,8 @@ struct drm_i915_mocs_table {
* end.
*/
static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
- { /* 0x00000009 */
+ [I915_MOCS_UNCACHED] = {
+ /* 0x00000009 */
.control_value = LE_CACHEABILITY(LE_UC) |
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
@@ -106,7 +107,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
/* 0x0010 */
.l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
},
- {
+ [I915_MOCS_PTE] = {
/* 0x00000038 */
.control_value = LE_CACHEABILITY(LE_PAGETABLE) |
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -115,7 +116,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
/* 0x0030 */
.l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
},
- {
+ [I915_MOCS_CACHED] = {
/* 0x0000003b */
.control_value = LE_CACHEABILITY(LE_WB) |
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -128,7 +129,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
- {
+ [I915_MOCS_UNCACHED] = {
/* 0x00000009 */
.control_value = LE_CACHEABILITY(LE_UC) |
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -138,7 +139,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
/* 0x0010 */
.l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
},
- {
+ [I915_MOCS_PTE] = {
/* 0x00000038 */
.control_value = LE_CACHEABILITY(LE_PAGETABLE) |
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -148,7 +149,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
/* 0x0030 */
.l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
},
- {
+ [I915_MOCS_CACHED] = {
/* 0x00000039 */
.control_value = LE_CACHEABILITY(LE_UC) |
LE_TGT_CACHE(LE_TC_LLC_ELLC) |
@@ -203,9 +204,9 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
return result;
}
-static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
+static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
{
- switch (ring) {
+ switch (engine_id) {
case RCS:
return GEN9_GFX_MOCS(index);
case VCS:
@@ -217,7 +218,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
case VCS2:
return GEN9_MFX1_MOCS(index);
default:
- MISSING_CASE(ring);
+ MISSING_CASE(engine_id);
return INVALID_MMIO_REG;
}
}
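
The designated initializers introduced in the MOCS tables above make the index explicit: these indices are ABI (userspace selects entries by I915_MOCS_* value), so tying each entry to its enum keeps a reorder from silently remapping cacheability settings. A minimal standalone illustration with invented names:

	enum demo_mocs { DEMO_UNCACHED, DEMO_PTE, DEMO_CACHED };

	static const u32 demo_table[] = {
		[DEMO_UNCACHED] = 0x00000009,
		[DEMO_PTE]      = 0x00000038,
		[DEMO_CACHED]   = 0x0000003b,
	};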
@@ -275,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_ring *ring = req->ring;
enum intel_engine_id engine = req->engine->id;
unsigned int index;
int ret;
@@ -287,14 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_logical_ring_emit(ringbuf,
- MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
for (index = 0; index < table->size; index++) {
- intel_logical_ring_emit_reg(ringbuf,
- mocs_register(engine, index));
- intel_logical_ring_emit(ringbuf,
- table->table[index].control_value);
+ intel_ring_emit_reg(ring, mocs_register(engine, index));
+ intel_ring_emit(ring, table->table[index].control_value);
}
/*
@@ -306,14 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
* that value to all the used entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
- intel_logical_ring_emit_reg(ringbuf,
- mocs_register(engine, index));
- intel_logical_ring_emit(ringbuf,
- table->table[0].control_value);
+ intel_ring_emit_reg(ring, mocs_register(engine, index));
+ intel_ring_emit(ring, table->table[0].control_value);
}
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
@@ -340,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_ring *ring = req->ring;
unsigned int i;
int ret;
@@ -351,19 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_logical_ring_emit(ringbuf,
+ intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
for (i = 0; i < table->size/2; i++) {
- intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_logical_ring_emit(ringbuf,
- l3cc_combine(table, 2*i, 2*i+1));
+ intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+ intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
}
if (table->size & 0x01) {
/* Odd table size - 1 left over */
- intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+ intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+ intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
i++;
}
@@ -373,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
* they are reserved by the hardware.
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
- intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
+ intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+ intel_ring_emit(ring, l3cc_combine(table, 0, 0));
}
- intel_logical_ring_emit(ringbuf, MI_NOOP);
- intel_logical_ring_advance(ringbuf);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
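
Each GEN9_LNCFCMOCS register packs two 16-bit l3cc entries, which is why the loop above walks the table in pairs and handles an odd trailing entry separately. A plausible shape for the combine step, assuming the even index lands in the low word:

	static inline u32 l3cc_pack(u16 low, u16 high)	/* illustrative only */
	{
		return (u32)low | ((u32)high << 16);
	}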
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index 4640299e04ec..a8bd9f7bfece 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -54,6 +54,6 @@
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
void intel_mocs_init_l3cc_table(struct drm_device *dev);
-int intel_mocs_init_engine(struct intel_engine_cs *ring);
+int intel_mocs_init_engine(struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index f2584d0a01ab..951e834dd274 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/fb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3212d8806b5a..a24bc8c7889f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -30,6 +30,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
/* Limits for overlay size. According to intel doc, the real limits are:
* Y width: 4095, UV width (planar): 2047, Y height: 2047,
@@ -170,8 +171,8 @@ struct overlay_registers {
struct intel_overlay {
struct drm_i915_private *i915;
struct intel_crtc *crtc;
- struct drm_i915_gem_object *vid_bo;
- struct drm_i915_gem_object *old_vid_bo;
+ struct i915_vma *vma;
+ struct i915_vma *old_vma;
bool active;
bool pfit_active;
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -183,8 +184,7 @@ struct intel_overlay {
u32 flip_addr;
struct drm_i915_gem_object *reg_bo;
/* flip handling */
- struct drm_i915_gem_request *last_flip_req;
- void (*flip_tail)(struct intel_overlay *);
+ struct i915_gem_active last_flip;
};
static struct overlay_registers __iomem *
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+ regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
overlay->flip_addr,
PAGE_SIZE);
@@ -210,37 +210,46 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
io_mapping_unmap(regs);
}
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+static void intel_overlay_submit_request(struct intel_overlay *overlay,
struct drm_i915_gem_request *req,
- void (*tail)(struct intel_overlay *))
+ i915_gem_retire_fn retire)
{
- int ret;
-
- WARN_ON(overlay->last_flip_req);
- i915_gem_request_assign(&overlay->last_flip_req, req);
+ GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
+ &overlay->i915->drm.struct_mutex));
+ overlay->last_flip.retire = retire;
+ i915_gem_active_set(&overlay->last_flip, req);
i915_add_request(req);
+}
- overlay->flip_tail = tail;
- ret = i915_wait_request(overlay->last_flip_req);
- if (ret)
- return ret;
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ struct drm_i915_gem_request *req,
+ i915_gem_retire_fn retire)
+{
+ intel_overlay_submit_request(overlay, req, retire);
+ return i915_gem_active_retire(&overlay->last_flip,
+ &overlay->i915->drm.struct_mutex);
+}
- i915_gem_request_assign(&overlay->last_flip_req, NULL);
- return 0;
+static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
+{
+ struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+
+ return i915_gem_request_alloc(engine, dev_priv->kernel_context);
}
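
The overlay conversion above replaces the hand-rolled last_flip_req/flip_tail pair with i915_gem_active, coupling the tracked request to a retire callback. In outline (a sketch of the pattern only, not the real i915_gem_active internals):

	struct active_sketch {
		struct drm_i915_gem_request *request;
		void (*retire)(struct active_sketch *active,
			       struct drm_i915_gem_request *rq);
	};

	static void active_retire_sketch(struct active_sketch *active)
	{
		struct drm_i915_gem_request *rq = active->request;

		if (!rq)
			return;
		active->request = NULL;	/* run the tail at most once */
		if (active->retire)
			active->retire(active, rq);
	}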
 /* overlay needs to be disabled in the OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
+ struct intel_ring *ring;
int ret;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
- req = i915_gem_request_alloc(engine, NULL);
+ req = alloc_request(overlay);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -252,11 +261,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = true;
- intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
- intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ ring = req->ring;
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
@@ -266,8 +276,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
+ struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
@@ -282,7 +292,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- req = i915_gem_request_alloc(engine, NULL);
+ req = alloc_request(overlay);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -292,38 +302,48 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
return ret;
}
- intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(engine, flip_addr);
- intel_ring_advance(engine);
+ ring = req->ring;
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_advance(ring);
- WARN_ON(overlay->last_flip_req);
- i915_gem_request_assign(&overlay->last_flip_req, req);
- i915_add_request(req);
+ intel_overlay_submit_request(overlay, req, NULL);
return 0;
}
-static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
+ struct drm_i915_gem_request *req)
{
- struct drm_i915_gem_object *obj = overlay->old_vid_bo;
+ struct intel_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+ struct i915_vma *vma;
- i915_gem_object_ggtt_unpin(obj);
- drm_gem_object_unreference(&obj->base);
+ vma = fetch_and_zero(&overlay->old_vma);
+ if (WARN_ON(!vma))
+ return;
- overlay->old_vid_bo = NULL;
+ i915_gem_track_fb(vma->obj, NULL,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+
+ i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_put(vma);
}
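
fetch_and_zero() (from i915_drv.h) reads a pointer and clears it in a single expression, so the tail above can never unpin the same vma twice. Paraphrased, it is essentially:

	#define fetch_and_zero(ptr) ({			\
		typeof(*(ptr)) __val = *(ptr);		\
		*(ptr) = (typeof(*(ptr)))0;		\
		__val;					\
	})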
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
+static void intel_overlay_off_tail(struct i915_gem_active *active,
+ struct drm_i915_gem_request *req)
{
- struct drm_i915_gem_object *obj = overlay->vid_bo;
+ struct intel_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+ struct i915_vma *vma;
/* never have the overlay hw on without showing a frame */
- if (WARN_ON(!obj))
+ vma = fetch_and_zero(&overlay->vma);
+ if (WARN_ON(!vma))
return;
- i915_gem_object_ggtt_unpin(obj);
- drm_gem_object_unreference(&obj->base);
- overlay->vid_bo = NULL;
+ i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_put(vma);
overlay->crtc->overlay = NULL;
overlay->crtc = NULL;
@@ -334,8 +354,8 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
+ struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
int ret;
@@ -347,7 +367,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- req = i915_gem_request_alloc(engine, NULL);
+ req = alloc_request(overlay);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -357,46 +377,36 @@ static int intel_overlay_off(struct intel_overlay *overlay)
return ret;
}
+ ring = req->ring;
/* wait for overlay to go idle */
- intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(engine, flip_addr);
- intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
if (IS_I830(dev_priv)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
} else {
- intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(engine, flip_addr);
- intel_ring_emit(engine,
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
}
- intel_ring_advance(engine);
+ intel_ring_advance(ring);
- return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
+ return intel_overlay_do_wait_request(overlay, req,
+ intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
 * We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
- int ret;
-
- if (overlay->last_flip_req == NULL)
- return 0;
-
- ret = i915_wait_request(overlay->last_flip_req);
- if (ret)
- return ret;
-
- if (overlay->flip_tail)
- overlay->flip_tail(overlay);
-
- i915_gem_request_assign(&overlay->last_flip_req, NULL);
- return 0;
+ return i915_gem_active_retire(&overlay->last_flip,
+ &overlay->i915->drm.struct_mutex);
}
/* Wait for pending overlay flip and release old frame.
@@ -406,7 +416,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -414,14 +423,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
/* Only wait if there is actually an old frame to release to
* guarantee forward progress.
*/
- if (!overlay->old_vid_bo)
+ if (!overlay->old_vma)
return 0;
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
struct drm_i915_gem_request *req;
+ struct intel_ring *ring;
- req = i915_gem_request_alloc(engine, NULL);
+ req = alloc_request(overlay);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -431,22 +441,19 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return ret;
}
- intel_ring_emit(engine,
+ ring = req->ring;
+ intel_ring_emit(ring,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
- }
+ } else
+ intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
- intel_overlay_release_old_vid_tail(overlay);
-
-
- i915_gem_track_fb(overlay->old_vid_bo, NULL,
- INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
return 0;
}
@@ -459,7 +466,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv)
intel_overlay_release_old_vid(overlay);
- overlay->last_flip_req = NULL;
overlay->old_xscale = 0;
overlay->old_yscale = 0;
overlay->crtc = NULL;
@@ -740,6 +746,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
+ struct i915_vma *vma;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -748,12 +755,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin_to_display_plane(new_bo, 0,
+ vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
&i915_ggtt_view_normal);
- if (ret != 0)
- return ret;
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- ret = i915_gem_object_put_fence(new_bo);
+ ret = i915_vma_put_fence(vma);
if (ret)
goto out_unpin;
@@ -794,7 +801,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w;
swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
sheight = params->src_h;
- iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
+ iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -808,8 +815,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
- iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
+ iowrite32(i915_ggtt_offset(vma) + params->offset_U,
+ &regs->OBUF_0U);
+ iowrite32(i915_ggtt_offset(vma) + params->offset_V,
+ &regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
@@ -830,19 +839,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
- i915_gem_track_fb(overlay->vid_bo, new_bo,
+ i915_gem_track_fb(overlay->vma->obj, new_bo,
INTEL_FRONTBUFFER_OVERLAY(pipe));
- overlay->old_vid_bo = overlay->vid_bo;
- overlay->vid_bo = new_bo;
+ overlay->old_vma = overlay->vma;
+ overlay->vma = vma;
- intel_frontbuffer_flip(&dev_priv->drm,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
return 0;
out_unpin:
- i915_gem_object_ggtt_unpin(new_bo);
+ i915_gem_object_unpin_from_display_plane(vma);
return ret;
}
@@ -870,12 +878,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
iowrite32(0, &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_off(overlay);
- if (ret != 0)
- return ret;
-
- intel_overlay_off_tail(overlay);
- return 0;
+ return intel_overlay_off(overlay);
}
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
@@ -1122,9 +1125,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
}
crtc = to_intel_crtc(drmmode_crtc);
- new_bo = to_intel_bo(drm_gem_object_lookup(file_priv,
- put_image_rec->bo_handle));
- if (&new_bo->base == NULL) {
+ new_bo = i915_gem_object_lookup(file_priv, put_image_rec->bo_handle);
+ if (!new_bo) {
ret = -ENOENT;
goto out_free;
}
@@ -1132,7 +1134,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
mutex_lock(&dev->struct_mutex);
- if (new_bo->tiling_mode) {
+ if (i915_gem_object_is_tiled(new_bo)) {
DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
@@ -1220,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
- drm_gem_object_unreference_unlocked(&new_bo->base);
+ i915_gem_object_put_unlocked(new_bo);
out_free:
kfree(params);
@@ -1371,6 +1373,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs;
+ struct i915_vma *vma = NULL;
int ret;
if (!HAS_OVERLAY(dev_priv))
@@ -1404,12 +1407,14 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
}
overlay->flip_addr = reg_bo->phys_handle->busaddr;
} else {
- ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
- if (ret) {
+ vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
+ 0, PAGE_SIZE, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
DRM_ERROR("failed to pin overlay register bo\n");
+ ret = PTR_ERR(vma);
goto out_free_bo;
}
- overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
+ overlay->flip_addr = i915_ggtt_offset(vma);
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1441,10 +1446,10 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
return;
out_unpin_bo:
- if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
- i915_gem_object_ggtt_unpin(reg_bo);
+ if (vma)
+ i915_vma_unpin(vma);
out_free_bo:
- drm_gem_object_unreference(&reg_bo->base);
+ i915_gem_object_put(reg_bo);
out_free:
mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(overlay);
@@ -1461,7 +1466,7 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
* hardware should be off already */
WARN_ON(dev_priv->overlay->active);
- drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+ i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo);
kfree(dev_priv->overlay);
}
@@ -1484,7 +1489,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+ regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
overlay->flip_addr);
return regs;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c65d77e886..be4b4d546fd9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -841,7 +841,7 @@ static void lpt_enable_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pch_ctl1, pch_ctl2;
+ u32 pch_ctl1, pch_ctl2, schicken;
pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
@@ -850,6 +850,22 @@ static void lpt_enable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
}
+ if (HAS_PCH_LPT(dev_priv)) {
+ schicken = I915_READ(SOUTH_CHICKEN2);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= LPT_PWM_GRANULARITY;
+ else
+ schicken &= ~LPT_PWM_GRANULARITY;
+ I915_WRITE(SOUTH_CHICKEN2, schicken);
+ } else {
+ schicken = I915_READ(SOUTH_CHICKEN1);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= SPT_PWM_GRANULARITY;
+ else
+ schicken &= ~SPT_PWM_GRANULARITY;
+ I915_WRITE(SOUTH_CHICKEN1, schicken);
+ }
+
pch_ctl2 = panel->backlight.max << 16;
I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
@@ -1242,10 +1258,10 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
u32 mul;
- if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
+ if (panel->backlight.alternate_pwm_increment)
mul = 128;
else
mul = 16;
@@ -1261,9 +1277,10 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
u32 mul, clock;
- if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
+ if (panel->backlight.alternate_pwm_increment)
mul = 16;
else
mul = 128;
@@ -1414,6 +1431,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, val;
+ bool alt;
+
+ if (HAS_PCH_LPT(dev_priv))
+ alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ else
+ alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ panel->backlight.alternate_pwm_increment = alt;
pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
@@ -1430,10 +1454,11 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.min = get_backlight_min_vbt(connector);
val = lpt_get_backlight(connector);
- panel->backlight.level = intel_panel_compute_brightness(connector, val);
+ val = intel_panel_compute_brightness(connector, val);
+ panel->backlight.level = clamp(val, panel->backlight.min,
+ panel->backlight.max);
- panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
- panel->backlight.level != 0;
+ panel->backlight.enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
return 0;
}
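
The recurring change in the *_setup_backlight() paths above and below is to bound the translated hardware value by the VBT-derived range before storing it, so a stale register cannot leave the saved level outside [min, max]. Modelled standalone (the kernel's clamp() reduces to this for u32):

	static u32 clamp_level(u32 val, u32 min, u32 max)	/* sketch */
	{
		return val < min ? min : val > max ? max : val;
	}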
@@ -1459,11 +1484,13 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.min = get_backlight_min_vbt(connector);
val = pch_get_backlight(connector);
- panel->backlight.level = intel_panel_compute_brightness(connector, val);
+ val = intel_panel_compute_brightness(connector, val);
+ panel->backlight.level = clamp(val, panel->backlight.min,
+ panel->backlight.max);
cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
- (pch_ctl1 & BLM_PCH_PWM_ENABLE) && panel->backlight.level != 0;
+ (pch_ctl1 & BLM_PCH_PWM_ENABLE);
return 0;
}
@@ -1498,9 +1525,11 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.min = get_backlight_min_vbt(connector);
val = i9xx_get_backlight(connector);
- panel->backlight.level = intel_panel_compute_brightness(connector, val);
+ val = intel_panel_compute_brightness(connector, val);
+ panel->backlight.level = clamp(val, panel->backlight.min,
+ panel->backlight.max);
- panel->backlight.enabled = panel->backlight.level != 0;
+ panel->backlight.enabled = val != 0;
return 0;
}
@@ -1530,10 +1559,11 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
panel->backlight.min = get_backlight_min_vbt(connector);
val = i9xx_get_backlight(connector);
- panel->backlight.level = intel_panel_compute_brightness(connector, val);
+ val = intel_panel_compute_brightness(connector, val);
+ panel->backlight.level = clamp(val, panel->backlight.min,
+ panel->backlight.max);
- panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
- panel->backlight.level != 0;
+ panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1562,10 +1592,11 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
panel->backlight.min = get_backlight_min_vbt(connector);
val = _vlv_get_backlight(dev_priv, pipe);
- panel->backlight.level = intel_panel_compute_brightness(connector, val);
+ val = intel_panel_compute_brightness(connector, val);
+ panel->backlight.level = clamp(val, panel->backlight.min,
+ panel->backlight.max);
- panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
- panel->backlight.level != 0;
+ panel->backlight.enabled = ctl2 & BLM_PWM_ENABLE;
return 0;
}
@@ -1607,10 +1638,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
return -ENODEV;
val = bxt_get_backlight(connector);
- panel->backlight.level = intel_panel_compute_brightness(connector, val);
+ val = intel_panel_compute_brightness(connector, val);
+ panel->backlight.level = clamp(val, panel->backlight.min,
+ panel->backlight.max);
- panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
- panel->backlight.level != 0;
+ panel->backlight.enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2d2481392824..db24f898853c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -340,6 +340,11 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
I915_WRITE(FW_BLC_SELF, val);
POSTING_READ(FW_BLC_SELF);
} else if (IS_I915GM(dev)) {
+ /*
+ * FIXME can't find a bit like this for 915G, and yet
+ * it does have the related watermark in FW_BLC_SELF.
+ * What's going on?
+ */
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
I915_WRITE(INSTPM, val);
@@ -960,7 +965,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
- if (!state->visible)
+ if (!state->base.visible)
return 0;
cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
@@ -1002,7 +1007,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
continue;
- if (state->visible) {
+ if (state->base.visible) {
wm_state->num_active_planes++;
total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
}
@@ -1018,7 +1023,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
continue;
}
- if (!state->visible) {
+ if (!state->base.visible) {
plane->wm.fifo_size = 0;
continue;
}
@@ -1118,7 +1123,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
struct intel_plane_state *state =
to_intel_plane_state(plane->base.state);
- if (!state->visible)
+ if (!state->base.visible)
continue;
/* normal watermarks */
@@ -1580,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
obj = intel_fb_obj(enabled->primary->state->fb);
/* self-refresh seems busted with untiled */
- if (obj->tiling_mode == I915_TILING_NONE)
+ if (!i915_gem_object_is_tiled(obj))
enabled = NULL;
}
@@ -1604,6 +1609,9 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
unsigned long line_time_us;
int entries;
+ if (IS_I915GM(dev) || IS_I945GM(dev))
+ cpp = 4;
+
line_time_us = max(htotal * 1000 / clock, 1);
/* Use ns/us then divide to preserve precision */
@@ -1618,7 +1626,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else if (IS_I915GM(dev))
+ else
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
}
@@ -1767,7 +1775,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
- if (!cstate->base.active || !pstate->visible)
+ if (!cstate->base.active || !pstate->base.visible)
return 0;
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
@@ -1777,7 +1785,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->dst),
+ drm_rect_width(&pstate->base.dst),
cpp, mem_value);
return min(method1, method2);
@@ -1795,13 +1803,13 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
- if (!cstate->base.active || !pstate->visible)
+ if (!cstate->base.active || !pstate->base.visible)
return 0;
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->dst),
+ drm_rect_width(&pstate->base.dst),
cpp, mem_value);
return min(method1, method2);
}
@@ -1820,7 +1828,7 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
* this is necessary to avoid flickering.
*/
int cpp = 4;
- int width = pstate->visible ? pstate->base.crtc_w : 64;
+ int width = pstate->base.visible ? pstate->base.crtc_w : 64;
if (!cstate->base.active)
return 0;
@@ -1838,10 +1846,10 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
int cpp = pstate->base.fb ?
drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
- if (!cstate->base.active || !pstate->visible)
+ if (!cstate->base.active || !pstate->base.visible)
return 0;
- return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
+ return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
@@ -2119,32 +2127,34 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
GEN9_MEM_LATENCY_LEVEL_MASK;
/*
+ * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
+ * need to be disabled. We make sure to sanitize the values out
+ * of the punit to satisfy this requirement.
+ */
+ for (level = 1; level <= max_level; level++) {
+ if (wm[level] == 0) {
+ for (i = level + 1; i <= max_level; i++)
+ wm[i] = 0;
+ break;
+ }
+ }
+
+ /*
* WaWmMemoryReadLatency:skl
*
* punit doesn't take into account the read latency so we need
- * to add 2us to the various latency levels we retrieve from
- * the punit.
- * - W0 is a bit special in that it's the only level that
- * can't be disabled if we want to have display working, so
- * we always add 2us there.
- * - For levels >=1, punit returns 0us latency when they are
- * disabled, so we respect that and don't add 2us then
- *
- * Additionally, if a level n (n > 1) has a 0us latency, all
- * levels m (m >= n) need to be disabled. We make sure to
- * sanitize the values out of the punit to satisfy this
- * requirement.
+ * to add 2us to the various latency levels we retrieve from the
+ * punit when the level 0 response data is 0us.
*/
- wm[0] += 2;
- for (level = 1; level <= max_level; level++)
- if (wm[level] != 0)
+ if (wm[0] == 0) {
+ wm[0] += 2;
+ for (level = 1; level <= max_level; level++) {
+ if (wm[level] == 0)
+ break;
wm[level] += 2;
- else {
- for (i = level + 1; i <= max_level; i++)
- wm[i] = 0;
-
- break;
}
+ }
+
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
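
Isolated, the sanitization rule added above reads: the first 0us latency at level >= 1 disables that level and every level after it. A standalone sketch:

	static void sanitize_latencies(u16 wm[], int max_level)
	{
		int level, i;

		/* e.g. {2, 4, 0, 7} becomes {2, 4, 0, 0} */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
		}
	}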
@@ -2358,10 +2368,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
pipe_wm->pipe_enabled = cstate->base.active;
if (sprstate) {
- pipe_wm->sprites_enabled = sprstate->visible;
- pipe_wm->sprites_scaled = sprstate->visible &&
- (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
- drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
+ pipe_wm->sprites_enabled = sprstate->base.visible;
+ pipe_wm->sprites_scaled = sprstate->base.visible &&
+ (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
+ drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
}
usable_level = max_level;
@@ -2845,13 +2855,6 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
-/*
- * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
- * different active planes.
- */
-
-#define SKL_DDB_SIZE 896 /* in blocks */
-#define BXT_DDB_SIZE 512
#define SKL_SAGV_BLOCK_TIME 30 /* µs */
/*
@@ -2876,6 +2879,19 @@ skl_wm_plane_id(const struct intel_plane *plane)
}
}
+static bool
+intel_has_sagv(struct drm_i915_private *dev_priv)
+{
+ if (IS_KABYLAKE(dev_priv))
+ return true;
+
+ if (IS_SKYLAKE(dev_priv) &&
+ dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
+ return true;
+
+ return false;
+}
+
/*
* SAGV dynamically adjusts the system agent voltage and clock frequencies
* depending on power and performance requirements. The display engine access
@@ -2888,12 +2904,14 @@ skl_wm_plane_id(const struct intel_plane *plane)
* - We're not using an interlaced display configuration
*/
int
-skl_enable_sagv(struct drm_i915_private *dev_priv)
+intel_enable_sagv(struct drm_i915_private *dev_priv)
{
int ret;
- if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
- dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+ if (!intel_has_sagv(dev_priv))
+ return 0;
+
+ if (dev_priv->sagv_status == I915_SAGV_ENABLED)
return 0;
DRM_DEBUG_KMS("Enabling the SAGV\n");
@@ -2909,21 +2927,21 @@ skl_enable_sagv(struct drm_i915_private *dev_priv)
* Some skl systems, pre-release machines in particular,
* don't actually have an SAGV.
*/
- if (ret == -ENXIO) {
+ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
- dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
DRM_ERROR("Failed to enable the SAGV\n");
return ret;
}
- dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+ dev_priv->sagv_status = I915_SAGV_ENABLED;
return 0;
}
static int
-skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+intel_do_sagv_disable(struct drm_i915_private *dev_priv)
{
int ret;
uint32_t temp = GEN9_SAGV_DISABLE;
@@ -2937,19 +2955,21 @@ skl_do_sagv_disable(struct drm_i915_private *dev_priv)
}
int
-skl_disable_sagv(struct drm_i915_private *dev_priv)
+intel_disable_sagv(struct drm_i915_private *dev_priv)
{
int ret, result;
- if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
- dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+ if (!intel_has_sagv(dev_priv))
+ return 0;
+
+ if (dev_priv->sagv_status == I915_SAGV_DISABLED)
return 0;
DRM_DEBUG_KMS("Disabling the SAGV\n");
mutex_lock(&dev_priv->rps.hw_lock);
/* bspec says to keep retrying for at least 1 ms */
- ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+ ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret == -ETIMEDOUT) {
@@ -2961,20 +2981,20 @@ skl_disable_sagv(struct drm_i915_private *dev_priv)
* Some skl systems, pre-release machines in particular,
* don't actually have an SAGV.
*/
- if (result == -ENXIO) {
+ if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
- dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (result < 0) {
DRM_ERROR("Failed to disable the SAGV\n");
return result;
}
- dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+ dev_priv->sagv_status = I915_SAGV_DISABLED;
return 0;
}
-bool skl_can_enable_sagv(struct drm_atomic_state *state)
+bool intel_can_enable_sagv(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2983,6 +3003,9 @@ bool skl_can_enable_sagv(struct drm_atomic_state *state)
enum pipe pipe;
int level, plane;
+ if (!intel_has_sagv(dev_priv))
+ return false;
+
/*
* SKL workaround: bspec recommends we disable the SAGV when we have
 * more than one pipe enabled
@@ -3049,10 +3072,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
else
*num_active = hweight32(dev_priv->active_crtcs);
- if (IS_BROXTON(dev))
- ddb_size = BXT_DDB_SIZE;
- else
- ddb_size = SKL_DDB_SIZE;
+ ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+ WARN_ON(ddb_size == 0);
ddb_size -= 4; /* 4 blocks for bypass path allocation */
@@ -3144,14 +3165,14 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate)
uint32_t downscale_h, downscale_w;
uint32_t src_w, src_h, dst_w, dst_h;
- if (WARN_ON(!pstate->visible))
+ if (WARN_ON(!pstate->base.visible))
return DRM_PLANE_HELPER_NO_SCALING;
/* n.b., src is 16.16 fixed point, dst is whole integer */
- src_w = drm_rect_width(&pstate->src);
- src_h = drm_rect_height(&pstate->src);
- dst_w = drm_rect_width(&pstate->dst);
- dst_h = drm_rect_height(&pstate->dst);
+ src_w = drm_rect_width(&pstate->base.src);
+ src_h = drm_rect_height(&pstate->base.src);
+ dst_w = drm_rect_width(&pstate->base.dst);
+ dst_h = drm_rect_height(&pstate->base.dst);
if (intel_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
@@ -3173,15 +3194,15 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
uint32_t width = 0, height = 0;
unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
- if (!intel_pstate->visible)
+ if (!intel_pstate->base.visible)
return 0;
if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
return 0;
if (y && format != DRM_FORMAT_NV12)
return 0;
- width = drm_rect_width(&intel_pstate->src) >> 16;
- height = drm_rect_height(&intel_pstate->src) >> 16;
+ width = drm_rect_width(&intel_pstate->base.src) >> 16;
+ height = drm_rect_height(&intel_pstate->base.src) >> 16;
if (intel_rotation_90_or_270(pstate->rotation))
swap(width, height);
@@ -3280,8 +3301,8 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
return 8;
- src_w = drm_rect_width(&intel_pstate->src) >> 16;
- src_h = drm_rect_height(&intel_pstate->src) >> 16;
+ src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
+ src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
if (intel_rotation_90_or_270(pstate->rotation))
swap(src_w, src_h);
@@ -3341,13 +3362,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
int num_active;
int id, i;
+ /* Clear the partitioning for disabled planes. */
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+
if (WARN_ON(!state))
return 0;
if (!cstate->base.active) {
ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
return 0;
}
@@ -3372,7 +3395,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
if (intel_plane->pipe != pipe)
continue;
- if (!to_intel_plane_state(pstate)->visible) {
+ if (!to_intel_plane_state(pstate)->base.visible) {
minimum[id] = 0;
y_minimum[id] = 0;
continue;
@@ -3447,12 +3470,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
-{
- /* TODO: Take into account the scalers once we support them */
- return config->base.adjusted_mode.crtc_clock;
-}
-
/*
* The max latency should be 257 (max the punit can code is 255 and we add 2us
* for the read latency) and cpp should always be <= 8, so that
@@ -3473,29 +3490,14 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latenc
}
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
- uint32_t horiz_pixels, uint8_t cpp,
- uint64_t tiling, uint32_t latency)
+ uint32_t latency, uint32_t plane_blocks_per_line)
{
uint32_t ret;
- uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t wm_intermediate_val;
if (latency == 0)
return UINT_MAX;
- plane_bytes_per_line = horiz_pixels * cpp;
-
- if (tiling == I915_FORMAT_MOD_Y_TILED ||
- tiling == I915_FORMAT_MOD_Yf_TILED) {
- plane_bytes_per_line *= 4;
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
- plane_blocks_per_line /= 4;
- } else if (tiling == DRM_FORMAT_MOD_NONE) {
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
- } else {
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
- }
-
wm_intermediate_val = latency * pixel_rate;
ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
plane_blocks_per_line;
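
With plane_blocks_per_line now passed in by the caller, method 2 reduces to the two lines above; a worked instance with invented numbers:

	/*
	 * pixel_rate = 150000 (kHz), htotal = 2200, latency = 15 (us),
	 * plane_blocks_per_line = 32:
	 *   DIV_ROUND_UP(15 * 150000, 2200 * 1000) * 32
	 *   = DIV_ROUND_UP(2250000, 2200000) * 32 = 2 * 32 = 64 blocks
	 */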
@@ -3511,14 +3513,14 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
uint64_t pixel_rate;
/* Shouldn't reach here on disabled planes... */
- if (WARN_ON(!pstate->visible))
+ if (WARN_ON(!pstate->base.visible))
return 0;
/*
* Adjusted plane pixel rate is just the pipe's adjusted pixel rate
* with additional adjustments for plane-specific scaling.
*/
- adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+ adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
downscale_amount = skl_plane_downscale_amount(pstate);
pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
@@ -3546,14 +3548,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint8_t cpp;
uint32_t width = 0, height = 0;
uint32_t plane_pixel_rate;
+ uint32_t y_tile_minimum, y_min_scanlines;
- if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
+ if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
*enabled = false;
return 0;
}
- width = drm_rect_width(&intel_pstate->src) >> 16;
- height = drm_rect_height(&intel_pstate->src) >> 16;
+ width = drm_rect_width(&intel_pstate->base.src) >> 16;
+ height = drm_rect_height(&intel_pstate->base.src) >> 16;
if (intel_rotation_90_or_270(pstate->rotation))
swap(width, height);
@@ -3561,38 +3564,51 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+ if (intel_rotation_90_or_270(pstate->rotation)) {
+ int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
+ drm_format_plane_cpp(fb->pixel_format, 1) :
+ drm_format_plane_cpp(fb->pixel_format, 0);
+
+ switch (cpp) {
+ case 1:
+ y_min_scanlines = 16;
+ break;
+ case 2:
+ y_min_scanlines = 8;
+ break;
+ default:
+ WARN(1, "Unsupported pixel depth for rotation");
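+ /* fall through - unknown depths are treated as 32bpp */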
+ case 4:
+ y_min_scanlines = 4;
+ break;
+ }
+ } else {
+ y_min_scanlines = 4;
+ }
+
+ plane_bytes_per_line = width * cpp;
+ if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ plane_blocks_per_line =
+ DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
+ plane_blocks_per_line /= y_min_scanlines;
+ } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
+ + 1;
+ } else {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ }
+
method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
method2 = skl_wm_method2(plane_pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
- width,
- cpp,
- fb->modifier[0],
- latency);
+ latency,
+ plane_blocks_per_line);
- plane_bytes_per_line = width * cpp;
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
- uint32_t min_scanlines = 4;
- uint32_t y_tile_minimum;
- if (intel_rotation_90_or_270(pstate->rotation)) {
- int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
- drm_format_plane_cpp(fb->pixel_format, 1) :
- drm_format_plane_cpp(fb->pixel_format, 0);
-
- switch (cpp) {
- case 1:
- min_scanlines = 16;
- break;
- case 2:
- min_scanlines = 8;
- break;
- case 8:
- WARN(1, "Unsupported pixel depth for rotation");
- }
- }
- y_tile_minimum = plane_blocks_per_line * min_scanlines;
selected_result = max(method2, y_tile_minimum);
} else {
if ((ddb_allocation / plane_blocks_per_line) >= 1)
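
The hoisted blocks-per-line computation above checks out with concrete numbers; a standalone sketch of the Y-tiled branch (512-byte blocks, as in the patch):

	static u32 y_tiled_blocks_per_line(u32 width, u32 cpp, u32 y_min_scanlines)
	{
		/* e.g. width = 4096, cpp = 4, y_min_scanlines = 4:
		 * DIV_ROUND_UP(4096 * 4 * 4, 512) / 4 = 128 / 4 = 32 */
		return DIV_ROUND_UP(width * cpp * y_min_scanlines, 512) /
		       y_min_scanlines;
	}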
@@ -3606,10 +3622,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (level >= 1 && level <= 7) {
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
- res_lines += 4;
- else
+ fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ res_blocks += y_tile_minimum;
+ res_lines += y_min_scanlines;
+ } else {
res_blocks++;
+ }
}
if (res_blocks >= ddb_allocation || res_lines > 31) {
@@ -3714,11 +3732,11 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
if (!cstate->base.active)
return 0;
- if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
+ if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0))
return 0;
return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
- skl_pipe_pixel_rate(cstate));
+ ilk_pipe_pixel_rate(cstate));
}
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
@@ -3828,183 +3846,82 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
I915_WRITE(reg, 0);
}
-static void skl_write_wm_values(struct drm_i915_private *dev_priv,
- const struct skl_wm_values *new)
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+ const struct skl_wm_values *wm,
+ int plane)
{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(dev, crtc) {
- int i, level, max_level = ilk_wm_max_level(dev);
- enum pipe pipe = crtc->pipe;
-
- if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
- continue;
- if (!crtc->active)
- continue;
-
- I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(crtc); i++)
- I915_WRITE(PLANE_WM(pipe, i, level),
- new->plane[pipe][i][level]);
- I915_WRITE(CUR_WM(pipe, level),
- new->plane[pipe][PLANE_CURSOR][level]);
- }
- for (i = 0; i < intel_num_planes(crtc); i++)
- I915_WRITE(PLANE_WM_TRANS(pipe, i),
- new->plane_trans[pipe][i]);
- I915_WRITE(CUR_WM_TRANS(pipe),
- new->plane_trans[pipe][PLANE_CURSOR]);
-
- for (i = 0; i < intel_num_planes(crtc); i++) {
- skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, i),
- &new->ddb.plane[pipe][i]);
- skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, i),
- &new->ddb.y_plane[pipe][i]);
- }
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = intel_crtc->pipe;
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &new->ddb.plane[pipe][PLANE_CURSOR]);
+ for (level = 0; level <= max_level; level++) {
+ I915_WRITE(PLANE_WM(pipe, plane, level),
+ wm->plane[pipe][plane][level]);
}
-}
+ I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
-/*
- * When setting up a new DDB allocation arrangement, we need to correctly
- * sequence the times at which the new allocations for the pipes are taken into
- * account or we'll have pipes fetching from space previously allocated to
- * another pipe.
- *
- * Roughly the sequence looks like:
- * 1. re-allocate the pipe(s) with the allocation being reduced and not
- * overlapping with a previous light-up pipe (another way to put it is:
- * pipes with their new allocation strictly included into their old ones).
- * 2. re-allocate the other pipes that get their allocation reduced
- * 3. allocate the pipes having their allocation increased
- *
- * Steps 1. and 2. are here to take care of the following case:
- * - Initially DDB looks like this:
- * | B | C |
- * - enable pipe A.
- * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
- * allocation
- * | A | B | C |
- *
- * We need to sequence the re-allocation: C, B, A (and not B, C, A).
- */
+ skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
+ &wm->ddb.plane[pipe][plane]);
+ skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
+ &wm->ddb.y_plane[pipe][plane]);
+}
-static void
-skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+ const struct skl_wm_values *wm)
{
- int plane;
-
- DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = intel_crtc->pipe;
- for_each_plane(dev_priv, pipe, plane) {
- I915_WRITE(PLANE_SURF(pipe, plane),
- I915_READ(PLANE_SURF(pipe, plane)));
+ for (level = 0; level <= max_level; level++) {
+ I915_WRITE(CUR_WM(pipe, level),
+ wm->plane[pipe][PLANE_CURSOR][level]);
}
- I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+ I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+ &wm->ddb.plane[pipe][PLANE_CURSOR]);
}
-static bool
-skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe)
{
- uint16_t old_size, new_size;
-
- old_size = skl_ddb_entry_size(&old->pipe[pipe]);
- new_size = skl_ddb_entry_size(&new->pipe[pipe]);
-
- return old_size != new_size &&
- new->pipe[pipe].start >= old->pipe[pipe].start &&
- new->pipe[pipe].end <= old->pipe[pipe].end;
+ return new->pipe[pipe].start == old->pipe[pipe].start &&
+ new->pipe[pipe].end == old->pipe[pipe].end;
}
-static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
- struct skl_wm_values *new_values)
+static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
{
- struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_allocation *cur_ddb, *new_ddb;
- bool reallocated[I915_MAX_PIPES] = {};
- struct intel_crtc *crtc;
- enum pipe pipe;
-
- new_ddb = &new_values->ddb;
- cur_ddb = &dev_priv->wm.skl_hw.ddb;
-
- /*
- * First pass: flush the pipes with the new allocation contained into
- * the old space.
- *
- * We'll wait for the vblank on those pipes to ensure we can safely
- * re-allocate the freed space without this pipe fetching from it.
- */
- for_each_intel_crtc(dev, crtc) {
- if (!crtc->active)
- continue;
-
- pipe = crtc->pipe;
-
- if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
- continue;
-
- skl_wm_flush_pipe(dev_priv, pipe, 1);
- intel_wait_for_vblank(dev, pipe);
-
- reallocated[pipe] = true;
- }
-
+ return a->start < b->end && b->start < a->end;
+}
- /*
- * Second pass: flush the pipes that are having their allocation
- * reduced, but overlapping with a previous allocation.
- *
- * Here as well we need to wait for the vblank to make sure the freed
- * space is not used anymore.
- */
- for_each_intel_crtc(dev, crtc) {
- if (!crtc->active)
- continue;
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+ const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe)
+{
+ struct drm_device *dev = state->dev;
+ struct intel_crtc *intel_crtc;
+ enum pipe otherp;
- pipe = crtc->pipe;
+ for_each_intel_crtc(dev, intel_crtc) {
+ otherp = intel_crtc->pipe;
- if (reallocated[pipe])
+ if (otherp == pipe)
continue;
- if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
- skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
- skl_wm_flush_pipe(dev_priv, pipe, 2);
- intel_wait_for_vblank(dev, pipe);
- reallocated[pipe] = true;
- }
+ if (skl_ddb_entries_overlap(&new->pipe[pipe],
+ &old->pipe[otherp]))
+ return true;
}
- /*
- * Third pass: flush the pipes that got more space allocated.
- *
- * We don't need to actively wait for the update here, next vblank
- * will just get more DDB space with the correct WM values.
- */
- for_each_intel_crtc(dev, crtc) {
- if (!crtc->active)
- continue;
-
- pipe = crtc->pipe;
-
- /*
- * At this point, only the pipes with more space than before are
- * left to re-allocate.
- */
- if (reallocated[pipe])
- continue;
-
- skl_wm_flush_pipe(dev_priv, pipe, 3);
- }
+ return false;
}
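
The new overlap check treats each DDB entry as a half-open interval [start, end), so two allocations collide exactly when each one starts before the other ends; entries that merely touch do not. A minimal sketch of the predicate (the struct and values are illustrative, not from the driver):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ddb_entry { uint16_t start, end; };	/* half-open: [start, end) */

/* Same predicate as skl_ddb_entries_overlap() above. */
static bool entries_overlap(const struct ddb_entry *a,
			    const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

int main(void)
{
	struct ddb_entry a = { 0, 256 }, b = { 256, 512 }, c = { 200, 300 };

	assert(!entries_overlap(&a, &b));	/* touching ends: no overlap */
	assert(entries_overlap(&a, &c));
	assert(entries_overlap(&b, &c));
	return 0;
}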
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
@@ -4041,6 +3958,41 @@ pipes_modified(struct drm_atomic_state *state)
return ret;
}
+int
+skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+{
+ struct drm_atomic_state *state = cstate->base.state;
+ struct drm_device *dev = state->dev;
+ struct drm_crtc *crtc = cstate->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ enum pipe pipe = intel_crtc->pipe;
+ int id;
+
+ WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
+
+ drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+ id = skl_wm_plane_id(to_intel_plane(plane));
+
+ if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
+ &new_ddb->plane[pipe][id]) &&
+ skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
+ &new_ddb->y_plane[pipe][id]))
+ continue;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
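
The point of this helper is to pull into the atomic commit only the planes whose DDB entry actually moved, instead of unconditionally adding every plane on the CRTC. A toy version of that filter, with hypothetical allocations:

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry { unsigned short start, end; };

static bool entry_equal(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start == b->start && a->end == b->end;
}

/*
 * Filter sketch mirroring the loop above: only planes whose allocation
 * changed need a plane state added to the commit. Values are made up.
 */
int main(void)
{
	struct ddb_entry cur[2]  = { { 0, 128 }, { 128, 256 } };
	struct ddb_entry want[2] = { { 0, 128 }, { 128, 512 } };
	int id;

	for (id = 0; id < 2; id++) {
		if (entry_equal(&cur[id], &want[id]))
			continue;	/* untouched: skip the plane */
		printf("plane %d needs an atomic plane state\n", id);
	}
	return 0;
}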
+
static int
skl_compute_ddb(struct drm_atomic_state *state)
{
@@ -4094,6 +4046,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
intel_state->wm_results.dirty_pipes = ~0;
}
+ /*
+ * We're not recomputing for the pipes not included in the commit, so
+ * make sure we start with the current state.
+ */
+ memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+
for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
struct intel_crtc_state *cstate;
@@ -4105,7 +4063,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
if (ret)
return ret;
- ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+ ret = skl_ddb_add_affected_planes(cstate);
if (ret)
return ret;
}
@@ -4206,7 +4164,7 @@ static void skl_update_wm(struct drm_crtc *crtc)
struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- int pipe;
+ enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
return;
@@ -4215,15 +4173,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
mutex_lock(&dev_priv->wm.wm_mutex);
- skl_write_wm_values(dev_priv, results);
- skl_flush_wm_values(dev_priv, results);
-
/*
- * Store the new configuration (but only for the pipes that have
- * changed; the other values weren't recomputed).
+ * If this pipe isn't active already, we're going to be enabling it
+ * very soon. Since it's safe to update a pipe's ddb allocation while
+ * the pipe's shut off, just do so here. Already active pipes will have
+ * their watermarks updated once we update their planes.
*/
- for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
- skl_copy_wm_for_pipe(hw_vals, results, pipe);
+ if (crtc->state->active_changed) {
+ int plane;
+
+ for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
+ skl_write_plane_wm(intel_crtc, results, plane);
+
+ skl_write_cursor_wm(intel_crtc, results);
+ }
+
+ skl_copy_wm_for_pipe(hw_vals, results, pipe);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5103,7 +5068,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
*/
if (!(dev_priv->gt.awake &&
dev_priv->rps.enabled &&
- dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
+ dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
return;
/* Force a RPS boost (and don't count it against the client) if
@@ -5294,35 +5259,31 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
- uint32_t rp_state_cap;
- u32 ddcc_status = 0;
- int ret;
-
/* All of these values are in units of 50MHz */
- dev_priv->rps.cur_freq = 0;
+
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_BROXTON(dev_priv)) {
- rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+ u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
} else {
- rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
}
-
/* hw_max = RP0 until we check for overclocking */
- dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+ dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- ret = sandybridge_pcode_read(dev_priv,
- HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
- &ddcc_status);
- if (0 == ret)
+ u32 ddcc_status = 0;
+
+ if (sandybridge_pcode_read(dev_priv,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ &ddcc_status) == 0)
dev_priv->rps.efficient_freq =
clamp_t(u8,
((ddcc_status >> 8) & 0xff),
@@ -5332,29 +5293,26 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHZ units, which is
- the natural hardware unit for SKL */
+ * the natural hardware unit for SKL
+ */
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
}
+}
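
The scaling above keeps all generations storing RPS frequencies in one scheme: pre-gen9 fields count 50 MHz steps while SKL/KBL hardware counts 16.66 MHz steps, so one legacy step is three hardware steps. A unit-conversion sketch (GEN9_FREQ_SCALER assumed to be 3, and the rp0 value below is made up):

#include <stdio.h>

#define GEN9_FREQ_SCALER 3	/* 50 MHz / 16.66 MHz */

int main(void)
{
	unsigned int rp0 = 13;	/* 13 * 50 MHz = 650 MHz */
	unsigned int skl_rp0 = rp0 * GEN9_FREQ_SCALER;

	printf("rp0 = %u -> %u (16.66 MHz units, still ~%u MHz)\n",
	       rp0, skl_rp0, skl_rp0 * 50 / GEN9_FREQ_SCALER);
	return 0;
}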
- dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+static void reset_rps(struct drm_i915_private *dev_priv,
+ void (*set)(struct drm_i915_private *, u8))
+{
+ u8 freq = dev_priv->rps.cur_freq;
- /* Preserve min/max settings in case of re-init */
- if (dev_priv->rps.max_freq_softlimit == 0)
- dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+ /* force a reset */
+ dev_priv->rps.power = -1;
+ dev_priv->rps.cur_freq = -1;
- if (dev_priv->rps.min_freq_softlimit == 0) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- dev_priv->rps.min_freq_softlimit =
- max_t(int, dev_priv->rps.efficient_freq,
- intel_freq_opcode(dev_priv, 450));
- else
- dev_priv->rps.min_freq_softlimit =
- dev_priv->rps.min_freq;
- }
+ set(dev_priv, freq);
}
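
reset_rps() exists because the platform set() callbacks may early-out when asked for the frequency they believe is already programmed; poisoning the cached power state and current frequency with -1 defeats that check, so the saved frequency really is rewritten to the hardware. A sketch of the pattern with hypothetical names:

#include <stdio.h>

/* Hypothetical device state, standing in for dev_priv->rps. */
struct rps {
	int cur_freq;
	int power;
};

/* A set() callback typically skips redundant writes... */
static void set_freq(struct rps *rps, int freq)
{
	if (freq == rps->cur_freq)
		return;	/* nothing to do */
	printf("programming hw to freq %d\n", freq);
	rps->cur_freq = freq;
}

/* ...so a forced reprogram poisons the cached state first. */
static void reset(struct rps *rps, void (*set)(struct rps *, int))
{
	int freq = rps->cur_freq;

	rps->power = -1;	/* force power-state re-evaluation */
	rps->cur_freq = -1;	/* defeat the no-change early-out */
	set(rps, freq);
}

int main(void)
{
	struct rps rps = { .cur_freq = 300, .power = 0 };

	set_freq(&rps, 300);	/* skipped: already at 300 */
	reset(&rps, set_freq);	/* reprograms 300 despite the cache */
	return 0;
}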
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
@@ -5362,8 +5320,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- gen6_init_rps_frequencies(dev_priv);
-
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
/*
@@ -5393,8 +5349,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
/* Leaning on the below call to gen6_set_rps to program/setup the
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
- dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
+ reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5481,9 +5436,6 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
- /* Initialize rps frequencies */
- gen6_init_rps_frequencies(dev_priv);
-
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
@@ -5540,8 +5492,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
/* 6: Ring frequency + overclocking (our driver does this later) */
- dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
+ reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5549,7 +5500,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
+ u32 rc6vids, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
int ret;
@@ -5573,9 +5524,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- /* Initialize rps frequencies */
- gen6_init_rps_frequencies(dev_priv);
-
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -5626,16 +5574,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
if (ret)
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
- ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
- if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
- (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
- (pcu_mbox & 0xff) * 50);
- dev_priv->rps.max_freq = pcu_mbox & 0xff;
- }
-
- dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
+ reset_rps(dev_priv, gen6_set_rps);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -5654,7 +5593,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
int min_freq = 15;
unsigned int gpu_freq;
@@ -5738,23 +5677,13 @@ static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
}
}
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- if (!HAS_CORE_RING_FREQ(dev_priv))
- return;
-
- mutex_lock(&dev_priv->rps.hw_lock);
- __gen6_update_ring_freq(dev_priv);
- mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (INTEL_INFO(dev_priv)->eu_total) {
+ switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
case 8:
/* (2 * 4) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5892,8 +5821,6 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
u32 pcbr;
int pctx_size = 24*1024;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
@@ -5929,7 +5856,6 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
@@ -5937,7 +5863,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
if (WARN_ON(!dev_priv->vlv_pctx))
return;
- drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
+ i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
dev_priv->vlv_pctx = NULL;
}
@@ -5960,8 +5886,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
vlv_init_gpll_ref_freq(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
-
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
switch ((val >> 6) & 3) {
case 0:
@@ -5997,17 +5921,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
-
- dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-
- /* Preserve min/max settings in case of re-init */
- if (dev_priv->rps.max_freq_softlimit == 0)
- dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
- if (dev_priv->rps.min_freq_softlimit == 0)
- dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
- mutex_unlock(&dev_priv->rps.hw_lock);
}
static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
@@ -6018,8 +5931,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
vlv_init_gpll_ref_freq(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
-
mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
mutex_unlock(&dev_priv->sb_lock);
@@ -6061,17 +5972,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
dev_priv->rps.rp1_freq |
dev_priv->rps.min_freq) & 1,
"Odd GPU freq values\n");
-
- dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
-
- /* Preserve min/max settings in case of re-init */
- if (dev_priv->rps.max_freq_softlimit == 0)
- dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
- if (dev_priv->rps.min_freq_softlimit == 0)
- dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
-
- mutex_unlock(&dev_priv->rps.hw_lock);
}
static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -6162,16 +6062,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
- dev_priv->rps.cur_freq = (val >> 8) & 0xff;
- DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
- dev_priv->rps.cur_freq);
-
- DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
- dev_priv->rps.idle_freq);
-
- valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
+ reset_rps(dev_priv, valleyview_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -6251,16 +6142,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
- dev_priv->rps.cur_freq = (val >> 8) & 0xff;
- DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
- dev_priv->rps.cur_freq);
-
- DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
- dev_priv->rps.idle_freq);
-
- valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
+ reset_rps(dev_priv, valleyview_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -6589,19 +6471,11 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
*/
bool i915_gpu_busy(void)
{
- struct drm_i915_private *dev_priv;
- struct intel_engine_cs *engine;
bool ret = false;
spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- for_each_engine(engine, dev_priv)
- ret |= !list_empty(&engine->request_list);
-
-out_unlock:
+ if (i915_mch_dev)
+ ret = i915_mch_dev->gt.awake;
spin_unlock_irq(&mchdev_lock);
return ret;
@@ -6757,10 +6631,51 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
intel_runtime_pm_get(dev_priv);
}
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ /* Initialize RPS limits (for userspace) */
if (IS_CHERRYVIEW(dev_priv))
cherryview_init_gt_powersave(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_init_gt_powersave(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_init_rps_frequencies(dev_priv);
+
+ /* Derive initial user preferences/limits from the hardware limits */
+ dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
+ dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
+
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ dev_priv->rps.min_freq_softlimit =
+ max_t(int,
+ dev_priv->rps.efficient_freq,
+ intel_freq_opcode(dev_priv, 450));
+
+ /* After setting max-softlimit, find the overclock max freq */
+ if (IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
+ u32 params = 0;
+
+ sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
+ if (params & BIT(31)) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+ (dev_priv->rps.max_freq & 0xff) * 50,
+ (params & 0xff) * 50);
+ dev_priv->rps.max_freq = params & 0xff;
+ }
+ }
+
+ /* Finally allow us to boost to max by default */
+ dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ intel_autoenable_gt_powersave(dev_priv);
}
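
Per the hunk above, the overclocking probe reads GEN6_READ_OC_PARAMS through pcode: bit 31 advertises OC support and the low byte is the overclock ceiling in 50 MHz units. A decode sketch (the reply value is invented):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int params = BIT(31) | 0x2d;	/* hypothetical pcode reply */

	if (params & BIT(31))
		printf("overclock max: %u MHz\n", (params & 0xff) * 50);
	else
		printf("no overclocking support\n");
	return 0;
}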
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -6772,13 +6687,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
-static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
-{
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- gen6_disable_rps_interrupts(dev_priv);
-}
-
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev_priv: i915 device
@@ -6792,60 +6700,76 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 6)
return;
- gen6_suspend_rps(dev_priv);
+ if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
+ intel_runtime_pm_put(dev_priv);
- /* Force GPU to min freq during suspend */
- gen6_rps_idle(dev_priv);
+ /* gen6_rps_idle() will be called later to disable interrupts */
+}
+
+void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
+{
+ dev_priv->rps.enabled = true; /* force disabling */
+ intel_disable_gt_powersave(dev_priv);
+
+ gen6_reset_rps_interrupts(dev_priv);
}
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
- if (IS_IRONLAKE_M(dev_priv)) {
- ironlake_disable_drps(dev_priv);
- } else if (INTEL_INFO(dev_priv)->gen >= 6) {
- intel_suspend_gt_powersave(dev_priv);
+ if (!READ_ONCE(dev_priv->rps.enabled))
+ return;
- mutex_lock(&dev_priv->rps.hw_lock);
- if (INTEL_INFO(dev_priv)->gen >= 9) {
- gen9_disable_rc6(dev_priv);
- gen9_disable_rps(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv))
- cherryview_disable_rps(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_disable_rps(dev_priv);
- else
- gen6_disable_rps(dev_priv);
+ mutex_lock(&dev_priv->rps.hw_lock);
- dev_priv->rps.enabled = false;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ if (INTEL_GEN(dev_priv) >= 9) {
+ gen9_disable_rc6(dev_priv);
+ gen9_disable_rps(dev_priv);
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ cherryview_disable_rps(dev_priv);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ valleyview_disable_rps(dev_priv);
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ gen6_disable_rps(dev_priv);
+ } else if (IS_IRONLAKE_M(dev_priv)) {
+ ironlake_disable_drps(dev_priv);
}
+
+ dev_priv->rps.enabled = false;
+ mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void intel_gen6_powersave_work(struct work_struct *work)
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- rps.delayed_resume_work.work);
+ /* We shouldn't be disabling as we submit, so this should be less
+ * racy than it appears!
+ */
+ if (READ_ONCE(dev_priv->rps.enabled))
+ return;
- mutex_lock(&dev_priv->rps.hw_lock);
+ /* Powersaving is controlled by the host when inside a VM */
+ if (intel_vgpu_active(dev_priv))
+ return;
- gen6_reset_rps_interrupts(dev_priv);
+ mutex_lock(&dev_priv->rps.hw_lock);
if (IS_CHERRYVIEW(dev_priv)) {
cherryview_enable_rps(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv)) {
valleyview_enable_rps(dev_priv);
- } else if (INTEL_INFO(dev_priv)->gen >= 9) {
+ } else if (INTEL_GEN(dev_priv) >= 9) {
gen9_enable_rc6(dev_priv);
gen9_enable_rps(dev_priv);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- __gen6_update_ring_freq(dev_priv);
+ gen6_update_ring_freq(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
gen8_enable_rps(dev_priv);
- __gen6_update_ring_freq(dev_priv);
- } else {
+ gen6_update_ring_freq(dev_priv);
+ } else if (INTEL_GEN(dev_priv) >= 6) {
gen6_enable_rps(dev_priv);
- __gen6_update_ring_freq(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ } else if (IS_IRONLAKE_M(dev_priv)) {
+ ironlake_enable_drps(dev_priv);
+ intel_init_emon(dev_priv);
}
WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6855,25 +6779,52 @@ static void intel_gen6_powersave_work(struct work_struct *work)
WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
dev_priv->rps.enabled = true;
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void __intel_autoenable_gt_powersave(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
+ struct intel_engine_cs *rcs;
+ struct drm_i915_gem_request *req;
- gen6_enable_rps_interrupts(dev_priv);
+ if (READ_ONCE(dev_priv->rps.enabled))
+ goto out;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ rcs = &dev_priv->engine[RCS];
+ if (rcs->last_context)
+ goto out;
+
+ if (!rcs->init_context)
+ goto out;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
+ if (IS_ERR(req))
+ goto unlock;
+
+ if (!i915.enable_execlists && i915_switch_context(req) == 0)
+ rcs->init_context(req);
+
+ /* Mark the device busy, calling intel_enable_gt_powersave() */
+ i915_add_request_no_flush(req);
+
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+out:
intel_runtime_pm_put(dev_priv);
}
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev_priv))
+ if (READ_ONCE(dev_priv->rps.enabled))
return;
if (IS_IRONLAKE_M(dev_priv)) {
ironlake_enable_drps(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
intel_init_emon(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
} else if (INTEL_INFO(dev_priv)->gen >= 6) {
/*
* PCU communication is slow and this doesn't need to be
@@ -6887,21 +6838,13 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
* paths, so the _noresume version is enough (and in case of
* runtime resume it's necessary).
*/
- if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
- round_jiffies_up_relative(HZ)))
+ if (queue_delayed_work(dev_priv->wq,
+ &dev_priv->rps.autoenable_work,
+ round_jiffies_up_relative(HZ)))
intel_runtime_pm_get_noresume(dev_priv);
}
}
-void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (INTEL_INFO(dev_priv)->gen < 6)
- return;
-
- gen6_suspend_rps(dev_priv);
- dev_priv->rps.enabled = false;
-}
-
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -8046,7 +7989,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
if (!i915_gem_request_completed(req))
gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
- i915_gem_request_unreference(req);
+ i915_gem_request_put(req);
kfree(boost);
}
@@ -8064,8 +8007,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
if (boost == NULL)
return;
- i915_gem_request_reference(req);
- boost->req = req;
+ boost->req = i915_gem_request_get(req);
INIT_WORK(&boost->work, __intel_rps_boost_work);
queue_work(req->i915->wq, &boost->work);
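
Folding the reference grab into the assignment works because the _get() helper returns the pointer it was given, which keeps acquisition and ownership on one line. A toy version of the idiom (names hypothetical):

#include <stdio.h>

struct req { int refcount; };

/* get() returns its argument so a reference can be taken inline. */
static struct req *req_get(struct req *r)
{
	r->refcount++;
	return r;
}

int main(void)
{
	struct req r = { .refcount = 1 };
	struct req *owned = req_get(&r);	/* boost->req = ..._get(req) */

	printf("refcount = %d\n", owned->refcount);	/* prints 2 */
	return 0;
}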
@@ -8078,11 +8020,9 @@ void intel_pm_setup(struct drm_device *dev)
mutex_init(&dev_priv->rps.hw_lock);
spin_lock_init(&dev_priv->rps.client_lock);
- INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
- intel_gen6_powersave_work);
+ INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
+ __intel_autoenable_gt_powersave);
INIT_LIST_HEAD(&dev_priv->rps.clients);
- INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
- INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index cf171b4b8c67..108ba1e5d658 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -645,9 +645,8 @@ unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void intel_psr_exit(struct drm_device *dev)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -656,7 +655,7 @@ static void intel_psr_exit(struct drm_device *dev)
if (!dev_priv->psr.active)
return;
- if (HAS_DDI(dev)) {
+ if (HAS_DDI(dev_priv)) {
val = I915_READ(EDP_PSR_CTL);
WARN_ON(!(val & EDP_PSR_ENABLE));
@@ -691,7 +690,7 @@ static void intel_psr_exit(struct drm_device *dev)
/**
* intel_psr_single_frame_update - Single Frame Update
- * @dev: DRM device
+ * @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* Some platforms support a single frame update feature that is used to
@@ -699,10 +698,9 @@ static void intel_psr_exit(struct drm_device *dev)
* So far it is only implemented for Valleyview and Cherryview because
* hardware requires this to be done before a page flip.
*/
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
u32 val;
@@ -711,7 +709,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
* Single frame update is already supported on BDW+ but it requires
* many W/A and it isn't really needed.
*/
- if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
mutex_lock(&dev_priv->psr.lock);
@@ -737,7 +735,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
/**
* intel_psr_invalidate - Invalidate PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
@@ -747,10 +745,9 @@ void intel_psr_single_frame_update(struct drm_device *dev,
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -767,14 +764,14 @@ void intel_psr_invalidate(struct drm_device *dev,
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
if (frontbuffer_bits)
- intel_psr_exit(dev);
+ intel_psr_exit(dev_priv);
mutex_unlock(&dev_priv->psr.lock);
}
/**
* intel_psr_flush - Flush PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -785,10 +782,9 @@ void intel_psr_invalidate(struct drm_device *dev,
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -806,7 +802,7 @@ void intel_psr_flush(struct drm_device *dev,
/* By definition flush = invalidate + flush */
if (frontbuffer_bits)
- intel_psr_exit(dev);
+ intel_psr_exit(dev_priv);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
if (!work_busy(&dev_priv->psr.work.work))
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index 5bd69852752c..08f6fea05a2c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -24,12 +24,13 @@
#ifndef _INTEL_RENDERSTATE_H
#define _INTEL_RENDERSTATE_H
-#include "i915_drv.h"
+#include <linux/types.h>
-extern const struct intel_renderstate_rodata gen6_null_state;
-extern const struct intel_renderstate_rodata gen7_null_state;
-extern const struct intel_renderstate_rodata gen8_null_state;
-extern const struct intel_renderstate_rodata gen9_null_state;
+struct intel_renderstate_rodata {
+ const u32 *reloc;
+ const u32 *batch;
+ const u32 batch_items;
+};
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
@@ -38,4 +39,9 @@ extern const struct intel_renderstate_rodata gen9_null_state;
.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
}
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
+
#endif /* INTEL_RENDERSTATE_H */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d3161bbea24..ed9955dce156 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -47,57 +47,44 @@ int __intel_ring_space(int head, int tail, int size)
return space - I915_RING_FREE_SPACE;
}
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+void intel_ring_update_space(struct intel_ring *ring)
{
- if (ringbuf->last_retired_head != -1) {
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
+ if (ring->last_retired_head != -1) {
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
}
- ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
- ringbuf->tail, ringbuf->size);
-}
-
-static void __intel_ring_advance(struct intel_engine_cs *engine)
-{
- struct intel_ringbuffer *ringbuf = engine->buffer;
- ringbuf->tail &= ringbuf->size - 1;
- engine->write_tail(engine, ringbuf->tail);
+ ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
+ ring->tail, ring->size);
}
static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req,
- u32 invalidate_domains,
- u32 flush_domains)
+gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
u32 cmd;
int ret;
cmd = MI_FLUSH;
- if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
- cmd |= MI_NO_WRITE_FLUSH;
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(engine, cmd);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req,
- u32 invalidate_domains,
- u32 flush_domains)
+gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
u32 cmd;
int ret;
@@ -129,23 +116,20 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
* are flushed at any MI_FLUSH.
*/
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd = MI_FLUSH;
+ if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
-
- if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
- (IS_G4X(req->i915) || IS_GEN5(req->i915)))
- cmd |= MI_INVALIDATE_ISP;
+ if (IS_G4X(req->i915) || IS_GEN5(req->i915))
+ cmd |= MI_INVALIDATE_ISP;
+ }
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(engine, cmd);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
@@ -190,45 +174,46 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = req->engine;
- u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ struct intel_ring *ring = req->ring;
+ u32 scratch_addr =
+ i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(engine, 0); /* low dword */
- intel_ring_emit(engine, 0); /* high dword */
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0); /* low dword */
+ intel_ring_emit(ring, 0); /* high dword */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
- intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req,
- u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
+ u32 scratch_addr =
+ i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
u32 flags = 0;
- u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -240,7 +225,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
* number of bits based on the write domains has little performance
* impact.
*/
- if (flush_domains) {
+ if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/*
@@ -249,7 +234,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
*/
flags |= PIPE_CONTROL_CS_STALL;
}
- if (invalidate_domains) {
+ if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -266,11 +251,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(engine, flags);
- intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(engine, 0);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
return 0;
}
@@ -278,30 +263,31 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, 0);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
return 0;
}
static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req,
- u32 invalidate_domains, u32 flush_domains)
+gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
+ u32 scratch_addr =
+ i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
u32 flags = 0;
- u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/*
@@ -318,13 +304,13 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
* number of bits based on the write domains has little performance
* impact.
*/
- if (flush_domains) {
+ if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
- if (invalidate_domains) {
+ if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -350,11 +336,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(engine, flags);
- intel_ring_emit(engine, scratch_addr);
- intel_ring_emit(engine, 0);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
return 0;
}
@@ -363,41 +349,41 @@ static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(engine, flags);
- intel_ring_emit(engine, scratch_addr);
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, 0);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
return 0;
}
static int
-gen8_render_ring_flush(struct drm_i915_gem_request *req,
- u32 invalidate_domains, u32 flush_domains)
+gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
+ u32 scratch_addr =
+ i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
u32 flags = 0;
- u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
flags |= PIPE_CONTROL_CS_STALL;
- if (flush_domains) {
+ if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
- if (invalidate_domains) {
+ if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -419,14 +405,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
-static void ring_write_tail(struct intel_engine_cs *engine,
- u32 value)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- I915_WRITE_TAIL(engine, value);
-}
-
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
@@ -488,7 +467,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
mmio = RING_HWS_PGA(engine->mmio_base);
}
- I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
+ I915_WRITE(mmio, engine->status_page.ggtt_offset);
POSTING_READ(mmio);
/*
@@ -519,7 +498,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- if (!IS_GEN2(dev_priv)) {
+ if (INTEL_GEN(dev_priv) > 2) {
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
if (intel_wait_for_register(dev_priv,
RING_MI_MODE(engine->mmio_base),
@@ -539,9 +518,9 @@ static bool stop_ring(struct intel_engine_cs *engine)
I915_WRITE_CTL(engine, 0);
I915_WRITE_HEAD(engine, 0);
- engine->write_tail(engine, 0);
+ I915_WRITE_TAIL(engine, 0);
- if (!IS_GEN2(dev_priv)) {
+ if (INTEL_GEN(dev_priv) > 2) {
(void)I915_READ_CTL(engine);
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
@@ -549,16 +528,10 @@ static bool stop_ring(struct intel_engine_cs *engine)
return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
- memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-}
-
static int init_ring_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct intel_ringbuffer *ringbuf = engine->buffer;
- struct drm_i915_gem_object *obj = ringbuf->obj;
+ struct intel_ring *ring = engine->buffer;
int ret = 0;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -586,10 +559,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
}
}
- if (I915_NEED_GFX_HWS(dev_priv))
- intel_ring_setup_status_page(engine);
- else
+ if (HWS_NEEDS_PHYSICAL(dev_priv))
ring_setup_phys_status_page(engine);
+ else
+ intel_ring_setup_status_page(engine);
+
+ intel_engine_reset_breadcrumbs(engine);
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
@@ -598,40 +573,39 @@ static int init_ring_common(struct intel_engine_cs *engine)
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (I915_READ_HEAD(engine))
DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
engine->name, I915_READ_HEAD(engine));
- I915_WRITE_HEAD(engine, 0);
- (void)I915_READ_HEAD(engine);
+
+ intel_ring_update_space(ring);
+ I915_WRITE_HEAD(engine, ring->head);
+ I915_WRITE_TAIL(engine, ring->tail);
+ (void)I915_READ_TAIL(engine);
I915_WRITE_CTL(engine,
- ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
- I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
- (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
+ if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
+ RING_VALID, RING_VALID,
+ 50)) {
DRM_ERROR("%s initialization failed "
- "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
I915_READ_CTL(engine),
I915_READ_CTL(engine) & RING_VALID,
- I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+ I915_READ_HEAD(engine), ring->head,
+ I915_READ_TAIL(engine), ring->tail,
I915_READ_START(engine),
- (unsigned long)i915_gem_obj_ggtt_offset(obj));
+ i915_ggtt_offset(ring->vma));
ret = -EIO;
goto out;
}
- ringbuf->last_retired_head = -1;
- ringbuf->head = I915_READ_HEAD(engine);
- ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
- intel_ring_update_space(ringbuf);
-
intel_engine_init_hangcheck(engine);
out:
@@ -640,59 +614,25 @@ out:
return ret;
}
-void intel_fini_pipe_control(struct intel_engine_cs *engine)
+static void reset_ring_common(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *request)
{
- if (engine->scratch.obj == NULL)
- return;
-
- i915_gem_object_ggtt_unpin(engine->scratch.obj);
- drm_gem_object_unreference(&engine->scratch.obj->base);
- engine->scratch.obj = NULL;
-}
-
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
-{
- struct drm_i915_gem_object *obj;
- int ret;
-
- WARN_ON(engine->scratch.obj);
-
- obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
- if (!obj)
- obj = i915_gem_object_create(&engine->i915->drm, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- ret = PTR_ERR(obj);
- goto err;
- }
-
- ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
- if (ret)
- goto err_unref;
+ struct intel_ring *ring = request->ring;
- engine->scratch.obj = obj;
- engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
- DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
- engine->name, engine->scratch.gtt_offset);
- return 0;
-
-err_unref:
- drm_gem_object_unreference(&engine->scratch.obj->base);
-err:
- return ret;
+ ring->head = request->postfix;
+ ring->last_retired_head = -1;
}
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
int ret, i;
if (w->count == 0)
return 0;
- engine->gpu_caches_dirty = true;
- ret = intel_ring_flush_all_caches(req);
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
@@ -700,17 +640,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
if (ret)
return ret;
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(engine, w->reg[i].addr);
- intel_ring_emit(engine, w->reg[i].value);
+ intel_ring_emit_reg(ring, w->reg[i].addr);
+ intel_ring_emit(ring, w->reg[i].value);
}
- intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_advance(ring);
- engine->gpu_caches_dirty = true;
- ret = intel_ring_flush_all_caches(req);
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
@@ -1022,7 +961,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
+ if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
continue;
/*
@@ -1031,7 +970,7 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
+ ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
@@ -1329,191 +1268,194 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- if (dev_priv->semaphore_obj) {
- i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
- drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
- dev_priv->semaphore_obj = NULL;
- }
-
- intel_fini_pipe_control(engine);
+ i915_vma_unpin_and_release(&dev_priv->semaphore);
}
-static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
- unsigned int num_dwords)
+static int gen8_rcs_signal(struct drm_i915_gem_request *req)
{
-#define MBOX_UPDATE_DWORDS 8
- struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_i915_private *dev_priv = signaller_req->i915;
+ struct intel_ring *ring = req->ring;
+ struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
- num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
- ret = intel_ring_begin(signaller_req, num_dwords);
+ num_rings = INTEL_INFO(dev_priv)->num_rings;
+ ret = intel_ring_begin(req, (num_rings-1) * 8);
if (ret)
return ret;
for_each_engine_id(waiter, dev_priv, id) {
- u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+ u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- intel_ring_emit(signaller, lower_32_bits(gtt_offset));
- intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller_req->seqno);
- intel_ring_emit(signaller, 0);
- intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(signaller, 0);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring,
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ intel_ring_emit(ring, lower_32_bits(gtt_offset));
+ intel_ring_emit(ring, upper_32_bits(gtt_offset));
+ intel_ring_emit(ring, req->fence.seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ intel_ring_emit(ring, 0);
}
+ intel_ring_advance(ring);
return 0;
}
-static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
- unsigned int num_dwords)
+static int gen8_xcs_signal(struct drm_i915_gem_request *req)
{
-#define MBOX_UPDATE_DWORDS 6
- struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_i915_private *dev_priv = signaller_req->i915;
+ struct intel_ring *ring = req->ring;
+ struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
- num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
- ret = intel_ring_begin(signaller_req, num_dwords);
+ num_rings = INTEL_INFO(dev_priv)->num_rings;
+ ret = intel_ring_begin(req, (num_rings-1) * 6);
if (ret)
return ret;
for_each_engine_id(waiter, dev_priv, id) {
- u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+ u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
- MI_FLUSH_DW_OP_STOREDW);
- intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller_req->seqno);
- intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- intel_ring_emit(signaller, 0);
+ intel_ring_emit(ring,
+ (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+ intel_ring_emit(ring,
+ lower_32_bits(gtt_offset) |
+ MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, upper_32_bits(gtt_offset));
+ intel_ring_emit(ring, req->fence.seqno);
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
+ intel_ring_emit(ring, 0);
}
+ intel_ring_advance(ring);
return 0;
}
-static int gen6_signal(struct drm_i915_gem_request *signaller_req,
- unsigned int num_dwords)
+static int gen6_signal(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_i915_private *dev_priv = signaller_req->i915;
- struct intel_engine_cs *useless;
- enum intel_engine_id id;
+ struct intel_ring *ring = req->ring;
+ struct drm_i915_private *dev_priv = req->i915;
+ struct intel_engine_cs *engine;
int ret, num_rings;
-#define MBOX_UPDATE_DWORDS 3
- num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
- num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
-#undef MBOX_UPDATE_DWORDS
-
- ret = intel_ring_begin(signaller_req, num_dwords);
+ num_rings = INTEL_INFO(dev_priv)->num_rings;
+ ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
if (ret)
return ret;
- for_each_engine_id(useless, dev_priv, id) {
- i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
+ for_each_engine(engine, dev_priv) {
+ i915_reg_t mbox_reg;
+ if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
+ continue;
+
+ mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
- intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(signaller, mbox_reg);
- intel_ring_emit(signaller, signaller_req->seqno);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(ring, mbox_reg);
+ intel_ring_emit(ring, req->fence.seqno);
}
}
/* If num_dwords was rounded, make sure the tail pointer is correct */
if (num_rings % 2 == 0)
- intel_ring_emit(signaller, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static void i9xx_submit_request(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_private *dev_priv = request->i915;
+
+ I915_WRITE_TAIL(request->engine,
+ intel_ring_offset(request->ring, request->tail));
+}
+
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
+{
+ struct intel_ring *ring = req->ring;
+ int ret;
+
+ ret = intel_ring_begin(req, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, req->fence.seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ req->tail = ring->tail;
return 0;
}
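The four dwords above are the classic breadcrumb: store the request's
seqno into the hardware status page, then raise a user interrupt. For
orientation, a minimal sketch of the CPU-side readback (the driver's real
helper is intel_read_status_page(); this is shown only to illustrate the
index arithmetic, which mirrors the MI_STORE_DWORD_INDEX emitted above):

	static u32 read_breadcrumb(struct intel_engine_cs *engine)
	{
		/* status_page.page_addr is a u32 array indexed in dwords */
		return READ_ONCE(engine->status_page.page_addr[I915_GEM_HWS_INDEX]);
	}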
/**
- * gen6_add_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_request - Update the semaphore mailbox registers
*
* @req: request to write to the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
-static int
-gen6_add_request(struct drm_i915_gem_request *req)
+static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = req->engine;
int ret;
- if (engine->semaphore.signal)
- ret = engine->semaphore.signal(req, 4);
- else
- ret = intel_ring_begin(req, 4);
-
+ ret = req->engine->semaphore.signal(req);
if (ret)
return ret;
- intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
- intel_ring_emit(engine,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(engine, req->seqno);
- intel_ring_emit(engine, MI_USER_INTERRUPT);
- __intel_ring_advance(engine);
-
- return 0;
+ return i9xx_emit_request(req);
}
-static int
-gen8_render_add_request(struct drm_i915_gem_request *req)
+static int gen8_render_emit_request(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
int ret;
- if (engine->semaphore.signal)
- ret = engine->semaphore.signal(req, 8);
- else
- ret = intel_ring_begin(req, 8);
+ if (engine->semaphore.signal) {
+ ret = engine->semaphore.signal(req);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_ring_begin(req, 8);
if (ret)
return ret;
- intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ intel_ring_emit(ring, intel_hws_seqno_address(engine));
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, i915_gem_request_get_seqno(req));
/* We're thrashing one dword of HWS. */
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, MI_USER_INTERRUPT);
- intel_ring_emit(engine, MI_NOOP);
- __intel_ring_advance(engine);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
- return 0;
-}
+ req->tail = ring->tail;
-static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
- u32 seqno)
-{
- return dev_priv->last_seqno < seqno;
+ return 0;
}
/**
@@ -1525,82 +1467,71 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
*/
static int
-gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
- struct intel_engine_cs *signaller,
- u32 seqno)
+gen8_ring_sync_to(struct drm_i915_gem_request *req,
+ struct drm_i915_gem_request *signal)
{
- struct intel_engine_cs *waiter = waiter_req->engine;
- struct drm_i915_private *dev_priv = waiter_req->i915;
- u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+ struct intel_ring *ring = req->ring;
+ struct drm_i915_private *dev_priv = req->i915;
+ u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
struct i915_hw_ppgtt *ppgtt;
int ret;
- ret = intel_ring_begin(waiter_req, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_SAD_GTE_SDD);
- intel_ring_emit(waiter, seqno);
- intel_ring_emit(waiter, lower_32_bits(offset));
- intel_ring_emit(waiter, upper_32_bits(offset));
- intel_ring_advance(waiter);
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_SAD_GTE_SDD);
+ intel_ring_emit(ring, signal->fence.seqno);
+ intel_ring_emit(ring, lower_32_bits(offset));
+ intel_ring_emit(ring, upper_32_bits(offset));
+ intel_ring_advance(ring);
/* When the !RCS engines idle waiting upon a semaphore, they lose their
* pagetables and we must reload them before executing the batch.
* We do this on the i915_switch_context() following the wait and
* before the dispatch.
*/
- ppgtt = waiter_req->ctx->ppgtt;
- if (ppgtt && waiter_req->engine->id != RCS)
- ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
+ ppgtt = req->ctx->ppgtt;
+ if (ppgtt && req->engine->id != RCS)
+ ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
return 0;
}
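Taken together with gen8_rcs_signal()/gen8_xcs_signal() above, the gen8
semaphore protocol reduces to a per-(signaller, waiter) qword slot in the
semaphore page: the signaller writes its fence.seqno into the slot, and
MI_SEMAPHORE_WAIT with SAD_GTE_SDD stalls the waiter until the slot holds
at least the operand. A toy CPU-side model (illustrative only, not kernel
code; the 5 comes from I915_NUM_ENGINES):

	#include <stdbool.h>
	#include <stdint.h>

	static uint64_t semaphore_slot[5][5];	/* [from][to], as signal_ggtt[] */

	static void model_signal(int from, int to, uint32_t seqno)
	{
		semaphore_slot[from][to] = seqno; /* the PIPE_CONTROL/MI_FLUSH_DW QW write */
	}

	static bool model_wait_done(int from, int to, uint32_t seqno)
	{
		return semaphore_slot[from][to] >= seqno; /* SAD_GTE_SDD */
	}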
static int
-gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
- struct intel_engine_cs *signaller,
- u32 seqno)
+gen6_ring_sync_to(struct drm_i915_gem_request *req,
+ struct drm_i915_gem_request *signal)
{
- struct intel_engine_cs *waiter = waiter_req->engine;
+ struct intel_ring *ring = req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
- u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+ u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
int ret;
- /* Throughout all of the GEM code, seqno passed implies our current
- * seqno is >= the last seqno executed. However for hardware the
- * comparison is strictly greater than.
- */
- seqno -= 1;
-
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
- ret = intel_ring_begin(waiter_req, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- /* If seqno wrap happened, omit the wait with no-ops */
- if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
- intel_ring_emit(waiter, dw1 | wait_mbox);
- intel_ring_emit(waiter, seqno);
- intel_ring_emit(waiter, 0);
- intel_ring_emit(waiter, MI_NOOP);
- } else {
- intel_ring_emit(waiter, MI_NOOP);
- intel_ring_emit(waiter, MI_NOOP);
- intel_ring_emit(waiter, MI_NOOP);
- intel_ring_emit(waiter, MI_NOOP);
- }
- intel_ring_advance(waiter);
+ intel_ring_emit(ring, dw1 | wait_mbox);
+ /* Throughout all of the GEM code, seqno passed implies our current
+ * seqno is >= the last seqno executed. However for hardware the
+ * comparison is strictly greater than.
+ */
+ intel_ring_emit(ring, signal->fence.seqno - 1);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
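The seqno - 1 above is worth spelling out: the driver's convention is that
a seqno has passed once the mailbox value is >= seqno, but MI_SEMAPHORE_MBOX
with MI_SEMAPHORE_COMPARE completes only on a strictly-greater comparison.
Waiting on seqno - 1 converts one into the other (sketch, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	static bool hw_semaphore_done(uint32_t mbox, uint32_t operand)
	{
		return mbox > operand;			/* hardware: strictly greater */
	}

	static bool driver_seqno_passed(uint32_t mbox, uint32_t seqno)
	{
		return hw_semaphore_done(mbox, seqno - 1); /* == (mbox >= seqno) */
	}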
static void
-gen5_seqno_barrier(struct intel_engine_cs *ring)
+gen5_seqno_barrier(struct intel_engine_cs *engine)
{
/* MI_STORE are internally buffered by the GPU and not flushed
* either by MI_FLUSH or SyncFlush or any other combination of
@@ -1693,40 +1624,18 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
}
static int
-bsd_ring_flush(struct drm_i915_gem_request *req,
- u32 invalidate_domains,
- u32 flush_domains)
+bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(engine, MI_FLUSH);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
- return 0;
-}
-
-static int
-i9xx_add_request(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
-
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
- intel_ring_emit(engine,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(engine, req->seqno);
- intel_ring_emit(engine, MI_USER_INTERRUPT);
- __intel_ring_advance(engine);
-
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
@@ -1788,24 +1697,24 @@ gen8_irq_disable(struct intel_engine_cs *engine)
}
static int
-i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
- u64 offset, u32 length,
- unsigned dispatch_flags)
+i965_emit_bb_start(struct drm_i915_gem_request *req,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(engine,
+ intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
- intel_ring_emit(engine, offset);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
return 0;
}
@@ -1815,12 +1724,12 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
-i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
- u64 offset, u32 len,
- unsigned dispatch_flags)
+i830_emit_bb_start(struct drm_i915_gem_request *req,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
{
- struct intel_engine_cs *engine = req->engine;
- u32 cs_offset = engine->scratch.gtt_offset;
+ struct intel_ring *ring = req->ring;
+ u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
int ret;
ret = intel_ring_begin(req, 6);
@@ -1828,13 +1737,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
return ret;
/* Evict the invalid PTE TLBs */
- intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
- intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
- intel_ring_emit(engine, cs_offset);
- intel_ring_emit(engine, 0xdeadbeef);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+ intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 0xdeadbeef);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
@@ -1848,17 +1757,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(engine,
+ intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(ring,
BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
- intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
- intel_ring_emit(engine, cs_offset);
- intel_ring_emit(engine, 4096);
- intel_ring_emit(engine, offset);
+ intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+ intel_ring_emit(ring, cs_offset);
+ intel_ring_emit(ring, 4096);
+ intel_ring_emit(ring, offset);
- intel_ring_emit(engine, MI_FLUSH);
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
/* ... and execute it. */
offset = cs_offset;
@@ -1868,30 +1777,30 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(engine);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(ring);
return 0;
}
static int
-i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
- u64 offset, u32 len,
- unsigned dispatch_flags)
+i915_emit_bb_start(struct drm_i915_gem_request *req,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(engine);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(ring);
return 0;
}
@@ -1909,79 +1818,79 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
static void cleanup_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
- obj = engine->status_page.obj;
- if (obj == NULL)
+ vma = fetch_and_zero(&engine->status_page.vma);
+ if (!vma)
return;
- kunmap(sg_page(obj->pages->sgl));
- i915_gem_object_ggtt_unpin(obj);
- drm_gem_object_unreference(&obj->base);
- engine->status_page.obj = NULL;
+ i915_vma_unpin(vma);
+ i915_gem_object_unpin_map(vma->obj);
+ i915_vma_put(vma);
}
static int init_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_object *obj = engine->status_page.obj;
-
- if (obj == NULL) {
- unsigned flags;
- int ret;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int flags;
+ int ret;
- obj = i915_gem_object_create(&engine->i915->drm, 4096);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate status page\n");
- return PTR_ERR(obj);
- }
+ obj = i915_gem_object_create(&engine->i915->drm, 4096);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate status page\n");
+ return PTR_ERR(obj);
+ }
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- if (ret)
- goto err_unref;
-
- flags = 0;
- if (!HAS_LLC(engine->i915))
- /* On g33, we cannot place HWS above 256MiB, so
- * restrict its pinning to the low mappable arena.
- * Though this restriction is not documented for
- * gen4, gen5, or byt, they also behave similarly
- * and hang if the HWS is placed at the top of the
- * GTT. To generalise, it appears that all !llc
- * platforms have issues with us placing the HWS
- * above the mappable region (even though we never
- * actually map it).
- */
- flags |= PIN_MAPPABLE;
- ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
- if (ret) {
-err_unref:
- drm_gem_object_unreference(&obj->base);
- return ret;
- }
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ if (ret)
+ goto err;
- engine->status_page.obj = obj;
+ vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
}
- engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
- engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
- memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+ flags = PIN_GLOBAL;
+ if (!HAS_LLC(engine->i915))
+ /* On g33, we cannot place HWS above 256MiB, so
+ * restrict its pinning to the low mappable arena.
+ * Though this restriction is not documented for
+ * gen4, gen5, or byt, they also behave similarly
+ * and hang if the HWS is placed at the top of the
+ * GTT. To generalise, it appears that all !llc
+ * platforms have issues with us placing the HWS
+ * above the mappable region (even though we never
+ * actually map it).
+ */
+ flags |= PIN_MAPPABLE;
+ ret = i915_vma_pin(vma, 0, 4096, flags);
+ if (ret)
+ goto err;
- DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
- engine->name, engine->status_page.gfx_addr);
+ engine->status_page.vma = vma;
+ engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
+ engine->status_page.page_addr =
+ i915_gem_object_pin_map(obj, I915_MAP_WB);
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ engine->name, i915_ggtt_offset(vma));
return 0;
+
+err:
+ i915_gem_object_put(obj);
+ return ret;
}
static int init_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- if (!dev_priv->status_page_dmah) {
- dev_priv->status_page_dmah =
- drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
- if (!dev_priv->status_page_dmah)
- return -ENOMEM;
- }
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(engine->status_page.page_addr, 0, PAGE_SIZE);
@@ -1989,115 +1898,105 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
return 0;
}
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(ringbuf->vma == NULL);
- GEM_BUG_ON(ringbuf->virtual_start == NULL);
-
- if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
- i915_gem_object_unpin_map(ringbuf->obj);
- else
- i915_vma_unpin_iomap(ringbuf->vma);
- ringbuf->virtual_start = NULL;
-
- i915_gem_object_ggtt_unpin(ringbuf->obj);
- ringbuf->vma = NULL;
-}
-
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
- struct intel_ringbuffer *ringbuf)
+int intel_ring_pin(struct intel_ring *ring)
{
- struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- unsigned flags = PIN_OFFSET_BIAS | 4096;
+ unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
+ enum i915_map_type map;
+ struct i915_vma *vma = ring->vma;
void *addr;
int ret;
- if (HAS_LLC(dev_priv) && !obj->stolen) {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
- if (ret)
- return ret;
+ GEM_BUG_ON(ring->vaddr);
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret)
- goto err_unpin;
+ map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
- addr = i915_gem_object_pin_map(obj);
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto err_unpin;
- }
- } else {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
- flags | PIN_MAPPABLE);
- if (ret)
- return ret;
+ if (vma->obj->stolen)
+ flags |= PIN_MAPPABLE;
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto err_unpin;
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ else
+ ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
+ if (unlikely(ret))
+ return ret;
+ }
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(dev_priv);
+ ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
+ if (unlikely(ret))
+ return ret;
- addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto err_unpin;
- }
- }
+ if (i915_vma_is_map_and_fenceable(vma))
+ addr = (void __force *)i915_vma_pin_iomap(vma);
+ else
+ addr = i915_gem_object_pin_map(vma->obj, map);
+ if (IS_ERR(addr))
+ goto err;
- ringbuf->virtual_start = addr;
- ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+ ring->vaddr = addr;
return 0;
-err_unpin:
- i915_gem_object_ggtt_unpin(obj);
- return ret;
+err:
+ i915_vma_unpin(vma);
+ return PTR_ERR(addr);
}
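The mapping choice in intel_ring_pin() condenses to a small decision table
(a sketch under the cases visible above: map-and-fenceable VMAs go through
the GTT aperture as an __iomem map, otherwise the object is CPU-mapped,
write-back on LLC platforms and write-combined elsewhere):

	enum ring_map { RING_MAP_GTT_IOMAP, RING_MAP_CPU_WB, RING_MAP_CPU_WC };

	static enum ring_map ring_map_choice(bool map_and_fenceable, bool has_llc)
	{
		if (map_and_fenceable)
			return RING_MAP_GTT_IOMAP;	/* i915_vma_pin_iomap() */
		return has_llc ? RING_MAP_CPU_WB	/* pin_map(I915_MAP_WB) */
			       : RING_MAP_CPU_WC;	/* pin_map(I915_MAP_WC) */
	}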
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_ring_unpin(struct intel_ring *ring)
{
- drm_gem_object_unreference(&ringbuf->obj->base);
- ringbuf->obj = NULL;
+ GEM_BUG_ON(!ring->vma);
+ GEM_BUG_ON(!ring->vaddr);
+
+ if (i915_vma_is_map_and_fenceable(ring->vma))
+ i915_vma_unpin_iomap(ring->vma);
+ else
+ i915_gem_object_unpin_map(ring->vma->obj);
+ ring->vaddr = NULL;
+
+ i915_vma_unpin(ring->vma);
}
-static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
- struct intel_ringbuffer *ringbuf)
+static struct i915_vma *
+intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
- obj = NULL;
- if (!HAS_LLC(dev))
- obj = i915_gem_object_create_stolen(dev, ringbuf->size);
- if (obj == NULL)
- obj = i915_gem_object_create(dev, ringbuf->size);
+ obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
+ if (!obj)
+ obj = i915_gem_object_create(&dev_priv->drm, size);
if (IS_ERR(obj))
- return PTR_ERR(obj);
+ return ERR_CAST(obj);
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
- ringbuf->obj = obj;
+ vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ goto err;
- return 0;
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
}
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
- struct intel_ringbuffer *ring;
- int ret;
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!is_power_of_2(size));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (ring == NULL) {
- DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
- engine->name);
+ if (!ring)
return ERR_PTR(-ENOMEM);
- }
ring->engine = engine;
- list_add(&ring->link, &engine->buffers);
+
+ INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
@@ -2111,23 +2010,20 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
ring->last_retired_head = -1;
intel_ring_update_space(ring);
- ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
- if (ret) {
- DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
- engine->name, ret);
- list_del(&ring->link);
+ vma = intel_ring_create_vma(engine->i915, size);
+ if (IS_ERR(vma)) {
kfree(ring);
- return ERR_PTR(ret);
+ return ERR_CAST(vma);
}
+ ring->vma = vma;
return ring;
}
void
-intel_ringbuffer_free(struct intel_ringbuffer *ring)
+intel_ring_free(struct intel_ring *ring)
{
- intel_destroy_ringbuffer_obj(ring);
- list_del(&ring->link);
+ i915_vma_put(ring->vma);
kfree(ring);
}
@@ -2143,7 +2039,12 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
return 0;
if (ce->state) {
- ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+ ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
+ if (ret)
+ goto error;
+
+ ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
+ PIN_GLOBAL | PIN_HIGH);
if (ret)
goto error;
}
@@ -2158,7 +2059,7 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
if (ctx == ctx->i915->kernel_context)
ce->initialised = true;
- i915_gem_context_reference(ctx);
+ i915_gem_context_get(ctx);
return 0;
error:
@@ -2177,30 +2078,25 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
return;
if (ce->state)
- i915_gem_object_ggtt_unpin(ce->state);
+ i915_vma_unpin(ce->state);
- i915_gem_context_unreference(ctx);
+ i915_gem_context_put(ctx);
}
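Pin and unpin above must stay symmetric: the 0->1 transition takes the GGTT
pin (for contexts with state) plus a context reference, and only the 1->0
transition releases them. A toy model of the pin_count discipline (field
and helper names assumed here purely for illustration):

	struct ctx_model { unsigned int pin_count; };

	static int model_pin(struct ctx_model *ce)
	{
		if (ce->pin_count++)
			return 0;	/* already resident, just count */
		/* 0 -> 1: i915_vma_pin() + i915_gem_context_get() */
		return 0;
	}

	static void model_unpin(struct ctx_model *ce)
	{
		if (--ce->pin_count)
			return;		/* other users remain */
		/* 1 -> 0: i915_vma_unpin() + i915_gem_context_put() */
	}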
-static int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_engine_cs *engine)
+static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ringbuffer *ringbuf;
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_ring *ring;
int ret;
WARN_ON(engine->buffer);
- engine->i915 = dev_priv;
- INIT_LIST_HEAD(&engine->active_list);
- INIT_LIST_HEAD(&engine->request_list);
- INIT_LIST_HEAD(&engine->execlist_queue);
- INIT_LIST_HEAD(&engine->buffers);
- i915_gem_batch_pool_init(dev, &engine->batch_pool);
+ intel_engine_setup_common(engine);
+
memset(engine->semaphore.sync_seqno, 0,
sizeof(engine->semaphore.sync_seqno));
- ret = intel_engine_init_breadcrumbs(engine);
+ ret = intel_engine_init_common(engine);
if (ret)
goto error;
@@ -2215,44 +2111,38 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
- ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
- if (IS_ERR(ringbuf)) {
- ret = PTR_ERR(ringbuf);
+ ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
goto error;
}
- engine->buffer = ringbuf;
- if (I915_NEED_GFX_HWS(dev_priv)) {
- ret = init_status_page(engine);
+ if (HWS_NEEDS_PHYSICAL(dev_priv)) {
+ WARN_ON(engine->id != RCS);
+ ret = init_phys_status_page(engine);
if (ret)
goto error;
} else {
- WARN_ON(engine->id != RCS);
- ret = init_phys_status_page(engine);
+ ret = init_status_page(engine);
if (ret)
goto error;
}
- ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
+ ret = intel_ring_pin(ring);
if (ret) {
- DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
- engine->name, ret);
- intel_destroy_ringbuffer_obj(ringbuf);
+ intel_ring_free(ring);
goto error;
}
-
- ret = i915_cmd_parser_init_ring(engine);
- if (ret)
- goto error;
+ engine->buffer = ring;
return 0;
error:
- intel_cleanup_engine(engine);
+ intel_engine_cleanup(engine);
return ret;
}
-void intel_cleanup_engine(struct intel_engine_cs *engine)
+void intel_engine_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
@@ -2262,49 +2152,39 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
dev_priv = engine->i915;
if (engine->buffer) {
- intel_stop_engine(engine);
- WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON(INTEL_GEN(dev_priv) > 2 &&
+ (I915_READ_MODE(engine) & MODE_IDLE) == 0);
- intel_unpin_ringbuffer_obj(engine->buffer);
- intel_ringbuffer_free(engine->buffer);
+ intel_ring_unpin(engine->buffer);
+ intel_ring_free(engine->buffer);
engine->buffer = NULL;
}
if (engine->cleanup)
engine->cleanup(engine);
- if (I915_NEED_GFX_HWS(dev_priv)) {
- cleanup_status_page(engine);
- } else {
+ if (HWS_NEEDS_PHYSICAL(dev_priv)) {
WARN_ON(engine->id != RCS);
cleanup_phys_status_page(engine);
+ } else {
+ cleanup_status_page(engine);
}
- i915_cmd_parser_fini_ring(engine);
- i915_gem_batch_pool_fini(&engine->batch_pool);
- intel_engine_fini_breadcrumbs(engine);
+ intel_engine_cleanup_common(engine);
intel_ring_context_unpin(dev_priv->kernel_context, engine);
engine->i915 = NULL;
}
-int intel_engine_idle(struct intel_engine_cs *engine)
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_gem_request *req;
+ struct intel_engine_cs *engine;
- /* Wait upon the last request to be completed */
- if (list_empty(&engine->request_list))
- return 0;
-
- req = list_entry(engine->request_list.prev,
- struct drm_i915_gem_request,
- list);
-
- /* Make sure we do not trigger any retires */
- return __i915_wait_request(req,
- req->i915->mm.interruptible,
- NULL, NULL);
+ for_each_engine(engine, dev_priv) {
+ engine->buffer->head = engine->buffer->tail;
+ engine->buffer->last_retired_head = -1;
+ }
}
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -2317,7 +2197,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- request->ringbuf = request->engine->buffer;
+ request->ring = request->engine->buffer;
ret = intel_ring_begin(request, 0);
if (ret)
@@ -2329,12 +2209,12 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
+ int ret;
- intel_ring_update_space(ringbuf);
- if (ringbuf->space >= bytes)
+ intel_ring_update_space(ring);
+ if (ring->space >= bytes)
return 0;
/*
@@ -2348,35 +2228,37 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
*/
GEM_BUG_ON(!req->reserved_space);
- list_for_each_entry(target, &engine->request_list, list) {
+ list_for_each_entry(target, &ring->request_list, ring_link) {
unsigned space;
- /*
- * The request queue is per-engine, so can contain requests
- * from multiple ringbuffers. Here, we must ignore any that
- * aren't from the ringbuffer we're considering.
- */
- if (target->ringbuf != ringbuf)
- continue;
-
/* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ringbuf->tail,
- ringbuf->size);
+ space = __intel_ring_space(target->postfix, ring->tail,
+ ring->size);
if (space >= bytes)
break;
}
- if (WARN_ON(&target->list == &engine->request_list))
+ if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
- return i915_wait_request(target);
+ ret = i915_wait_request(target,
+ I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ NULL, NO_WAITBOOST);
+ if (ret)
+ return ret;
+
+ i915_gem_request_retire_upto(target);
+
+ intel_ring_update_space(ring);
+ GEM_BUG_ON(ring->space < bytes);
+ return 0;
}
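For reference, the free-space helper used above treats the ring as a
circular buffer with a small reserve so the tail can never catch the head
exactly; in this era __intel_ring_space() looks roughly like:

	static int ring_space(int head, int tail, int size)
	{
		int space = head - tail;

		if (space <= 0)
			space += size;
		return space - I915_RING_FREE_SPACE;	/* 64-byte reserve */
	}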
int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- int remain_actual = ringbuf->size - ringbuf->tail;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ struct intel_ring *ring = req->ring;
+ int remain_actual = ring->size - ring->tail;
+ int remain_usable = ring->effective_size - ring->tail;
int bytes = num_dwords * sizeof(u32);
int total_bytes, wait_bytes;
bool need_wrap = false;
@@ -2403,37 +2285,33 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
wait_bytes = total_bytes;
}
- if (wait_bytes > ringbuf->space) {
+ if (wait_bytes > ring->space) {
int ret = wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
-
- intel_ring_update_space(ringbuf);
- if (unlikely(ringbuf->space < wait_bytes))
- return -EAGAIN;
}
if (unlikely(need_wrap)) {
- GEM_BUG_ON(remain_actual > ringbuf->space);
- GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
+ GEM_BUG_ON(remain_actual > ring->space);
+ GEM_BUG_ON(ring->tail + remain_actual > ring->size);
/* Fill the tail with MI_NOOP */
- memset(ringbuf->virtual_start + ringbuf->tail,
- 0, remain_actual);
- ringbuf->tail = 0;
- ringbuf->space -= remain_actual;
+ memset(ring->vaddr + ring->tail, 0, remain_actual);
+ ring->tail = 0;
+ ring->space -= remain_actual;
}
- ringbuf->space -= bytes;
- GEM_BUG_ON(ringbuf->space < 0);
+ ring->space -= bytes;
+ GEM_BUG_ON(ring->space < 0);
return 0;
}
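The wrap handling above has one subtlety worth a worked model: when the
request does not fit before effective_size, the wait must cover both the
MI_NOOP padding out to the end of the ring and the request itself, since
both are consumed before any space is reclaimed. A reduced sketch
(reserved space omitted):

	struct ring_model { unsigned int size, effective_size, tail; };

	static unsigned int bytes_to_wait_for(const struct ring_model *r,
					      unsigned int bytes)
	{
		unsigned int remain_usable = r->effective_size - r->tail;

		if (bytes > remain_usable) {
			/* wrap: pad out remain_actual bytes, restart at 0 */
			unsigned int remain_actual = r->size - r->tail;
			return remain_actual + bytes;
		}
		return bytes;
	}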
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = req->engine;
- int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ struct intel_ring *ring = req->ring;
+ int num_dwords =
+ (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
if (num_dwords == 0)
@@ -2445,61 +2323,16 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
return ret;
while (num_dwords--)
- intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_advance(ring);
return 0;
}
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- /* Our semaphore implementation is strictly monotonic (i.e. we proceed
- * so long as the semaphore value in the register/page is greater
- * than the sync value), so whenever we reset the seqno,
- * so long as we reset the tracking semaphore value to 0, it will
- * always be before the next request's seqno. If we don't reset
- * the semaphore value, then when the seqno moves backwards all
- * future waits will complete instantly (causing rendering corruption).
- */
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
- I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
- if (HAS_VEBOX(dev_priv))
- I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
- }
- if (dev_priv->semaphore_obj) {
- struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
- struct page *page = i915_gem_object_get_dirty_page(obj, 0);
- void *semaphores = kmap(page);
- memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
- 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
- kunmap(page);
- }
- memset(engine->semaphore.sync_seqno, 0,
- sizeof(engine->semaphore.sync_seqno));
-
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
- engine->last_submitted_seqno = seqno;
-
- engine->hangcheck.seqno = seqno;
-
- /* After manually advancing the seqno, fake the interrupt in case
- * there are any waiters for that seqno.
- */
- rcu_read_lock();
- intel_engine_wakeup(engine);
- rcu_read_unlock();
-}
-
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
- u32 value)
+static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *dev_priv = request->i915;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -2523,8 +2356,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
- I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
- POSTING_READ_FW(RING_TAIL(engine->mmio_base));
+ i9xx_submit_request(request);
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
@@ -2535,10 +2367,9 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
- u32 invalidate, u32 flush)
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
uint32_t cmd;
int ret;
@@ -2563,30 +2394,29 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
- if (invalidate & I915_GEM_GPU_DOMAINS)
+ if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
- intel_ring_emit(engine, cmd);
- intel_ring_emit(engine,
- I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_GEN(req->i915) >= 8) {
- intel_ring_emit(engine, 0); /* upper addr */
- intel_ring_emit(engine, 0); /* value */
+ intel_ring_emit(ring, 0); /* upper addr */
+ intel_ring_emit(ring, 0); /* value */
} else {
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
}
- intel_ring_advance(engine);
+ intel_ring_advance(ring);
return 0;
}
static int
-gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
- u64 offset, u32 len,
- unsigned dispatch_flags)
+gen8_emit_bb_start(struct drm_i915_gem_request *req,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
{
- struct intel_engine_cs *engine = req->engine;
- bool ppgtt = USES_PPGTT(engine->dev) &&
+ struct intel_ring *ring = req->ring;
+ bool ppgtt = USES_PPGTT(req->i915) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
@@ -2595,71 +2425,70 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
- intel_ring_emit(engine, lower_32_bits(offset));
- intel_ring_emit(engine, upper_32_bits(offset));
- intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, lower_32_bits(offset));
+ intel_ring_emit(ring, upper_32_bits(offset));
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
return 0;
}
static int
-hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
- u64 offset, u32 len,
- unsigned dispatch_flags)
+hsw_emit_bb_start(struct drm_i915_gem_request *req,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(engine,
+ intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(engine, offset);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
return 0;
}
static int
-gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
- u64 offset, u32 len,
- unsigned dispatch_flags)
+gen6_emit_bb_start(struct drm_i915_gem_request *req,
+ u64 offset, u32 len,
+ unsigned int dispatch_flags)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(engine,
+ intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(engine, offset);
- intel_ring_advance(engine);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
return 0;
}
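All of the *_emit_bb_start() variants share one shape: emit an
MI_BATCH_BUFFER_START carrying per-generation security/ppgtt bits, then
the batch offset. A hypothetical caller through the new vfunc (shown only
to illustrate the signature; the real call site is the execbuffer path):

	static int submit_batch(struct drm_i915_gem_request *req,
				u64 offset, u32 len, bool secure)
	{
		return req->engine->emit_bb_start(req, offset, len,
						  secure ? I915_DISPATCH_SECURE : 0);
	}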
/* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct drm_i915_gem_request *req,
- u32 invalidate, u32 flush)
+static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_ring *ring = req->ring;
uint32_t cmd;
int ret;
@@ -2684,19 +2513,19 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
- if (invalidate & I915_GEM_DOMAIN_RENDER)
+ if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB;
- intel_ring_emit(engine, cmd);
- intel_ring_emit(engine,
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_GEN(req->i915) >= 8) {
- intel_ring_emit(engine, 0); /* upper addr */
- intel_ring_emit(engine, 0); /* value */
+ intel_ring_emit(ring, 0); /* upper addr */
+ intel_ring_emit(ring, 0); /* value */
} else {
- intel_ring_emit(engine, 0);
- intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
}
- intel_ring_advance(engine);
+ intel_ring_advance(ring);
return 0;
}
@@ -2707,38 +2536,39 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *obj;
int ret, i;
- if (!i915_semaphore_is_enabled(dev_priv))
+ if (!i915.semaphores)
return;
- if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+ if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
+ struct i915_vma *vma;
+
obj = i915_gem_object_create(&dev_priv->drm, 4096);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
- i915.semaphores = 0;
- } else {
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
- if (ret != 0) {
- drm_gem_object_unreference(&obj->base);
- DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
- i915.semaphores = 0;
- } else {
- dev_priv->semaphore_obj = obj;
- }
- }
- }
+ if (IS_ERR(obj))
+ goto err;
- if (!i915_semaphore_is_enabled(dev_priv))
- return;
+ vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ goto err_obj;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ goto err_obj;
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_obj;
+
+ dev_priv->semaphore = vma;
+ }
if (INTEL_GEN(dev_priv) >= 8) {
- u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+ u32 offset = i915_ggtt_offset(dev_priv->semaphore);
- engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.sync_to = gen8_ring_sync_to;
engine->semaphore.signal = gen8_xcs_signal;
for (i = 0; i < I915_NUM_ENGINES; i++) {
- u64 ring_offset;
+ u32 ring_offset;
if (i != engine->id)
ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
@@ -2748,7 +2578,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
engine->semaphore.signal_ggtt[i] = ring_offset;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
- engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.sync_to = gen6_ring_sync_to;
engine->semaphore.signal = gen6_signal;
/*
@@ -2758,52 +2588,62 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
* initialized as INVALID. Gen8 will initialize the
* sema between VCS2 and RCS later.
*/
- for (i = 0; i < I915_NUM_ENGINES; i++) {
+ for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
static const struct {
u32 wait_mbox;
i915_reg_t mbox_reg;
- } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
- [RCS] = {
- [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
- [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
- [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+ } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
+ [RCS_HW] = {
+ [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
+ [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
+ [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
},
- [VCS] = {
- [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
- [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
- [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+ [VCS_HW] = {
+ [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
+ [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
+ [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
},
- [BCS] = {
- [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
- [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
- [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+ [BCS_HW] = {
+ [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
+ [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
+ [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
},
- [VECS] = {
- [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
- [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
- [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+ [VECS_HW] = {
+ [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+ [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+ [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
},
};
u32 wait_mbox;
i915_reg_t mbox_reg;
- if (i == engine->id || i == VCS2) {
+ if (i == engine->hw_id) {
wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
mbox_reg = GEN6_NOSYNC;
} else {
- wait_mbox = sem_data[engine->id][i].wait_mbox;
- mbox_reg = sem_data[engine->id][i].mbox_reg;
+ wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
+ mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
}
engine->semaphore.mbox.wait[i] = wait_mbox;
engine->semaphore.mbox.signal[i] = mbox_reg;
}
}
+
+ return;
+
+err_obj:
+ i915_gem_object_put(obj);
+err:
+ DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
+ i915.semaphores = 0;
}
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
+
if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable = gen8_irq_enable;
engine->irq_disable = gen8_irq_disable;
@@ -2828,83 +2668,76 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
+ intel_ring_init_irq(dev_priv, engine);
+ intel_ring_init_semaphores(dev_priv, engine);
+
engine->init_hw = init_ring_common;
- engine->write_tail = ring_write_tail;
+ engine->reset_hw = reset_ring_common;
- engine->add_request = i9xx_add_request;
- if (INTEL_GEN(dev_priv) >= 6)
- engine->add_request = gen6_add_request;
+ engine->emit_request = i9xx_emit_request;
+ if (i915.semaphores)
+ engine->emit_request = gen6_sema_emit_request;
+ engine->submit_request = i9xx_submit_request;
if (INTEL_GEN(dev_priv) >= 8)
- engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->emit_bb_start = gen8_emit_bb_start;
else if (INTEL_GEN(dev_priv) >= 6)
- engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(dev_priv) >= 4)
- engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+ engine->emit_bb_start = i965_emit_bb_start;
else if (IS_I830(dev_priv) || IS_845G(dev_priv))
- engine->dispatch_execbuffer = i830_dispatch_execbuffer;
+ engine->emit_bb_start = i830_emit_bb_start;
else
- engine->dispatch_execbuffer = i915_dispatch_execbuffer;
-
- intel_ring_init_irq(dev_priv, engine);
- intel_ring_init_semaphores(dev_priv, engine);
+ engine->emit_bb_start = i915_emit_bb_start;
}
-int intel_init_render_ring_buffer(struct drm_device *dev)
+int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- engine->name = "render ring";
- engine->id = RCS;
- engine->exec_id = I915_EXEC_RENDER;
- engine->hw_id = 0;
- engine->mmio_base = RENDER_RING_BASE;
-
intel_ring_default_vfuncs(dev_priv, engine);
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
- engine->add_request = gen8_render_add_request;
- engine->flush = gen8_render_ring_flush;
- if (i915_semaphore_is_enabled(dev_priv))
+ engine->emit_request = gen8_render_emit_request;
+ engine->emit_flush = gen8_render_ring_flush;
+ if (i915.semaphores)
engine->semaphore.signal = gen8_rcs_signal;
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
- engine->flush = gen7_render_ring_flush;
+ engine->emit_flush = gen7_render_ring_flush;
if (IS_GEN6(dev_priv))
- engine->flush = gen6_render_ring_flush;
+ engine->emit_flush = gen6_render_ring_flush;
} else if (IS_GEN5(dev_priv)) {
- engine->flush = gen4_render_ring_flush;
+ engine->emit_flush = gen4_render_ring_flush;
} else {
if (INTEL_GEN(dev_priv) < 4)
- engine->flush = gen2_render_ring_flush;
+ engine->emit_flush = gen2_render_ring_flush;
else
- engine->flush = gen4_render_ring_flush;
+ engine->emit_flush = gen4_render_ring_flush;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
if (IS_HASWELL(dev_priv))
- engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ engine->emit_bb_start = hsw_emit_bb_start;
engine->init_hw = init_render_ring;
engine->cleanup = render_ring_cleanup;
- ret = intel_init_ring_buffer(dev, engine);
+ ret = intel_init_ring_buffer(engine);
if (ret)
return ret;
if (INTEL_GEN(dev_priv) >= 6) {
- ret = intel_init_pipe_control(engine, 4096);
+ ret = intel_engine_create_scratch(engine, 4096);
if (ret)
return ret;
} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
- ret = intel_init_pipe_control(engine, I830_WA_SIZE);
+ ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
if (ret)
return ret;
}
@@ -2912,166 +2745,71 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return 0;
}
-int intel_init_bsd_ring_buffer(struct drm_device *dev)
+int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_engine_cs *engine = &dev_priv->engine[VCS];
-
- engine->name = "bsd ring";
- engine->id = VCS;
- engine->exec_id = I915_EXEC_BSD;
- engine->hw_id = 1;
+ struct drm_i915_private *dev_priv = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
if (INTEL_GEN(dev_priv) >= 6) {
- engine->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev_priv))
- engine->write_tail = gen6_bsd_ring_write_tail;
- engine->flush = gen6_bsd_ring_flush;
- if (INTEL_GEN(dev_priv) >= 8)
- engine->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- else
+ engine->submit_request = gen6_bsd_submit_request;
+ engine->emit_flush = gen6_bsd_ring_flush;
+ if (INTEL_GEN(dev_priv) < 8)
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
} else {
engine->mmio_base = BSD_RING_BASE;
- engine->flush = bsd_ring_flush;
+ engine->emit_flush = bsd_ring_flush;
if (IS_GEN5(dev_priv))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
}
- return intel_init_ring_buffer(dev, engine);
+ return intel_init_ring_buffer(engine);
}
/**
* Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
*/
-int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
-
- engine->name = "bsd2 ring";
- engine->id = VCS2;
- engine->exec_id = I915_EXEC_BSD;
- engine->hw_id = 4;
- engine->mmio_base = GEN8_BSD2_RING_BASE;
+ struct drm_i915_private *dev_priv = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
- engine->flush = gen6_bsd_ring_flush;
- engine->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+ engine->emit_flush = gen6_bsd_ring_flush;
- return intel_init_ring_buffer(dev, engine);
+ return intel_init_ring_buffer(engine);
}
-int intel_init_blt_ring_buffer(struct drm_device *dev)
+int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_engine_cs *engine = &dev_priv->engine[BCS];
-
- engine->name = "blitter ring";
- engine->id = BCS;
- engine->exec_id = I915_EXEC_BLT;
- engine->hw_id = 2;
- engine->mmio_base = BLT_RING_BASE;
+ struct drm_i915_private *dev_priv = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
- engine->flush = gen6_ring_flush;
- if (INTEL_GEN(dev_priv) >= 8)
- engine->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- else
+ engine->emit_flush = gen6_ring_flush;
+ if (INTEL_GEN(dev_priv) < 8)
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- return intel_init_ring_buffer(dev, engine);
+ return intel_init_ring_buffer(engine);
}
-int intel_init_vebox_ring_buffer(struct drm_device *dev)
+int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_engine_cs *engine = &dev_priv->engine[VECS];
-
- engine->name = "video enhancement ring";
- engine->id = VECS;
- engine->exec_id = I915_EXEC_VEBOX;
- engine->hw_id = 3;
- engine->mmio_base = VEBOX_RING_BASE;
+ struct drm_i915_private *dev_priv = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
- engine->flush = gen6_ring_flush;
+ engine->emit_flush = gen6_ring_flush;
- if (INTEL_GEN(dev_priv) >= 8) {
- engine->irq_enable_mask =
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- } else {
+ if (INTEL_GEN(dev_priv) < 8) {
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
engine->irq_enable = hsw_vebox_irq_enable;
engine->irq_disable = hsw_vebox_irq_disable;
}
- return intel_init_ring_buffer(dev, engine);
-}
-
-int
-intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
-
- if (!engine->gpu_caches_dirty)
- return 0;
-
- ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
-
- trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
-
- engine->gpu_caches_dirty = false;
- return 0;
-}
-
-int
-intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- uint32_t flush_domains;
- int ret;
-
- flush_domains = 0;
- if (engine->gpu_caches_dirty)
- flush_domains = I915_GEM_GPU_DOMAINS;
-
- ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
- if (ret)
- return ret;
-
- trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-
- engine->gpu_caches_dirty = false;
- return 0;
-}
-
-void
-intel_stop_engine(struct intel_engine_cs *engine)
-{
- int ret;
-
- if (!intel_engine_initialized(engine))
- return;
-
- ret = intel_engine_idle(engine);
- if (ret)
- DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
- engine->name, ret);
-
- stop_ring(engine);
+ return intel_init_ring_buffer(engine);
}
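Note the signature change running through all five initialisers: they now
take the engine itself (with name, id, hw_id, mmio_base and irq_shift
filled in by the common engine setup) rather than a drm_device. A
hypothetical caller under that assumption:

	static int example_init_engines(struct drm_i915_private *dev_priv)
	{
		int ret;

		ret = intel_init_render_ring_buffer(&dev_priv->engine[RCS]);
		if (ret)
			return ret;

		return intel_init_bsd_ring_buffer(&dev_priv->engine[VCS]);
	}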
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 12cb7ed90014..ec0b4a0c605d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -3,6 +3,7 @@
#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
+#include "i915_gem_request.h"
#define I915_CMD_HASH_ORDER 9
@@ -25,29 +26,29 @@
*/
#define I915_RING_FREE_SPACE 64
-struct intel_hw_status_page {
- u32 *page_addr;
- unsigned int gfx_addr;
- struct drm_i915_gem_object *obj;
+struct intel_hw_status_page {
+ struct i915_vma *vma;
+ u32 *page_addr;
+ u32 ggtt_offset;
};
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
+#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
-#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
+#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
+#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
-#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
+#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
-#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
+#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
-#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
-#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
+#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
@@ -56,13 +57,13 @@ struct intel_hw_status_page {
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
- (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ (dev_priv->semaphore->node.start + \
GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
- (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ (dev_priv->semaphore->node.start + \
GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
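A worked example of the layout these macros index (using values visible in
this file: I915_NUM_ENGINES == 5, and gen8_semaphore_seqno_size pretending
to be 8 bytes per the comment above): the semaphore page is a 5x5 array of
qwords, so RCS (0) signalling VCS (1) lands at byte offset
(0 * 5 + 1) * 8 == 8, while VCS signalling RCS lands at (1 * 5 + 0) * 8 == 40.

	static unsigned int gen8_sema_offset(unsigned int from, unsigned int to)
	{
		return (from * I915_NUM_ENGINES + to) * 8;	/* one qword per pair */
	}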
-enum intel_ring_hangcheck_action {
+enum intel_engine_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
@@ -72,23 +73,22 @@ enum intel_ring_hangcheck_action {
#define HANGCHECK_SCORE_RING_HUNG 31
-struct intel_ring_hangcheck {
+struct intel_engine_hangcheck {
u64 acthd;
- unsigned long user_interrupts;
u32 seqno;
int score;
- enum intel_ring_hangcheck_action action;
+ enum intel_engine_hangcheck_action action;
int deadlock;
u32 instdone[I915_NUM_INSTDONE_REG];
};
-struct intel_ringbuffer {
- struct drm_i915_gem_object *obj;
- void __iomem *virtual_start;
+struct intel_ring {
struct i915_vma *vma;
+ void *vaddr;
struct intel_engine_cs *engine;
- struct list_head link;
+
+ struct list_head request_list;
u32 head;
u32 tail;
@@ -121,12 +121,12 @@ struct drm_i915_reg_table;
* an option for future use.
* size: size of the batch in DWORDS
*/
struct i915_ctx_workarounds {
struct i915_wa_ctx_bb {
u32 offset;
u32 size;
} indirect_ctx, per_ctx;
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
};
struct drm_i915_gem_request;
@@ -144,11 +144,18 @@ struct intel_engine_cs {
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
- unsigned int hw_id;
- unsigned int guc_id; /* XXX same as hw_id? */
+ enum intel_engine_hw_id {
+ RCS_HW = 0,
+ VCS_HW,
+ BCS_HW,
+ VECS_HW,
+ VCS2_HW
+ } hw_id;
+ enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
+ u64 fence_context;
u32 mmio_base;
- struct intel_ringbuffer *buffer;
- struct list_head buffers;
+ unsigned int irq_shift;
+ struct intel_ring *buffer;
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
@@ -167,8 +174,7 @@ struct intel_engine_cs {
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- struct task_struct *irq_seqno_bh; /* bh for user interrupts */
- unsigned long irq_wakeups;
+ struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
bool irq_posted;
spinlock_t lock; /* protects the lists of requests */
@@ -178,6 +184,9 @@ struct intel_engine_cs {
struct task_struct *signaler; /* used for fence signalling */
struct drm_i915_gem_request *first_signal;
struct timer_list fake_irq; /* used after a missed interrupt */
+ struct timer_list hangcheck; /* detect missed interrupts */
+
+ unsigned long timeout;
bool irq_enabled : 1;
bool rpm_wakelock : 1;
@@ -192,36 +201,48 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
+ struct i915_vma *scratch;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- void (*irq_enable)(struct intel_engine_cs *ring);
- void (*irq_disable)(struct intel_engine_cs *ring);
+ void (*irq_enable)(struct intel_engine_cs *engine);
+ void (*irq_disable)(struct intel_engine_cs *engine);
- int (*init_hw)(struct intel_engine_cs *ring);
+ int (*init_hw)(struct intel_engine_cs *engine);
+ void (*reset_hw)(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *req);
int (*init_context)(struct drm_i915_gem_request *req);
- void (*write_tail)(struct intel_engine_cs *ring,
- u32 value);
- int __must_check (*flush)(struct drm_i915_gem_request *req,
- u32 invalidate_domains,
- u32 flush_domains);
- int (*add_request)(struct drm_i915_gem_request *req);
+ int (*emit_flush)(struct drm_i915_gem_request *request,
+ u32 mode);
+#define EMIT_INVALIDATE BIT(0)
+#define EMIT_FLUSH BIT(1)
+#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
+ int (*emit_bb_start)(struct drm_i915_gem_request *req,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+#define I915_DISPATCH_RS BIT(2)
+ int (*emit_request)(struct drm_i915_gem_request *req);
+
+ /* Pass the request to the hardware queue (e.g. directly into
+ * the legacy ringbuffer or to the end of an execlist).
+ *
+ * This is called from an atomic context with irqs disabled; must
+ * be irq safe.
+ */
+ void (*submit_request)(struct drm_i915_gem_request *req);
+
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
- void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
- int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
- u64 offset, u32 length,
- unsigned dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS 0x4
- void (*cleanup)(struct intel_engine_cs *ring);
+ void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
+ void (*cleanup)(struct intel_engine_cs *engine);
/* GEN8 signal/wait table - never trust comments!
* signal to signal to signal to signal to signal to
@@ -264,51 +285,36 @@ struct intel_engine_cs {
u32 sync_seqno[I915_NUM_ENGINES-1];
union {
+#define GEN6_SEMAPHORE_LAST VECS_HW
+#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
+#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
struct {
/* our mbox written by others */
- u32 wait[I915_NUM_ENGINES];
+ u32 wait[GEN6_NUM_SEMAPHORES];
/* mboxes this ring signals to */
- i915_reg_t signal[I915_NUM_ENGINES];
+ i915_reg_t signal[GEN6_NUM_SEMAPHORES];
} mbox;
u64 signal_ggtt[I915_NUM_ENGINES];
};
/* AKA wait() */
- int (*sync_to)(struct drm_i915_gem_request *to_req,
- struct intel_engine_cs *from,
- u32 seqno);
- int (*signal)(struct drm_i915_gem_request *signaller_req,
- /* num_dwords needed by caller */
- unsigned int num_dwords);
+ int (*sync_to)(struct drm_i915_gem_request *req,
+ struct drm_i915_gem_request *signal);
+ int (*signal)(struct drm_i915_gem_request *req);
} semaphore;
/* Execlists */
struct tasklet_struct irq_tasklet;
spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
+ struct execlist_port {
+ struct drm_i915_gem_request *request;
+ unsigned int count;
+ } execlist_port[2];
struct list_head execlist_queue;
unsigned int fw_domains;
- unsigned int next_context_status_buffer;
- unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa;
+ bool preempt_wa;
u32 ctx_desc_template;
- int (*emit_request)(struct drm_i915_gem_request *request);
- int (*emit_flush)(struct drm_i915_gem_request *request,
- u32 invalidate_domains,
- u32 flush_domains);
- int (*emit_bb_start)(struct drm_i915_gem_request *req,
- u64 offset, unsigned dispatch_flags);
-
- /**
- * List of objects currently involved in rendering from the
- * ringbuffer.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_read_req
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
/**
* List of breadcrumbs associated with GPU requests currently
@@ -322,23 +328,24 @@ struct intel_engine_cs {
* inspecting request list.
*/
u32 last_submitted_seqno;
+ u32 last_pending_seqno;
- bool gpu_caches_dirty;
+ /* An RCU-guarded pointer to the last request. No reference is
+ * held to the request; users must carefully acquire a reference to
+ * the request using i915_gem_active_get_rcu(), or hold the
+ * struct_mutex.
+ */
+ struct i915_gem_active last_request;
struct i915_gem_context *last_context;
- struct intel_ring_hangcheck hangcheck;
-
- struct {
- struct drm_i915_gem_object *obj;
- u32 gtt_offset;
- } scratch;
+ struct intel_engine_hangcheck hangcheck;
bool needs_cmd_parser;
/*
* Table of commands the command parser needs to know about
- * for this ring.
+ * for this engine.
*/
DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
@@ -352,11 +359,11 @@ struct intel_engine_cs {
* Returns the bitmask for the length field of the specified command.
* Return 0 for an unrecognized/invalid command.
*
- * If the command parser finds an entry for a command in the ring's
+ * If the command parser finds an entry for a command in the engine's
* cmd_tables, it gets the command's length based on the table entry.
- * If not, it calls this function to determine the per-ring length field
- * encoding for the command (i.e. certain opcode ranges use certain bits
- * to encode the command length in the header).
+ * If not, it calls this function to determine the per-engine length
+ * field encoding for the command (i.e. different opcode ranges use
+ * certain bits to encode the command length in the header).
*/
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
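To make the new submission split concrete, here is a minimal sketch of a legacy-ringbuffer backend for the vfuncs above (editor's illustration, not part of the patch; req->i915, req->engine and req->tail are assumed request fields):

static void legacy_submit_request(struct drm_i915_gem_request *req)
{
        struct drm_i915_private *dev_priv = req->i915; /* assumed field */

        /* Runs with irqs disabled: just move the hardware tail past the
         * commands already written into the ring via intel_ring_emit().
         */
        I915_WRITE_TAIL(req->engine, req->tail);
}

static int emit_full_barrier(struct drm_i915_gem_request *req)
{
        /* EMIT_BARRIER == EMIT_INVALIDATE | EMIT_FLUSH: ask the backend
         * for both a cache invalidate and a flush in a single emit.
         */
        return req->engine->emit_flush(req, EMIT_BARRIER);
}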
@@ -374,8 +381,8 @@ intel_engine_flag(const struct intel_engine_cs *engine)
}
static inline u32
-intel_ring_sync_index(struct intel_engine_cs *engine,
- struct intel_engine_cs *other)
+intel_engine_sync_index(struct intel_engine_cs *engine,
+ struct intel_engine_cs *other)
{
int idx;
@@ -437,55 +444,76 @@ intel_write_status_page(struct intel_engine_cs *engine,
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
- struct intel_ringbuffer *ringbuf);
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-void intel_ringbuffer_free(struct intel_ringbuffer *ring);
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+int intel_ring_pin(struct intel_ring *ring);
+void intel_ring_unpin(struct intel_ring *ring);
+void intel_ring_free(struct intel_ring *ring);
+
+void intel_engine_stop(struct intel_engine_cs *engine);
+void intel_engine_cleanup(struct intel_engine_cs *engine);
-void intel_stop_engine(struct intel_engine_cs *engine);
-void intel_cleanup_engine(struct intel_engine_cs *engine);
+void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *engine,
- u32 data)
+
+static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
- struct intel_ringbuffer *ringbuf = engine->buffer;
- iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
- ringbuf->tail += 4;
+ *(uint32_t *)(ring->vaddr + ring->tail) = data;
+ ring->tail += 4;
}
-static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
- i915_reg_t reg)
+
+static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
- intel_ring_emit(engine, i915_mmio_reg_offset(reg));
+ intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
-static inline void intel_ring_advance(struct intel_engine_cs *engine)
+
+static inline void intel_ring_advance(struct intel_ring *ring)
{
- struct intel_ringbuffer *ringbuf = engine->buffer;
- ringbuf->tail &= ringbuf->size - 1;
+ /* Dummy function.
+ *
+ * This serves as a placeholder in the code so that the reader
+ * can compare against the preceding intel_ring_begin() and
+ * check that the number of dwords emitted matches the space
+ * reserved for the command packet (i.e. the value passed to
+ * intel_ring_begin()).
+ */
+}
+
+static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
+{
+ /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
+ return value & (ring->size - 1);
}
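A usage sketch of the begin/emit/advance contract documented above (editor's illustration; req->ring is an assumed request field and MI_NOOP comes from i915_reg.h):

static int emit_two_noops(struct drm_i915_gem_request *req)
{
        struct intel_ring *ring = req->ring; /* assumed field */
        int ret;

        ret = intel_ring_begin(req, 2); /* reserve exactly 2 dwords */
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring); /* no-op; documents that 2 dwords were written */

        return 0;
}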
+
int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
+void intel_ring_update_space(struct intel_ring *ring);
-int __must_check intel_engine_idle(struct intel_engine_cs *engine);
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
-void intel_fini_pipe_control(struct intel_engine_cs *engine);
+void intel_engine_setup_common(struct intel_engine_cs *engine);
+int intel_engine_init_common(struct intel_engine_cs *engine);
+int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
+void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_init_render_ring_buffer(struct drm_device *dev);
-int intel_init_bsd_ring_buffer(struct drm_device *dev);
-int intel_init_bsd2_ring_buffer(struct drm_device *dev);
-int intel_init_blt_ring_buffer(struct drm_device *dev);
-int intel_init_vebox_ring_buffer(struct drm_device *dev);
+static inline int intel_engine_idle(struct intel_engine_cs *engine,
+ unsigned int flags)
+{
+ /* Wait upon the last request to be completed */
+ return i915_gem_active_wait_unlocked(&engine->last_request,
+ flags, NULL, NULL);
+}
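intel_engine_idle() above waits on engine->last_request; per the struct comment, lockless readers instead take a reference with i915_gem_active_get_rcu(). A sketch of that pattern (editor's illustration; i915_gem_request_put() is assumed from i915_gem_request.h):

static struct drm_i915_gem_request *
peek_last_request(struct intel_engine_cs *engine)
{
        struct drm_i915_gem_request *req;

        rcu_read_lock();
        /* Only takes a reference if the request is still active. */
        req = i915_gem_active_get_rcu(&engine->last_request);
        rcu_read_unlock();

        return req; /* caller must i915_gem_request_put() when done */
}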
+
+int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
+int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
@@ -493,11 +521,6 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
int init_workarounds_ring(struct intel_engine_cs *engine);
-static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
-{
- return ringbuf->tail;
-}
-
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
@@ -509,21 +532,10 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
- return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+ return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
-struct intel_wait {
- struct rb_node node;
- struct task_struct *tsk;
- u32 seqno;
-};
-
-struct intel_signal_node {
- struct rb_node node;
- struct intel_wait wait;
-};
-
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
@@ -543,31 +555,43 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
-static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
- return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+ return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}
-static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
bool wakeup = false;
- struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+
/* Note that for this not to dangerously chase a dangling pointer,
- * the caller is responsible for ensure that the task remain valid for
- * wake_up_process() i.e. that the RCU grace period cannot expire.
+ * we must hold the rcu_read_lock here.
*
* Also note that tsk is likely to be in !TASK_RUNNING state so an
* early test for tsk->state != TASK_RUNNING before wake_up_process()
* is unlikely to be beneficial.
*/
- if (tsk)
- wakeup = wake_up_process(tsk);
+ if (intel_engine_has_waiter(engine)) {
+ struct task_struct *tsk;
+
+ rcu_read_lock();
+ tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
+ if (tsk)
+ wakeup = wake_up_process(tsk);
+ rcu_read_unlock();
+ }
+
return wakeup;
}
-void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);
+static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
+{
+ return i915_gem_active_isset(&engine->last_request);
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1c603bbe5784..a38c2fefe85a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -287,6 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
*/
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_device *dev = &dev_priv->drm;
/*
@@ -299,9 +300,9 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
- vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
if (IS_BROADWELL(dev))
gen8_irq_power_well_post_enable(dev_priv,
@@ -318,7 +319,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_device *dev = &dev_priv->drm;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -331,9 +332,9 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
* and error messages.
*/
if (power_well->data == SKL_DISP_PW_2) {
- vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
gen8_irq_power_well_post_enable(dev_priv,
1 << PIPE_C | 1 << PIPE_B);
@@ -592,6 +593,8 @@ void bxt_disable_dc9(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Disabling DC9\n");
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ intel_pps_unlock_regs_wa(dev_priv);
}
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
@@ -854,7 +857,7 @@ static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum skl_disp_power_wells power_well_id = power_well->data;
- struct i915_power_well *cmn_a_well;
+ struct i915_power_well *cmn_a_well = NULL;
if (power_well_id == BXT_DPIO_CMN_BC) {
/*
@@ -867,7 +870,7 @@ static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
- if (power_well_id == BXT_DPIO_CMN_BC)
+ if (cmn_a_well)
intel_power_well_put(dev_priv, cmn_a_well);
}
@@ -1121,6 +1124,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
}
i915_redisable_vga_power_on(&dev_priv->drm);
+
+ intel_pps_unlock_regs_wa(dev_priv);
}
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -1134,7 +1139,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
intel_power_sequencer_reset(dev_priv);
- intel_hpd_poll_init(dev_priv);
+ /* Prevent us from re-enabling polling by accident in late suspend */
+ if (!dev_priv->drm.dev->power.is_suspended)
+ intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
@@ -2284,7 +2291,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
- struct device *device = &dev_priv->drm.pdev->dev;
+ struct device *kdev = &dev_priv->drm.pdev->dev;
/*
* The i915.ko module is still not prepared to be loaded when
@@ -2306,7 +2313,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
* the platform doesn't support runtime PM.
*/
if (!HAS_RUNTIME_PM(dev_priv))
- pm_runtime_put(device);
+ pm_runtime_put(kdev);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -2647,10 +2654,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
- struct device *device = &dev->pdev->dev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct device *kdev = &pdev->dev;
- pm_runtime_get_sync(device);
+ pm_runtime_get_sync(kdev);
atomic_inc(&dev_priv->pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
@@ -2668,11 +2675,11 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
*/
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
- struct device *device = &dev->pdev->dev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct device *kdev = &pdev->dev;
if (IS_ENABLED(CONFIG_PM)) {
- int ret = pm_runtime_get_if_in_use(device);
+ int ret = pm_runtime_get_if_in_use(kdev);
/*
* In cases runtime PM is disabled by the RPM core and we get
@@ -2710,11 +2717,11 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
- struct device *device = &dev->pdev->dev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
- pm_runtime_get_noresume(device);
+ pm_runtime_get_noresume(kdev);
atomic_inc(&dev_priv->pm.wakeref_count);
}
@@ -2729,15 +2736,15 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
- struct device *device = &dev->pdev->dev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
atomic_inc(&dev_priv->pm.atomic_seq);
- pm_runtime_mark_last_busy(device);
- pm_runtime_put_autosuspend(device);
+ pm_runtime_mark_last_busy(kdev);
+ pm_runtime_put_autosuspend(kdev);
}
/**
@@ -2752,11 +2759,12 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_device *dev = &dev_priv->drm;
- struct device *device = &dev->pdev->dev;
+ struct device *kdev = &pdev->dev;
- pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
- pm_runtime_mark_last_busy(device);
+ pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
+ pm_runtime_mark_last_busy(kdev);
/*
* Take a permanent reference to disable the RPM functionality and drop
@@ -2765,10 +2773,10 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* platforms without RPM support.
*/
if (!HAS_RUNTIME_PM(dev)) {
- pm_runtime_dont_use_autosuspend(device);
- pm_runtime_get_sync(device);
+ pm_runtime_dont_use_autosuspend(kdev);
+ pm_runtime_get_sync(kdev);
} else {
- pm_runtime_use_autosuspend(device);
+ pm_runtime_use_autosuspend(kdev);
}
/*
@@ -2776,6 +2784,5 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* We drop that here and will reacquire it during unloading in
* intel_power_domains_fini().
*/
- pm_runtime_put_autosuspend(device);
+ pm_runtime_put_autosuspend(kdev);
}
-
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e378f35365a2..c551024d4871 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1003,24 +1003,22 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- const struct drm_display_mode *adjusted_mode)
+ struct intel_crtc_state *pipe_config)
{
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
- struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
union hdmi_infoframe frame;
int ret;
ssize_t len;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- adjusted_mode);
+ &pipe_config->base.adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return false;
}
if (intel_sdvo->rgb_quant_range_selectable) {
- if (intel_crtc->config->limited_color_range)
+ if (pipe_config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
@@ -1125,7 +1123,8 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
}
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
@@ -1192,22 +1191,21 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
return true;
}
-static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
+static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
- struct drm_display_mode *mode = &crtc->config->base.mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ struct drm_display_mode *mode = &crtc_state->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd, output_dtd;
int rate;
- if (!mode)
- return;
-
/* First, set the input mapping for the first input to our controlled
* output. This is only correct if we're a single-input device, in
* which case the first input is the output from the appropriate SDVO
@@ -1240,11 +1238,11 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (crtc->config->has_hdmi_sink) {
+ if (crtc_state->has_hdmi_sink) {
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
- intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
+ intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1260,7 +1258,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
- switch (crtc->config->pixel_multiplier) {
+ switch (crtc_state->pixel_multiplier) {
default:
WARN(1, "unknown pixel multiplier specified\n");
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
@@ -1275,7 +1273,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
- if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev) && crtc_state->limited_color_range)
sdvox |= HDMI_COLOR_RANGE_16_235;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@@ -1301,7 +1299,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
- sdvox |= (crtc->config->pixel_multiplier - 1)
+ sdvox |= (crtc_state->pixel_multiplier - 1)
<< SDVO_PORT_MULTIPLY_SHIFT;
}
@@ -1434,7 +1432,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
}
-static void intel_disable_sdvo(struct intel_encoder *encoder)
+static void intel_disable_sdvo(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
@@ -1477,16 +1477,22 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
}
}
-static void pch_disable_sdvo(struct intel_encoder *encoder)
+static void pch_disable_sdvo(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
}
-static void pch_post_disable_sdvo(struct intel_encoder *encoder)
+static void pch_post_disable_sdvo(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
- intel_disable_sdvo(encoder);
+ intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
}
-static void intel_enable_sdvo(struct intel_encoder *encoder)
+static void intel_enable_sdvo(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2930,10 +2936,12 @@ static bool
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
struct drm_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
+
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
- sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.dev.parent = &pdev->dev;
sdvo->ddc.algo_data = sdvo;
sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7c08e4f29032..dbed12c484c9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -36,6 +36,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
+#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -202,23 +203,24 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- u32 plane_ctl, stride_div, stride;
+ u32 plane_ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr;
- u32 tile_height, plane_offset, plane_size;
+ u32 surf_addr = plane_state->main.offset;
unsigned int rotation = plane_state->base.rotation;
- int x_offset, y_offset;
- int crtc_x = plane_state->dst.x1;
- int crtc_y = plane_state->dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->dst);
- uint32_t x = plane_state->src.x1 >> 16;
- uint32_t y = plane_state->src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+ u32 stride = skl_plane_stride(fb, 0, rotation);
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -229,14 +231,8 @@ skl_update_plane(struct drm_plane *drm_plane,
plane_ctl |= skl_plane_ctl_rotation(rotation);
- stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
- fb->pixel_format);
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
- crtc_w--;
- crtc_h--;
+ if (wm->dirty_pipes & drm_crtc_mask(crtc))
+ skl_write_plane_wm(intel_crtc, wm, plane);
if (key->flags) {
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
@@ -249,28 +245,15 @@ skl_update_plane(struct drm_plane *drm_plane,
else if (key->flags & I915_SET_COLORKEY_SOURCE)
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
- surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
-
- if (intel_rotation_90_or_270(rotation)) {
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-
- /* stride: Surface height in tiles */
- tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
- stride = DIV_ROUND_UP(fb->height, tile_height);
- plane_size = (src_w << 16) | src_h;
- x_offset = stride * tile_height - y - (src_h + 1);
- y_offset = x;
- } else {
- stride = fb->pitches[0] / stride_div;
- plane_size = (src_h << 16) | src_w;
- x_offset = x;
- y_offset = y;
- }
- plane_offset = y_offset << 16 | x_offset;
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
- I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset);
+ I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
- I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
+ I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w);
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
@@ -295,7 +278,8 @@ skl_update_plane(struct drm_plane *drm_plane,
}
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
- I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
+ I915_WRITE(PLANE_SURF(pipe, plane),
+ intel_fb_gtt_offset(fb, rotation) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
@@ -308,6 +292,14 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
+ /*
+ * We only populate skl_results on watermark updates, and if the
+ * plane's visibility isn't actually changing, neither are its watermarks.
+ */
+ if (!dplane->state->visible)
+ skl_write_plane_wm(to_intel_crtc(crtc),
+ &dev_priv->wm.skl_results, plane);
+
I915_WRITE(PLANE_CTL(pipe, plane), 0);
I915_WRITE(PLANE_SURF(pipe, plane), 0);
@@ -362,22 +354,20 @@ vlv_update_plane(struct drm_plane *dplane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
- unsigned int rotation = dplane->state->rotation;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->dst.x1;
- int crtc_y = plane_state->dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->dst);
- uint32_t x = plane_state->src.x1 >> 16;
- uint32_t y = plane_state->src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->base.src.x1 >> 16;
+ uint32_t y = plane_state->base.src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
sprctl = SP_ENABLE;
@@ -430,7 +420,7 @@ vlv_update_plane(struct drm_plane *dplane,
*/
sprctl |= SP_GAMMA_ENABLE;
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
/* Sizes are 0 based */
@@ -439,19 +429,18 @@ vlv_update_plane(struct drm_plane *dplane,
crtc_w--;
crtc_h--;
- linear_offset = y * fb->pitches[0] + x * cpp;
- sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
- fb->pitches[0], rotation);
- linear_offset -= sprsurf_offset;
+ intel_add_fb_offsets(&x, &y, plane_state, 0);
+ sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == DRM_ROTATE_180) {
sprctl |= SP_ROTATE_180;
x += src_w;
y += src_h;
- linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
if (key->flags) {
I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
@@ -467,7 +456,7 @@ vlv_update_plane(struct drm_plane *dplane,
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
else
I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
@@ -476,8 +465,8 @@ vlv_update_plane(struct drm_plane *dplane,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
- sprsurf_offset);
+ I915_WRITE(SPSURF(pipe, plane),
+ intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -505,21 +494,19 @@ ivb_update_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
u32 sprsurf_offset, linear_offset;
unsigned int rotation = plane_state->base.rotation;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->dst.x1;
- int crtc_y = plane_state->dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->dst);
- uint32_t x = plane_state->src.x1 >> 16;
- uint32_t y = plane_state->src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->base.src.x1 >> 16;
+ uint32_t y = plane_state->base.src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
sprctl = SPRITE_ENABLE;
@@ -552,7 +539,7 @@ ivb_update_plane(struct drm_plane *plane,
*/
sprctl |= SPRITE_GAMMA_ENABLE;
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -572,22 +559,21 @@ ivb_update_plane(struct drm_plane *plane,
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- linear_offset = y * fb->pitches[0] + x * cpp;
- sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
- fb->pitches[0], rotation);
- linear_offset -= sprsurf_offset;
+ intel_add_fb_offsets(&x, &y, plane_state, 0);
+ sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == DRM_ROTATE_180) {
sprctl |= SPRITE_ROTATE_180;
/* HSW and BDW does this automagically in hardware */
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
x += src_w;
y += src_h;
- linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
}
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
if (key->flags) {
I915_WRITE(SPRKEYVAL(pipe), key->min_value);
I915_WRITE(SPRKEYMAX(pipe), key->max_value);
@@ -606,7 +592,7 @@ ivb_update_plane(struct drm_plane *plane,
* register */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
- else if (obj->tiling_mode != I915_TILING_NONE)
+ else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(SPRLINOFF(pipe), linear_offset);
@@ -616,7 +602,7 @@ ivb_update_plane(struct drm_plane *plane,
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe),
- i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
+ intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -646,21 +632,19 @@ ilk_update_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
u32 dvscntr, dvsscale;
u32 dvssurf_offset, linear_offset;
unsigned int rotation = plane_state->base.rotation;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->dst.x1;
- int crtc_y = plane_state->dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->dst);
- uint32_t x = plane_state->src.x1 >> 16;
- uint32_t y = plane_state->src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->base.src.x1 >> 16;
+ uint32_t y = plane_state->base.src.y1 >> 16;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
dvscntr = DVS_ENABLE;
@@ -693,7 +677,7 @@ ilk_update_plane(struct drm_plane *plane,
*/
dvscntr |= DVS_GAMMA_ENABLE;
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
if (IS_GEN6(dev))
@@ -709,19 +693,18 @@ ilk_update_plane(struct drm_plane *plane,
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
- linear_offset = y * fb->pitches[0] + x * cpp;
- dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
- fb->pitches[0], rotation);
- linear_offset -= dvssurf_offset;
+ intel_add_fb_offsets(&x, &y, plane_state, 0);
+ dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
- if (rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == DRM_ROTATE_180) {
dvscntr |= DVS_ROTATE_180;
x += src_w;
y += src_h;
- linear_offset += src_h * fb->pitches[0] + src_w * cpp;
}
+ linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+
if (key->flags) {
I915_WRITE(DVSKEYVAL(pipe), key->min_value);
I915_WRITE(DVSKEYMAX(pipe), key->max_value);
@@ -736,7 +719,7 @@ ilk_update_plane(struct drm_plane *plane,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- if (obj->tiling_mode != I915_TILING_NONE)
+ if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(DVSLINOFF(pipe), linear_offset);
@@ -745,7 +728,7 @@ ilk_update_plane(struct drm_plane *plane,
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe),
- i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
+ intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
@@ -778,15 +761,26 @@ intel_check_sprite_plane(struct drm_plane *plane,
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
- struct drm_rect *src = &state->src;
- struct drm_rect *dst = &state->dst;
+ struct drm_rect *src = &state->base.src;
+ struct drm_rect *dst = &state->base.dst;
const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
bool can_scale;
+ int ret;
+
+ src->x1 = state->base.src_x;
+ src->y1 = state->base.src_y;
+ src->x2 = state->base.src_x + state->base.src_w;
+ src->y2 = state->base.src_y + state->base.src_h;
+
+ dst->x1 = state->base.crtc_x;
+ dst->y1 = state->base.crtc_y;
+ dst->x2 = state->base.crtc_x + state->base.crtc_w;
+ dst->y2 = state->base.crtc_y + state->base.crtc_h;
if (!fb) {
- state->visible = false;
+ state->base.visible = false;
return 0;
}
@@ -834,14 +828,14 @@ intel_check_sprite_plane(struct drm_plane *plane,
vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(vscale < 0);
- state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+ state->base.visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
crtc_x = dst->x1;
crtc_y = dst->y1;
crtc_w = drm_rect_width(dst);
crtc_h = drm_rect_height(dst);
- if (state->visible) {
+ if (state->base.visible) {
/* check again in case clipping clamped the results */
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
if (hscale < 0) {
@@ -898,12 +892,12 @@ intel_check_sprite_plane(struct drm_plane *plane,
crtc_w &= ~1;
if (crtc_w == 0)
- state->visible = false;
+ state->base.visible = false;
}
}
/* Check size restrictions when scaling */
- if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
+ if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
@@ -912,10 +906,10 @@ intel_check_sprite_plane(struct drm_plane *plane,
/* FIXME interlacing min height is 6 */
if (crtc_w < 3 || crtc_h < 3)
- state->visible = false;
+ state->base.visible = false;
if (src_w < 3 || src_h < 3)
- state->visible = false;
+ state->base.visible = false;
width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
@@ -926,7 +920,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
}
}
- if (state->visible) {
+ if (state->base.visible) {
src->x1 = src_x << 16;
src->x2 = (src_x + src_w) << 16;
src->y1 = src_y << 16;
@@ -938,6 +932,12 @@ intel_check_sprite_plane(struct drm_plane *plane,
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
+ if (INTEL_GEN(dev) >= 9) {
+ ret = skl_check_plane_surface(state);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 49136ad5473e..d960e4866595 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -838,7 +838,9 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
}
static void
-intel_enable_tv(struct intel_encoder *encoder)
+intel_enable_tv(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -851,7 +853,9 @@ intel_enable_tv(struct intel_encoder *encoder)
}
static void
-intel_disable_tv(struct intel_encoder *encoder)
+intel_disable_tv(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -908,7 +912,8 @@ intel_tv_get_config(struct intel_encoder *encoder,
static bool
intel_tv_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1010,7 +1015,9 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
color_conversion->av);
}
-static void intel_tv_pre_enable(struct intel_encoder *encoder)
+static void intel_tv_pre_enable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ff80a81b1a84..ee2306a79747 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -435,7 +435,7 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_disable_gt_powersave(dev_priv);
+ intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -796,10 +796,9 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const bool read,
const bool before)
{
- if (WARN(check_for_unclaimed_mmio(dev_priv),
- "Unclaimed register detected %s %s register 0x%x\n",
- before ? "before" : "after",
- read ? "reading" : "writing to",
+ if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
+ "Unclaimed %s register 0x%x\n",
+ read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
i915.mmio_debug--; /* Only report the first N failures */
}
@@ -1018,11 +1017,9 @@ gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
-__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
-__gen2_write(64)
#undef __gen5_write
#undef __gen2_write
@@ -1112,23 +1109,18 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
-__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
-__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
-__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
-__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
-__gen6_write(64)
#undef __gen9_write
#undef __chv_write
@@ -1158,7 +1150,6 @@ static void vgpu_write##x(struct drm_i915_private *dev_priv, \
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
-__vgpu_write(64)
#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
@@ -1169,7 +1160,6 @@ do { \
dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
dev_priv->uncore.funcs.mmio_writew = x##_write16; \
dev_priv->uncore.funcs.mmio_writel = x##_write32; \
- dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)
#define ASSIGN_READ_MMIO_VFUNCS(x) \
@@ -1597,8 +1587,10 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN6_GRDOM_FULL;
} else {
+ unsigned int tmp;
+
hw_mask = 0;
- for_each_engine_masked(engine, dev_priv, engine_mask)
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
hw_mask |= hw_engine_mask[engine->id];
}
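The masked iterator now takes a caller-supplied scratch word; a minimal usage sketch (editor's illustration; engine->name is assumed to exist on intel_engine_cs):

static void log_masked_engines(struct drm_i915_private *dev_priv,
                               unsigned int engine_mask)
{
        struct intel_engine_cs *engine;
        unsigned int tmp; /* scratch storage consumed by the macro */

        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                DRM_DEBUG_DRIVER("engine %s selected\n", engine->name);
}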
@@ -1618,8 +1610,10 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
 * @timeout_ms: timeout in milliseconds
*
* This routine waits until the target register @reg contains the expected
- * @value after applying the @mask, i.e. it waits until
- * (I915_READ_FW(@reg) & @mask) == @value
+ * @value after applying the @mask, i.e. it waits until ::
+ *
+ * (I915_READ_FW(reg) & mask) == value
+ *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
*
* Note that this routine assumes the caller holds forcewake asserted, it is
@@ -1652,8 +1646,10 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
 * @timeout_ms: timeout in milliseconds
*
* This routine waits until the target register @reg contains the expected
- * @value after applying the @mask, i.e. it waits until
- * (I915_READ(@reg) & @mask) == @value
+ * @value after applying the @mask, i.e. it waits until ::
+ *
+ * (I915_READ(reg) & mask) == value
+ *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
*
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
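A sketch of a caller of the helper documented above (editor's illustration; the register and bit are plausible but chosen only for the example):

static int wait_for_ring_idle(struct drm_i915_private *dev_priv,
                              struct intel_engine_cs *engine)
{
        /* Poll until (I915_READ(MI_MODE) & MODE_IDLE) == MODE_IDLE,
         * giving up after 1000 ms.
         */
        return intel_wait_for_register(dev_priv,
                                       RING_MI_MODE(engine->mmio_base),
                                       MODE_IDLE, MODE_IDLE,
                                       1000);
}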
@@ -1710,15 +1706,16 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
unsigned engine_mask)
{
struct intel_engine_cs *engine;
+ unsigned int tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask)
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
if (gen8_request_engine_reset(engine))
goto not_ready;
return gen6_reset_engines(dev_priv, engine_mask);
not_ready:
- for_each_engine_masked(engine, dev_priv, engine_mask)
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
gen8_unrequest_engine_reset(engine);
return -EIO;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 68db9621f1f0..8886cab19f98 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -280,7 +280,8 @@ struct common_child_dev_config {
u8 dp_support:1;
u8 tmds_support:1;
u8 support_reserved:5;
- u8 not_common3[12];
+ u8 aux_channel;
+ u8 not_common3[11];
u8 iboost_level;
} __packed;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 7bf90e9e6139..9672b579f950 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -16,7 +16,6 @@
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reservation.h>
@@ -58,12 +57,6 @@ static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
#endif
-unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
-{
- return drm_crtc_index(crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_id);
-
static void imx_drm_driver_lastclose(struct drm_device *drm)
{
struct imx_drm_device *imxdrm = drm->dev_private;
@@ -71,43 +64,6 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
}
-static int imx_drm_driver_unload(struct drm_device *drm)
-{
- struct imx_drm_device *imxdrm = drm->dev_private;
-
- drm_kms_helper_poll_fini(drm);
-
- if (imxdrm->fbhelper)
- drm_fbdev_cma_fini(imxdrm->fbhelper);
-
- component_unbind_all(drm->dev, drm);
-
- drm_vblank_cleanup(drm);
- drm_mode_config_cleanup(drm);
-
- platform_set_drvdata(drm->platformdev, NULL);
-
- return 0;
-}
-
-int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
-{
- return drm_crtc_vblank_get(imx_drm_crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
-
-void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
-{
- drm_crtc_vblank_put(imx_drm_crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
-
-void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
-{
- drm_crtc_handle_vblank(imx_drm_crtc->crtc);
-}
-EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
-
static int imx_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
{
struct imx_drm_device *imxdrm = drm->dev_private;
@@ -195,54 +151,49 @@ static int imx_drm_atomic_check(struct drm_device *dev,
return ret;
}
+static int imx_drm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ struct dma_buf *dma_buf;
+ int i;
+
+ /*
+ * If the plane fb has a dma-buf attached, fish out the exclusive
+ * fence for the atomic helper to wait on.
+ */
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+ dma_buf = drm_fb_cma_get_gem_obj(plane_state->fb,
+ 0)->base.dma_buf;
+ if (!dma_buf)
+ continue;
+ plane_state->fence =
+ reservation_object_get_excl_rcu(dma_buf->resv);
+ }
+ }
+
+ return drm_atomic_helper_commit(dev, state, nonblock);
+}
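What the atomic helper then does with the stashed fence, in sketch form (editor's illustration; fence_wait()/fence_put() match the pre-rename fence API already used in this file):

static void wait_plane_fence(struct drm_plane_state *plane_state)
{
        if (plane_state->fence) {
                fence_wait(plane_state->fence, false); /* uninterruptible */
                fence_put(plane_state->fence);
                plane_state->fence = NULL;
        }
}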
+
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
.atomic_check = imx_drm_atomic_check,
- .atomic_commit = drm_atomic_helper_commit,
+ .atomic_commit = imx_drm_atomic_commit,
};
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_plane_state *plane_state;
- struct drm_gem_cma_object *cma_obj;
- struct fence *excl;
- unsigned shared_count;
- struct fence **shared;
- unsigned int i, j;
- int ret;
-
- /* Wait for fences. */
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- plane_state = crtc->primary->state;
- if (plane_state->fb) {
- cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
- if (cma_obj->base.dma_buf) {
- ret = reservation_object_get_fences_rcu(
- cma_obj->base.dma_buf->resv, &excl,
- &shared_count, &shared);
- if (unlikely(ret))
- DRM_ERROR("failed to get fences "
- "for buffer\n");
-
- if (excl) {
- fence_wait(excl, false);
- fence_put(excl);
- }
- for (j = 0; j < shared_count; i++) {
- fence_wait(shared[j], false);
- fence_put(shared[j]);
- }
- }
- }
- }
drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_planes(dev, state, true);
+ drm_atomic_helper_commit_planes(dev, state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY |
+ DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
drm_atomic_helper_commit_modeset_enables(dev, state);
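
The commit-tail ordering above follows the helper contract for drivers that turn planes off from the CRTC's ->atomic_disable hook: DRM_PLANE_COMMIT_ACTIVE_ONLY restricts plane updates to active CRTCs, and DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET skips a redundant plane disable when the modeset path has already shut the plane down. Consolidated into plain C, the tail reads roughly:

        static void example_commit_tail(struct drm_atomic_state *state)
        {
                struct drm_device *dev = state->dev;

                /* Shut down CRTCs/encoders that are being turned off... */
                drm_atomic_helper_commit_modeset_disables(dev, state);
                /* ...update only planes on active CRTCs, without disabling
                 * planes the modeset path has already turned off... */
                drm_atomic_helper_commit_planes(dev, state,
                                DRM_PLANE_COMMIT_ACTIVE_ONLY |
                                DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET);
                /* ...then light up the new configuration. */
                drm_atomic_helper_commit_modeset_enables(dev, state);
        }
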
@@ -258,111 +209,6 @@ static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
};
/*
- * Main DRM initialisation. This binds, initialises and registers
- * with DRM the subcomponents of the driver.
- */
-static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
-{
- struct imx_drm_device *imxdrm;
- struct drm_connector *connector;
- int ret;
-
- imxdrm = devm_kzalloc(drm->dev, sizeof(*imxdrm), GFP_KERNEL);
- if (!imxdrm)
- return -ENOMEM;
-
- imxdrm->drm = drm;
-
- drm->dev_private = imxdrm;
-
- /*
- * enable drm irq mode.
- * - with irq_enabled = true, we can use the vblank feature.
- *
- * P.S. note that we wouldn't use drm irq handler but
- * just specific driver own one instead because
- * drm framework supports only one irq handler and
- * drivers can well take care of their interrupts
- */
- drm->irq_enabled = true;
-
- /*
- * set max width and height as default value(4096x4096).
- * this value would be used to check framebuffer size limitation
- * at drm_mode_addfb().
- */
- drm->mode_config.min_width = 64;
- drm->mode_config.min_height = 64;
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
- drm->mode_config.funcs = &imx_drm_mode_config_funcs;
- drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
-
- drm_mode_config_init(drm);
-
- ret = drm_vblank_init(drm, MAX_CRTC);
- if (ret)
- goto err_kms;
-
- platform_set_drvdata(drm->platformdev, drm);
-
- /* Now try and bind all our sub-components */
- ret = component_bind_all(drm->dev, drm);
- if (ret)
- goto err_vblank;
-
- /*
- * All components are now added, we can publish the connector sysfs
- * entries to userspace. This will generate hotplug events and so
- * userspace will expect to be able to access DRM at this point.
- */
- list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- ret = drm_connector_register(connector);
- if (ret) {
- dev_err(drm->dev,
- "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
- connector->base.id,
- connector->name, ret);
- goto err_unbind;
- }
- }
-
- drm_mode_config_reset(drm);
-
- /*
- * All components are now initialised, so setup the fb helper.
- * The fb helper takes copies of key hardware information, so the
- * crtcs/connectors/encoders must not change after this point.
- */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
- if (legacyfb_depth != 16 && legacyfb_depth != 32) {
- dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
- legacyfb_depth = 16;
- }
- imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
- drm->mode_config.num_crtc, MAX_CRTC);
- if (IS_ERR(imxdrm->fbhelper)) {
- ret = PTR_ERR(imxdrm->fbhelper);
- imxdrm->fbhelper = NULL;
- goto err_unbind;
- }
-#endif
-
- drm_kms_helper_poll_init(drm);
-
- return 0;
-
-err_unbind:
- component_unbind_all(drm->dev, drm);
-err_vblank:
- drm_vblank_cleanup(drm);
-err_kms:
- drm_mode_config_cleanup(drm);
-
- return ret;
-}
-
-/*
* imx_drm_add_crtc - add a new crtc
*/
int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
@@ -454,8 +300,6 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
static struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .load = imx_drm_driver_load,
- .unload = imx_drm_driver_unload,
.lastclose = imx_drm_driver_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -508,12 +352,124 @@ static int compare_of(struct device *dev, void *data)
static int imx_drm_bind(struct device *dev)
{
- return drm_platform_init(&imx_drm_driver, to_platform_device(dev));
+ struct drm_device *drm;
+ struct imx_drm_device *imxdrm;
+ int ret;
+
+ drm = drm_dev_alloc(&imx_drm_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
+ if (!imxdrm) {
+ ret = -ENOMEM;
+ goto err_unref;
+ }
+
+ imxdrm->drm = drm;
+ drm->dev_private = imxdrm;
+
+ /*
+ * enable drm irq mode.
+ * - with irq_enabled = true, we can use the vblank feature.
+ *
+	 * Note that we do not use the drm core's irq handler; each driver
+	 * registers its own handler instead, because the drm framework
+	 * supports only one irq handler and drivers can take care of
+	 * their own interrupts.
+ */
+ drm->irq_enabled = true;
+
+ /*
+	 * Set max width and height to the default of 4096x4096. These
+	 * values are used to check framebuffer size limits in
+	 * drm_mode_addfb().
+ */
+ drm->mode_config.min_width = 64;
+ drm->mode_config.min_height = 64;
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+ drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
+
+ drm_mode_config_init(drm);
+
+ ret = drm_vblank_init(drm, MAX_CRTC);
+ if (ret)
+ goto err_kms;
+
+ dev_set_drvdata(dev, drm);
+
+	/* Now try to bind all our sub-components */
+ ret = component_bind_all(dev, drm);
+ if (ret)
+ goto err_vblank;
+
+ drm_mode_config_reset(drm);
+
+ /*
+ * All components are now initialised, so setup the fb helper.
+ * The fb helper takes copies of key hardware information, so the
+ * crtcs/connectors/encoders must not change after this point.
+ */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
+ if (legacyfb_depth != 16 && legacyfb_depth != 32) {
+ dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
+ legacyfb_depth = 16;
+ }
+ imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
+ drm->mode_config.num_crtc, MAX_CRTC);
+ if (IS_ERR(imxdrm->fbhelper)) {
+ ret = PTR_ERR(imxdrm->fbhelper);
+ imxdrm->fbhelper = NULL;
+ goto err_unbind;
+ }
+#endif
+
+ drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_fbhelper;
+
+ return 0;
+
+err_fbhelper:
+ drm_kms_helper_poll_fini(drm);
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
+ if (imxdrm->fbhelper)
+ drm_fbdev_cma_fini(imxdrm->fbhelper);
+err_unbind:
+#endif
+ component_unbind_all(drm->dev, drm);
+err_vblank:
+ drm_vblank_cleanup(drm);
+err_kms:
+ drm_mode_config_cleanup(drm);
+err_unref:
+ drm_dev_unref(drm);
+
+ return ret;
}
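
This bind path replaces the legacy .load callback with the generic drm_dev_alloc()/drm_dev_register() skeleton, where each setup step gains a matching goto label so a late failure unwinds everything done so far, and registration happens only once the device is fully set up. Stripped of the imx specifics, the shape is roughly (the example_* names are illustrative):

        static struct drm_driver example_driver;        /* illustrative */

        static int example_bind(struct device *dev)
        {
                struct drm_device *drm;
                int ret;

                drm = drm_dev_alloc(&example_driver, dev);
                if (IS_ERR(drm))
                        return PTR_ERR(drm);

                drm_mode_config_init(drm);

                ret = component_bind_all(dev, drm);     /* bind sub-devices */
                if (ret)
                        goto err_kms;

                ret = drm_dev_register(drm, 0);         /* go live only now */
                if (ret)
                        goto err_unbind;

                return 0;

        err_unbind:
                component_unbind_all(dev, drm);
        err_kms:
                drm_mode_config_cleanup(drm);
                drm_dev_unref(drm);
                return ret;
        }
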
static void imx_drm_unbind(struct device *dev)
{
- drm_put_dev(dev_get_drvdata(dev));
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct imx_drm_device *imxdrm = drm->dev_private;
+
+ drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+
+ if (imxdrm->fbhelper)
+ drm_fbdev_cma_fini(imxdrm->fbhelper);
+
+ drm_mode_config_cleanup(drm);
+
+ component_unbind_all(drm->dev, drm);
+ dev_set_drvdata(dev, NULL);
+
+ drm_dev_unref(drm);
}
static const struct component_master_ops imx_drm_ops = {
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 07d33e45f90f..5a91cb16c8fa 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -13,8 +13,6 @@ struct drm_plane;
struct imx_drm_crtc;
struct platform_device;
-unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
-
struct imx_crtc_state {
struct drm_crtc_state base;
u32 bus_format;
@@ -44,10 +42,6 @@ int imx_drm_init_drm(struct platform_device *pdev,
int preferred_bpp);
int imx_drm_exit_drm(void);
-int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc);
-void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc);
-void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc);
-
void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index b03919ed60ba..3ce391c239b0 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -57,7 +57,11 @@ struct imx_ldb_channel {
struct imx_ldb *ldb;
struct drm_connector connector;
struct drm_encoder encoder;
+
+	/* What is connected to the LDB; only one of these is set at a time */
struct drm_panel *panel;
+ struct drm_bridge *bridge;
+
struct device_node *child;
struct i2c_adapter *ddc;
int chno;
@@ -66,6 +70,7 @@ struct imx_ldb_channel {
struct drm_display_mode mode;
int mode_valid;
u32 bus_format;
+ u32 bus_flags;
};
static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
@@ -251,11 +256,13 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
drm_panel_enable(imx_ldb_ch->panel);
}
-static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *orig_mode,
- struct drm_display_mode *mode)
+static void
+imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct imx_ldb *ldb = imx_ldb_ch->ldb;
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
unsigned long serial_clk;
@@ -297,17 +304,11 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
}
if (!bus_format) {
- struct drm_connector *connector;
-
- drm_for_each_connector(connector, encoder->dev) {
- struct drm_display_info *di = &connector->display_info;
+ struct drm_connector *connector = connector_state->connector;
+ struct drm_display_info *di = &connector->display_info;
- if (connector->encoder == encoder &&
- di->num_bus_formats) {
- bus_format = di->bus_formats[0];
- break;
- }
- }
+ if (di->num_bus_formats)
+ bus_format = di->bus_formats[0];
}
imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
}
@@ -379,8 +380,13 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
u32 bus_format = imx_ldb_ch->bus_format;
/* Bus format description in DT overrides connector display info. */
- if (!bus_format && di->num_bus_formats)
+ if (!bus_format && di->num_bus_formats) {
bus_format = di->bus_formats[0];
+ imx_crtc_state->bus_flags = di->bus_flags;
+ } else {
+ bus_format = imx_ldb_ch->bus_format;
+ imx_crtc_state->bus_flags = imx_ldb_ch->bus_flags;
+ }
switch (bus_format) {
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
@@ -420,7 +426,7 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
};
static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
- .mode_set = imx_ldb_encoder_mode_set,
+ .atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
.enable = imx_ldb_encoder_enable,
.disable = imx_ldb_encoder_disable,
.atomic_check = imx_ldb_encoder_atomic_check,
@@ -466,10 +472,30 @@ static int imx_ldb_register(struct drm_device *drm,
drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
- drm_connector_helper_add(&imx_ldb_ch->connector,
- &imx_ldb_connector_helper_funcs);
- drm_connector_init(drm, &imx_ldb_ch->connector,
- &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
+ if (imx_ldb_ch->bridge) {
+ imx_ldb_ch->bridge->encoder = encoder;
+
+ imx_ldb_ch->encoder.bridge = imx_ldb_ch->bridge;
+ ret = drm_bridge_attach(drm, imx_ldb_ch->bridge);
+ if (ret) {
+ DRM_ERROR("Failed to initialize bridge with drm\n");
+ return ret;
+ }
+ } else {
+ /*
+		 * Add the connector whenever there is no bridge that
+		 * brings its own, not only when there is a panel. For
+		 * historical reasons, the LDB driver can also work
+		 * without a panel.
+ */
+ drm_connector_helper_add(&imx_ldb_ch->connector,
+ &imx_ldb_connector_helper_funcs);
+ drm_connector_init(drm, &imx_ldb_ch->connector,
+ &imx_ldb_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
+ encoder);
+ }
if (imx_ldb_ch->panel) {
ret = drm_panel_attach(imx_ldb_ch->panel,
@@ -478,8 +504,6 @@ static int imx_ldb_register(struct drm_device *drm,
return ret;
}
- drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
-
return 0;
}
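
The bridge-or-connector branch above is the usual either/or: if the remote endpoint supplied a drm_bridge, attach it and let it provide the connector; otherwise create a local LVDS connector. Condensed (funcs and helper_funcs stand in for the driver's connector ops; drm_bridge_attach() has the two-argument 4.8-era signature used in the hunk):

        /* Within imx_ldb_register(); ch, encoder and drm as in the hunk. */
        if (ch->bridge) {
                /* The bridge supplies its own connector downstream. */
                ch->bridge->encoder = encoder;
                encoder->bridge = ch->bridge;
                ret = drm_bridge_attach(drm, ch->bridge);
        } else {
                /* No bridge: create and wire up a local LVDS connector. */
                drm_connector_helper_add(&ch->connector, &helper_funcs);
                drm_connector_init(drm, &ch->connector, &funcs,
                                   DRM_MODE_CONNECTOR_LVDS);
                drm_mode_connector_attach_encoder(&ch->connector, encoder);
        }
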
@@ -548,6 +572,46 @@ static const struct of_device_id imx_ldb_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids);
+static int imx_ldb_panel_ddc(struct device *dev,
+ struct imx_ldb_channel *channel, struct device_node *child)
+{
+ struct device_node *ddc_node;
+ const u8 *edidp;
+ int ret;
+
+ ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
+ if (ddc_node) {
+ channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ of_node_put(ddc_node);
+ if (!channel->ddc) {
+ dev_warn(dev, "failed to get ddc i2c adapter\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ if (!channel->ddc) {
+		/* if no DDC is available, fall back to a hardcoded EDID */
+ dev_dbg(dev, "no ddc available\n");
+
+ edidp = of_get_property(child, "edid",
+ &channel->edid_len);
+ if (edidp) {
+ channel->edid = kmemdup(edidp,
+ channel->edid_len,
+ GFP_KERNEL);
+ } else if (!channel->panel) {
+ /* fallback to display-timings node */
+ ret = of_get_drm_display_mode(child,
+ &channel->mode,
+ &channel->bus_flags,
+ OF_USE_NATIVE_MODE);
+ if (!ret)
+ channel->mode_valid = 1;
+ }
+ }
+ return 0;
+}
+
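
imx_ldb_panel_ddc() establishes a three-level priority for where a channel's modes come from: a DDC i2c bus if the device tree wires one up, else a hardcoded EDID blob, else the display-timings node. A sketch of that ordering (the helper name is illustrative; the fields are the ones from the hunk):

        /* Sketch: mode-source priority implemented by imx_ldb_panel_ddc(). */
        static const char *example_mode_source(struct imx_ldb_channel *ch)
        {
                if (ch->ddc)
                        return "ddc-i2c-bus";     /* live EDID over i2c */
                if (ch->edid)
                        return "edid property";   /* EDID blob from the DT */
                if (ch->mode_valid)
                        return "display-timings"; /* one fixed native mode */
                return "none";
        }
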
static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
@@ -555,7 +619,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
const struct of_device_id *of_id =
of_match_device(imx_ldb_dt_ids, dev);
struct device_node *child;
- const u8 *edidp;
struct imx_ldb *imx_ldb;
int dual;
int ret;
@@ -605,7 +668,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
for_each_child_of_node(np, child) {
struct imx_ldb_channel *channel;
- struct device_node *ddc_node;
struct device_node *ep;
int bus_format;
@@ -638,46 +700,25 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
remote = of_graph_get_remote_port_parent(ep);
of_node_put(ep);
- if (remote)
+ if (remote) {
channel->panel = of_drm_find_panel(remote);
- else
+ channel->bridge = of_drm_find_bridge(remote);
+ } else
return -EPROBE_DEFER;
of_node_put(remote);
- if (!channel->panel) {
- dev_err(dev, "panel not found: %s\n",
+
+ if (!channel->panel && !channel->bridge) {
+ dev_err(dev, "panel/bridge not found: %s\n",
remote->full_name);
return -EPROBE_DEFER;
}
}
- ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
- if (ddc_node) {
- channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
- of_node_put(ddc_node);
- if (!channel->ddc) {
- dev_warn(dev, "failed to get ddc i2c adapter\n");
- return -EPROBE_DEFER;
- }
- }
-
- if (!channel->ddc) {
- /* if no DDC available, fallback to hardcoded EDID */
- dev_dbg(dev, "no ddc available\n");
-
- edidp = of_get_property(child, "edid",
- &channel->edid_len);
- if (edidp) {
- channel->edid = kmemdup(edidp,
- channel->edid_len,
- GFP_KERNEL);
- } else if (!channel->panel) {
- /* fallback to display-timings node */
- ret = of_get_drm_display_mode(child,
- &channel->mode,
- OF_USE_NATIVE_MODE);
- if (!ret)
- channel->mode_valid = 1;
- }
+		/* look up panel DDC/EDID only if there is no bridge */
+ if (!channel->bridge) {
+ ret = imx_ldb_panel_ddc(dev, channel, child);
+ if (ret)
+ return ret;
}
bus_format = of_get_bus_format(dev, child);
@@ -716,11 +757,10 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
for (i = 0; i < 2; i++) {
struct imx_ldb_channel *channel = &imx_ldb->channel[i];
- if (!channel->connector.funcs)
- continue;
-
- channel->connector.funcs->destroy(&channel->connector);
- channel->encoder.funcs->destroy(&channel->encoder);
+ if (channel->bridge)
+ drm_bridge_detach(channel->bridge);
+ if (channel->panel)
+ drm_panel_detach(channel->panel);
kfree(channel->edid);
i2c_put_adapter(channel->ddc);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 5e875944ffa2..8fc088843e55 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -685,9 +685,6 @@ static void imx_tve_unbind(struct device *dev, struct device *master,
{
struct imx_tve *tve = dev_get_drvdata(dev);
- tve->connector.funcs->destroy(&tve->connector);
- tve->encoder.funcs->destroy(&tve->encoder);
-
if (!IS_ERR(tve->dac_reg))
regulator_disable(tve->dac_reg);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 462056e4b9e4..6be515a9fb69 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -21,7 +21,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <linux/fb.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <drm/drm_gem_cma_helper.h>
@@ -61,13 +60,20 @@ static void ipu_crtc_enable(struct drm_crtc *crtc)
ipu_di_enable(ipu_crtc->di);
}
-static void ipu_crtc_disable(struct drm_crtc *crtc)
+static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
ipu_dc_disable_channel(ipu_crtc->dc);
ipu_di_disable(ipu_crtc->di);
+ /*
+	 * Planes must be disabled before the DC clock is removed, as
+	 * otherwise the attached IDMACs will be left in an undefined state,
+	 * possibly hanging the IPU or even the system.
+ */
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
ipu_dc_disable(ipu);
spin_lock_irq(&crtc->dev->event_lock);
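
The ordering constraint in the comment matters because the IDMAC channels feeding each plane can only be stopped safely while the display controller clock still runs; planes are therefore forced off inside ->atomic_disable, after scanout stops but before the clock is gated. A compressed sketch (the example_* helpers are illustrative; the helper call is the one from the hunk):

        static void example_crtc_atomic_disable(struct drm_crtc *crtc,
                                                struct drm_crtc_state *old_state)
        {
                example_stop_scanout(crtc);     /* dc/di channel disable */
                /* Force planes off while the DC clock is still running,
                 * so their IDMACs are not left in an undefined state. */
                drm_atomic_helper_disable_planes_on_crtc(old_state, false);
                example_gate_dc_clock(crtc);    /* now safe to remove clock */
        }
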
@@ -123,9 +129,14 @@ static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
kfree(to_imx_crtc_state(state));
}
+static void imx_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ imx_drm_remove_crtc(to_ipu_crtc(crtc)->imx_crtc);
+}
+
static const struct drm_crtc_funcs ipu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = drm_crtc_cleanup,
+ .destroy = imx_drm_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = imx_drm_crtc_reset,
.atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -136,7 +147,7 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
{
struct ipu_crtc *ipu_crtc = dev_id;
- imx_drm_handle_vblank(ipu_crtc->imx_crtc);
+ drm_crtc_handle_vblank(&ipu_crtc->base);
return IRQ_HANDLED;
}
@@ -246,7 +257,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
.mode_set_nofb = ipu_crtc_mode_set_nofb,
.atomic_check = ipu_crtc_atomic_check,
.atomic_begin = ipu_crtc_atomic_begin,
- .disable = ipu_crtc_disable,
+ .atomic_disable = ipu_crtc_atomic_disable,
.enable = ipu_crtc_enable,
};
@@ -414,8 +425,6 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
{
struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
- imx_drm_remove_crtc(ipu_crtc->imx_crtc);
-
ipu_put_resources(ipu_crtc);
if (ipu_crtc->plane[1])
ipu_plane_put_resources(ipu_crtc->plane[1]);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 29423e757d36..d5864ed4d772 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -103,11 +103,11 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
(state->src_x >> 16) / 2 - eba;
}
-static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
- struct drm_plane_state *old_state)
+static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane)
{
struct drm_plane *plane = &ipu_plane->base;
struct drm_plane_state *state = plane->state;
+ struct drm_crtc_state *crtc_state = state->crtc->state;
struct drm_framebuffer *fb = state->fb;
unsigned long eba, ubo, vbo;
int active;
@@ -117,7 +117,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
- if (old_state->fb)
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
break;
/*
@@ -149,7 +149,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
break;
}
- if (old_state->fb) {
+ if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
@@ -213,8 +213,12 @@ static void ipu_plane_enable(struct ipu_plane *ipu_plane)
ipu_dp_enable_channel(ipu_plane->dp);
}
-static void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static int ipu_disable_plane(struct drm_plane *plane)
{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
if (ipu_plane->dp)
@@ -223,15 +227,6 @@ static void ipu_plane_disable(struct ipu_plane *ipu_plane)
ipu_dmfc_disable_channel(ipu_plane->dmfc);
if (ipu_plane->dp)
ipu_dp_disable(ipu_plane->ipu);
-}
-
-static int ipu_disable_plane(struct drm_plane *plane)
-{
- struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- ipu_plane_disable(ipu_plane);
return 0;
}
@@ -242,7 +237,6 @@ static void ipu_plane_destroy(struct drm_plane *plane)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- ipu_disable_plane(plane);
drm_plane_cleanup(plane);
kfree(ipu_plane);
}
@@ -265,6 +259,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+ int hsub, vsub;
/* Ok to disable */
if (!fb)
@@ -320,8 +315,10 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
/*
* We support resizing active plane or changing its format by
- * forcing CRTC mode change and disabling-enabling plane in plane's
- * ->atomic_update callback.
+ * forcing CRTC mode change in plane's ->atomic_check callback
+ * and disabling all affected active planes in CRTC's ->atomic_disable
+ * callback. The planes will be reenabled in plane's ->atomic_update
+ * callback.
*/
if (old_fb && (state->src_w != old_state->src_w ||
state->src_h != old_state->src_h ||
@@ -359,7 +356,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
return -EINVAL;
- if (old_fb) {
+ if (old_fb &&
+ (old_fb->pixel_format == DRM_FORMAT_YUV420 ||
+ old_fb->pixel_format == DRM_FORMAT_YVU420)) {
old_ubo = drm_plane_state_to_ubo(old_state);
old_vbo = drm_plane_state_to_vbo(old_state);
if (ubo != old_ubo || vbo != old_vbo)
@@ -374,6 +373,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (old_fb && old_fb->pitches[1] != fb->pitches[1])
crtc_state->mode_changed = true;
+
+ /*
+ * The x/y offsets must be even in case of horizontal/vertical
+ * chroma subsampling.
+ */
+ hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+ if (((state->src_x >> 16) & (hsub - 1)) ||
+ ((state->src_y >> 16) & (vsub - 1)))
+ return -EINVAL;
}
return 0;
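
The new hsub/vsub check enforces that with chroma subsampling the clipped source origin lands on a whole chroma sample; for 4:2:0 both subsampling factors are 2, so src_x and src_y (16.16 fixed point) must resolve to even pixels. A worked sketch:

        /* Sketch of the evenness check for 4:2:0 (hsub = vsub = 2). */
        static bool example_chroma_misaligned(u32 src_x, u32 src_y)
        {
                int hsub = 2, vsub = 2; /* drm_format_*_chroma_subsampling() */

                /* e.g. src_x = 33 << 16: 33 & 1 == 1, so -EINVAL above. */
                return ((src_x >> 16) & (hsub - 1)) ||
                       ((src_y >> 16) & (vsub - 1));
        }
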
@@ -395,12 +404,10 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
if (old_state->fb) {
struct drm_crtc_state *crtc_state = state->crtc->state;
- if (!crtc_state->mode_changed) {
- ipu_plane_atomic_set_base(ipu_plane, old_state);
+ if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
+ ipu_plane_atomic_set_base(ipu_plane);
return;
}
-
- ipu_disable_plane(plane);
}
switch (ipu_plane->dp_flow) {
@@ -430,6 +437,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
break;
default:
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
}
}
@@ -443,7 +451,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
- ipu_plane_atomic_set_base(ipu_plane, old_state);
+ ipu_plane_atomic_set_base(ipu_plane);
ipu_plane_enable(ipu_plane);
}
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 1dad297b01fd..d796ada2a47a 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -33,6 +33,7 @@ struct imx_parallel_display {
void *edid;
int edid_len;
u32 bus_format;
+ u32 bus_flags;
struct drm_display_mode mode;
struct drm_panel *panel;
struct drm_bridge *bridge;
@@ -80,6 +81,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
return -EINVAL;
ret = of_get_drm_display_mode(np, &imxpd->mode,
+ &imxpd->bus_flags,
OF_USE_NATIVE_MODE);
if (ret)
return ret;
@@ -125,11 +127,13 @@ static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_display_info *di = &conn_state->connector->display_info;
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
- imx_crtc_state->bus_flags = di->bus_flags;
- if (!imxpd->bus_format && di->num_bus_formats)
+ if (!imxpd->bus_format && di->num_bus_formats) {
+ imx_crtc_state->bus_flags = di->bus_flags;
imx_crtc_state->bus_format = di->bus_formats[0];
- else
+ } else {
+ imx_crtc_state->bus_flags = imxpd->bus_flags;
imx_crtc_state->bus_format = imxpd->bus_format;
+ }
imx_crtc_state->di_hsync_pin = 2;
imx_crtc_state->di_vsync_pin = 3;
@@ -289,8 +293,10 @@ static void imx_pd_unbind(struct device *dev, struct device *master,
{
struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
- imxpd->encoder.funcs->destroy(&imxpd->encoder);
- imxpd->connector.funcs->destroy(&imxpd->connector);
+ if (imxpd->bridge)
+ drm_bridge_detach(imxpd->bridge);
+ if (imxpd->panel)
+ drm_panel_detach(imxpd->panel);
kfree(imxpd->edid);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 8f62671fcfbf..f75c5b5a536c 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -80,6 +80,7 @@ static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
ddp_comp);
priv->crtc = crtc;
+ writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
}
@@ -103,7 +104,8 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
}
static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh)
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
{
if (w != 0 && h != 0)
writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 5fb80cbe4c5b..0df05f95b916 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -106,7 +106,8 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
}
static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
- unsigned int height, unsigned int vrefresh)
+ unsigned int height, unsigned int vrefresh,
+ unsigned int bpc)
{
unsigned int threshold;
unsigned int reg;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0186e500d2a5..90fb831ef031 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -432,11 +432,16 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
unsigned long pll_rate;
unsigned int factor;
+	/* keep pll_rate within the valid tvdpll lock range (1 GHz to 2 GHz) */
pix_rate = 1000UL * mode->clock;
- if (mode->clock <= 74000)
+ if (mode->clock <= 27000)
+ factor = 16 * 3;
+ else if (mode->clock <= 84000)
factor = 8 * 3;
- else
+ else if (mode->clock <= 167000)
factor = 4 * 3;
+ else
+ factor = 2 * 3;
pll_rate = pix_rate * factor;
dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
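
The ladder picks the smallest multiplier that keeps the TVDPLL inside its lock window (1 GHz to 2 GHz per the comment). A worked sketch of the same selection, with the products spelled out:

        static unsigned int example_tvdpll_factor(int clock_khz)
        {
                if (clock_khz <= 27000)
                        return 16 * 3;  /* e.g. 25.175 MHz * 48 = 1.208 GHz */
                if (clock_khz <= 84000)
                        return 8 * 3;   /* e.g. 74.25 MHz * 24 = 1.782 GHz */
                if (clock_khz <= 167000)
                        return 4 * 3;   /* e.g. 148.5 MHz * 12 = 1.782 GHz */
                return 2 * 3;           /* e.g. 297 MHz * 6 = 1.782 GHz */
        }

The old two-way split (8 * 3 up to 74 MHz, otherwise 4 * 3) left very low and very high pixel clocks outside that window, e.g. 25.175 MHz * 24 = 604 MHz.
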
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 24aa3bad1e76..01a21dd835b5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -31,7 +31,7 @@
* struct mtk_drm_crtc - MediaTek specific crtc structure.
* @base: crtc object.
* @enabled: records whether crtc_enable succeeded
- * @planes: array of 4 mtk_drm_plane structures, one for each overlay plane
+ * @planes: array of 4 drm_plane structures, one for each overlay plane
* @pending_planes: whether any plane has pending changes to be applied
* @config_regs: memory mapped mmsys configuration register space
* @mutex: handle to one of the ten disp_mutex streams
@@ -45,7 +45,7 @@ struct mtk_drm_crtc {
bool pending_needs_vblank;
struct drm_pending_vblank_event *event;
- struct mtk_drm_plane planes[OVL_LAYER_NR];
+ struct drm_plane planes[OVL_LAYER_NR];
bool pending_planes;
void __iomem *config_regs;
@@ -112,8 +112,7 @@ static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
struct mtk_crtc_state *state;
if (crtc->state) {
- if (crtc->state->mode_blob)
- drm_property_unreference_blob(crtc->state->mode_blob);
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
state = to_mtk_crtc_state(crtc->state);
memset(state, 0, sizeof(*state));
@@ -222,7 +221,9 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
- unsigned int width, height, vrefresh;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
int ret;
int i;
@@ -234,6 +235,19 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
height = crtc->state->adjusted_mode.vdisplay;
vrefresh = crtc->state->adjusted_mode.vrefresh;
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ drm_for_each_connector(connector, crtc->dev) {
+ if (connector->encoder != encoder)
+ continue;
+ if (connector->display_info.bpc != 0 &&
+ bpc > connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ }
+
ret = pm_runtime_get_sync(crtc->dev->dev);
if (ret < 0) {
DRM_ERROR("Failed to enable power domain: %d\n", ret);
@@ -266,13 +280,13 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
- mtk_ddp_comp_config(comp, width, height, vrefresh);
+ mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
mtk_ddp_comp_start(comp);
}
/* Initially configure all planes */
for (i = 0; i < OVL_LAYER_NR; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i].base;
+ struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
@@ -351,7 +365,7 @@ static void mtk_drm_crtc_disable(struct drm_crtc *crtc)
/* Set all pending plane state to disabled */
for (i = 0; i < OVL_LAYER_NR; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i].base;
+ struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
@@ -397,7 +411,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
if (mtk_crtc->event)
mtk_crtc->pending_needs_vblank = true;
for (i = 0; i < OVL_LAYER_NR; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i].base;
+ struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
@@ -409,6 +423,9 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
}
if (pending_planes)
mtk_crtc->pending_planes = true;
+ if (crtc->state->color_mgmt_changed)
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -418,6 +435,7 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = {
.reset = mtk_drm_crtc_reset,
.atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
.atomic_destroy_state = mtk_drm_crtc_destroy_state,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
@@ -464,14 +482,14 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
if (state->pending_config) {
mtk_ddp_comp_config(ovl, state->pending_width,
state->pending_height,
- state->pending_vrefresh);
+ state->pending_vrefresh, 0);
state->pending_config = false;
}
if (mtk_crtc->pending_planes) {
for (i = 0; i < OVL_LAYER_NR; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i].base;
+ struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
@@ -559,16 +577,17 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
(zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
DRM_PLANE_TYPE_OVERLAY;
ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
- BIT(pipe), type, zpos);
+ BIT(pipe), type);
if (ret)
goto unprepare;
}
- ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0].base,
- &mtk_crtc->planes[1].base, pipe);
+ ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
+ &mtk_crtc->planes[1], pipe);
if (ret < 0)
goto unprepare;
-
+ drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
priv->crtc[pipe] = &mtk_crtc->base;
priv->num_pipes++;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 81e5566ec82f..a1550fa3c9d2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -19,10 +19,12 @@
#include "mtk_drm_plane.h"
#define OVL_LAYER_NR 4
+#define MTK_LUT_SIZE 512
+#define MTK_MAX_BPC 10
+#define MTK_MIN_BPC 3
int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe);
void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe);
-void mtk_drm_crtc_check_flush(struct drm_crtc *crtc);
void mtk_drm_crtc_commit(struct drm_crtc *crtc);
void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 3970fcf0f05f..df33b3ca6ffd 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -24,12 +24,17 @@
#include "mtk_drm_drv.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_crtc.h"
#define DISP_OD_EN 0x0000
#define DISP_OD_INTEN 0x0008
#define DISP_OD_INTSTA 0x000c
#define DISP_OD_CFG 0x0020
#define DISP_OD_SIZE 0x0030
+#define DISP_DITHER_5 0x0114
+#define DISP_DITHER_7 0x011c
+#define DISP_DITHER_15 0x013c
+#define DISP_DITHER_16 0x0140
#define DISP_REG_UFO_START 0x0000
@@ -38,15 +43,69 @@
#define DISP_COLOR_WIDTH 0x0c50
#define DISP_COLOR_HEIGHT 0x0c54
-#define OD_RELAY_MODE BIT(0)
+#define DISP_AAL_EN 0x0000
+#define DISP_AAL_SIZE 0x0030
-#define UFO_BYPASS BIT(2)
+#define DISP_GAMMA_EN 0x0000
+#define DISP_GAMMA_CFG 0x0020
+#define DISP_GAMMA_SIZE 0x0030
+#define DISP_GAMMA_LUT 0x0700
-#define COLOR_BYPASS_ALL BIT(7)
-#define COLOR_SEQ_SEL BIT(13)
+#define LUT_10BIT_MASK 0x03ff
+
+#define COLOR_BYPASS_ALL BIT(7)
+#define COLOR_SEQ_SEL BIT(13)
+
+#define OD_RELAYMODE BIT(0)
+
+#define UFO_BYPASS BIT(2)
+
+#define AAL_EN BIT(0)
+
+#define GAMMA_EN BIT(0)
+#define GAMMA_LUT_EN BIT(1)
+
+#define DISP_DITHERING BIT(2)
+#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28)
+#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24)
+#define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20)
+#define DITHER_ADD_RSHIFT_R(x) (((x) & 0x7) << 16)
+#define DITHER_NEW_BIT_MODE BIT(0)
+#define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28)
+#define DITHER_OVFLW_BIT_B(x) (((x) & 0x7) << 24)
+#define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20)
+#define DITHER_ADD_RSHIFT_B(x) (((x) & 0x7) << 16)
+#define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12)
+#define DITHER_OVFLW_BIT_G(x) (((x) & 0x7) << 8)
+#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
+#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+
+void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
+ unsigned int CFG)
+{
+	/* If bpc is 0, dithering is left disabled */
+ if (bpc == 0)
+ return;
+
+ if (bpc >= MTK_MIN_BPC) {
+ writel(0, comp->regs + DISP_DITHER_5);
+ writel(0, comp->regs + DISP_DITHER_7);
+ writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) |
+ DITHER_NEW_BIT_MODE,
+ comp->regs + DISP_DITHER_15);
+ writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) |
+ DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) |
+ DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc),
+ comp->regs + DISP_DITHER_16);
+ writel(DISP_DITHERING, comp->regs + CFG);
+ }
+}
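
mtk_dither_set() hides the truncation from the 10-bit internal pipeline down to the panel depth with error-diffusion dithering; the shift amount is simply MTK_MAX_BPC - bpc. A worked example for an 8-bit panel (shift = 2), with the register values computed from the macros above:

        /* bpc = 8, MTK_MAX_BPC = 10, so shift = 2:
         *
         *   DISP_DITHER_15 = DITHER_LSB_ERR_SHIFT_R(2) |
         *                    DITHER_ADD_LSHIFT_R(2) | DITHER_NEW_BIT_MODE
         *                  = (2 << 28) | (2 << 20) | 1 = 0x20200001
         *   DISP_DITHER_16 = (2 << 28) | (2 << 20) | (2 << 12) | (2 << 4)
         *                  = 0x20202020
         */
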
static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh)
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
{
writel(w, comp->regs + DISP_COLOR_WIDTH);
writel(h, comp->regs + DISP_COLOR_HEIGHT);
@@ -60,14 +119,16 @@ static void mtk_color_start(struct mtk_ddp_comp *comp)
}
static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh)
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
{
writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
+	writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
+ mtk_dither_set(comp, bpc, DISP_OD_CFG);
}
static void mtk_od_start(struct mtk_ddp_comp *comp)
{
- writel(OD_RELAY_MODE, comp->regs + DISP_OD_CFG);
writel(1, comp->regs + DISP_OD_EN);
}
@@ -76,6 +137,78 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
writel(UFO_BYPASS, comp->regs + DISP_REG_UFO_START);
}
+static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_AAL_SIZE);
+}
+
+static void mtk_aal_start(struct mtk_ddp_comp *comp)
+{
+ writel(AAL_EN, comp->regs + DISP_AAL_EN);
+}
+
+static void mtk_aal_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
+}
+
+static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE);
+ mtk_dither_set(comp, bpc, DISP_GAMMA_CFG);
+}
+
+static void mtk_gamma_start(struct mtk_ddp_comp *comp)
+{
+ writel(GAMMA_EN, comp->regs + DISP_GAMMA_EN);
+}
+
+static void mtk_gamma_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_GAMMA_EN);
+}
+
+static void mtk_gamma_set(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state)
+{
+ unsigned int i, reg;
+ struct drm_color_lut *lut;
+ void __iomem *lut_base;
+ u32 word;
+
+ if (state->gamma_lut) {
+ reg = readl(comp->regs + DISP_GAMMA_CFG);
+ reg = reg | GAMMA_LUT_EN;
+ writel(reg, comp->regs + DISP_GAMMA_CFG);
+ lut_base = comp->regs + DISP_GAMMA_LUT;
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ for (i = 0; i < MTK_LUT_SIZE; i++) {
+ word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) +
+ (((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) +
+ ((lut[i].blue >> 6) & LUT_10BIT_MASK);
+ writel(word, (lut_base + i * 4));
+ }
+ }
+}
+
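
Each gamma LUT entry packs three 10-bit channels into one 32-bit word: red in bits 29:20, green in 19:10, blue in 9:0, with the 16-bit drm_color_lut components truncated by the right shift of 6. A worked example for full-scale red:

        /* lut[i] = { .red = 0xffff, .green = 0, .blue = 0 }:
         *   (0xffff >> 6) & 0x3ff = 0x3ff
         *   word = 0x3ff << 20 = 0x3ff00000  (R[29:20] | G[19:10] | B[9:0])
         */
        u32 example_word = ((0xffffu >> 6) & 0x3ff) << 20;
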
+static const struct mtk_ddp_comp_funcs ddp_aal = {
+ .gamma_set = mtk_gamma_set,
+ .config = mtk_aal_config,
+ .start = mtk_aal_start,
+ .stop = mtk_aal_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_gamma = {
+ .gamma_set = mtk_gamma_set,
+ .config = mtk_gamma_config,
+ .start = mtk_gamma_start,
+ .stop = mtk_gamma_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_color = {
.config = mtk_color_config,
.start = mtk_color_start,
@@ -112,13 +245,13 @@ struct mtk_ddp_comp_match {
};
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, NULL },
+ [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
[DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL },
- [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, NULL },
+ [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma },
[DDP_COMPONENT_OD] = { MTK_DISP_OD, 0, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 6b13ba97094d..22a33ee451c4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -21,6 +21,7 @@ struct device_node;
struct drm_crtc;
struct drm_device;
struct mtk_plane_state;
+struct drm_crtc_state;
enum mtk_ddp_comp_type {
MTK_DISP_OVL,
@@ -64,7 +65,7 @@ struct mtk_ddp_comp;
struct mtk_ddp_comp_funcs {
void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
- unsigned int h, unsigned int vrefresh);
+ unsigned int h, unsigned int vrefresh, unsigned int bpc);
void (*start)(struct mtk_ddp_comp *comp);
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
@@ -73,6 +74,8 @@ struct mtk_ddp_comp_funcs {
void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
struct mtk_plane_state *state);
+ void (*gamma_set)(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state);
};
struct mtk_ddp_comp {
@@ -86,10 +89,10 @@ struct mtk_ddp_comp {
static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
unsigned int w, unsigned int h,
- unsigned int vrefresh)
+ unsigned int vrefresh, unsigned int bpc)
{
if (comp->funcs && comp->funcs->config)
- comp->funcs->config(comp, w, h, vrefresh);
+ comp->funcs->config(comp, w, h, vrefresh, bpc);
}
static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
@@ -139,6 +142,13 @@ static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
comp->funcs->layer_config(comp, idx, state);
}
+static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
+ struct drm_crtc_state *state)
+{
+ if (comp->funcs && comp->funcs->gamma_set)
+ comp->funcs->gamma_set(comp, state);
+}
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
@@ -146,5 +156,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
const struct mtk_ddp_comp_funcs *funcs);
int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
+void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
+ unsigned int CFG);
#endif /* MTK_DRM_DDP_COMP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index eebb7d881c2b..cf83f6507ec8 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -61,10 +61,27 @@ static void mtk_atomic_complete(struct mtk_drm_private *private,
mtk_atomic_wait_for_fences(state);
+ /*
+ * Mediatek drm supports runtime PM, so plane registers cannot be
+ * written when their crtc is disabled.
+ *
+ * The comment for drm_atomic_helper_commit states:
+ * For drivers supporting runtime PM the recommended sequence is
+ *
+ * drm_atomic_helper_commit_modeset_disables(dev, state);
+ * drm_atomic_helper_commit_modeset_enables(dev, state);
+ * drm_atomic_helper_commit_planes(dev, state,
+ * DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ *
+ * See the kerneldoc entries for these three functions for more details.
+ */
drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_planes(drm, state, false);
drm_atomic_helper_commit_modeset_enables(drm, state);
+ drm_atomic_helper_commit_planes(drm, state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
drm_atomic_helper_wait_for_vblanks(drm, state);
+
drm_atomic_helper_cleanup_planes(drm, state);
drm_atomic_state_free(state);
}
@@ -277,8 +294,8 @@ static int mtk_drm_bind(struct device *dev)
int ret;
drm = drm_dev_alloc(&mtk_drm_driver, dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
drm->dev_private = private;
private->drm = drm;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 3995765a90dc..c461a232cbf5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -30,57 +30,12 @@ static const u32 formats[] = {
DRM_FORMAT_RGB565,
};
-static void mtk_plane_enable(struct mtk_drm_plane *mtk_plane, bool enable,
- dma_addr_t addr, struct drm_rect *dest)
-{
- struct drm_plane *plane = &mtk_plane->base;
- struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
- unsigned int pitch, format;
- int x, y;
-
- if (WARN_ON(!plane->state || (enable && !plane->state->fb)))
- return;
-
- if (plane->state->fb) {
- pitch = plane->state->fb->pitches[0];
- format = plane->state->fb->pixel_format;
- } else {
- pitch = 0;
- format = DRM_FORMAT_RGBA8888;
- }
-
- x = plane->state->crtc_x;
- y = plane->state->crtc_y;
-
- if (x < 0) {
- addr -= x * 4;
- x = 0;
- }
-
- if (y < 0) {
- addr -= y * pitch;
- y = 0;
- }
-
- state->pending.enable = enable;
- state->pending.pitch = pitch;
- state->pending.format = format;
- state->pending.addr = addr;
- state->pending.x = x;
- state->pending.y = y;
- state->pending.width = dest->x2 - dest->x1;
- state->pending.height = dest->y2 - dest->y1;
- wmb(); /* Make sure the above parameters are set before update */
- state->pending.dirty = true;
-}
-
static void mtk_plane_reset(struct drm_plane *plane)
{
struct mtk_plane_state *state;
if (plane->state) {
- if (plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
+ __drm_atomic_helper_plane_destroy_state(plane->state);
state = to_mtk_plane_state(plane->state);
memset(state, 0, sizeof(*state));
@@ -134,20 +89,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
{
struct drm_framebuffer *fb = state->fb;
struct drm_crtc_state *crtc_state;
- bool visible;
- struct drm_rect dest = {
- .x1 = state->crtc_x,
- .y1 = state->crtc_y,
- .x2 = state->crtc_x + state->crtc_w,
- .y2 = state->crtc_y + state->crtc_h,
- };
- struct drm_rect src = {
- /* 16.16 fixed point */
- .x1 = state->src_x,
- .y1 = state->src_y,
- .x2 = state->src_x + state->src_w,
- .y2 = state->src_y + state->src_h,
- };
struct drm_rect clip = { 0, };
if (!fb)
@@ -168,40 +109,45 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
clip.x2 = crtc_state->mode.hdisplay;
clip.y2 = crtc_state->mode.vdisplay;
- return drm_plane_helper_check_update(plane, state->crtc, fb,
- &src, &dest, &clip,
- state->rotation,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true, &visible);
+ return drm_plane_helper_check_state(state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
- struct drm_crtc *crtc = state->base.crtc;
+ struct drm_crtc *crtc = plane->state->crtc;
+ struct drm_framebuffer *fb = plane->state->fb;
struct drm_gem_object *gem;
struct mtk_drm_gem_obj *mtk_gem;
- struct mtk_drm_plane *mtk_plane = to_mtk_plane(plane);
- struct drm_rect dest = {
- .x1 = state->base.crtc_x,
- .y1 = state->base.crtc_y,
- .x2 = state->base.crtc_x + state->base.crtc_w,
- .y2 = state->base.crtc_y + state->base.crtc_h,
- };
- struct drm_rect clip = { 0, };
+ unsigned int pitch, format;
+ dma_addr_t addr;
- if (!crtc)
+ if (!crtc || WARN_ON(!fb))
return;
- clip.x2 = state->base.crtc->state->mode.hdisplay;
- clip.y2 = state->base.crtc->state->mode.vdisplay;
- drm_rect_intersect(&dest, &clip);
-
- gem = mtk_fb_get_gem_obj(state->base.fb);
+ gem = mtk_fb_get_gem_obj(fb);
mtk_gem = to_mtk_gem_obj(gem);
- mtk_plane_enable(mtk_plane, true, mtk_gem->dma_addr, &dest);
+ addr = mtk_gem->dma_addr;
+ pitch = fb->pitches[0];
+ format = fb->pixel_format;
+
+ addr += (plane->state->src.x1 >> 16) * drm_format_plane_cpp(format, 0);
+ addr += (plane->state->src.y1 >> 16) * pitch;
+
+ state->pending.enable = true;
+ state->pending.pitch = pitch;
+ state->pending.format = format;
+ state->pending.addr = addr;
+ state->pending.x = plane->state->dst.x1;
+ state->pending.y = plane->state->dst.y1;
+ state->pending.width = drm_rect_width(&plane->state->dst);
+ state->pending.height = drm_rect_height(&plane->state->dst);
+ wmb(); /* Make sure the above parameters are set before update */
+ state->pending.dirty = true;
}
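
The address computation above replaces the old clip-rectangle plumbing: drm_plane_helper_check_state() has already clipped state->src and state->dst, so the scanout address is just the GEM base plus the byte offset of the clipped source origin. A worked example for RGB565 on a 1920-pixel-wide buffer with src.x1 = 8 << 16 and src.y1 = 4 << 16:

        /* cpp   = drm_format_plane_cpp(DRM_FORMAT_RGB565, 0) = 2 bytes
         * pitch = 1920 * 2 = 3840 bytes
         * addr += (src.x1 >> 16) * cpp + (src.y1 >> 16) * pitch
         *       = 8 * 2 + 4 * 3840 = 15376 bytes past the buffer base
         */
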
static void mtk_plane_atomic_disable(struct drm_plane *plane,
@@ -220,13 +166,12 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
.atomic_disable = mtk_plane_atomic_disable,
};
-int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
- unsigned long possible_crtcs, enum drm_plane_type type,
- unsigned int zpos)
+int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs, enum drm_plane_type type)
{
int err;
- err = drm_universal_plane_init(dev, &mtk_plane->base, possible_crtcs,
+ err = drm_universal_plane_init(dev, plane, possible_crtcs,
&mtk_plane_funcs, formats,
ARRAY_SIZE(formats), type, NULL);
if (err) {
@@ -234,8 +179,7 @@ int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
return err;
}
- drm_plane_helper_add(&mtk_plane->base, &mtk_plane_helper_funcs);
- mtk_plane->idx = zpos;
+ drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index 72a7b3e4c126..6a20b49e0f2f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -18,11 +18,6 @@
#include <drm/drm_crtc.h>
#include <linux/types.h>
-struct mtk_drm_plane {
- struct drm_plane base;
- unsigned int idx;
-};
-
struct mtk_plane_pending_state {
bool config;
bool enable;
@@ -41,19 +36,13 @@ struct mtk_plane_state {
struct mtk_plane_pending_state pending;
};
-static inline struct mtk_drm_plane *to_mtk_plane(struct drm_plane *plane)
-{
- return container_of(plane, struct mtk_drm_plane, base);
-}
-
static inline struct mtk_plane_state *
to_mtk_plane_state(struct drm_plane_state *state)
{
return container_of(state, struct mtk_plane_state, base);
}
-int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
- unsigned long possible_crtcs, enum drm_plane_type type,
- unsigned int zpos);
+int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs, enum drm_plane_type type);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 334562d06731..0e8c4d9af340 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1086,20 +1086,20 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
return 0;
}
-void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
+static void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
{
mtk_hdmi_aud_enable_packet(hdmi, true);
hdmi->audio_enable = true;
}
-void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
+static void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
{
mtk_hdmi_aud_enable_packet(hdmi, false);
hdmi->audio_enable = false;
}
-int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
- struct hdmi_audio_param *param)
+static int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
+ struct hdmi_audio_param *param)
{
if (!hdmi->audio_enable) {
dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
@@ -1133,12 +1133,6 @@ static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
phy_power_on(hdmi->phy);
mtk_hdmi_aud_output_config(hdmi, mode);
- mtk_hdmi_setup_audio_infoframe(hdmi);
- mtk_hdmi_setup_avi_infoframe(hdmi, mode);
- mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
- if (mode->flags & DRM_MODE_FLAG_3D_MASK)
- mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
-
mtk_hdmi_hw_vid_black(hdmi, false);
mtk_hdmi_hw_aud_unmute(hdmi);
mtk_hdmi_hw_send_av_unmute(hdmi);
@@ -1401,6 +1395,16 @@ static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
hdmi->powered = true;
}
+static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ mtk_hdmi_setup_audio_infoframe(hdmi);
+ mtk_hdmi_setup_avi_infoframe(hdmi, mode);
+ mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
+}
+
static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1409,6 +1413,7 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
phy_power_on(hdmi->phy);
+ mtk_hdmi_send_infoframe(hdmi, &hdmi->mode);
hdmi->enabled = true;
}
@@ -1624,7 +1629,8 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
mtk_hdmi_audio_disable(hdmi);
}
-int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int
+mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index 8a24754b440f..51cb9cfb6646 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -265,6 +265,9 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
unsigned int pre_div;
unsigned int div;
+ unsigned int pre_ibias;
+ unsigned int hdmi_ibias;
+ unsigned int imp_en;
dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
rate, parent_rate);
@@ -298,18 +301,31 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
(0x1 << PLL_BR_SHIFT),
RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
RG_HDMITX_PLL_BR);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
+ if (rate < 165000000) {
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_PRD_IMP_EN);
+ pre_ibias = 0x3;
+ imp_en = 0x0;
+ hdmi_ibias = hdmi_phy->ibias;
+ } else {
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_PRD_IMP_EN);
+ pre_ibias = 0x6;
+ imp_en = 0xf;
+ hdmi_ibias = hdmi_phy->ibias_up;
+ }
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
- (0x3 << PRD_IBIAS_CLK_SHIFT) |
- (0x3 << PRD_IBIAS_D2_SHIFT) |
- (0x3 << PRD_IBIAS_D1_SHIFT) |
- (0x3 << PRD_IBIAS_D0_SHIFT),
+ (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
+ (pre_ibias << PRD_IBIAS_D2_SHIFT) |
+ (pre_ibias << PRD_IBIAS_D1_SHIFT) |
+ (pre_ibias << PRD_IBIAS_D0_SHIFT),
RG_HDMITX_PRD_IBIAS_CLK |
RG_HDMITX_PRD_IBIAS_D2 |
RG_HDMITX_PRD_IBIAS_D1 |
RG_HDMITX_PRD_IBIAS_D0);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
- (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
+ (imp_en << DRV_IMP_EN_SHIFT),
+ RG_HDMITX_DRV_IMP_EN);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
(hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
(hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
@@ -318,12 +334,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
- (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
- (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
- (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
- (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
- RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
- RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
+ (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
+ (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
+ (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
+ (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
+ RG_HDMITX_DRV_IBIAS_CLK |
+ RG_HDMITX_DRV_IBIAS_D2 |
+ RG_HDMITX_DRV_IBIAS_D1 |
+ RG_HDMITX_DRV_IBIAS_D0);
return 0;
}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 5e2f131a6a72..25b2a1a424e6 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -58,7 +58,7 @@ static const struct file_operations mga_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_PCI_DMA |
+ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_LEGACY |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_mga_buf_priv_t),
.load = mga_driver_load,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 2b4b125eebc3..1443b3a34775 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -56,7 +56,7 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
kfree(ap);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index d9b04b008feb..88dd2214114d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -15,8 +15,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <linux/fb.h>
-
#include "mgag200_drv.h"
static void mga_dirty_update(struct mga_fbdev *mfbdev,
@@ -185,8 +183,10 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
}
sysram = vmalloc(size);
- if (!sysram)
+ if (!sysram) {
+ ret = -ENOMEM;
goto err_sysram;
+ }
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 13798b3e6beb..e79cbc25ae3c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -135,7 +135,7 @@ static int mga_vram_init(struct mga_device *mdev)
aper->ranges[0].base = mdev->mc.vram_base;
aper->ranges[0].size = mdev->mc.vram_window;
- remove_conflicting_framebuffers(aper, "mgafb", true);
+ drm_fb_helper_remove_conflicting_framebuffers(aper, "mgafb", true);
kfree(aper);
if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 68268e55d595..dcf7d11ac380 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -150,7 +150,8 @@ static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *f
{
struct mgag200_bo *mgabo = mgag200_bo(bo);
- return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&mgabo->gem.vma_node,
+ filp->private_data);
}
static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -265,6 +266,9 @@ int mgag200_mm_init(struct mga_device *mdev)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -273,10 +277,14 @@ int mgag200_mm_init(struct mga_device *mdev)
void mgag200_mm_fini(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
+
ttm_bo_device_release(&mdev->ttm.bdev);
mgag200_ttm_global_release(mdev);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
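
Both mgag200 hunks pair the write-combining MTRR on the VRAM aperture with an explicit memtype reservation and tear both down in mgag200_mm_fini(); the nouveau_ttm.c diff further down applies the same pairing. A small userspace model of that lifecycle, where all four functions are stand-ins for arch_io_reserve_memtype_wc(), arch_phys_wc_add(), arch_phys_wc_del() and arch_io_free_memtype_wc():

#include <stdio.h>

static void reserve_memtype_wc(unsigned long base, unsigned long size)
{
	printf("reserve WC memtype [%#lx..%#lx)\n", base, base + size);
}

static int phys_wc_add(unsigned long base, unsigned long size)
{
	printf("add WC MTRR        [%#lx..%#lx)\n", base, base + size);
	return 1;				/* MTRR handle */
}

static void phys_wc_del(int handle)
{
	printf("del WC MTRR handle %d\n", handle);
}

static void free_memtype_wc(unsigned long base, unsigned long size)
{
	printf("free WC memtype    [%#lx..%#lx)\n", base, base + size);
}

int main(void)
{
	unsigned long base = 0xd0000000UL, size = 0x1000000UL;

	/* init path: reserve the memtype, then add the MTRR */
	reserve_memtype_wc(base, size);
	int mtrr = phys_wc_add(base, size);

	/* fini path: every reserve/add gets a matching del/free */
	phys_wc_del(mtrr);
	free_memtype_wc(base, size);
	return 0;
}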
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7c7a0314a756..d96b2b6898a3 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -11,6 +11,7 @@ config DRM_MSM
select TMPFS
select QCOM_SCM
select SND_SOC_HDMI_CODEC if SND_SOC
+ select SYNC_FILE
default y
help
DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index f05ed0e1f3d6..6f240021705b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -139,6 +139,7 @@ struct msm_dsi_host {
u32 err_work_state;
struct work_struct err_work;
+ struct work_struct hpd_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer*/
@@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
wmb(); /* make sure dsi controller enabled again */
}
+static void dsi_hpd_worker(struct work_struct *work)
+{
+ struct msm_dsi_host *msm_host =
+ container_of(work, struct msm_dsi_host, hpd_work);
+
+ drm_helper_hpd_irq_event(msm_host->dev);
+}
+
static void dsi_err_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
@@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
DBG("id=%d", msm_host->id);
if (msm_host->dev)
- drm_helper_hpd_irq_event(msm_host->dev);
+ queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
DBG("id=%d", msm_host->id);
if (msm_host->dev)
- drm_helper_hpd_irq_event(msm_host->dev);
+ queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
INIT_WORK(&msm_host->err_work, dsi_err_worker);
+ INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
msm_dsi->host = &msm_host->base;
msm_dsi->id = msm_host->id;
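
The dsi_host change stops calling drm_helper_hpd_irq_event() synchronously from the attach/detach paths and instead queues the notification on the host's ordered workqueue, so the hotplug handling runs outside whatever context the DSI host holds. A single-threaded model of that deferral, using simplified init_work()/queue_work()/run_pending() stand-ins for INIT_WORK(), queue_work() and the kworker that drains the queue:

#include <stdio.h>

struct work_struct {
	void (*func)(struct work_struct *work);
	int queued;
};

static void init_work(struct work_struct *w, void (*fn)(struct work_struct *))
{
	w->func = fn;
	w->queued = 0;
}

static void queue_work(struct work_struct *w)
{
	w->queued = 1;		/* the real call wakes a kworker */
}

static void run_pending(struct work_struct *w)
{
	if (w->queued) {
		w->queued = 0;
		w->func(w);
	}
}

static void hpd_worker(struct work_struct *w)
{
	(void)w;
	printf("hotplug event handled outside the attach path\n");
}

int main(void)
{
	struct work_struct hpd_work;

	init_work(&hpd_work, hpd_worker);
	queue_work(&hpd_work);	/* was: drm_helper_hpd_irq_event() inline */
	run_pending(&hpd_work);	/* later, in workqueue context */
	return 0;
}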
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 598fdaff0a41..26e3a01a99c2 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
.parent_names = (const char *[]){ "xo" },
.num_parents = 1,
.name = vco_name,
+ .flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 38c90e1eb002..49008451085b 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
struct clk_init_data vco_init = {
.parent_names = (const char *[]){ "pxo" },
.num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_28nm_vco,
};
struct device *dev = &pll_28nm->pdev->dev;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 973720792236..a968cad509c2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -422,12 +422,29 @@ static const struct {
static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
{
- int gpio = of_get_named_gpio(of_node, name, 0);
+ int gpio;
+
+ /* try with the gpio names as in the table (downstream bindings) */
+ gpio = of_get_named_gpio(of_node, name, 0);
if (gpio < 0) {
char name2[32];
- snprintf(name2, sizeof(name2), "%s-gpio", name);
+
+ /* try with the gpio names as in the upstream bindings */
+ snprintf(name2, sizeof(name2), "%s-gpios", name);
gpio = of_get_named_gpio(of_node, name2, 0);
if (gpio < 0) {
+ char name3[32];
+
+ /*
+ * try again after stripping out the "qcom,hdmi-tx"
+ * prefix. This is mainly to match "hpd-gpios" used
+ * in the upstream bindings
+ */
+ if (sscanf(name2, "qcom,hdmi-tx-%31s", name3))
+ gpio = of_get_named_gpio(of_node, name3, 0);
+ }
+
+ if (gpio < 0) {
DBG("failed to get gpio: %s (%d)", name, gpio);
gpio = -1;
}
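
msm_hdmi_get_gpio() now tries three property spellings in turn: the downstream name as-is, the same name with a "-gpios" suffix, and finally the suffixed name with the "qcom,hdmi-tx-" prefix stripped, so that plain upstream names such as "hpd-gpios" also match. A standalone sketch of that fallback chain, where lookup_gpio() is a hypothetical stand-in for of_get_named_gpio() against a DT that only carries the upstream "hpd-gpios" property:

#include <stdio.h>
#include <string.h>

static int lookup_gpio(const char *prop)
{
	return strcmp(prop, "hpd-gpios") == 0 ? 42 : -1;
}

static int get_gpio(const char *name)
{
	char name2[32], name3[32];
	int gpio = lookup_gpio(name);		/* downstream name as-is */

	if (gpio < 0) {
		snprintf(name2, sizeof(name2), "%s-gpios", name);
		gpio = lookup_gpio(name2);	/* with upstream suffix */
		if (gpio < 0 && sscanf(name2, "qcom,hdmi-tx-%31s", name3))
			gpio = lookup_gpio(name3);	/* prefix stripped */
	}
	return gpio;
}

int main(void)
{
	printf("qcom,hdmi-tx-hpd -> gpio %d\n", get_gpio("qcom,hdmi-tx-hpd"));
	return 0;
}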
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index de9007e72f4e..73e20219d431 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -243,7 +243,6 @@ void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c)
struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
{
- struct drm_device *dev = hdmi->dev;
struct hdmi_i2c_adapter *hdmi_i2c;
struct i2c_adapter *i2c = NULL;
int ret;
@@ -267,10 +266,8 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
i2c->algo = &msm_hdmi_i2c_algorithm;
ret = i2c_add_adapter(i2c);
- if (ret) {
- dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
+ if (ret)
goto fail;
- }
return i2c;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index aa94a553794f..143eab46ba68 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -702,6 +702,7 @@ static struct clk_init_data pll_init = {
.ops = &hdmi_8996_pll_ops,
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .flags = CLK_IGNORE_UNUSED,
};
int msm_hdmi_pll_8996_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 92da69aa6187..99590758c68b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -424,6 +424,7 @@ static struct clk_init_data pll_init = {
.ops = &hdmi_pll_ops,
.parent_names = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .flags = CLK_IGNORE_UNUSED,
};
int msm_hdmi_pll_8960_init(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 7b39e89fbc2b..571a91ee9607 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -228,18 +228,21 @@ static struct device_node *mdp4_detect_lcdc_panel(struct drm_device *dev)
struct device_node *endpoint, *panel_node;
struct device_node *np = dev->dev->of_node;
- endpoint = of_graph_get_next_endpoint(np, NULL);
+ /*
+ * LVDS/LCDC is the first port described in the list of ports in the
+ * MDP4 DT node.
+ */
+ endpoint = of_graph_get_endpoint_by_regs(np, 0, -1);
if (!endpoint) {
- DBG("no endpoint in MDP4 to fetch LVDS panel\n");
+ DBG("no LVDS remote endpoint\n");
return NULL;
}
- /* don't proceed if we have an endpoint but no panel_node tied to it */
panel_node = of_graph_get_remote_port_parent(endpoint);
if (!panel_node) {
- dev_err(dev->dev, "no valid panel node\n");
+ DBG("no valid panel node in LVDS endpoint\n");
of_node_put(endpoint);
- return ERR_PTR(-ENODEV);
+ return NULL;
}
of_node_put(endpoint);
@@ -262,14 +265,12 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
switch (intf_type) {
case DRM_MODE_ENCODER_LVDS:
/*
- * bail out early if:
- * - there is no panel node (no need to initialize lcdc
- * encoder and lvds connector), or
- * - panel node is a bad pointer
+ * bail out early if there is no panel node (no need to
+ * initialize LCDC encoder and LVDS connector)
*/
panel_node = mdp4_detect_lcdc_panel(dev);
- if (IS_ERR_OR_NULL(panel_node))
- return PTR_ERR(panel_node);
+ if (!panel_node)
+ return 0;
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index bc3d8e719c6c..a06b064f86c1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -93,7 +93,7 @@ static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = {
};
/* this should probably be a helper: */
-struct drm_connector *get_connector(struct drm_encoder *encoder)
+static struct drm_connector *get_connector(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 9f96dfe67769..3903dbcda763 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -81,7 +81,7 @@ static void mdp4_plane_install_properties(struct drm_plane *plane,
// XXX
}
-int mdp4_plane_set_property(struct drm_plane *plane,
+static int mdp4_plane_set_property(struct drm_plane *plane,
struct drm_property *property, uint64_t val)
{
// XXX
@@ -99,7 +99,7 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
};
static int mdp4_plane_prepare_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state)
+ struct drm_plane_state *new_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
@@ -113,7 +113,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
- const struct drm_plane_state *old_state)
+ struct drm_plane_state *old_state)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
struct mdp4_kms *mdp4_kms = get_kms(plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index ac9e4cde1380..8b4e3004f451 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
- MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
@@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x44000, 0x47000 },
- .nb_stages = 5,
+ .nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index fa2be7ce9468..c205c360e16d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -223,12 +223,7 @@ static void blend_setup(struct drm_crtc *crtc)
plane_cnt++;
}
- /*
- * If there is no base layer, enable border color.
- * Although it's not possbile in current blend logic,
- * put it here as a reminder.
- */
- if (!pstates[STAGE_BASE] && plane_cnt) {
+ if (!pstates[STAGE_BASE]) {
ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
DBG("Border Color is enabled");
}
@@ -365,6 +360,15 @@ static int pstate_cmp(const void *a, const void *b)
return pa->state->zpos - pb->state->zpos;
}
+/* is there a helper for this? */
+static bool is_fullscreen(struct drm_crtc_state *cstate,
+ struct drm_plane_state *pstate)
+{
+ return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
+ ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
+ ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
+}
+
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -375,21 +379,11 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate;
- int cnt = 0, i;
+ int cnt = 0, base = 0, i;
DBG("%s: check", mdp5_crtc->name);
- /* verify that there are not too many planes attached to crtc
- * and that we don't have conflicting mixer stages:
- */
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
- if (cnt >= (hw_cfg->lm.nb_stages)) {
- dev_err(dev->dev, "too many planes!\n");
- return -EINVAL;
- }
-
-
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
@@ -399,8 +393,24 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
/* assign a stage based on sorted zpos property */
sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+ /* if the bottom-most layer is not fullscreen, we need to use
+ * it for solid-color:
+ */
+ if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
+ base++;
+
+ /* verify that there are not too many planes attached to crtc
+ * and that we don't have conflicting mixer stages:
+ */
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ if ((cnt + base) >= hw_cfg->lm.nb_stages) {
+ dev_err(dev->dev, "too many planes!\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < cnt; i++) {
- pstates[i].state->stage = STAGE_BASE + i;
+ pstates[i].state->stage = STAGE_BASE + i + base;
DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
pipe2name(mdp5_plane_pipe(pstates[i].plane)),
pstates[i].state->stage);
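
The reworked mdp5_crtc_atomic_check() sorts the planes by zpos and, when the bottom-most plane does not cover the whole mode, reserves the base mixer stage for the border color before checking the plane count against nb_stages. A compact model of that staging decision, assuming STAGE_BASE is stage 0 as in the driver and that the plane array is already sorted by zpos:

#include <stdio.h>

struct pstate { int x, y, w, h, zpos; };

static int is_fullscreen(const struct pstate *p, int hdisplay, int vdisplay)
{
	return p->x <= 0 && p->y <= 0 &&
	       p->x + p->w >= hdisplay && p->y + p->h >= vdisplay;
}

static int assign_stages(struct pstate *p, int cnt,
			 int hdisplay, int vdisplay, int nb_stages)
{
	int base = 0;

	/* a windowed bottom layer leaves stage 0 to the border color */
	if (cnt > 0 && !is_fullscreen(&p[0], hdisplay, vdisplay))
		base++;

	if (cnt + base >= nb_stages)
		return -1;			/* too many planes */

	for (int i = 0; i < cnt; i++)
		printf("plane zpos=%d -> stage %d\n", p[i].zpos, i + base);
	return 0;
}

int main(void)
{
	struct pstate planes[] = {
		{ 100, 100, 320, 240, 0 },	/* windowed bottom layer */
		{ 0, 0, 1920, 1080, 1 },
	};

	return assign_stages(planes, 2, 1920, 1080, 8) ? 1 : 0;
}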
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 432c09836b0e..83bf997dda03 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -78,12 +78,12 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev,
if (!dev->mode_config.rotation_property)
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
- BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+ DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y);
if (dev->mode_config.rotation_property)
drm_object_attach_property(&plane->base,
dev->mode_config.rotation_property,
- 0);
+ DRM_ROTATE_0);
}
/* helper to install properties which are common to planes and crtcs */
@@ -250,7 +250,7 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
};
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state)
+ struct drm_plane_state *new_state)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -264,7 +264,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
- const struct drm_plane_state *old_state)
+ struct drm_plane_state *old_state)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
@@ -292,8 +292,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
format = to_mdp_format(msm_framebuffer_format(state->fb));
if (MDP_FORMAT_IS_YUV(format) &&
!pipe_supports_yuv(mdp5_plane->caps)) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support YUV\n");
+ DBG("Pipe doesn't support YUV\n");
return -EINVAL;
}
@@ -301,20 +300,18 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
(((state->src_w >> 16) != state->crtc_w) ||
((state->src_h >> 16) != state->crtc_h))) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+ DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
state->src_w >> 16, state->src_h >> 16,
state->crtc_w, state->crtc_h);
return -EINVAL;
}
- hflip = !!(state->rotation & BIT(DRM_REFLECT_X));
- vflip = !!(state->rotation & BIT(DRM_REFLECT_Y));
+ hflip = !!(state->rotation & DRM_REFLECT_X);
+ vflip = !!(state->rotation & DRM_REFLECT_Y);
if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
- dev_err(plane->dev->dev,
- "Pipe doesn't support flip\n");
+ DBG("Pipe doesn't support flip\n");
return -EINVAL;
}
@@ -743,8 +740,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
config |= get_scale_config(format, src_h, crtc_h, false);
DBG("scale config = %x", config);
- hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
- vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y));
+ hflip = !!(pstate->rotation & DRM_REFLECT_X);
+ vflip = !!(pstate->rotation & DRM_REFLECT_Y);
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 4a8a6f1f1151..73bae382eac3 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -112,13 +112,13 @@ static void complete_commit(struct msm_commit *c, bool async)
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
- drm_atomic_helper_wait_for_fences(dev, state);
+ drm_atomic_helper_wait_for_fences(dev, state, false);
kms->funcs->prepare_commit(kms, state);
drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_planes(dev, state, false);
+ drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_modeset_enables(dev, state);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8a0237008f74..46568fc80848 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -26,9 +26,10 @@
* MSM driver version:
* - 1.0.0 - initial interface
* - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
+ * - 1.2.0 - adds explicit fence support for submit ioctl
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 1
+#define MSM_VERSION_MINOR 2
#define MSM_VERSION_PATCHLEVEL 0
static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -227,7 +228,7 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->atomic_wq);
destroy_workqueue(priv->atomic_wq);
- if (kms)
+ if (kms && kms->funcs)
kms->funcs->destroy(kms);
if (gpu) {
@@ -347,9 +348,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
int ret;
ddev = drm_dev_alloc(drv, dev);
- if (!ddev) {
+ if (IS_ERR(ddev)) {
dev_err(dev, "failed to allocate drm_device\n");
- return -ENOMEM;
+ return PTR_ERR(ddev);
}
platform_set_drvdata(pdev, ddev);
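
drm_dev_alloc() now returns an ERR_PTR-encoded error rather than NULL, so callers propagate PTR_ERR() instead of hard-coding -ENOMEM; the nouveau probe path below gets the same conversion. A self-contained illustration of the convention, with the three standard kernel macros reproduced so the sketch builds on its own and dev_alloc() as a hypothetical allocator in the role of drm_dev_alloc():

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define ERR_PTR(e)	((void *)(long)(e))
#define PTR_ERR(p)	((long)(p))

static void *dev_alloc(int fail)
{
	static int dummy;

	return fail ? ERR_PTR(-12) : (void *)&dummy;	/* -12 is -ENOMEM */
}

int main(void)
{
	void *ddev = dev_alloc(1);

	if (IS_ERR(ddev)) {
		/* propagate the encoded error, don't assume -ENOMEM */
		printf("drm_dev_alloc failed: %ld\n", PTR_ERR(ddev));
		return 1;
	}
	return 0;
}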
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 85f3047e05ae..b6ac27e31929 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -593,18 +593,16 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
bool write = !!(op & MSM_PREP_WRITE);
-
- if (op & MSM_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
- return -EBUSY;
- } else {
- int ret;
-
- ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
- true, timeout_to_jiffies(timeout));
- if (ret <= 0)
- return ret == 0 ? -ETIMEDOUT : ret;
- }
+ unsigned long remain =
+ op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
+ long ret;
+
+ ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+ true, remain);
+ if (ret == 0)
+ return remain == 0 ? -EBUSY : -ETIMEDOUT;
+ else if (ret < 0)
+ return ret;
/* TODO cache maintenance */
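
The rewritten msm_gem_cpu_prep() folds the MSM_PREP_NOSYNC case into the normal path by passing a zero timeout to the same reservation wait, so a 0 return ("timed out") maps to -EBUSY when no wait was requested and -ETIMEDOUT otherwise. A small model of that return-value mapping, with wait_timeout() standing in for reservation_object_wait_timeout_rcu():

#include <stdio.h>

#define EBUSY		16
#define ETIMEDOUT	110

static long wait_timeout(int signaled, unsigned long timeout)
{
	(void)timeout;
	/* >0: signaled, 0: timed out, <0: error, like the RCU wait */
	return signaled ? 1 : 0;
}

static int cpu_prep(int nosync, int signaled, unsigned long timeout_jiffies)
{
	unsigned long remain = nosync ? 0 : timeout_jiffies;
	long ret = wait_timeout(signaled, remain);

	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	if (ret < 0)
		return (int)ret;
	return 0;
}

int main(void)
{
	printf("NOSYNC, busy   -> %d\n", cpu_prep(1, 0, 100));
	printf("wait, timeout  -> %d\n", cpu_prep(0, 0, 100));
	printf("wait, signaled -> %d\n", cpu_prep(0, 1, 100));
	return 0;
}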
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 283d2841ba58..192b2d3a79cb 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
- unregister_shrinker(&priv->shrinker);
+
+ if (priv->shrinker.nr_deferred) {
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+ unregister_shrinker(&priv->shrinker);
+ }
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 880d6a9af7c8..b6a0f37a65f3 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/sync_file.h>
+
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
@@ -378,6 +380,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
+ struct fence *in_fence = NULL;
+ struct sync_file *sync_file = NULL;
+ int out_fence_fd = -1;
unsigned i;
int ret;
@@ -387,13 +392,23 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
- if (args->pipe != MSM_PIPE_3D0)
+ if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
+ return -EINVAL;
+
+ if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
+ if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto out_unlock;
+ }
+ }
priv->struct_mutex_task = current;
submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
@@ -410,9 +425,32 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- ret = submit_fence_sync(submit);
- if (ret)
- goto out;
+ if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+ in_fence = sync_file_get_fence(args->fence_fd);
+
+ if (!in_fence) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* TODO if we get an array-fence due to userspace merging multiple
+ * fences, we need a way to determine if all the backing fences
+ * are from our own context..
+ */
+
+ if (in_fence->context != gpu->fctx->context) {
+ ret = fence_wait(in_fence, true);
+ if (ret)
+ goto out;
+ }
+
+ }
+
+ if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
+ ret = submit_fence_sync(submit);
+ if (ret)
+ goto out;
+ }
ret = submit_pin_objects(submit);
if (ret)
@@ -478,15 +516,39 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
- ret = msm_gpu_submit(gpu, submit, ctx);
+ submit->fence = msm_fence_alloc(gpu->fctx);
+ if (IS_ERR(submit->fence)) {
+ ret = PTR_ERR(submit->fence);
+ submit->fence = NULL;
+ goto out;
+ }
+
+ if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ sync_file = sync_file_create(submit->fence);
+ if (!sync_file) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ msm_gpu_submit(gpu, submit, ctx);
args->fence = submit->fence->seqno;
+ if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
+ }
+
out:
+ if (in_fence)
+ fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
out_unlock:
+ if (ret && (out_fence_fd >= 0))
+ put_unused_fd(out_fence_fd);
priv->struct_mutex_task = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
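
The submit-ioctl changes reserve the out-fence fd slot early, create the sync_file only once the fence exists, and install the fd only after the submission can no longer fail, releasing the unused slot on any error path. A userspace model of that ordering, with reserve_fd()/install_fd()/put_unused() standing in for get_unused_fd_flags(), fd_install() and put_unused_fd():

#include <stdio.h>

static int reserve_fd(void)		{ return 3; }
static void install_fd(int fd)		{ printf("fd %d installed\n", fd); }
static void put_unused(int fd)		{ printf("fd %d released\n", fd); }

static int submit(int want_out_fence, int fail_submit)
{
	int out_fd = -1, ret = 0;

	if (want_out_fence) {
		out_fd = reserve_fd();	/* early, before hard-to-undo work */
		if (out_fd < 0)
			return out_fd;
	}

	if (fail_submit) {		/* any mid-submit error */
		ret = -22;		/* -EINVAL */
		goto out;
	}

	if (want_out_fence)
		install_fd(out_fd);	/* only once nothing can fail */
out:
	if (ret && out_fd >= 0)
		put_unused(out_fd);	/* undo the reservation on error */
	return ret;
}

int main(void)
{
	submit(1, 0);
	submit(1, 1);
	return 0;
}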
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 36ed53e661fe..5bb09838b5ae 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -509,22 +509,15 @@ void msm_gpu_retire(struct msm_gpu *gpu)
}
/* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
- int i, ret;
+ int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- submit->fence = msm_fence_alloc(gpu->fctx);
- if (IS_ERR(submit->fence)) {
- ret = PTR_ERR(submit->fence);
- submit->fence = NULL;
- return ret;
- }
-
inactive_cancel(gpu);
list_add_tail(&submit->node, &gpu->submit_list);
@@ -557,8 +550,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
priv->lastctx = ctx;
hangcheck_timer_reset(gpu);
-
- return 0;
}
/*
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index c9022837a1a4..d61d98a6e047 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -163,7 +163,7 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 17fe4e53e0d1..1627294575cb 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -229,8 +229,8 @@ int msm_perf_debugfs_init(struct drm_minor *minor)
perf->ent = debugfs_create_file("perf", S_IFREG | S_IRUGO,
minor->debugfs_root, perf, &perf_debugfs_fops);
if (!perf->ent) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/perf\n",
- minor->debugfs_root->d_name.name);
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/perf\n",
+ minor->debugfs_root);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 3a5fdfcd67ae..8487f461f05f 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -243,8 +243,8 @@ int msm_rd_debugfs_init(struct drm_minor *minor)
rd->ent = debugfs_create_file("rd", S_IFREG | S_IRUGO,
minor->debugfs_root, rd, &rd_debugfs_fops);
if (!rd->ent) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/rd\n",
- minor->debugfs_root->d_name.name);
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/rd\n",
+ minor->debugfs_root);
goto fail;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dc57b628e074..193573d191e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
if (!parent_adev)
return false;
- return acpi_has_method(parent_adev->handle, "_PR3");
+ return parent_adev->power.flags.power_resources &&
+ acpi_has_method(parent_adev->handle, "_PR3");
}
static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 864323b19cf7..343b8659472c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1152,7 +1152,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -1180,7 +1180,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
@@ -1298,7 +1298,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Fallback to software copy. */
ret = ttm_bo_wait(bo, intr, no_wait_gpu);
if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);
out:
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1316,7 +1316,8 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
+ return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+ filp->private_data);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 66c1280c0f1f..3100fd88a015 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -351,7 +351,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
if (nouveau_modeset != 2)
- remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+ drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
kfree(aper);
ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
@@ -1067,8 +1067,8 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
goto err_free;
drm = drm_dev_alloc(&driver_platform, &pdev->dev);
- if (!drm) {
- err = -ENOMEM;
+ if (IS_ERR(drm)) {
+ err = PTR_ERR(drm);
goto err_free;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index d1f248fd3506..9f5692726c16 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -32,7 +32,6 @@
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/init.h>
#include <linux/screen_info.h>
#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1825dbc33192..a6dbe8258040 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -398,6 +398,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->device.info.ram_user;
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
if (ret) {
@@ -430,6 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
+ struct nvkm_device *device = nvxx_device(&drm->device);
+
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
@@ -439,4 +444,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
+ arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 0eae8afaed90..b1f3b818edf4 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -13,7 +13,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index fc4c238c9583..9f3d6f48f3e1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/fb.h>
#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 157c512205d1..3557a4c7dd7b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -28,7 +28,6 @@
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/backlight.h>
-#include <linux/fb.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index e256d879b25c..dfd4e9621e3b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -125,16 +125,15 @@ u32 dss_of_port_get_port_number(struct device_node *port)
static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
{
- struct device_node *np, *np_parent;
+ struct device_node *np;
np = of_parse_phandle(node, "remote-endpoint", 0);
if (!np)
return NULL;
- np_parent = of_get_next_parent(np);
- of_node_put(np);
+ np = of_get_next_parent(np);
- return np_parent;
+ return np;
}
struct device_node *
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 26c6134eb744..e1cfba51cff6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -96,7 +96,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
dispc_runtime_get();
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, false);
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
omap_atomic_wait_for_completion(dev, old_state);
@@ -295,9 +295,9 @@ static int omap_modeset_init_properties(struct drm_device *dev)
if (priv->has_dmm) {
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
- BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
- BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
- BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+ DRM_ROTATE_0 | DRM_ROTATE_90 |
+ DRM_ROTATE_180 | DRM_ROTATE_270 |
+ DRM_REFLECT_X | DRM_REFLECT_Y);
if (!dev->mode_config.rotation_property)
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 31f5178c22c7..5f3337f1e9aa 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -179,24 +179,24 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
(uint32_t)win->rotation);
/* fallthru to default to no rotation */
case 0:
- case BIT(DRM_ROTATE_0):
+ case DRM_ROTATE_0:
orient = 0;
break;
- case BIT(DRM_ROTATE_90):
+ case DRM_ROTATE_90:
orient = MASK_XY_FLIP | MASK_X_INVERT;
break;
- case BIT(DRM_ROTATE_180):
+ case DRM_ROTATE_180:
orient = MASK_X_INVERT | MASK_Y_INVERT;
break;
- case BIT(DRM_ROTATE_270):
+ case DRM_ROTATE_270:
orient = MASK_XY_FLIP | MASK_Y_INVERT;
break;
}
- if (win->rotation & BIT(DRM_REFLECT_X))
+ if (win->rotation & DRM_REFLECT_X)
orient ^= MASK_X_INVERT;
- if (win->rotation & BIT(DRM_REFLECT_Y))
+ if (win->rotation & DRM_REFLECT_Y)
orient ^= MASK_Y_INVERT;
/* adjust x,y offset for flip/invert: */
@@ -213,7 +213,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
} else {
switch (win->rotation & DRM_ROTATE_MASK) {
case 0:
- case BIT(DRM_ROTATE_0):
+ case DRM_ROTATE_0:
/* OK */
break;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 5252ab720e70..66ac8c40db26 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -60,7 +60,7 @@ to_omap_plane_state(struct drm_plane_state *state)
}
static int omap_plane_prepare_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state)
+ struct drm_plane_state *new_state)
{
if (!new_state->fb)
return 0;
@@ -69,7 +69,7 @@ static int omap_plane_prepare_fb(struct drm_plane *plane,
}
static void omap_plane_cleanup_fb(struct drm_plane *plane,
- const struct drm_plane_state *old_state)
+ struct drm_plane_state *old_state)
{
if (old_state->fb)
omap_framebuffer_unpin(old_state->fb);
@@ -109,8 +109,8 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
win.src_y = state->src_y >> 16;
switch (state->rotation & DRM_ROTATE_MASK) {
- case BIT(DRM_ROTATE_90):
- case BIT(DRM_ROTATE_270):
+ case DRM_ROTATE_90:
+ case DRM_ROTATE_270:
win.src_w = state->src_h >> 16;
win.src_h = state->src_w >> 16;
break;
@@ -149,7 +149,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
struct omap_plane_state *omap_state = to_omap_plane_state(plane->state);
struct omap_plane *omap_plane = to_omap_plane(plane);
- plane->state->rotation = BIT(DRM_ROTATE_0);
+ plane->state->rotation = DRM_ROTATE_0;
omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
@@ -178,7 +178,7 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
if (state->fb) {
- if (state->rotation != BIT(DRM_ROTATE_0) &&
+ if (state->rotation != DRM_ROTATE_0 &&
!omap_framebuffer_supports_rotation(state->fb))
return -EINVAL;
}
@@ -269,7 +269,7 @@ static void omap_plane_reset(struct drm_plane *plane)
*/
omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
- omap_state->base.rotation = BIT(DRM_ROTATE_0);
+ omap_state->base.rotation = DRM_ROTATE_0;
plane->state = &omap_state->base;
plane->state->plane = plane;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 1500ab99f548..62aba976e744 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,6 +18,17 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_JDI_LT070ME05000
+ tristate "JDI LT070ME05000 WUXGA DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the JDI DSI video
+ mode panel found in Google Nexus 7 (2013) devices.
+ The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses
+ 24 bits per pixel.
+
config DRM_PANEL_SAMSUNG_LD9040
tristate "Samsung LD9040 RGB/SPI panel"
depends on OF && SPI
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index f277eed933d6..a5c7ec0236e0 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
new file mode 100644
index 000000000000..5b2340ef74ed
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) 2016 InforceComputing
+ * Author: Vinay Simha BN <simhavcs@gmail.com>
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Author: Sumit Semwal <sumit.semwal@linaro.org>
+ *
+ * From internet archives, the panel for Nexus 7 2nd Gen, 2013 model is a
+ * JDI model LT070ME05000, and its data sheet is at:
+ * http://panelone.net/en/7-0-inch/JDI_LT070ME05000_7.0_inch-datasheet
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+static const char * const regulator_names[] = {
+ "vddp",
+ "iovcc"
+};
+
+struct jdi_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *dcdc_en_gpio;
+ struct backlight_device *backlight;
+
+ bool prepared;
+ bool enabled;
+
+ const struct drm_display_mode *mode;
+};
+
+static inline struct jdi_panel *to_jdi_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct jdi_panel, base);
+}
+
+static int jdi_panel_init(struct jdi_panel *jdi)
+{
+ struct mipi_dsi_device *dsi = jdi->dsi;
+ struct device *dev = &jdi->dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_soft_reset(dsi);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(10000, 20000);
+
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+ if (ret < 0) {
+ dev_err(dev, "failed to set pixel format: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_column_address(dsi, 0, jdi->mode->hdisplay - 1);
+ if (ret < 0) {
+ dev_err(dev, "failed to set column address: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0, jdi->mode->vdisplay - 1);
+ if (ret < 0) {
+ dev_err(dev, "failed to set page address: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * BIT(5) BCTRL = 1 Backlight Control Block On, Brightness registers
+ * are active
+ * BIT(3) BL = 1 Backlight Control On
+ * BIT(2) DD = 0 Display Dimming is Off
+ */
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ (u8[]){ 0x24 }, 1);
+ if (ret < 0) {
+ dev_err(dev, "failed to write control display: %d\n", ret);
+ return ret;
+ }
+
+ /* CABC off */
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_POWER_SAVE,
+ (u8[]){ 0x00 }, 1);
+ if (ret < 0) {
+ dev_err(dev, "failed to set cabc off: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to set exit sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ msleep(120);
+
+ ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x00}, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to set mcap: %d\n", ret);
+ return ret;
+ }
+
+ mdelay(10);
+
+ /* Interface setting, video mode */
+ ret = mipi_dsi_generic_write(dsi, (u8[])
+ {0xB3, 0x26, 0x08, 0x00, 0x20, 0x00}, 6);
+ if (ret < 0) {
+ dev_err(dev, "failed to set display interface setting: %d\n",
+ ret);
+ return ret;
+ }
+
+ mdelay(20);
+
+ ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x03}, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to set default values for mcap: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int jdi_panel_on(struct jdi_panel *jdi)
+{
+ struct mipi_dsi_device *dsi = jdi->dsi;
+ struct device *dev = &jdi->dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0)
+ dev_err(dev, "failed to set display on: %d\n", ret);
+
+ return ret;
+}
+
+static void jdi_panel_off(struct jdi_panel *jdi)
+{
+ struct mipi_dsi_device *dsi = jdi->dsi;
+ struct device *dev = &jdi->dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ dev_err(dev, "failed to set display off: %d\n", ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ dev_err(dev, "failed to enter sleep mode: %d\n", ret);
+
+ msleep(100);
+}
+
+static int jdi_panel_disable(struct drm_panel *panel)
+{
+ struct jdi_panel *jdi = to_jdi_panel(panel);
+
+ if (!jdi->enabled)
+ return 0;
+
+ jdi->backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(jdi->backlight);
+
+ jdi->enabled = false;
+
+ return 0;
+}
+
+static int jdi_panel_unprepare(struct drm_panel *panel)
+{
+ struct jdi_panel *jdi = to_jdi_panel(panel);
+ struct device *dev = &jdi->dsi->dev;
+ int ret;
+
+ if (!jdi->prepared)
+ return 0;
+
+ jdi_panel_off(jdi);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(jdi->supplies), jdi->supplies);
+ if (ret < 0)
+ dev_err(dev, "regulator disable failed, %d\n", ret);
+
+ gpiod_set_value(jdi->enable_gpio, 0);
+
+ gpiod_set_value(jdi->reset_gpio, 1);
+
+ gpiod_set_value(jdi->dcdc_en_gpio, 0);
+
+ jdi->prepared = false;
+
+ return 0;
+}
+
+static int jdi_panel_prepare(struct drm_panel *panel)
+{
+ struct jdi_panel *jdi = to_jdi_panel(panel);
+ struct device *dev = &jdi->dsi->dev;
+ int ret;
+
+ if (jdi->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(jdi->supplies), jdi->supplies);
+ if (ret < 0) {
+ dev_err(dev, "regulator enable failed, %d\n", ret);
+ return ret;
+ }
+
+ msleep(20);
+
+ gpiod_set_value(jdi->dcdc_en_gpio, 1);
+ usleep_range(10, 20);
+
+ gpiod_set_value(jdi->reset_gpio, 0);
+ usleep_range(10, 20);
+
+ gpiod_set_value(jdi->enable_gpio, 1);
+ usleep_range(10, 20);
+
+ ret = jdi_panel_init(jdi);
+ if (ret < 0) {
+ dev_err(dev, "failed to init panel: %d\n", ret);
+ goto poweroff;
+ }
+
+ ret = jdi_panel_on(jdi);
+ if (ret < 0) {
+ dev_err(dev, "failed to set panel on: %d\n", ret);
+ goto poweroff;
+ }
+
+ jdi->prepared = true;
+
+ return 0;
+
+poweroff:
+ ret = regulator_bulk_disable(ARRAY_SIZE(jdi->supplies), jdi->supplies);
+ if (ret < 0)
+ dev_err(dev, "regulator disable failed, %d\n", ret);
+
+ gpiod_set_value(jdi->enable_gpio, 0);
+
+ gpiod_set_value(jdi->reset_gpio, 1);
+
+ gpiod_set_value(jdi->dcdc_en_gpio, 0);
+
+ return ret;
+}
+
+static int jdi_panel_enable(struct drm_panel *panel)
+{
+ struct jdi_panel *jdi = to_jdi_panel(panel);
+
+ if (jdi->enabled)
+ return 0;
+
+ jdi->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(jdi->backlight);
+
+ jdi->enabled = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 155493,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 48,
+ .hsync_end = 1200 + 48 + 32,
+ .htotal = 1200 + 48 + 32 + 60,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 3,
+ .vsync_end = 1920 + 3 + 5,
+ .vtotal = 1920 + 3 + 5 + 6,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static int jdi_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+ struct jdi_panel *jdi = to_jdi_panel(panel);
+ struct device *dev = &jdi->dsi->dev;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ dev_err(dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = 95;
+ panel->connector->display_info.height_mm = 151;
+
+ return 1;
+}
+
+static int dsi_dcs_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ int ret;
+ u16 brightness = bl->props.brightness;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness & 0xff;
+}
+
+static int dsi_dcs_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops dsi_bl_ops = {
+ .update_status = dsi_dcs_bl_update_status,
+ .get_brightness = dsi_dcs_bl_get_brightness,
+};
+
+static struct backlight_device *
+drm_panel_create_dsi_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.brightness = 255;
+ props.max_brightness = 255;
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &dsi_bl_ops, &props);
+}
+
+static const struct drm_panel_funcs jdi_panel_funcs = {
+ .disable = jdi_panel_disable,
+ .unprepare = jdi_panel_unprepare,
+ .prepare = jdi_panel_prepare,
+ .enable = jdi_panel_enable,
+ .get_modes = jdi_panel_get_modes,
+};
+
+static const struct of_device_id jdi_of_match[] = {
+ { .compatible = "jdi,lt070me05000", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, jdi_of_match);
+
+static int jdi_panel_add(struct jdi_panel *jdi)
+{
+ struct device *dev = &jdi->dsi->dev;
+ int ret;
+ unsigned int i;
+
+ jdi->mode = &default_mode;
+
+ for (i = 0; i < ARRAY_SIZE(jdi->supplies); i++)
+ jdi->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(jdi->supplies),
+ jdi->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to init regulator, ret=%d\n", ret);
+ return ret;
+ }
+
+ jdi->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(jdi->enable_gpio)) {
+ ret = PTR_ERR(jdi->enable_gpio);
+ dev_err(dev, "cannot get enable-gpio %d\n", ret);
+ return ret;
+ }
+
+ jdi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(jdi->reset_gpio)) {
+ ret = PTR_ERR(jdi->reset_gpio);
+ dev_err(dev, "cannot get reset-gpios %d\n", ret);
+ return ret;
+ }
+
+ jdi->dcdc_en_gpio = devm_gpiod_get(dev, "dcdc-en", GPIOD_OUT_LOW);
+ if (IS_ERR(jdi->dcdc_en_gpio)) {
+ ret = PTR_ERR(jdi->dcdc_en_gpio);
+ dev_err(dev, "cannot get dcdc-en-gpio %d\n", ret);
+ return ret;
+ }
+
+ jdi->backlight = drm_panel_create_dsi_backlight(jdi->dsi);
+ if (IS_ERR(jdi->backlight)) {
+ ret = PTR_ERR(jdi->backlight);
+ dev_err(dev, "failed to register backlight %d\n", ret);
+ return ret;
+ }
+
+ drm_panel_init(&jdi->base);
+ jdi->base.funcs = &jdi_panel_funcs;
+ jdi->base.dev = &jdi->dsi->dev;
+
+ ret = drm_panel_add(&jdi->base);
+
+ return ret;
+}
+
+static void jdi_panel_del(struct jdi_panel *jdi)
+{
+ if (jdi->base.dev)
+ drm_panel_remove(&jdi->base);
+}
+
+static int jdi_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct jdi_panel *jdi;
+ int ret;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL);
+ if (!jdi)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, jdi);
+
+ jdi->dsi = dsi;
+
+ ret = jdi_panel_add(jdi);
+ if (ret < 0)
+ return ret;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int jdi_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = jdi_panel_disable(&jdi->base);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", ret);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_detach(&jdi->base);
+ jdi_panel_del(jdi);
+
+ return 0;
+}
+
+static void jdi_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi);
+
+ jdi_panel_disable(&jdi->base);
+}
+
+static struct mipi_dsi_driver jdi_panel_driver = {
+ .driver = {
+ .name = "panel-jdi-lt070me05000",
+ .of_match_table = jdi_of_match,
+ },
+ .probe = jdi_panel_probe,
+ .remove = jdi_panel_remove,
+ .shutdown = jdi_panel_shutdown,
+};
+module_mipi_dsi_driver(jdi_panel_driver);
+
+MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>");
+MODULE_AUTHOR("Vinay Simha BN <simhavcs@gmail.com>");
+MODULE_DESCRIPTION("JDI LT070ME05000 WUXGA");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 85143d1b9b31..113db3c4a633 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -849,6 +849,34 @@ static const struct panel_desc innolux_at070tn92 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing innolux_g101ice_l01_timing = {
+ .pixelclock = { 60400000, 71100000, 74700000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 41, 80, 100 },
+ .hback_porch = { 40, 79, 99 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 11, 14 },
+ .vback_porch = { 4, 11, 14 },
+ .vsync_len = { 1, 1, 1 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc innolux_g101ice_l01 = {
+ .timings = &innolux_g101ice_l01_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 135,
+ },
+ .delay = {
+ .enable = 200,
+ .disable = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct drm_display_mode innolux_g121i1_l01_mode = {
.clock = 71000,
.hdisplay = 1280,
@@ -1186,7 +1214,7 @@ static const struct panel_desc olimex_lcd_olinuxino_43ts = {
.width = 105,
.height = 67,
},
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
/*
@@ -1245,6 +1273,7 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
.height = 93,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
};
static const struct drm_display_mode qd43003c0_40_mode = {
@@ -1384,6 +1413,11 @@ static const struct panel_desc sharp_lq123p1jx31 = {
.width = 259,
.height = 173,
},
+ .delay = {
+ .prepare = 110,
+ .enable = 50,
+ .unprepare = 550,
+ },
};
static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
@@ -1430,6 +1464,11 @@ static const struct panel_desc starry_kr122ea0sra = {
.width = 263,
.height = 164,
},
+ .delay = {
+ .prepare = 10 + 200,
+ .enable = 50,
+ .unprepare = 10 + 500,
+ },
};
static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -1575,6 +1614,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,at070tn92",
.data = &innolux_at070tn92,
}, {
+ .compatible ="innolux,g101ice-l01",
+ .data = &innolux_g101ice_l01
+ }, {
.compatible ="innolux,g121i1-l01",
.data = &innolux_g121i1_l01
}, {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 3aef12742a53..a61c0d460ec2 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -211,6 +211,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
drm_crtc_cleanup(crtc);
+ qxl_bo_unref(&qxl_crtc->cursor_bo);
kfree(qxl_crtc);
}
@@ -296,6 +297,52 @@ qxl_hide_cursor(struct qxl_device *qdev)
return 0;
}
+static int qxl_crtc_apply_cursor(struct drm_crtc *crtc)
+{
+ struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct qxl_device *qdev = dev->dev_private;
+ struct qxl_cursor_cmd *cmd;
+ struct qxl_release *release;
+ int ret = 0;
+
+ if (!qcrtc->cursor_bo)
+ return 0;
+
+ ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+ QXL_RELEASE_CURSOR_CMD,
+ &release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_list_add(release, qcrtc->cursor_bo);
+ if (ret)
+ goto out_free_release;
+
+ ret = qxl_release_reserve_list(release, false);
+ if (ret)
+ goto out_free_release;
+
+ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_CURSOR_SET;
+ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+
+ cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
+
+ cmd->u.set.visible = 1;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ qxl_release_fence_buffer_objects(release);
+
+ return ret;
+
+out_free_release:
+ qxl_release_free(qdev, release);
+ return ret;
+}
+
static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
@@ -400,7 +447,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
}
drm_gem_object_unreference_unlocked(obj);
- qxl_bo_unref(&cursor_bo);
+ qxl_bo_unref(&qcrtc->cursor_bo);
+ qcrtc->cursor_bo = cursor_bo;
return ret;
@@ -655,6 +703,12 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
bo->surf.stride, bo->surf.format);
qxl_io_create_primary(qdev, 0, bo);
bo->is_primary = true;
+
+ ret = qxl_crtc_apply_cursor(crtc);
+ if (ret) {
+ DRM_ERROR("could not set cursor after modeset\n");
+ ret = 0;
+ }
}
if (bo->is_primary) {
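
The qxl change keeps a reference to the last cursor buffer on the CRTC so that, after the primary surface is recreated during a modeset, qxl_crtc_apply_cursor() can re-issue a QXL_CURSOR_SET command instead of letting the cursor disappear. A toy model of that keep-and-reapply pattern:

#include <stdio.h>

struct crtc {
	const char *cursor_bo;	/* stand-in for the pinned qxl_bo ref */
};

static void apply_cursor(struct crtc *c)
{
	if (!c->cursor_bo)
		return;		/* no cursor set yet */
	printf("QXL_CURSOR_SET -> %s\n", c->cursor_bo);
}

static void set_cursor(struct crtc *c, const char *bo)
{
	c->cursor_bo = bo;	/* keep the reference for later modesets */
	apply_cursor(c);
}

static void mode_set(struct crtc *c)
{
	printf("primary surface recreated\n");
	apply_cursor(c);	/* the reapply added above */
}

int main(void)
{
	struct crtc c = { 0 };

	set_cursor(&c, "cursor-bo");
	mode_set(&c);
	return 0;
}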
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index ffe885395145..9b728edf1b49 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -57,11 +57,8 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
static int
alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
{
- int ret;
- ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
- QXL_RELEASE_DRAWABLE, release,
- NULL);
- return ret;
+ return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+ QXL_RELEASE_DRAWABLE, release, NULL);
}
static void
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 8e633caa4078..5f3e5ad99de7 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -137,6 +137,7 @@ struct qxl_crtc {
int cur_y;
int hot_spot_x;
int hot_spot_y;
+ struct qxl_bo *cursor_bo;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 28c1423049c5..2cd879a4ae15 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -24,7 +24,6 @@
* David Airlie
*/
#include <linux/module.h>
-#include <linux/fb.h>
#include "drmP.h"
#include "drm/drm.h"
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 5e1d7899dd72..fa5440dc9a19 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -61,7 +61,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
if (domain == QXL_GEM_DOMAIN_VRAM)
qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
if (domain == QXL_GEM_DOMAIN_SURFACE)
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
+ qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
if (domain == QXL_GEM_DOMAIN_CPU)
qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
if (!c)
@@ -151,7 +151,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
map = qdev->vram_mapping;
- else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+ else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
map = qdev->surface_mapping;
else
goto fallback;
@@ -191,7 +191,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
map = qdev->vram_mapping;
- else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+ else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
map = qdev->surface_mapping;
else
goto fallback;
@@ -311,7 +311,7 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
int qxl_surf_evict(struct qxl_device *qdev)
{
- return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+ return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}
int qxl_vram_evict(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index f599cd073b72..cd83f050cf3e 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -203,12 +203,9 @@ qxl_release_free(struct qxl_device *qdev,
static int qxl_release_bo_alloc(struct qxl_device *qdev,
struct qxl_bo **bo)
{
- int ret;
/* pin release BOs; they are too messy to evict */
- ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
- QXL_GEM_DOMAIN_VRAM, NULL,
- bo);
- return ret;
+ return qxl_bo_create(qdev, PAGE_SIZE, false, true,
+ QXL_GEM_DOMAIN_VRAM, NULL, bo);
}
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index d50c9679e631..e26c82db948b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -168,7 +168,7 @@ static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
- case TTM_PL_PRIV0:
+ case TTM_PL_PRIV:
/* "On-card" video ram */
man->func = &ttm_bo_manager_func;
man->gpu_offset = 0;
@@ -210,7 +210,8 @@ static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct qxl_bo *qbo = to_qxl_bo(bo);
- return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
+ return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
+ filp->private_data);
}
static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -235,7 +236,7 @@ static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
mem->bus.base = qdev->vram_base;
mem->bus.offset = mem->start << PAGE_SHIFT;
break;
- case TTM_PL_PRIV0:
+ case TTM_PL_PRIV:
mem->bus.is_iomem = true;
mem->bus.base = qdev->surfaceram_base;
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -361,8 +362,8 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
qxl_move_null(bo, new_mem);
return 0;
}
- return ttm_bo_move_memcpy(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu,
+ new_mem);
}
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
@@ -376,7 +377,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
qbo = to_qxl_bo(bo);
qdev = qbo->gem_base.dev->dev_private;
- if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+ if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}
@@ -422,7 +423,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+ r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
qdev->surfaceram_size / PAGE_SIZE);
if (r) {
DRM_ERROR("Failed initializing Surfaces heap.\n");
@@ -445,7 +446,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
void qxl_ttm_fini(struct qxl_device *qdev)
{
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+ ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
ttm_bo_device_release(&qdev->mman.bdev);
qxl_ttm_global_fini(qdev);
DRM_INFO("qxl: ttm finalized\n");
@@ -489,7 +490,7 @@ static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
if (i == 0)
qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
else
- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+ qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
}
return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index c57b4de63caf..a982be57d1ef 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -56,7 +56,7 @@ static const struct file_operations r128_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_LEGACY |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
.load = r128_driver_load,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 1dcf39084555..74f99bac08b1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1156,6 +1156,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1259,8 +1260,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
@@ -1435,8 +1437,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
@@ -1471,6 +1473,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
+ char *format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1560,8 +1563,9 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
bypass_lut = true;
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ format_name = drm_get_format_name(target_fb->pixel_format);
+ DRM_ERROR("Unsupported screen format %s\n", format_name);
+ kfree(format_name);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index cead089a9e7d..432cb46f6a34 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -389,22 +389,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
- int ret, i;
+ int ret;
- for (i = 0; i < 7; i++) {
- ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
- DP_DPCD_SIZE);
- if (ret == DP_DPCD_SIZE) {
- memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+ ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
+ DP_DPCD_SIZE);
+ if (ret == DP_DPCD_SIZE) {
+ memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
- dig_connector->dpcd);
+ DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+ dig_connector->dpcd);
- radeon_dp_probe_oui(radeon_connector);
+ radeon_dp_probe_oui(radeon_connector);
- return true;
- }
+ return true;
}
+
dig_connector->dpcd[0] = 0;
return false;
}
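The hunk above drops the open-coded 7-attempt retry loop around the DPCD read. The DRM DP AUX helpers already retry deferred/busy transactions internally, so a single drm_dp_dpcd_read() call suffices; a usage sketch of the return-value convention (bytes transferred on success, negative errno on failure):

u8 dpcd[DP_DPCD_SIZE];
ssize_t ret;

/* One call is enough; the AUX core handles defer/busy retries. */
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV,
		       dpcd, DP_DPCD_SIZE);
if (ret == DP_DPCD_SIZE)
	memcpy(dig_connector->dpcd, dpcd, DP_DPCD_SIZE);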
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0c1b9ff433af..f6ff41a0eed6 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1871,7 +1871,7 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data = NULL;
const __le32 *new_fw_data = NULL;
- u32 running, blackout = 0, tmp;
+ u32 running, tmp;
u32 *io_mc_regs = NULL;
const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
@@ -1912,11 +1912,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
if (running == 0) {
- if (running) {
- blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
- WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
- }
-
/* reset the engine and set to writable */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -1964,9 +1959,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
break;
udelay(1);
}
-
- if (running)
- WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -4201,11 +4193,7 @@ u32 cik_gfx_get_rptr(struct radeon_device *rdev,
u32 cik_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- u32 wptr;
-
- wptr = RREG32(CP_RB0_WPTR);
-
- return wptr;
+ return RREG32(CP_RB0_WPTR);
}
void cik_gfx_set_wptr(struct radeon_device *rdev,
@@ -8215,7 +8203,7 @@ static void cik_uvd_resume(struct radeon_device *rdev)
return;
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
if (r) {
dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
return;
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index cead2284fd79..48db93577c1d 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2069,6 +2069,7 @@
#define UVD_UDEC_ADDR_CONFIG 0xef4c
#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
+#define UVD_NO_OP 0xeffc
#define UVD_LMI_EXT40_ADDR 0xf498
#define UVD_GP_SCRATCH4 0xf4e0
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index db275b7ed34a..0b6b5766216f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2878,9 +2878,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x7) != 3) {
+ if ((tmp & 0x7) != 0) {
tmp &= ~0x7;
- tmp |= 0x3;
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -5580,7 +5579,7 @@ static void evergreen_uvd_resume(struct radeon_device *rdev)
return;
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
if (r) {
dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index c8e3d394cde7..f3d88ca2aa8f 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1523,6 +1523,7 @@
#define UVD_UDEC_ADDR_CONFIG 0xef4c
#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
+#define UVD_NO_OP 0xeffc
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
#define UVD_STATUS 0xf6bc
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 4a3d7cab83f7..a0d4a0522fdc 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl)
{
- u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-
- WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+ WREG32(SRBM_GFX_CNTL, RINGID(ring));
WREG32(CP_INT_CNTL, cp_int_cntl);
}
@@ -2062,7 +2060,7 @@ static void cayman_uvd_resume(struct radeon_device *rdev)
return;
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
if (r) {
dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
return;
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 47eb49b77d32..3c9fec88ea44 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1137,6 +1137,7 @@
#define UVD_UDEC_ADDR_CONFIG 0xEF4C
#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
+#define UVD_NO_OP 0xEFFC
#define UVD_RBC_RB_RPTR 0xF690
#define UVD_RBC_RB_WPTR 0xF694
#define UVD_STATUS 0xf6bc
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f25994b3afa6..f5e84f4b58e6 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1071,11 +1071,7 @@ u32 r100_gfx_get_rptr(struct radeon_device *rdev,
u32 r100_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- u32 wptr;
-
- wptr = RREG32(RADEON_CP_RB_WPTR);
-
- return wptr;
+ return RREG32(RADEON_CP_RB_WPTR);
}
void r100_gfx_set_wptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9247e7d207fe..a951881c2a50 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2631,11 +2631,7 @@ u32 r600_gfx_get_rptr(struct radeon_device *rdev,
u32 r600_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- u32 wptr;
-
- wptr = RREG32(R600_CP_RB_WPTR);
-
- return wptr;
+ return RREG32(R600_CP_RB_WPTR);
}
void r600_gfx_set_wptr(struct radeon_device *rdev,
@@ -3097,7 +3093,7 @@ static void r600_uvd_resume(struct radeon_device *rdev)
return;
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
if (r) {
dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
return;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 6a4b020dd0b4..5a26eb4545aa 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
struct drm_device *dev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
- radeon_crtc->hw_mode.clock;
- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
- radeon_crtc->hw_mode.crtc_vdisplay +
- (radeon_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ vblank_in_pixels =
+ radeon_crtc->hw_mode.crtc_htotal *
+ (radeon_crtc->hw_mode.crtc_vblank_end -
+ radeon_crtc->hw_mode.crtc_vdisplay +
+ (radeon_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
break;
}
}
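The r600_dpm change above fixes an integer-truncation problem: the old code rounded the per-line time down to whole microseconds before multiplying by the number of vblank lines, which amplified the rounding error; multiplying first keeps full precision until the single final division. A worked example with values chosen for illustration (1080p@60):

/* htotal = 2200 pixels, pixel clock = 148500 kHz, 45 vblank lines:
 *
 * old: line_time_us     = 2200 * 1000 / 148500    = 14   (14.81 truncated)
 *      vblank_time_us   = 45 * 14                 = 630 us
 * new: vblank_in_pixels = 2200 * 45               = 99000
 *      vblank_time_us   = 99000 * 1000 / 148500   = 666 us (exact: 666.67)
 */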
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 1e8495cca41e..2e00a5287bd2 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1490,6 +1490,7 @@
#define UVD_GPCOM_VCPU_DATA0 0xef10
#define UVD_GPCOM_VCPU_DATA1 0xef14
#define UVD_ENGINE_CNTL 0xef18
+#define UVD_NO_OP 0xeffc
#define UVD_SEMA_CNTL 0xf400
#define UVD_RB_ARB_CTRL 0xf480
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5633ee3eb46e..1b0dcad916b0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -742,6 +742,7 @@ struct radeon_flip_work {
struct work_struct unpin_work;
struct radeon_device *rdev;
int crtc_id;
+ u32 target_vblank;
uint64_t base;
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 31c9a92d6a1b..6efbd65c929e 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -25,6 +25,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -32,6 +33,12 @@
#include "radeon_acpi.h"
#include "atom.h"
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_atpx_dgpu_req_power_for_displays(void);
+#else
+static inline bool radeon_atpx_dgpu_req_power_for_displays(void) { return false; }
+#endif
+
#define ACPI_AC_CLASS "ac_adapter"
extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
@@ -394,6 +401,16 @@ int radeon_atif_handler(struct radeon_device *rdev,
#endif
}
}
+ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+ if ((rdev->flags & RADEON_IS_PX) &&
+ radeon_atpx_dgpu_req_power_for_displays()) {
+ pm_runtime_get_sync(rdev->ddev->dev);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(rdev->ddev);
+ pm_runtime_mark_last_busy(rdev->ddev->dev);
+ pm_runtime_put_autosuspend(rdev->ddev->dev);
+ }
+ }
/* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index ddef0d494084..2fdcd04bc93f 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -29,6 +29,7 @@ struct radeon_atpx {
acpi_handle handle;
struct radeon_atpx_functions functions;
bool is_hybrid;
+ bool dgpu_req_power_for_displays;
};
static struct radeon_atpx_priv {
@@ -72,6 +73,10 @@ bool radeon_is_atpx_hybrid(void) {
return radeon_atpx_priv.atpx.is_hybrid;
}
+bool radeon_atpx_dgpu_req_power_for_displays(void) {
+ return radeon_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b79f3b002471..27affbde058c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -198,12 +198,12 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
}
/* Any defined maximum tmds clock limit we must not exceed? */
- if (connector->max_tmds_clock > 0) {
+ if (connector->display_info.max_tmds_clock > 0) {
/* mode_clock is clock in kHz for mode to be modeset on this connector */
mode_clock = radeon_connector->pixelclock_for_modeset;
/* Maximum allowable input clock in kHz */
- max_tmds_clock = connector->max_tmds_clock * 1000;
+ max_tmds_clock = connector->display_info.max_tmds_clock;
DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
connector->name, mode_clock, max_tmds_clock);
@@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
+static void radeon_connector_unregister(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) {
+ drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
+ radeon_connector->ddc_bus->has_aux = false;
+ }
+}
+
static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_lvds_set_property,
};
@@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_tv_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
.detect = radeon_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a00dd2f74527..621af069a3d2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
rdev->flags &= ~RADEON_IS_PX;
+
+ /* disable PX if the system doesn't support dGPU power control or hybrid gfx */
+ if (!radeon_is_atpx_hybrid() &&
+ !radeon_has_atpx_dgpu_power_cntl())
+ rdev->flags &= ~RADEON_IS_PX;
}
/**
@@ -639,7 +652,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
* Used at driver startup.
* Returns true if virtual or false if not.
*/
-static bool radeon_device_is_virtual(void)
+bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
return boot_cpu_has(X86_FEATURE_HYPERVISOR);
@@ -661,8 +674,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
- /* for pass through, always force asic_init */
- if (radeon_device_is_virtual())
+ /* for pass through, always force asic_init for CI */
+ if (rdev->family >= CHIP_BONAIRE &&
+ radeon_device_is_virtual())
return false;
/* required for EFI mode on macbook2,1 which uses an r5xx asic */
@@ -1956,14 +1970,3 @@ static void radeon_debugfs_remove_files(struct radeon_device *rdev)
}
#endif
}
-
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
-
-void radeon_debugfs_cleanup(struct drm_minor *minor)
-{
-}
-#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c3206fb8f4cf..cdb8cb568c15 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -321,16 +321,30 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
update_pending = radeon_page_flip_pending(rdev, crtc_id);
/* Has the pageflip already completed in crtc, or is it certain
- * to complete in this vblank?
+ * to complete in this vblank? GET_DISTANCE_TO_VBLANKSTART provides
+ * distance to start of "fudged earlier" vblank in vpos, distance to
+ * start of real vblank in hpos. vpos >= 0 && hpos < 0 means we are in
+ * the last few scanlines before start of real vblank, where the vblank
+ * irq can fire, so we have sampled update_pending a bit too early and
+ * know the flip will complete at leading edge of the upcoming real
+ * vblank. On pre-AVIVO hardware, flips also complete inside the real
+ * vblank, not only at the leading edge, so if update_pending is set
+ * while hpos >= 0, i.e. inside the real vblank, the flip will
+ * complete almost immediately.
+ * Note that this method of completion handling is still not 100% race
+ * free, as we could execute before radeon_flip_work_func managed to
+ * run and set the RADEON_FLIP_SUBMITTED status, in which case we no-op,
+ * but the flip still gets programmed into hw and completes during
+ * vblank, leading to a delayed emission of the flip completion event.
+ * This applies at least to pre-AVIVO hardware, where flips always
+ * complete inside vblank, not only at its leading edge.
*/
if (update_pending &&
- (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
- crtc_id,
- USE_REAL_VBLANKSTART,
- &vpos, &hpos, NULL, NULL,
- &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
- ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
- (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
+ (DRM_SCANOUTPOS_VALID &
+ radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ GET_DISTANCE_TO_VBLANKSTART,
+ &vpos, &hpos, NULL, NULL,
+ &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
+ ((vpos >= 0 && hpos < 0) || (hpos >= 0 && !ASIC_IS_AVIVO(rdev)))) {
/* crtc didn't flip in this target vblank interval,
* but flip is pending in crtc. Based on the current
* scanout position we know that the current frame is
@@ -400,14 +414,13 @@ static void radeon_flip_work_func(struct work_struct *__work)
struct radeon_flip_work *work =
container_of(__work, struct radeon_flip_work, flip_work);
struct radeon_device *rdev = work->rdev;
+ struct drm_device *dev = rdev->ddev;
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &radeon_crtc->base;
unsigned long flags;
int r;
- int vpos, hpos, stat, min_udelay = 0;
- unsigned repcnt = 4;
- struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+ int vpos, hpos;
down_read(&rdev->exclusive_lock);
if (work->fence) {
@@ -438,59 +451,28 @@ static void radeon_flip_work_func(struct work_struct *__work)
work->fence = NULL;
}
+ /* Wait until we're out of the vertical blank period before the one
+ * targeted by the flip. Always wait on pre DCE4 to avoid races with
+ * flip completion handling from vblank irq, as these old asics don't
+ * have reliable pageflip completion interrupts.
+ */
+ while (radeon_crtc->enabled &&
+ (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (!ASIC_IS_AVIVO(rdev) ||
+ ((int) (work->target_vblank -
+ dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
+ usleep_range(1000, 2000);
+
/* We borrow the event spin lock for protecting flip_status */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
/* set the proper interrupt */
radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
- /* If this happens to execute within the "virtually extended" vblank
- * interval before the start of the real vblank interval then it needs
- * to delay programming the mmio flip until the real vblank is entered.
- * This prevents completing a flip too early due to the way we fudge
- * our vblank counter and vblank timestamps in order to work around the
- * problem that the hw fires vblank interrupts before actual start of
- * vblank (when line buffer refilling is done for a frame). It
- * complements the fudging logic in radeon_get_crtc_scanoutpos() for
- * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
- *
- * In practice this won't execute very often unless on very fast
- * machines because the time window for this to happen is very small.
- */
- while (radeon_crtc->enabled && --repcnt) {
- /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
- * start in hpos, and to the "fudged earlier" vblank start in
- * vpos.
- */
- stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
- GET_DISTANCE_TO_VBLANKSTART,
- &vpos, &hpos, NULL, NULL,
- &crtc->hwmode);
-
- if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
- (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
- !(vpos >= 0 && hpos <= 0))
- break;
-
- /* Sleep at least until estimated real start of hw vblank */
- min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
- if (min_udelay > vblank->framedur_ns / 2000) {
- /* Don't wait ridiculously long - something is wrong */
- repcnt = 0;
- break;
- }
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- usleep_range(min_udelay, 2 * min_udelay);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- };
-
- if (!repcnt)
- DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
- "framedur %d, linedur %d, stat %d, vpos %d, "
- "hpos %d\n", work->crtc_id, min_udelay,
- vblank->framedur_ns / 1000,
- vblank->linedur_ns / 1000, stat, vpos, hpos);
-
/* do the flip (mmio) */
radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);
@@ -499,10 +481,11 @@ static void radeon_flip_work_func(struct work_struct *__work)
up_read(&rdev->exclusive_lock);
}
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ uint32_t target)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -599,12 +582,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
base &= ~7;
}
work->base = base;
-
- r = drm_crtc_vblank_get(crtc);
- if (r) {
- DRM_ERROR("failed to get vblank before flip\n");
- goto pflip_cleanup;
- }
+ work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+ dev->driver->get_vblank_counter(dev, work->crtc_id);
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -613,7 +592,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
r = -EBUSY;
- goto vblank_cleanup;
+ goto pflip_cleanup;
}
radeon_crtc->flip_status = RADEON_FLIP_PENDING;
radeon_crtc->flip_work = work;
@@ -626,9 +605,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
queue_work(radeon_crtc->flip_queue, &work->flip_work);
return 0;
-vblank_cleanup:
- drm_crtc_vblank_put(crtc);
-
pflip_cleanup:
if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
@@ -697,7 +673,7 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = {
.gamma_set = radeon_crtc_gamma_set,
.set_config = radeon_crtc_set_config,
.destroy = radeon_crtc_destroy,
- .page_flip = radeon_crtc_page_flip,
+ .page_flip_target = radeon_crtc_page_flip_target,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -1699,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
- radeon_fbdev_fini(rdev);
- kfree(rdev->mode_info.bios_hardcoded_edid);
-
- /* free i2c buses */
- radeon_i2c_fini(rdev);
-
if (rdev->mode_info.mode_config_initialized) {
- radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_crtc_force_disable_all(rdev->ddev);
+ radeon_fbdev_fini(rdev);
+ radeon_afmt_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
+
+ kfree(rdev->mode_info.bios_hardcoded_edid);
+
+ /* free i2c buses */
+ radeon_i2c_fini(rdev);
}
static bool is_hdtv_mode(const struct drm_display_mode *mode)
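With .page_flip_target, the DRM core hands the driver an absolute target vblank count in the software (cooked) counter domain. The radeon code above translates it into the hardware counter domain once at queue time, and the flip worker then polls until the hardware counter catches up. The translation and wait, isolated as a sketch (same arithmetic as the patch; variable names as in radeon_flip_work_func):

/* Translate the cooked target into the hw counter domain ... */
u32 hw_target = target - drm_crtc_vblank_count(crtc) +
		dev->driver->get_vblank_counter(dev, crtc_id);

/* ... and wait; the signed difference makes this wraparound-safe. */
while ((int)(hw_target -
	     dev->driver->get_vblank_counter(dev, crtc_id)) > 0)
	usleep_range(1000, 2000);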
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index db64e0062689..474a8a1886f7 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
tmp &= AUX_HPD_SEL(0x7);
tmp |= AUX_HPD_SEL(chan->rec.hpd);
- tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+ tmp |= AUX_EN | AUX_LS_READ_EN;
WREG32(AUX_CONTROL + aux_offset[instance], tmp);
@@ -164,7 +164,6 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
}
if (tmp & AUX_SW_RX_TIMEOUT) {
- DRM_DEBUG_KMS("dp_aux_ch timed out\n");
ret = -ETIMEDOUT;
goto done;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c01a7c6abb49..00ea0002b539 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -39,6 +39,7 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_gem.h>
+#include <drm/drm_fb_helper.h>
#include "drm_crtc_helper.h"
#include "radeon_kfd.h"
@@ -94,9 +95,11 @@
* 2.44.0 - SET_APPEND_CNT packet3 support
* 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
* 2.46.0 - Add PFP_SYNC_ME support on evergreen
+ * 2.47.0 - Add UVD_NO_OP register support
+ * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 46
+#define KMS_DRIVER_MINOR 48
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -154,11 +157,6 @@ void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-#if defined(CONFIG_DEBUG_FS)
-int radeon_debugfs_init(struct drm_minor *minor);
-void radeon_debugfs_cleanup(struct drm_minor *minor);
-#endif
-
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void radeon_register_atpx_handler(void);
@@ -309,6 +307,8 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static struct drm_driver kms_driver;
+bool radeon_device_is_virtual(void);
+
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
@@ -324,7 +324,7 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
kfree(ap);
return 0;
@@ -362,6 +362,17 @@ radeon_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+ /* If we are running in a VM, make sure the device is
+ * torn down properly on reboot/shutdown.
+ * Unfortunately we can't detect certain
+ * hypervisors, so just do this all the time.
+ */
+ radeon_pci_remove(pdev);
+}
+
static int radeon_pmops_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -373,6 +384,14 @@ static int radeon_pmops_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ /* GPU comes up enabled by the bios on resume */
+ if (radeon_is_px(drm_dev)) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
return radeon_resume_kms(drm_dev, true, true);
}
@@ -529,10 +548,6 @@ static struct drm_driver kms_driver = {
.disable_vblank = radeon_disable_vblank_kms,
.get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
.get_scanout_position = radeon_get_crtc_scanoutpos,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = radeon_debugfs_init,
- .debugfs_cleanup = radeon_debugfs_cleanup,
-#endif
.irq_preinstall = radeon_driver_irq_preinstall_kms,
.irq_postinstall = radeon_driver_irq_postinstall_kms,
.irq_uninstall = radeon_driver_irq_uninstall_kms,
@@ -574,6 +589,7 @@ static struct pci_driver radeon_kms_pci_driver = {
.id_table = pciidlist,
.probe = radeon_pci_probe,
.remove = radeon_pci_remove,
+ .shutdown = radeon_pci_shutdown,
.driver.pm = &radeon_pm_ops,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0e3143acb565..0daad446d2c7 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -25,7 +25,7 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@@ -47,8 +47,35 @@ struct radeon_fbdev {
struct radeon_device *rdev;
};
+static int
+radeonfb_open(struct fb_info *info, int user)
+{
+ struct radeon_fbdev *rfbdev = info->par;
+ struct radeon_device *rdev = rfbdev->rdev;
+ int ret = pm_runtime_get_sync(rdev->ddev->dev);
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_mark_last_busy(rdev->ddev->dev);
+ pm_runtime_put_autosuspend(rdev->ddev->dev);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+radeonfb_release(struct fb_info *info, int user)
+{
+ struct radeon_fbdev *rfbdev = info->par;
+ struct radeon_device *rdev = rfbdev->rdev;
+
+ pm_runtime_mark_last_busy(rdev->ddev->dev);
+ pm_runtime_put_autosuspend(rdev->ddev->dev);
+ return 0;
+}
+
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
+ .fb_open = radeonfb_open,
+ .fb_release = radeonfb_release,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
@@ -383,7 +410,7 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
if (rdev->mode_info.rfbdev)
- fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+ drm_fb_helper_set_suspend(&rdev->mode_info.rfbdev->helper, state);
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 9590bcd321c0..29f7817af821 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -938,10 +938,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
"Radeon i2c hw bus %s", name);
i2c->adapter.algo = &radeon_i2c_algo;
ret = i2c_add_adapter(&i2c->adapter);
- if (ret) {
- DRM_ERROR("Failed to register hw i2c %s\n", name);
+ if (ret)
goto out_free;
- }
} else if (rec->hw_capable &&
radeon_hw_i2c &&
ASIC_IS_DCE3(rdev)) {
@@ -950,10 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
"Radeon i2c hw bus %s", name);
i2c->adapter.algo = &radeon_atom_i2c_algo;
ret = i2c_add_adapter(&i2c->adapter);
- if (ret) {
- DRM_ERROR("Failed to register hw i2c %s\n", name);
+ if (ret)
goto out_free;
- }
} else {
/* set the radeon bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
@@ -986,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
+ WARN_ON(i2c->has_aux);
i2c_del_adapter(&i2c->adapter);
- if (i2c->has_aux)
- drm_dp_aux_unregister(&i2c->aux);
kfree(i2c);
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 835563c1f0ed..4388ddeec8d2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -641,11 +641,11 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
struct radeon_vm *vm;
- int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv)) {
- return -ENOMEM;
+ r = -ENOMEM;
+ goto out_suspend;
}
if (rdev->accel_working) {
@@ -653,14 +653,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
r = radeon_vm_init(rdev, vm);
if (r) {
kfree(fpriv);
- return r;
+ goto out_suspend;
}
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
radeon_vm_fini(rdev, vm);
kfree(fpriv);
- return r;
+ goto out_suspend;
}
/* map the ib pool buffer read only into
@@ -674,15 +674,16 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (r) {
radeon_vm_fini(rdev, vm);
kfree(fpriv);
- return r;
+ goto out_suspend;
}
}
file_priv->driver_priv = fpriv;
}
+out_suspend:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
- return 0;
+ return r;
}
/**
@@ -717,6 +718,8 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
}
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
}
/**
@@ -733,6 +736,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
{
struct radeon_device *rdev = dev->dev_private;
+ pm_runtime_get_sync(dev->dev);
+
mutex_lock(&rdev->gem.mutex);
if (rdev->hyperz_filp == file_priv)
rdev->hyperz_filp = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index be30861afae9..41b72ce6613f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -446,6 +446,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
int radeon_bo_init(struct radeon_device *rdev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+ rdev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
if (!rdev->fastfb_working) {
rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -463,6 +467,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
arch_phys_wc_del(rdev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}
/* Returns how many bytes TTM can move per IB.
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c2e0a1ccdfbc..3de5e6e21662 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -237,7 +237,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
if (radeon_ttm_tt_has_userptr(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+ filp->private_data);
}
static void radeon_move_null(struct ttm_buffer_object *bo,
@@ -346,7 +347,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -379,7 +380,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -444,8 +445,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
if (r) {
return r;
}
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
struct page **pages = ttm->pages + pinned;
- r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+ r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+ pages, NULL);
if (r < 0)
goto release_pages;
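The radeon_ttm.c hunk above tracks a core API change: get_user_pages() folded its separate write/force booleans into a single gup_flags argument. The mapping from the old call is:

/* Old: get_user_pages(start, n, write, force, pages, vmas)
 * New: the two booleans become bits in one gup_flags word.
 */
unsigned int gup_flags = write ? FOLL_WRITE : 0; /* force would add FOLL_FORCE */

r = get_user_pages(userptr, num_pages, gup_flags, pages, NULL);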
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 73dfe01435ea..0cd0e7bdee55 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -669,6 +669,7 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
return r;
break;
case UVD_ENGINE_CNTL:
+ case UVD_NO_OP:
break;
default:
DRM_ERROR("Invalid reg 0x%X!\n",
@@ -753,8 +754,10 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
ib.ptr[3] = addr >> 32;
ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
ib.ptr[5] = 0;
- for (i = 6; i < 16; ++i)
- ib.ptr[i] = PACKET2(0);
+ for (i = 6; i < 16; i += 2) {
+ ib.ptr[i] = PACKET0(UVD_NO_OP, 0);
+ ib.ptr[i+1] = 0;
+ }
ib.length_dw = 16;
r = radeon_ib_schedule(rdev, &ib, NULL, false);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 1c120a4c3c97..729ae588c970 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1738,7 +1738,7 @@ static void rv770_uvd_resume(struct radeon_device *rdev)
return;
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
if (r) {
dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
return;
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9ef2064b1c9c..0271f4c559ae 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -387,6 +387,7 @@
#define UVD_UDEC_TILING_CONFIG 0xef40
#define UVD_UDEC_DB_TILING_CONFIG 0xef44
#define UVD_UDEC_DBW_TILING_CONFIG 0xef48
+#define UVD_NO_OP 0xeffc
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 2523ca96c6c7..e402be8821c4 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1547,7 +1547,7 @@ int si_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data = NULL;
const __le32 *new_fw_data = NULL;
- u32 running, blackout = 0;
+ u32 running;
u32 *io_mc_regs = NULL;
const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
@@ -1598,11 +1598,6 @@ int si_mc_load_microcode(struct radeon_device *rdev)
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
if (running == 0) {
- if (running) {
- blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
- WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
- }
-
/* reset the engine and set to writable */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -1641,9 +1636,6 @@ int si_mc_load_microcode(struct radeon_device *rdev)
break;
udelay(1);
}
-
- if (running)
- WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
}
return 0;
@@ -4439,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg)
case SPI_CONFIG_CNTL:
case SPI_CONFIG_CNTL_1:
case TA_CNTL_AUX:
+ case TA_CS_BC_BASE_ADDR:
return true;
default:
DRM_ERROR("Invalid register 0x%x in CS\n", reg);
@@ -6928,7 +6921,7 @@ static void si_uvd_resume(struct radeon_device *rdev)
return;
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
if (r) {
dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
return;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 1f78ec2548ec..c49934527a87 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
int i;
struct si_dpm_quirk *p = si_dpm_quirk_list;
+ /* limit all SI kickers */
+ if (rdev->family == CHIP_PITCAIRN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811) ||
+ (rdev->pdev->device == 0x6816) ||
+ (rdev->pdev->device == 0x6817) ||
+ (rdev->pdev->device == 0x6806))
+ max_mclk = 120000;
+ } else if (rdev->family == CHIP_VERDE) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6820) ||
+ (rdev->pdev->device == 0x6821) ||
+ (rdev->pdev->device == 0x6822) ||
+ (rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682A) ||
+ (rdev->pdev->device == 0x682B)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_OLAND) {
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_HAINAN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6664) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ }
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
- /* limit mclk on all R7 370 parts for stability */
- if (rdev->pdev->device == 0x6811 &&
- rdev->pdev->revision == 0x81)
- max_mclk = 120000;
- /* limit sclk/mclk on Jet parts for stability */
- if (rdev->pdev->device == 0x6665 &&
- rdev->pdev->revision == 0xc3) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
@@ -4112,7 +4145,7 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
&rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
- table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
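The kicker-limit hunk above replaces two ad-hoc device checks with per-family if-chains over device IDs and revisions. Purely for illustration, the same data could be expressed table-driven; the struct and array names below are invented, and the driver itself keeps the explicit if-chains:

/* Illustration only -- not how si_dpm.c is written. */
struct si_kicker_limit {
	u16 device;	/* 0 = match any device id */
	u8  revision;	/* 0 = match any revision */
	u32 max_sclk;	/* 0 = leave unchanged */
	u32 max_mclk;
};

static const struct si_kicker_limit verde_kickers[] = {
	{ 0,      0x81, 75000, 80000 },
	{ 0x6820, 0,    75000, 80000 },
	/* ... */
	{ /* sentinel */ }
};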
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index d1a7b58dd291..65a911ddd509 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1145,6 +1145,7 @@
#define SPI_LB_CU_MASK 0x9354
#define TA_CNTL_AUX 0x9508
+#define TA_CS_BC_BASE_ADDR 0x950C
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
@@ -1559,6 +1560,7 @@
#define UVD_UDEC_ADDR_CONFIG 0xEF4C
#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
+#define UVD_NO_OP 0xEFFC
#define UVD_RBC_RB_RPTR 0xF690
#define UVD_RBC_RB_WPTR 0xF694
#define UVD_STATUS 0xf6bc
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 3c779838d9ab..966e3a556011 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
struct SISLANDS_SMC_VOLTAGEMASKTABLE
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 899ef7a2a7b4..73c971e39b1c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -316,8 +316,8 @@ static int rcar_du_probe(struct platform_device *pdev)
rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
- if (!ddev)
- return -ENOMEM;
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
rcdu->ddev = ddev;
ddev->dev_private = rcdu;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index f03eb55318c1..392c7e6de042 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -231,8 +231,16 @@ static int rcar_du_atomic_check(struct drm_device *dev,
struct rcar_du_device *rcdu = dev->dev_private;
int ret;
- ret = drm_atomic_helper_check(dev, state);
- if (ret < 0)
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
return ret;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
@@ -257,7 +265,8 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, true);
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
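For context on the rcar-du atomic_check change above: drm_atomic_helper_check() is essentially the modeset check followed by the plane check, so open-coding it lets the driver run drm_atomic_normalize_zpos() in between, after connector/CRTC routing is settled but before plane states are validated. The resulting ordering, annotated:

ret = drm_atomic_helper_check_modeset(dev, state);	/* CRTC/connector routing */
if (ret)
	return ret;
ret = drm_atomic_normalize_zpos(dev, state);		/* assign normalized zpos */
if (ret)
	return ret;
ret = drm_atomic_helper_check_planes(dev, state);	/* per-plane validation */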
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 05d07138a2b2..9746365694ba 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -3,7 +3,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
- rockchip_drm_gem.o rockchip_drm_vop.o
+ rockchip_drm_gem.o rockchip_drm_psr.o rockchip_drm_vop.o
rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 89aadbf465f8..8548e8271639 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -32,6 +32,7 @@
#include <drm/bridge/analogix_dp.h>
#include "rockchip_drm_drv.h"
+#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
#define RK3288_GRF_SOC_CON6 0x25c
@@ -41,6 +42,8 @@
#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
+#define PSR_WAIT_LINE_FLAG_TIMEOUT_MS 100
+
#define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm)
/**
@@ -68,11 +71,65 @@ struct rockchip_dp_device {
struct regmap *grf;
struct reset_control *rst;
+ struct work_struct psr_work;
+ spinlock_t psr_lock;
+ unsigned int psr_state;
+
const struct rockchip_dp_chip_data *data;
struct analogix_dp_plat_data plat_data;
};
+static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
+{
+ struct rockchip_dp_device *dp = to_dp(encoder);
+ unsigned long flags;
+
+ if (!analogix_dp_psr_supported(dp->dev))
+ return;
+
+ dev_dbg(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
+
+ spin_lock_irqsave(&dp->psr_lock, flags);
+ if (enabled)
+ dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE;
+ else
+ dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
+
+ schedule_work(&dp->psr_work);
+ spin_unlock_irqrestore(&dp->psr_lock, flags);
+}
+
+static void analogix_dp_psr_work(struct work_struct *work)
+{
+ struct rockchip_dp_device *dp =
+ container_of(work, typeof(*dp), psr_work);
+ struct drm_crtc *crtc = dp->encoder.crtc;
+ int psr_state = dp->psr_state;
+ int vact_end;
+ int ret;
+ unsigned long flags;
+
+ if (!crtc)
+ return;
+
+ vact_end = crtc->mode.vtotal - crtc->mode.vsync_start + crtc->mode.vdisplay;
+
+ ret = rockchip_drm_wait_line_flag(dp->encoder.crtc, vact_end,
+ PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
+ if (ret) {
+ dev_err(dp->dev, "line flag interrupt did not arrive\n");
+ return;
+ }
+
+ spin_lock_irqsave(&dp->psr_lock, flags);
+ if (psr_state == EDP_VSC_PSR_STATE_ACTIVE)
+ analogix_dp_enable_psr(dp->dev);
+ else
+ analogix_dp_disable_psr(dp->dev);
+ spin_unlock_irqrestore(&dp->psr_lock, flags);
+}
+
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
{
reset_control_assert(dp->rst);
@@ -87,6 +144,8 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
struct rockchip_dp_device *dp = to_dp(plat_data);
int ret;
+ cancel_work_sync(&dp->psr_work);
+
ret = clk_prepare_enable(dp->pclk);
if (ret < 0) {
dev_err(dp->dev, "failed to enable pclk %d\n", ret);
@@ -342,12 +401,22 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.power_off = rockchip_dp_powerdown;
dp->plat_data.get_modes = rockchip_dp_get_modes;
+ spin_lock_init(&dp->psr_lock);
+ dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
+ INIT_WORK(&dp->psr_work, analogix_dp_psr_work);
+
+ rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set);
+
return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
}
static void rockchip_dp_unbind(struct device *dev, struct device *master,
void *data)
{
+ struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+
+ rockchip_drm_psr_unregister(&dp->encoder);
+
return analogix_dp_unbind(dev, master, data);
}
@@ -381,10 +450,8 @@ static int rockchip_dp_probe(struct platform_device *pdev)
panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- if (!panel) {
- DRM_ERROR("failed to find panel\n");
+ if (!panel)
return -EPROBE_DEFER;
- }
}
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
@@ -445,7 +512,6 @@ static struct platform_driver rockchip_dp_driver = {
.remove = rockchip_dp_remove,
.driver = {
.name = "rockchip-dp",
- .owner = THIS_MODULE,
.pm = &rockchip_dp_pm_ops,
.of_match_table = of_match_ptr(rockchip_dp_dt_ids),
},
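
The PSR hooks added above keep the atomic path cheap: analogix_dp_psr_set() only records the requested state under a spinlock and schedules a work item, and the work item later waits for the line flag before touching the hardware outside atomic context. Stripped to the bare pattern (struct and function names here are illustrative, not from the driver):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct psr_ctx {
    spinlock_t lock;            /* protects state */
    unsigned int state;         /* last requested PSR state */
    struct work_struct work;    /* commits state to hardware */
};

static void psr_request(struct psr_ctx *ctx, unsigned int state)
{
    unsigned long flags;

    spin_lock_irqsave(&ctx->lock, flags);
    ctx->state = state;         /* the latest request wins */
    schedule_work(&ctx->work);  /* hardware access is deferred */
    spin_unlock_irqrestore(&ctx->lock, flags);
}

static void psr_commit(struct work_struct *work)
{
    struct psr_ctx *ctx = container_of(work, struct psr_ctx, work);

    /* wait for a safe point in the frame, then program ctx->state */
}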
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a822d49a255a..8c8cbe837e61 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -143,8 +143,8 @@ static int rockchip_drm_bind(struct device *dev)
int ret;
drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
- if (!drm_dev)
- return -ENOMEM;
+ if (IS_ERR(drm_dev))
+ return PTR_ERR(drm_dev);
dev_set_drvdata(dev, drm_dev);
@@ -156,6 +156,9 @@ static int rockchip_drm_bind(struct device *dev)
drm_dev->dev_private = private;
+ INIT_LIST_HEAD(&private->psr_list);
+ spin_lock_init(&private->psr_list_lock);
+
drm_mode_config_init(drm_dev);
rockchip_drm_mode_config_init(drm_dev);
@@ -306,7 +309,7 @@ static struct drm_driver rockchip_drm_driver = {
};
#ifdef CONFIG_PM_SLEEP
-void rockchip_drm_fb_suspend(struct drm_device *drm)
+static void rockchip_drm_fb_suspend(struct drm_device *drm)
{
struct rockchip_drm_private *priv = drm->dev_private;
@@ -315,7 +318,7 @@ void rockchip_drm_fb_suspend(struct drm_device *drm)
console_unlock();
}
-void rockchip_drm_fb_resume(struct drm_device *drm)
+static void rockchip_drm_fb_resume(struct drm_device *drm)
{
struct rockchip_drm_private *priv = drm->dev_private;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index ea3932940061..fb6226cf84b7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -39,7 +39,6 @@ struct drm_connector;
struct rockchip_crtc_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
- void (*wait_for_update)(struct drm_crtc *crtc);
};
struct rockchip_crtc_state {
@@ -61,6 +60,9 @@ struct rockchip_drm_private {
struct drm_gem_object *fbdev_bo;
const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
struct drm_atomic_state *state;
+
+ struct list_head psr_list;
+ spinlock_t psr_list_lock;
};
int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
@@ -70,4 +72,7 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
struct device *dev);
void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
struct device *dev);
+int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
+ unsigned int mstimeout);
+
#endif /* _ROCKCHIP_DRM_DRV_H_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 55c52734c52d..0f6eda023bd0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -22,6 +22,7 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
+#include "rockchip_drm_psr.h"
#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
@@ -63,9 +64,20 @@ static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
rockchip_fb->obj[0], handle);
}
+static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ rockchip_drm_psr_flush_all(fb->dev);
+ return 0;
+}
+
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
.destroy = rockchip_drm_fb_destroy,
.create_handle = rockchip_drm_fb_create_handle,
+ .dirty = rockchip_drm_fb_dirty,
};
static struct rockchip_drm_fb *
@@ -162,68 +174,6 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(fb_helper);
}
-static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc)
-{
- struct rockchip_drm_private *priv = crtc->dev->dev_private;
- int pipe = drm_crtc_index(crtc);
- const struct rockchip_crtc_funcs *crtc_funcs = priv->crtc_funcs[pipe];
-
- if (crtc_funcs && crtc_funcs->wait_for_update)
- crtc_funcs->wait_for_update(crtc);
-}
-
-/*
- * We can't use drm_atomic_helper_wait_for_vblanks() because rk3288 and rk3066
- * have hardware counters for neither vblanks nor scanlines, which results in
- * a race where:
- * | <-- HW vsync irq and reg take effect
- * plane_commit --> |
- * get_vblank and wait --> |
- * | <-- handle_vblank, vblank->count + 1
- * cleanup_fb --> |
- * iommu crash --> |
- * | <-- HW vsync irq and reg take effect
- *
- * This function is equivalent but uses rockchip_crtc_wait_for_update() instead
- * of waiting for vblank_count to change.
- */
-static void
-rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_state *old_state)
-{
- struct drm_crtc_state *old_crtc_state;
- struct drm_crtc *crtc;
- int i, ret;
-
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- /* No one cares about the old state, so abuse it for tracking
- * and store whether we hold a vblank reference (and should do a
- * vblank wait) in the ->enable boolean.
- */
- old_crtc_state->enable = false;
-
- if (!crtc->state->active)
- continue;
-
- if (!drm_atomic_helper_framebuffer_changed(dev,
- old_state, crtc))
- continue;
-
- ret = drm_crtc_vblank_get(crtc);
- if (ret != 0)
- continue;
-
- old_crtc_state->enable = true;
- }
-
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- if (!old_crtc_state->enable)
- continue;
-
- rockchip_crtc_wait_for_update(crtc);
- drm_crtc_vblank_put(crtc);
- }
-}
-
static void
rockchip_atomic_commit_tail(struct drm_atomic_state *state)
{
@@ -233,11 +183,12 @@ rockchip_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_commit_planes(dev, state, true);
+ drm_atomic_helper_commit_planes(dev, state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_hw_done(state);
- rockchip_atomic_wait_for_complete(dev, state);
+ drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
}
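
With the custom vblank wait gone, the commit_tail above follows the stock helper sequence; only the hook itself stays driver-specific. For reference, such a function is wired up through the DRM mode-config helpers (a sketch; the actual assignment point is not shown in this hunk):

static const struct drm_mode_config_helper_funcs example_mode_config_helpers = {
    .atomic_commit_tail = rockchip_atomic_commit_tail,
};

/* during init, after drm_mode_config_init(drm_dev): */
/* drm_dev->mode_config.helper_private = &example_mode_config_helpers; */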
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 207e01de6e32..a16c69f96ed5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -20,6 +20,7 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
+#include "rockchip_drm_fbdev.h"
#define PREFERRED_BPP 32
#define to_drm_private(x) \
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
new file mode 100644
index 000000000000..a553e182ff53
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_psr.h"
+
+#define PSR_FLUSH_TIMEOUT msecs_to_jiffies(100)
+
+enum psr_state {
+ PSR_FLUSH,
+ PSR_ENABLE,
+ PSR_DISABLE,
+};
+
+struct psr_drv {
+ struct list_head list;
+ struct drm_encoder *encoder;
+
+ spinlock_t lock;
+ bool active;
+ enum psr_state state;
+
+ struct timer_list flush_timer;
+
+ void (*set)(struct drm_encoder *encoder, bool enable);
+};
+
+static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc)
+{
+ struct rockchip_drm_private *drm_drv = crtc->dev->dev_private;
+ struct psr_drv *psr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+ list_for_each_entry(psr, &drm_drv->psr_list, list) {
+ if (psr->encoder->crtc == crtc)
+ goto out;
+ }
+ psr = ERR_PTR(-ENODEV);
+
+out:
+ spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+ return psr;
+}
+
+static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state)
+{
+ /*
+ * Allowed finite state machine:
+ *
+ * PSR_ENABLE < = = = = = > PSR_FLUSH
+ * | ^ |
+ * | | |
+ * v | |
+ * PSR_DISABLE < - - - - - - - - -
+ */
+ if (state == psr->state || !psr->active)
+ return;
+
+ /* Already disabled in flush, change the state, but not the hardware */
+ if (state == PSR_DISABLE && psr->state == PSR_FLUSH) {
+ psr->state = state;
+ return;
+ }
+
+ psr->state = state;
+
+ /* Actually commit the state change to hardware */
+ switch (psr->state) {
+ case PSR_ENABLE:
+ psr->set(psr->encoder, true);
+ break;
+
+ case PSR_DISABLE:
+ case PSR_FLUSH:
+ psr->set(psr->encoder, false);
+ break;
+ }
+}
+
+static void psr_set_state(struct psr_drv *psr, enum psr_state state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&psr->lock, flags);
+ psr_set_state_locked(psr, state);
+ spin_unlock_irqrestore(&psr->lock, flags);
+}
+
+static void psr_flush_handler(unsigned long data)
+{
+ struct psr_drv *psr = (struct psr_drv *)data;
+ unsigned long flags;
+
+ /* If the state has changed since we initiated the flush, do nothing */
+ spin_lock_irqsave(&psr->lock, flags);
+ if (psr->state == PSR_FLUSH)
+ psr_set_state_locked(psr, PSR_ENABLE);
+ spin_unlock_irqrestore(&psr->lock, flags);
+}
+
+/**
+ * rockchip_drm_psr_activate - activate PSR on the given pipe
+ * @crtc: CRTC to which the PSR encoder is attached
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_activate(struct drm_crtc *crtc)
+{
+ struct psr_drv *psr = find_psr_by_crtc(crtc);
+ unsigned long flags;
+
+ if (IS_ERR(psr))
+ return PTR_ERR(psr);
+
+ spin_lock_irqsave(&psr->lock, flags);
+ psr->active = true;
+ spin_unlock_irqrestore(&psr->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_activate);
+
+/**
+ * rockchip_drm_psr_deactivate - deactivate PSR on the given pipe
+ * @crtc: CRTC to which the PSR encoder is attached
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_deactivate(struct drm_crtc *crtc)
+{
+ struct psr_drv *psr = find_psr_by_crtc(crtc);
+ unsigned long flags;
+
+ if (IS_ERR(psr))
+ return PTR_ERR(psr);
+
+ spin_lock_irqsave(&psr->lock, flags);
+ psr->active = false;
+ spin_unlock_irqrestore(&psr->lock, flags);
+ del_timer_sync(&psr->flush_timer);
+
+ return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_deactivate);
+
+static void rockchip_drm_do_flush(struct psr_drv *psr)
+{
+ mod_timer(&psr->flush_timer,
+ round_jiffies_up(jiffies + PSR_FLUSH_TIMEOUT));
+ psr_set_state(psr, PSR_FLUSH);
+}
+
+/**
+ * rockchip_drm_psr_flush - flush a single pipe
+ * @crtc: CRTC of the pipe to flush
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_flush(struct drm_crtc *crtc)
+{
+ struct psr_drv *psr = find_psr_by_crtc(crtc);
+
+ if (IS_ERR(psr))
+ return PTR_ERR(psr);
+
+ rockchip_drm_do_flush(psr);
+ return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_flush);
+
+/**
+ * rockchip_drm_psr_flush_all - flush all registered PSR encoders
+ * @dev: drm device
+ *
+ * Disable PSR on all registered encoders, then re-enable it after
+ * PSR_FLUSH_TIMEOUT. If an encoder's PSR state changes while the flush is
+ * pending, that state is left unchanged when the timeout expires.
+ */
+void rockchip_drm_psr_flush_all(struct drm_device *dev)
+{
+ struct rockchip_drm_private *drm_drv = dev->dev_private;
+ struct psr_drv *psr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+ list_for_each_entry(psr, &drm_drv->psr_list, list)
+ rockchip_drm_do_flush(psr);
+ spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
+
+/**
+ * rockchip_drm_psr_register - register an encoder with the PSR driver
+ * @encoder: encoder to attach the PSR function to
+ * @psr_set: callback to set the PSR state
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_register(struct drm_encoder *encoder,
+ void (*psr_set)(struct drm_encoder *, bool enable))
+{
+ struct rockchip_drm_private *drm_drv;
+ struct psr_drv *psr;
+ unsigned long flags;
+
+ if (!encoder || !psr_set)
+ return -EINVAL;
+
+ /* only dereference the encoder after the NULL check */
+ drm_drv = encoder->dev->dev_private;
+
+ psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
+ if (!psr)
+ return -ENOMEM;
+
+ setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+ spin_lock_init(&psr->lock);
+
+ psr->active = true;
+ psr->state = PSR_DISABLE;
+ psr->encoder = encoder;
+ psr->set = psr_set;
+
+ spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+ list_add_tail(&psr->list, &drm_drv->psr_list);
+ spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_register);
+
+/**
+ * rockchip_drm_psr_unregister - unregister an encoder from the PSR driver
+ * @encoder: encoder to detach the PSR function from
+ */
+void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
+{
+ struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+ struct psr_drv *psr, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+ list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
+ if (psr->encoder == encoder) {
+ del_timer(&psr->flush_timer);
+ list_del(&psr->list);
+ kfree(psr);
+ }
+ }
+ spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_unregister);
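
Taken together, the new file implements a small per-encoder state machine: a flush drops out of PSR immediately and arms a timer that re-enters PSR_ENABLE after PSR_FLUSH_TIMEOUT, unless the state changed in the meantime. A usage sketch from an encoder driver's point of view (the example_* names are placeholders):

static void example_psr_set(struct drm_encoder *encoder, bool enable)
{
    /* program the eDP link / panel PSR state here */
}

static int example_bind(struct drm_encoder *encoder)
{
    /* one registration per PSR-capable encoder */
    return rockchip_drm_psr_register(encoder, example_psr_set);
}

/*
 * A dirtyfb ioctl ends up in rockchip_drm_psr_flush_all(), which calls
 * example_psr_set(encoder, false) right away and, via the flush timer,
 * example_psr_set(encoder, true) about 100ms later.
 */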
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
new file mode 100644
index 000000000000..b420cf1bf902
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ROCKCHIP_DRM_PSR_H__
+#define __ROCKCHIP_DRM_PSR_H__
+
+void rockchip_drm_psr_flush_all(struct drm_device *dev);
+int rockchip_drm_psr_flush(struct drm_crtc *crtc);
+
+int rockchip_drm_psr_activate(struct drm_crtc *crtc);
+int rockchip_drm_psr_deactivate(struct drm_crtc *crtc);
+
+int rockchip_drm_psr_register(struct drm_encoder *encoder,
+ void (*psr_set)(struct drm_encoder *, bool enable));
+void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
+
+#endif /* __ROCKCHIP_DRM_PSR_H__ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 91305eb7d312..c7eba305c488 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -17,12 +17,14 @@
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
@@ -34,17 +36,21 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
+#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
-#define __REG_SET_RELAXED(x, off, mask, shift, v) \
- vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
-#define __REG_SET_NORMAL(x, off, mask, shift, v) \
- vop_mask_write(x, off, (mask) << shift, (v) << shift)
+#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
+ vop_mask_write(x, off, mask, shift, v, write_mask, true)
+
+#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
+ vop_mask_write(x, off, mask, shift, v, write_mask, false)
#define REG_SET(x, base, reg, v, mode) \
- __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+ __REG_SET_##mode(x, base + reg.offset, \
+ reg.mask, reg.shift, v, reg.write_mask)
#define REG_SET_MASK(x, base, reg, mask, v, mode) \
- __REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)
+ __REG_SET_##mode(x, base + reg.offset, \
+ mask, reg.shift, v, reg.write_mask)
#define VOP_WIN_SET(x, win, name, v) \
REG_SET(x, win->base, win->phy->name, v, RELAXED)
@@ -82,25 +88,15 @@
#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
-#define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
-struct vop_plane_state {
- struct drm_plane_state base;
- int format;
- struct drm_rect src;
- struct drm_rect dest;
- dma_addr_t yrgb_mst;
- bool enable;
+enum vop_pending {
+ VOP_PENDING_FB_UNREF,
};
struct vop_win {
struct drm_plane base;
const struct vop_win_data *data;
struct vop *vop;
-
- /* protected by dev->event_lock */
- bool enable;
- dma_addr_t yrgb_mst;
};
struct vop {
@@ -113,11 +109,15 @@ struct vop {
struct mutex vsync_mutex;
bool vsync_work_pending;
struct completion dsp_hold_completion;
- struct completion wait_update_complete;
/* protected by dev->event_lock */
struct drm_pending_vblank_event *event;
+ struct drm_flip_work fb_unref_work;
+ unsigned long pending;
+
+ struct completion line_flag_completion;
+
const struct vop_data *data;
uint32_t *regsbak;
@@ -164,27 +164,25 @@ static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
}
static inline void vop_mask_write(struct vop *vop, uint32_t offset,
- uint32_t mask, uint32_t v)
+ uint32_t mask, uint32_t shift, uint32_t v,
+ bool write_mask, bool relaxed)
{
- if (mask) {
- uint32_t cached_val = vop->regsbak[offset >> 2];
-
- cached_val = (cached_val & ~mask) | v;
- writel(cached_val, vop->regs + offset);
- vop->regsbak[offset >> 2] = cached_val;
- }
-}
+ if (!mask)
+ return;
-static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
- uint32_t mask, uint32_t v)
-{
- if (mask) {
+ if (write_mask) {
+ v = ((v << shift) & 0xffff) | (mask << (shift + 16));
+ } else {
uint32_t cached_val = vop->regsbak[offset >> 2];
- cached_val = (cached_val & ~mask) | v;
- writel_relaxed(cached_val, vop->regs + offset);
- vop->regsbak[offset >> 2] = cached_val;
+ v = (cached_val & ~(mask << shift)) | ((v & mask) << shift);
+ vop->regsbak[offset >> 2] = v;
}
+
+ if (relaxed)
+ writel_relaxed(v, vop->regs + offset);
+ else
+ writel(v, vop->regs + offset);
}
static inline uint32_t vop_get_intr_type(struct vop *vop,
@@ -240,7 +238,7 @@ static enum vop_data_format vop_convert_format(uint32_t format)
case DRM_FORMAT_NV24:
return VOP_FMT_YUV444SP;
default:
- DRM_ERROR("unsupport format[%08x]\n", format);
+ DRM_ERROR("unsupported format[%08x]\n", format);
return -EINVAL;
}
}
@@ -317,7 +315,7 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
int vskiplines = 0;
if (dst_w > 3840) {
- DRM_ERROR("Maximum destination width (3840) exceeded\n");
+ DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
return;
}
@@ -355,11 +353,11 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode);
if (lb_mode == LB_RGB_3840X2) {
if (yrgb_ver_scl_mode != SCALE_NONE) {
- DRM_ERROR("ERROR : not allow yrgb ver scale\n");
+ DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n");
return;
}
if (cbcr_ver_scl_mode != SCALE_NONE) {
- DRM_ERROR("ERROR : not allow cbcr ver scale\n");
+ DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n");
return;
}
vsu_mode = SCALE_UP_BIL;
@@ -411,6 +409,7 @@ static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
spin_lock_irqsave(&vop->irq_lock, flags);
+ VOP_INTR_SET_TYPE(vop, clear, DSP_HOLD_VALID_INTR, 1);
VOP_INTR_SET_TYPE(vop, enable, DSP_HOLD_VALID_INTR, 1);
spin_unlock_irqrestore(&vop->irq_lock, flags);
@@ -430,7 +429,73 @@ static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
spin_unlock_irqrestore(&vop->irq_lock, flags);
}
-static void vop_enable(struct drm_crtc *crtc)
+/*
+ * (1) Each frame starts at the start of the Vsync pulse, which is signaled by
+ * the "FRAME_SYNC" interrupt.
+ * (2) The active data region of each frame ends at dsp_vact_end.
+ * (3) We should program this same number (dsp_vact_end) into dsp_line_frag_num
+ * to get the "LINE_FLAG" interrupt at the end of the active on-screen data.
+ *
+ * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end
+ * Interrupts
+ * LINE_FLAG -------------------------------+
+ * FRAME_SYNC ----+ |
+ * | |
+ * v v
+ * | Vsync | Vbp | Vactive | Vfp |
+ * ^ ^ ^ ^
+ * | | | |
+ * | | | |
+ * dsp_vs_end ------------+ | | | VOP_DSP_VTOTAL_VS_END
+ * dsp_vact_start --------------+ | | VOP_DSP_VACT_ST_END
+ * dsp_vact_end ----------------------------+ | VOP_DSP_VACT_ST_END
+ * dsp_total -------------------------------------+ VOP_DSP_VTOTAL_VS_END
+ */
+static bool vop_line_flag_irq_is_enabled(struct vop *vop)
+{
+ uint32_t line_flag_irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR);
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+ return !!line_flag_irq;
+}
+
+static void vop_line_flag_irq_enable(struct vop *vop, int line_num)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!vop->is_enabled))
+ return;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ VOP_CTRL_SET(vop, line_flag_num[0], line_num);
+ VOP_INTR_SET_TYPE(vop, clear, LINE_FLAG_INTR, 1);
+ VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1);
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static void vop_line_flag_irq_disable(struct vop *vop)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!vop->is_enabled))
+ return;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0);
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
int ret;
@@ -438,26 +503,20 @@ static void vop_enable(struct drm_crtc *crtc)
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
- return;
+ goto err_put_pm_runtime;
}
ret = clk_enable(vop->hclk);
- if (ret < 0) {
- dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
- return;
- }
+ if (WARN_ON(ret < 0))
+ goto err_put_pm_runtime;
ret = clk_enable(vop->dclk);
- if (ret < 0) {
- dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+ if (WARN_ON(ret < 0))
goto err_disable_hclk;
- }
ret = clk_enable(vop->aclk);
- if (ret < 0) {
- dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
+ if (WARN_ON(ret < 0))
goto err_disable_dclk;
- }
/*
* Slave iommu shares power, irq and clock with vop. It was associated
@@ -487,7 +546,7 @@ static void vop_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
- return;
+ return 0;
err_disable_aclk:
clk_disable(vop->aclk);
@@ -495,6 +554,9 @@ err_disable_dclk:
clk_disable(vop->dclk);
err_disable_hclk:
clk_disable(vop->hclk);
+err_put_pm_runtime:
+ pm_runtime_put_sync(vop->dev);
+ return ret;
}
static void vop_crtc_disable(struct drm_crtc *crtc)
@@ -504,6 +566,8 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
WARN_ON(vop->event);
+ rockchip_drm_psr_deactivate(&vop->crtc);
+
/*
* We need to make sure that all windows are disabled before we
* disable that crtc. Otherwise we might try to scan from a destroyed
@@ -568,22 +632,6 @@ static void vop_plane_destroy(struct drm_plane *plane)
drm_plane_cleanup(plane);
}
-static int vop_plane_prepare_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state)
-{
- if (plane->state->fb)
- drm_framebuffer_reference(plane->state->fb);
-
- return 0;
-}
-
-static void vop_plane_cleanup_fb(struct drm_plane *plane,
- const struct drm_plane_state *old_state)
-{
- if (old_state->fb)
- drm_framebuffer_unreference(old_state->fb);
-}
-
static int vop_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -591,12 +639,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb = state->fb;
struct vop_win *vop_win = to_vop_win(plane);
- struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
const struct vop_win_data *win = vop_win->data;
- bool visible;
int ret;
- struct drm_rect *dest = &vop_plane_state->dest;
- struct drm_rect *src = &vop_plane_state->src;
struct drm_rect clip;
int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
DRM_PLANE_HELPER_NO_SCALING;
@@ -604,62 +648,43 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
DRM_PLANE_HELPER_NO_SCALING;
if (!crtc || !fb)
- goto out_disable;
+ return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
- src->x1 = state->src_x;
- src->y1 = state->src_y;
- src->x2 = state->src_x + state->src_w;
- src->y2 = state->src_y + state->src_h;
- dest->x1 = state->crtc_x;
- dest->y1 = state->crtc_y;
- dest->x2 = state->crtc_x + state->crtc_w;
- dest->y2 = state->crtc_y + state->crtc_h;
-
clip.x1 = 0;
clip.y1 = 0;
clip.x2 = crtc_state->adjusted_mode.hdisplay;
clip.y2 = crtc_state->adjusted_mode.vdisplay;
- ret = drm_plane_helper_check_update(plane, crtc, state->fb,
- src, dest, &clip,
- state->rotation,
- min_scale,
- max_scale,
- true, true, &visible);
+ ret = drm_plane_helper_check_state(state, &clip,
+ min_scale, max_scale,
+ true, true);
if (ret)
return ret;
- if (!visible)
- goto out_disable;
+ if (!state->visible)
+ return 0;
- vop_plane_state->format = vop_convert_format(fb->pixel_format);
- if (vop_plane_state->format < 0)
- return vop_plane_state->format;
+ ret = vop_convert_format(fb->pixel_format);
+ if (ret < 0)
+ return ret;
/*
 * Src.x1 can be odd after clipping, but the start point of a YUV plane
 * must be aligned to 2 pixels.
*/
- if (is_yuv_support(fb->pixel_format) && ((src->x1 >> 16) % 2))
+ if (is_yuv_support(fb->pixel_format) && ((state->src.x1 >> 16) % 2))
return -EINVAL;
- vop_plane_state->enable = true;
-
- return 0;
-
-out_disable:
- vop_plane_state->enable = false;
return 0;
}
static void vop_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct vop_plane_state *vop_plane_state = to_vop_plane_state(old_state);
struct vop_win *vop_win = to_vop_win(plane);
const struct vop_win_data *win = vop_win->data;
struct vop *vop = to_vop(old_state->crtc);
@@ -667,18 +692,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
if (!old_state->crtc)
return;
- spin_lock_irq(&plane->dev->event_lock);
- vop_win->enable = false;
- vop_win->yrgb_mst = 0;
- spin_unlock_irq(&plane->dev->event_lock);
-
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, enable, 0);
spin_unlock(&vop->reg_lock);
-
- vop_plane_state->enable = false;
}
static void vop_plane_atomic_update(struct drm_plane *plane,
@@ -687,21 +705,21 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *state = plane->state;
struct drm_crtc *crtc = state->crtc;
struct vop_win *vop_win = to_vop_win(plane);
- struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
const struct vop_win_data *win = vop_win->data;
struct vop *vop = to_vop(state->crtc);
struct drm_framebuffer *fb = state->fb;
unsigned int actual_w, actual_h;
unsigned int dsp_stx, dsp_sty;
uint32_t act_info, dsp_info, dsp_st;
- struct drm_rect *src = &vop_plane_state->src;
- struct drm_rect *dest = &vop_plane_state->dest;
+ struct drm_rect *src = &state->src;
+ struct drm_rect *dest = &state->dst;
struct drm_gem_object *obj, *uv_obj;
struct rockchip_gem_object *rk_obj, *rk_uv_obj;
unsigned long offset;
dma_addr_t dma_addr;
uint32_t val;
bool rb_swap;
+ int format;
/*
* can't update plane when vop is disabled.
@@ -712,7 +730,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
if (WARN_ON(!vop->is_enabled))
return;
- if (!vop_plane_state->enable) {
+ if (!state->visible) {
vop_plane_atomic_disable(plane, old_state);
return;
}
@@ -733,18 +751,15 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
offset += (src->y1 >> 16) * fb->pitches[0];
- vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
+ dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];
- spin_lock_irq(&plane->dev->event_lock);
- vop_win->enable = true;
- vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
- spin_unlock_irq(&plane->dev->event_lock);
+ format = vop_convert_format(fb->pixel_format);
spin_lock(&vop->reg_lock);
- VOP_WIN_SET(vop, win, format, vop_plane_state->format);
+ VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
- VOP_WIN_SET(vop, win, yrgb_mst, vop_plane_state->yrgb_mst);
+ VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
if (is_yuv_support(fb->pixel_format)) {
int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
@@ -791,68 +806,18 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
- .prepare_fb = vop_plane_prepare_fb,
- .cleanup_fb = vop_plane_cleanup_fb,
.atomic_check = vop_plane_atomic_check,
.atomic_update = vop_plane_atomic_update,
.atomic_disable = vop_plane_atomic_disable,
};
-static void vop_atomic_plane_reset(struct drm_plane *plane)
-{
- struct vop_plane_state *vop_plane_state =
- to_vop_plane_state(plane->state);
-
- if (plane->state && plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
-
- kfree(vop_plane_state);
- vop_plane_state = kzalloc(sizeof(*vop_plane_state), GFP_KERNEL);
- if (!vop_plane_state)
- return;
-
- plane->state = &vop_plane_state->base;
- plane->state->plane = plane;
-}
-
-static struct drm_plane_state *
-vop_atomic_plane_duplicate_state(struct drm_plane *plane)
-{
- struct vop_plane_state *old_vop_plane_state;
- struct vop_plane_state *vop_plane_state;
-
- if (WARN_ON(!plane->state))
- return NULL;
-
- old_vop_plane_state = to_vop_plane_state(plane->state);
- vop_plane_state = kmemdup(old_vop_plane_state,
- sizeof(*vop_plane_state), GFP_KERNEL);
- if (!vop_plane_state)
- return NULL;
-
- __drm_atomic_helper_plane_duplicate_state(plane,
- &vop_plane_state->base);
-
- return &vop_plane_state->base;
-}
-
-static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- struct vop_plane_state *vop_state = to_vop_plane_state(state);
-
- __drm_atomic_helper_plane_destroy_state(state);
-
- kfree(vop_state);
-}
-
static const struct drm_plane_funcs vop_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = vop_plane_destroy,
- .reset = vop_atomic_plane_reset,
- .atomic_duplicate_state = vop_atomic_plane_duplicate_state,
- .atomic_destroy_state = vop_atomic_plane_destroy_state,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -865,6 +830,7 @@ static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
spin_lock_irqsave(&vop->irq_lock, flags);
+ VOP_INTR_SET_TYPE(vop, clear, FS_INTR, 1);
VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 1);
spin_unlock_irqrestore(&vop->irq_lock, flags);
@@ -887,18 +853,9 @@ static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&vop->irq_lock, flags);
}
-static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
-{
- struct vop *vop = to_vop(crtc);
-
- reinit_completion(&vop->wait_update_complete);
- WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
-}
-
static const struct rockchip_crtc_funcs private_crtc_funcs = {
.enable_vblank = vop_crtc_enable_vblank,
.disable_vblank = vop_crtc_disable_vblank,
- .wait_for_update = vop_crtc_wait_for_update,
};
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -928,11 +885,17 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
u16 vact_end = vact_st + vdisplay;
- uint32_t val;
+ uint32_t pin_pol, val;
+ int ret;
WARN_ON(vop->event);
- vop_enable(crtc);
+ ret = vop_enable(crtc);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
+ return;
+ }
+
/*
 * If the dclk rate is zero, scanout has stopped,
 * so there is no need to wait any longer.
@@ -969,25 +932,31 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
vop_dsp_hold_valid_irq_disable(vop);
}
- val = 0x8;
- val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
- val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
- VOP_CTRL_SET(vop, pin_pol, val);
+ pin_pol = 0x8;
+ pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
+ pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
+ VOP_CTRL_SET(vop, pin_pol, pin_pol);
+
switch (s->output_type) {
case DRM_MODE_CONNECTOR_LVDS:
VOP_CTRL_SET(vop, rgb_en, 1);
+ VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
break;
case DRM_MODE_CONNECTOR_eDP:
+ VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
VOP_CTRL_SET(vop, edp_en, 1);
break;
case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
VOP_CTRL_SET(vop, hdmi_en, 1);
break;
case DRM_MODE_CONNECTOR_DSI:
+ VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
VOP_CTRL_SET(vop, mipi_en, 1);
break;
default:
- DRM_ERROR("unsupport connector_type[%d]\n", s->output_type);
+ DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
+ s->output_type);
}
VOP_CTRL_SET(vop, out_mode, s->output_mode);
@@ -1006,12 +975,44 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
VOP_CTRL_SET(vop, standby, 0);
+
+ rockchip_drm_psr_activate(&vop->crtc);
+}
+
+static bool vop_fs_irq_is_pending(struct vop *vop)
+{
+ return VOP_INTR_GET_TYPE(vop, status, FS_INTR);
+}
+
+static void vop_wait_for_irq_handler(struct vop *vop)
+{
+ bool pending;
+ int ret;
+
+ /*
+ * Spin until frame start interrupt status bit goes low, which means
+ * that interrupt handler was invoked and cleared it. The timeout of
+ * 10 msecs is really too long, but it is just a safety measure if
+ * something goes really wrong. The wait will only happen in the very
+ * unlikely case of a vblank happening exactly at the same time and
+ * shouldn't exceed microseconds range.
+ */
+ ret = readx_poll_timeout_atomic(vop_fs_irq_is_pending, vop, pending,
+ !pending, 0, 10 * 1000);
+ if (ret)
+ DRM_DEV_ERROR(vop->dev, "VOP vblank IRQ stuck for 10 ms\n");
+
+ synchronize_irq(vop->irq);
}
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct drm_atomic_state *old_state = old_crtc_state->state;
+ struct drm_plane_state *old_plane_state;
struct vop *vop = to_vop(crtc);
+ struct drm_plane *plane;
+ int i;
if (WARN_ON(!vop->is_enabled))
return;
@@ -1021,12 +1022,13 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
-}
-static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
-{
- struct vop *vop = to_vop(crtc);
+ /*
+ * There is a (rather unlikely) possibility that a vblank interrupt
+ * fired before we set the cfg_done bit. To avoid spuriously
+ * signalling flip completion we need to wait for it to finish.
+ */
+ vop_wait_for_irq_handler(vop);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
@@ -1037,6 +1039,25 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
+
+ for_each_plane_in_state(old_state, plane, old_plane_state, i) {
+ if (!old_plane_state->fb)
+ continue;
+
+ if (old_plane_state->fb == plane->state->fb)
+ continue;
+
+ drm_framebuffer_reference(old_plane_state->fb);
+ drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
+ set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ }
+}
+
+static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ rockchip_drm_psr_flush(crtc);
}
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
@@ -1093,16 +1114,13 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.atomic_destroy_state = vop_crtc_destroy_state,
};
-static bool vop_win_pending_is_complete(struct vop_win *vop_win)
+static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
{
- dma_addr_t yrgb_mst;
-
- if (!vop_win->enable)
- return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
-
- yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
+ struct vop *vop = container_of(work, struct vop, fb_unref_work);
+ struct drm_framebuffer *fb = val;
- return yrgb_mst == vop_win->yrgb_mst;
+ drm_crtc_vblank_put(&vop->crtc);
+ drm_framebuffer_unreference(fb);
}
static void vop_handle_vblank(struct vop *vop)
@@ -1110,25 +1128,17 @@ static void vop_handle_vblank(struct vop *vop)
struct drm_device *drm = vop->drm_dev;
struct drm_crtc *crtc = &vop->crtc;
unsigned long flags;
- int i;
-
- for (i = 0; i < vop->data->win_size; i++) {
- if (!vop_win_pending_is_complete(&vop->win[i]))
- return;
- }
spin_lock_irqsave(&drm->event_lock, flags);
if (vop->event) {
-
drm_crtc_send_vblank_event(crtc, vop->event);
drm_crtc_vblank_put(crtc);
vop->event = NULL;
-
}
spin_unlock_irqrestore(&drm->event_lock, flags);
- if (!completion_done(&vop->wait_update_complete))
- complete(&vop->wait_update_complete);
+ if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
+ drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
}
static irqreturn_t vop_isr(int irq, void *data)
@@ -1162,6 +1172,12 @@ static irqreturn_t vop_isr(int irq, void *data)
ret = IRQ_HANDLED;
}
+ if (active_irqs & LINE_FLAG_INTR) {
+ complete(&vop->line_flag_completion);
+ active_irqs &= ~LINE_FLAG_INTR;
+ ret = IRQ_HANDLED;
+ }
+
if (active_irqs & FS_INTR) {
drm_crtc_handle_vblank(crtc);
vop_handle_vblank(vop);
@@ -1171,7 +1187,8 @@ static irqreturn_t vop_isr(int irq, void *data)
/* Unhandled irqs are spurious. */
if (active_irqs)
- DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
+ DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
+ active_irqs);
return ret;
}
@@ -1206,7 +1223,8 @@ static int vop_create_crtc(struct vop *vop)
win_data->phy->nformats,
win_data->type, NULL);
if (ret) {
- DRM_ERROR("failed to initialize plane\n");
+ DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
+ ret);
goto err_cleanup_planes;
}
@@ -1244,7 +1262,8 @@ static int vop_create_crtc(struct vop *vop)
win_data->phy->nformats,
win_data->type, NULL);
if (ret) {
- DRM_ERROR("failed to initialize overlay plane\n");
+ DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
+ ret);
goto err_cleanup_crtc;
}
drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
@@ -1252,14 +1271,17 @@ static int vop_create_crtc(struct vop *vop)
port = of_get_child_by_name(dev->of_node, "port");
if (!port) {
- DRM_ERROR("no port node found in %s\n",
- dev->of_node->full_name);
+ DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
+ dev->of_node->full_name);
ret = -ENOENT;
goto err_cleanup_crtc;
}
+ drm_flip_work_init(&vop->fb_unref_work, "fb_unref",
+ vop_fb_unref_worker);
+
init_completion(&vop->dsp_hold_completion);
- init_completion(&vop->wait_update_complete);
+ init_completion(&vop->line_flag_completion);
crtc->port = port;
rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);
@@ -1300,6 +1322,7 @@ static void vop_destroy_crtc(struct vop *vop)
* references the CRTC.
*/
drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&vop->fb_unref_work);
}
static int vop_initial(struct vop *vop)
@@ -1416,6 +1439,49 @@ static void vop_win_init(struct vop *vop)
}
}
+/**
+ * rockchip_drm_wait_line_flag - wait for the given line flag event
+ * @crtc: CRTC to enable the line flag on
+ * @line_num: line number of interest
+ * @mstimeout: timeout in milliseconds
+ *
+ * The caller blocks here until the line flag interrupt for the given line
+ * has fired, or until the timeout expires.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num,
+ unsigned int mstimeout)
+{
+ struct vop *vop = to_vop(crtc);
+ unsigned long jiffies_left;
+
+ if (!crtc || !vop->is_enabled)
+ return -ENODEV;
+
+ if (line_num > crtc->mode.vtotal || mstimeout <= 0)
+ return -EINVAL;
+
+ if (vop_line_flag_irq_is_enabled(vop))
+ return -EBUSY;
+
+ reinit_completion(&vop->line_flag_completion);
+ vop_line_flag_irq_enable(vop, line_num);
+
+ jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion,
+ msecs_to_jiffies(mstimeout));
+ vop_line_flag_irq_disable(vop);
+
+ if (jiffies_left == 0) {
+ dev_err(vop->dev, "Timeout waiting for IRQ\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_wait_line_flag);
+
static int vop_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1481,10 +1547,15 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
ret = vop_create_crtc(vop);
if (ret)
- return ret;
+ goto err_enable_irq;
pm_runtime_enable(&pdev->dev);
+
return 0;
+
+err_enable_irq:
+ enable_irq(vop->irq); /* To balance out the disable_irq above */
+ return ret;
}
static void vop_unbind(struct device *dev, struct device *master, void *data)
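
rockchip_drm_wait_line_flag() above is a textbook completion-based interrupt wait: re-initialize the completion, enable the interrupt, sleep with a timeout, and always disable the interrupt again on the way out. Reduced to a skeleton (example_ctx and the enable/disable helpers are placeholders; the ISR is assumed to call complete() on the same completion, as vop_isr() does for LINE_FLAG_INTR):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_ctx {
    struct completion done;     /* completed from the ISR */
};

static void example_irq_enable(struct example_ctx *ctx) { /* ... */ }
static void example_irq_disable(struct example_ctx *ctx) { /* ... */ }

static int example_wait_irq(struct example_ctx *ctx, unsigned int ms)
{
    unsigned long left;

    reinit_completion(&ctx->done);  /* forget any stale completion */
    example_irq_enable(ctx);

    left = wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(ms));
    example_irq_disable(ctx);       /* disable whether we timed out or not */

    return left ? 0 : -ETIMEDOUT;
}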
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 071ff0be7a95..1dbc52615257 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -33,6 +33,7 @@ struct vop_reg {
uint32_t offset;
uint32_t shift;
uint32_t mask;
+ bool write_mask;
};
struct vop_ctrl {
@@ -48,6 +49,10 @@ struct vop_ctrl {
struct vop_reg dither_down;
struct vop_reg dither_up;
struct vop_reg pin_pol;
+ struct vop_reg rgb_pin_pol;
+ struct vop_reg hdmi_pin_pol;
+ struct vop_reg edp_pin_pol;
+ struct vop_reg mipi_pin_pol;
struct vop_reg htotal_pw;
struct vop_reg hact_st_end;
@@ -56,6 +61,8 @@ struct vop_ctrl {
struct vop_reg hpost_st_end;
struct vop_reg vpost_st_end;
+ struct vop_reg line_flag_num[2];
+
struct vop_reg cfg_done;
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 919992cdc97e..35c51f3402f2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -23,7 +23,14 @@
#define VOP_REG(off, _mask, s) \
{.offset = off, \
.mask = _mask, \
- .shift = s,}
+ .shift = s, \
+ .write_mask = false,}
+
+#define VOP_REG_MASK(off, _mask, s) \
+ {.offset = off, \
+ .mask = _mask, \
+ .shift = s, \
+ .write_mask = true,}
static const uint32_t formats_win_full[] = {
DRM_FORMAT_XRGB8888,
@@ -50,6 +57,89 @@ static const uint32_t formats_win_lite[] = {
DRM_FORMAT_BGR565,
};
+static const struct vop_scl_regs rk3036_win_scl = {
+ .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+ .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+ .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+ .scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
+static const struct vop_win_phy rk3036_win0_data = {
+ .scl = &rk3036_win_scl,
+ .data_formats = formats_win_full,
+ .nformats = ARRAY_SIZE(formats_win_full),
+ .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
+ .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
+ .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
+ .act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
+ .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
+};
+
+static const struct vop_win_phy rk3036_win1_data = {
+ .data_formats = formats_win_lite,
+ .nformats = ARRAY_SIZE(formats_win_lite),
+ .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
+ .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
+ .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
+ .act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
+};
+
+static const struct vop_win_data rk3036_vop_win_data[] = {
+ { .base = 0x00, .phy = &rk3036_win0_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x00, .phy = &rk3036_win1_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const int rk3036_vop_intrs[] = {
+ DSP_HOLD_VALID_INTR,
+ FS_INTR,
+ LINE_FLAG_INTR,
+ BUS_ERROR_INTR,
+};
+
+static const struct vop_intr rk3036_intr = {
+ .intrs = rk3036_vop_intrs,
+ .nintrs = ARRAY_SIZE(rk3036_vop_intrs),
+ .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
+ .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
+ .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
+};
+
+static const struct vop_ctrl rk3036_ctrl_data = {
+ .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
+ .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
+ .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
+ .htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+ .hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
+ .vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+ .vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
+ .line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12),
+ .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
+};
+
+static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
+ {RK3036_DSP_CTRL1, 0x00000000},
+};
+
+static const struct vop_data rk3036_vop = {
+ .init_table = rk3036_vop_init_reg_table,
+ .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
+ .ctrl = &rk3036_ctrl_data,
+ .intr = &rk3036_intr,
+ .win = rk3036_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3036_vop_win_data),
+};
+
static const struct vop_scl_extension rk3288_win_full_scl_ext = {
.cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
.cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
@@ -133,6 +223,7 @@ static const struct vop_ctrl rk3288_ctrl_data = {
.vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0),
.hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
.vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+ .line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12),
.cfg_done = VOP_REG(RK3288_REG_CFG_DONE, 0x1, 0),
};
@@ -190,93 +281,104 @@ static const struct vop_data rk3288_vop = {
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
};
-static const struct vop_scl_regs rk3036_win_scl = {
- .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
- .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
- .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
- .scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
-};
-
-static const struct vop_win_phy rk3036_win0_data = {
- .scl = &rk3036_win_scl,
- .data_formats = formats_win_full,
- .nformats = ARRAY_SIZE(formats_win_full),
- .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
- .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
- .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
- .act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
- .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
- .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
+static const struct vop_ctrl rk3399_ctrl_data = {
+ .standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22),
+ .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
+ .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15),
+ .dither_down = VOP_REG(RK3399_DSP_CTRL1, 0xf, 1),
+ .dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6),
+ .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
+ .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
+ .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
+ .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20),
+ .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24),
+ .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28),
+ .htotal_pw = VOP_REG(RK3399_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+ .hact_st_end = VOP_REG(RK3399_DSP_HACT_ST_END, 0x1fff1fff, 0),
+ .vtotal_pw = VOP_REG(RK3399_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+ .vact_st_end = VOP_REG(RK3399_DSP_VACT_ST_END, 0x1fff1fff, 0),
+ .hpost_st_end = VOP_REG(RK3399_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
+ .vpost_st_end = VOP_REG(RK3399_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+ .line_flag_num[0] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 0),
+ .line_flag_num[1] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 16),
+ .cfg_done = VOP_REG_MASK(RK3399_REG_CFG_DONE, 0x1, 0),
};
-static const struct vop_win_phy rk3036_win1_data = {
- .data_formats = formats_win_lite,
- .nformats = ARRAY_SIZE(formats_win_lite),
- .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
- .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
- .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
- .act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0),
- .dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0),
- .dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0),
- .yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0),
- .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0),
-};
-
-static const struct vop_win_data rk3036_vop_win_data[] = {
- { .base = 0x00, .phy = &rk3036_win0_data,
- .type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x00, .phy = &rk3036_win1_data,
- .type = DRM_PLANE_TYPE_CURSOR },
-};
-
-static const int rk3036_vop_intrs[] = {
- DSP_HOLD_VALID_INTR,
+static const int rk3399_vop_intrs[] = {
FS_INTR,
+ 0, 0,
LINE_FLAG_INTR,
+ 0,
BUS_ERROR_INTR,
+ 0, 0, 0, 0, 0, 0, 0,
+ DSP_HOLD_VALID_INTR,
};
-static const struct vop_intr rk3036_intr = {
- .intrs = rk3036_vop_intrs,
- .nintrs = ARRAY_SIZE(rk3036_vop_intrs),
- .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
- .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
- .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
+static const struct vop_intr rk3399_vop_intr = {
+ .intrs = rk3399_vop_intrs,
+ .nintrs = ARRAY_SIZE(rk3399_vop_intrs),
+ .status = VOP_REG_MASK(RK3399_INTR_STATUS0, 0xffff, 0),
+ .enable = VOP_REG_MASK(RK3399_INTR_EN0, 0xffff, 0),
+ .clear = VOP_REG_MASK(RK3399_INTR_CLEAR0, 0xffff, 0),
};
-static const struct vop_ctrl rk3036_ctrl_data = {
- .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
- .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
- .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
- .htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
- .hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
- .vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
- .vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
- .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
+static const struct vop_reg_data rk3399_init_reg_table[] = {
+ {RK3399_SYS_CTRL, 0x2000f800},
+ {RK3399_DSP_CTRL0, 0x00000000},
+ {RK3399_WIN0_CTRL0, 0x00000080},
+ {RK3399_WIN1_CTRL0, 0x00000080},
+ /* TODO: Win2/3 support a multiple-area function, but we haven't found
+ * a suitable way to use it yet, so treat them like the other windows,
+ * with only area 0 enabled.
+ */
+ {RK3399_WIN2_CTRL0, 0x00000010},
+ {RK3399_WIN3_CTRL0, 0x00000010},
};
-static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
- {RK3036_DSP_CTRL1, 0x00000000},
+static const struct vop_data rk3399_vop_big = {
+ .init_table = rk3399_init_reg_table,
+ .table_size = ARRAY_SIZE(rk3399_init_reg_table),
+ .intr = &rk3399_vop_intr,
+ .ctrl = &rk3399_ctrl_data,
+ /*
+ * The rk3399 vop big window register layout is the same as rk3288's.
+ */
+ .win = rk3288_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3288_vop_win_data),
};
-static const struct vop_data rk3036_vop = {
- .init_table = rk3036_vop_init_reg_table,
- .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
- .ctrl = &rk3036_ctrl_data,
- .intr = &rk3036_intr,
- .win = rk3036_vop_win_data,
- .win_size = ARRAY_SIZE(rk3036_vop_win_data),
+static const struct vop_win_data rk3399_vop_lit_win_data[] = {
+ { .base = 0x00, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x00, .phy = &rk3288_win23_data,
+ .type = DRM_PLANE_TYPE_CURSOR},
+};
+
+static const struct vop_data rk3399_vop_lit = {
+ .init_table = rk3399_init_reg_table,
+ .table_size = ARRAY_SIZE(rk3399_init_reg_table),
+ .intr = &rk3399_vop_intr,
+ .ctrl = &rk3399_ctrl_data,
+ /*
+ * The rk3399 vop lit window register layout is the same as rk3288's,
+ * minus the win1 and win3 windows.
+ */
+ .win = rk3399_vop_lit_win_data,
+ .win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
};
static const struct of_device_id vop_driver_dt_match[] = {
- { .compatible = "rockchip,rk3288-vop",
- .data = &rk3288_vop },
{ .compatible = "rockchip,rk3036-vop",
.data = &rk3036_vop },
+ { .compatible = "rockchip,rk3288-vop",
+ .data = &rk3288_vop },
+ { .compatible = "rockchip,rk3399-vop-big",
+ .data = &rk3399_vop_big },
+ { .compatible = "rockchip,rk3399-vop-lit",
+ .data = &rk3399_vop_lit },
{},
};
MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
@@ -305,7 +407,6 @@ static struct platform_driver vop_platform_driver = {
.remove = vop_remove,
.driver = {
.name = "rockchip-vop",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(vop_driver_dt_match),
},
};
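
VOP_REG_MASK() marks registers whose upper 16 bits act as a per-bit write-enable mask (the RK3399 interrupt and cfg_done registers above), which is why vop_mask_write() in rockchip_drm_vop.c can skip the cached read-modify-write for them. A standalone sketch of the encoding, mirroring the computation in that hunk:

#include <stdint.h>

/*
 * For a hiword-mask register, a single write is self-describing: the low
 * half carries the field bits, the high half tells the hardware which
 * bits to latch, so no cached read-modify-write is needed.
 */
static uint32_t hiword_mask_val(uint32_t mask, uint32_t shift, uint32_t v)
{
    return ((v << shift) & 0xffff) | (mask << (shift + 16));
}

/* e.g. cfg_done: mask 0x1, shift 0, v = 1  ->  0x00010001 */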
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index d4b46cba2f26..cd197260ece5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -166,4 +166,197 @@
#define RK3036_HWC_LUT_ADDR 0x800
/* rk3036 register definition end */
+/* rk3399 register definition */
+#define RK3399_REG_CFG_DONE 0x00000
+#define RK3399_VERSION_INFO 0x00004
+#define RK3399_SYS_CTRL 0x00008
+#define RK3399_SYS_CTRL1 0x0000c
+#define RK3399_DSP_CTRL0 0x00010
+#define RK3399_DSP_CTRL1 0x00014
+#define RK3399_DSP_BG 0x00018
+#define RK3399_MCU_CTRL 0x0001c
+#define RK3399_WB_CTRL0 0x00020
+#define RK3399_WB_CTRL1 0x00024
+#define RK3399_WB_YRGB_MST 0x00028
+#define RK3399_WB_CBR_MST 0x0002c
+#define RK3399_WIN0_CTRL0 0x00030
+#define RK3399_WIN0_CTRL1 0x00034
+#define RK3399_WIN0_COLOR_KEY 0x00038
+#define RK3399_WIN0_VIR 0x0003c
+#define RK3399_WIN0_YRGB_MST 0x00040
+#define RK3399_WIN0_CBR_MST 0x00044
+#define RK3399_WIN0_ACT_INFO 0x00048
+#define RK3399_WIN0_DSP_INFO 0x0004c
+#define RK3399_WIN0_DSP_ST 0x00050
+#define RK3399_WIN0_SCL_FACTOR_YRGB 0x00054
+#define RK3399_WIN0_SCL_FACTOR_CBR 0x00058
+#define RK3399_WIN0_SCL_OFFSET 0x0005c
+#define RK3399_WIN0_SRC_ALPHA_CTRL 0x00060
+#define RK3399_WIN0_DST_ALPHA_CTRL 0x00064
+#define RK3399_WIN0_FADING_CTRL 0x00068
+#define RK3399_WIN0_CTRL2 0x0006c
+#define RK3399_WIN1_CTRL0 0x00070
+#define RK3399_WIN1_CTRL1 0x00074
+#define RK3399_WIN1_COLOR_KEY 0x00078
+#define RK3399_WIN1_VIR 0x0007c
+#define RK3399_WIN1_YRGB_MST 0x00080
+#define RK3399_WIN1_CBR_MST 0x00084
+#define RK3399_WIN1_ACT_INFO 0x00088
+#define RK3399_WIN1_DSP_INFO 0x0008c
+#define RK3399_WIN1_DSP_ST 0x00090
+#define RK3399_WIN1_SCL_FACTOR_YRGB 0x00094
+#define RK3399_WIN1_SCL_FACTOR_CBR 0x00098
+#define RK3399_WIN1_SCL_OFFSET 0x0009c
+#define RK3399_WIN1_SRC_ALPHA_CTRL 0x000a0
+#define RK3399_WIN1_DST_ALPHA_CTRL 0x000a4
+#define RK3399_WIN1_FADING_CTRL 0x000a8
+#define RK3399_WIN1_CTRL2 0x000ac
+#define RK3399_WIN2_CTRL0 0x000b0
+#define RK3399_WIN2_CTRL1 0x000b4
+#define RK3399_WIN2_VIR0_1 0x000b8
+#define RK3399_WIN2_VIR2_3 0x000bc
+#define RK3399_WIN2_MST0 0x000c0
+#define RK3399_WIN2_DSP_INFO0 0x000c4
+#define RK3399_WIN2_DSP_ST0 0x000c8
+#define RK3399_WIN2_COLOR_KEY 0x000cc
+#define RK3399_WIN2_MST1 0x000d0
+#define RK3399_WIN2_DSP_INFO1 0x000d4
+#define RK3399_WIN2_DSP_ST1 0x000d8
+#define RK3399_WIN2_SRC_ALPHA_CTRL 0x000dc
+#define RK3399_WIN2_MST2 0x000e0
+#define RK3399_WIN2_DSP_INFO2 0x000e4
+#define RK3399_WIN2_DSP_ST2 0x000e8
+#define RK3399_WIN2_DST_ALPHA_CTRL 0x000ec
+#define RK3399_WIN2_MST3 0x000f0
+#define RK3399_WIN2_DSP_INFO3 0x000f4
+#define RK3399_WIN2_DSP_ST3 0x000f8
+#define RK3399_WIN2_FADING_CTRL 0x000fc
+#define RK3399_WIN3_CTRL0 0x00100
+#define RK3399_WIN3_CTRL1 0x00104
+#define RK3399_WIN3_VIR0_1 0x00108
+#define RK3399_WIN3_VIR2_3 0x0010c
+#define RK3399_WIN3_MST0 0x00110
+#define RK3399_WIN3_DSP_INFO0 0x00114
+#define RK3399_WIN3_DSP_ST0 0x00118
+#define RK3399_WIN3_COLOR_KEY 0x0011c
+#define RK3399_WIN3_MST1 0x00120
+#define RK3399_WIN3_DSP_INFO1 0x00124
+#define RK3399_WIN3_DSP_ST1 0x00128
+#define RK3399_WIN3_SRC_ALPHA_CTRL 0x0012c
+#define RK3399_WIN3_MST2 0x00130
+#define RK3399_WIN3_DSP_INFO2 0x00134
+#define RK3399_WIN3_DSP_ST2 0x00138
+#define RK3399_WIN3_DST_ALPHA_CTRL 0x0013c
+#define RK3399_WIN3_MST3 0x00140
+#define RK3399_WIN3_DSP_INFO3 0x00144
+#define RK3399_WIN3_DSP_ST3 0x00148
+#define RK3399_WIN3_FADING_CTRL 0x0014c
+#define RK3399_HWC_CTRL0 0x00150
+#define RK3399_HWC_CTRL1 0x00154
+#define RK3399_HWC_MST 0x00158
+#define RK3399_HWC_DSP_ST 0x0015c
+#define RK3399_HWC_SRC_ALPHA_CTRL 0x00160
+#define RK3399_HWC_DST_ALPHA_CTRL 0x00164
+#define RK3399_HWC_FADING_CTRL 0x00168
+#define RK3399_HWC_RESERVED1 0x0016c
+#define RK3399_POST_DSP_HACT_INFO 0x00170
+#define RK3399_POST_DSP_VACT_INFO 0x00174
+#define RK3399_POST_SCL_FACTOR_YRGB 0x00178
+#define RK3399_POST_RESERVED 0x0017c
+#define RK3399_POST_SCL_CTRL 0x00180
+#define RK3399_POST_DSP_VACT_INFO_F1 0x00184
+#define RK3399_DSP_HTOTAL_HS_END 0x00188
+#define RK3399_DSP_HACT_ST_END 0x0018c
+#define RK3399_DSP_VTOTAL_VS_END 0x00190
+#define RK3399_DSP_VACT_ST_END 0x00194
+#define RK3399_DSP_VS_ST_END_F1 0x00198
+#define RK3399_DSP_VACT_ST_END_F1 0x0019c
+#define RK3399_PWM_CTRL 0x001a0
+#define RK3399_PWM_PERIOD_HPR 0x001a4
+#define RK3399_PWM_DUTY_LPR 0x001a8
+#define RK3399_PWM_CNT 0x001ac
+#define RK3399_BCSH_COLOR_BAR 0x001b0
+#define RK3399_BCSH_BCS 0x001b4
+#define RK3399_BCSH_H 0x001b8
+#define RK3399_BCSH_CTRL 0x001bc
+#define RK3399_CABC_CTRL0 0x001c0
+#define RK3399_CABC_CTRL1 0x001c4
+#define RK3399_CABC_CTRL2 0x001c8
+#define RK3399_CABC_CTRL3 0x001cc
+#define RK3399_CABC_GAUSS_LINE0_0 0x001d0
+#define RK3399_CABC_GAUSS_LINE0_1 0x001d4
+#define RK3399_CABC_GAUSS_LINE1_0 0x001d8
+#define RK3399_CABC_GAUSS_LINE1_1 0x001dc
+#define RK3399_CABC_GAUSS_LINE2_0 0x001e0
+#define RK3399_CABC_GAUSS_LINE2_1 0x001e4
+#define RK3399_FRC_LOWER01_0 0x001e8
+#define RK3399_FRC_LOWER01_1 0x001ec
+#define RK3399_FRC_LOWER10_0 0x001f0
+#define RK3399_FRC_LOWER10_1 0x001f4
+#define RK3399_FRC_LOWER11_0 0x001f8
+#define RK3399_FRC_LOWER11_1 0x001fc
+#define RK3399_AFBCD0_CTRL 0x00200
+#define RK3399_AFBCD0_HDR_PTR 0x00204
+#define RK3399_AFBCD0_PIC_SIZE 0x00208
+#define RK3399_AFBCD0_STATUS 0x0020c
+#define RK3399_AFBCD1_CTRL 0x00220
+#define RK3399_AFBCD1_HDR_PTR 0x00224
+#define RK3399_AFBCD1_PIC_SIZE 0x00228
+#define RK3399_AFBCD1_STATUS 0x0022c
+#define RK3399_AFBCD2_CTRL 0x00240
+#define RK3399_AFBCD2_HDR_PTR 0x00244
+#define RK3399_AFBCD2_PIC_SIZE 0x00248
+#define RK3399_AFBCD2_STATUS 0x0024c
+#define RK3399_AFBCD3_CTRL 0x00260
+#define RK3399_AFBCD3_HDR_PTR 0x00264
+#define RK3399_AFBCD3_PIC_SIZE 0x00268
+#define RK3399_AFBCD3_STATUS 0x0026c
+#define RK3399_INTR_EN0 0x00280
+#define RK3399_INTR_CLEAR0 0x00284
+#define RK3399_INTR_STATUS0 0x00288
+#define RK3399_INTR_RAW_STATUS0 0x0028c
+#define RK3399_INTR_EN1 0x00290
+#define RK3399_INTR_CLEAR1 0x00294
+#define RK3399_INTR_STATUS1 0x00298
+#define RK3399_INTR_RAW_STATUS1 0x0029c
+#define RK3399_LINE_FLAG 0x002a0
+#define RK3399_VOP_STATUS 0x002a4
+#define RK3399_BLANKING_VALUE 0x002a8
+#define RK3399_MCU_BYPASS_PORT 0x002ac
+#define RK3399_WIN0_DSP_BG 0x002b0
+#define RK3399_WIN1_DSP_BG 0x002b4
+#define RK3399_WIN2_DSP_BG 0x002b8
+#define RK3399_WIN3_DSP_BG 0x002bc
+#define RK3399_YUV2YUV_WIN 0x002c0
+#define RK3399_YUV2YUV_POST 0x002c4
+#define RK3399_AUTO_GATING_EN 0x002cc
+#define RK3399_WIN0_CSC_COE 0x003a0
+#define RK3399_WIN1_CSC_COE 0x003c0
+#define RK3399_WIN2_CSC_COE 0x003e0
+#define RK3399_WIN3_CSC_COE 0x00400
+#define RK3399_HWC_CSC_COE 0x00420
+#define RK3399_BCSH_R2Y_CSC_COE 0x00440
+#define RK3399_BCSH_Y2R_CSC_COE 0x00460
+#define RK3399_POST_YUV2YUV_Y2R_COE 0x00480
+#define RK3399_POST_YUV2YUV_3X3_COE 0x004a0
+#define RK3399_POST_YUV2YUV_R2Y_COE 0x004c0
+#define RK3399_WIN0_YUV2YUV_Y2R 0x004e0
+#define RK3399_WIN0_YUV2YUV_3X3 0x00500
+#define RK3399_WIN0_YUV2YUV_R2Y 0x00520
+#define RK3399_WIN1_YUV2YUV_Y2R 0x00540
+#define RK3399_WIN1_YUV2YUV_3X3 0x00560
+#define RK3399_WIN1_YUV2YUV_R2Y 0x00580
+#define RK3399_WIN2_YUV2YUV_Y2R 0x005a0
+#define RK3399_WIN2_YUV2YUV_3X3 0x005c0
+#define RK3399_WIN2_YUV2YUV_R2Y 0x005e0
+#define RK3399_WIN3_YUV2YUV_Y2R 0x00600
+#define RK3399_WIN3_YUV2YUV_3X3 0x00620
+#define RK3399_WIN3_YUV2YUV_R2Y 0x00640
+#define RK3399_WIN2_LUT_ADDR 0x01000
+#define RK3399_WIN3_LUT_ADDR 0x01400
+#define RK3399_HWC_LUT_ADDR 0x01800
+#define RK3399_CABC_GAMMA_LUT_ADDR 0x01c00
+#define RK3399_GAMMA_LUT_ADDR 0x02000
+/* rk3399 register definition end */
+
#endif /* _ROCKCHIP_VOP_REG_H */
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 21aed1febeb4..3b807135a5cd 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -50,7 +50,7 @@ static const struct file_operations savage_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+ DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA | DRIVER_LEGACY,
.dev_priv_size = sizeof(drm_savage_buf_priv_t),
.load = savage_driver_load,
.firstopen = savage_driver_firstopen,
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index c01ad0aeaa58..3dc0d8ff95ec 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -1001,15 +1001,9 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
cmdbuf->cmd_addr = kcmd_addr;
}
if (cmdbuf->vb_size) {
- kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL);
- if (kvb_addr == NULL) {
- ret = -ENOMEM;
- goto done;
- }
-
- if (copy_from_user(kvb_addr, cmdbuf->vb_addr,
- cmdbuf->vb_size)) {
- ret = -EFAULT;
+ kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
+ if (IS_ERR(kvb_addr)) {
+ ret = PTR_ERR(kvb_addr);
goto done;
}
cmdbuf->vb_addr = kvb_addr;
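
The savage hunk above folds the open-coded allocate-then-copy sequence into memdup_user(), which returns either the filled kernel buffer or an ERR_PTR() encoding -ENOMEM or -EFAULT. A minimal sketch of the two equivalent shapes (hypothetical helper names, not part of the patch):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Open-coded form, as removed above. */
static void *dup_from_user_old(const void __user *uptr, size_t size)
{
	void *kbuf = kmalloc(size, GFP_KERNEL);

	if (!kbuf)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(kbuf, uptr, size)) {
		kfree(kbuf);
		return ERR_PTR(-EFAULT);
	}
	return kbuf;
}

/* memdup_user() form, as added above; same semantics in one call. */
static void *dup_from_user_new(const void __user *uptr, size_t size)
{
	return memdup_user(uptr, size);
}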
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 79bce76cb8f7..ae9839886c4d 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -102,7 +102,7 @@ static void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
}
static struct drm_driver driver = {
- .driver_features = DRIVER_USE_AGP,
+ .driver_features = DRIVER_USE_AGP | DRIVER_LEGACY,
.load = sis_driver_load,
.unload = sis_driver_unload,
.open = sis_driver_open,
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 494ab257f77c..acd72865feac 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,6 +1,6 @@
config DRM_STI
- tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
- depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+ tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
+ depends on DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
select RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
@@ -9,4 +9,4 @@ config DRM_STI
select FW_LOADER
select SND_SOC_HDMI_CODEC if SND_SOC
help
- Choose this option to enable DRM on STM stiH41x chipset
+ Choose this option to enable DRM on STM stiH4xx chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index b8057620b3b3..d20f7c0b4eac 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -9,7 +9,6 @@ sti-drm-y := \
sti_crtc.o \
sti_plane.o \
sti_hdmi.o \
- sti_hdmi_tx3g0c55phy.o \
sti_hdmi_tx3g4c28phy.o \
sti_dvo.o \
sti_awg_utils.o \
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 134201ecc6fd..f62041fe8412 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -25,7 +25,7 @@
/*
* stiH407 compositor properties
*/
-struct sti_compositor_data stih407_compositor_data = {
+static const struct sti_compositor_data stih407_compositor_data = {
.nb_subdev = 8,
.subdev_desc = {
{STI_CURSOR_SUBDEV, (int)STI_CURSOR, 0x000},
@@ -39,38 +39,18 @@ struct sti_compositor_data stih407_compositor_data = {
},
};
-/*
- * stiH416 compositor properties
- * Note:
- * on stih416 MIXER_AUX has a different base address from MIXER_MAIN
- * Moreover, GDPx is different for Main and Aux Mixer. So this subdev map does
- * not fit for stiH416 if we want to enable the MIXER_AUX.
- */
-struct sti_compositor_data stih416_compositor_data = {
- .nb_subdev = 3,
- .subdev_desc = {
- {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
- {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
- {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
- },
-};
-
-int sti_compositor_debufs_init(struct sti_compositor *compo,
- struct drm_minor *minor)
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+ struct drm_minor *minor)
{
- int ret = 0, i;
+ unsigned int i;
- for (i = 0; compo->vid[i]; i++) {
- ret = vid_debugfs_init(compo->vid[i], minor);
- if (ret)
- return ret;
- }
+ for (i = 0; i < STI_MAX_VID; i++)
+ if (compo->vid[i])
+ vid_debugfs_init(compo->vid[i], minor);
- for (i = 0; compo->mixer[i]; i++) {
- ret = sti_mixer_debugfs_init(compo->mixer[i], minor);
- if (ret)
- return ret;
- }
+ for (i = 0; i < STI_MAX_MIXER; i++)
+ if (compo->mixer[i])
+ sti_mixer_debugfs_init(compo->mixer[i], minor);
return 0;
}
@@ -183,9 +163,6 @@ static const struct component_ops sti_compositor_ops = {
static const struct of_device_id compositor_of_match[] = {
{
- .compatible = "st,stih416-compositor",
- .data = &stih416_compositor_data,
- }, {
.compatible = "st,stih407-compositor",
.data = &stih407_compositor_data,
}, {
@@ -201,6 +178,7 @@ static int sti_compositor_probe(struct platform_device *pdev)
struct device_node *vtg_np;
struct sti_compositor *compo;
struct resource *res;
+ unsigned int i;
compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
if (!compo) {
@@ -208,7 +186,8 @@ static int sti_compositor_probe(struct platform_device *pdev)
return -ENOMEM;
}
compo->dev = dev;
- compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
+ for (i = 0; i < STI_MAX_MIXER; i++)
+ compo->vtg_vblank_nb[i].notifier_call = sti_crtc_vblank_cb;
/* populate data structure depending on compatibility */
BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -266,12 +245,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
if (vtg_np)
- compo->vtg_main = of_vtg_find(vtg_np);
+ compo->vtg[STI_MIXER_MAIN] = of_vtg_find(vtg_np);
of_node_put(vtg_np);
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
if (vtg_np)
- compo->vtg_aux = of_vtg_find(vtg_np);
+ compo->vtg[STI_MIXER_AUX] = of_vtg_find(vtg_np);
of_node_put(vtg_np);
platform_set_drvdata(pdev, compo);
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 24444ef42a98..2952a2d25a52 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -60,9 +60,8 @@ struct sti_compositor_data {
* @rst_aux: reset control of the aux path
* @mixer: array of mixers
* @vid: array of vids
- * @vtg_main: vtg for main data path
- * @vtg_aux: vtg for auxillary data path
- * @vtg_vblank_nb: callback for VTG VSYNC notification
+ * @vtg: array of vtgs
+ * @vtg_vblank_nb: array of callbacks for VTG VSYNC notification
*/
struct sti_compositor {
struct device *dev;
@@ -76,12 +75,11 @@ struct sti_compositor {
struct reset_control *rst_aux;
struct sti_mixer *mixer[STI_MAX_MIXER];
struct sti_vid *vid[STI_MAX_VID];
- struct sti_vtg *vtg_main;
- struct sti_vtg *vtg_aux;
- struct notifier_block vtg_vblank_nb;
+ struct sti_vtg *vtg[STI_MAX_MIXER];
+ struct notifier_block vtg_vblank_nb[STI_MAX_MIXER];
};
-int sti_compositor_debufs_init(struct sti_compositor *compo,
- struct drm_minor *minor);
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+ struct drm_minor *minor);
#endif
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index c7d734dc3cf4..e992bed98dcb 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -86,8 +86,7 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
goto pix_error;
}
- sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux, &crtc->mode);
+ sti_vtg_set_config(compo->vtg[mixer->id], &crtc->mode);
if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
DRM_ERROR("Can't set active video area\n");
@@ -166,6 +165,10 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
switch (plane->status) {
case STI_PLANE_UPDATED:
+ /* ignore update for other CRTC */
+ if (p->state->crtc != crtc)
+ continue;
+
/* update planes tag as updated */
DRM_DEBUG_DRIVER("update plane %s\n",
sti_plane_to_str(plane));
@@ -244,8 +247,7 @@ static int sti_crtc_set_property(struct drm_crtc *crtc,
int sti_crtc_vblank_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
- struct sti_compositor *compo =
- container_of(nb, struct sti_compositor, vtg_vblank_nb);
+ struct sti_compositor *compo;
struct drm_crtc *crtc = data;
struct sti_mixer *mixer;
unsigned long flags;
@@ -254,6 +256,7 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
priv = crtc->dev->dev_private;
pipe = drm_crtc_index(crtc);
+ compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]);
mixer = compo->mixer[pipe];
if ((event != VTG_TOP_FIELD_EVENT) &&
@@ -295,14 +298,13 @@ int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct sti_private *dev_priv = dev->dev_private;
struct sti_compositor *compo = dev_priv->compo;
- struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+ struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
+ struct sti_vtg *vtg = compo->vtg[pipe];
DRM_DEBUG_DRIVER("\n");
- if (sti_vtg_register_client(pipe == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux,
- vtg_vblank_nb, crtc)) {
+ if (sti_vtg_register_client(vtg, vtg_vblank_nb, crtc)) {
DRM_ERROR("Cannot register VTG notifier\n");
return -EINVAL;
}
@@ -314,13 +316,13 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
{
struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
- struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+ struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
+ struct sti_vtg *vtg = compo->vtg[pipe];
DRM_DEBUG_DRIVER("\n");
- if (sti_vtg_unregister_client(pipe == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
+ if (sti_vtg_unregister_client(vtg, vtg_vblank_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
/* free the resources of the pending requests */
@@ -336,7 +338,7 @@ static int sti_crtc_late_register(struct drm_crtc *crtc)
struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
if (drm_crtc_index(crtc) == 0)
- return sti_compositor_debufs_init(compo, crtc->dev->primary);
+ return sti_compositor_debugfs_init(compo, crtc->dev->primary);
return 0;
}
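
The sti_crtc.c changes above replace the single shared vtg_vblank_nb with a per-pipe array, and the vblank callback maps the notifier pointer back to the compositor with container_of() applied to an indexed array member. A self-contained sketch of that idiom (demo type and names are illustrative only):

#include <linux/kernel.h>
#include <linux/notifier.h>

#define DEMO_MAX_PIPES 2

struct demo_compo {
	int id;
	struct notifier_block vblank_nb[DEMO_MAX_PIPES];
};

/*
 * offsetof(), and therefore container_of(), accepts an array subscript
 * in the member argument, so a pointer to vblank_nb[pipe] can be
 * mapped back to the enclosing structure provided the callback can
 * learn which pipe it was registered for (above, sti_crtc_vblank_cb()
 * derives it from its data argument via drm_crtc_index()).
 */
static struct demo_compo *nb_to_compo(struct notifier_block *nb,
				      unsigned int pipe)
{
	return container_of(nb, struct demo_compo, vblank_nb[pipe]);
}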
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 3b53f7f2e3fc..cca75bddb9ad 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -309,15 +309,15 @@ static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
{
struct sti_plane *plane = to_sti_plane(drm_plane);
- if (!drm_plane->crtc) {
+ if (!oldstate->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- drm_plane->crtc->base.id,
- sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
+ oldstate->crtc->base.id,
+ sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
@@ -345,7 +345,7 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
return cursor_debugfs_init(cursor, drm_plane->dev->primary);
}
-struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
+static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_cursor_destroy,
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 96bd3d08b2d4..9df308565f6c 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -140,7 +140,7 @@ err:
return ret;
}
-void sti_drm_dbg_cleanup(struct drm_minor *minor)
+static void sti_drm_dbg_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(sti_drm_dbg_list,
ARRAY_SIZE(sti_drm_dbg_list), minor);
@@ -178,7 +178,7 @@ static void sti_atomic_complete(struct sti_private *private,
*/
drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_planes(drm, state, false);
+ drm_atomic_helper_commit_planes(drm, state, 0);
drm_atomic_helper_commit_modeset_enables(drm, state);
drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -195,6 +195,26 @@ static void sti_atomic_work(struct work_struct *work)
sti_atomic_complete(private, private->commit.state);
}
+static int sti_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
+ return drm_atomic_helper_check_planes(dev, state);
+}
+
static int sti_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state, bool nonblock)
{
@@ -248,7 +268,7 @@ static void sti_output_poll_changed(struct drm_device *ddev)
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = sti_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = sti_atomic_check,
.atomic_commit = sti_atomic_commit,
};
@@ -282,7 +302,7 @@ static const struct file_operations sti_driver_fops = {
};
static struct drm_driver sti_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+ .driver_features = DRIVER_MODESET |
DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -365,8 +385,8 @@ static int sti_bind(struct device *dev)
int ret;
ddev = drm_dev_alloc(&sti_driver, dev);
- if (!ddev)
- return -ENOMEM;
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
ddev->platformdev = to_platform_device(dev);
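
drm_dev_alloc() changed its failure convention from returning NULL to returning an ERR_PTR(), which is why the caller above now propagates the encoded errno rather than assuming -ENOMEM. A minimal sketch of the calling pattern (the stand-in allocator is hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

/* Stand-in allocator with drm_dev_alloc()-style ERR_PTR() failures. */
static void *sketch_alloc(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	return p ? p : ERR_PTR(-ENOMEM);
}

static int sketch_bind(void)
{
	void *handle = sketch_alloc(64);

	/* A NULL check would miss the error; decode the pointer instead. */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	kfree(handle);
	return 0;
}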
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 00881eb4536e..e8c1ed08a9f7 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -17,6 +17,7 @@
#include <drm/drm_panel.h>
#include "sti_awg_utils.h"
+#include "sti_drv.h"
#include "sti_mixer.h"
/* DVO registers */
@@ -106,7 +107,7 @@ struct sti_dvo_connector {
container_of(x, struct sti_dvo_connector, drm_connector)
#define BLANKING_LEVEL 16
-int dvo_awg_generate_code(struct sti_dvo *dvo, u8 *ram_size, u32 *ram_code)
+static int dvo_awg_generate_code(struct sti_dvo *dvo, u8 *ram_size,
+ u32 *ram_code)
{
struct drm_display_mode *mode = &dvo->mode;
struct dvo_config *config = dvo->config;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index b8d942ca45e8..81df3097b545 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -460,6 +460,7 @@ static void sti_gdp_disable(struct sti_gdp *gdp)
clk_disable_unprepare(gdp->clk_pix);
gdp->plane.status = STI_PLANE_DISABLED;
+ gdp->vtg = NULL;
}
/**
@@ -473,8 +474,8 @@ static void sti_gdp_disable(struct sti_gdp *gdp)
* RETURNS:
* 0 on success.
*/
-int sti_gdp_field_cb(struct notifier_block *nb,
- unsigned long event, void *data)
+static int sti_gdp_field_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
@@ -611,7 +612,6 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
struct drm_crtc *crtc = state->crtc;
struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
struct drm_framebuffer *fb = state->fb;
- bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
struct drm_crtc_state *crtc_state;
struct sti_mixer *mixer;
struct drm_display_mode *mode;
@@ -628,8 +628,8 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
mode = &crtc_state->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
- dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
@@ -648,10 +648,9 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
return -EINVAL;
}
- if (first_prepare) {
+ if (!gdp->vtg) {
/* Register gdp callback */
- gdp->vtg = mixer->id == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux;
+ gdp->vtg = compo->vtg[mixer->id];
if (sti_vtg_register_client(gdp->vtg,
&gdp->vtg_field_nb, crtc)) {
DRM_ERROR("Cannot register VTG notifier\n");
@@ -719,7 +718,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
u32 dma_updated_top;
u32 dma_updated_btm;
int format;
- unsigned int depth, bpp;
+ unsigned int bpp;
u32 ydo, xdo, yds, xds;
if (!crtc || !fb)
@@ -728,8 +727,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
mode = &crtc->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
- dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
@@ -758,9 +757,9 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
(unsigned long)cma_obj->paddr);
/* pixel memory location */
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+ bpp = drm_format_plane_cpp(fb->pixel_format, 0);
top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
- top_field->gam_gdp_pml += src_x * (bpp >> 3);
+ top_field->gam_gdp_pml += src_x * bpp;
top_field->gam_gdp_pml += src_y * fb->pitches[0];
/* output parameters (clamped / cropped) */
@@ -810,7 +809,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
if (!curr_list) {
/* A first update or an invalid node should write directly to the
* hw register */
- DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+ DRM_DEBUG_DRIVER("%s first update (or invalid node)\n",
sti_plane_to_str(plane));
writel(gdp->is_curr_top ?
@@ -846,15 +845,15 @@ static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
{
struct sti_plane *plane = to_sti_plane(drm_plane);
- if (!drm_plane->crtc) {
+ if (!oldstate->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- drm_plane->crtc->base.id,
- sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
+ oldstate->crtc->base.id,
+ sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
@@ -882,7 +881,7 @@ static int sti_gdp_late_register(struct drm_plane *drm_plane)
return gdp_debugfs_init(gdp, drm_plane->dev->primary);
}
-struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
+static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_gdp_destroy,
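
The gdp hunks above also swap drm_fb_get_bpp_depth(), which reports bits per pixel, for drm_format_plane_cpp(), which reports bytes per pixel, so the "bpp >> 3" conversion drops out of the scan-line arithmetic. A sketch of the addressing math for a packed single-plane format (helper name is illustrative; for XRGB8888, cpp == 4 where the old bpp was 32):

#include <linux/types.h>

static u32 pixel_addr_sketch(u32 base, unsigned int cpp,
			     unsigned int src_x, unsigned int src_y,
			     unsigned int pitch)
{
	/* pitch is in bytes per scan line, cpp in bytes per pixel. */
	return base + src_x * cpp + src_y * pitch;
}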
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 8505569f75de..e7c243f70870 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -62,14 +62,8 @@
#define SCALE_CTRL_CR_DFLT 0x00DB0249
/* Video DACs control */
-#define VIDEO_DACS_CONTROL_MASK 0x0FFF
-#define VIDEO_DACS_CONTROL_SYSCFG2535 0x085C /* for stih416 */
-#define DAC_CFG_HD_OFF_SHIFT 5
-#define DAC_CFG_HD_OFF_MASK (0x7 << DAC_CFG_HD_OFF_SHIFT)
-#define VIDEO_DACS_CONTROL_SYSCFG5072 0x0120 /* for stih407 */
#define DAC_CFG_HD_HZUVW_OFF_MASK BIT(1)
-
/* Upsampler values for the alternative 2X Filter */
#define SAMPLER_COEF_NB 8
#define HDA_ANA_SRC_Y_CFG_ALT_2X 0x01130000
@@ -300,28 +294,14 @@ static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
*/
static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
{
- u32 mask;
-
if (hda->video_dacs_ctrl) {
u32 val;
- switch ((u32)hda->video_dacs_ctrl & VIDEO_DACS_CONTROL_MASK) {
- case VIDEO_DACS_CONTROL_SYSCFG2535:
- mask = DAC_CFG_HD_OFF_MASK;
- break;
- case VIDEO_DACS_CONTROL_SYSCFG5072:
- mask = DAC_CFG_HD_HZUVW_OFF_MASK;
- break;
- default:
- DRM_INFO("Video DACS control register not supported!");
- return;
- }
-
val = readl(hda->video_dacs_ctrl);
if (enable)
- val &= ~mask;
+ val &= ~DAC_CFG_HD_HZUVW_OFF_MASK;
else
- val |= mask;
+ val |= DAC_CFG_HD_HZUVW_OFF_MASK;
writel(val, hda->video_dacs_ctrl);
}
@@ -352,24 +332,11 @@ static void hda_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
static void hda_dbg_video_dacs_ctrl(struct seq_file *s, void __iomem *reg)
{
u32 val = readl(reg);
- u32 mask;
-
- switch ((u32)reg & VIDEO_DACS_CONTROL_MASK) {
- case VIDEO_DACS_CONTROL_SYSCFG2535:
- mask = DAC_CFG_HD_OFF_MASK;
- break;
- case VIDEO_DACS_CONTROL_SYSCFG5072:
- mask = DAC_CFG_HD_HZUVW_OFF_MASK;
- break;
- default:
- DRM_DEBUG_DRIVER("Warning: DACS ctrl register not supported!");
- return;
- }
seq_puts(s, "\n");
seq_printf(s, "\n %-25s 0x%08X", "VIDEO_DACS_CONTROL", val);
seq_puts(s, "\tHD DACs ");
- seq_puts(s, val & mask ? "disabled" : "enabled");
+ seq_puts(s, val & DAC_CFG_HD_HZUVW_OFF_MASK ? "disabled" : "enabled");
}
static int hda_dbg_show(struct seq_file *s, void *data)
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index fedc17f98d9b..376b0763c874 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -22,7 +22,6 @@
#include "sti_hdmi.h"
#include "sti_hdmi_tx3g4c28phy.h"
-#include "sti_hdmi_tx3g0c55phy.h"
#include "sti_vtg.h"
#define HDMI_CFG 0x0000
@@ -203,7 +202,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
/* Audio FIFO underrun IRQ */
if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
- DRM_INFO("Warning: audio FIFO underrun occurs!");
+ DRM_INFO("Warning: audio FIFO underrun occurs!\n");
return IRQ_HANDLED;
}
@@ -569,7 +568,7 @@ static void hdmi_swreset(struct sti_hdmi *hdmi)
/* Wait reset completed */
wait_event_interruptible_timeout(hdmi->wait_event,
- hdmi->event_received == true,
+ hdmi->event_received,
msecs_to_jiffies
(HDMI_TIMEOUT_SWRESET));
@@ -1054,6 +1053,7 @@ static int sti_hdmi_late_register(struct drm_connector *connector)
}
static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hdmi_connector_detect,
.destroy = drm_connector_cleanup,
@@ -1181,7 +1181,7 @@ static void hdmi_audio_shutdown(struct device *dev, void *data)
HDMI_AUD_CFG_ONE_BIT_INVALID;
hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
- hdmi->audio.enabled = 0;
+ hdmi->audio.enabled = false;
hdmi_audio_infoframe_config(hdmi);
}
@@ -1213,7 +1213,7 @@ static int hdmi_audio_hw_params(struct device *dev,
return -EINVAL;
}
- audio.enabled = 1;
+ audio.enabled = true;
ret = hdmi_audio_configure(hdmi, &audio);
if (ret < 0)
@@ -1265,7 +1265,7 @@ static int sti_hdmi_register_audio_driver(struct device *dev,
DRM_DEBUG_DRIVER("\n");
- hdmi->audio.enabled = 0;
+ hdmi->audio.enabled = false;
hdmi->audio_pdev = platform_device_register_data(
dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
@@ -1373,9 +1373,6 @@ static const struct component_ops sti_hdmi_ops = {
static const struct of_device_id hdmi_of_match[] = {
{
- .compatible = "st,stih416-hdmi",
- .data = &tx3g0c55phy_ops,
- }, {
.compatible = "st,stih407-hdmi",
.data = &tx3g4c28phy_ops,
}, {
@@ -1422,22 +1419,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
goto release_adapter;
}
- if (of_device_is_compatible(np, "st,stih416-hdmi")) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "syscfg");
- if (!res) {
- DRM_ERROR("Invalid syscfg resource\n");
- ret = -ENOMEM;
- goto release_adapter;
- }
- hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
- resource_size(res));
- if (!hdmi->syscfg) {
- ret = -ENOMEM;
- goto release_adapter;
- }
- }
-
hdmi->phy_ops = (struct hdmi_phy_ops *)
of_match_node(hdmi_of_match, np)->data;
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
deleted file mode 100644
index 49ae8e44b285..000000000000
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#include "sti_hdmi_tx3g0c55phy.h"
-
-#define HDMI_SRZ_PLL_CFG 0x0504
-#define HDMI_SRZ_TAP_1 0x0508
-#define HDMI_SRZ_TAP_2 0x050C
-#define HDMI_SRZ_TAP_3 0x0510
-#define HDMI_SRZ_CTRL 0x0514
-
-#define HDMI_SRZ_PLL_CFG_POWER_DOWN BIT(0)
-#define HDMI_SRZ_PLL_CFG_VCOR_SHIFT 1
-#define HDMI_SRZ_PLL_CFG_VCOR_425MHZ 0
-#define HDMI_SRZ_PLL_CFG_VCOR_850MHZ 1
-#define HDMI_SRZ_PLL_CFG_VCOR_1700MHZ 2
-#define HDMI_SRZ_PLL_CFG_VCOR_3000MHZ 3
-#define HDMI_SRZ_PLL_CFG_VCOR_MASK 3
-#define HDMI_SRZ_PLL_CFG_VCOR(x) (x << HDMI_SRZ_PLL_CFG_VCOR_SHIFT)
-#define HDMI_SRZ_PLL_CFG_NDIV_SHIFT 8
-#define HDMI_SRZ_PLL_CFG_NDIV_MASK (0x1F << HDMI_SRZ_PLL_CFG_NDIV_SHIFT)
-#define HDMI_SRZ_PLL_CFG_MODE_SHIFT 16
-#define HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ 0x1
-#define HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ 0x4
-#define HDMI_SRZ_PLL_CFG_MODE_27_MHZ 0x5
-#define HDMI_SRZ_PLL_CFG_MODE_33_75_MHZ 0x6
-#define HDMI_SRZ_PLL_CFG_MODE_40_5_MHZ 0x7
-#define HDMI_SRZ_PLL_CFG_MODE_54_MHZ 0x8
-#define HDMI_SRZ_PLL_CFG_MODE_67_5_MHZ 0x9
-#define HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ 0xA
-#define HDMI_SRZ_PLL_CFG_MODE_81_MHZ 0xB
-#define HDMI_SRZ_PLL_CFG_MODE_82_5_MHZ 0xC
-#define HDMI_SRZ_PLL_CFG_MODE_108_MHZ 0xD
-#define HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ 0xE
-#define HDMI_SRZ_PLL_CFG_MODE_165_MHZ 0xF
-#define HDMI_SRZ_PLL_CFG_MODE_MASK 0xF
-#define HDMI_SRZ_PLL_CFG_MODE(x) (x << HDMI_SRZ_PLL_CFG_MODE_SHIFT)
-
-#define HDMI_SRZ_CTRL_POWER_DOWN (1 << 0)
-#define HDMI_SRZ_CTRL_EXTERNAL_DATA_EN (1 << 1)
-
-/* sysconf registers */
-#define HDMI_REJECTION_PLL_CONFIGURATION 0x0858 /* SYSTEM_CONFIG2534 */
-#define HDMI_REJECTION_PLL_STATUS 0x0948 /* SYSTEM_CONFIG2594 */
-
-#define REJECTION_PLL_HDMI_ENABLE_SHIFT 0
-#define REJECTION_PLL_HDMI_ENABLE_MASK (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT)
-#define REJECTION_PLL_HDMI_PDIV_SHIFT 24
-#define REJECTION_PLL_HDMI_PDIV_MASK (0x7 << REJECTION_PLL_HDMI_PDIV_SHIFT)
-#define REJECTION_PLL_HDMI_NDIV_SHIFT 16
-#define REJECTION_PLL_HDMI_NDIV_MASK (0xFF << REJECTION_PLL_HDMI_NDIV_SHIFT)
-#define REJECTION_PLL_HDMI_MDIV_SHIFT 8
-#define REJECTION_PLL_HDMI_MDIV_MASK (0xFF << REJECTION_PLL_HDMI_MDIV_SHIFT)
-
-#define REJECTION_PLL_HDMI_REJ_PLL_LOCK BIT(0)
-
-#define HDMI_TIMEOUT_PLL_LOCK 50 /*milliseconds */
-
-/**
- * pll mode structure
- *
- * A pointer to an array of these structures is passed to a TMDS (HDMI) output
- * via the control interface to provide board and SoC specific
- * configurations of the HDMI PHY. Each entry in the array specifies a hardware
- * specific configuration for a given TMDS clock frequency range. The array
- * should be terminated with an entry that has all fields set to zero.
- *
- * @min: Lower bound of TMDS clock frequency this entry applies to
- * @max: Upper bound of TMDS clock frequency this entry applies to
- * @mode: SoC specific register configuration
- */
-struct pllmode {
- u32 min;
- u32 max;
- u32 mode;
-};
-
-#define NB_PLL_MODE 7
-static struct pllmode pllmodes[NB_PLL_MODE] = {
- {13500000, 13513500, HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ},
- {25174800, 25200000, HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ},
- {27000000, 27027000, HDMI_SRZ_PLL_CFG_MODE_27_MHZ},
- {54000000, 54054000, HDMI_SRZ_PLL_CFG_MODE_54_MHZ},
- {72000000, 74250000, HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ},
- {108000000, 108108000, HDMI_SRZ_PLL_CFG_MODE_108_MHZ},
- {148351648, 297000000, HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ}
-};
-
-#define NB_HDMI_PHY_CONFIG 5
-static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
- {0, 40000000, {0x00101010, 0x00101010, 0x00101010, 0x02} },
- {40000000, 140000000, {0x00111111, 0x00111111, 0x00111111, 0x02} },
- {140000000, 160000000, {0x00131313, 0x00101010, 0x00101010, 0x02} },
- {160000000, 250000000, {0x00131313, 0x00111111, 0x00111111, 0x03FE} },
- {250000000, 300000000, {0x00151515, 0x00101010, 0x00101010, 0x03FE} },
-};
-
-#define PLL_CHANGE_DELAY 1 /* ms */
-
-/**
- * Disable the pll rejection
- *
- * @hdmi: pointer on the hdmi internal structure
- *
- * return true if the pll has been disabled
- */
-static bool disable_pll_rejection(struct sti_hdmi *hdmi)
-{
- u32 val;
-
- DRM_DEBUG_DRIVER("\n");
-
- val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
- val &= ~REJECTION_PLL_HDMI_ENABLE_MASK;
- writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-
- msleep(PLL_CHANGE_DELAY);
- val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
-
- return !(val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
-}
-
-/**
- * Enable the old BCH/rejection PLL is now reused to provide the CLKPXPLL
- * clock input to the new PHY PLL that generates the serializer clock
- * (TMDS*10) and the TMDS clock which is now fed back into the HDMI
- * formatter instead of the TMDS clock line from ClockGenB.
- *
- * @hdmi: pointer on the hdmi internal structure
- *
- * return true if pll has been correctly set
- */
-static bool enable_pll_rejection(struct sti_hdmi *hdmi)
-{
- unsigned int inputclock;
- u32 mdiv, ndiv, pdiv, val;
-
- DRM_DEBUG_DRIVER("\n");
-
- if (!disable_pll_rejection(hdmi))
- return false;
-
- inputclock = hdmi->mode.clock * 1000;
-
- DRM_DEBUG_DRIVER("hdmi rejection pll input clock = %dHz\n", inputclock);
-
-
- /* Power up the HDMI rejection PLL
- * Note: On this SoC (stiH416) we are forced to have the input clock
- * be equal to the HDMI pixel clock.
- *
- * The values here have been suggested by validation however they are
- * still provisional and subject to change.
- *
- * PLLout = (Fin*Mdiv) / ((2 * Ndiv) / 2^Pdiv)
- */
- if (inputclock < 50000000) {
- /*
- * For slower clocks we need to multiply more to keep the
- * internal VCO frequency within the physical specification
- * of the PLL.
- */
- pdiv = 4;
- ndiv = 240;
- mdiv = 30;
- } else {
- pdiv = 2;
- ndiv = 60;
- mdiv = 30;
- }
-
- val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-
- val &= ~(REJECTION_PLL_HDMI_PDIV_MASK |
- REJECTION_PLL_HDMI_NDIV_MASK |
- REJECTION_PLL_HDMI_MDIV_MASK |
- REJECTION_PLL_HDMI_ENABLE_MASK);
-
- val |= (pdiv << REJECTION_PLL_HDMI_PDIV_SHIFT) |
- (ndiv << REJECTION_PLL_HDMI_NDIV_SHIFT) |
- (mdiv << REJECTION_PLL_HDMI_MDIV_SHIFT) |
- (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT);
-
- writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
-
- msleep(PLL_CHANGE_DELAY);
- val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
-
- return (val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
-}
-
-/**
- * Start hdmi phy macro cell tx3g0c55
- *
- * @hdmi: pointer on the hdmi internal structure
- *
- * Return false if an error occur
- */
-static bool sti_hdmi_tx3g0c55phy_start(struct sti_hdmi *hdmi)
-{
- u32 ckpxpll = hdmi->mode.clock * 1000;
- u32 val, tmdsck, freqvco, pllctrl = 0;
- unsigned int i;
-
- if (!enable_pll_rejection(hdmi))
- return false;
-
- DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
-
- /* Assuming no pixel repetition and 24bits color */
- tmdsck = ckpxpll;
- pllctrl = 2 << HDMI_SRZ_PLL_CFG_NDIV_SHIFT;
-
- /*
- * Setup the PLL mode parameter based on the ckpxpll. If we haven't got
- * a clock frequency supported by one of the specific PLL modes then we
- * will end up using the generic mode (0) which only supports a 10x
- * multiplier, hence only 24bit color.
- */
- for (i = 0; i < NB_PLL_MODE; i++) {
- if (ckpxpll >= pllmodes[i].min && ckpxpll <= pllmodes[i].max)
- pllctrl |= HDMI_SRZ_PLL_CFG_MODE(pllmodes[i].mode);
- }
-
- freqvco = tmdsck * 10;
- if (freqvco <= 425000000UL)
- pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_425MHZ);
- else if (freqvco <= 850000000UL)
- pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_850MHZ);
- else if (freqvco <= 1700000000UL)
- pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_1700MHZ);
- else if (freqvco <= 2970000000UL)
- pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_3000MHZ);
- else {
- DRM_ERROR("PHY serializer clock out of range\n");
- goto err;
- }
-
- /*
- * Configure and power up the PHY PLL
- */
- hdmi->event_received = false;
- DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
- hdmi_write(hdmi, pllctrl, HDMI_SRZ_PLL_CFG);
-
- /* wait PLL interrupt */
- wait_event_interruptible_timeout(hdmi->wait_event,
- hdmi->event_received == true,
- msecs_to_jiffies
- (HDMI_TIMEOUT_PLL_LOCK));
-
- if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
- DRM_ERROR("hdmi phy pll not locked\n");
- goto err;
- }
-
- DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
-
- /*
- * To configure the source termination and pre-emphasis appropriately
- * for different high speed TMDS clock frequencies a phy configuration
- * table must be provided, tailored to the SoC and board combination.
- */
- for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
- if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
- (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
- val = hdmiphy_config[i].config[0];
- hdmi_write(hdmi, val, HDMI_SRZ_TAP_1);
- val = hdmiphy_config[i].config[1];
- hdmi_write(hdmi, val, HDMI_SRZ_TAP_2);
- val = hdmiphy_config[i].config[2];
- hdmi_write(hdmi, val, HDMI_SRZ_TAP_3);
- val = hdmiphy_config[i].config[3];
- val |= HDMI_SRZ_CTRL_EXTERNAL_DATA_EN;
- val &= ~HDMI_SRZ_CTRL_POWER_DOWN;
- hdmi_write(hdmi, val, HDMI_SRZ_CTRL);
-
- DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x 0x%x\n",
- hdmiphy_config[i].config[0],
- hdmiphy_config[i].config[1],
- hdmiphy_config[i].config[2],
- hdmiphy_config[i].config[3]);
- return true;
- }
- }
-
- /*
- * Default, power up the serializer with no pre-emphasis or source
- * termination.
- */
- hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_1);
- hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_2);
- hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_3);
- hdmi_write(hdmi, HDMI_SRZ_CTRL_EXTERNAL_DATA_EN, HDMI_SRZ_CTRL);
-
- return true;
-
-err:
- disable_pll_rejection(hdmi);
-
- return false;
-}
-
-/**
- * Stop hdmi phy macro cell tx3g0c55
- *
- * @hdmi: pointer on the hdmi internal structure
- */
-static void sti_hdmi_tx3g0c55phy_stop(struct sti_hdmi *hdmi)
-{
- DRM_DEBUG_DRIVER("\n");
-
- hdmi->event_received = false;
-
- hdmi_write(hdmi, HDMI_SRZ_CTRL_POWER_DOWN, HDMI_SRZ_CTRL);
- hdmi_write(hdmi, HDMI_SRZ_PLL_CFG_POWER_DOWN, HDMI_SRZ_PLL_CFG);
-
- /* wait PLL interrupt */
- wait_event_interruptible_timeout(hdmi->wait_event,
- hdmi->event_received == true,
- msecs_to_jiffies
- (HDMI_TIMEOUT_PLL_LOCK));
-
- if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
- DRM_ERROR("hdmi phy pll not well disabled\n");
-
- disable_pll_rejection(hdmi);
-}
-
-struct hdmi_phy_ops tx3g0c55phy_ops = {
- .start = sti_hdmi_tx3g0c55phy_start,
- .stop = sti_hdmi_tx3g0c55phy_stop,
-};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
deleted file mode 100644
index 068237b3a303..000000000000
--- a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms: GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_HDMI_TX3G0C55PHY_H_
-#define _STI_HDMI_TX3G0C55PHY_H_
-
-#include "sti_hdmi.h"
-
-extern struct hdmi_phy_ops tx3g0c55phy_ops;
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b5ee783e3e7c..f88130f2eb48 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -17,6 +17,7 @@
#include "sti_hqvdp_lut.h"
#include "sti_plane.h"
#include "sti_vtg.h"
+#include "sti_drv.h"
/* Firmware name */
#define HQVDP_FMW_NAME "hqvdp-stih407.bin"
@@ -770,6 +771,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
DRM_ERROR("XP70 could not revert to idle\n");
hqvdp->plane.status = STI_PLANE_DISABLED;
+ hqvdp->xp70_initialized = false;
}
/**
@@ -783,7 +785,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
* RETURNS:
* 0 on success.
*/
-int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
+static int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt,
+ void *data)
{
struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
int btm_cmd_offset, top_cmd_offest;
@@ -1012,7 +1014,6 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
- bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
struct drm_crtc_state *crtc_state;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
@@ -1026,8 +1027,8 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
mode = &crtc_state->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
- dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
@@ -1063,7 +1064,7 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
return -EINVAL;
}
- if (first_prepare) {
+ if (!hqvdp->xp70_initialized) {
/* Start HQVDP XP70 coprocessor */
sti_hqvdp_start_xp70(hqvdp);
@@ -1115,8 +1116,8 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
mode = &crtc->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
- dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
@@ -1214,15 +1215,15 @@ static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
{
struct sti_plane *plane = to_sti_plane(drm_plane);
- if (!drm_plane->crtc) {
+ if (!oldstate->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
drm_plane->base.id);
return;
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- drm_plane->crtc->base.id,
- sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
+ oldstate->crtc->base.id,
+ sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
@@ -1250,7 +1251,7 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
}
-struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
+static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_hqvdp_destroy,
@@ -1289,7 +1290,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
return &hqvdp->plane.drm_plane;
}
-int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
+static int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 7d9aea805eab..4ddc58f7fe2e 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -16,12 +16,6 @@ static unsigned int bkg_color = 0x000000;
MODULE_PARM_DESC(bkgcolor, "Value of the background color 0xRRGGBB");
module_param_named(bkgcolor, bkg_color, int, 0644);
-/* Identity: G=Y , B=Cb , R=Cr */
-static const u32 mixerColorSpaceMatIdentity[] = {
- 0x10000000, 0x00000000, 0x10000000, 0x00001000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000
-};
-
/* regs offset */
#define GAM_MIXER_CTL 0x00
#define GAM_MIXER_BKC 0x04
@@ -358,22 +352,12 @@ int sti_mixer_set_plane_status(struct sti_mixer *mixer,
return 0;
}
-void sti_mixer_set_matrix(struct sti_mixer *mixer)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(mixerColorSpaceMatIdentity); i++)
- sti_mixer_reg_write(mixer, GAM_MIXER_MX0 + (i * 4),
- mixerColorSpaceMatIdentity[i]);
-}
-
struct sti_mixer *sti_mixer_create(struct device *dev,
struct drm_device *drm_dev,
int id,
void __iomem *baseaddr)
{
struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
- struct device_node *np = dev->of_node;
dev_dbg(dev, "%s\n", __func__);
if (!mixer) {
@@ -384,9 +368,6 @@ struct sti_mixer *sti_mixer_create(struct device *dev,
mixer->dev = dev;
mixer->id = id;
- if (of_device_is_compatible(np, "st,stih416-compositor"))
- sti_mixer_set_matrix(mixer);
-
DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
sti_mixer_to_str(mixer), mixer->regs);
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index e25995b35715..ad46d3558d91 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include "sti_crtc.h"
+#include "sti_drv.h"
#include "sti_vtg.h"
/* glue registers */
@@ -209,13 +210,11 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
* @tvout: tvout structure
* @reg: register to set
* @main_path: main or auxiliary path
- * @sel_input_logic_inverted: need to invert the logic
* @sel_input: selected_input (main/aux + conv)
*/
static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
int reg,
bool main_path,
- bool sel_input_logic_inverted,
enum sti_tvout_video_out_type video_out)
{
u32 sel_input;
@@ -236,8 +235,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
}
/* on stih407 chip the sel_input bypass mode logic is inverted */
- if (sel_input_logic_inverted)
- sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
+ sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
val &= ~TVO_VIP_SEL_INPUT_MASK;
val |= sel_input;
@@ -295,8 +293,6 @@ static void tvout_preformatter_set_matrix(struct sti_tvout *tvout,
*/
static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
{
- struct device_node *node = tvout->dev->of_node;
- bool sel_input_logic_inverted = false;
u32 tvo_in_vid_format;
int val, tmp;
@@ -334,16 +330,11 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
/* Set round mode (rounded to 8-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_DVO, TVO_VIP_RND_8BIT_ROUNDED);
- if (of_device_is_compatible(node, "st,stih407-tvout")) {
- /* Set input video format */
- tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
- TVO_IN_FMT_SIGNED);
- sel_input_logic_inverted = true;
- }
+ /* Set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
/* Input selection */
tvout_vip_set_sel_input(tvout, TVO_VIP_DVO, main_path,
- sel_input_logic_inverted,
STI_TVOUT_VIDEO_OUT_RGB);
}
@@ -356,8 +347,6 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
*/
static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
{
- struct device_node *node = tvout->dev->of_node;
- bool sel_input_logic_inverted = false;
u32 tvo_in_vid_format;
dev_dbg(tvout->dev, "%s\n", __func__);
@@ -390,16 +379,12 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
/* set round mode (rounded to 8-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
- if (of_device_is_compatible(node, "st,stih407-tvout")) {
- /* set input video format */
- tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
- TVO_IN_FMT_SIGNED);
- sel_input_logic_inverted = true;
- }
+ /* set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
/* input selection */
tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
- sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
+ STI_TVOUT_VIDEO_OUT_RGB);
}
/**
@@ -411,8 +396,6 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
*/
static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
{
- struct device_node *node = tvout->dev->of_node;
- bool sel_input_logic_inverted = false;
u32 tvo_in_vid_format;
int val;
@@ -448,16 +431,11 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
/* set round mode (rounded to 10-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
- if (of_device_is_compatible(node, "st,stih407-tvout")) {
- /* set input video format */
- tvout_vip_set_in_vid_fmt(tvout,
- tvo_in_vid_format, TVO_IN_FMT_SIGNED);
- sel_input_logic_inverted = true;
- }
+ /* Set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format, TVO_IN_FMT_SIGNED);
/* Input selection */
tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
- sel_input_logic_inverted,
STI_TVOUT_VIDEO_OUT_YUV);
/* power up HD DAC */
@@ -905,7 +883,6 @@ static int sti_tvout_remove(struct platform_device *pdev)
}
static const struct of_device_id tvout_of_match[] = {
- { .compatible = "st,stih416-tvout", },
{ .compatible = "st,stih407-tvout", },
{ /* end node */ }
};
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 47634a0251fc..2ad59892b57e 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -142,8 +142,8 @@ void sti_vid_commit(struct sti_vid *vid,
struct drm_display_mode *mode = &crtc->mode;
int dst_x = state->crtc_x;
int dst_y = state->crtc_y;
- int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ int dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
+ int dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
int src_h = state->src_h >> 16;
u32 val, ydo, xdo, yds, xds;
diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c
index b1eb0d77630d..cf7fe8a1db42 100644
--- a/drivers/gpu/drm/sti/sti_vtac.c
+++ b/drivers/gpu/drm/sti/sti_vtac.c
@@ -12,6 +12,8 @@
#include <drm/drmP.h>
+#include "sti_drv.h"
+
/* registers offset */
#define VTAC_CONFIG 0x00
#define VTAC_RX_FIFO_CONFIG 0x04
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 0bdc385eec17..a8882bdd0f8b 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -13,6 +13,7 @@
#include <drm/drmP.h>
+#include "sti_drv.h"
#include "sti_vtg.h"
#define VTG_MODE_MASTER 0
@@ -72,7 +73,7 @@
#define AWG_DELAY_ED (-8)
#define AWG_DELAY_SD (-7)
-LIST_HEAD(vtg_lookup);
+static LIST_HEAD(vtg_lookup);
/*
* STI VTG register offset structure
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 58cd55149827..d625a82a6e5f 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -9,5 +9,5 @@ sun4i-tcon-y += sun4i_dotclock.o
obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_backend.o
-
+obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 3ab560450a82..32c0584e3c35 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -83,8 +83,13 @@ void sun4i_backend_layer_enable(struct sun4i_backend *backend,
}
EXPORT_SYMBOL(sun4i_backend_layer_enable);
-static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
+static int sun4i_backend_drm_format_to_layer(struct drm_plane *plane,
+ u32 format, u32 *mode)
{
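+ /*
+ * Assumed rationale (not stated in the patch itself): the primary
+ * plane has nothing below it to blend against, so its per-pixel
+ * alpha is meaningless; treat ARGB8888 as opaque XRGB8888.
+ */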
+ if ((plane->type == DRM_PLANE_TYPE_PRIMARY) &&
+ (format == DRM_FORMAT_ARGB8888))
+ format = DRM_FORMAT_XRGB8888;
+
switch (format) {
case DRM_FORMAT_ARGB8888:
*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
@@ -164,7 +169,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
interlaced ? "on" : "off");
- ret = sun4i_backend_drm_format_to_layer(fb->pixel_format, &val);
+ ret = sun4i_backend_drm_format_to_layer(plane, fb->pixel_format, &val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
return val;
@@ -217,6 +222,51 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
}
EXPORT_SYMBOL(sun4i_backend_update_layer_buffer);
+static int sun4i_backend_init_sat(struct device *dev)
+{
+ struct sun4i_backend *backend = dev_get_drvdata(dev);
+ int ret;
+
+ backend->sat_reset = devm_reset_control_get(dev, "sat");
+ if (IS_ERR(backend->sat_reset)) {
+ dev_err(dev, "Couldn't get the SAT reset line\n");
+ return PTR_ERR(backend->sat_reset);
+ }
+
+ ret = reset_control_deassert(backend->sat_reset);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert the SAT reset line\n");
+ return ret;
+ }
+
+ backend->sat_clk = devm_clk_get(dev, "sat");
+ if (IS_ERR(backend->sat_clk)) {
+ dev_err(dev, "Couldn't get our SAT clock\n");
+ ret = PTR_ERR(backend->sat_clk);
+ goto err_assert_reset;
+ }
+
+ ret = clk_prepare_enable(backend->sat_clk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable the SAT clock\n");
+ goto err_assert_reset;
+ }
+
+ return 0;
+
+err_assert_reset:
+ reset_control_assert(backend->sat_reset);
+ return ret;
+}
+
+static int sun4i_backend_free_sat(struct device *dev)
+{
+ struct sun4i_backend *backend = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(backend->sat_clk);
+ reset_control_assert(backend->sat_reset);
+
+ return 0;
+}
+
static struct regmap_config sun4i_backend_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -243,10 +293,8 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs)) {
- dev_err(dev, "Couldn't map the backend registers\n");
+ if (IS_ERR(regs))
return PTR_ERR(regs);
- }
backend->regs = devm_regmap_init_mmio(dev, regs,
&sun4i_backend_regmap_config);
@@ -291,6 +339,15 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
}
clk_prepare_enable(backend->ram_clk);
+ if (of_device_is_compatible(dev->of_node,
+ "allwinner,sun8i-a33-display-backend")) {
+ ret = sun4i_backend_init_sat(dev);
+ if (ret) {
+ dev_err(dev, "Couldn't init SAT resources\n");
+ goto err_disable_ram_clk;
+ }
+ }
+
/* Reset the registers */
for (i = 0x800; i < 0x1000; i += 4)
regmap_write(backend->regs, i, 0);
@@ -306,6 +363,8 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
return 0;
+err_disable_ram_clk:
+ clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
@@ -320,6 +379,10 @@ static void sun4i_backend_unbind(struct device *dev, struct device *master,
{
struct sun4i_backend *backend = dev_get_drvdata(dev);
+ if (of_device_is_compatible(dev->of_node,
+ "allwinner,sun8i-a33-display-backend"))
+ sun4i_backend_free_sat(dev);
+
clk_disable_unprepare(backend->ram_clk);
clk_disable_unprepare(backend->mod_clk);
clk_disable_unprepare(backend->bus_clk);
@@ -345,6 +408,7 @@ static int sun4i_backend_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_backend_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-backend" },
+ { .compatible = "allwinner,sun8i-a33-display-backend" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
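The SAT hook-up above follows the kernel's usual acquire-in-order, release-in-reverse discipline: deassert the reset before enabling the clock, and unwind in the opposite order both on the error path and in free_sat(). A minimal userspace mock of that ordering — every function below is a stand-in, not kernel API:

#include <stdio.h>

static int reset_deasserted, clk_enabled;

static void reset_deassert(void) { reset_deasserted = 1; }
static void reset_assert(void)   { reset_deasserted = 0; }
static int  clk_enable(int fail) { if (fail) return -1; clk_enabled = 1; return 0; }
static void clk_disable(void)    { clk_enabled = 0; }

static int sat_init(int clk_fails)
{
    reset_deassert();
    if (clk_enable(clk_fails)) {
        reset_assert();     /* unwind: never leave the reset deasserted */
        return -1;
    }
    return 0;
}

static void sat_free(void)
{
    clk_disable();          /* reverse order of sat_init() */
    reset_assert();
}

int main(void)
{
    sat_init(1);
    printf("after failed init: reset=%d clk=%d\n",
           reset_deasserted, clk_enabled);
    return 0;
}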
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 7070bb3434e5..83e63cc702b4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -52,8 +52,8 @@
#define SUN4I_BACKEND_LAYFB_L32ADD_REG(l) (0x850 + (0x4 * (l)))
#define SUN4I_BACKEND_LAYFB_H4ADD_REG 0x860
-#define SUN4I_BACKEND_LAYFB_H4ADD_MSK(l) GENMASK(3 + ((l) * 8), 0)
-#define SUN4I_BACKEND_LAYFB_H4ADD(l, val) ((val) << ((l) * 8))
+#define SUN4I_BACKEND_LAYFB_H4ADD_MSK(l) GENMASK(3 + ((l) * 8), (l) * 8)
+#define SUN4I_BACKEND_LAYFB_H4ADD(l, val) ((val) << ((l) * 8))
#define SUN4I_BACKEND_REGBUFFCTL_REG 0x870
#define SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS BIT(1)
@@ -146,6 +146,9 @@ struct sun4i_backend {
struct clk *bus_clk;
struct clk *mod_clk;
struct clk *ram_clk;
+
+ struct clk *sat_clk;
+ struct reset_control *sat_reset;
};
void sun4i_backend_apply_color_correction(struct sun4i_backend *backend);
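The H4ADD mask fix above matters because the old macro ran from bit 0 upward: updating layer 1's high-address nibble (bits 11:8) would also clear layer 0's field. A runnable userspace check, with GENMASK() reimplemented here on the assumption of a 64-bit unsigned long:

#include <stdio.h>

#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
    for (int l = 0; l < 4; l++)
        printf("layer %d: old mask 0x%08lx, new mask 0x%08lx\n", l,
               GENMASK(3 + l * 8, 0),       /* old: covers lower layers too */
               GENMASK(3 + l * 8, l * 8));  /* new: only this layer's nibble */
    return 0;
}

For layer 1 this prints 0x00000fff against 0x00000f00 — the old mask clobbered layer 0's bits on every regmap update.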
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index 5b3463197c48..d401156490f3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include "sun4i_tcon.h"
+#include "sun4i_dotclock.h"
struct sun4i_dclk {
struct clk_hw hw;
@@ -61,7 +62,7 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
regmap_read(dclk->regmap, SUN4I_TCON0_DCLK_REG, &val);
val >>= SUN4I_TCON0_DCLK_DIV_SHIFT;
- val &= SUN4I_TCON0_DCLK_DIV_WIDTH;
+ val &= (1 << SUN4I_TCON0_DCLK_DIV_WIDTH) - 1;
if (!val)
val = 1;
@@ -76,7 +77,7 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
u8 best_div = 1;
int i;
- for (i = 6; i < 127; i++) {
+ for (i = 6; i <= 127; i++) {
unsigned long ideal = rate * i;
unsigned long rounded;
@@ -89,7 +90,8 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
goto out;
}
- if ((rounded < ideal) && (rounded > best_parent)) {
+ if (abs(rate - rounded / i) <
+ abs(rate - best_parent / best_div)) {
best_parent = rounded;
best_div = i;
}
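Two fixes land in the dotclock code: the divider field is now masked with (1 << width) - 1 instead of the raw width value, and round_rate picks the divider whose output is closest to the target (divider 127 included) rather than the largest parent rate below the ideal. A standalone sketch of the new search, where round_parent() fakes clk_hw_round_rate() on the parent clock:

#include <stdio.h>
#include <stdlib.h>

static unsigned long round_parent(unsigned long rate)
{
    return rate - rate % 7000;  /* made-up parent clock granularity */
}

int main(void)
{
    unsigned long target = 33000000;    /* 33 MHz pixel clock */
    unsigned long best_parent = 0;
    unsigned int best_div = 1;

    for (unsigned int div = 6; div <= 127; div++) { /* 127 now included */
        unsigned long ideal = target * div;
        unsigned long rounded = round_parent(ideal);

        if (rounded == ideal) {         /* exact match wins immediately */
            best_parent = rounded;
            best_div = div;
            break;
        }

        if (labs((long)(target - rounded / div)) <
            labs((long)(target - best_parent / best_div))) {
            best_parent = rounded;
            best_div = div;
        }
    }

    printf("parent %lu Hz / %u = %lu Hz\n",
           best_parent, best_div, best_parent / best_div);
    return 0;
}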
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 7092daaf6c43..70e9fd59c5a2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -109,7 +110,7 @@ static void sun4i_remove_framebuffers(void)
ap->ranges[0].base = 0;
ap->ranges[0].size = ~0;
- remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
kfree(ap);
}
@@ -120,8 +121,8 @@ static int sun4i_drv_bind(struct device *dev)
int ret;
drm = drm_dev_alloc(&sun4i_drv_driver, dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
if (!drv) {
@@ -141,9 +142,9 @@ static int sun4i_drv_bind(struct device *dev)
/* Create our layers */
drv->layers = sun4i_layers_init(drm);
- if (!drv->layers) {
+ if (IS_ERR(drv->layers)) {
dev_err(drm->dev, "Couldn't create the planes\n");
- ret = -EINVAL;
+ ret = PTR_ERR(drv->layers);
goto free_drm;
}
@@ -199,13 +200,14 @@ static const struct component_master_ops sun4i_drv_master_ops = {
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
- return of_device_is_compatible(node,
- "allwinner,sun5i-a13-display-frontend");
+ return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
- return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon");
+ return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun8i-a33-tcon");
}
static int compare_of(struct device *dev, void *data)
@@ -257,8 +259,8 @@ static int sun4i_drv_add_endpoints(struct device *dev,
}
/*
- * If the node is our TCON, the first port is used for our
- * panel, and will not be part of the
+ * If the node is our TCON, the first port is used for our
+ * panel or bridge, and will not be part of the
* component framework.
*/
if (sun4i_drv_node_is_tcon(node)) {
@@ -320,6 +322,7 @@ static int sun4i_drv_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-display-engine" },
+ { .compatible = "allwinner,sun8i-a33-display-engine" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
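drm_dev_alloc() no longer returns NULL on failure; it encodes the errno in the pointer itself, which is why the NULL checks above had to become IS_ERR()/PTR_ERR(). A userspace re-creation of the convention (macros modelled on include/linux/err.h):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO    4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *fake_dev_alloc(int fail)
{
    return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
    void *drm = fake_dev_alloc(1);

    if (IS_ERR(drm))    /* a NULL test would miss this entirely */
        printf("alloc failed: %ld\n", PTR_ERR(drm));
    return 0;
}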
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 70688febd7ac..8b6ce619ad81 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -15,6 +15,7 @@
#include <drm/drmP.h>
#include "sun4i_drv.h"
+#include "sun4i_framebuffer.h"
static void sun4i_de_output_poll_changed(struct drm_device *drm)
{
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 068ab806309b..f0035bf5efea 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -19,7 +19,12 @@
#include "sun4i_drv.h"
#include "sun4i_layer.h"
-#define SUN4I_NUM_LAYERS 2
+struct sun4i_plane_desc {
+ enum drm_plane_type type;
+ u8 pipe;
+ const uint32_t *formats;
+ uint32_t nformats;
+};
static int sun4i_backend_layer_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
@@ -65,14 +70,35 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
.update_plane = drm_atomic_helper_update_plane,
};
-static const uint32_t sun4i_backend_layer_formats[] = {
+static const uint32_t sun4i_backend_layer_formats_primary[] = {
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
+};
+
+static const uint32_t sun4i_backend_layer_formats_overlay[] = {
+ DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const struct sun4i_plane_desc sun4i_backend_planes[] = {
+ {
+ .type = DRM_PLANE_TYPE_PRIMARY,
+ .pipe = 0,
+ .formats = sun4i_backend_layer_formats_primary,
+ .nformats = ARRAY_SIZE(sun4i_backend_layer_formats_primary),
+ },
+ {
+ .type = DRM_PLANE_TYPE_OVERLAY,
+ .pipe = 1,
+ .formats = sun4i_backend_layer_formats_overlay,
+ .nformats = ARRAY_SIZE(sun4i_backend_layer_formats_overlay),
+ },
};
static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
- enum drm_plane_type type)
+ const struct sun4i_plane_desc *plane)
{
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_layer *layer;
@@ -84,10 +110,8 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
ret = drm_universal_plane_init(drm, &layer->plane, BIT(0),
&sun4i_backend_layer_funcs,
- sun4i_backend_layer_formats,
- ARRAY_SIZE(sun4i_backend_layer_formats),
- type,
- NULL);
+ plane->formats, plane->nformats,
+ plane->type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
@@ -97,7 +121,7 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
&sun4i_backend_layer_helper_funcs);
layer->drv = drv;
- if (type == DRM_PLANE_TYPE_PRIMARY)
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
drv->primary = &layer->plane;
return layer;
@@ -109,8 +133,8 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
struct sun4i_layer **layers;
int i;
- layers = devm_kcalloc(drm->dev, SUN4I_NUM_LAYERS, sizeof(**layers),
- GFP_KERNEL);
+ layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes),
+ sizeof(**layers), GFP_KERNEL);
if (!layers)
return ERR_PTR(-ENOMEM);
@@ -135,13 +159,11 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
* SoCs that support it, sprites could fill the need for more
* layers.
*/
- for (i = 0; i < SUN4I_NUM_LAYERS; i++) {
- enum drm_plane_type type = (i == 0)
- ? DRM_PLANE_TYPE_PRIMARY
- : DRM_PLANE_TYPE_OVERLAY;
+ for (i = 0; i < ARRAY_SIZE(sun4i_backend_planes); i++) {
+ const struct sun4i_plane_desc *plane = &sun4i_backend_planes[i];
struct sun4i_layer *layer = layers[i];
- layer = sun4i_layer_init_one(drm, type);
+ layer = sun4i_layer_init_one(drm, plane);
if (IS_ERR(layer)) {
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
@@ -149,10 +171,10 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
};
DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n",
- i ? "overlay" : "primary", i);
+ i ? "overlay" : "primary", plane->pipe);
regmap_update_bits(drv->backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK,
- SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(i));
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(plane->pipe));
layer->id = i;
};
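A payoff of replacing the SUN4I_NUM_LAYERS constant with a descriptor table: growing the plane set becomes a data-only change. A hypothetical extra entry (pipe assignment and format list are illustrative, not from this patch):

static const struct sun4i_plane_desc extra_overlay = {
    .type     = DRM_PLANE_TYPE_OVERLAY,
    .pipe     = 1,
    .formats  = sun4i_backend_layer_formats_overlay,
    .nformats = ARRAY_SIZE(sun4i_backend_layer_formats_overlay),
};

Appended to sun4i_backend_planes[], it would be picked up by the ARRAY_SIZE-driven allocation and init loop with no further code changes.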
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index f5bbac6efb4c..d198ad7e5323 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -19,6 +19,7 @@
#include "sun4i_drv.h"
#include "sun4i_tcon.h"
+#include "sun4i_rgb.h"
struct sun4i_rgb {
struct drm_connector connector;
@@ -151,8 +152,13 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- drm_panel_enable(tcon->panel);
+ if (!IS_ERR(tcon->panel))
+ drm_panel_prepare(tcon->panel);
+
sun4i_tcon_channel_enable(tcon, 0);
+
+ if (!IS_ERR(tcon->panel))
+ drm_panel_enable(tcon->panel);
}
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
@@ -163,8 +169,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling RGB output\n");
+ if (!IS_ERR(tcon->panel))
+ drm_panel_disable(tcon->panel);
+
sun4i_tcon_channel_disable(tcon, 0);
- drm_panel_disable(tcon->panel);
+
+ if (!IS_ERR(tcon->panel))
+ drm_panel_unprepare(tcon->panel);
}
static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
@@ -203,17 +214,22 @@ int sun4i_rgb_init(struct drm_device *drm)
{
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_tcon *tcon = drv->tcon;
+ struct drm_encoder *encoder;
struct sun4i_rgb *rgb;
int ret;
- /* If we don't have a panel, there's no point in going on */
- if (IS_ERR(tcon->panel))
- return -ENODEV;
-
rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
if (!rgb)
return -ENOMEM;
rgb->drv = drv;
+ encoder = &rgb->encoder;
+
+ tcon->panel = sun4i_tcon_find_panel(tcon->dev->of_node);
+ encoder->bridge = sun4i_tcon_find_bridge(tcon->dev->of_node);
+ if (IS_ERR(tcon->panel) && IS_ERR(encoder->bridge)) {
+ dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
+ return 0;
+ }
drm_encoder_helper_add(&rgb->encoder,
&sun4i_rgb_enc_helper_funcs);
@@ -230,19 +246,38 @@ int sun4i_rgb_init(struct drm_device *drm)
/* The RGB encoder can only work with the TCON channel 0 */
rgb->encoder.possible_crtcs = BIT(0);
- drm_connector_helper_add(&rgb->connector,
- &sun4i_rgb_con_helper_funcs);
- ret = drm_connector_init(drm, &rgb->connector,
- &sun4i_rgb_con_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- if (ret) {
- dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
- goto err_cleanup_connector;
+ if (!IS_ERR(tcon->panel)) {
+ drm_connector_helper_add(&rgb->connector,
+ &sun4i_rgb_con_helper_funcs);
+ ret = drm_connector_init(drm, &rgb->connector,
+ &sun4i_rgb_con_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
+ goto err_cleanup_connector;
+ }
+
+ drm_mode_connector_attach_encoder(&rgb->connector,
+ &rgb->encoder);
+
+ ret = drm_panel_attach(tcon->panel, &rgb->connector);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't attach our panel\n");
+ goto err_cleanup_connector;
+ }
}
- drm_mode_connector_attach_encoder(&rgb->connector, &rgb->encoder);
+ if (!IS_ERR(encoder->bridge)) {
+ encoder->bridge->encoder = &rgb->encoder;
- drm_panel_attach(tcon->panel, &rgb->connector);
+ ret = drm_bridge_attach(drm, encoder->bridge);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't attach our bridge\n");
+ goto err_cleanup_connector;
+ }
+ } else {
+ encoder->bridge = NULL;
+ }
return 0;
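The reordering in the encoder hooks is deliberate: the panel is prepared (powered) before the TCON starts pushing pixels, and the backlight only comes on once the stream is valid; disable walks the same steps in reverse. A userspace mock of the enable sequence — every function below is a stand-in:

#include <stdio.h>

static void panel_prepare(void) { printf("panel: power rails up\n"); }
static void tcon_enable(void)   { printf("tcon: channel 0 scanning out\n"); }
static void panel_enable(void)  { printf("panel: backlight on\n"); }

int main(void)
{
    panel_prepare();    /* before pixels flow: panel input is defined */
    tcon_enable();
    panel_enable();     /* after pixels are valid: no visible garbage */
    return 0;
}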
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 652385f09735..cadacb517f95 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -59,11 +59,13 @@ void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
SUN4I_TCON0_CTL_TCON_ENABLE, 0);
clk_disable_unprepare(tcon->dclk);
- } else if (channel == 1) {
- regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
- SUN4I_TCON1_CTL_TCON_ENABLE, 0);
- clk_disable_unprepare(tcon->sclk1);
+ return;
}
+
+ WARN_ON(!tcon->has_channel_1);
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_TCON_ENABLE, 0);
+ clk_disable_unprepare(tcon->sclk1);
}
EXPORT_SYMBOL(sun4i_tcon_channel_disable);
@@ -75,12 +77,14 @@ void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
SUN4I_TCON0_CTL_TCON_ENABLE,
SUN4I_TCON0_CTL_TCON_ENABLE);
clk_prepare_enable(tcon->dclk);
- } else if (channel == 1) {
- regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
- SUN4I_TCON1_CTL_TCON_ENABLE,
- SUN4I_TCON1_CTL_TCON_ENABLE);
- clk_prepare_enable(tcon->sclk1);
+ return;
}
+
+ WARN_ON(!tcon->has_channel_1);
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_TCON_ENABLE,
+ SUN4I_TCON1_CTL_TCON_ENABLE);
+ clk_prepare_enable(tcon->sclk1);
}
EXPORT_SYMBOL(sun4i_tcon_channel_enable);
@@ -198,6 +202,8 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
u8 clk_delay;
u32 val;
+ WARN_ON(!tcon->has_channel_1);
+
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
@@ -321,10 +327,12 @@ static int sun4i_tcon_init_clocks(struct device *dev,
return PTR_ERR(tcon->sclk0);
}
- tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
- if (IS_ERR(tcon->sclk1)) {
- dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
- return PTR_ERR(tcon->sclk1);
+ if (tcon->has_channel_1) {
+ tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+ if (IS_ERR(tcon->sclk1)) {
+ dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
+ return PTR_ERR(tcon->sclk1);
+ }
}
return sun4i_dclk_create(dev, tcon);
@@ -374,10 +382,8 @@ static int sun4i_tcon_init_regmap(struct device *dev,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs)) {
- dev_err(dev, "Couldn't map the TCON registers\n");
+ if (IS_ERR(regs))
return PTR_ERR(regs);
- }
tcon->regs = devm_regmap_init_mmio(dev, regs,
&sun4i_tcon_regmap_config);
@@ -398,7 +404,7 @@ static int sun4i_tcon_init_regmap(struct device *dev,
return 0;
}
-static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
+struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
{
struct device_node *port, *remote, *child;
struct device_node *end_node = NULL;
@@ -432,6 +438,40 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
}
+struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node)
+{
+ struct device_node *port, *remote, *child;
+ struct device_node *end_node = NULL;
+
+ /* Inputs are listed first, then outputs */
+ port = of_graph_get_port_by_id(node, 1);
+
+ /*
+ * Our first output is the RGB interface where the panel or
+ * bridge will be connected.
+ */
+ for_each_child_of_node(port, child) {
+ u32 reg;
+
+ of_property_read_u32(child, "reg", &reg);
+ if (reg == 0)
+ end_node = child;
+ }
+
+ if (!end_node) {
+ DRM_DEBUG_DRIVER("Missing bridge endpoint\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ remote = of_graph_get_remote_port_parent(end_node);
+ if (!remote) {
+ DRM_DEBUG_DRIVER("Unable to parse remote node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return of_drm_find_bridge(remote) ?: ERR_PTR(-EPROBE_DEFER);
+}
+
static int sun4i_tcon_bind(struct device *dev, struct device *master,
void *data)
{
@@ -446,9 +486,15 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
dev_set_drvdata(dev, tcon);
drv->tcon = tcon;
tcon->drm = drm;
+ tcon->dev = dev;
- if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon"))
+ if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon")) {
tcon->has_mux = true;
+ tcon->has_channel_1 = true;
+ } else {
+ tcon->has_mux = false;
+ tcon->has_channel_1 = false;
+ }
tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
if (IS_ERR(tcon->lcd_rst)) {
@@ -484,12 +530,6 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
goto err_free_clocks;
}
- tcon->panel = sun4i_tcon_find_panel(dev->of_node);
- if (IS_ERR(tcon->panel)) {
- dev_info(dev, "No panel found... RGB output disabled\n");
- return 0;
- }
-
ret = sun4i_rgb_init(drm);
if (ret < 0)
goto err_free_clocks;
@@ -519,19 +559,22 @@ static struct component_ops sun4i_tcon_ops = {
static int sun4i_tcon_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ struct drm_bridge *bridge;
struct drm_panel *panel;
/*
- * The panel is not ready.
+ * Neither the bridge nor the panel is ready.
* Defer the probe.
*/
panel = sun4i_tcon_find_panel(node);
+ bridge = sun4i_tcon_find_bridge(node);
/*
* If we don't have a panel endpoint, just go on
*/
- if (PTR_ERR(panel) == -EPROBE_DEFER) {
- DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+ if ((PTR_ERR(panel) == -EPROBE_DEFER) &&
+ (PTR_ERR(bridge) == -EPROBE_DEFER)) {
+ DRM_DEBUG_DRIVER("Still waiting for our panel/bridge. Deferring...\n");
return -EPROBE_DEFER;
}
@@ -547,6 +590,7 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
static const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun5i-a13-tcon" },
+ { .compatible = "allwinner,sun8i-a33-tcon" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
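The bind hook above keys both has_mux and has_channel_1 off one compatible string. An alternative worth noting (a sketch, not what this patch does) is to hang a quirks struct off the of_device_id .data pointer, so new SoCs only add table entries:

struct sun4i_tcon_quirks {
    bool has_mux;
    bool has_channel_1;
};

static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
    .has_mux       = true,
    .has_channel_1 = true,
};

static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
    /* both flags default to false */
};

static const struct of_device_id sun4i_tcon_of_table[] = {
    { .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
    { .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
    { }
};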
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 0e0b11db401b..12bd48925f4d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -143,6 +143,7 @@
#define SUN4I_TCON_MAX_CHANNELS 2
struct sun4i_tcon {
+ struct device *dev;
struct drm_device *drm;
struct regmap *regs;
@@ -163,8 +164,13 @@ struct sun4i_tcon {
bool has_mux;
struct drm_panel *panel;
+
+ bool has_channel_1;
};
+struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
+struct drm_panel *sun4i_tcon_find_panel(struct device_node *node);
+
/* Global Control */
void sun4i_tcon_disable(struct sun4i_tcon *tcon);
void sun4i_tcon_enable(struct sun4i_tcon *tcon);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index b84147896294..1dd3d9eabf2e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -161,10 +161,10 @@ struct tv_mode {
bool dac3_en;
bool dac_bit25_en;
- struct color_gains *color_gains;
- struct burst_levels *burst_levels;
- struct video_levels *video_levels;
- struct resync_parameters *resync_params;
+ const struct color_gains *color_gains;
+ const struct burst_levels *burst_levels;
+ const struct video_levels *video_levels;
+ const struct resync_parameters *resync_params;
};
struct sun4i_tv {
@@ -178,39 +178,39 @@ struct sun4i_tv {
struct sun4i_drv *drv;
};
-struct video_levels ntsc_video_levels = {
+static const struct video_levels ntsc_video_levels = {
.black = 282, .blank = 240,
};
-struct video_levels pal_video_levels = {
+static const struct video_levels pal_video_levels = {
.black = 252, .blank = 252,
};
-struct burst_levels ntsc_burst_levels = {
+static const struct burst_levels ntsc_burst_levels = {
.cb = 79, .cr = 0,
};
-struct burst_levels pal_burst_levels = {
+static const struct burst_levels pal_burst_levels = {
.cb = 40, .cr = 40,
};
-struct color_gains ntsc_color_gains = {
+static const struct color_gains ntsc_color_gains = {
.cb = 160, .cr = 160,
};
-struct color_gains pal_color_gains = {
+static const struct color_gains pal_color_gains = {
.cb = 224, .cr = 224,
};
-struct resync_parameters ntsc_resync_parameters = {
+static const struct resync_parameters ntsc_resync_parameters = {
.field = false, .line = 14, .pixel = 12,
};
-struct resync_parameters pal_resync_parameters = {
+static const struct resync_parameters pal_resync_parameters = {
.field = true, .line = 13, .pixel = 12,
};
-struct tv_mode tv_modes[] = {
+static const struct tv_mode tv_modes[] = {
{
.name = "NTSC",
.mode = SUN4I_TVE_CFG0_RES_480i,
@@ -289,13 +289,13 @@ drm_connector_to_sun4i_tv(struct drm_connector *connector)
* So far, it doesn't seem to be preserved when the mode is passed by
* to mode_set for some reason.
*/
-static struct tv_mode *sun4i_tv_find_tv_by_mode(struct drm_display_mode *mode)
+static const struct tv_mode *sun4i_tv_find_tv_by_mode(const struct drm_display_mode *mode)
{
int i;
/* First try to identify the mode by name */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- struct tv_mode *tv_mode = &tv_modes[i];
+ const struct tv_mode *tv_mode = &tv_modes[i];
DRM_DEBUG_DRIVER("Comparing mode %s vs %s",
mode->name, tv_mode->name);
@@ -306,7 +306,7 @@ static struct tv_mode *sun4i_tv_find_tv_by_mode(struct drm_display_mode *mode)
/* Then by number of lines */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- struct tv_mode *tv_mode = &tv_modes[i];
+ const struct tv_mode *tv_mode = &tv_modes[i];
DRM_DEBUG_DRIVER("Comparing mode %s vs %s (X: %d vs %d)",
mode->name, tv_mode->name,
@@ -319,7 +319,7 @@ static struct tv_mode *sun4i_tv_find_tv_by_mode(struct drm_display_mode *mode)
return NULL;
}
-static void sun4i_tv_mode_to_drm_mode(struct tv_mode *tv_mode,
+static void sun4i_tv_mode_to_drm_mode(const struct tv_mode *tv_mode,
struct drm_display_mode *mode)
{
DRM_DEBUG_DRIVER("Creating mode %s\n", mode->name);
@@ -386,7 +386,7 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_drv *drv = tv->drv;
struct sun4i_tcon *tcon = drv->tcon;
- struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
+ const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
sun4i_tcon1_mode_set(tcon, mode);
@@ -507,8 +507,14 @@ static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
int i;
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- struct drm_display_mode *mode = drm_mode_create(connector->dev);
- struct tv_mode *tv_mode = &tv_modes[i];
+ struct drm_display_mode *mode;
+ const struct tv_mode *tv_mode = &tv_modes[i];
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_ERROR("Failed to create a new display mode\n");
+ return 0;
+ }
strcpy(mode->name, tv_mode->name);
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
new file mode 100644
index 000000000000..bf6d846d8132
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2016 Free Electrons
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+struct sun6i_drc {
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct reset_control *reset;
+};
+
+static int sun6i_drc_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun6i_drc *drc;
+ int ret;
+
+ drc = devm_kzalloc(dev, sizeof(*drc), GFP_KERNEL);
+ if (!drc)
+ return -ENOMEM;
+ dev_set_drvdata(dev, drc);
+
+ drc->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(drc->reset)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(drc->reset);
+ }
+
+ ret = reset_control_deassert(drc->reset);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert our reset line\n");
+ return ret;
+ }
+
+ drc->bus_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(drc->bus_clk)) {
+ dev_err(dev, "Couldn't get our bus clock\n");
+ ret = PTR_ERR(drc->bus_clk);
+ goto err_assert_reset;
+ }
+ clk_prepare_enable(drc->bus_clk);
+
+ drc->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(drc->mod_clk)) {
+ dev_err(dev, "Couldn't get our mod clock\n");
+ ret = PTR_ERR(drc->mod_clk);
+ goto err_disable_bus_clk;
+ }
+ clk_prepare_enable(drc->mod_clk);
+
+ return 0;
+
+err_disable_bus_clk:
+ clk_disable_unprepare(drc->bus_clk);
+err_assert_reset:
+ reset_control_assert(drc->reset);
+ return ret;
+}
+
+static void sun6i_drc_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun6i_drc *drc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(drc->mod_clk);
+ clk_disable_unprepare(drc->bus_clk);
+ reset_control_assert(drc->reset);
+}
+
+static const struct component_ops sun6i_drc_ops = {
+ .bind = sun6i_drc_bind,
+ .unbind = sun6i_drc_unbind,
+};
+
+static int sun6i_drc_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &sun6i_drc_ops);
+}
+
+static int sun6i_drc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sun6i_drc_ops);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_drc_of_table[] = {
+ { .compatible = "allwinner,sun8i-a33-drc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
+
+static struct platform_driver sun6i_drc_platform_driver = {
+ .probe = sun6i_drc_probe,
+ .remove = sun6i_drc_remove,
+ .driver = {
+ .name = "sun6i-drc",
+ .of_match_table = sun6i_drc_of_table,
+ },
+};
+module_platform_driver(sun6i_drc_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 Dynamic Range Control (DRC) Driver");
+MODULE_LICENSE("GPL");
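One loose end in the bind path above: both clk_prepare_enable() calls discard their return value. A stricter drop-in for the bus-clock enable (a sketch, reusing the function's existing ret variable and error labels):

    ret = clk_prepare_enable(drc->bus_clk);
    if (ret) {
        dev_err(dev, "Couldn't enable our bus clock\n");
        goto err_assert_reset;
    }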
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index fab5ebcb0fef..f418892b0c71 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -56,6 +56,7 @@ static const struct file_operations tdfx_driver_fops = {
};
static struct drm_driver driver = {
+ .driver_features = DRIVER_LEGACY,
.set_busid = drm_pci_set_busid,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 8495bd01b544..4010d69cbd08 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -480,17 +480,6 @@ static const struct drm_plane_funcs tegra_primary_plane_funcs = {
.atomic_destroy_state = tegra_plane_atomic_destroy_state,
};
-static int tegra_plane_prepare_fb(struct drm_plane *plane,
- const struct drm_plane_state *new_state)
-{
- return 0;
-}
-
-static void tegra_plane_cleanup_fb(struct drm_plane *plane,
- const struct drm_plane_state *old_fb)
-{
-}
-
static int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state)
{
@@ -591,7 +580,14 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
window.base[i] = bo->paddr + fb->offsets[i];
- window.stride[i] = fb->pitches[i];
+
+ /*
+ * Tegra uses a shared stride for UV planes. Framebuffers are
+ * already checked for this in the tegra_plane_atomic_check()
+ * function, so it's safe to ignore the V-plane pitch here.
+ */
+ if (i < 2)
+ window.stride[i] = fb->pitches[i];
}
tegra_dc_setup_window(dc, p->index, &window);
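The i < 2 guard encodes a hardware constraint: Tegra programs one luma stride and one shared chroma stride, so a three-plane YUV framebuffer's V pitch is never written. Illustration with made-up pitch values:

#include <stdio.h>

int main(void)
{
    unsigned int pitches[3] = { 1920, 960, 960 };   /* Y, U, V */
    unsigned int stride[2];

    for (int i = 0; i < 3; i++)
        if (i < 2)          /* the V plane reuses the U stride */
            stride[i] = pitches[i];

    printf("luma stride %u, shared chroma stride %u\n",
           stride[0], stride[1]);
    return 0;
}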
@@ -624,8 +620,6 @@ static void tegra_plane_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = {
- .prepare_fb = tegra_plane_prepare_fb,
- .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_plane_atomic_check,
.atomic_update = tegra_plane_atomic_update,
.atomic_disable = tegra_plane_atomic_disable,
@@ -796,8 +790,6 @@ static const struct drm_plane_funcs tegra_cursor_plane_funcs = {
};
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
- .prepare_fb = tegra_plane_prepare_fb,
- .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
@@ -866,8 +858,6 @@ static const uint32_t tegra_overlay_plane_formats[] = {
};
static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = {
- .prepare_fb = tegra_plane_prepare_fb,
- .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_plane_atomic_check,
.atomic_update = tegra_plane_atomic_update,
.atomic_disable = tegra_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 755264d9db22..8ab47b502d83 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -57,7 +57,8 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
- drm_atomic_helper_commit_planes(drm, state, true);
+ drm_atomic_helper_commit_planes(drm, state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -982,8 +983,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
int err;
drm = drm_dev_alloc(driver, &dev->dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
dev_set_drvdata(&dev->dev, drm);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index aa60d9909ea2..95e622e31931 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -613,7 +613,7 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
exp_info.flags = flags;
exp_info.priv = gem;
- return dma_buf_export(&exp_info);
+ return drm_gem_dmabuf_export(drm, &exp_info);
}
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index deeca4869d94..6f675175a9e5 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
tilcdc_slave_compat.dtb.o
tilcdc-y := \
+ tilcdc_plane.o \
tilcdc_crtc.o \
tilcdc_tfp410.o \
tilcdc_panel.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 107c8bd04f6d..52ebe8fc1784 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -15,8 +15,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "drm_flip_work.h"
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
+#include <linux/workqueue.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -26,13 +30,16 @@
struct tilcdc_crtc {
struct drm_crtc base;
+ struct drm_plane primary;
const struct tilcdc_panel_info *info;
struct drm_pending_vblank_event *event;
- int dpms;
+ bool enabled;
wait_queue_head_t frame_done_wq;
bool frame_done;
spinlock_t irq_lock;
+ unsigned int lcd_fck_rate;
+
ktime_t last_vblank;
struct drm_framebuffer *curr_fb;
@@ -67,6 +74,7 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
struct drm_gem_cma_object *gem;
unsigned int depth, bpp;
dma_addr_t start, end;
+ u64 dma_base_and_ceiling;
drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
gem = drm_fb_cma_get_gem_obj(fb, 0);
@@ -77,8 +85,13 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
end = start + (crtc->mode.vdisplay * fb->pitches[0]);
- tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, start);
- tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG, end);
+ /* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
+ * with a single instruction, if available. This should make it less
+ * likely that LCDC would fetch the DMA addresses in the middle of
+ * an update.
+ */
+ dma_base_and_ceiling = (u64)(end - 1) << 32 | start;
+ tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
if (tilcdc_crtc->curr_fb)
drm_flip_work_queue(&tilcdc_crtc->unref_work,
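Packing base and ceiling into one 64-bit store closes the window where LCDC could latch a base address from the new frame together with a ceiling from the old one. The packing itself, as a runnable userspace check (addresses made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t start = 0x80000000u;
    uint32_t end = start + 480 * 1920;  /* vdisplay * pitch */
    uint64_t dma_base_and_ceiling = (uint64_t)(end - 1) << 32 | start;

    printf("base=0x%08x ceiling=0x%08x packed=0x%016llx\n",
           start, end - 1, (unsigned long long)dma_base_and_ceiling);
    return 0;
}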
@@ -87,6 +100,43 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
tilcdc_crtc->curr_fb = fb;
}
+static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ tilcdc_clear_irqstatus(dev, 0xffffffff);
+
+ if (priv->rev == 1) {
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_UNDERFLOW_INT_ENA);
+ tilcdc_set(dev, LCDC_DMA_CTRL_REG,
+ LCDC_V1_END_OF_FRAME_INT_ENA);
+ } else {
+ tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
+ LCDC_V2_UNDERFLOW_INT_ENA |
+ LCDC_V2_END_OF_FRAME0_INT_ENA |
+ LCDC_FRAME_DONE | LCDC_SYNC_LOST);
+ }
+}
+
+static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ /* disable irqs that we might have enabled: */
+ if (priv->rev == 1) {
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
+ LCDC_V1_END_OF_FRAME_INT_ENA);
+ } else {
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+ LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
+ LCDC_V2_END_OF_FRAME0_INT_ENA |
+ LCDC_FRAME_DONE | LCDC_SYNC_LOST);
+ }
+}
+
static void reset(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -100,66 +150,112 @@ static void reset(struct drm_crtc *crtc)
tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}
-static void start(struct drm_crtc *crtc)
+static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ if (tilcdc_crtc->enabled)
+ return;
+
+ pm_runtime_get_sync(dev->dev);
reset(crtc);
+ tilcdc_crtc_enable_irqs(dev);
+
tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ drm_crtc_vblank_on(crtc);
+
+ tilcdc_crtc->enabled = true;
}
-static void stop(struct drm_crtc *crtc)
+void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ if (!tilcdc_crtc->enabled)
+ return;
+ tilcdc_crtc->frame_done = false;
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ /*
+ * if necessary wait for framedone irq which will still come
+ * before putting things to sleep..
+ */
+ if (priv->rev == 2) {
+ int ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
+ tilcdc_crtc->frame_done,
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_err(dev->dev, "%s: timeout waiting for framedone\n",
+ __func__);
+ }
+
+ drm_crtc_vblank_off(crtc);
+
+ tilcdc_crtc_disable_irqs(dev);
+
+ pm_runtime_put_sync(dev->dev);
+
+ if (tilcdc_crtc->next_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->next_fb);
+ tilcdc_crtc->next_fb = NULL;
+ }
+
+ if (tilcdc_crtc->curr_fb) {
+ drm_flip_work_queue(&tilcdc_crtc->unref_work,
+ tilcdc_crtc->curr_fb);
+ tilcdc_crtc->curr_fb = NULL;
+ }
+
+ drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
+ tilcdc_crtc->last_vblank = ktime_set(0, 0);
+
+ tilcdc_crtc->enabled = false;
+}
+
+static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
+{
+ return crtc->state && crtc->state->enable && crtc->state->active;
}
static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+
+ drm_modeset_lock_crtc(crtc, NULL);
+ tilcdc_crtc_disable(crtc);
+ drm_modeset_unlock_crtc(crtc);
- tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ flush_workqueue(priv->wq);
of_node_put(crtc->port);
drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
}
-static int tilcdc_verify_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb)
-{
- struct drm_device *dev = crtc->dev;
- unsigned int depth, bpp;
-
- drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
-
- if (fb->pitches[0] != crtc->mode.hdisplay * bpp / 8) {
- dev_err(dev->dev,
- "Invalid pitch: fb and crtc widths must be the same");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
+int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+ struct drm_pending_vblank_event *event)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- int r;
unsigned long flags;
- s64 tdiff;
- ktime_t next_vblank;
- r = tilcdc_verify_fb(crtc, fb);
- if (r)
- return r;
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
if (tilcdc_crtc->event) {
dev_err(dev->dev, "already pending page flip!\n");
@@ -170,82 +266,31 @@ static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
crtc->primary->fb = fb;
- pm_runtime_get_sync(dev->dev);
-
spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
- next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
- 1000000 / crtc->hwmode.vrefresh);
+ if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+ ktime_t next_vblank;
+ s64 tdiff;
+
+ next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+ 1000000 / crtc->hwmode.vrefresh);
- tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
+ tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
- if (tdiff >= TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+ if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
+ tilcdc_crtc->next_fb = fb;
+ }
+
+ if (tilcdc_crtc->next_fb != fb)
set_scanout(crtc, fb);
- else
- tilcdc_crtc->next_fb = fb;
tilcdc_crtc->event = event;
spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
- pm_runtime_put_sync(dev->dev);
-
return 0;
}
-void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct tilcdc_drm_private *priv = dev->dev_private;
-
- /* we really only care about on or off: */
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- if (tilcdc_crtc->dpms == mode)
- return;
-
- tilcdc_crtc->dpms = mode;
-
- if (mode == DRM_MODE_DPMS_ON) {
- pm_runtime_get_sync(dev->dev);
- start(crtc);
- } else {
- tilcdc_crtc->frame_done = false;
- stop(crtc);
-
- /*
- * if necessary wait for framedone irq which will still come
- * before putting things to sleep..
- */
- if (priv->rev == 2) {
- int ret = wait_event_timeout(
- tilcdc_crtc->frame_done_wq,
- tilcdc_crtc->frame_done,
- msecs_to_jiffies(50));
- if (ret == 0)
- dev_err(dev->dev, "timeout waiting for framedone\n");
- }
-
- pm_runtime_put_sync(dev->dev);
-
- if (tilcdc_crtc->next_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->next_fb);
- tilcdc_crtc->next_fb = NULL;
- }
-
- if (tilcdc_crtc->curr_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->curr_fb);
- tilcdc_crtc->curr_fb = NULL;
- }
-
- drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- }
-}
-
static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
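tilcdc_crtc_update_fb() above now defers the scanout write to the IRQ handler only when the next vblank is close enough to race; otherwise it programs the registers immediately. The timing test in isolation (the threshold mirrors TILCDC_VBLANK_SAFETY_THRESHOLD_US, all numbers illustrative):

#include <stdio.h>
#include <stdint.h>

#define VBLANK_SAFETY_THRESHOLD_US 1000

int main(void)
{
    int64_t last_vblank_us = 1000000, now_us = 1016200;
    int vrefresh = 60;
    int64_t next_vblank_us = last_vblank_us + 1000000 / vrefresh;
    int64_t tdiff = next_vblank_us - now_us;

    if (tdiff < VBLANK_SAFETY_THRESHOLD_US)
        printf("too close to vblank (%lld us left): defer to IRQ\n",
               (long long)tdiff);
    else
        printf("safe window (%lld us): update scanout now\n",
               (long long)tdiff);
    return 0;
}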
@@ -275,41 +320,54 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static void tilcdc_crtc_prepare(struct drm_crtc *crtc)
+static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
- tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ const unsigned clkdiv = 2; /* using a fixed divider of 2 */
+ int ret;
-static void tilcdc_crtc_commit(struct drm_crtc *crtc)
-{
- tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ /* mode.clock is in KHz, set_rate wants parameter in Hz */
+ ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to set display clock rate to: %d\n",
+ crtc->mode.clock);
+ return;
+ }
+
+ tilcdc_crtc->lcd_fck_rate = clk_get_rate(priv->clk);
+
+ DBG("lcd_clk=%u, mode clock=%d, div=%u",
+ tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);
+
+ /* Configure the LCD clock divisor. */
+ tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
+ LCDC_RASTER_MODE);
+
+ if (priv->rev == 2)
+ tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
+ LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
+ LCDC_V2_CORE_CLK_EN);
}
-static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
const struct tilcdc_panel_info *info = tilcdc_crtc->info;
uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
- int ret;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_framebuffer *fb = crtc->primary->state->fb;
- ret = tilcdc_crtc_mode_valid(crtc, mode);
- if (WARN_ON(ret))
- return ret;
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
if (WARN_ON(!info))
- return -EINVAL;
-
- ret = tilcdc_verify_fb(crtc, crtc->primary->fb);
- if (ret)
- return ret;
+ return;
- pm_runtime_get_sync(dev->dev);
+ if (WARN_ON(!fb))
+ return;
/* Configure the Burst Size and fifo threshold of DMA: */
reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
@@ -330,7 +388,8 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
break;
default:
- return -EINVAL;
+ dev_err(dev->dev, "invalid burst size\n");
+ return;
}
reg |= (info->fifo_th << 8);
tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
@@ -344,9 +403,9 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
vsw = mode->vsync_end - mode->vsync_start;
DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
- mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
+ mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
- /* Configure the AC Bias Period and Number of Transitions per Interrupt: */
+ /* Set AC Bias Period and Number of Transitions per Interrupt: */
reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
@@ -381,7 +440,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
/*
* be sure to set Bit 10 for the V2 LCDC controller,
* otherwise limited to 1024 pixels width, stopping
- * 1920x1080 being suppoted.
+ * 1920x1080 being supported.
*/
if (priv->rev == 2) {
if ((mode->vdisplay - 1) & 0x400) {
@@ -396,14 +455,15 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
/* Configure display type: */
reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
- LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 0x000ff000);
+ LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
+ 0x000ff000 /* Palette Loading Delay bits */);
reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
if (info->tft_alt_mode)
reg |= LCDC_TFT_ALT_ENABLE;
if (priv->rev == 2) {
unsigned int depth, bpp;
- drm_fb_get_bpp_depth(crtc->primary->fb->pixel_format, &depth, &bpp);
+ drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
switch (bpp) {
case 16:
break;
@@ -415,7 +475,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
break;
default:
dev_err(dev->dev, "invalid pixel format\n");
- return -EINVAL;
+ return;
}
}
reg |= info->fdd < 12;
@@ -436,12 +496,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
else
tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
- /*
- * use value from adjusted_mode here as this might have been
- * changed as part of the fixup for slave encoders to solve the
- * issue where tilcdc timings are not VESA compliant
- */
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
else
tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
@@ -456,51 +511,56 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
else
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
- drm_framebuffer_reference(crtc->primary->fb);
-
- set_scanout(crtc, crtc->primary->fb);
+ drm_framebuffer_reference(fb);
- tilcdc_crtc_update_clk(crtc);
+ set_scanout(crtc, fb);
- pm_runtime_put_sync(dev->dev);
+ tilcdc_crtc_set_clk(crtc);
- return 0;
+ crtc->hwmode = crtc->state->adjusted_mode;
}
-static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- struct drm_device *dev = crtc->dev;
- int r;
-
- r = tilcdc_verify_fb(crtc, crtc->primary->fb);
- if (r)
- return r;
-
- drm_framebuffer_reference(crtc->primary->fb);
+ struct drm_display_mode *mode = &state->mode;
+ int ret;
- pm_runtime_get_sync(dev->dev);
+ /* If we are not active we don't care */
+ if (!state->active)
+ return 0;
- set_scanout(crtc, crtc->primary->fb);
+ if (state->state->planes[0].ptr != crtc->primary ||
+ state->state->planes[0].state == NULL ||
+ state->state->planes[0].state->crtc != crtc) {
+ dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
+ return -EINVAL;
+ }
- pm_runtime_put_sync(dev->dev);
+ ret = tilcdc_crtc_mode_valid(crtc, mode);
+ if (ret) {
+ dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
+ return -EINVAL;
+ }
return 0;
}
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
- .destroy = tilcdc_crtc_destroy,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = tilcdc_crtc_page_flip,
+ .destroy = tilcdc_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
- .dpms = tilcdc_crtc_dpms,
.mode_fixup = tilcdc_crtc_mode_fixup,
- .prepare = tilcdc_crtc_prepare,
- .commit = tilcdc_crtc_commit,
- .mode_set = tilcdc_crtc_mode_set,
- .mode_set_base = tilcdc_crtc_mode_set_base,
+ .enable = tilcdc_crtc_enable,
+ .disable = tilcdc_crtc_disable,
+ .atomic_check = tilcdc_crtc_atomic_check,
+ .mode_set_nofb = tilcdc_crtc_mode_set_nofb,
};
int tilcdc_crtc_max_width(struct drm_crtc *crtc)
@@ -622,46 +682,23 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
- int dpms = tilcdc_crtc->dpms;
- unsigned long lcd_clk;
- const unsigned clkdiv = 2; /* using a fixed divider of 2 */
- int ret;
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
- pm_runtime_get_sync(dev->dev);
+ drm_modeset_lock_crtc(crtc, NULL);
+ if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
+ if (tilcdc_crtc_is_on(crtc)) {
+ pm_runtime_get_sync(dev->dev);
+ tilcdc_crtc_disable(crtc);
- if (dpms == DRM_MODE_DPMS_ON)
- tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ tilcdc_crtc_set_clk(crtc);
- /* mode.clock is in KHz, set_rate wants parameter in Hz */
- ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv);
- if (ret < 0) {
- dev_err(dev->dev, "failed to set display clock rate to: %d\n",
- crtc->mode.clock);
- goto out;
+ tilcdc_crtc_enable(crtc);
+ pm_runtime_put_sync(dev->dev);
+ }
}
-
- lcd_clk = clk_get_rate(priv->clk);
-
- DBG("lcd_clk=%lu, mode clock=%d, div=%u",
- lcd_clk, crtc->mode.clock, clkdiv);
-
- /* Configure the LCD clock divisor. */
- tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
- LCDC_RASTER_MODE);
-
- if (priv->rev == 2)
- tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
- LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
- LCDC_V2_CORE_CLK_EN);
-
- if (dpms == DRM_MODE_DPMS_ON)
- tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-
-out:
- pm_runtime_put_sync(dev->dev);
+ drm_modeset_unlock_crtc(crtc);
}
#define SYNC_LOST_COUNT_LIMIT 50
@@ -718,30 +755,34 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
tilcdc_crtc->frame_intact = true;
}
+ if (stat & LCDC_FIFO_UNDERFLOW)
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
+ __func__, stat);
+
+ /* For revision 2 only */
if (priv->rev == 2) {
if (stat & LCDC_FRAME_DONE) {
tilcdc_crtc->frame_done = true;
wake_up(&tilcdc_crtc->frame_done_wq);
}
- tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
- }
- if (stat & LCDC_SYNC_LOST) {
- dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
- __func__, stat);
- tilcdc_crtc->frame_intact = false;
- if (tilcdc_crtc->sync_lost_count++ > SYNC_LOST_COUNT_LIMIT) {
- dev_err(dev->dev,
- "%s(0x%08x): Sync lost flood detected, disabling the interrupt",
- __func__, stat);
- tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
- LCDC_SYNC_LOST);
+ if (stat & LCDC_SYNC_LOST) {
+ dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
+ __func__, stat);
+ tilcdc_crtc->frame_intact = false;
+ if (tilcdc_crtc->sync_lost_count++ >
+ SYNC_LOST_COUNT_LIMIT) {
+ dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, disabling the interrupt", __func__, stat);
+ tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
+ LCDC_SYNC_LOST);
+ }
}
- }
- if (stat & LCDC_FIFO_UNDERFLOW)
- dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow",
- __func__, stat);
+ /* Indicate to LCDC that the interrupt service routine has
+ * completed, see 13.3.6.1.6 in AM335x TRM.
+ */
+ tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
+ }
return IRQ_HANDLED;
}
@@ -761,7 +802,10 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
crtc = &tilcdc_crtc->base;
- tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
+ ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
+ if (ret < 0)
+ goto fail;
+
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
drm_flip_work_init(&tilcdc_crtc->unref_work,
@@ -769,7 +813,11 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
spin_lock_init(&tilcdc_crtc->irq_lock);
- ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
+ ret = drm_crtc_init_with_planes(dev, crtc,
+ &tilcdc_crtc->primary,
+ NULL,
+ &tilcdc_crtc_funcs,
+ "tilcdc crtc");
if (ret < 0)
goto fail;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index d27809372d54..a694977c32f4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -20,6 +20,8 @@
#include <linux/component.h>
#include <linux/pinctrl/consumer.h>
#include <linux/suspend.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -31,6 +33,20 @@
static LIST_HEAD(module_list);
+static const u32 tilcdc_rev1_formats[] = { DRM_FORMAT_RGB565 };
+
+static const u32 tilcdc_straight_formats[] = { DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XBGR8888 };
+
+static const u32 tilcdc_crossed_formats[] = { DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888 };
+
+static const u32 tilcdc_legacy_formats[] = { DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888 };
+
void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
const struct tilcdc_module_ops *funcs)
{
@@ -59,9 +75,84 @@ static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
drm_fbdev_cma_hotplug_event(priv->fbdev);
}
+static int tilcdc_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * tilcdc ->atomic_check can update ->mode_changed if pixel format
+ * changes, hence will we check modeset changes again.
+ */
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int tilcdc_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ int ret;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ drm_atomic_helper_swap_state(state, true);
+
+ /*
+ * Everything below can be run asynchronously without the need to grab
+ * any modeset locks at all under one condition: It must be guaranteed
+ * that the asynchronous work has either been cancelled (if the driver
+ * supports it, which at least requires that the framebuffers get
+ * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+ * before the new state gets committed on the software side with
+ * drm_atomic_helper_swap_state().
+ *
+ * This scheme allows new atomic state updates to be prepared and
+ * checked in parallel to the asynchronous completion of the previous
+ * update. Which is important since compositors need to figure out the
+ * composition of the next frame right after having submitted the
+ * current layout.
+ */
+
+ /* Keep HW on while we commit the state. */
+ pm_runtime_get_sync(dev->dev);
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state, 0);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ /* Now the HW should remain on if needed because the CRTC is enabled */
+ pm_runtime_put_sync(dev->dev);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ drm_atomic_state_free(state);
+
+ return 0;
+}
+
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = tilcdc_fb_create,
.output_poll_changed = tilcdc_fb_output_poll_changed,
+ .atomic_check = tilcdc_atomic_check,
+ .atomic_commit = tilcdc_commit,
};
static int modeset_init(struct drm_device *dev)
@@ -93,12 +184,9 @@ static int cpufreq_transition(struct notifier_block *nb,
{
struct tilcdc_drm_private *priv = container_of(nb,
struct tilcdc_drm_private, freq_transition);
- if (val == CPUFREQ_POSTCHANGE) {
- if (priv->lcd_fck_rate != clk_get_rate(priv->clk)) {
- priv->lcd_fck_rate = clk_get_rate(priv->clk);
- tilcdc_crtc_update_clk(priv->crtc);
- }
- }
+
+ if (val == CPUFREQ_POSTCHANGE)
+ tilcdc_crtc_update_clk(priv->crtc);
return 0;
}
@@ -112,8 +200,6 @@ static int tilcdc_unload(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
-
tilcdc_remove_external_encoders(dev);
drm_fbdev_cma_fini(priv->fbdev);
@@ -121,9 +207,7 @@ static int tilcdc_unload(struct drm_device *dev)
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
- pm_runtime_get_sync(dev->dev);
drm_irq_uninstall(dev);
- pm_runtime_put_sync(dev->dev);
#ifdef CONFIG_CPU_FREQ
cpufreq_unregister_notifier(&priv->freq_transition,
@@ -146,24 +230,17 @@ static int tilcdc_unload(struct drm_device *dev)
return 0;
}
-static size_t tilcdc_num_regs(void);
-
static int tilcdc_load(struct drm_device *dev, unsigned long flags)
{
struct platform_device *pdev = dev->platformdev;
struct device_node *node = pdev->dev.of_node;
struct tilcdc_drm_private *priv;
- struct tilcdc_module *mod;
struct resource *res;
u32 bpp = 0;
int ret;
priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
- if (priv)
- priv->saved_register =
- devm_kcalloc(dev->dev, tilcdc_num_regs(),
- sizeof(*priv->saved_register), GFP_KERNEL);
- if (!priv || !priv->saved_register) {
+ if (!priv) {
dev_err(dev->dev, "failed to allocate private data\n");
return -ENOMEM;
}
@@ -201,7 +278,6 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
}
#ifdef CONFIG_CPU_FREQ
- priv->lcd_fck_rate = clk_get_rate(priv->clk);
priv->freq_transition.notifier_call = cpufreq_transition;
ret = cpufreq_register_notifier(&priv->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -249,6 +325,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
pm_runtime_put_sync(dev->dev);
+ if (priv->rev == 1) {
+ DBG("Revision 1 LCDC supports only RGB565 format");
+ priv->pixelformats = tilcdc_rev1_formats;
+ priv->num_pixelformats = ARRAY_SIZE(tilcdc_rev1_formats);
+ bpp = 16;
+ } else {
+ const char *str = "\0";
+
+ of_property_read_string(node, "blue-and-red-wiring", &str);
+ if (0 == strcmp(str, "crossed")) {
+ DBG("Configured for crossed blue and red wires");
+ priv->pixelformats = tilcdc_crossed_formats;
+ priv->num_pixelformats =
+ ARRAY_SIZE(tilcdc_crossed_formats);
+ bpp = 32; /* Choose bpp with RGB support for fbdev */
+ } else if (0 == strcmp(str, "straight")) {
+ DBG("Configured for straight blue and red wires");
+ priv->pixelformats = tilcdc_straight_formats;
+ priv->num_pixelformats =
+ ARRAY_SIZE(tilcdc_straight_formats);
+ bpp = 16; /* Choose bpp with RGB support for fbdev */
+ } else {
+ DBG("Blue and red wiring '%s' unknown, use legacy mode",
+ str);
+ priv->pixelformats = tilcdc_legacy_formats;
+ priv->num_pixelformats =
+ ARRAY_SIZE(tilcdc_legacy_formats);
+ bpp = 16; /* This is just a guess */
+ }
+ }
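
The four format tables selected here are defined earlier in the patch, outside this excerpt. Their shape is roughly as follows; this is illustrative only, and the single entry confirmed by this excerpt is RGB565 for revision 1, per the DBG message above:

    /* Illustrative sketch; the exact contents live elsewhere in the patch. */
    static const u32 tilcdc_rev1_formats[] = { DRM_FORMAT_RGB565 };

    /* "straight" vs. "crossed" wiring differ by swapped red/blue channels. */
    static const u32 tilcdc_straight_formats[] = { DRM_FORMAT_RGB565 /* , ... */ };
    static const u32 tilcdc_crossed_formats[] = { DRM_FORMAT_BGR565 /* , ... */ };
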
+
ret = modeset_init(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize mode setting\n");
@@ -262,7 +369,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
if (ret < 0)
goto fail_mode_config_cleanup;
- ret = tilcdc_add_external_encoders(dev, &bpp);
+ ret = tilcdc_add_external_encoders(dev);
if (ret < 0)
goto fail_component_cleanup;
}
@@ -279,22 +386,14 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
goto fail_external_cleanup;
}
- pm_runtime_get_sync(dev->dev);
ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
- pm_runtime_put_sync(dev->dev);
if (ret < 0) {
dev_err(dev->dev, "failed to install IRQ handler\n");
goto fail_vblank_cleanup;
}
- list_for_each_entry(mod, &module_list, list) {
- DBG("%s: preferred_bpp: %d", mod->name, mod->preferred_bpp);
- bpp = mod->preferred_bpp;
- if (bpp > 0)
- break;
- }
+ drm_mode_config_reset(dev);
- drm_helper_disable_unused_functions(dev);
priv->fbdev = drm_fbdev_cma_init(dev, bpp,
dev->mode_config.num_crtc,
dev->mode_config.num_connector);
@@ -308,20 +407,18 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
return 0;
fail_irq_uninstall:
- pm_runtime_get_sync(dev->dev);
drm_irq_uninstall(dev);
- pm_runtime_put_sync(dev->dev);
fail_vblank_cleanup:
drm_vblank_cleanup(dev);
-fail_mode_config_cleanup:
- drm_mode_config_cleanup(dev);
-
fail_component_cleanup:
if (priv->is_componentized)
component_unbind_all(dev->dev, dev);
+fail_mode_config_cleanup:
+ drm_mode_config_cleanup(dev);
+
fail_external_cleanup:
tilcdc_remove_external_encoders(dev);
@@ -361,45 +458,6 @@ static irqreturn_t tilcdc_irq(int irq, void *arg)
return tilcdc_crtc_irq(priv->crtc);
}
-static void tilcdc_irq_preinstall(struct drm_device *dev)
-{
- tilcdc_clear_irqstatus(dev, 0xffffffff);
-}
-
-static int tilcdc_irq_postinstall(struct drm_device *dev)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
-
- /* enable FIFO underflow irq: */
- if (priv->rev == 1) {
- tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
- } else {
- tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
- LCDC_V2_UNDERFLOW_INT_ENA |
- LCDC_V2_END_OF_FRAME0_INT_ENA |
- LCDC_FRAME_DONE | LCDC_SYNC_LOST);
- }
-
- return 0;
-}
-
-static void tilcdc_irq_uninstall(struct drm_device *dev)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
-
- /* disable irqs that we might have enabled: */
- if (priv->rev == 1) {
- tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
- LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
- tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
- } else {
- tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
- LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
- LCDC_V2_END_OF_FRAME0_INT_ENA |
- LCDC_FRAME_DONE | LCDC_SYNC_LOST);
- }
-}
-
static int tilcdc_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
return 0;
@@ -410,7 +468,7 @@ static void tilcdc_disable_vblank(struct drm_device *dev, unsigned int pipe)
return;
}
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
+#if defined(CONFIG_DEBUG_FS)
static const struct {
const char *name;
uint8_t rev;
@@ -441,15 +499,6 @@ static const struct {
#undef REG
};
-static size_t tilcdc_num_regs(void)
-{
- return ARRAY_SIZE(registers);
-}
-#else
-static size_t tilcdc_num_regs(void)
-{
- return 0;
-}
#endif
#ifdef CONFIG_DEBUG_FS
@@ -537,14 +586,11 @@ static const struct file_operations fops = {
static struct drm_driver tilcdc_driver = {
.driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME),
+ DRIVER_PRIME | DRIVER_ATOMIC),
.load = tilcdc_load,
.unload = tilcdc_unload,
.lastclose = tilcdc_lastclose,
.irq_handler = tilcdc_irq,
- .irq_preinstall = tilcdc_irq_preinstall,
- .irq_postinstall = tilcdc_irq_postinstall,
- .irq_uninstall = tilcdc_irq_uninstall,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = tilcdc_enable_vblank,
.disable_vblank = tilcdc_disable_vblank,
@@ -584,28 +630,12 @@ static int tilcdc_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct tilcdc_drm_private *priv = ddev->dev_private;
- unsigned i, n = 0;
- drm_kms_helper_poll_disable(ddev);
+ priv->saved_state = drm_atomic_helper_suspend(ddev);
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
- if (pm_runtime_suspended(dev)) {
- priv->ctx_valid = false;
- return 0;
- }
-
- /* Disable the LCDC controller, to avoid locking up the PRCM */
- tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF);
-
- /* Save register state: */
- for (i = 0; i < ARRAY_SIZE(registers); i++)
- if (registers[i].save && (priv->rev >= registers[i].rev))
- priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
-
- priv->ctx_valid = true;
-
return 0;
}
@@ -613,23 +643,15 @@ static int tilcdc_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct tilcdc_drm_private *priv = ddev->dev_private;
- unsigned i, n = 0;
+ int ret = 0;
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
- if (priv->ctx_valid == true) {
- /* Restore register state: */
- for (i = 0; i < ARRAY_SIZE(registers); i++)
- if (registers[i].save &&
- (priv->rev >= registers[i].rev))
- tilcdc_write(ddev, registers[i].reg,
- priv->saved_register[n++]);
- }
-
- drm_kms_helper_poll_enable(ddev);
+ if (priv->saved_state)
+ ret = drm_atomic_helper_resume(ddev, priv->saved_state);
- return 0;
+ return ret;
}
#endif
@@ -648,6 +670,12 @@ static int tilcdc_bind(struct device *dev)
static void tilcdc_unbind(struct device *dev)
{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ /* Check if a subcomponent has already triggered the unloading. */
+ if (!ddev->dev_private)
+ return;
+
drm_put_dev(dev_get_drvdata(dev));
}
@@ -680,17 +708,15 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
static int tilcdc_pdev_remove(struct platform_device *pdev)
{
- struct drm_device *ddev = dev_get_drvdata(&pdev->dev);
- struct tilcdc_drm_private *priv = ddev->dev_private;
-
- /* Check if a subcomponent has already triggered the unloading. */
- if (!priv)
- return 0;
+ int ret;
- if (priv->is_componentized)
- component_master_del(&pdev->dev, &tilcdc_comp_ops);
- else
+ ret = tilcdc_get_external_components(&pdev->dev, NULL);
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
drm_put_dev(platform_get_drvdata(pdev));
+ else
+ component_master_del(&pdev->dev, &tilcdc_comp_ops);
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index c1de18bae415..9780c37ec4cd 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -65,13 +65,15 @@ struct tilcdc_drm_private {
*/
uint32_t max_width;
- /* register contents saved across suspend/resume: */
- u32 *saved_register;
- bool ctx_valid;
+ /* Supported pixel formats */
+ const uint32_t *pixelformats;
+ uint32_t num_pixelformats;
+
+ /* The context for the PM suspend/resume cycle is stored here */
+ struct drm_atomic_state *saved_state;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
- unsigned int lcd_fck_rate;
#endif
struct workqueue_struct *wq;
@@ -113,7 +115,6 @@ struct tilcdc_module {
const char *name;
struct list_head list;
const struct tilcdc_module_ops *funcs;
- unsigned int preferred_bpp;
};
void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
@@ -171,6 +172,11 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
int tilcdc_crtc_max_width(struct drm_crtc *crtc);
-void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode);
+void tilcdc_crtc_disable(struct drm_crtc *crtc);
+int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
+
+int tilcdc_plane_init(struct drm_device *dev, struct drm_plane *plane);
#endif /* __TILCDC_DRV_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 03acb4f99982..68e895021005 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -52,7 +52,7 @@ static int tilcdc_external_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp,
+static int tilcdc_add_external_encoder(struct drm_device *dev,
struct drm_connector *connector)
{
struct tilcdc_drm_private *priv = dev->dev_private;
@@ -64,7 +64,6 @@ static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp,
/* Only tda998x is supported at the moment. */
tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
- *bpp = panel_info_tda998x.bpp;
connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs),
GFP_KERNEL);
@@ -94,7 +93,7 @@ static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp,
return 0;
}
-int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp)
+int tilcdc_add_external_encoders(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_connector *connector;
@@ -108,7 +107,7 @@ int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp)
if (connector == priv->connectors[i])
found = true;
if (!found) {
- ret = tilcdc_add_external_encoder(dev, bpp, connector);
+ ret = tilcdc_add_external_encoder(dev, connector);
if (ret)
return ret;
}
@@ -138,14 +137,23 @@ static int dev_match_of(struct device *dev, void *data)
int tilcdc_get_external_components(struct device *dev,
struct component_match **match)
{
+ struct device_node *node;
struct device_node *ep = NULL;
int count = 0;
- while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
- struct device_node *node;
+ /* Avoid an error print from of_graph_get_next_endpoint() if
+ * no ports are present.
+ */
+ node = of_get_child_by_name(dev->of_node, "ports");
+ if (!node)
+ node = of_get_child_by_name(dev->of_node, "port");
+ if (!node)
+ return 0;
+ of_node_put(node);
+ while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) {
node = of_graph_get_remote_port_parent(ep);
- if (!node && !of_device_is_available(node)) {
+ if (!node || !of_device_is_available(node)) {
of_node_put(node);
continue;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.h b/drivers/gpu/drm/tilcdc/tilcdc_external.h
index 6aabe2788760..c700e0c1623e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.h
@@ -18,7 +18,7 @@
#ifndef __TILCDC_EXTERNAL_H__
#define __TILCDC_EXTERNAL_H__
-int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp);
+int tilcdc_add_external_encoders(struct drm_device *dev);
void tilcdc_remove_external_encoders(struct drm_device *dev);
int tilcdc_get_external_components(struct device *dev,
struct component_match **match);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index ff7774c17d7c..2134bb20fbe9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -22,8 +22,10 @@
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
+#include <drm/drm_atomic_helper.h>
#include "tilcdc_drv.h"
+#include "tilcdc_panel.h"
struct panel_module {
struct tilcdc_module base;
@@ -64,9 +66,7 @@ static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
static void panel_encoder_prepare(struct drm_encoder *encoder)
{
- struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
- tilcdc_crtc_set_panel_info(encoder->crtc, panel_encoder->mod->info);
}
static void panel_encoder_commit(struct drm_encoder *encoder)
@@ -196,9 +196,12 @@ static struct drm_encoder *panel_connector_best_encoder(
static const struct drm_connector_funcs panel_connector_funcs = {
.destroy = panel_connector_destroy,
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = panel_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
@@ -268,6 +271,9 @@ static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
priv->encoders[priv->num_encoders++] = encoder;
priv->connectors[priv->num_connectors++] = connector;
+ tilcdc_crtc_set_panel_info(priv->crtc,
+ to_panel_encoder(encoder)->mod->info);
+
return 0;
}
@@ -392,8 +398,6 @@ static int panel_probe(struct platform_device *pdev)
goto fail_timings;
}
- mod->preferred_bpp = panel_mod->info->bpp;
-
return 0;
fail_timings:
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
new file mode 100644
index 000000000000..74c65fa859b2
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2015 Texas Instruments
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/drmP.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <uapi/drm/drm_fourcc.h>
+
+#include "tilcdc_drv.h"
+
+static struct drm_plane_funcs tilcdc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int tilcdc_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *old_state = plane->state;
+ unsigned int depth, bpp;
+
+ if (!state->crtc)
+ return 0;
+
+ if (WARN_ON(!state->fb))
+ return -EINVAL;
+
+ if (state->crtc_x || state->crtc_y) {
+ dev_err(plane->dev->dev, "%s: crtc position must be zero.",
+ __func__);
+ return -EINVAL;
+ }
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ /* we should have a crtc state if the plane is attached to a crtc */
+ if (WARN_ON(!crtc_state))
+ return 0;
+
+ if (crtc_state->mode.hdisplay != state->crtc_w ||
+ crtc_state->mode.vdisplay != state->crtc_h) {
+ dev_err(plane->dev->dev,
+ "%s: Size must match mode (%dx%d == %dx%d)", __func__,
+ crtc_state->mode.hdisplay, crtc_state->mode.vdisplay,
+ state->crtc_w, state->crtc_h);
+ return -EINVAL;
+ }
+
+ drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp);
+ if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) {
+ dev_err(plane->dev->dev,
+ "Invalid pitch: fb and crtc widths must be the same");
+ return -EINVAL;
+ }
+
+ if (state->fb && old_state->fb &&
+ state->fb->pixel_format != old_state->fb->pixel_format) {
+ dev_dbg(plane->dev->dev,
+ "%s(): pixel format change requires mode_change\n",
+ __func__);
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+}
+
+static void tilcdc_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+
+ if (!state->crtc)
+ return;
+
+ if (WARN_ON(!state->fb || !state->crtc->state))
+ return;
+
+ tilcdc_crtc_update_fb(state->crtc,
+ state->fb,
+ state->crtc->state->event);
+}
+
+static const struct drm_plane_helper_funcs plane_helper_funcs = {
+ .atomic_check = tilcdc_plane_atomic_check,
+ .atomic_update = tilcdc_plane_atomic_update,
+};
+
+int tilcdc_plane_init(struct drm_device *dev,
+ struct drm_plane *plane)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ int ret;
+
+ ret = drm_plane_init(dev, plane, 1,
+ &tilcdc_plane_funcs,
+ priv->pixelformats,
+ priv->num_pixelformats,
+ true);
+ if (ret) {
+ dev_err(dev->dev, "Failed to initialize plane: %d\n", ret);
+ return ret;
+ }
+
+ drm_plane_helper_add(plane, &plane_helper_funcs);
+
+ return 0;
+}
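
A plausible call site for the new helper, assuming the primary plane is embedded in the driver's CRTC structure (that embedding is an assumption, not shown in this excerpt):

    /* Hypothetical caller during CRTC setup. */
    ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
    if (ret < 0)
    	goto fail;
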
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index 1bf5e2553acc..f57c0d62c76a 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -119,6 +119,20 @@ static inline void tilcdc_write(struct drm_device *dev, u32 reg, u32 data)
iowrite32(data, priv->mmio + reg);
}
+static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data)
+{
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ volatile void __iomem *addr = priv->mmio + reg;
+
+#ifdef iowrite64
+ iowrite64(data, addr);
+#else
+ __iowmb();
+ /* This compiles to strd (a single 64-bit store) on ARMv7 */
+ *(volatile u64 __force *)addr = __cpu_to_le64(data);
+#endif
+}
+
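
A 64-bit store is useful when two adjacent 32-bit registers must be updated in one bus transaction, for example a DMA base address together with its ceiling. A hypothetical call site (the register name comes from this driver's header; the packing assumes the low word maps to the lower register address, as the __cpu_to_le64() above implies):

    /* Hypothetical: program DMA start and end addresses with one store. */
    tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
    	       (u64)dma_end << 32 | dma_start);
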
static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
{
struct tilcdc_drm_private *priv = dev->dev_private;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
index f9c79dabce20..623a9140493c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
@@ -139,7 +139,7 @@ static void __init tilcdc_node_disable(struct device_node *node)
of_update_property(node, prop);
}
-struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
+static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
{
const int size = __dtb_tilcdc_slave_compat_end -
__dtb_tilcdc_slave_compat_begin;
@@ -195,7 +195,7 @@ static const char * const tilcdc_slave_props[] __initconst = {
NULL
};
-void __init tilcdc_convert_slave_node(void)
+static void __init tilcdc_convert_slave_node(void)
{
struct device_node *slave = NULL, *lcdc = NULL;
struct device_node *i2c = NULL, *fragment = NULL;
@@ -207,7 +207,7 @@ void __init tilcdc_convert_slave_node(void)
int ret;
if (kfree_table_init(&kft))
- goto out;
+ return;
lcdc = of_find_matching_node(NULL, tilcdc_of_match);
slave = of_find_matching_node(NULL, tilcdc_slave_of_match);
@@ -261,7 +261,7 @@ out:
of_node_put(fragment);
}
-int __init tilcdc_slave_compat_init(void)
+static int __init tilcdc_slave_compat_init(void)
{
tilcdc_convert_slave_node();
return 0;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 6b8c5b3bf588..458043a53995 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -20,8 +20,10 @@
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/consumer.h>
+#include <drm/drm_atomic_helper.h>
#include "tilcdc_drv.h"
+#include "tilcdc_tfp410.h"
struct tfp410_module {
struct tilcdc_module base;
@@ -75,7 +77,6 @@ static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
static void tfp410_encoder_prepare(struct drm_encoder *encoder)
{
tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
- tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info);
}
static void tfp410_encoder_commit(struct drm_encoder *encoder)
@@ -201,9 +202,12 @@ static struct drm_encoder *tfp410_connector_best_encoder(
static const struct drm_connector_funcs tfp410_connector_funcs = {
.destroy = tfp410_connector_destroy,
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.detect = tfp410_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
@@ -276,6 +280,7 @@ static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev
priv->encoders[priv->num_encoders++] = encoder;
priv->connectors[priv->num_connectors++] = connector;
+ tilcdc_crtc_set_panel_info(priv->crtc, &dvi_info);
return 0;
}
@@ -323,8 +328,6 @@ static int tfp410_probe(struct platform_device *pdev)
goto fail;
}
- mod->preferred_bpp = dvi_info.bpp;
-
i2c_node = of_find_node_by_phandle(i2c_phandle);
if (!i2c_node) {
dev_err(&pdev->dev, "could not get i2c bus node\n");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 42c074a9c955..fc6217dfe401 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -57,14 +57,14 @@ static struct attribute ttm_bo_count = {
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
uint32_t *mem_type)
{
- int i;
+ int pos;
- for (i = 0; i <= TTM_PL_PRIV5; i++)
- if (place->flags & (1 << i)) {
- *mem_type = i;
- return 0;
- }
- return -EINVAL;
+ pos = ffs(place->flags & TTM_PL_MASK_MEM);
+ if (unlikely(!pos))
+ return -EINVAL;
+
+ *mem_type = pos - 1;
+ return 0;
}
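
The rewrite leans on ffs() returning the 1-based index of the least-significant set bit, or 0 when no bit is set, so pos - 1 recovers the memory type that the removed loop found one bit at a time. A standalone userspace illustration:

    #include <stdio.h>
    #include <strings.h>	/* ffs() */

    int main(void)
    {
    	unsigned int flags = 1u << 2;	/* e.g. a placement bit for type 2 */
    	int pos = ffs(flags);		/* 1-based, so this is 3 */

    	if (pos)
    		printf("mem_type = %d\n", pos - 1);	/* prints 2 */
    	return 0;
    }
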
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
@@ -354,14 +354,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
- mem);
+ ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, interruptible,
- no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);
if (ret) {
if (bdev->driver->move_notify) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f157a9efd220..bf6e21655c57 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,8 +45,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+ bool interruptible, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -329,8 +329,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+ bool interruptible, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index a1803fbcc898..29855be96be0 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -600,3 +600,9 @@ size_t ttm_round_pot(size_t size)
return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
+
+uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
+{
+ return glob->zone_kernel->max_mem;
+}
+EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index bef9f6feb635..cec4b4baa179 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -858,7 +858,6 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
if (count) {
d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
ttm->pages[index] = d_page->p;
- ttm_dma->cpu_address[index] = d_page->vaddr;
ttm_dma->dma_address[index] = d_page->dma;
list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
r = 0;
@@ -989,7 +988,6 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
INIT_LIST_HEAD(&ttm_dma->pages_list);
for (i = 0; i < ttm->num_pages; i++) {
ttm->pages[i] = NULL;
- ttm_dma->cpu_address[i] = 0;
ttm_dma->dma_address[i] = 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bc5aa573f466..aee3c00f836e 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -57,10 +57,8 @@ static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
sizeof(*ttm->ttm.pages) +
- sizeof(*ttm->dma_address) +
- sizeof(*ttm->cpu_address));
- ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
- ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
+ sizeof(*ttm->dma_address));
+ ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
}
#ifdef CONFIG_X86
@@ -244,7 +242,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
drm_free_large(ttm->pages);
ttm->pages = NULL;
- ttm_dma->cpu_address = NULL;
ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 4709b54c204c..d2f57c52f7db 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -150,8 +150,5 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- drm_object_attach_property(&connector->base,
- dev->mode_config.dirty_info_property,
- 1);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index e2243edd1ce3..ac90ffdb5912 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -209,7 +209,7 @@ struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
exp_info.flags = flags;
exp_info.priv = obj;
- return dma_buf_export(&exp_info);
+ return drm_gem_dmabuf_export(dev, &exp_info);
}
static int udl_prime_create(struct drm_device *dev,
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 17d34e0edbdd..cc45d98f9bb5 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -16,6 +16,20 @@ static int udl_driver_set_busid(struct drm_device *d, struct drm_master *m)
return 0;
}
+static int udl_usb_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ return 0;
+}
+
+static int udl_usb_resume(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+
+ udl_modeset_restore(dev);
+ return 0;
+}
+
static const struct vm_operations_struct udl_gem_vm_ops = {
.fault = udl_gem_fault,
.open = drm_gem_vm_open,
@@ -72,8 +86,8 @@ static int udl_usb_probe(struct usb_interface *interface,
int r;
dev = drm_dev_alloc(&driver, &interface->dev);
- if (!dev)
- return -ENOMEM;
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
r = drm_dev_register(dev, (unsigned long)udev);
if (r)
@@ -122,6 +136,8 @@ static struct usb_driver udl_driver = {
.name = "udl",
.probe = udl_usb_probe,
.disconnect = udl_usb_disconnect,
+ .suspend = udl_usb_suspend,
+ .resume = udl_usb_resume,
.id_table = id_table,
};
module_usb_driver(udl_driver);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 0b03d34ffdee..f338a576efc8 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -52,6 +52,7 @@ struct udl_device {
struct device *dev;
struct drm_device *ddev;
struct usb_device *udev;
+ struct drm_crtc *crtc;
int sku_pixel_limit;
@@ -87,6 +88,7 @@ struct udl_framebuffer {
/* modeset */
int udl_modeset_init(struct drm_device *dev);
+void udl_modeset_restore(struct drm_device *dev);
void udl_modeset_cleanup(struct drm_device *dev);
int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 33dbfb2c4748..873f010d9616 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -16,6 +16,8 @@
/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512
+#define NR_USB_REQUEST_CHANNEL 0x12
+
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
@@ -90,6 +92,32 @@ success:
return true;
}
+/*
+ * Need to ensure a channel is selected before submitting URBs.
+ * The key is copied into a kmalloc'd buffer because usb_control_msg()
+ * requires a DMA-capable transfer buffer.
+ */
+static int udl_select_std_channel(struct udl_device *udl)
+{
+ int ret;
+ static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
+ 0x1C, 0x88, 0x5E, 0x15,
+ 0x60, 0xFE, 0xC6, 0x97,
+ 0x16, 0x3D, 0x47, 0xF2};
+ void *sendbuf;
+
+ sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
+ if (!sendbuf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(udl->udev,
+ usb_sndctrlpipe(udl->udev, 0),
+ NR_USB_REQUEST_CHANNEL,
+ (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
+ sendbuf, sizeof(set_def_chn),
+ USB_CTRL_SET_TIMEOUT);
+ kfree(sendbuf);
+ return ret < 0 ? ret : 0;
+}
+
static void udl_release_urb_work(struct work_struct *work)
{
struct urb_node *unode = container_of(work, struct urb_node,
@@ -301,6 +329,9 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
goto err;
}
+ if (udl_select_std_channel(udl))
+ DRM_ERROR("Selecting channel failed\n");
+
if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
DRM_ERROR("udl_alloc_urb_list failed\n");
goto err;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index f92ea9579674..f2b2481cad52 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -309,6 +309,8 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
char *wrptr;
int color_depth = 0;
+ udl->crtc = crtc;
+
buf = (char *)udl->mode_buf;
/* for now we just clip 24 -> 16 - if we fix that fix this */
@@ -441,8 +443,6 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &udl_mode_funcs;
- drm_mode_create_dirty_info_property(dev);
-
udl_crtc_init(dev);
encoder = udl_encoder_init(dev);
@@ -452,6 +452,18 @@ int udl_modeset_init(struct drm_device *dev)
return 0;
}
+void udl_modeset_restore(struct drm_device *dev)
+{
+ struct udl_device *udl = dev->dev_private;
+ struct udl_framebuffer *ufb;
+
+ if (!udl->crtc || !udl->crtc->primary->fb)
+ return;
+ udl_crtc_commit(udl->crtc);
+ ufb = to_udl_fb(udl->crtc->primary->fb);
+ udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
+}
+
void udl_modeset_cleanup(struct drm_device *dev)
{
drm_mode_config_cleanup(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 8fc2b731b59a..7f08d681a74b 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -163,14 +163,6 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
int vblank_lines;
int ret = 0;
- /*
- * XXX Doesn't work well in interlaced mode yet, partially due
- * to problems in vc4 kms or drm core interlaced mode handling,
- * so disable for now in interlaced mode.
- */
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- return ret;
-
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
/* Get optional system timestamp before query. */
@@ -191,10 +183,15 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
/* Vertical position of hvs composed scanline. */
*vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+ *hpos = 0;
- /* No hpos info available. */
- if (hpos)
- *hpos = 0;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ *vpos /= 2;
+
+ /* Use hpos to correct for field offset in interlaced mode. */
+ if (VC4_GET_FIELD(val, SCALER_DISPSTATX_FRAME_COUNT) % 2)
+ *hpos += mode->crtc_htotal / 2;
+ }
/* This is the offset we need for translating hvs -> pv scanout pos. */
fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
@@ -217,8 +214,6 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
* position of the PV.
*/
*vpos -= fifo_lines + 1;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- *vpos /= 2;
ret |= DRM_SCANOUTPOS_ACCURATE;
return ret;
@@ -234,7 +229,7 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
* and need to make things up in an approximate but consistent way.
*/
ret |= DRM_SCANOUTPOS_IN_VBLANK;
- vblank_lines = mode->crtc_vtotal - mode->crtc_vdisplay;
+ vblank_lines = mode->vtotal - mode->vdisplay;
if (flags & DRM_CALLED_FROM_VBLIRQ) {
/*
@@ -383,7 +378,7 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_crtc_state *state = crtc->state;
struct drm_display_mode *mode = &state->adjusted_mode;
bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
- u32 vactive = (mode->vdisplay >> (interlace ? 1 : 0));
+ u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
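+ /* DBLCLK modes (e.g. 480i/576i over HDMI) send each pixel twice,
+ * so all horizontal timings below are scaled by pixel_rep.
+ */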
u32 format = PV_CONTROL_FORMAT_24;
bool debug_dump_regs = false;
int clock_select = vc4_get_clock_select(crtc);
@@ -399,47 +394,65 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
CRTC_WRITE(PV_CONTROL, 0);
CRTC_WRITE(PV_HORZA,
- VC4_SET_FIELD(mode->htotal - mode->hsync_end,
+ VC4_SET_FIELD((mode->htotal -
+ mode->hsync_end) * pixel_rep,
PV_HORZA_HBP) |
- VC4_SET_FIELD(mode->hsync_end - mode->hsync_start,
+ VC4_SET_FIELD((mode->hsync_end -
+ mode->hsync_start) * pixel_rep,
PV_HORZA_HSYNC));
CRTC_WRITE(PV_HORZB,
- VC4_SET_FIELD(mode->hsync_start - mode->hdisplay,
+ VC4_SET_FIELD((mode->hsync_start -
+ mode->hdisplay) * pixel_rep,
PV_HORZB_HFP) |
- VC4_SET_FIELD(mode->hdisplay, PV_HORZB_HACTIVE));
+ VC4_SET_FIELD(mode->hdisplay * pixel_rep, PV_HORZB_HACTIVE));
CRTC_WRITE(PV_VERTA,
- VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
+ VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
PV_VERTA_VBP) |
- VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+ VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
PV_VERTA_VSYNC));
CRTC_WRITE(PV_VERTB,
- VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+ VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
PV_VERTB_VFP) |
- VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));
+ VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
if (interlace) {
CRTC_WRITE(PV_VERTA_EVEN,
- VC4_SET_FIELD(mode->vtotal - mode->vsync_end - 1,
+ VC4_SET_FIELD(mode->crtc_vtotal -
+ mode->crtc_vsync_end - 1,
PV_VERTA_VBP) |
- VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+ VC4_SET_FIELD(mode->crtc_vsync_end -
+ mode->crtc_vsync_start,
PV_VERTA_VSYNC));
CRTC_WRITE(PV_VERTB_EVEN,
- VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+ VC4_SET_FIELD(mode->crtc_vsync_start -
+ mode->crtc_vdisplay,
PV_VERTB_VFP) |
- VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));
+ VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
+
+ /* We set up first field even mode for HDMI. VEC's
+ * NTSC mode would want first field odd instead, once
+ * we support it (to do so, set ODD_FIRST and put the
+ * delay in VSYNCD_EVEN instead).
+ */
+ CRTC_WRITE(PV_V_CONTROL,
+ PV_VCONTROL_CONTINUOUS |
+ PV_VCONTROL_INTERLACE |
+ VC4_SET_FIELD(mode->htotal * pixel_rep / 2,
+ PV_VCONTROL_ODD_DELAY));
+ CRTC_WRITE(PV_VSYNCD_EVEN, 0);
+ } else {
+ CRTC_WRITE(PV_V_CONTROL, PV_VCONTROL_CONTINUOUS);
}
- CRTC_WRITE(PV_HACT_ACT, mode->hdisplay);
+ CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
- CRTC_WRITE(PV_V_CONTROL,
- PV_VCONTROL_CONTINUOUS |
- (interlace ? PV_VCONTROL_INTERLACE : 0));
CRTC_WRITE(PV_CONTROL,
VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
VC4_SET_FIELD(vc4_get_fifo_full_level(format),
PV_CONTROL_FIFO_LEVEL) |
+ VC4_SET_FIELD(pixel_rep - 1, PV_CONTROL_PIXEL_REP) |
PV_CONTROL_CLR_AT_START |
PV_CONTROL_TRIGGER_UNDERFLOW |
PV_CONTROL_WAIT_HSTART |
@@ -480,6 +493,9 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
int ret;
require_hvs_enabled(dev);
+ /* Disable vblank irq handling before crtc is disabled. */
+ drm_crtc_vblank_off(crtc);
+
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
@@ -530,6 +546,23 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
/* Turn on the pixel valve, which will emit the vstart signal. */
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+
+ /* Enable vblank irq handling after crtc is started. */
+ drm_crtc_vblank_on(crtc);
+}
+
+static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Do not allow doublescan modes from user space */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n",
+ crtc->base.id);
+ return false;
+ }
+
+ return true;
}
static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
@@ -819,6 +852,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.mode_set_nofb = vc4_crtc_mode_set_nofb,
.disable = vc4_crtc_disable,
.enable = vc4_crtc_enable,
+ .mode_fixup = vc4_crtc_mode_fixup,
.atomic_check = vc4_crtc_atomic_check,
.atomic_flush = vc4_crtc_atomic_flush,
};
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 275fedbdbd9e..1e1f6b8184d0 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -340,9 +340,20 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
}
}
+static bool vc4_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ return true;
+}
+
static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
.disable = vc4_dpi_encoder_disable,
.enable = vc4_dpi_encoder_enable,
+ .mode_fixup = vc4_dpi_encoder_mode_fixup,
};
static const struct of_device_id vc4_dpi_dt_match[] = {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 9ecef9385491..8703f56b7947 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "drm_fb_cma_helper.h"
+#include <drm/drm_fb_helper.h>
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
@@ -214,7 +215,7 @@ static void vc4_kick_out_firmware_fb(void)
ap->ranges[0].base = 0;
ap->ranges[0].size = ~0;
- remove_conflicting_framebuffers(ap, "vc4drmfb", false);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "vc4drmfb", false);
kfree(ap);
}
@@ -232,8 +233,8 @@ static int vc4_drm_bind(struct device *dev)
return -ENOMEM;
drm = drm_dev_alloc(&vc4_drm_driver, dev);
- if (!drm)
- return -ENOMEM;
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
platform_set_drvdata(pdev, drm);
vc4->dev = drm;
drm->dev_private = vc4;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 428e24919ef1..7c1e4d97486f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -122,9 +122,16 @@ to_vc4_dev(struct drm_device *dev)
struct vc4_bo {
struct drm_gem_cma_object base;
- /* seqno of the last job to render to this BO. */
+ /* seqno of the last job to render using this BO. */
uint64_t seqno;
+ /* seqno of the last job to use the RCL to write to this BO.
+ *
+ * Note that this doesn't include binner overflow memory
+ * writes.
+ */
+ uint64_t write_seqno;
+
/* List entry for the BO's position in either
* vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
*/
@@ -216,6 +223,9 @@ struct vc4_exec_info {
/* Sequence number for this bin/render job. */
uint64_t seqno;
+ /* Latest write_seqno of any BO that binning depends on. */
+ uint64_t bin_dep_seqno;
+
/* Last current addresses the hardware was processing when the
* hangcheck timer checked on us.
*/
@@ -230,6 +240,13 @@ struct vc4_exec_info {
struct drm_gem_cma_object **bo;
uint32_t bo_count;
+ /* List of BOs that are being written by the RCL. Other than
+ * the binner temporary storage, this is all the BOs written
+ * by the job.
+ */
+ struct drm_gem_cma_object *rcl_write_bo[4];
+ uint32_t rcl_write_bo_count;
+
/* Pointers for our position in vc4->job_list */
struct list_head head;
@@ -307,18 +324,15 @@ struct vc4_exec_info {
static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
- if (list_empty(&vc4->bin_job_list))
- return NULL;
- return list_first_entry(&vc4->bin_job_list, struct vc4_exec_info, head);
+ return list_first_entry_or_null(&vc4->bin_job_list,
+ struct vc4_exec_info, head);
}
static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
- if (list_empty(&vc4->render_job_list))
- return NULL;
- return list_first_entry(&vc4->render_job_list,
- struct vc4_exec_info, head);
+ return list_first_entry_or_null(&vc4->render_job_list,
+ struct vc4_exec_info, head);
}
static inline struct vc4_exec_info *
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index b262c5c26f10..47a095f392f8 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -419,10 +419,6 @@ again:
vc4_flush_caches(dev);
- /* Disable the binner's pre-loaded overflow memory address */
- V3D_WRITE(V3D_BPOA, 0);
- V3D_WRITE(V3D_BPOS, 0);
-
/* Either put the job in the binner if it uses the binner, or
* immediately move it to the to-be-rendered queue.
*/
@@ -471,6 +467,11 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
list_for_each_entry(bo, &exec->unref_list, unref_head) {
bo->seqno = seqno;
}
+
+ for (i = 0; i < exec->rcl_write_bo_count; i++) {
+ bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
+ bo->write_seqno = seqno;
+ }
}
/* Queues a struct vc4_exec_info for execution. If no job is
@@ -673,6 +674,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
goto fail;
ret = vc4_validate_shader_recs(dev, exec);
+ if (ret)
+ goto fail;
+
+ /* Block waiting on any previous rendering into the CS's VBO,
+ * IB, or textures, so that pixels are actually written by the
+ * time we try to read them.
+ */
+ ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
fail:
drm_free_large(temp);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 4452f3631cac..c4cb2e26de32 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -62,6 +62,8 @@ struct vc4_hdmi {
struct vc4_hdmi_encoder {
struct vc4_encoder base;
bool hdmi_monitor;
+ bool limited_rgb_range;
+ bool rgb_range_selectable;
};
static inline struct vc4_hdmi_encoder *
@@ -174,6 +176,9 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
}
+ if (drm_probe_ddc(vc4->hdmi->ddc))
+ return connector_status_connected;
+
if (HDMI_READ(VC4_HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
return connector_status_connected;
else
@@ -202,6 +207,12 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
return -ENODEV;
vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid);
+
+ if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
+ vc4_encoder->rgb_range_selectable =
+ drm_rgb_quant_range_selectable(edid);
+ }
+
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -246,7 +257,7 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT);
- connector->interlace_allowed = 0;
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_mode_connector_attach_encoder(connector, encoder);
@@ -269,25 +280,143 @@ static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
.destroy = vc4_hdmi_encoder_destroy,
};
+static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
+ enum hdmi_infoframe_type type)
+{
+ struct drm_device *dev = encoder->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u32 packet_id = type - 0x80;
+
+ HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
+ HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
+
+ return wait_for(!(HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) &
+ BIT(packet_id)), 100);
+}
+
+static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
+ union hdmi_infoframe *frame)
+{
+ struct drm_device *dev = encoder->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u32 packet_id = frame->any.type - 0x80;
+ u32 packet_reg = VC4_HDMI_GCP_0 + VC4_HDMI_PACKET_STRIDE * packet_id;
+ uint8_t buffer[VC4_HDMI_PACKET_STRIDE];
+ ssize_t len, i;
+ int ret;
+
+ WARN_ONCE(!(HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) &
+ VC4_HDMI_RAM_PACKET_ENABLE),
+ "Packet RAM has to be on to store the packet.");
+
+ len = hdmi_infoframe_pack(frame, buffer, sizeof(buffer));
+ if (len < 0)
+ return;
+
+ ret = vc4_hdmi_stop_packet(encoder, frame->any.type);
+ if (ret) {
+ DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
+ return;
+ }
+
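+ /* The packet RAM packs 7 payload bytes into each pair of 32-bit
+ * words: three bytes into the first word, four into the second.
+ */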
+ for (i = 0; i < len; i += 7) {
+ HDMI_WRITE(packet_reg,
+ buffer[i + 0] << 0 |
+ buffer[i + 1] << 8 |
+ buffer[i + 2] << 16);
+ packet_reg += 4;
+
+ HDMI_WRITE(packet_reg,
+ buffer[i + 3] << 0 |
+ buffer[i + 4] << 8 |
+ buffer[i + 5] << 16 |
+ buffer[i + 6] << 24);
+ packet_reg += 4;
+ }
+
+ HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
+ HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) | BIT(packet_id));
+ ret = wait_for((HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) &
+ BIT(packet_id)), 100);
+ if (ret)
+ DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret);
+}
+
+static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+ struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+ const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return;
+ }
+
+ if (vc4_encoder->rgb_range_selectable) {
+ if (vc4_encoder->limited_rgb_range) {
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_FULL;
+ }
+ }
+
+ vc4_hdmi_write_infoframe(encoder, &frame);
+}
+
+static void vc4_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = hdmi_spd_infoframe_init(&frame.spd, "Broadcom", "Videocore");
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill SPD infoframe\n");
+ return;
+ }
+
+ frame.spd.sdi = HDMI_SPD_SDI_PC;
+
+ vc4_hdmi_write_infoframe(encoder, &frame);
+}
+
+static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
+{
+ vc4_hdmi_set_avi_infoframe(encoder);
+ vc4_hdmi_set_spd_infoframe(encoder);
+}
+
static void vc4_hdmi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *unadjusted_mode,
struct drm_display_mode *mode)
{
+ struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
bool debug_dump_regs = false;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
- u32 vactive = (mode->vdisplay >>
- ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0));
- u32 verta = (VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
+ bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
+ u32 verta = (VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
VC4_HDMI_VERTA_VSP) |
- VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
+ VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
VC4_HDMI_VERTA_VFP) |
- VC4_SET_FIELD(vactive, VC4_HDMI_VERTA_VAL));
+ VC4_SET_FIELD(mode->crtc_vdisplay, VC4_HDMI_VERTA_VAL));
u32 vertb = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
- VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
+ VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
VC4_HDMI_VERTB_VBP));
+ u32 vertb_even = (VC4_SET_FIELD(0, VC4_HDMI_VERTB_VSPO) |
+ VC4_SET_FIELD(mode->crtc_vtotal -
+ mode->crtc_vsync_end -
+ interlaced,
+ VC4_HDMI_VERTB_VBP));
+ u32 csc_ctl;
if (debug_dump_regs) {
DRM_INFO("HDMI regs before:\n");
@@ -296,7 +425,8 @@ static void vc4_hdmi_encoder_mode_set(struct drm_encoder *encoder,
HD_WRITE(VC4_HD_VID_CTL, 0);
- clk_set_rate(vc4->hdmi->pixel_clock, mode->clock * 1000);
+ clk_set_rate(vc4->hdmi->pixel_clock, mode->clock * 1000 *
+ ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1));
HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
@@ -306,29 +436,62 @@ static void vc4_hdmi_encoder_mode_set(struct drm_encoder *encoder,
HDMI_WRITE(VC4_HDMI_HORZA,
(vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) |
(hsync_pos ? VC4_HDMI_HORZA_HPOS : 0) |
- VC4_SET_FIELD(mode->hdisplay, VC4_HDMI_HORZA_HAP));
+ VC4_SET_FIELD(mode->hdisplay * pixel_rep,
+ VC4_HDMI_HORZA_HAP));
HDMI_WRITE(VC4_HDMI_HORZB,
- VC4_SET_FIELD(mode->htotal - mode->hsync_end,
+ VC4_SET_FIELD((mode->htotal -
+ mode->hsync_end) * pixel_rep,
VC4_HDMI_HORZB_HBP) |
- VC4_SET_FIELD(mode->hsync_end - mode->hsync_start,
+ VC4_SET_FIELD((mode->hsync_end -
+ mode->hsync_start) * pixel_rep,
VC4_HDMI_HORZB_HSP) |
- VC4_SET_FIELD(mode->hsync_start - mode->hdisplay,
+ VC4_SET_FIELD((mode->hsync_start -
+ mode->hdisplay) * pixel_rep,
VC4_HDMI_HORZB_HFP));
HDMI_WRITE(VC4_HDMI_VERTA0, verta);
HDMI_WRITE(VC4_HDMI_VERTA1, verta);
- HDMI_WRITE(VC4_HDMI_VERTB0, vertb);
+ HDMI_WRITE(VC4_HDMI_VERTB0, vertb_even);
HDMI_WRITE(VC4_HDMI_VERTB1, vertb);
HD_WRITE(VC4_HD_VID_CTL,
(vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) |
(hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW));
+ csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
+ VC4_HD_CSC_CTL_ORDER);
+
+ if (vc4_encoder->hdmi_monitor && drm_match_cea_mode(mode) > 1) {
+ /* CEA VICs other than #1 require limited-range RGB
+ * output unless overridden by an AVI infoframe.
+ * Apply a colorspace conversion to squash 0-255 down
+ * to 16-235. The matrix here is:
+ *
+ * [ 0 0 0.8594 16]
+ * [ 0 0.8594 0 16]
+ * [ 0.8594 0 0 16]
+ * [ 0 0 0 1]
+ */
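+ /* 0x6e0 = 1760 ≈ 0.8594 * 2048, which suggests signed 2.11
+ * fixed-point coefficients; 0x100 appears to encode the +16 offset.
+ */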
+ csc_ctl |= VC4_HD_CSC_CTL_ENABLE;
+ csc_ctl |= VC4_HD_CSC_CTL_RGB2YCC;
+ csc_ctl |= VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
+ VC4_HD_CSC_CTL_MODE);
+
+ HD_WRITE(VC4_HD_CSC_12_11, (0x000 << 16) | 0x000);
+ HD_WRITE(VC4_HD_CSC_14_13, (0x100 << 16) | 0x6e0);
+ HD_WRITE(VC4_HD_CSC_22_21, (0x6e0 << 16) | 0x000);
+ HD_WRITE(VC4_HD_CSC_24_23, (0x100 << 16) | 0x000);
+ HD_WRITE(VC4_HD_CSC_32_31, (0x000 << 16) | 0x6e0);
+ HD_WRITE(VC4_HD_CSC_34_33, (0x100 << 16) | 0x000);
+ vc4_encoder->limited_rgb_range = true;
+ } else {
+ vc4_encoder->limited_rgb_range = false;
+ }
+
/* The RGB order applies even when CSC is disabled. */
- HD_WRITE(VC4_HD_CSC_CTL, VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
- VC4_HD_CSC_CTL_ORDER));
+ HD_WRITE(VC4_HD_CSC_CTL, csc_ctl);
HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
@@ -343,6 +506,8 @@ static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, 0);
+
HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16);
HD_WRITE(VC4_HD_VID_CTL,
HD_READ(VC4_HD_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
@@ -369,7 +534,7 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
ret = wait_for(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
- VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1);
+ VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1000);
WARN_ONCE(ret, "Timeout waiting for "
"VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
} else {
@@ -381,7 +546,7 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
ret = wait_for(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
- VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1);
+ VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1000);
WARN_ONCE(ret, "Timeout waiting for "
"!VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
}
@@ -395,9 +560,10 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT);
- /* XXX: Set HDMI_RAM_PACKET_CONFIG (1 << 16) and set
- * up the infoframe.
- */
+ HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
+ VC4_HDMI_RAM_PACKET_ENABLE);
+
+ vc4_hdmi_set_infoframes(encoder);
drift = HDMI_READ(VC4_HDMI_FIFO_CTL);
drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 4ac894d993cd..c1f65c6c8e60 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -44,7 +44,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
drm_atomic_helper_commit_modeset_disables(dev, state);
- drm_atomic_helper_commit_planes(dev, state, false);
+ drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_modeset_enables(dev, state);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 29e4b400e25e..881bf489478b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -735,8 +735,6 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
}
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
- .prepare_fb = NULL,
- .cleanup_fb = NULL,
.atomic_check = vc4_plane_atomic_check,
.atomic_update = vc4_plane_atomic_update,
};
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 160942a9180e..1aa44c2db556 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -175,6 +175,8 @@
# define PV_CONTROL_CLR_AT_START BIT(14)
# define PV_CONTROL_TRIGGER_UNDERFLOW BIT(13)
# define PV_CONTROL_WAIT_HSTART BIT(12)
+# define PV_CONTROL_PIXEL_REP_MASK VC4_MASK(5, 4)
+# define PV_CONTROL_PIXEL_REP_SHIFT 4
# define PV_CONTROL_CLK_SELECT_DSI_VEC 0
# define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI 1
# define PV_CONTROL_CLK_SELECT_MASK VC4_MASK(3, 2)
@@ -183,6 +185,9 @@
# define PV_CONTROL_EN BIT(0)
#define PV_V_CONTROL 0x04
+# define PV_VCONTROL_ODD_DELAY_MASK VC4_MASK(22, 6)
+# define PV_VCONTROL_ODD_DELAY_SHIFT 6
+# define PV_VCONTROL_ODD_FIRST BIT(5)
# define PV_VCONTROL_INTERLACE BIT(4)
# define PV_VCONTROL_CONTINUOUS BIT(1)
# define PV_VCONTROL_VIDEN BIT(0)
@@ -438,6 +443,8 @@
#define VC4_HDMI_RAM_PACKET_CONFIG 0x0a0
# define VC4_HDMI_RAM_PACKET_ENABLE BIT(16)
+#define VC4_HDMI_RAM_PACKET_STATUS 0x0a4
+
#define VC4_HDMI_HORZA 0x0c4
# define VC4_HDMI_HORZA_VPOS BIT(14)
# define VC4_HDMI_HORZA_HPOS BIT(13)
@@ -499,6 +506,9 @@
#define VC4_HDMI_TX_PHY_RESET_CTL 0x2c0
+#define VC4_HDMI_GCP_0 0x400
+#define VC4_HDMI_PACKET_STRIDE 0x24
+
#define VC4_HD_M_CTL 0x00c
# define VC4_HD_M_REGISTER_FILE_STANDBY (3 << 6)
# define VC4_HD_M_RAM_STANDBY (3 << 4)
@@ -528,10 +538,17 @@
# define VC4_HD_CSC_CTL_MODE_SHIFT 2
# define VC4_HD_CSC_CTL_MODE_RGB_TO_SD_YPRPB 0
# define VC4_HD_CSC_CTL_MODE_RGB_TO_HD_YPRPB 1
-# define VC4_HD_CSC_CTL_MODE_CUSTOM 2
+# define VC4_HD_CSC_CTL_MODE_CUSTOM 3
# define VC4_HD_CSC_CTL_RGB2YCC BIT(1)
# define VC4_HD_CSC_CTL_ENABLE BIT(0)
+#define VC4_HD_CSC_12_11 0x044
+#define VC4_HD_CSC_14_13 0x048
+#define VC4_HD_CSC_22_21 0x04c
+#define VC4_HD_CSC_24_23 0x050
+#define VC4_HD_CSC_32_31 0x054
+#define VC4_HD_CSC_34_33 0x058
+
#define VC4_HD_FRAME_COUNT 0x068
/* HVS display list information. */
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 0f12418725e5..08886a309757 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -45,6 +45,8 @@ struct vc4_rcl_setup {
struct drm_gem_cma_object *rcl;
u32 next_offset;
+
+ u32 next_write_bo_index;
};
static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
@@ -407,6 +409,8 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
if (!*obj)
return -EINVAL;
+ exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
+
if (surf->offset & 0xf) {
DRM_ERROR("MSAA write must be 16b aligned.\n");
return -EINVAL;
@@ -417,7 +421,8 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
struct drm_gem_cma_object **obj,
- struct drm_vc4_submit_rcl_surface *surf)
+ struct drm_vc4_submit_rcl_surface *surf,
+ bool is_write)
{
uint8_t tiling = VC4_GET_FIELD(surf->bits,
VC4_LOADSTORE_TILE_BUFFER_TILING);
@@ -440,6 +445,9 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
if (!*obj)
return -EINVAL;
+ if (is_write)
+ exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
+
if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
if (surf == &exec->args->zs_write) {
DRM_ERROR("general zs write may not be a full-res.\n");
@@ -542,6 +550,8 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
if (!*obj)
return -EINVAL;
+ exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
+
if (tiling > VC4_TILING_FORMAT_LT) {
DRM_ERROR("Bad tiling format\n");
return -EINVAL;
@@ -599,15 +609,18 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
if (ret)
return ret;
- ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
+ ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read,
+ false);
if (ret)
return ret;
- ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
+ ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read,
+ false);
if (ret)
return ret;
- ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
+ ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write,
+ true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 9ce1d0adf882..26503e307438 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -267,6 +267,9 @@ validate_indexed_prim_list(VALIDATE_ARGS)
if (!ib)
return -EINVAL;
+ exec->bin_dep_seqno = max(exec->bin_dep_seqno,
+ to_vc4_bo(&ib->base)->write_seqno);
+
if (offset > ib->base.size ||
(ib->base.size - offset) / index_size < length) {
DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
@@ -555,8 +558,7 @@ static bool
reloc_tex(struct vc4_exec_info *exec,
void *uniform_data_u,
struct vc4_texture_sample_info *sample,
- uint32_t texture_handle_index)
-
+ uint32_t texture_handle_index, bool is_cs)
{
struct drm_gem_cma_object *tex;
uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
@@ -714,6 +716,11 @@ reloc_tex(struct vc4_exec_info *exec,
*validated_p0 = tex->paddr + p0;
+ if (is_cs) {
+ exec->bin_dep_seqno = max(exec->bin_dep_seqno,
+ to_vc4_bo(&tex->base)->write_seqno);
+ }
+
return true;
fail:
DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
@@ -835,7 +842,8 @@ validate_gl_shader_rec(struct drm_device *dev,
if (!reloc_tex(exec,
uniform_data_u,
&validated_shader->texture_samples[tex],
- texture_handles_u[tex])) {
+ texture_handles_u[tex],
+ i == 2)) {
return -EINVAL;
}
}
@@ -867,6 +875,9 @@ validate_gl_shader_rec(struct drm_device *dev,
uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
uint32_t max_index;
+ exec->bin_dep_seqno = max(exec->bin_dep_seqno,
+ to_vc4_bo(&vbo->base)->write_seqno);
+
if (state->addr & 0x8)
stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index c15bafb06665..f36c14729b55 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -334,8 +334,8 @@ static int __init vgem_init(void)
int ret;
vgem_device = drm_dev_alloc(&vgem_driver, NULL);
- if (!vgem_device) {
- ret = -ENOMEM;
+ if (IS_ERR(vgem_device)) {
+ ret = PTR_ERR(vgem_device);
goto out;
}
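As the hunk above shows, drm_dev_alloc() now encodes failures as an ERR_PTR value rather than returning NULL, so callers switch from a NULL check to the IS_ERR()/PTR_ERR() helpers. A minimal sketch of the updated calling convention (names abbreviated):

	struct drm_device *ddev;

	ddev = drm_dev_alloc(&driver, parent);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);	/* e.g. -ENOMEM */

The same conversion appears again below for virtio-gpu's drm_virtio_init().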
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 7e2a12c4fed2..1a3ad769f8c8 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
down_read(&current->mm->mmap_sem);
ret = get_user_pages((unsigned long)xfer->mem_addr,
vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE),
- 0, vsg->pages, NULL);
+ (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+ vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
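This hunk reflects the reworked get_user_pages() interface: the separate write/force integer arguments are folded into a single gup_flags word, with FOLL_WRITE requesting writable pages. A minimal sketch of the new call shape (variable names are placeholders):

	/* pin nr_pages user pages, writable only if the device will DMA into them */
	ret = get_user_pages(user_addr, nr_pages,
			     writes_to_memory ? FOLL_WRITE : 0,
			     pages, NULL);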
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index ed8aa8ff861a..e5582bab7e3c 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -72,7 +72,7 @@ static const struct file_operations via_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
+ DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY |
DRIVER_IRQ_SHARED,
.load = via_driver_load,
.unload = via_driver_unload,
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4e192aa2d021..58048709c34e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -338,7 +338,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_commit_planes(dev, state, true);
+ drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_hw_done(state);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 7f0e93f87a55..49e5996cb9f2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -24,9 +24,20 @@
*/
#include <linux/pci.h>
+#include <drm/drm_fb_helper.h>
#include "virtgpu_drv.h"
+int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (pdev) {
+ return drm_pci_set_busid(dev, master);
+ }
+ return 0;
+}
+
static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
{
struct apertures_struct *ap;
@@ -42,7 +53,7 @@ static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
& IORESOURCE_ROM_SHADOW;
- remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
+ drm_fb_helper_remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
kfree(ap);
}
@@ -53,8 +64,8 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
int ret;
dev = drm_dev_alloc(driver, &vdev->dev);
- if (!dev)
- return -ENOMEM;
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
dev->virtdev = vdev;
vdev->priv = dev;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index c13f70cfc461..5820b7020ae5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -117,6 +117,7 @@ static const struct file_operations virtio_gpu_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
+ .set_busid = drm_virtio_set_busid,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
.open = virtio_gpu_driver_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index b18ef3111f0c..ae59080d63d1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -49,6 +49,7 @@
#define DRIVER_PATCHLEVEL 1
/* virtgpu_drm_bus.c */
+int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
struct virtio_gpu_object {
@@ -75,6 +76,7 @@ typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence_driver {
atomic64_t last_seq;
uint64_t sync_seq;
+ uint64_t context;
struct list_head fences;
spinlock_t lock;
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index cf4418709e76..f3f70fa8a4c7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -89,7 +89,7 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
(*fence)->drv = drv;
(*fence)->seq = ++drv->sync_seq;
fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
- 0, (*fence)->seq);
+ drv->context, (*fence)->seq);
fence_get(&(*fence)->f);
list_add_tail(&(*fence)->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index c046903cb47b..818478b4c4f0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -89,10 +89,16 @@ static void virtio_gpu_unref_list(struct list_head *head)
}
}
-static int virtio_gpu_execbuffer(struct drm_device *dev,
- struct drm_virtgpu_execbuffer *exbuf,
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full VIRTIO_GPUDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
+ */
+static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *drm_file)
{
+ struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
struct drm_gem_object *gobj;
@@ -152,15 +158,10 @@ static int virtio_gpu_execbuffer(struct drm_device *dev,
if (ret)
goto out_free;
- buf = kmalloc(exbuf->size, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto out_unresv;
- }
- if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
- exbuf->size)) {
- kfree(buf);
- ret = -EFAULT;
+ buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
+ exbuf->size);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
goto out_unresv;
}
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
@@ -182,20 +183,6 @@ out_free:
return ret;
}
-/*
- * Usage of execbuffer:
- * Relocations need to take into account the full VIRTIO_GPUDrawable size.
- * However, the command as passed from user space must *not* contain the initial
- * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
- */
-static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_virtgpu_execbuffer *execbuffer = data;
- return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
-}
-
-
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
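The conversion to memdup_user() above folds the kmalloc()/copy_from_user() pair into one helper that returns an ERR_PTR-encoded error on failure. The general pattern, as a minimal sketch with placeholder names:

	void *buf;

	buf = memdup_user(user_ptr, size);	/* allocate + copy in one step */
	if (IS_ERR(buf))
		return PTR_ERR(buf);		/* -ENOMEM or -EFAULT */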
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 4150873d432e..036b0fbae0fb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -159,6 +159,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
+ vgdev->fence_drv.context = fence_context_alloc(1);
spin_lock_init(&vgdev->fence_drv.lock);
INIT_LIST_HEAD(&vgdev->fence_drv.fences);
INIT_LIST_HEAD(&vgdev->cap_cache);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 925ca25209df..ba28c0f6f28a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -76,7 +76,8 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
if (old_state->crtc)
output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
- WARN_ON(!output);
+ if (WARN_ON(!output))
+ return;
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
@@ -129,7 +130,8 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
if (old_state->crtc)
output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
- WARN_ON(!output);
+ if (WARN_ON(!output))
+ return;
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index b49445df8a7e..fb7b82aad763 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -6,6 +6,7 @@ config DRM_VMWGFX
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select DRM_TTM
+ select FB
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e8ae3dc476d1..18061a4bc2f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
-module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
-module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
-module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 74304b03f9d4..1e59a486bba8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,7 +43,7 @@
#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 10
+#define VMWGFX_DRIVER_MINOR 11
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -67,10 +67,10 @@
VMWGFX_NUM_GB_SURFACE +\
VMWGFX_NUM_GB_SCREEN_TARGET)
-#define VMW_PL_GMR TTM_PL_PRIV0
-#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
-#define VMW_PL_MOB TTM_PL_PRIV1
-#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1
+#define VMW_PL_GMR (TTM_PL_PRIV + 0)
+#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
+#define VMW_PL_MOB (TTM_PL_PRIV + 1)
+#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dc5beff2b4aa..c7b53d987f06 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,17 +35,37 @@
#define VMW_RES_HT_ORDER 12
/**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+ vmw_res_rel_normal,
+ vmw_res_rel_nop,
+ vmw_res_rel_cond_nop,
+ vmw_res_rel_max
+};
+
+/**
* struct vmw_resource_relocation - Relocation info for resources
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
+ * @offset: Offset of single byte entries into the command buffer where the
* id that needs fixup is located.
+ * @rel_type: Type of relocation.
*/
struct vmw_resource_relocation {
struct list_head head;
const struct vmw_resource *res;
- unsigned long offset;
+ u32 offset:29;
+ enum vmw_resource_relocation_type rel_type:3;
};
/**
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
-
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+ return (unsigned long) b - (unsigned long) a;
+}
/**
* vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* @list: Pointer to head of relocation list.
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
+ * id that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
*/
static int vmw_resource_relocation_add(struct list_head *list,
const struct vmw_resource *res,
- unsigned long offset)
+ unsigned long offset,
+ enum vmw_resource_relocation_type
+ rel_type)
{
struct vmw_resource_relocation *rel;
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
rel->res = res;
rel->offset = offset;
+ rel->rel_type = rel_type;
list_add_tail(&rel->head, list);
return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
+ /* Validate the struct vmw_resource_relocation member size */
+ BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+ BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
list_for_each_entry(rel, list, head) {
- if (likely(rel->res != NULL))
- cb[rel->offset] = rel->res->id;
- else
- cb[rel->offset] = SVGA_3D_CMD_NOP;
+ u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+ switch (rel->rel_type) {
+ case vmw_res_rel_normal:
+ *addr = rel->res->id;
+ break;
+ case vmw_res_rel_nop:
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ default:
+ if (rel->res->id == -1)
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ }
}
}
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start,
+ id_loc),
+ vmw_res_rel_normal);
if (unlikely(ret != 0))
return ret;
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start, id_loc),
+ vmw_res_rel_normal);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
* @header: Pointer to the command header in the command stream.
*
* Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
return ret;
/*
- * Add view to the validate list iff it was not created using this
- * command batch.
+ * If the view wasn't created during this command batch, it might
+ * have been removed due to a context swapout, so add a
+ * relocation to conditionally make this command a NOP to avoid
+ * device errors.
*/
- return vmw_view_res_val_add(sw_context, view);
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ view,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_cond_nop);
}
/**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
cmd->body.shaderResourceViewId);
}
+/**
+ * vmw_cmd_dx_transfer_from_buffer -
+ * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXTransferFromBuffer body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcSid, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.destSid, NULL);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_buffer_copy_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
&vmw_cmd_pred_copy_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+ &vmw_cmd_dx_transfer_from_buffer,
+ true, false, true),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
int ret;
*header = NULL;
- if (!dev_priv->cman || kernel_commands)
- return kernel_commands;
-
if (command_size > SVGA_CB_MAX_SIZE) {
DRM_ERROR("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
+ if (!dev_priv->cman || kernel_commands)
+ return kernel_commands;
+
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
vmw_dmabuf_unreference(&dev_priv->pinned_bo);
- DRM_INFO("Dummy query bo pin count: %d\n",
- dev_priv->dummy_query_bo->pin_count);
-
out_unlock:
return;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 63ccd9871ec9..23ec673d5e16 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -377,9 +377,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_object_attach_property(&connector->base,
- dev->mode_config.dirty_info_property,
- 1);
- drm_object_attach_property(&connector->base,
dev_priv->hotplug_mode_update_property, 1);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_x_property, 0);
@@ -421,10 +418,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
if (ret != 0)
goto err_free;
- ret = drm_mode_create_dirty_info_property(dev);
- if (ret != 0)
- goto err_vblank_cleanup;
-
vmw_kms_create_implicit_placement_property(dev_priv, true);
if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
@@ -439,8 +432,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
return 0;
-err_vblank_cleanup:
- drm_vblank_cleanup(dev);
err_free:
kfree(dev_priv->ldu_priv);
dev_priv->ldu_priv = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6a328d507a28..52ca1c9d070e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
- if (nonblock)
- return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
-
- lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+ lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b74eae2b8594..f42359084adc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -538,9 +538,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_object_attach_property(&connector->base,
- dev->mode_config.dirty_info_property,
- 1);
- drm_object_attach_property(&connector->base,
dev_priv->hotplug_mode_update_property, 1);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_x_property, 0);
@@ -574,10 +571,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = drm_mode_create_dirty_info_property(dev);
- if (unlikely(ret != 0))
- goto err_vblank_cleanup;
-
vmw_kms_create_implicit_placement_property(dev_priv, false);
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
@@ -588,10 +581,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
DRM_INFO("Screen Objects Display Unit initialized\n");
return 0;
-
-err_vblank_cleanup:
- drm_vblank_cleanup(dev);
- return ret;
}
int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 41932a7c4f79..94ad8d2acf9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1131,9 +1131,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_object_attach_property(&connector->base,
- dev->mode_config.dirty_info_property,
- 1);
- drm_object_attach_property(&connector->base,
dev_priv->hotplug_mode_update_property, 1);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_x_property, 0);
@@ -1202,10 +1199,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = drm_mode_create_dirty_info_property(dev);
- if (unlikely(ret != 0))
- goto err_vblank_cleanup;
-
dev_priv->active_display_unit = vmw_du_screen_target;
vmw_kms_create_implicit_placement_property(dev_priv, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c2a721a8cef9..b445ce9b9757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
submit_size = vmw_surface_define_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
uint8_t *cmd;
struct vmw_private *dev_priv = res->dev_priv;
- BUG_ON(val_buf->bo == NULL);
-
+ BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"DMA.\n");
return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
submit_size = vmw_surface_destroy_size();
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"eviction.\n");
return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
int ret;
struct vmw_resource *res = &srf->res;
- BUG_ON(res_free == NULL);
+ BUG_ON(!res_free);
if (!dev_priv->has_mob)
vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
int ret;
int i, j;
uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
srf->num_sizes = num_sizes;
user_srf->size = size;
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
+ srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+ sizeof(*srf->sizes) * srf->num_sizes);
+ if (IS_ERR(srf->sizes)) {
+ ret = PTR_ERR(srf->sizes);
goto out_no_sizes;
}
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
- if (unlikely(srf->offsets == NULL)) {
+ srf->offsets = kmalloc_array(srf->num_sizes,
+ sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(!srf->offsets)) {
ret = -ENOMEM;
goto out_no_offsets;
}
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_no_copy;
- }
-
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->multisample_count = 0;
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
- if (unlikely(base == NULL)) {
+ if (unlikely(!base)) {
DRM_ERROR("Could not find surface to reference.\n");
goto out_no_lookup;
}
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = vmw_fifo_reserve(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd1 == NULL)) {
+ if (unlikely(!cmd1)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"binding.\n");
return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"unbinding.\n");
return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- if (srf->res.backup == NULL) {
+ if (!srf->res.backup) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 107ec236a4a6..5f961416c4ee 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
- ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-smfc.o
+ ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
+ ipu-smfc.o ipu-vdi.o
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 99dcacf05b99..b9539f7c5e9a 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -45,6 +45,12 @@ static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
writel(value, ipu->cm_reg + offset);
}
+int ipu_get_num(struct ipu_soc *ipu)
+{
+ return ipu->id;
+}
+EXPORT_SYMBOL_GPL(ipu_get_num);
+
void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
{
u32 val;
@@ -724,6 +730,137 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
}
EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
+
+/* Frame Synchronization Unit Channel Linking */
+
+struct fsu_link_reg_info {
+ int chno;
+ u32 reg;
+ u32 mask;
+ u32 val;
+};
+
+struct fsu_link_info {
+ struct fsu_link_reg_info src;
+ struct fsu_link_reg_info sink;
+};
+
+static const struct fsu_link_info fsu_link_info[] = {
+ {
+ .src = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
+ FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
+ .sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
+ FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
+ }, {
+ .src = { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
+ FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
+ .sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
+ FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
+ }, {
+ .src = { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
+ FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
+ .sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
+ FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
+ }, {
+ .src = { IPUV3_CHANNEL_CSI_DIRECT, 0 },
+ .sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
+ FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
+ },
+};
+
+static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
+ if (src == fsu_link_info[i].src.chno &&
+ sink == fsu_link_info[i].sink.chno)
+ return &fsu_link_info[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * Links a source channel to a sink channel in the FSU.
+ */
+int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+ const struct fsu_link_info *link;
+ u32 src_reg, sink_reg;
+ unsigned long flags;
+
+ link = find_fsu_link_info(src_ch, sink_ch);
+ if (!link)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ if (link->src.mask) {
+ src_reg = ipu_cm_read(ipu, link->src.reg);
+ src_reg &= ~link->src.mask;
+ src_reg |= link->src.val;
+ ipu_cm_write(ipu, src_reg, link->src.reg);
+ }
+
+ if (link->sink.mask) {
+ sink_reg = ipu_cm_read(ipu, link->sink.reg);
+ sink_reg &= ~link->sink.mask;
+ sink_reg |= link->sink.val;
+ ipu_cm_write(ipu, sink_reg, link->sink.reg);
+ }
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_link);
+
+/*
+ * Unlinks source and sink channels in the FSU.
+ */
+int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
+{
+ const struct fsu_link_info *link;
+ u32 src_reg, sink_reg;
+ unsigned long flags;
+
+ link = find_fsu_link_info(src_ch, sink_ch);
+ if (!link)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ if (link->src.mask) {
+ src_reg = ipu_cm_read(ipu, link->src.reg);
+ src_reg &= ~link->src.mask;
+ ipu_cm_write(ipu, src_reg, link->src.reg);
+ }
+
+ if (link->sink.mask) {
+ sink_reg = ipu_cm_read(ipu, link->sink.reg);
+ sink_reg &= ~link->sink.mask;
+ ipu_cm_write(ipu, sink_reg, link->sink.reg);
+ }
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
+
+/* Link IDMAC channels in the FSU */
+int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+ return ipu_fsu_link(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_link);
+
+/* Unlink IDMAC channels in the FSU */
+int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
+{
+ return ipu_fsu_unlink(src->ipu, src->num, sink->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
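ipu_fsu_link() only succeeds for source/sink pairs present in fsu_link_info[]; anything else returns -EINVAL. A hypothetical client driver routing the direct CSI path into the VDI (one of the pairs in the table above) might do:

	int ret;

	/* feed the CSI directly into the VDI 'previous field' input */
	ret = ipu_fsu_link(ipu, IPUV3_CHANNEL_CSI_DIRECT,
			   IPUV3_CHANNEL_CSI_VDI_PREV);
	if (ret)
		return ret;	/* pair not in fsu_link_info[] */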
+
struct ipu_devtype {
const char *name;
unsigned long cm_ofs;
@@ -833,6 +970,20 @@ static int ipu_submodules_init(struct ipu_soc *ipu,
goto err_ic;
}
+ ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
+ IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
+ IPU_CONF_IC_INPUT);
+ if (ret) {
+ unit = "vdi";
+ goto err_vdi;
+ }
+
+ ret = ipu_image_convert_init(ipu, dev);
+ if (ret) {
+ unit = "image_convert";
+ goto err_image_convert;
+ }
+
ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
IPU_CONF_DI0_EN, ipu_clk);
if (ret) {
@@ -887,6 +1038,10 @@ err_dc:
err_di_1:
ipu_di_exit(ipu, 0);
err_di_0:
+ ipu_image_convert_exit(ipu);
+err_image_convert:
+ ipu_vdi_exit(ipu);
+err_vdi:
ipu_ic_exit(ipu);
err_ic:
ipu_csi_exit(ipu, 1);
@@ -971,6 +1126,8 @@ static void ipu_submodules_exit(struct ipu_soc *ipu)
ipu_dc_exit(ipu);
ipu_di_exit(ipu, 1);
ipu_di_exit(ipu, 0);
+ ipu_image_convert_exit(ipu);
+ ipu_vdi_exit(ipu);
ipu_ic_exit(ipu);
ipu_csi_exit(ipu, 1);
ipu_csi_exit(ipu, 0);
@@ -1004,14 +1161,14 @@ static struct ipu_platform_reg client_reg[] = {
.dma[0] = IPUV3_CHANNEL_CSI0,
.dma[1] = -EINVAL,
},
- .name = "imx-ipuv3-camera",
+ .name = "imx-ipuv3-csi",
}, {
.pdata = {
.csi = 1,
.dma[0] = IPUV3_CHANNEL_CSI1,
.dma[1] = -EINVAL,
},
- .name = "imx-ipuv3-camera",
+ .name = "imx-ipuv3-csi",
}, {
.pdata = {
.di = 0,
@@ -1207,15 +1364,16 @@ EXPORT_SYMBOL_GPL(ipu_dump);
static int ipu_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(imx_ipu_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
struct ipu_soc *ipu;
struct resource *res;
unsigned long ipu_base;
int i, ret, irq_sync, irq_err;
const struct ipu_devtype *devtype;
- devtype = of_id->data;
+ devtype = of_device_get_match_data(&pdev->dev);
+ if (!devtype)
+ return -EINVAL;
irq_sync = platform_get_irq(pdev, 0);
irq_err = platform_get_irq(pdev, 1);
@@ -1237,6 +1395,7 @@ static int ipu_probe(struct platform_device *pdev)
ipu->channel[i].ipu = ipu;
ipu->devtype = devtype;
ipu->ipu_type = devtype->type;
+ ipu->id = of_alias_get_id(np, "ipu");
spin_lock_init(&ipu->lock);
mutex_init(&ipu->channel_lock);
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 6494a4d28171..fcb7dc86167b 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -253,6 +253,13 @@ void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
+void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
+{
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_off / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_off / 8);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
+
void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
{
ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
@@ -268,6 +275,12 @@ void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id)
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_axi_id);
+int ipu_cpmem_get_burstsize(struct ipuv3_channel *ch)
+{
+ return ipu_ch_param_read_field(ch, IPU_FIELD_NPB) + 1;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_get_burstsize);
+
void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize)
{
ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1);
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 06631ac61b04..d6e5ded24418 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -258,12 +258,8 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_UYVY8_1X16:
- cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
- cfg->mipi_dt = MIPI_DT_YUV422;
- cfg->data_width = IPU_CSI_DATA_WIDTH_16;
- break;
case MEDIA_BUS_FMT_YUYV8_1X16:
- cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_YUV422;
cfg->data_width = IPU_CSI_DATA_WIDTH_16;
break;
@@ -365,10 +361,14 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
{
struct ipu_csi_bus_config cfg;
unsigned long flags;
- u32 data = 0;
+ u32 width, height, data = 0;
fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+ /* set default sensor frame width and height */
+ width = mbus_fmt->width;
+ height = mbus_fmt->height;
+
/* Set the CSI_SENS_CONF register remaining fields */
data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
cfg.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
@@ -386,11 +386,6 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
ipu_csi_write(csi, data, CSI_SENS_CONF);
- /* Setup sensor frame size */
- ipu_csi_write(csi,
- (mbus_fmt->width - 1) | ((mbus_fmt->height - 1) << 16),
- CSI_SENS_FRM_SIZE);
-
/* Set CCIR registers */
switch (cfg.clk_mode) {
@@ -408,11 +403,12 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
* Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
* Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
*/
+ height = 625; /* framelines for PAL */
+
ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
CSI_CCIR_CODE_1);
ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
-
} else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) {
/*
* NTSC case
@@ -422,6 +418,8 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
* Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
* Field1ActiveEnd = 0x4, Field1ActiveStart = 0
*/
+ height = 525; /* framelines for NTSC */
+
ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
CSI_CCIR_CODE_1);
ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
@@ -447,6 +445,10 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
break;
}
+ /* Setup sensor frame size */
+ ipu_csi_write(csi, (width - 1) | ((height - 1) << 16),
+ CSI_SENS_FRM_SIZE);
+
dev_dbg(csi->ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
ipu_csi_read(csi, CSI_SENS_CONF));
dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 42705bb5aaa3..a40f211f382f 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -123,20 +123,6 @@ int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
}
EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
-static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
- while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
- if (time_after(jiffies, timeout)) {
- dev_warn(priv->dev,
- "Timeout waiting for DMFC FIFOs to clear\n");
- break;
- }
- cpu_relax();
- }
-}
-
void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -145,10 +131,8 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
priv->use_count--;
- if (!priv->use_count) {
- ipu_dmfc_wait_fifos(priv);
+ if (!priv->use_count)
ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
- }
if (priv->use_count < 0)
priv->use_count = 0;
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 1dcb96ccda66..321eb983c2f5 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -160,6 +160,7 @@ struct ipu_ic_priv {
spinlock_t lock;
struct ipu_soc *ipu;
int use_count;
+ int irt_use_count;
struct ipu_ic task[IC_NUM_TASKS];
};
@@ -379,8 +380,6 @@ void ipu_ic_task_disable(struct ipu_ic *ic)
ipu_ic_write(ic, ic_conf, IC_CONF);
- ic->rotation = ic->graphics = false;
-
spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_ic_task_disable);
@@ -620,7 +619,7 @@ int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
- if (rot >= IPU_ROTATE_90_RIGHT)
+ if (ipu_rot_mode_is_irt(rot))
ic->rotation = true;
unlock:
@@ -629,22 +628,41 @@ unlock:
}
EXPORT_SYMBOL_GPL(ipu_ic_task_idma_init);
+static void ipu_irt_enable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+
+ if (!priv->irt_use_count)
+ ipu_module_enable(priv->ipu, IPU_CONF_ROT_EN);
+
+ priv->irt_use_count++;
+}
+
+static void ipu_irt_disable(struct ipu_ic *ic)
+{
+ struct ipu_ic_priv *priv = ic->priv;
+
+ if (priv->irt_use_count) {
+ if (!--priv->irt_use_count)
+ ipu_module_disable(priv->ipu, IPU_CONF_ROT_EN);
+ }
+}
+
int ipu_ic_enable(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
unsigned long flags;
- u32 module = IPU_CONF_IC_EN;
spin_lock_irqsave(&priv->lock, flags);
- if (ic->rotation)
- module |= IPU_CONF_ROT_EN;
-
if (!priv->use_count)
- ipu_module_enable(priv->ipu, module);
+ ipu_module_enable(priv->ipu, IPU_CONF_IC_EN);
priv->use_count++;
+ if (ic->rotation)
+ ipu_irt_enable(ic);
+
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
@@ -655,18 +673,22 @@ int ipu_ic_disable(struct ipu_ic *ic)
{
struct ipu_ic_priv *priv = ic->priv;
unsigned long flags;
- u32 module = IPU_CONF_IC_EN | IPU_CONF_ROT_EN;
spin_lock_irqsave(&priv->lock, flags);
priv->use_count--;
if (!priv->use_count)
- ipu_module_disable(priv->ipu, module);
+ ipu_module_disable(priv->ipu, IPU_CONF_IC_EN);
if (priv->use_count < 0)
priv->use_count = 0;
+ if (ic->rotation)
+ ipu_irt_disable(ic);
+
+ ic->rotation = ic->graphics = false;
+
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
new file mode 100644
index 000000000000..805b6fa7b5f4
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -0,0 +1,1709 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ *
+ * Queued image conversion support, with tiling and rotation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <video/imx-ipu-image-convert.h>
+#include "ipu-prv.h"
+
+/*
+ * The IC Resizer has a restriction that the output frame from the
+ * resizer must be 1024 or less in both width (pixels) and height
+ * (lines).
+ *
+ * The image converter attempts to split up a conversion when
+ * the desired output (converted) frame resolution exceeds the
+ * IC resizer limit of 1024 in either dimension.
+ *
+ * If either dimension of the output frame exceeds the limit, the
+ * dimension is split into 1, 2, or 4 equal stripes, for a maximum
+ * of 4*4 or 16 tiles. A conversion is then carried out for each
+ * tile (but taking care to pass the full frame stride length to
+ * the DMA channel's parameter memory!). IDMA double-buffering is used
+ * to convert each tile back-to-back when possible (see note below
+ * when double_buffering boolean is set).
+ *
+ * Note that the input frame must be split up into the same number
+ * of tiles as the output frame.
+ *
+ * FIXME: at this point there is no attempt to deal with visible seams
+ * at the tile boundaries when upscaling. The seams are caused by a reset
+ * of the bilinear upscale interpolation when starting a new tile. The
+ * seams are barely visible for small upscale factors, but become
+ * increasingly visible as the upscale factor gets larger, since more
+ * interpolated pixels get thrown out at the tile boundaries. A possible
+ * fix might be to overlap tiles of different sizes, but this must be done
+ * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
+ * alignment restrictions of each tile.
+ */
+
+#define MAX_STRIPES_W 4
+#define MAX_STRIPES_H 4
+#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)
+
+#define MIN_W 16
+#define MIN_H 8
+#define MAX_W 4096
+#define MAX_H 4096
+
+enum ipu_image_convert_type {
+ IMAGE_CONVERT_IN = 0,
+ IMAGE_CONVERT_OUT,
+};
+
+struct ipu_image_convert_dma_buf {
+ void *virt;
+ dma_addr_t phys;
+ unsigned long len;
+};
+
+struct ipu_image_convert_dma_chan {
+ int in;
+ int out;
+ int rot_in;
+ int rot_out;
+ int vdi_in_p;
+ int vdi_in;
+ int vdi_in_n;
+};
+
+/* dimensions of one tile */
+struct ipu_image_tile {
+ u32 width;
+ u32 height;
+ /* size and strides are in bytes */
+ u32 size;
+ u32 stride;
+ u32 rot_stride;
+ /* start Y or packed offset of this tile */
+ u32 offset;
+ /* offset from start to tile in U plane, for planar formats */
+ u32 u_off;
+ /* offset from start to tile in V plane, for planar formats */
+ u32 v_off;
+};
+
+struct ipu_image_convert_image {
+ struct ipu_image base;
+ enum ipu_image_convert_type type;
+
+ const struct ipu_image_pixfmt *fmt;
+ unsigned int stride;
+
+ /* # of rows (horizontal stripes) if dest height is > 1024 */
+ unsigned int num_rows;
+ /* # of columns (vertical stripes) if dest width is > 1024 */
+ unsigned int num_cols;
+
+ struct ipu_image_tile tile[MAX_TILES];
+};
+
+struct ipu_image_pixfmt {
+ u32 fourcc; /* V4L2 fourcc */
+ int bpp; /* total bpp */
+ int uv_width_dec; /* decimation in width for U/V planes */
+ int uv_height_dec; /* decimation in height for U/V planes */
+ bool planar; /* planar format */
+ bool uv_swapped; /* U and V planes are swapped */
+ bool uv_packed; /* partial planar (U and V in same plane) */
+};
+
+struct ipu_image_convert_ctx;
+struct ipu_image_convert_chan;
+struct ipu_image_convert_priv;
+
+struct ipu_image_convert_ctx {
+ struct ipu_image_convert_chan *chan;
+
+ ipu_image_convert_cb_t complete;
+ void *complete_context;
+
+ /* Source/destination image data and rotation mode */
+ struct ipu_image_convert_image in;
+ struct ipu_image_convert_image out;
+ enum ipu_rotate_mode rot_mode;
+
+ /* intermediate buffer for rotation */
+ struct ipu_image_convert_dma_buf rot_intermediate[2];
+
+ /* current buffer number for double buffering */
+ int cur_buf_num;
+
+ bool aborting;
+ struct completion aborted;
+
+ /* can we use double-buffering for this conversion operation? */
+ bool double_buffering;
+ /* num_rows * num_cols */
+ unsigned int num_tiles;
+ /* next tile to process */
+ unsigned int next_tile;
+ /* where to place converted tile in dest image */
+ unsigned int out_tile_map[MAX_TILES];
+
+ struct list_head list;
+};
+
+struct ipu_image_convert_chan {
+ struct ipu_image_convert_priv *priv;
+
+ enum ipu_ic_task ic_task;
+ const struct ipu_image_convert_dma_chan *dma_ch;
+
+ struct ipu_ic *ic;
+ struct ipuv3_channel *in_chan;
+ struct ipuv3_channel *out_chan;
+ struct ipuv3_channel *rotation_in_chan;
+ struct ipuv3_channel *rotation_out_chan;
+
+ /* the IPU end-of-frame irqs */
+ int out_eof_irq;
+ int rot_out_eof_irq;
+
+ spinlock_t irqlock;
+
+ /* list of convert contexts */
+ struct list_head ctx_list;
+ /* queue of conversion runs */
+ struct list_head pending_q;
+ /* queue of completed runs */
+ struct list_head done_q;
+
+ /* the current conversion run */
+ struct ipu_image_convert_run *current_run;
+};
+
+struct ipu_image_convert_priv {
+ struct ipu_image_convert_chan chan[IC_NUM_TASKS];
+ struct ipu_soc *ipu;
+};
+
+static const struct ipu_image_convert_dma_chan
+image_convert_dma_chan[IC_NUM_TASKS] = {
+ [IC_TASK_VIEWFINDER] = {
+ .in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
+ .out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
+ .rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
+ .rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
+ .vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
+ .vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
+ .vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
+ },
+ [IC_TASK_POST_PROCESSOR] = {
+ .in = IPUV3_CHANNEL_MEM_IC_PP,
+ .out = IPUV3_CHANNEL_IC_PP_MEM,
+ .rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
+ .rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
+ },
+};
+
+static const struct ipu_image_pixfmt image_convert_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .bpp = 16,
+ .uv_width_dec = 2,
+ .uv_height_dec = 1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .bpp = 16,
+ .uv_width_dec = 2,
+ .uv_height_dec = 1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .bpp = 12,
+ .planar = true,
+ .uv_width_dec = 2,
+ .uv_height_dec = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .bpp = 12,
+ .planar = true,
+ .uv_width_dec = 2,
+ .uv_height_dec = 2,
+ .uv_swapped = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .bpp = 12,
+ .planar = true,
+ .uv_width_dec = 2,
+ .uv_height_dec = 2,
+ .uv_packed = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .bpp = 16,
+ .planar = true,
+ .uv_width_dec = 2,
+ .uv_height_dec = 1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .bpp = 16,
+ .planar = true,
+ .uv_width_dec = 2,
+ .uv_height_dec = 1,
+ .uv_packed = true,
+ },
+};
+
+static const struct ipu_image_pixfmt *get_format(u32 fourcc)
+{
+ const struct ipu_image_pixfmt *ret = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
+ if (image_convert_formats[i].fourcc == fourcc) {
+ ret = &image_convert_formats[i];
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void dump_format(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *ic_image)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+
+ dev_dbg(priv->ipu->dev,
+ "task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
+ chan->ic_task, ctx,
+ ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
+ ic_image->base.pix.width, ic_image->base.pix.height,
+ ic_image->num_cols, ic_image->num_rows,
+ ic_image->tile[0].width, ic_image->tile[0].height,
+ ic_image->fmt->fourcc & 0xff,
+ (ic_image->fmt->fourcc >> 8) & 0xff,
+ (ic_image->fmt->fourcc >> 16) & 0xff,
+ (ic_image->fmt->fourcc >> 24) & 0xff);
+}
+
+int ipu_image_convert_enum_format(int index, u32 *fourcc)
+{
+ const struct ipu_image_pixfmt *fmt;
+
+ if (index >= (int)ARRAY_SIZE(image_convert_formats))
+ return -EINVAL;
+
+ /* Format found */
+ fmt = &image_convert_formats[index];
+ *fourcc = fmt->fourcc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);
+
+static void free_dma_buf(struct ipu_image_convert_priv *priv,
+ struct ipu_image_convert_dma_buf *buf)
+{
+ if (buf->virt)
+ dma_free_coherent(priv->ipu->dev,
+ buf->len, buf->virt, buf->phys);
+ buf->virt = NULL;
+ buf->phys = 0;
+}
+
+static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
+ struct ipu_image_convert_dma_buf *buf,
+ int size)
+{
+ buf->len = PAGE_ALIGN(size);
+ buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
+ GFP_DMA | GFP_KERNEL);
+ if (!buf->virt) {
+ dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline int num_stripes(int dim)
+{
+ if (dim <= 1024)
+ return 1;
+ else if (dim <= 2048)
+ return 2;
+ else
+ return 4;
+}
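As a worked example of the stripe logic just above: a 1920x1080 output exceeds the 1024-pixel resizer limit in both dimensions, so the frame splits into 2x2 stripes:

	num_cols = num_stripes(1920);		/* 2 */
	num_rows = num_stripes(1080);		/* 2 */
	num_tiles = num_cols * num_rows;	/* 4 tiles of 960x540 */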
+
+static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *image)
+{
+ int i;
+
+ for (i = 0; i < ctx->num_tiles; i++) {
+ struct ipu_image_tile *tile = &image->tile[i];
+
+ tile->height = image->base.pix.height / image->num_rows;
+ tile->width = image->base.pix.width / image->num_cols;
+ tile->size = ((tile->height * image->fmt->bpp) >> 3) *
+ tile->width;
+
+ if (image->fmt->planar) {
+ tile->stride = tile->width;
+ tile->rot_stride = tile->height;
+ } else {
+ tile->stride =
+ (image->fmt->bpp * tile->width) >> 3;
+ tile->rot_stride =
+ (image->fmt->bpp * tile->height) >> 3;
+ }
+ }
+}
+
+/*
+ * Use the rotation transformation to find the tile coordinates
+ * (row, col) of a tile in the destination frame that corresponds
+ * to the given tile coordinates of a source frame. The destination
+ * coordinate is then converted to a tile index.
+ */
+static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
+ int src_row, int src_col)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_convert_image *s_image = &ctx->in;
+ struct ipu_image_convert_image *d_image = &ctx->out;
+ int dst_row, dst_col;
+
+ /* with no rotation it's a 1:1 mapping */
+ if (ctx->rot_mode == IPU_ROTATE_NONE)
+ return src_row * s_image->num_cols + src_col;
+
+ /*
+	 * before doing the transform, we first have to translate the
+	 * source row,col to an origin in the center of s_image
+ */
+ src_row = src_row * 2 - (s_image->num_rows - 1);
+ src_col = src_col * 2 - (s_image->num_cols - 1);
+
+ /* do the rotation transform */
+ if (ctx->rot_mode & IPU_ROT_BIT_90) {
+ dst_col = -src_row;
+ dst_row = src_col;
+ } else {
+ dst_col = src_col;
+ dst_row = src_row;
+ }
+
+ /* apply flip */
+ if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+ dst_col = -dst_col;
+ if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
+ dst_row = -dst_row;
+
+ dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
+ chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);
+
+ /*
+	 * finally, translate the dest row,col back to an origin in the
+	 * upper left of d_image
+ */
+ dst_row += d_image->num_rows - 1;
+ dst_col += d_image->num_cols - 1;
+ dst_row /= 2;
+ dst_col /= 2;
+
+ return dst_row * d_image->num_cols + dst_col;
+}
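+
+/*
+ * Worked example: with 2x2 source and destination grids and a plain
+ * 90 degree rotation (IPU_ROT_BIT_90, no flips), source tile (0,0)
+ * maps to (-1,-1) about the center, transforms to row -1, col 1, and
+ * translates back to destination (row 0, col 1), i.e. tile index 1.
+ */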
+
+/*
+ * Fill the out_tile_map[] with transformed destination tile indices.
+ */
+static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
+{
+ struct ipu_image_convert_image *s_image = &ctx->in;
+ unsigned int row, col, tile = 0;
+
+ for (row = 0; row < s_image->num_rows; row++) {
+ for (col = 0; col < s_image->num_cols; col++) {
+ ctx->out_tile_map[tile] =
+ transform_tile_index(ctx, row, col);
+ tile++;
+ }
+ }
+}
+
+static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *image)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ const struct ipu_image_pixfmt *fmt = image->fmt;
+ unsigned int row, col, tile = 0;
+ u32 H, w, h, y_stride, uv_stride;
+ u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
+ u32 y_row_off, y_col_off, y_off;
+ u32 y_size, uv_size;
+
+ /* setup some convenience vars */
+ H = image->base.pix.height;
+
+ y_stride = image->stride;
+ uv_stride = y_stride / fmt->uv_width_dec;
+ if (fmt->uv_packed)
+ uv_stride *= 2;
+
+ y_size = H * y_stride;
+ uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
+
+ for (row = 0; row < image->num_rows; row++) {
+ w = image->tile[tile].width;
+ h = image->tile[tile].height;
+ y_row_off = row * h * y_stride;
+ uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
+
+ for (col = 0; col < image->num_cols; col++) {
+ y_col_off = col * w;
+ uv_col_off = y_col_off / fmt->uv_width_dec;
+ if (fmt->uv_packed)
+ uv_col_off *= 2;
+
+ y_off = y_row_off + y_col_off;
+ uv_off = uv_row_off + uv_col_off;
+
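+			/*
+			 * The IDMAC channel base address for this tile
+			 * will be phys0 + y_off, so the U/V offsets
+			 * programmed into the CPMEM must be relative to
+			 * y_off; the absolute U offset in the frame is
+			 * y_size + uv_off, hence the y_size - y_off term.
+			 */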
+ u_off = y_size - y_off + uv_off;
+ v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
+ if (fmt->uv_swapped) {
+ tmp = u_off;
+ u_off = v_off;
+ v_off = tmp;
+ }
+
+ image->tile[tile].offset = y_off;
+ image->tile[tile].u_off = u_off;
+ image->tile[tile++].v_off = v_off;
+
+ dev_dbg(priv->ipu->dev,
+ "task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
+ chan->ic_task, ctx,
+ image->type == IMAGE_CONVERT_IN ?
+ "Input" : "Output", row, col,
+ y_off, u_off, v_off);
+ }
+ }
+}
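+
+/*
+ * Worked example: 640x480 V4L2_PIX_FMT_YUV420 split into 2x2 tiles
+ * of 320x240: y_stride = 640, uv_stride = 320, y_size = 307200 and
+ * uv_size = 76800. The tile at row 1, col 1 gets y_off = 153600 +
+ * 320 = 153920 and uv_off = 38400 + 160 = 38560, so u_off = 307200 -
+ * 153920 + 38560 = 191840 and v_off = u_off + 76800 = 268640.
+ */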
+
+static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *image)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ const struct ipu_image_pixfmt *fmt = image->fmt;
+ unsigned int row, col, tile = 0;
+ u32 w, h, bpp, stride;
+ u32 row_off, col_off;
+
+ /* setup some convenience vars */
+ stride = image->stride;
+ bpp = fmt->bpp;
+
+ for (row = 0; row < image->num_rows; row++) {
+ w = image->tile[tile].width;
+ h = image->tile[tile].height;
+ row_off = row * h * stride;
+
+ for (col = 0; col < image->num_cols; col++) {
+ col_off = (col * w * bpp) >> 3;
+
+ image->tile[tile].offset = row_off + col_off;
+ image->tile[tile].u_off = 0;
+ image->tile[tile++].v_off = 0;
+
+ dev_dbg(priv->ipu->dev,
+ "task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
+ chan->ic_task, ctx,
+ image->type == IMAGE_CONVERT_IN ?
+ "Input" : "Output", row, col,
+ row_off + col_off);
+ }
+ }
+}
+
+static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *image)
+{
+ if (image->fmt->planar)
+ calc_tile_offsets_planar(ctx, image);
+ else
+ calc_tile_offsets_packed(ctx, image);
+}
+
+/*
+ * Return the number of runs in the given queue (pending_q or done_q)
+ * for this context. The caller must hold irqlock.
+ */
+static int get_run_count(struct ipu_image_convert_ctx *ctx,
+ struct list_head *q)
+{
+ struct ipu_image_convert_run *run;
+ int count = 0;
+
+ lockdep_assert_held(&ctx->chan->irqlock);
+
+ list_for_each_entry(run, q, list) {
+ if (run->ctx == ctx)
+ count++;
+ }
+
+ return count;
+}
+
+static void convert_stop(struct ipu_image_convert_run *run)
+{
+ struct ipu_image_convert_ctx *ctx = run->ctx;
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+
+ dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
+ __func__, chan->ic_task, ctx, run);
+
+ /* disable IC tasks and the channels */
+ ipu_ic_task_disable(chan->ic);
+ ipu_idmac_disable_channel(chan->in_chan);
+ ipu_idmac_disable_channel(chan->out_chan);
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ ipu_idmac_disable_channel(chan->rotation_in_chan);
+ ipu_idmac_disable_channel(chan->rotation_out_chan);
+ ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
+ }
+
+ ipu_ic_disable(chan->ic);
+}
+
+static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
+ struct ipuv3_channel *channel,
+ struct ipu_image_convert_image *image,
+ enum ipu_rotate_mode rot_mode,
+ bool rot_swap_width_height)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ unsigned int burst_size;
+ u32 width, height, stride;
+ dma_addr_t addr0, addr1 = 0;
+ struct ipu_image tile_image;
+ unsigned int tile_idx[2];
+
+ if (image->type == IMAGE_CONVERT_OUT) {
+ tile_idx[0] = ctx->out_tile_map[0];
+ tile_idx[1] = ctx->out_tile_map[1];
+ } else {
+ tile_idx[0] = 0;
+ tile_idx[1] = 1;
+ }
+
+ if (rot_swap_width_height) {
+ width = image->tile[0].height;
+ height = image->tile[0].width;
+ stride = image->tile[0].rot_stride;
+ addr0 = ctx->rot_intermediate[0].phys;
+ if (ctx->double_buffering)
+ addr1 = ctx->rot_intermediate[1].phys;
+ } else {
+ width = image->tile[0].width;
+ height = image->tile[0].height;
+ stride = image->stride;
+ addr0 = image->base.phys0 +
+ image->tile[tile_idx[0]].offset;
+ if (ctx->double_buffering)
+ addr1 = image->base.phys0 +
+ image->tile[tile_idx[1]].offset;
+ }
+
+ ipu_cpmem_zero(channel);
+
+ memset(&tile_image, 0, sizeof(tile_image));
+ tile_image.pix.width = tile_image.rect.width = width;
+ tile_image.pix.height = tile_image.rect.height = height;
+ tile_image.pix.bytesperline = stride;
+ tile_image.pix.pixelformat = image->fmt->fourcc;
+ tile_image.phys0 = addr0;
+ tile_image.phys1 = addr1;
+ ipu_cpmem_set_image(channel, &tile_image);
+
+ if (image->fmt->planar && !rot_swap_width_height)
+ ipu_cpmem_set_uv_offset(channel,
+ image->tile[tile_idx[0]].u_off,
+ image->tile[tile_idx[0]].v_off);
+
+ if (rot_mode)
+ ipu_cpmem_set_rotation(channel, rot_mode);
+
+ if (channel == chan->rotation_in_chan ||
+ channel == chan->rotation_out_chan) {
+ burst_size = 8;
+ ipu_cpmem_set_block_mode(channel);
+	} else {
+		burst_size = (width % 16) ? 8 : 16;
+	}
+
+ ipu_cpmem_set_burstsize(channel, burst_size);
+
+ ipu_ic_task_idma_init(chan->ic, channel, width, height,
+ burst_size, rot_mode);
+
+ ipu_cpmem_set_axi_id(channel, 1);
+
+ ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
+}
+
+static int convert_start(struct ipu_image_convert_run *run)
+{
+ struct ipu_image_convert_ctx *ctx = run->ctx;
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_convert_image *s_image = &ctx->in;
+ struct ipu_image_convert_image *d_image = &ctx->out;
+ enum ipu_color_space src_cs, dest_cs;
+ unsigned int dest_width, dest_height;
+ int ret;
+
+ dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
+ __func__, chan->ic_task, ctx, run);
+
+ src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
+ dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* swap width/height for resizer */
+ dest_width = d_image->tile[0].height;
+ dest_height = d_image->tile[0].width;
+ } else {
+ dest_width = d_image->tile[0].width;
+ dest_height = d_image->tile[0].height;
+ }
+
+ /* setup the IC resizer and CSC */
+ ret = ipu_ic_task_init(chan->ic,
+ s_image->tile[0].width,
+ s_image->tile[0].height,
+ dest_width,
+ dest_height,
+ src_cs, dest_cs);
+ if (ret) {
+ dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
+ return ret;
+ }
+
+ /* init the source MEM-->IC PP IDMAC channel */
+ init_idmac_channel(ctx, chan->in_chan, s_image,
+ IPU_ROTATE_NONE, false);
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* init the IC PP-->MEM IDMAC channel */
+ init_idmac_channel(ctx, chan->out_chan, d_image,
+ IPU_ROTATE_NONE, true);
+
+ /* init the MEM-->IC PP ROT IDMAC channel */
+ init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
+ ctx->rot_mode, true);
+
+ /* init the destination IC PP ROT-->MEM IDMAC channel */
+ init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
+ IPU_ROTATE_NONE, false);
+
+ /* now link IC PP-->MEM to MEM-->IC PP ROT */
+ ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
+ } else {
+ /* init the destination IC PP-->MEM IDMAC channel */
+ init_idmac_channel(ctx, chan->out_chan, d_image,
+ ctx->rot_mode, false);
+ }
+
+ /* enable the IC */
+ ipu_ic_enable(chan->ic);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(chan->in_chan, 0);
+ ipu_idmac_select_buffer(chan->out_chan, 0);
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
+ if (ctx->double_buffering) {
+ ipu_idmac_select_buffer(chan->in_chan, 1);
+ ipu_idmac_select_buffer(chan->out_chan, 1);
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
+ }
+
+ /* enable the channels! */
+ ipu_idmac_enable_channel(chan->in_chan);
+ ipu_idmac_enable_channel(chan->out_chan);
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ ipu_idmac_enable_channel(chan->rotation_in_chan);
+ ipu_idmac_enable_channel(chan->rotation_out_chan);
+ }
+
+ ipu_ic_task_enable(chan->ic);
+
+ ipu_cpmem_dump(chan->in_chan);
+ ipu_cpmem_dump(chan->out_chan);
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ ipu_cpmem_dump(chan->rotation_in_chan);
+ ipu_cpmem_dump(chan->rotation_out_chan);
+ }
+
+ ipu_dump(priv->ipu);
+
+ return 0;
+}
+
+/* hold irqlock when calling */
+static int do_run(struct ipu_image_convert_run *run)
+{
+ struct ipu_image_convert_ctx *ctx = run->ctx;
+ struct ipu_image_convert_chan *chan = ctx->chan;
+
+ lockdep_assert_held(&chan->irqlock);
+
+ ctx->in.base.phys0 = run->in_phys;
+ ctx->out.base.phys0 = run->out_phys;
+
+ ctx->cur_buf_num = 0;
+ ctx->next_tile = 1;
+
+ /* remove run from pending_q and set as current */
+ list_del(&run->list);
+ chan->current_run = run;
+
+ return convert_start(run);
+}
+
+/* hold irqlock when calling */
+static void run_next(struct ipu_image_convert_chan *chan)
+{
+ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_convert_run *run, *tmp;
+ int ret;
+
+ lockdep_assert_held(&chan->irqlock);
+
+ list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+ /* skip contexts that are aborting */
+ if (run->ctx->aborting) {
+ dev_dbg(priv->ipu->dev,
+ "%s: task %u: skipping aborting ctx %p run %p\n",
+ __func__, chan->ic_task, run->ctx, run);
+ continue;
+ }
+
+ ret = do_run(run);
+ if (!ret)
+ break;
+
+ /*
+ * something went wrong with start, add the run
+ * to done q and continue to the next run in the
+ * pending q.
+ */
+ run->status = ret;
+ list_add_tail(&run->list, &chan->done_q);
+ chan->current_run = NULL;
+ }
+}
+
+static void empty_done_q(struct ipu_image_convert_chan *chan)
+{
+ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_convert_run *run;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+ while (!list_empty(&chan->done_q)) {
+ run = list_entry(chan->done_q.next,
+ struct ipu_image_convert_run,
+ list);
+
+ list_del(&run->list);
+
+ dev_dbg(priv->ipu->dev,
+ "%s: task %u: completing ctx %p run %p with %d\n",
+ __func__, chan->ic_task, run->ctx, run, run->status);
+
+ /* call the completion callback and free the run */
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+ run->ctx->complete(run, run->ctx->complete_context);
+ spin_lock_irqsave(&chan->irqlock, flags);
+ }
+
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+}
+
+/*
+ * the bottom half thread clears out the done_q, calling the
+ * completion handler for each.
+ */
+static irqreturn_t do_bh(int irq, void *dev_id)
+{
+ struct ipu_image_convert_chan *chan = dev_id;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_convert_ctx *ctx;
+ unsigned long flags;
+
+ dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
+ chan->ic_task);
+
+ empty_done_q(chan);
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+ /*
+	 * the done_q is now empty, so signal any aborting contexts
+	 * that their abort can complete.
+ */
+ list_for_each_entry(ctx, &chan->ctx_list, list) {
+ if (ctx->aborting) {
+ dev_dbg(priv->ipu->dev,
+ "%s: task %u: signaling abort for ctx %p\n",
+ __func__, chan->ic_task, ctx);
+ complete(&ctx->aborted);
+ }
+ }
+
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+
+ dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
+ chan->ic_task);
+
+ return IRQ_HANDLED;
+}
+
+/* hold irqlock when calling */
+static irqreturn_t do_irq(struct ipu_image_convert_run *run)
+{
+ struct ipu_image_convert_ctx *ctx = run->ctx;
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_tile *src_tile, *dst_tile;
+ struct ipu_image_convert_image *s_image = &ctx->in;
+ struct ipu_image_convert_image *d_image = &ctx->out;
+ struct ipuv3_channel *outch;
+ unsigned int dst_idx;
+
+ lockdep_assert_held(&chan->irqlock);
+
+ outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
+ chan->rotation_out_chan : chan->out_chan;
+
+ /*
+ * It is difficult to stop the channel DMA before the channels
+ * enter the paused state. Without double-buffering the channels
+ * are always in a paused state when the EOF irq occurs, so it
+ * is safe to stop the channels now. For double-buffering we
+ * just ignore the abort until the operation completes, when it
+ * is safe to shut down.
+ */
+ if (ctx->aborting && !ctx->double_buffering) {
+ convert_stop(run);
+ run->status = -EIO;
+ goto done;
+ }
+
+ if (ctx->next_tile == ctx->num_tiles) {
+ /*
+ * the conversion is complete
+ */
+ convert_stop(run);
+ run->status = 0;
+ goto done;
+ }
+
+ /*
+ * not done, place the next tile buffers.
+ */
+ if (!ctx->double_buffering) {
+
+ src_tile = &s_image->tile[ctx->next_tile];
+ dst_idx = ctx->out_tile_map[ctx->next_tile];
+ dst_tile = &d_image->tile[dst_idx];
+
+ ipu_cpmem_set_buffer(chan->in_chan, 0,
+ s_image->base.phys0 + src_tile->offset);
+ ipu_cpmem_set_buffer(outch, 0,
+ d_image->base.phys0 + dst_tile->offset);
+ if (s_image->fmt->planar)
+ ipu_cpmem_set_uv_offset(chan->in_chan,
+ src_tile->u_off,
+ src_tile->v_off);
+ if (d_image->fmt->planar)
+ ipu_cpmem_set_uv_offset(outch,
+ dst_tile->u_off,
+ dst_tile->v_off);
+
+ ipu_idmac_select_buffer(chan->in_chan, 0);
+ ipu_idmac_select_buffer(outch, 0);
+
+ } else if (ctx->next_tile < ctx->num_tiles - 1) {
+
+ src_tile = &s_image->tile[ctx->next_tile + 1];
+ dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
+ dst_tile = &d_image->tile[dst_idx];
+
+ ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
+ s_image->base.phys0 + src_tile->offset);
+ ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
+ d_image->base.phys0 + dst_tile->offset);
+
+ ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
+ ipu_idmac_select_buffer(outch, ctx->cur_buf_num);
+
+ ctx->cur_buf_num ^= 1;
+ }
+
+ ctx->next_tile++;
+ return IRQ_HANDLED;
+done:
+ list_add_tail(&run->list, &chan->done_q);
+ chan->current_run = NULL;
+ run_next(chan);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t norotate_irq(int irq, void *data)
+{
+ struct ipu_image_convert_chan *chan = data;
+ struct ipu_image_convert_ctx *ctx;
+ struct ipu_image_convert_run *run;
+ unsigned long flags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+ /* get current run and its context */
+ run = chan->current_run;
+ if (!run) {
+ ret = IRQ_NONE;
+ goto out;
+ }
+
+ ctx = run->ctx;
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* this is a rotation operation, just ignore */
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+ return IRQ_HANDLED;
+ }
+
+ ret = do_irq(run);
+out:
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+ return ret;
+}
+
+static irqreturn_t rotate_irq(int irq, void *data)
+{
+ struct ipu_image_convert_chan *chan = data;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_convert_ctx *ctx;
+ struct ipu_image_convert_run *run;
+ unsigned long flags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+ /* get current run and its context */
+ run = chan->current_run;
+ if (!run) {
+ ret = IRQ_NONE;
+ goto out;
+ }
+
+ ctx = run->ctx;
+
+ if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* this was NOT a rotation operation, shouldn't happen */
+ dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+ return IRQ_HANDLED;
+ }
+
+ ret = do_irq(run);
+out:
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+ return ret;
+}
+
+/*
+ * try to force the completion of runs for this ctx. Called when
+ * abort wait times out in ipu_image_convert_abort().
+ */
+static void force_abort(struct ipu_image_convert_ctx *ctx)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_run *run;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+ run = chan->current_run;
+ if (run && run->ctx == ctx) {
+ convert_stop(run);
+ run->status = -EIO;
+ list_add_tail(&run->list, &chan->done_q);
+ chan->current_run = NULL;
+ run_next(chan);
+ }
+
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+
+ empty_done_q(chan);
+}
+
+static void release_ipu_resources(struct ipu_image_convert_chan *chan)
+{
+ if (chan->out_eof_irq >= 0)
+ free_irq(chan->out_eof_irq, chan);
+ if (chan->rot_out_eof_irq >= 0)
+ free_irq(chan->rot_out_eof_irq, chan);
+
+ if (!IS_ERR_OR_NULL(chan->in_chan))
+ ipu_idmac_put(chan->in_chan);
+ if (!IS_ERR_OR_NULL(chan->out_chan))
+ ipu_idmac_put(chan->out_chan);
+ if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
+ ipu_idmac_put(chan->rotation_in_chan);
+ if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
+ ipu_idmac_put(chan->rotation_out_chan);
+ if (!IS_ERR_OR_NULL(chan->ic))
+ ipu_ic_put(chan->ic);
+
+ chan->in_chan = chan->out_chan = chan->rotation_in_chan =
+ chan->rotation_out_chan = NULL;
+ chan->out_eof_irq = chan->rot_out_eof_irq = -1;
+}
+
+static int get_ipu_resources(struct ipu_image_convert_chan *chan)
+{
+ const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ int ret;
+
+ /* get IC */
+ chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
+ if (IS_ERR(chan->ic)) {
+ dev_err(priv->ipu->dev, "could not acquire IC\n");
+ ret = PTR_ERR(chan->ic);
+ goto err;
+ }
+
+ /* get IDMAC channels */
+ chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
+ chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
+ if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
+ dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
+ chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
+ if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
+ dev_err(priv->ipu->dev,
+ "could not acquire idmac rotation channels\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* acquire the EOF interrupts */
+ chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+ chan->out_chan,
+ IPU_IRQ_EOF);
+
+ ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
+ 0, "ipu-ic", chan);
+ if (ret < 0) {
+ dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+ chan->out_eof_irq);
+ chan->out_eof_irq = -1;
+ goto err;
+ }
+
+ chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
+ chan->rotation_out_chan,
+ IPU_IRQ_EOF);
+
+ ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
+ 0, "ipu-ic", chan);
+ if (ret < 0) {
+ dev_err(priv->ipu->dev, "could not acquire irq %d\n",
+ chan->rot_out_eof_irq);
+ chan->rot_out_eof_irq = -1;
+ goto err;
+ }
+
+ return 0;
+err:
+ release_ipu_resources(chan);
+ return ret;
+}
+
+static int fill_image(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *ic_image,
+ struct ipu_image *image,
+ enum ipu_image_convert_type type)
+{
+ struct ipu_image_convert_priv *priv = ctx->chan->priv;
+
+ ic_image->base = *image;
+ ic_image->type = type;
+
+ ic_image->fmt = get_format(image->pix.pixelformat);
+ if (!ic_image->fmt) {
+ dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
+ type == IMAGE_CONVERT_OUT ? "Output" : "Input");
+ return -EINVAL;
+ }
+
+ if (ic_image->fmt->planar)
+ ic_image->stride = ic_image->base.pix.width;
+ else
+ ic_image->stride = ic_image->base.pix.bytesperline;
+
+ calc_tile_dimensions(ctx, ic_image);
+ calc_tile_offsets(ctx, ic_image);
+
+ return 0;
+}
+
+/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
+static unsigned int clamp_align(unsigned int x, unsigned int min,
+ unsigned int max, unsigned int align)
+{
+ /* Bits that must be zero to be aligned */
+ unsigned int mask = ~((1 << align) - 1);
+
+ /* Clamp to aligned min and max */
+ x = clamp(x, (min + ~mask) & mask, max & mask);
+
+ /* Round to nearest aligned value */
+ if (align)
+ x = (x + (1 << (align - 1))) & mask;
+
+ return x;
+}
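+
+/*
+ * For example, clamp_align(100, 8, 1024, 4) clamps to the aligned
+ * range [16, 1024] and then rounds 100 to the nearest multiple of
+ * 2^4 = 16, giving 96.
+ */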
+
+/*
+ * We have to adjust the tile width such that the tile physaddrs and
+ * U and V plane offsets are multiples of 8 bytes as required by
+ * the IPU DMA Controller. For the planar formats, this corresponds
+ * to a pixel alignment of 16 (but use a more formal equation since
+ * the variables are available). For all the packed formats, 8 is
+ * good enough.
+ */
+static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
+{
+ return fmt->planar ? 8 * fmt->uv_width_dec : 8;
+}
+
+/*
+ * For tile height alignment, we have to ensure that the output tile
+ * heights are multiples of 8 lines if the IRT is required by the
+ * given rotation mode (the IRT performs rotations on 8x8 blocks
+ * at a time). If the IRT is not used, or for input image tiles,
+ * 2 lines are good enough.
+ */
+static inline u32 tile_height_align(enum ipu_image_convert_type type,
+ enum ipu_rotate_mode rot_mode)
+{
+ return (type == IMAGE_CONVERT_OUT &&
+ ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
+}
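+
+/*
+ * For example, a planar YUV420 format (uv_width_dec = 2) needs tile
+ * widths aligned to 16 pixels, versus 8 for packed RGB565; and an
+ * output tile that will pass through the IRT needs its height
+ * aligned to 8 lines, versus 2 otherwise.
+ */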
+
+/* Adjusts input/output images to IPU restrictions */
+void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode)
+{
+ const struct ipu_image_pixfmt *infmt, *outfmt;
+ unsigned int num_in_rows, num_in_cols;
+ unsigned int num_out_rows, num_out_cols;
+ u32 w_align, h_align;
+
+ infmt = get_format(in->pix.pixelformat);
+ outfmt = get_format(out->pix.pixelformat);
+
+ /* set some default pixel formats if needed */
+ if (!infmt) {
+ in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+ infmt = get_format(V4L2_PIX_FMT_RGB24);
+ }
+ if (!outfmt) {
+ out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
+ outfmt = get_format(V4L2_PIX_FMT_RGB24);
+ }
+
+ /* image converter does not handle fields */
+ in->pix.field = out->pix.field = V4L2_FIELD_NONE;
+
+ /* resizer cannot downsize more than 4:1 */
+ if (ipu_rot_mode_is_irt(rot_mode)) {
+ out->pix.height = max_t(__u32, out->pix.height,
+ in->pix.width / 4);
+ out->pix.width = max_t(__u32, out->pix.width,
+ in->pix.height / 4);
+ } else {
+ out->pix.width = max_t(__u32, out->pix.width,
+ in->pix.width / 4);
+ out->pix.height = max_t(__u32, out->pix.height,
+ in->pix.height / 4);
+ }
+
+ /* get tiling rows/cols from output format */
+ num_out_rows = num_stripes(out->pix.height);
+ num_out_cols = num_stripes(out->pix.width);
+ if (ipu_rot_mode_is_irt(rot_mode)) {
+ num_in_rows = num_out_cols;
+ num_in_cols = num_out_rows;
+ } else {
+ num_in_rows = num_out_rows;
+ num_in_cols = num_out_cols;
+ }
+
+ /* align input width/height */
+ w_align = ilog2(tile_width_align(infmt) * num_in_cols);
+ h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
+ num_in_rows);
+ in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
+ in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
+
+ /* align output width/height */
+ w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
+ h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
+ num_out_rows);
+ out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
+ out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
+
+ /* set input/output strides and image sizes */
+ in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
+ in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
+ out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
+ out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
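+
+/*
+ * For example, a YUV420 input split into two tile columns gets
+ * w_align = ilog2(16 * 2) = 5, so its width is clamped and rounded
+ * to a multiple of 32 pixels.
+ */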
+
+/*
+ * This is used by ipu_image_convert_prepare() to verify that the input
+ * and output images are valid before starting the conversion. Clients can
+ * also call it before calling ipu_image_convert_prepare().
+ */
+int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode)
+{
+ struct ipu_image testin, testout;
+
+ testin = *in;
+ testout = *out;
+
+ ipu_image_convert_adjust(&testin, &testout, rot_mode);
+
+ if (testin.pix.width != in->pix.width ||
+ testin.pix.height != in->pix.height ||
+ testout.pix.width != out->pix.width ||
+ testout.pix.height != out->pix.height)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
+
+/*
+ * Call ipu_image_convert_prepare() to prepare for the conversion of
+ * the given images and rotation mode. Returns a new conversion context.
+ */
+struct ipu_image_convert_ctx *
+ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+ struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode,
+ ipu_image_convert_cb_t complete,
+ void *complete_context)
+{
+ struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
+ struct ipu_image_convert_image *s_image, *d_image;
+ struct ipu_image_convert_chan *chan;
+ struct ipu_image_convert_ctx *ctx;
+ unsigned long flags;
+ bool get_res;
+ int ret;
+
+ if (!in || !out || !complete ||
+ (ic_task != IC_TASK_VIEWFINDER &&
+ ic_task != IC_TASK_POST_PROCESSOR))
+ return ERR_PTR(-EINVAL);
+
+ /* verify the in/out images before continuing */
+ ret = ipu_image_convert_verify(in, out, rot_mode);
+ if (ret) {
+ dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
+ __func__);
+ return ERR_PTR(ret);
+ }
+
+ chan = &priv->chan[ic_task];
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
+ chan->ic_task, ctx);
+
+ ctx->chan = chan;
+ init_completion(&ctx->aborted);
+
+ s_image = &ctx->in;
+ d_image = &ctx->out;
+
+ /* set tiling and rotation */
+ d_image->num_rows = num_stripes(out->pix.height);
+ d_image->num_cols = num_stripes(out->pix.width);
+ if (ipu_rot_mode_is_irt(rot_mode)) {
+ s_image->num_rows = d_image->num_cols;
+ s_image->num_cols = d_image->num_rows;
+ } else {
+ s_image->num_rows = d_image->num_rows;
+ s_image->num_cols = d_image->num_cols;
+ }
+
+ ctx->num_tiles = d_image->num_cols * d_image->num_rows;
+ ctx->rot_mode = rot_mode;
+
+ ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
+ if (ret)
+ goto out_free;
+ ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
+ if (ret)
+ goto out_free;
+
+ calc_out_tile_map(ctx);
+
+ dump_format(ctx, s_image);
+ dump_format(ctx, d_image);
+
+ ctx->complete = complete;
+ ctx->complete_context = complete_context;
+
+ /*
+ * Can we use double-buffering for this operation? If there is
+ * only one tile (the whole image can be converted in a single
+ * operation) there's no point in using double-buffering. Also,
+ * the IPU's IDMAC channels allow only a single U and V plane
+ * offset shared between both buffers, but these offsets change
+ * for every tile, and therefore would have to be updated for
+	 * each buffer, which is not possible. So double-buffering is
+	 * impossible when either the source or destination image is
+ * a planar format (YUV420, YUV422P, etc.).
+ */
+ ctx->double_buffering = (ctx->num_tiles > 1 &&
+ !s_image->fmt->planar &&
+ !d_image->fmt->planar);
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
+ d_image->tile[0].size);
+ if (ret)
+ goto out_free;
+ if (ctx->double_buffering) {
+ ret = alloc_dma_buf(priv,
+ &ctx->rot_intermediate[1],
+ d_image->tile[0].size);
+ if (ret)
+ goto out_free_dmabuf0;
+ }
+ }
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+ get_res = list_empty(&chan->ctx_list);
+
+ list_add_tail(&ctx->list, &chan->ctx_list);
+
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+
+ if (get_res) {
+ ret = get_ipu_resources(chan);
+ if (ret)
+ goto out_free_dmabuf1;
+ }
+
+ return ctx;
+
+out_free_dmabuf1:
+ free_dma_buf(priv, &ctx->rot_intermediate[1]);
+ spin_lock_irqsave(&chan->irqlock, flags);
+ list_del(&ctx->list);
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+out_free_dmabuf0:
+ free_dma_buf(priv, &ctx->rot_intermediate[0]);
+out_free:
+ kfree(ctx);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
+
+/*
+ * Carry out a single image conversion run. Only the physaddrs of the input
+ * and output image buffers are needed. The conversion context must have
+ * been created previously with ipu_image_convert_prepare().
+ */
+int ipu_image_convert_queue(struct ipu_image_convert_run *run)
+{
+ struct ipu_image_convert_chan *chan;
+ struct ipu_image_convert_priv *priv;
+ struct ipu_image_convert_ctx *ctx;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!run || !run->ctx || !run->in_phys || !run->out_phys)
+ return -EINVAL;
+
+ ctx = run->ctx;
+ chan = ctx->chan;
+ priv = chan->priv;
+
+ dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
+ chan->ic_task, ctx, run);
+
+ INIT_LIST_HEAD(&run->list);
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+ if (ctx->aborting) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ list_add_tail(&run->list, &chan->pending_q);
+
+ if (!chan->current_run) {
+ ret = do_run(run);
+ if (ret)
+ chan->current_run = NULL;
+ }
+unlock:
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
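+
+/*
+ * A typical client flow, as a sketch (error handling omitted;
+ * my_complete, my_data and the phys variables are placeholder names,
+ * not part of this API):
+ *
+ *	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
+ *					&in, &out, rot_mode,
+ *					my_complete, my_data);
+ *	run->ctx = ctx;
+ *	run->in_phys = in_phys;
+ *	run->out_phys = out_phys;
+ *	ipu_image_convert_queue(run);
+ *
+ * Once my_complete() has reported completion of the last queued run,
+ * the context is released with ipu_image_convert_unprepare(ctx).
+ */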
+
+/* Abort any active or pending conversions for this context */
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_convert_run *run, *active_run, *tmp;
+ unsigned long flags;
+ int run_count, ret;
+ bool need_abort;
+
+ reinit_completion(&ctx->aborted);
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+ /* move all remaining pending runs in this context to done_q */
+ list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
+ if (run->ctx != ctx)
+ continue;
+ run->status = -EIO;
+ list_move_tail(&run->list, &chan->done_q);
+ }
+
+ run_count = get_run_count(ctx, &chan->done_q);
+ active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
+ chan->current_run : NULL;
+
+ need_abort = (run_count || active_run);
+
+ ctx->aborting = need_abort;
+
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+
+ if (!need_abort) {
+ dev_dbg(priv->ipu->dev,
+ "%s: task %u: no abort needed for ctx %p\n",
+ __func__, chan->ic_task, ctx);
+ return;
+ }
+
+ dev_dbg(priv->ipu->dev,
+ "%s: task %u: wait for completion: %d runs, active run %p\n",
+ __func__, chan->ic_task, run_count, active_run);
+
+ ret = wait_for_completion_timeout(&ctx->aborted,
+ msecs_to_jiffies(10000));
+ if (ret == 0) {
+ dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
+ force_abort(ctx);
+ }
+
+ ctx->aborting = false;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
+
+/* Unprepare image conversion context */
+void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ unsigned long flags;
+ bool put_res;
+
+ /* make sure no runs are hanging around */
+ ipu_image_convert_abort(ctx);
+
+ dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
+ chan->ic_task, ctx);
+
+ spin_lock_irqsave(&chan->irqlock, flags);
+
+ list_del(&ctx->list);
+
+ put_res = list_empty(&chan->ctx_list);
+
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+
+ if (put_res)
+ release_ipu_resources(chan);
+
+ free_dma_buf(priv, &ctx->rot_intermediate[1]);
+ free_dma_buf(priv, &ctx->rot_intermediate[0]);
+
+ kfree(ctx);
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
+
+/*
+ * "Canned" asynchronous single image conversion. Allocates and returns
+ * a new conversion run. On successful return the caller must free the
+ * run and call ipu_image_convert_unprepare() after conversion completes.
+ */
+struct ipu_image_convert_run *
+ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+ struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode,
+ ipu_image_convert_cb_t complete,
+ void *complete_context)
+{
+ struct ipu_image_convert_ctx *ctx;
+ struct ipu_image_convert_run *run;
+ int ret;
+
+ ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
+ complete, complete_context);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ run = kzalloc(sizeof(*run), GFP_KERNEL);
+ if (!run) {
+ ipu_image_convert_unprepare(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ run->ctx = ctx;
+ run->in_phys = in->phys0;
+ run->out_phys = out->phys0;
+
+ ret = ipu_image_convert_queue(run);
+ if (ret) {
+ ipu_image_convert_unprepare(ctx);
+ kfree(run);
+ return ERR_PTR(ret);
+ }
+
+ return run;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert);
+
+/* "Canned" synchronous single image conversion */
+static void image_convert_sync_complete(struct ipu_image_convert_run *run,
+ void *data)
+{
+ struct completion *comp = data;
+
+ complete(comp);
+}
+
+int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
+ struct ipu_image *in, struct ipu_image *out,
+ enum ipu_rotate_mode rot_mode)
+{
+ struct ipu_image_convert_run *run;
+ struct completion comp;
+ int ret;
+
+ init_completion(&comp);
+
+ run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
+ image_convert_sync_complete, &comp);
+ if (IS_ERR(run))
+ return PTR_ERR(run);
+
+ ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
+ ret = (ret == 0) ? -ETIMEDOUT : 0;
+
+ ipu_image_convert_unprepare(run->ctx);
+ kfree(run);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
+
+int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
+{
+ struct ipu_image_convert_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ipu->image_convert_priv = priv;
+ priv->ipu = ipu;
+
+ for (i = 0; i < IC_NUM_TASKS; i++) {
+ struct ipu_image_convert_chan *chan = &priv->chan[i];
+
+ chan->ic_task = i;
+ chan->priv = priv;
+ chan->dma_ch = &image_convert_dma_chan[i];
+ chan->out_eof_irq = -1;
+ chan->rot_out_eof_irq = -1;
+
+ spin_lock_init(&chan->irqlock);
+ INIT_LIST_HEAD(&chan->ctx_list);
+ INIT_LIST_HEAD(&chan->pending_q);
+ INIT_LIST_HEAD(&chan->done_q);
+ }
+
+ return 0;
+}
+
+void ipu_image_convert_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index bfb1e8a4483f..22e47b68b14a 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -75,6 +75,33 @@ struct ipu_soc;
#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * (n))
#define IPU_INT_STAT(n) IPU_CM_REG(0x0200 + 4 * (n))
+/* FS_PROC_FLOW1 */
+#define FS_PRPENC_ROT_SRC_SEL_MASK (0xf << 0)
+#define FS_PRPENC_ROT_SRC_SEL_ENC (0x7 << 0)
+#define FS_PRPVF_ROT_SRC_SEL_MASK (0xf << 8)
+#define FS_PRPVF_ROT_SRC_SEL_VF (0x8 << 8)
+#define FS_PP_SRC_SEL_MASK (0xf << 12)
+#define FS_PP_ROT_SRC_SEL_MASK (0xf << 16)
+#define FS_PP_ROT_SRC_SEL_PP (0x5 << 16)
+#define FS_VDI1_SRC_SEL_MASK (0x3 << 20)
+#define FS_VDI3_SRC_SEL_MASK (0x3 << 20)
+#define FS_PRP_SRC_SEL_MASK (0xf << 24)
+#define FS_VDI_SRC_SEL_MASK (0x3 << 28)
+#define FS_VDI_SRC_SEL_CSI_DIRECT (0x1 << 28)
+#define FS_VDI_SRC_SEL_VDOA (0x2 << 28)
+
+/* FS_PROC_FLOW2 */
+#define FS_PRP_ENC_DEST_SEL_MASK (0xf << 0)
+#define FS_PRP_ENC_DEST_SEL_IRT_ENC (0x1 << 0)
+#define FS_PRPVF_DEST_SEL_MASK (0xf << 4)
+#define FS_PRPVF_DEST_SEL_IRT_VF (0x1 << 4)
+#define FS_PRPVF_ROT_DEST_SEL_MASK (0xf << 8)
+#define FS_PP_DEST_SEL_MASK (0xf << 12)
+#define FS_PP_DEST_SEL_IRT_PP (0x3 << 12)
+#define FS_PP_ROT_DEST_SEL_MASK (0xf << 16)
+#define FS_PRPENC_ROT_DEST_SEL_MASK (0xf << 20)
+#define FS_PRP_DEST_SEL_MASK (0xf << 24)
+
#define IPU_DI0_COUNTER_RELEASE (1 << 24)
#define IPU_DI1_COUNTER_RELEASE (1 << 25)
@@ -138,6 +165,8 @@ struct ipu_dc_priv;
struct ipu_dmfc_priv;
struct ipu_di;
struct ipu_ic_priv;
+struct ipu_vdi;
+struct ipu_image_convert_priv;
struct ipu_smfc_priv;
struct ipu_devtype;
@@ -152,6 +181,7 @@ struct ipu_soc {
void __iomem *cm_reg;
void __iomem *idmac_reg;
+ int id;
int usecount;
struct clk *clk;
@@ -169,6 +199,8 @@ struct ipu_soc {
struct ipu_di *di_priv[2];
struct ipu_csi *csi_priv[2];
struct ipu_ic_priv *ic_priv;
+ struct ipu_vdi *vdi_priv;
+ struct ipu_image_convert_priv *image_convert_priv;
struct ipu_smfc_priv *smfc_priv;
};
@@ -199,6 +231,13 @@ int ipu_ic_init(struct ipu_soc *ipu, struct device *dev,
unsigned long base, unsigned long tpmem_base);
void ipu_ic_exit(struct ipu_soc *ipu);
+int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base, u32 module);
+void ipu_vdi_exit(struct ipu_soc *ipu);
+
+int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev);
+void ipu_image_convert_exit(struct ipu_soc *ipu);
+
int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
unsigned long base, u32 module, struct clk *ipu_clk);
void ipu_di_exit(struct ipu_soc *ipu, int id);
diff --git a/drivers/gpu/ipu-v3/ipu-vdi.c b/drivers/gpu/ipu-v3/ipu-vdi.c
new file mode 100644
index 000000000000..f27bf5a12ebc
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-vdi.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012-2016 Mentor Graphics Inc.
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/io.h>
+#include "ipu-prv.h"
+
+struct ipu_vdi {
+ void __iomem *base;
+ u32 module;
+ spinlock_t lock;
+ int use_count;
+ struct ipu_soc *ipu;
+};
+
+/* VDI Register Offsets */
+#define VDI_FSIZE 0x0000
+#define VDI_C 0x0004
+
+/* VDI Register Fields */
+#define VDI_C_CH_420 (0 << 1)
+#define VDI_C_CH_422 (1 << 1)
+#define VDI_C_MOT_SEL_MASK (0x3 << 2)
+#define VDI_C_MOT_SEL_FULL (2 << 2)
+#define VDI_C_MOT_SEL_LOW (1 << 2)
+#define VDI_C_MOT_SEL_MED (0 << 2)
+#define VDI_C_BURST_SIZE1_4 (3 << 4)
+#define VDI_C_BURST_SIZE2_4 (3 << 8)
+#define VDI_C_BURST_SIZE3_4 (3 << 12)
+#define VDI_C_BURST_SIZE_MASK 0xF
+#define VDI_C_BURST_SIZE1_OFFSET 4
+#define VDI_C_BURST_SIZE2_OFFSET 8
+#define VDI_C_BURST_SIZE3_OFFSET 12
+#define VDI_C_VWM1_SET_1 (0 << 16)
+#define VDI_C_VWM1_SET_2 (1 << 16)
+#define VDI_C_VWM1_CLR_2 (1 << 19)
+#define VDI_C_VWM3_SET_1 (0 << 22)
+#define VDI_C_VWM3_SET_2 (1 << 22)
+#define VDI_C_VWM3_CLR_2 (1 << 25)
+#define VDI_C_TOP_FIELD_MAN_1 (1 << 30)
+#define VDI_C_TOP_FIELD_AUTO_1 (1 << 31)
+
+static inline u32 ipu_vdi_read(struct ipu_vdi *vdi, unsigned int offset)
+{
+ return readl(vdi->base + offset);
+}
+
+static inline void ipu_vdi_write(struct ipu_vdi *vdi, u32 value,
+ unsigned int offset)
+{
+ writel(value, vdi->base + offset);
+}
+
+void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
+{
+ bool top_field_0 = false;
+ unsigned long flags;
+ u32 reg;
+
+ switch (field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_TOP:
+ top_field_0 = true;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_BOTTOM:
+ top_field_0 = false;
+ break;
+ default:
+ top_field_0 = (std & V4L2_STD_525_60) ? true : false;
+ break;
+ }
+
+ spin_lock_irqsave(&vdi->lock, flags);
+
+ reg = ipu_vdi_read(vdi, VDI_C);
+ if (top_field_0)
+ reg &= ~VDI_C_TOP_FIELD_MAN_1;
+ else
+ reg |= VDI_C_TOP_FIELD_MAN_1;
+ ipu_vdi_write(vdi, reg, VDI_C);
+
+ spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_set_field_order);
+
+void ipu_vdi_set_motion(struct ipu_vdi *vdi, enum ipu_motion_sel motion_sel)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&vdi->lock, flags);
+
+ reg = ipu_vdi_read(vdi, VDI_C);
+
+ reg &= ~VDI_C_MOT_SEL_MASK;
+
+ switch (motion_sel) {
+ case MED_MOTION:
+ reg |= VDI_C_MOT_SEL_MED;
+ break;
+ case HIGH_MOTION:
+ reg |= VDI_C_MOT_SEL_FULL;
+ break;
+ default:
+ reg |= VDI_C_MOT_SEL_LOW;
+ break;
+ }
+
+ ipu_vdi_write(vdi, reg, VDI_C);
+
+ spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_set_motion);
+
+void ipu_vdi_setup(struct ipu_vdi *vdi, u32 code, int xres, int yres)
+{
+ unsigned long flags;
+ u32 pixel_fmt, reg;
+
+ spin_lock_irqsave(&vdi->lock, flags);
+
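+	/*
+	 * VDI_FSIZE encodes height - 1 in bits 16..31 and width - 1 in
+	 * bits 0..15, e.g. 720x480 gives 0x01df02cf.
+	 */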
+ reg = ((yres - 1) << 16) | (xres - 1);
+ ipu_vdi_write(vdi, reg, VDI_FSIZE);
+
+ /*
+ * Full motion, only vertical filter is used.
+ * Burst size is 4 accesses
+ */
+ if (code == MEDIA_BUS_FMT_UYVY8_2X8 ||
+ code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ code == MEDIA_BUS_FMT_YUYV8_1X16)
+ pixel_fmt = VDI_C_CH_422;
+ else
+ pixel_fmt = VDI_C_CH_420;
+
+ reg = ipu_vdi_read(vdi, VDI_C);
+ reg |= pixel_fmt;
+ reg |= VDI_C_BURST_SIZE2_4;
+ reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_CLR_2;
+ reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_CLR_2;
+ ipu_vdi_write(vdi, reg, VDI_C);
+
+ spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_setup);
+
+void ipu_vdi_unsetup(struct ipu_vdi *vdi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vdi->lock, flags);
+ ipu_vdi_write(vdi, 0, VDI_FSIZE);
+ ipu_vdi_write(vdi, 0, VDI_C);
+ spin_unlock_irqrestore(&vdi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_unsetup);
+
+int ipu_vdi_enable(struct ipu_vdi *vdi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vdi->lock, flags);
+
+ if (!vdi->use_count)
+ ipu_module_enable(vdi->ipu, vdi->module);
+
+ vdi->use_count++;
+
+ spin_unlock_irqrestore(&vdi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_enable);
+
+int ipu_vdi_disable(struct ipu_vdi *vdi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vdi->lock, flags);
+
+ if (vdi->use_count) {
+ if (!--vdi->use_count)
+ ipu_module_disable(vdi->ipu, vdi->module);
+ }
+
+ spin_unlock_irqrestore(&vdi->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_disable);
+
+struct ipu_vdi *ipu_vdi_get(struct ipu_soc *ipu)
+{
+ return ipu->vdi_priv;
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_get);
+
+void ipu_vdi_put(struct ipu_vdi *vdi)
+{
+}
+EXPORT_SYMBOL_GPL(ipu_vdi_put);
+
+int ipu_vdi_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base, u32 module)
+{
+ struct ipu_vdi *vdi;
+
+ vdi = devm_kzalloc(dev, sizeof(*vdi), GFP_KERNEL);
+ if (!vdi)
+ return -ENOMEM;
+
+ ipu->vdi_priv = vdi;
+
+ spin_lock_init(&vdi->lock);
+ vdi->module = module;
+ vdi->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!vdi->base)
+ return -ENOMEM;
+
+ dev_dbg(dev, "VDI base: 0x%08lx remapped to %p\n", base, vdi->base);
+ vdi->ipu = ipu;
+
+ return 0;
+}
+
+void ipu_vdi_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index f17cb0431833..1887f199ccb7 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -131,7 +131,24 @@ static struct vga_device *vgadev_find(struct pci_dev *pdev)
return NULL;
}
-/* Returns the default VGA device (vgacon's babe) */
+/**
+ * vga_default_device - return the default VGA device, for vgacon
+ *
+ * This can be defined by the platform. The default implementation
+ * is rather dumb and will probably only work properly on single
+ * vga card setups and/or x86 platforms.
+ *
+ * If your VGA default device is not PCI, you'll have to return
+ * NULL here. In this case, I assume it will not conflict with
+ * any PCI card. If this is not true, I'll have to define two arch
+ * hooks for enabling/disabling the VGA default device if that is
+ * possible. This may be a problem with real _ISA_ VGA cards, in
+ * addition to a PCI one. I don't know at this point how to deal
+ * with that card. Can their IOs be disabled at all? If not, then
+ * I suppose it's a matter of having the proper arch hook telling
+ * us about it, so we basically never allow anybody to succeed at a
+ * vga_get()...
+ */
struct pci_dev *vga_default_device(void)
{
return vga_default;
@@ -356,6 +373,40 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
wake_up_all(&vga_wait_queue);
}
+/**
+ * vga_get - acquire & lock VGA resources
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
+ * @interruptible: should blocking be interruptible by signals?
+ *
+ * This function acquires VGA resources for the given card and marks those
+ * resources as locked. If the resources requested are "normal" (and not legacy)
+ * resources, the arbiter will first check whether the card is doing legacy
+ * decoding for that type of resource. If yes, the lock is "converted" into a
+ * legacy resource lock.
+ *
+ * The arbiter will first look for all VGA cards that might conflict and disable
+ * their IOs and/or Memory access, including VGA forwarding on P2P bridges if
+ * necessary, so that the requested resources can be used. Then, the card is
+ * marked as locking these resources and the IO and/or Memory accesses are
+ * enabled on the card (including VGA forwarding on parent P2P bridges if any).
+ *
+ * This function will block if some conflicting card is already locking one of
+ * the required resources (or any resource on a different bus segment, since P2P
+ * bridges don't differentiate VGA memory and IO afaik). You can indicate
+ * whether this blocking should be interruptible by a signal (for userland
+ * interface) or not.
+ *
+ * Must not be called at interrupt time or in atomic context. If the card
+ * already owns the resources, the function succeeds. Nested calls are
+ * supported (a per-resource counter is maintained).
+ *
+ * On success, release the VGA resource again with vga_put().
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
{
struct vga_device *vgadev, *conflict;
@@ -408,6 +459,21 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
}
EXPORT_SYMBOL(vga_get);
+/**
+ * vga_tryget - try to acquire & lock legacy VGA resources
+ * @pdev: pci device of VGA card or NULL for system default
+ * @rsrc: bit mask of resources to acquire and lock
+ *
+ * This function performs the same operation as vga_get(), but will return an
+ * error (-EBUSY) instead of blocking if the resources are already locked by
+ * another card. It can be called in any context.
+ *
+ * On success, release the VGA resource again with vga_put().
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
{
struct vga_device *vgadev;
@@ -435,6 +501,16 @@ bail:
}
EXPORT_SYMBOL(vga_tryget);
+/**
+ * vga_put - release lock on legacy VGA resources
+ * @pdev: pci device of VGA card or NULL for system default
+ * @rsrc: bit mask of resources to release
+ *
+ * This function releases resources previously locked by vga_get() or
+ * vga_tryget(). The resources aren't disabled right away, so that a subsequent
+ * vga_get() on the same card will succeed immediately. Resources have a
+ * counter, so locks are only released if the counter reaches 0.
+ */
void vga_put(struct pci_dev *pdev, unsigned int rsrc)
{
struct vga_device *vgadev;
@@ -716,7 +792,37 @@ void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes)
}
EXPORT_SYMBOL(vga_set_legacy_decoding);
-/* call with NULL to unregister */
+/**
+ * vga_client_register - register or unregister a VGA arbitration client
+ * @pdev: pci device of the VGA client
+ * @cookie: client cookie to be used in callbacks
+ * @irq_set_state: irq state change callback
+ * @set_vga_decode: vga decode change callback
+ *
+ * Clients have two callback mechanisms they can use.
+ *
+ * @irq_set_state callback: If a client can't disable its GPU's VGA
+ * resources, then we need to be able to ask it to turn off its irqs when we
+ * turn off its mem and io decoding.
+ *
+ * @set_vga_decode callback: If a client can disable its GPU VGA resource, it
+ * will get a callback from this to set the encode/decode state.
+ *
+ * Rationale: we cannot disable VGA decode resources unconditionally, since some
+ * single-GPU laptops seem to require ACPI or BIOS access to the VGA registers
+ * to control things like backlights etc. Hopefully newer multi-GPU laptops do
+ * something saner, and desktops won't have any special ACPI for this. The
+ * driver will get a callback when VGA arbitration is first used by userspace
+ * since some older X servers have issues.
+ *
+ * This function does not check whether a client for @pdev has been registered
+ * already.
+ *
+ * To unregister just call this function with @irq_set_state and @set_vga_decode
+ * both set to NULL for the same @pdev as originally used to register them.
+ *
+ * Returns: 0 on success, -1 on failure
+ */
int vga_client_register(struct pci_dev *pdev, void *cookie,
void (*irq_set_state)(void *cookie, bool state),
unsigned int (*set_vga_decode)(void *cookie,
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cd4599c0523b..4070b7386e9d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -138,7 +138,7 @@ config HID_ASUS
tristate "Asus"
depends on I2C_HID
---help---
- Support for Asus notebook built-in keyboard via i2c.
+ Support for Asus notebook built-in keyboard and touchpad via i2c.
Supported devices:
- EeeBook X205TA
@@ -214,7 +214,7 @@ config HID_CMEDIA
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
- depends on USB_HID && I2C && GPIOLIB
+ depends on USB_HID && I2C && GPIOLIB && GPIOLIB_IRQCHIP
---help---
Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
This is a HID device driver which registers as an i2c adapter
@@ -512,6 +512,14 @@ config HID_MAGICMOUSE
Say Y here if you want support for the multi-touch features of the
Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
+config HID_MAYFLASH
+ tristate "Mayflash game controller adapter force feedback"
+ depends on HID
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have HJZ Mayflash PS3 game controller adapters
+ and want to enable force feedback support.
+
config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
depends on HID
@@ -861,6 +869,13 @@ config THRUSTMASTER_FF
a THRUSTMASTER Dual Trigger 3-in-1 or a THRUSTMASTER Ferrari GT
Rumble Force or Force Feedback Wheel.
+config HID_UDRAW_PS3
+ tristate "THQ PS3 uDraw tablet"
+ depends on HID
+ ---help---
+ Say Y here if you want to use the THQ uDraw gaming tablet for
+ the PS3.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 86b2b5785fd2..4d111f23e801 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
+obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
@@ -96,6 +97,7 @@ obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
+obj-$(CONFIG_HID_UDRAW_PS3) += hid-udraw-ps3.o
obj-$(CONFIG_HID_LED) += hid-led.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 7a811ec4f2e1..d40ed9fdf68d 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -11,6 +11,12 @@
+ * This module is based on hid-ortek by
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
* Copyright (c) 2011 Jiri Kosina
+ *
+ * This module has been updated to add support for Asus i2c touchpad.
+ *
+ * Copyright (c) 2016 Brendan McGrath <redmcg@redmandi.dyndns.org>
+ * Copyright (c) 2016 Victor Vlasenko <victor.vlasenko@sysgears.com>
+ * Copyright (c) 2016 Frederik Wenigwieser <frederik.wenigwieser@gmail.com>
*/
/*
@@ -20,16 +26,287 @@
* any later version.
*/
-#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/input/mt.h>
#include "hid-ids.h"
+MODULE_AUTHOR("Yusuke Fujimaki <usk.fujimaki@gmail.com>");
+MODULE_AUTHOR("Brendan McGrath <redmcg@redmandi.dyndns.org>");
+MODULE_AUTHOR("Victor Vlasenko <victor.vlasenko@sysgears.com>");
+MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
+MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
+
+#define FEATURE_REPORT_ID 0x0d
+#define INPUT_REPORT_ID 0x5d
+
+#define INPUT_REPORT_SIZE 28
+
+#define MAX_CONTACTS 5
+
+#define MAX_X 2794
+#define MAX_Y 1758
+#define MAX_TOUCH_MAJOR 8
+#define MAX_PRESSURE 128
+
+#define CONTACT_DATA_SIZE 5
+
+#define BTN_LEFT_MASK 0x01
+#define CONTACT_TOOL_TYPE_MASK 0x80
+#define CONTACT_X_MSB_MASK 0xf0
+#define CONTACT_Y_MSB_MASK 0x0f
+#define CONTACT_TOUCH_MAJOR_MASK 0x07
+#define CONTACT_PRESSURE_MASK 0x7f
+
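+/*
+ * Per-contact report layout, as implied by the masks above and by
+ * asus_report_contact_down() below (a reading of the report data,
+ * not taken from a vendor spec):
+ *
+ *	byte 0: x MSB in the high nibble, y MSB in the low nibble
+ *	byte 1: x LSB
+ *	byte 2: y LSB
+ *	byte 3: bit 7 = tool type, bits 4-6 = touch major
+ *	byte 4: bits 0-6 = pressure
+ */
+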
+#define QUIRK_FIX_NOTEBOOK_REPORT BIT(0)
+#define QUIRK_NO_INIT_REPORTS BIT(1)
+#define QUIRK_SKIP_INPUT_MAPPING BIT(2)
+#define QUIRK_IS_MULTITOUCH BIT(3)
+
+#define NOTEBOOK_QUIRKS QUIRK_FIX_NOTEBOOK_REPORT
+#define TOUCHPAD_QUIRKS (QUIRK_NO_INIT_REPORTS | \
+ QUIRK_SKIP_INPUT_MAPPING | \
+ QUIRK_IS_MULTITOUCH)
+
+#define TRKID_SGN ((TRKID_MAX + 1) >> 1)
+
+struct asus_drvdata {
+ unsigned long quirks;
+ struct input_dev *input;
+};
+
+static void asus_report_contact_down(struct input_dev *input,
+ int toolType, u8 *data)
+{
+ int touch_major, pressure;
+ int x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
+ int y = MAX_Y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);
+
+ if (toolType == MT_TOOL_PALM) {
+ touch_major = MAX_TOUCH_MAJOR;
+ pressure = MAX_PRESSURE;
+ } else {
+ touch_major = (data[3] >> 4) & CONTACT_TOUCH_MAJOR_MASK;
+ pressure = data[4] & CONTACT_PRESSURE_MASK;
+ }
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major);
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+}
+
+/* Required for Synaptics Palm Detection */
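+/*
+ * The loop below finds the oldest live contact: mt->trkid is the next
+ * tracking ID to be assigned, and (id - oldid) & TRKID_SGN is set
+ * when id precedes oldid in the circular tracking-ID space.
+ */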
+static void asus_report_tool_width(struct input_dev *input)
+{
+ struct input_mt *mt = input->mt;
+ struct input_mt_slot *oldest;
+ int oldid, count, i;
+
+ oldest = NULL;
+ oldid = mt->trkid;
+ count = 0;
+
+ for (i = 0; i < mt->num_slots; ++i) {
+ struct input_mt_slot *ps = &mt->slots[i];
+ int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+
+ if (id < 0)
+ continue;
+ if ((id - oldid) & TRKID_SGN) {
+ oldest = ps;
+ oldid = id;
+ }
+ count++;
+ }
+
+ if (oldest) {
+ input_report_abs(input, ABS_TOOL_WIDTH,
+ input_mt_get_value(oldest, ABS_MT_TOUCH_MAJOR));
+ }
+}
+
+static void asus_report_input(struct input_dev *input, u8 *data)
+{
+ int i;
+ u8 *contactData = data + 2;
+
+ for (i = 0; i < MAX_CONTACTS; i++) {
+ bool down = !!(data[1] & BIT(i+3));
+ int toolType = contactData[3] & CONTACT_TOOL_TYPE_MASK ?
+ MT_TOOL_PALM : MT_TOOL_FINGER;
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, toolType, down);
+
+ if (down) {
+ asus_report_contact_down(input, toolType, contactData);
+ contactData += CONTACT_DATA_SIZE;
+ }
+ }
+
+ input_report_key(input, BTN_LEFT, data[1] & BTN_LEFT_MASK);
+ asus_report_tool_width(input);
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static int asus_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH &&
+ data[0] == INPUT_REPORT_ID &&
+ size == INPUT_REPORT_SIZE) {
+ asus_report_input(drvdata->input, data);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ int ret;
+ struct input_dev *input = hi->input;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, MAX_PRESSURE, 0, 0);
+
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ ret = input_mt_init_slots(input, MAX_CONTACTS, INPUT_MT_POINTER);
+
+ if (ret) {
+ hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
+ return ret;
+ }
+
+ drvdata->input = input;
+ }
+
+ return 0;
+}
+
+static int asus_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit,
+ int *max)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_SKIP_INPUT_MAPPING) {
+ /* Don't map anything from the HID report.
+ * We do it all manually in asus_input_configured
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int asus_start_multitouch(struct hid_device *hdev)
+{
+ int ret;
+ const unsigned char buf[] = { FEATURE_REPORT_ID, 0x00, 0x03, 0x01, 0x00 };
+ unsigned char *dmabuf = kmemdup(buf, sizeof(buf), GFP_KERNEL);
+
+ if (!dmabuf) {
+ ret = -ENOMEM;
+ hid_err(hdev, "Asus failed to alloc dma buf: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_raw_request(hdev, dmabuf[0], dmabuf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+ kfree(dmabuf);
+
+ if (ret != sizeof(buf)) {
+ hid_err(hdev, "Asus failed to start multitouch: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
+{
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+ return asus_start_multitouch(hdev);
+
+ return 0;
+}
+
+static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct asus_drvdata *drvdata;
+
+ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (drvdata == NULL) {
+ hid_err(hdev, "Can't alloc Asus descriptor\n");
+ return -ENOMEM;
+ }
+
+ hid_set_drvdata(hdev, drvdata);
+
+ drvdata->quirks = id->driver_data;
+
+ if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "Asus hid parse failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "Asus hw start failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!drvdata->input) {
+ hid_err(hdev, "Asus input not registered\n");
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
+ drvdata->input->name = "Asus TouchPad";
+
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ ret = asus_start_multitouch(hdev);
+ if (ret)
+ goto err_stop_hw;
+ }
+
+ return 0;
+err_stop_hw:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
+ struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & QUIRK_FIX_NOTEBOOK_REPORT &&
+ *rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
@@ -37,15 +314,25 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
static const struct hid_device_id asus_devices[] = {
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
static struct hid_driver asus_driver = {
- .name = "asus",
- .id_table = asus_devices,
- .report_fixup = asus_report_fixup
+ .name = "asus",
+ .id_table = asus_devices,
+ .report_fixup = asus_report_fixup,
+ .probe = asus_probe,
+ .input_mapping = asus_input_mapping,
+ .input_configured = asus_input_configured,
+#ifdef CONFIG_PM
+ .reset_resume = asus_reset_resume,
+#endif
+ .raw_event = asus_raw_event
};
module_hid_driver(asus_driver);
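The coordinate math in asus_report_contact_down() packs two 12-bit values into five bytes, with the high nibbles of X and Y sharing data[0]. A minimal userspace sketch of that layout, using made-up sample bytes (not part of the patch):

#include <stdio.h>

/* Model of the driver's contact decode: data[0] carries the X/Y high
 * nibbles, data[1]/data[2] the low bytes, data[3]/data[4] touch-major
 * and pressure. Constants mirror MAX_Y and the CONTACT_*_MASK values.
 */
int main(void)
{
	unsigned char data[5] = { 0xa5, 0x3c, 0x7e, 0x42, 0x51 }; /* sample */
	int x = (data[0] & 0xf0) << 4 | data[1];
	int y = 1758 - ((data[0] & 0x0f) << 8 | data[2]);
	int touch_major = (data[3] >> 4) & 0x07;
	int pressure = data[4] & 0x7f;

	printf("x=%d y=%d major=%d pressure=%d\n",
	       x, y, touch_major, pressure);
	return 0;
}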
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2b89c701076f..cff060b56da9 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -727,8 +727,9 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
(hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 ||
+ hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 ||
hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
- hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
hid->group == HID_GROUP_MULTITOUCH)
hid->group = HID_GROUP_GENERIC;
@@ -1857,6 +1858,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1883,6 +1885,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
@@ -1983,8 +1988,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
@@ -2059,6 +2065,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
@@ -2086,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 086d8a507157..f31a778b0851 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -24,6 +24,7 @@
* http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
*/
+#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/i2c.h>
@@ -32,6 +33,11 @@
#include <linux/usb/ch9.h>
#include "hid-ids.h"
+#define CP2112_REPORT_MAX_LENGTH 64
+#define CP2112_GPIO_CONFIG_LENGTH 5
+#define CP2112_GPIO_GET_LENGTH 2
+#define CP2112_GPIO_SET_LENGTH 3
+
enum {
CP2112_GPIO_CONFIG = 0x02,
CP2112_GPIO_GET = 0x03,
@@ -161,6 +167,14 @@ struct cp2112_device {
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
+ u8 *in_out_buffer;
+ spinlock_t lock;
+
+ struct gpio_desc *desc[8];
+ bool gpio_poll;
+ struct delayed_work gpio_poll_worker;
+ unsigned long irq_mask;
+ u8 gpio_prev_state;
};
static int gpio_push_pull = 0xFF;
@@ -171,62 +185,97 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[5];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
- sizeof(buf), HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- return ret;
+ goto exit;
}
buf[1] &= ~(1 << offset);
buf[2] = gpio_push_pull;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- return ret;
+ goto exit;
}
- return 0;
+ ret = 0;
+
+exit:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret <= 0 ? ret : -EIO;
}
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[3];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? 0xff : 0;
buf[2] = 1 << offset;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
+ CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
}
-static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int cp2112_gpio_get_all(struct gpio_chip *chip)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[2];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ spin_lock_irqsave(&dev->lock, flags);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
+ CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_GET_LENGTH) {
hid_err(hdev, "error requesting GPIO values: %d\n", ret);
- return ret;
+ ret = ret < 0 ? ret : -EIO;
+ goto exit;
}
- return (buf[1] >> offset) & 1;
+ ret = buf[1];
+
+exit:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return ret;
+}
+
+static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+
+ ret = cp2112_gpio_get_all(chip);
+ if (ret < 0)
+ return ret;
+
+ return (ret >> offset) & 1;
}
static int cp2112_gpio_direction_output(struct gpio_chip *chip,
@@ -234,27 +283,33 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[5];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
- sizeof(buf), HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- return ret;
+ goto fail;
}
buf[1] |= 1 << offset;
buf[2] = gpio_push_pull;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- return ret;
+ goto fail;
}
+ spin_unlock_irqrestore(&dev->lock, flags);
+
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
@@ -262,6 +317,10 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
cp2112_gpio_set(chip, offset, value);
return 0;
+
+fail:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret < 0 ? ret : -EIO;
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1000,6 +1059,166 @@ static void chmod_sysfs_attrs(struct hid_device *hdev)
}
}
+static void cp2112_gpio_irq_ack(struct irq_data *d)
+{
+}
+
+static void cp2112_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __clear_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ __set_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_poll_callback(struct work_struct *work)
+{
+ struct cp2112_device *dev = container_of(work, struct cp2112_device,
+ gpio_poll_worker.work);
+ struct irq_data *d;
+ u8 gpio_mask;
+ u8 virqs = (u8)dev->irq_mask;
+ u32 irq_type;
+ int irq, virq, ret;
+
+ ret = cp2112_gpio_get_all(&dev->gc);
+ if (ret == -ENODEV) /* the hardware has been disconnected */
+ return;
+ if (ret < 0)
+ goto exit;
+
+ gpio_mask = ret;
+
+ while (virqs) {
+ virq = ffs(virqs) - 1;
+ virqs &= ~BIT(virq);
+
+ if (!dev->gc.to_irq)
+ break;
+
+ irq = dev->gc.to_irq(&dev->gc, virq);
+
+ d = irq_get_irq_data(irq);
+ if (!d)
+ continue;
+
+ irq_type = irqd_get_trigger_type(d);
+
+ if (gpio_mask & BIT(virq)) {
+ /* Level High */
+
+ if (irq_type & IRQ_TYPE_LEVEL_HIGH)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_RISING) &&
+ !(dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ } else {
+ /* Level Low */
+
+ if (irq_type & IRQ_TYPE_LEVEL_LOW)
+ handle_nested_irq(irq);
+
+ if ((irq_type & IRQ_TYPE_EDGE_FALLING) &&
+ (dev->gpio_prev_state & BIT(virq)))
+ handle_nested_irq(irq);
+ }
+ }
+
+ dev->gpio_prev_state = gpio_mask;
+
+exit:
+ if (dev->gpio_poll)
+ schedule_delayed_work(&dev->gpio_poll_worker, 10);
+}
+
+static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
+ cp2112_gpio_direction_input(gc, d->hwirq);
+
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+ }
+
+ cp2112_gpio_irq_unmask(d);
+ return 0;
+}
+
+static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct cp2112_device *dev = gpiochip_get_data(gc);
+
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+}
+
+static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ return 0;
+}
+
+static struct irq_chip cp2112_gpio_irqchip = {
+ .name = "cp2112-gpio",
+ .irq_startup = cp2112_gpio_irq_startup,
+ .irq_shutdown = cp2112_gpio_irq_shutdown,
+ .irq_ack = cp2112_gpio_irq_ack,
+ .irq_mask = cp2112_gpio_irq_mask,
+ .irq_unmask = cp2112_gpio_irq_unmask,
+ .irq_set_type = cp2112_gpio_irq_type,
+};
+
+static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ int pin)
+{
+ int ret;
+
+ if (dev->desc[pin])
+ return -EINVAL;
+
+ dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin,
+ "HID/I2C:Event");
+ if (IS_ERR(dev->desc[pin])) {
+ dev_err(dev->gc.parent, "Failed to request GPIO\n");
+ return PTR_ERR(dev->desc[pin]);
+ }
+
+ ret = gpiochip_lock_as_irq(&dev->gc, pin);
+ if (ret) {
+ dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
+ goto err_desc;
+ }
+
+ ret = gpiod_to_irq(dev->desc[pin]);
+ if (ret < 0) {
+ dev_err(dev->gc.parent, "Failed to translate GPIO to IRQ\n");
+ goto err_lock;
+ }
+
+ return ret;
+
+err_lock:
+ gpiochip_unlock_as_irq(&dev->gc, pin);
+err_desc:
+ gpiochip_free_own_desc(dev->desc[pin]);
+ dev->desc[pin] = NULL;
+ return ret;
+}
+
static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct cp2112_device *dev;
@@ -1007,6 +1226,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct cp2112_smbus_config_report config;
int ret;
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH,
+ GFP_KERNEL);
+ if (!dev->in_out_buffer)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
@@ -1063,12 +1293,6 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_power_normal;
}
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto err_power_normal;
- }
-
hid_set_drvdata(hdev, (void *)dev);
dev->hdev = hdev;
dev->adap.owner = THIS_MODULE;
@@ -1087,7 +1311,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret) {
hid_err(hdev, "error registering i2c adapter\n");
- goto err_free_dev;
+ goto err_power_normal;
}
hid_dbg(hdev, "adapter registered\n");
@@ -1117,14 +1341,21 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
chmod_sysfs_attrs(hdev);
hid_hw_power(hdev, PM_HINT_NORMAL);
+ ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev->gc.parent, "failed to add IRQ chip\n");
+ goto err_sysfs_remove;
+ }
+
return ret;
+err_sysfs_remove:
+ sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
err_gpiochip_remove:
gpiochip_remove(&dev->gc);
err_free_i2c:
i2c_del_adapter(&dev->adap);
-err_free_dev:
- kfree(dev);
err_power_normal:
hid_hw_power(hdev, PM_HINT_NORMAL);
err_hid_close:
@@ -1137,10 +1368,22 @@ err_hid_stop:
static void cp2112_remove(struct hid_device *hdev)
{
struct cp2112_device *dev = hid_get_drvdata(hdev);
+ int i;
sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
- gpiochip_remove(&dev->gc);
i2c_del_adapter(&dev->adap);
+
+ if (dev->gpio_poll) {
+ dev->gpio_poll = false;
+ cancel_delayed_work_sync(&dev->gpio_poll_worker);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->desc); i++) {
+ gpiochip_unlock_as_irq(&dev->gc, i);
+ gpiochip_free_own_desc(dev->desc[i]);
+ }
+
+ gpiochip_remove(&dev->gc);
/* i2c_del_adapter has finished removing all i2c devices from our
* adapter. Well behaved devices should no longer call our cp2112_xfer
* and should have waited for any pending calls to finish. It has also
@@ -1149,7 +1392,6 @@ static void cp2112_remove(struct hid_device *hdev)
*/
hid_hw_close(hdev);
hid_hw_stop(hdev);
- kfree(dev);
}
static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report,
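Since the CP2112 has no interrupt endpoint for GPIO changes, the new irqchip polls and synthesizes events by comparing each sample against gpio_prev_state. The per-pin decision in cp2112_gpio_poll_callback() reduces to the bit logic below; this is a standalone userspace sketch, and the EDGE_*/LEVEL_* flags are stand-ins for the kernel's IRQ_TYPE_* constants:

#include <stdio.h>

#define EDGE_RISING  0x1
#define EDGE_FALLING 0x2
#define LEVEL_HIGH   0x4
#define LEVEL_LOW    0x8

/* Does this pin's nested IRQ fire for a prev -> cur transition? */
static int irq_fires(unsigned int type, int prev, int cur)
{
	if (cur)
		return (type & LEVEL_HIGH) ||
		       ((type & EDGE_RISING) && !prev);
	return (type & LEVEL_LOW) ||
	       ((type & EDGE_FALLING) && prev);
}

int main(void)
{
	printf("rising  0->1: %d\n", irq_fires(EDGE_RISING, 0, 1));  /* 1 */
	printf("rising  1->1: %d\n", irq_fires(EDGE_RISING, 1, 1));  /* 0 */
	printf("falling 1->0: %d\n", irq_fires(EDGE_FALLING, 1, 0)); /* 1 */
	printf("level-low  0: %d\n", irq_fires(LEVEL_LOW, 0, 0));    /* 1 */
	return 0;
}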
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 8fd4bf77f264..818ea7d93533 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -234,58 +234,6 @@ static __u8 pid0011_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 pid0006_rdesc_fixed[] = {
- 0x05, 0x01, /* Usage Page (Generic Desktop) */
- 0x09, 0x04, /* Usage (Joystick) */
- 0xA1, 0x01, /* Collection (Application) */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x05, /* Report Count (5) */
- 0x15, 0x00, /* Logical Minimum (0) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x35, 0x00, /* Physical Minimum (0) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x09, 0x30, /* Usage (X) */
- 0x09, 0x33, /* Usage (Ry) */
- 0x09, 0x32, /* Usage (Z) */
- 0x09, 0x31, /* Usage (Y) */
- 0x09, 0x34, /* Usage (Ry) */
- 0x81, 0x02, /* Input (Variable) */
- 0x75, 0x04, /* Report Size (4) */
- 0x95, 0x01, /* Report Count (1) */
- 0x25, 0x07, /* Logical Maximum (7) */
- 0x46, 0x3B, 0x01, /* Physical Maximum (315) */
- 0x65, 0x14, /* Unit (Centimeter) */
- 0x09, 0x39, /* Usage (Hat switch) */
- 0x81, 0x42, /* Input (Variable) */
- 0x65, 0x00, /* Unit (None) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x0C, /* Report Count (12) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x05, 0x09, /* Usage Page (Button) */
- 0x19, 0x01, /* Usage Minimum (0x01) */
- 0x29, 0x0C, /* Usage Maximum (0x0C) */
- 0x81, 0x02, /* Input (Variable) */
- 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x08, /* Report Count (8) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x09, 0x01, /* Usage (0x01) */
- 0x81, 0x02, /* Input (Variable) */
- 0xC0, /* End Collection */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x07, /* Report Count (7) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x09, 0x02, /* Usage (0x02) */
- 0x91, 0x02, /* Output (Variable) */
- 0xC0, /* End Collection */
- 0xC0 /* End Collection */
-};
-
static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -296,16 +244,34 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(pid0011_rdesc_fixed);
}
break;
- case 0x0006:
- if (*rsize == sizeof(pid0006_rdesc_fixed)) {
- rdesc = pid0006_rdesc_fixed;
- *rsize = sizeof(pid0006_rdesc_fixed);
- }
- break;
}
return rdesc;
}
+#define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c))
+#define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c))
+
+static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid) {
+ /*
+ * revert to the old hid-input behavior where axes
+ * can be randomly assigned when hid->usage is reused.
+ */
+ case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
+ case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
+ if (field->flags & HID_MAIN_ITEM_RELATIVE)
+ map_rel(usage->hid & 0xf);
+ else
+ map_abs(usage->hid & 0xf);
+ return 1;
+ }
+
+ return 0;
+}
+
static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
@@ -352,6 +318,7 @@ static struct hid_driver dr_driver = {
.id_table = dr_devices,
.report_fixup = dr_report_fixup,
.probe = dr_probe,
+ .input_mapping = dr_input_mapping,
};
module_hid_driver(dr_driver);
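dr_input_mapping() leans on the fact that for the six generic-desktop axes the low nibble of the HID usage equals the corresponding ABS_* code (X 0x30 -> ABS_X 0 through Rz 0x35 -> ABS_RZ 5), which is why `usage->hid & 0xf` suffices. Returning 1 from an input_mapping hook tells hid-input the usage is fully mapped, 0 falls back to the default mapping, and a negative value drops the usage. A quick userspace check of the nibble trick:

#include <stdio.h>

int main(void)
{
	/* HID generic-desktop usages X, Y, Z, Rx, Ry, Rz. */
	const unsigned int usage[] = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35 };
	const char *abs_name[] = { "ABS_X", "ABS_Y", "ABS_Z",
				   "ABS_RX", "ABS_RY", "ABS_RZ" };
	int i;

	for (i = 0; i < 6; i++)
		printf("usage 0x%02x -> code %u (%s)\n",
		       usage[i], usage[i] & 0xf, abs_name[i]);
	return 0;
}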
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c36ec6d01b8b..ec277b96eaa1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -64,6 +64,9 @@
#define USB_VENDOR_ID_AKAI 0x2011
#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
+#define USB_VENDOR_ID_AKAI_09E8 0x09E8
+#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031
+
#define USB_VENDOR_ID_ALCOR 0x058f
#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
@@ -168,6 +171,7 @@
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
#define USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD 0x8585
+#define USB_DEVICE_ID_ASUSTEK_TOUCHPAD 0x0101
#define USB_VENDOR_ID_ATEN 0x0557
#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -176,6 +180,7 @@
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
#define USB_DEVICE_ID_ATEN_CS682 0x2213
+#define USB_DEVICE_ID_ATEN_CS692 0x8021
#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
@@ -311,8 +316,10 @@
#define USB_VENDOR_ID_DMI 0x0c0b
#define USB_DEVICE_ID_DMI_ENC 0x5fab
-#define USB_VENDOR_ID_DRAGONRISE 0x0079
-#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
+#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
@@ -714,8 +721,9 @@
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 0x07dc
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 0x07e2
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 0x07e4
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 0x07e8
#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
-#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07de
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_VENDOR_ID_MOJO 0x8282
@@ -899,6 +907,8 @@
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
@@ -955,6 +965,9 @@
#define USB_VENDOR_ID_THINGM 0x27b8
#define USB_DEVICE_ID_BLINK1 0x01ed
+#define USB_VENDOR_ID_THQ 0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
+
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TIVO 0x150a
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fb9ace1cef8b..d05f903c7614 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -253,6 +253,7 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
case ABS_RX:
case ABS_RY:
case ABS_RZ:
+ case ABS_WHEEL:
case ABS_TILT_X:
case ABS_TILT_Y:
if (field->unit == 0x14) { /* If degrees */
@@ -1468,6 +1469,31 @@ static void hidinput_cleanup_hidinput(struct hid_device *hid,
kfree(hidinput);
}
+static struct hid_input *hidinput_match(struct hid_report *report)
+{
+ struct hid_device *hid = report->device;
+ struct hid_input *hidinput;
+
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ if (hidinput->report &&
+ hidinput->report->id == report->id)
+ return hidinput;
+ }
+
+ return NULL;
+}
+
+static inline void hidinput_configure_usages(struct hid_input *hidinput,
+ struct hid_report *report)
+{
+ int i, j;
+
+ for (i = 0; i < report->maxfield; i++)
+ for (j = 0; j < report->field[i]->maxusage; j++)
+ hidinput_configure_usage(hidinput, report->field[i],
+ report->field[i]->usage + j);
+}
+
/*
* Register the input device; print a message.
* Configure the input layer interface
@@ -1478,8 +1504,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
{
struct hid_driver *drv = hid->driver;
struct hid_report *report;
- struct hid_input *hidinput = NULL;
- int i, j, k;
+ struct hid_input *next, *hidinput = NULL;
+ int i, k;
INIT_LIST_HEAD(&hid->inputs);
INIT_WORK(&hid->led_work, hidinput_led_worker);
@@ -1509,43 +1535,40 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
if (!report->maxfield)
continue;
+ /*
+ * Find the previous hidinput report attached
+ * to this report id.
+ */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
+ hidinput = hidinput_match(report);
+
if (!hidinput) {
hidinput = hidinput_allocate(hid);
if (!hidinput)
goto out_unwind;
}
- for (i = 0; i < report->maxfield; i++)
- for (j = 0; j < report->field[i]->maxusage; j++)
- hidinput_configure_usage(hidinput, report->field[i],
- report->field[i]->usage + j);
-
- if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput))
- continue;
+ hidinput_configure_usages(hidinput, report);
- if (hid->quirks & HID_QUIRK_MULTI_INPUT) {
- /* This will leave hidinput NULL, so that it
- * allocates another one if we have more inputs on
- * the same interface. Some devices (e.g. Happ's
- * UGCI) cram a lot of unrelated inputs into the
- * same interface. */
+ if (hid->quirks & HID_QUIRK_MULTI_INPUT)
hidinput->report = report;
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- hidinput = NULL;
- }
}
}
- if (hidinput && (hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
- !hidinput_has_been_populated(hidinput)) {
- /* no need to register an input device not populated */
- hidinput_cleanup_hidinput(hid, hidinput);
- hidinput = NULL;
+ list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
+ if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
+ !hidinput_has_been_populated(hidinput)) {
+ /* no need to register an input device not populated */
+ hidinput_cleanup_hidinput(hid, hidinput);
+ continue;
+ }
+
+ if (drv->input_configured &&
+ drv->input_configured(hid, hidinput))
+ goto out_unwind;
+ if (input_register_device(hidinput->input))
+ goto out_unwind;
+ hidinput->registered = true;
}
if (list_empty(&hid->inputs)) {
@@ -1553,20 +1576,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
goto out_unwind;
}
- if (hidinput) {
- if (drv->input_configured &&
- drv->input_configured(hid, hidinput))
- goto out_cleanup;
- if (input_register_device(hidinput->input))
- goto out_cleanup;
- }
-
return 0;
-out_cleanup:
- list_del(&hidinput->list);
- input_free_device(hidinput->input);
- kfree(hidinput);
out_unwind:
/* unwind the ones we already registered */
hidinput_disconnect(hid);
@@ -1583,7 +1594,10 @@ void hidinput_disconnect(struct hid_device *hid)
list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
list_del(&hidinput->list);
- input_unregister_device(hidinput->input);
+ if (hidinput->registered)
+ input_unregister_device(hidinput->input);
+ else
+ input_free_device(hidinput->input);
kfree(hidinput);
}
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
index d8d55f37b4f5..d3e1ab162f7c 100644
--- a/drivers/hid/hid-led.c
+++ b/drivers/hid/hid-led.c
@@ -100,6 +100,7 @@ struct hidled_device {
const struct hidled_config *config;
struct hid_device *hdev;
struct hidled_rgb *rgb;
+ u8 *buf;
struct mutex lock;
};
@@ -118,13 +119,19 @@ static int hidled_send(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
+ /*
+ * buffer provided to hid_hw_raw_request must not be on the stack
+ * and must not be part of a data structure
+ */
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
if (ldev->config->report_type == RAW_REQUEST)
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
else if (ldev->config->report_type == OUTPUT_REPORT)
- ret = hid_hw_output_report(ldev->hdev, buf,
+ ret = hid_hw_output_report(ldev->hdev, ldev->buf,
ldev->config->report_size);
else
ret = -EINVAL;
@@ -147,17 +154,21 @@ static int hidled_recv(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0)
goto err;
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
+
+ memcpy(buf, ldev->buf, ldev->config->report_size);
err:
mutex_unlock(&ldev->lock);
@@ -447,6 +458,10 @@ static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!ldev)
return -ENOMEM;
+ ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
+ if (!ldev->buf)
+ return -ENOMEM;
+
ret = hid_parse(hdev);
if (ret)
return ret;
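The hid-led change applies the rule its new comment states: buffers handed to hid_hw_raw_request() must be kmalloc'd memory, never the stack or the interior of a larger struct, because the transport layer may use them for USB DMA. The general shape of the fix, as a kernel-style sketch (REPORT_LEN and tmpl are hypothetical placeholders, not identifiers from these drivers):

	/* Sketch: DMA-safe round trip for a HID feature report. */
	u8 *buf = kmalloc(REPORT_LEN, GFP_KERNEL);	/* heap, not stack */

	if (!buf)
		return -ENOMEM;
	memcpy(buf, tmpl, REPORT_LEN);			/* stage the request */
	ret = hid_hw_raw_request(hdev, buf[0], buf, REPORT_LEN,
				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
	kfree(buf);

The same rule explains the kmemdup() conversions in hid-asus.c above and in hid-lg.c, hid-magicmouse.c and hid-rmi.c below.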
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 76f644deb0a7..c5c5fbe9d605 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -756,11 +756,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
/* Setup wireless link with Logitech Wii wheel */
if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
- unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret >= 0) {
/* insert a little delay of 10 jiffies ~ 40ms */
wait_queue_head_t wait;
@@ -772,9 +777,10 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
buf[1] = 0xB2;
get_random_bytes(&buf[2], 2);
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
}
+ kfree(buf);
}
if (drv_data->quirks & LG_FF)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index d6fa496d0ca2..20b40ad26325 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -493,7 +493,8 @@ static int magicmouse_input_configured(struct hid_device *hdev,
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- __u8 feature[] = { 0xd7, 0x01 };
+ const u8 feature[] = { 0xd7, 0x01 };
+ u8 *buf;
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
@@ -544,6 +545,12 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
+ buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
/*
* Some devices respond with 'invalid report id' when the feature
* report switching them into multitouch mode is sent.
@@ -552,8 +559,9 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
if (ret != -EIO && ret != sizeof(feature)) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
new file mode 100644
index 000000000000..d9090765a6e5
--- /dev/null
+++ b/drivers/hid/hid-mf.c
@@ -0,0 +1,166 @@
+/*
+ * Force feedback support for Mayflash game controller adapters.
+ *
+ * These devices are manufactured by Mayflash but identify themselves
+ * using the vendor ID of DragonRise Inc.
+ *
+ * Tested with:
+ * 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
+ *
+ * The following adapters probably work too, but need to be tested:
+ * 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
+ * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
+ *
+ * Copyright (c) 2016 Marcel Hasler <mahasler@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct mf_device {
+ struct hid_report *report;
+};
+
+static int mf_play(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct mf_device *mf = data;
+ int strong, weak;
+
+ strong = effect->u.rumble.strong_magnitude;
+ weak = effect->u.rumble.weak_magnitude;
+
+ dbg_hid("Called with 0x%04x 0x%04x.\n", strong, weak);
+
+ strong = strong * 0xff / 0xffff;
+ weak = weak * 0xff / 0xffff;
+
+ dbg_hid("Running with 0x%02x 0x%02x.\n", strong, weak);
+
+ mf->report->field[0]->value[0] = weak;
+ mf->report->field[0]->value[1] = strong;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+
+ return 0;
+}
+
+static int mf_init(struct hid_device *hid)
+{
+ struct mf_device *mf;
+
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+
+ struct list_head *report_ptr;
+ struct hid_report *report;
+
+ struct list_head *input_ptr = &hid->inputs;
+ struct hid_input *input;
+
+ struct input_dev *dev;
+
+ int error;
+
+ /* Setup each of the four inputs */
+ list_for_each(report_ptr, report_list) {
+ report = list_entry(report_ptr, struct hid_report, list);
+
+ if (report->maxfield < 1 || report->field[0]->report_count < 2) {
+ hid_err(hid, "Invalid report, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ if (list_is_last(input_ptr, &hid->inputs)) {
+ hid_err(hid, "Missing input, this should never happen!\n");
+ return -ENODEV;
+ }
+
+ input_ptr = input_ptr->next;
+ input = list_entry(input_ptr, struct hid_input, list);
+
+ mf = kzalloc(sizeof(struct mf_device), GFP_KERNEL);
+ if (!mf)
+ return -ENOMEM;
+
+ dev = input->input;
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ error = input_ff_create_memless(dev, mf, mf_play);
+ if (error) {
+ kfree(mf);
+ return error;
+ }
+
+ mf->report = report;
+ mf->report->field[0]->value[0] = 0x00;
+ mf->report->field[0]->value[1] = 0x00;
+ hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+ }
+
+ hid_info(hid, "Force feedback for HJZ Mayflash game controller "
+ "adapters by Marcel Hasler <mahasler@gmail.com>\n");
+
+ return 0;
+}
+
+static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+ int error;
+
+ dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");
+
+ /* Split device into four inputs */
+ hid->quirks |= HID_QUIRK_MULTI_INPUT;
+
+ error = hid_parse(hid);
+ if (error) {
+ hid_err(hid, "HID parse failed.\n");
+ return error;
+ }
+
+ error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hid, "HID hw start failed\n");
+ return error;
+ }
+
+ error = mf_init(hid);
+ if (error) {
+ hid_err(hid, "Force feedback init failed.\n");
+ hid_hw_stop(hid);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id mf_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mf_devices);
+
+static struct hid_driver mf_driver = {
+ .name = "hid_mf",
+ .id_table = mf_devices,
+ .probe = mf_probe,
+};
+module_hid_driver(mf_driver);
+
+MODULE_LICENSE("GPL");
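mf_play() rescales the 16-bit ff_effect rumble magnitudes to the single byte the adapter takes with `v * 0xff / 0xffff`. A standalone check of the endpoints (userspace sketch):

#include <stdio.h>

int main(void)
{
	const unsigned int in[] = { 0x0000, 0x8000, 0xffff };
	int i;

	for (i = 0; i < 3; i++)
		printf("0x%04x -> 0x%02x\n", in[i],
		       in[i] * 0xffu / 0xffffu); /* 16-bit -> 8-bit */
	return 0;
}

This prints 0x00, 0x7f and 0xff, so full scale maps to full scale with no intermediate overflow in 32-bit arithmetic.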
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6cd392e9f99..74b7b84a0420 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -280,9 +280,11 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4),
+ .driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2),
.driver_data = MS_HIDINPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fb6f1f447279..6dca66806844 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -108,6 +108,7 @@ struct mt_device {
int cc_value_index; /* contact count value index in the field */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
+ unsigned long initial_quirks; /* initial quirks state */
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
__s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
@@ -318,13 +319,10 @@ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
u8 *buf;
/*
- * Only fetch the feature report if initial reports are not already
- * been retrieved. Currently this is only done for Windows 8 touch
- * devices.
+ * Do not fetch the feature report if the device has been explicitly
+ * marked as non-capable.
*/
- if (!(hdev->quirks & HID_QUIRK_NO_INIT_REPORTS))
- return;
- if (td->mtclass.name != MT_CLS_WIN_8)
+ if (td->initial_quirks & HID_QUIRK_NO_INIT_REPORTS)
return;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -567,6 +565,14 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_UP_BUTTON:
code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE);
+ /*
+ * MS PTP spec says that external buttons left and right have
+ * usages 2 and 3.
+ */
+ if (cls->name == MT_CLS_WIN_8 &&
+ field->application == HID_DG_TOUCHPAD &&
+ (usage->hid & HID_USAGE) > 1)
+ code--;
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
input_set_capability(hi->input, EV_KEY, code);
return 1;
@@ -842,7 +848,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (!td->mtclass.export_all_inputs &&
field->application != HID_DG_TOUCHSCREEN &&
field->application != HID_DG_PEN &&
- field->application != HID_DG_TOUCHPAD)
+ field->application != HID_DG_TOUCHPAD &&
+ field->application != HID_GD_KEYBOARD &&
+ field->application != HID_CP_CONSUMER_CONTROL)
return -1;
/*
@@ -1083,36 +1091,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
-
- /*
- * This allows the driver to handle different input sensors
- * that emits events through different reports on the same HID
- * device.
- */
- hdev->quirks |= HID_QUIRK_MULTI_INPUT;
- hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
-
- /*
- * Handle special quirks for Windows 8 certified devices.
- */
- if (id->group == HID_GROUP_MULTITOUCH_WIN_8)
- /*
- * Some multitouch screens do not like to be polled for input
- * reports. Fortunately, the Win8 spec says that all touches
- * should be sent during each report, making the initialization
- * of input reports unnecessary.
- *
- * In addition some touchpads do not behave well if we read
- * all feature reports from them. Instead we prevent
- * initial report fetching and then selectively fetch each
- * report we are interested in.
- */
- hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
-
td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
@@ -1136,6 +1114,39 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
td->serial_maybe = true;
+ /*
+ * Store the initial quirk state
+ */
+ td->initial_quirks = hdev->quirks;
+
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+ /*
+ * This allows the driver to handle different input sensors
+ * that emit events through different reports on the same HID
+ * device.
+ */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
+
+ /*
+ * Some multitouch screens do not like to be polled for input
+ * reports. Fortunately, the Win8 spec says that all touches
+ * should be sent during each report, making the initialization
+ * of input reports unnecessary. For Win7 devices, well, let's hope
+ * they will still be happy (this is only a problem if a touch
+ * was already there while probing the device).
+ *
+ * In addition some touchpads do not behave well if we read
+ * all feature reports from them. Instead we prevent
+ * initial report fetching and then selectively fetch each
+ * report we are interested in.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
ret = hid_parse(hdev);
if (ret != 0)
return ret;
@@ -1204,8 +1215,11 @@ static int mt_resume(struct hid_device *hdev)
static void mt_remove(struct hid_device *hdev)
{
+ struct mt_device *td = hid_get_drvdata(hdev);
+
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
+ hdev->quirks = td->initial_quirks;
}
/*
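The probe path above now snapshots hdev->quirks into td->initial_quirks before OR-ing in the driver's own flags, and mt_remove() restores that snapshot, so a rebind (or a different driver binding next) starts from the device's original quirk state rather than inheriting hid-multitouch's additions. The pattern in miniature (userspace sketch with stand-in flag values):

#include <stdio.h>

#define QUIRK_MULTI_INPUT	0x01
#define QUIRK_NO_INIT_REPORTS	0x02

int main(void)
{
	unsigned long quirks = 0x100;		/* preexisting state */
	unsigned long initial = quirks;		/* snapshot in probe */

	quirks |= QUIRK_MULTI_INPUT | QUIRK_NO_INIT_REPORTS;
	printf("while bound:  0x%lx\n", quirks);

	quirks = initial;			/* restore in remove */
	printf("after remove: 0x%lx\n", quirks);
	return 0;
}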
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 9cd2ca34a6be..be89bcbf6a71 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -188,10 +188,16 @@ static int rmi_set_page(struct hid_device *hdev, u8 page)
static int rmi_set_mode(struct hid_device *hdev, u8 mode)
{
int ret;
- u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+ const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+ u8 *buf;
- ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf,
+ buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf,
sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
if (ret < 0) {
dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode,
ret);
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 5614fee82347..3a84aaf1418b 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -292,11 +292,11 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
bool input = false;
int value = 0;
- if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage,
+ if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
feature = true;
field_index = index + sensor_inst->input_field_count;
- } else if (sscanf(attr->attr.name, "input-%d-%x-%s", &index, &usage,
+ } else if (sscanf(attr->attr.name, "input-%x-%x-%s", &index, &usage,
name) == 3) {
input = true;
field_index = index;
@@ -398,7 +398,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
char name[HID_CUSTOM_NAME_LENGTH];
int value;
- if (sscanf(attr->attr.name, "feature-%d-%x-%s", &index, &usage,
+ if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
name) == 3) {
field_index = index + sensor_inst->input_field_count;
} else
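The %d -> %x change matters because the sysfs attribute names embed the field index in hex; once the index passes 9, %d stops at the first hex letter and sscanf() no longer yields the three fields the driver checks for. A userspace demonstration (the attribute name is a made-up example):

#include <stdio.h>

int main(void)
{
	const char *name = "feature-a-200e1-value"; /* index 0xa, usage 0x200e1 */
	int index_dec = -1, index_hex = -1;
	unsigned int usage;
	char field[32];

	int n_dec = sscanf(name, "feature-%d-%x-%s", &index_dec, &usage, field);
	int n_hex = sscanf(name, "feature-%x-%x-%s", &index_hex, &usage, field);

	printf("%%d parses %d field(s), %%x parses %d field(s), index=%d\n",
	       n_dec, n_hex, index_hex);
	return 0;
}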
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 658a607dc6d9..5c925228847c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -212,6 +212,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
__s32 value;
int ret = 0;
+ memset(buffer, 0, buffer_size);
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
if (!report || (field_index >= report->maxfield)) {
@@ -251,6 +252,9 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
int report_size;
int ret = 0;
+ u8 *val_ptr;
+ int buffer_index = 0;
+ int i;
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
@@ -271,7 +275,17 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
goto done_proc;
}
ret = min(report_size, buffer_size);
- memcpy(buffer, report->field[field_index]->value, ret);
+
+ val_ptr = (u8 *)report->field[field_index]->value;
+ for (i = 0; i < report->field[field_index]->report_count; ++i) {
+ if (buffer_index >= ret)
+ break;
+
+ memcpy(&((u8 *)buffer)[buffer_index], val_ptr,
+ report->field[field_index]->report_size / 8);
+ val_ptr += sizeof(__s32);
+ buffer_index += (report->field[field_index]->report_size / 8);
+ }
done_proc:
mutex_unlock(&data->mutex);
@@ -781,6 +795,12 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
USB_DEVICE_ID_MS_TYPE_COVER_2),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
+ 0x07bd), /* Microsoft Surface 3 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROCHIP,
+ 0x0f01), /* MM7150 */
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
USB_DEVICE_ID_STM_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
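sensor_hub_get_feature() used to memcpy() the field's value array verbatim, but each value occupies a full __s32 slot while only report_size/8 bytes of it are real report data, so the raw copy interleaved stale padding into the caller's buffer. The new loop copies report_size/8 bytes per element and advances the source by sizeof(__s32) each time. A userspace model of the repacking (little-endian assumed, matching the byte order the per-slot copy relies on):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	const int32_t values[3] = { 0x11, 0x22, 0x33 }; /* one __s32 per count */
	const unsigned int step = 16 / 8;		/* report_size / 8 */
	uint8_t out[16];
	unsigned int i, off = 0;

	for (i = 0; i < 3; i++) {
		memcpy(out + off, (const uint8_t *)&values[i], step);
		off += step;
	}
	for (i = 0; i < off; i++)
		printf("%02x ", out[i]);	/* 11 00 22 00 33 00 */
	printf("\n");
	return 0;
}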
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b0bb99a821bd..7687c0875395 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -36,6 +36,8 @@
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
#include "hid-ids.h"
@@ -374,7 +376,7 @@ static u8 dualshock4_usb_rdesc[] = {
0x65, 0x00, /* Unit, */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -403,14 +405,14 @@ static u8 dualshock4_usb_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -687,7 +689,7 @@ static u8 dualshock4_bt_rdesc[] = {
0x81, 0x42, /* Input (Variable, Null State), */
0x05, 0x09, /* Usage Page (Button), */
0x19, 0x01, /* Usage Minimum (01h), */
- 0x29, 0x0E, /* Usage Maximum (0Eh), */
+ 0x29, 0x0D, /* Usage Maximum (0Dh), */
0x15, 0x00, /* Logical Minimum (0), */
0x25, 0x01, /* Logical Maximum (1), */
0x75, 0x01, /* Report Size (1), */
@@ -712,14 +714,14 @@ static u8 dualshock4_bt_rdesc[] = {
0x19, 0x40, /* Usage Minimum (40h), */
0x29, 0x42, /* Usage Maximum (42h), */
0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
- 0x26, 0x00, 0x7F, /* Logical Maximum (32767), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x75, 0x10, /* Report Size (16), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x19, 0x43, /* Usage Minimum (43h), */
0x29, 0x45, /* Usage Maximum (45h), */
- 0x16, 0x00, 0xE0, /* Logical Minimum (-8192), */
- 0x26, 0xFF, 0x1F, /* Logical Maximum (8191), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
0x95, 0x03, /* Report Count (3), */
0x81, 0x02, /* Input (Variable), */
0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
@@ -975,6 +977,32 @@ static const unsigned int buzz_keymap[] = {
[20] = BTN_TRIGGER_HAPPY20,
};
+static const unsigned int ds4_absmap[] = {
+ [0x30] = ABS_X,
+ [0x31] = ABS_Y,
+ [0x32] = ABS_RX, /* right stick X */
+ [0x33] = ABS_Z, /* L2 */
+ [0x34] = ABS_RZ, /* R2 */
+ [0x35] = ABS_RY, /* right stick Y */
+};
+
+static const unsigned int ds4_keymap[] = {
+ [0x1] = BTN_WEST, /* Square */
+ [0x2] = BTN_SOUTH, /* Cross */
+ [0x3] = BTN_EAST, /* Circle */
+ [0x4] = BTN_NORTH, /* Triangle */
+ [0x5] = BTN_TL, /* L1 */
+ [0x6] = BTN_TR, /* R1 */
+ [0x7] = BTN_TL2, /* L2 */
+ [0x8] = BTN_TR2, /* R2 */
+ [0x9] = BTN_SELECT, /* Share */
+ [0xa] = BTN_START, /* Options */
+ [0xb] = BTN_THUMBL, /* L3 */
+ [0xc] = BTN_THUMBR, /* R3 */
+ [0xd] = BTN_MODE, /* PS */
+};
+
static enum power_supply_property sony_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CAPACITY,
@@ -1019,14 +1047,24 @@ struct motion_output_report_02 {
u8 rumble;
};
-#define DS4_REPORT_0x02_SIZE 37
-#define DS4_REPORT_0x05_SIZE 32
-#define DS4_REPORT_0x11_SIZE 78
-#define DS4_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0x02_SIZE 37
+#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_INPUT_REPORT_0x11_SIZE 78
+#define DS4_OUTPUT_REPORT_0x05_SIZE 32
+#define DS4_OUTPUT_REPORT_0x11_SIZE 78
#define SIXAXIS_REPORT_0xF2_SIZE 17
#define SIXAXIS_REPORT_0xF5_SIZE 8
#define MOTION_REPORT_0x02_SIZE 49
+/* Offsets relative to USB input report (0x1). Bluetooth (0x11) requires an
+ * additional +2.
+ */
+#define DS4_INPUT_REPORT_BUTTON_OFFSET 5
+#define DS4_INPUT_REPORT_BATTERY_OFFSET 30
+#define DS4_INPUT_REPORT_TOUCHPAD_OFFSET 33
+
+#define DS4_TOUCHPAD_SUFFIX " Touchpad"
+
static DEFINE_SPINLOCK(sony_dev_list_lock);
static LIST_HEAD(sony_device_list);
static DEFINE_IDA(sony_device_id_allocator);
@@ -1035,6 +1073,7 @@ struct sony_sc {
spinlock_t lock;
struct list_head list_node;
struct hid_device *hdev;
+ struct input_dev *touchpad;
struct led_classdev *leds[MAX_LEDS];
unsigned long quirks;
struct work_struct state_worker;
@@ -1130,6 +1169,37 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
+static int ds4_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
+ unsigned int key = usage->hid & HID_USAGE;
+
+ if (key >= ARRAY_SIZE(ds4_keymap))
+ return -1;
+
+ key = ds4_keymap[key];
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
+ return 1;
+ } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK) {
+ unsigned int abs = usage->hid & HID_USAGE;
+
+ /* Let the HID parser deal with the HAT. */
+ if (usage->hid == HID_GD_HATSWITCH)
+ return 0;
+
+ if (abs >= ARRAY_SIZE(ds4_absmap))
+ return -1;
+
+ abs = ds4_absmap[abs];
+ hid_map_usage_clear(hi, usage, bit, max, EV_ABS, abs);
+ return 1;
+ }
+
+ return 0;
+}
+
static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
@@ -1219,23 +1289,22 @@ static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
unsigned long flags;
- int n, offset;
+ int n, m, offset, num_touch_data, max_touch_data;
u8 cable_state, battery_capacity, battery_charging;
- /*
- * Battery and touchpad data starts at byte 30 in the USB report and
- * 32 in Bluetooth report.
- */
- offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 30 : 32;
+ /* When using Bluetooth the header is 2 bytes longer, so skip these. */
+ int data_offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 0 : 2;
+
+ /* Second bit of third button byte is for the touchpad button. */
+ offset = data_offset + DS4_INPUT_REPORT_BUTTON_OFFSET;
+ input_report_key(sc->touchpad, BTN_LEFT, rd[offset+2] & 0x2);
/*
- * The lower 4 bits of byte 30 contain the battery level
+ * The lower 4 bits of byte 30 (or 32 for BT) contain the battery level
* and the 5th bit contains the USB cable state.
*/
+ offset = data_offset + DS4_INPUT_REPORT_BATTERY_OFFSET;
cable_state = (rd[offset] >> 4) & 0x01;
battery_capacity = rd[offset] & 0x0F;
@@ -1262,30 +1331,52 @@ static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
sc->battery_charging = battery_charging;
spin_unlock_irqrestore(&sc->lock, flags);
- offset += 5;
-
/*
- * The Dualshock 4 multi-touch trackpad data starts at offset 35 on USB
- * and 37 on Bluetooth.
- * The first 7 bits of the first byte is a counter and bit 8 is a touch
- * indicator that is 0 when pressed and 1 when not pressed.
- * The next 3 bytes are two 12 bit touch coordinates, X and Y.
- * The data for the second touch is in the same format and immediatly
- * follows the data for the first.
+ * The Dualshock 4 multi-touch trackpad data starts at offset 33 on USB
+ * and 35 on Bluetooth.
+	 * The first byte indicates the number of touch data frames in the report.
+ * Trackpad data starts 2 bytes later (e.g. 35 for USB).
*/
- for (n = 0; n < 2; n++) {
- u16 x, y;
+ offset = data_offset + DS4_INPUT_REPORT_TOUCHPAD_OFFSET;
+ max_touch_data = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 3 : 4;
+ if (rd[offset] > 0 && rd[offset] <= max_touch_data)
+ num_touch_data = rd[offset];
+ else
+ num_touch_data = 1;
+ offset += 1;
+
+ for (m = 0; m < num_touch_data; m++) {
+ /* Skip past timestamp */
+ offset += 1;
+
+ /*
+		 * The first 7 bits of the first byte are a counter and bit 8 is
+ * a touch indicator that is 0 when pressed and 1 when not
+ * pressed.
+ * The next 3 bytes are two 12 bit touch coordinates, X and Y.
+ * The data for the second touch is in the same format and
+ * immediately follows the data for the first.
+ */
+ for (n = 0; n < 2; n++) {
+ u16 x, y;
+ bool active;
- x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
- y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
+ x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
+ y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
- input_mt_slot(input_dev, n);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
- !(rd[offset] >> 7));
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ active = !(rd[offset] >> 7);
+ input_mt_slot(sc->touchpad, n);
+ input_mt_report_slot_state(sc->touchpad, MT_TOOL_FINGER, active);
- offset += 4;
+ if (active) {
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_X, x);
+ input_report_abs(sc->touchpad, ABS_MT_POSITION_Y, y);
+ }
+
+ offset += 4;
+ }
+ input_mt_sync_frame(sc->touchpad);
+ input_sync(sc->touchpad);
}
}
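
The 3-byte packing of each coordinate pair is easiest to see with concrete bytes; a minimal sketch of the same unpacking (rd and offset as in the code above):

	u16 x, y;

	x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
	y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
	/* e.g. bytes 0x34, 0xA2, 0x1C unpack to x = 0x234 (564), y = 0x1CA (458) */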
@@ -1324,6 +1415,21 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
} else if (((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && rd[0] == 0x01 &&
size == 64) || ((sc->quirks & DUALSHOCK4_CONTROLLER_BT)
&& rd[0] == 0x11 && size == 78)) {
+ if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
+ /* CRC check */
+ u8 bthdr = 0xA1;
+ u32 crc;
+ u32 report_crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, rd, DS4_INPUT_REPORT_0x11_SIZE-4);
+ report_crc = get_unaligned_le32(&rd[DS4_INPUT_REPORT_0x11_SIZE-4]);
+ if (crc != report_crc) {
+			hid_dbg(sc->hdev, "DualShock 4 input report's CRC check failed, received crc 0x%x != 0x%x\n",
+ report_crc, crc);
+ return -EILSEQ;
+ }
+ }
dualshock4_parse_report(sc, rd, size);
}
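
The checksum is a plain IEEE CRC-32 over a one-byte Bluetooth header (0xA1 for device-to-host input reports; the output path later in this patch seeds with 0xA2) followed by the report minus its trailing 4 bytes, which hold the expected value little-endian. A user-space sketch of the same check, assuming zlib and a captured 78-byte report in rd:

	#include <stdint.h>
	#include <string.h>
	#include <zlib.h>

	static int ds4_bt_input_crc_ok(const uint8_t *rd, size_t len)
	{
		const uint8_t bthdr = 0xA1;
		uint32_t report_crc;
		uLong crc;

		crc = crc32(0L, Z_NULL, 0);
		crc = crc32(crc, &bthdr, 1);
		crc = crc32(crc, rd, len - 4);
		/* the on-wire CRC is little-endian; assume an LE host here */
		memcpy(&report_crc, &rd[len - 4], 4);
		return (uint32_t)crc == report_crc;
	}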
@@ -1367,47 +1473,84 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & PS3REMOTE)
return ps3remote_mapping(hdev, hi, field, usage, bit, max);
+
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ return ds4_mapping(hdev, hi, field, usage, bit, max);
+
/* Let hid-core decide for the others */
return 0;
}
-static int sony_register_touchpad(struct hid_input *hi, int touch_count,
+static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
int w, int h)
{
- struct input_dev *input_dev = hi->input;
+ size_t name_sz;
+ char *name;
int ret;
- ret = input_mt_init_slots(input_dev, touch_count, 0);
+ sc->touchpad = input_allocate_device();
+ if (!sc->touchpad)
+ return -ENOMEM;
+
+ input_set_drvdata(sc->touchpad, sc);
+ sc->touchpad->dev.parent = &sc->hdev->dev;
+ sc->touchpad->phys = sc->hdev->phys;
+ sc->touchpad->uniq = sc->hdev->uniq;
+ sc->touchpad->id.bustype = sc->hdev->bus;
+ sc->touchpad->id.vendor = sc->hdev->vendor;
+ sc->touchpad->id.product = sc->hdev->product;
+ sc->touchpad->id.version = sc->hdev->version;
+
+ /* Append a suffix to the controller name as there are various
+ * DS4 compatible non-Sony devices with different names.
+ */
+ name_sz = strlen(sc->hdev->name) + sizeof(DS4_TOUCHPAD_SUFFIX);
+ name = kzalloc(name_sz, GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ snprintf(name, name_sz, "%s" DS4_TOUCHPAD_SUFFIX, sc->hdev->name);
+ sc->touchpad->name = name;
+
+ ret = input_mt_init_slots(sc->touchpad, touch_count, 0);
if (ret < 0)
- return ret;
+ goto err;
+
+ /* We map the button underneath the touchpad to BTN_LEFT. */
+ __set_bit(EV_KEY, sc->touchpad->evbit);
+ __set_bit(BTN_LEFT, sc->touchpad->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, sc->touchpad->propbit);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_X, 0, w, 0, 0);
+ input_set_abs_params(sc->touchpad, ABS_MT_POSITION_Y, 0, h, 0, 0);
+
+ ret = input_register_device(sc->touchpad);
+ if (ret < 0)
+ goto err;
return 0;
+
+err:
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
+
+ input_free_device(sc->touchpad);
+ sc->touchpad = NULL;
+
+ return ret;
}
-static int sony_input_configured(struct hid_device *hdev,
- struct hid_input *hidinput)
+static void sony_unregister_touchpad(struct sony_sc *sc)
{
- struct sony_sc *sc = hid_get_drvdata(hdev);
- int ret;
+ if (!sc->touchpad)
+ return;
- /*
- * The Dualshock 4 touchpad supports 2 touches and has a
- * resolution of 1920x942 (44.86 dots/mm).
- */
- if (sc->quirks & DUALSHOCK4_CONTROLLER) {
- ret = sony_register_touchpad(hidinput, 2, 1920, 942);
- if (ret) {
- hid_err(sc->hdev,
- "Unable to initialize multi-touch slots: %d\n",
- ret);
- return ret;
- }
- }
+ kfree(sc->touchpad->name);
+ sc->touchpad->name = NULL;
- return 0;
+ input_unregister_device(sc->touchpad);
+ sc->touchpad = NULL;
}
/*
@@ -1483,11 +1626,11 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
u8 *buf;
int ret;
- buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x02_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE,
+ ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_FEATURE_REPORT_0x02_SIZE,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
kfree(buf);
@@ -1892,14 +2035,14 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
* 0xD0 - 66hz
*/
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- memset(buf, 0, DS4_REPORT_0x05_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x05_SIZE);
buf[0] = 0x05;
buf[1] = 0xFF;
offset = 4;
} else {
- memset(buf, 0, DS4_REPORT_0x11_SIZE);
+ memset(buf, 0, DS4_OUTPUT_REPORT_0x11_SIZE);
buf[0] = 0x11;
- buf[1] = 0x80;
+ buf[1] = 0xC0; /* HID + CRC */
buf[3] = 0x0F;
offset = 6;
}
@@ -1925,10 +2068,17 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
buf[offset++] = sc->led_delay_off[3];
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE);
- else
- hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE,
- HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x05_SIZE);
+ else {
+ /* CRC generation */
+ u8 bthdr = 0xA2;
+ u32 crc;
+
+ crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+ crc = ~crc32_le(crc, buf, DS4_OUTPUT_REPORT_0x11_SIZE-4);
+ put_unaligned_le32(crc, &buf[74]);
+ hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x11_SIZE);
+ }
}
static void motion_send_output_report(struct sony_sc *sc)
@@ -1972,10 +2122,10 @@ static int sony_allocate_output_report(struct sony_sc *sc)
kmalloc(sizeof(union sixaxis_output_report_01),
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x11_SIZE,
GFP_KERNEL);
else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
- sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE,
+ sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x05_SIZE,
GFP_KERNEL);
else if (sc->quirks & MOTION_CONTROLLER)
sc->output_report_dmabuf = kmalloc(MOTION_REPORT_0x02_SIZE,
@@ -2220,7 +2370,7 @@ static int sony_check_add(struct sony_sc *sc)
return 0;
}
} else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- buf = kmalloc(DS4_REPORT_0x81_SIZE, GFP_KERNEL);
+ buf = kmalloc(DS4_FEATURE_REPORT_0x81_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -2230,10 +2380,10 @@ static int sony_check_add(struct sony_sc *sc)
* offset 1.
*/
ret = hid_hw_raw_request(sc->hdev, 0x81, buf,
- DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
+ DS4_FEATURE_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
- if (ret != DS4_REPORT_0x81_SIZE) {
+ if (ret != DS4_FEATURE_REPORT_0x81_SIZE) {
hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
ret = ret < 0 ? ret : -EINVAL;
goto out_free;
@@ -2329,45 +2479,12 @@ static inline void sony_cancel_work_sync(struct sony_sc *sc)
cancel_work_sync(&sc->state_worker);
}
-static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+static int sony_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
{
- int ret;
+ struct sony_sc *sc = hid_get_drvdata(hdev);
int append_dev_id;
- unsigned long quirks = id->driver_data;
- struct sony_sc *sc;
- unsigned int connect_mask = HID_CONNECT_DEFAULT;
-
- if (!strcmp(hdev->name, "FutureMax Dance Mat"))
- quirks |= FUTUREMAX_DANCE_MAT;
-
- sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
- if (sc == NULL) {
- hid_err(hdev, "can't alloc sony descriptor\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&sc->lock);
-
- sc->quirks = quirks;
- hid_set_drvdata(hdev, sc);
- sc->hdev = hdev;
-
- ret = hid_parse(hdev);
- if (ret) {
- hid_err(hdev, "parse failed\n");
- return ret;
- }
-
- if (sc->quirks & VAIO_RDESC_CONSTANT)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
- else if (sc->quirks & SIXAXIS_CONTROLLER)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
-
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- return ret;
- }
+ int ret;
ret = sony_set_device_id(sc);
if (ret < 0) {
@@ -2415,11 +2532,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sony_init_output_report(sc, sixaxis_send_output_report);
} else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
- /*
- * The DualShock 4 wants output reports sent on the ctrl
- * endpoint when connected via Bluetooth.
- */
- hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
ret = dualshock4_set_operational_bt(hdev);
if (ret < 0) {
hid_err(hdev, "failed to set the Dualshock 4 operational mode\n");
@@ -2427,6 +2539,18 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
+ /*
+ * The Dualshock 4 touchpad supports 2 touches and has a
+ * resolution of 1920x942 (44.86 dots/mm).
+ */
+ ret = sony_register_touchpad(sc, 2, 1920, 942);
+ if (ret) {
+ hid_err(sc->hdev,
+ "Unable to initialize multi-touch slots: %d\n",
+ ret);
+ return ret;
+ }
+
sony_init_output_report(sc, dualshock4_send_output_report);
} else if (sc->quirks & MOTION_CONTROLLER) {
sony_init_output_report(sc, motion_send_output_report);
@@ -2482,17 +2606,84 @@ err_stop:
return ret;
}
+static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ unsigned long quirks = id->driver_data;
+ struct sony_sc *sc;
+ unsigned int connect_mask = HID_CONNECT_DEFAULT;
+
+ if (!strcmp(hdev->name, "FutureMax Dance Mat"))
+ quirks |= FUTUREMAX_DANCE_MAT;
+
+ sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
+ if (sc == NULL) {
+ hid_err(hdev, "can't alloc sony descriptor\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&sc->lock);
+
+ sc->quirks = quirks;
+ hid_set_drvdata(hdev, sc);
+ sc->hdev = hdev;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (sc->quirks & VAIO_RDESC_CONSTANT)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+ else if (sc->quirks & SIXAXIS_CONTROLLER)
+ connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+
+ /* Patch the hw version on DS4 compatible devices, so applications can
+ * distinguish between the default HID mappings and the mappings defined
+ * by the Linux game controller spec. This is important for the SDL2
+	 * library, whose game controller database uses device ids
+	 * in combination with the version as a key.
+ */
+ if (sc->quirks & DUALSHOCK4_CONTROLLER)
+ hdev->version |= 0x8000;
+
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+	/* sony_input_configured() can fail, but this doesn't result
+	 * in hid_hw_start() failures (intended). Check whether
+	 * the HID layer claimed the device, and fail if it did not.
+	 * We don't know the actual reason for the failure; most
+	 * likely it is EEXIST in case of a double connection
+	 * over USB and Bluetooth, but it could also be ENOMEM
+	 * or something else.
+	 */
+ if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
+ hid_err(hdev, "failed to claim input\n");
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
static void sony_remove(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
+ hid_hw_close(hdev);
+
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(sc);
- if (sc->quirks & SONY_BATTERY_SUPPORT) {
- hid_hw_close(hdev);
+ if (sc->quirks & SONY_BATTERY_SUPPORT)
sony_battery_remove(sc);
- }
+
+ if (sc->touchpad)
+ sony_unregister_touchpad(sc);
sony_cancel_work_sync(sc);
@@ -2596,6 +2787,12 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
/* Nyko Core Controller for PS3 */
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
diff --git a/drivers/hid/hid-udraw-ps3.c b/drivers/hid/hid-udraw-ps3.c
new file mode 100644
index 000000000000..88ea390c10ad
--- /dev/null
+++ b/drivers/hid/hid-udraw-ps3.c
@@ -0,0 +1,474 @@
+/*
+ * HID driver for THQ PS3 uDraw tablet
+ *
+ * Copyright (C) 2016 Red Hat Inc. All Rights Reserved
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("PS3 uDraw tablet driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Protocol information from:
+ * http://brandonw.net/udraw/
+ * and the source code of:
+ * https://vvvv.org/contribution/udraw-hid
+ */
+
+/*
+ * The device is set up with multiple input devices:
+ * - the touch area, which works as a touchpad
+ * - the tablet area, which works as a touchpad/drawing tablet
+ * - a joypad with a d-pad and 7 buttons
+ * - an accelerometer device
+ */
+
+enum {
+ TOUCH_NONE,
+ TOUCH_PEN,
+ TOUCH_FINGER,
+ TOUCH_TWOFINGER
+};
+
+enum {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z
+};
+
+/*
+ * Accelerometer min/max values
+ * in order, X, Y and Z
+ */
+static struct {
+ int min;
+ int max;
+} accel_limits[] = {
+ [AXIS_X] = { 490, 534 },
+ [AXIS_Y] = { 490, 534 },
+ [AXIS_Z] = { 492, 536 }
+};
+
+#define DEVICE_NAME "THQ uDraw Game Tablet for PS3"
+/* resolution in pixels */
+#define RES_X 1920
+#define RES_Y 1080
+/* size in mm */
+#define WIDTH 160
+#define HEIGHT 90
+#define PRESSURE_OFFSET 113
+#define MAX_PRESSURE (255 - PRESSURE_OFFSET)
+
+struct udraw {
+ struct input_dev *joy_input_dev;
+ struct input_dev *touch_input_dev;
+ struct input_dev *pen_input_dev;
+ struct input_dev *accel_input_dev;
+ struct hid_device *hdev;
+
+ /*
+ * The device's two-finger support is pretty unreliable, as
+ * the device could report a single touch when the two fingers
+ * are too close together, and the distance between fingers, even
+ * though reported, is not in the same unit as the touches.
+ *
+ * We'll make do without it, and try to report the first touch
+ * as reliably as possible.
+ */
+ int last_one_finger_x;
+ int last_one_finger_y;
+ int last_two_finger_x;
+ int last_two_finger_y;
+};
+
+static int clamp_accel(int axis, int offset)
+{
+	axis = clamp(axis,
+		accel_limits[offset].min,
+		accel_limits[offset].max);
+	/* scale the clamped value into the 0..255 range; multiply
+	 * before dividing so integer division doesn't zero it out
+	 */
+	axis = (axis - accel_limits[offset].min) * 0xFF /
+		(accel_limits[offset].max -
+		accel_limits[offset].min);
+	return axis;
+}
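+
+/*
+ * Worked example: with the X limits above (490..534), a raw reading of
+ * 512 scales to (512 - 490) * 0xFF / (534 - 490) = 127, roughly
+ * mid-range; readings at or beyond the limits clamp to 0 and 255.
+ */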
+
+static int udraw_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int len)
+{
+ struct udraw *udraw = hid_get_drvdata(hdev);
+ int touch;
+ int x, y, z;
+
+ if (len != 27)
+ return 0;
+
+ if (data[11] == 0x00)
+ touch = TOUCH_NONE;
+ else if (data[11] == 0x40)
+ touch = TOUCH_PEN;
+ else if (data[11] == 0x80)
+ touch = TOUCH_FINGER;
+ else
+ touch = TOUCH_TWOFINGER;
+
+ /* joypad */
+ input_report_key(udraw->joy_input_dev, BTN_WEST, data[0] & 1);
+ input_report_key(udraw->joy_input_dev, BTN_SOUTH, !!(data[0] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_EAST, !!(data[0] & 4));
+ input_report_key(udraw->joy_input_dev, BTN_NORTH, !!(data[0] & 8));
+
+ input_report_key(udraw->joy_input_dev, BTN_SELECT, !!(data[1] & 1));
+ input_report_key(udraw->joy_input_dev, BTN_START, !!(data[1] & 2));
+ input_report_key(udraw->joy_input_dev, BTN_MODE, !!(data[1] & 16));
+
+ x = y = 0;
+ switch (data[2]) {
+ case 0x0:
+ y = -127;
+ break;
+ case 0x1:
+ y = -127;
+ x = 127;
+ break;
+ case 0x2:
+ x = 127;
+ break;
+ case 0x3:
+ y = 127;
+ x = 127;
+ break;
+ case 0x4:
+ y = 127;
+ break;
+ case 0x5:
+ y = 127;
+ x = -127;
+ break;
+ case 0x6:
+ x = -127;
+ break;
+ case 0x7:
+ y = -127;
+ x = -127;
+ break;
+ default:
+ break;
+ }
+
+ input_report_abs(udraw->joy_input_dev, ABS_X, x);
+ input_report_abs(udraw->joy_input_dev, ABS_Y, y);
+
+ input_sync(udraw->joy_input_dev);
+
+ /* For pen and touchpad */
+ x = y = 0;
+ if (touch != TOUCH_NONE) {
+ if (data[15] != 0x0F)
+ x = data[15] * 256 + data[17];
+ if (data[16] != 0x0F)
+ y = data[16] * 256 + data[18];
+ }
+
+ if (touch == TOUCH_FINGER) {
+ /* Save the last one-finger touch */
+ udraw->last_one_finger_x = x;
+ udraw->last_one_finger_y = y;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+ } else if (touch == TOUCH_TWOFINGER) {
+ /*
+		 * We have a problem: x/y are the coordinates of the
+		 * second finger, but we want to hand the first finger
+		 * to user-space, otherwise it'll look as if it jumped.
+ *
+ * See the udraw struct definition for why this was
+ * implemented this way.
+ */
+ if (udraw->last_two_finger_x == -1) {
+ /* Save the position of the 2nd finger */
+ udraw->last_two_finger_x = x;
+ udraw->last_two_finger_y = y;
+
+ x = udraw->last_one_finger_x;
+ y = udraw->last_one_finger_y;
+ } else {
+ /*
+ * Offset the 2-finger coords using the
+ * saved data from the first finger
+ */
+ x = x - (udraw->last_two_finger_x
+ - udraw->last_one_finger_x);
+ y = y - (udraw->last_two_finger_y
+ - udraw->last_one_finger_y);
+ }
+ }
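+	/*
+	 * Worked example: if the lone finger was last seen at (100, 200)
+	 * and the first two-finger report carries (150, 260), we save
+	 * (150, 260) and emit (100, 200); a later report of (155, 264)
+	 * is then emitted as (105, 204), tracking the first finger's motion.
+	 */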
+
+ /* touchpad */
+ if (touch == TOUCH_FINGER || touch == TOUCH_TWOFINGER) {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 1);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER,
+ touch == TOUCH_FINGER);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP,
+ touch == TOUCH_TWOFINGER);
+
+ input_report_abs(udraw->touch_input_dev, ABS_X, x);
+ input_report_abs(udraw->touch_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->touch_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER, 0);
+ input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP, 0);
+ }
+ input_sync(udraw->touch_input_dev);
+
+ /* pen */
+ if (touch == TOUCH_PEN) {
+ int level;
+
+ level = clamp(data[13] - PRESSURE_OFFSET,
+ 0, MAX_PRESSURE);
+
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, (level != 0));
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 1);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, level);
+ input_report_abs(udraw->pen_input_dev, ABS_X, x);
+ input_report_abs(udraw->pen_input_dev, ABS_Y, y);
+ } else {
+ input_report_key(udraw->pen_input_dev, BTN_TOUCH, 0);
+ input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 0);
+ input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, 0);
+ }
+ input_sync(udraw->pen_input_dev);
+
+ /* accel */
+ x = (data[19] + (data[20] << 8));
+ x = clamp_accel(x, AXIS_X);
+ y = (data[21] + (data[22] << 8));
+ y = clamp_accel(y, AXIS_Y);
+ z = (data[23] + (data[24] << 8));
+ z = clamp_accel(z, AXIS_Z);
+ input_report_abs(udraw->accel_input_dev, ABS_X, x);
+ input_report_abs(udraw->accel_input_dev, ABS_Y, y);
+ input_report_abs(udraw->accel_input_dev, ABS_Z, z);
+ input_sync(udraw->accel_input_dev);
+
+ /* let hidraw and hiddev handle the report */
+ return 0;
+}
+
+static int udraw_open(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ return hid_hw_open(udraw->hdev);
+}
+
+static void udraw_close(struct input_dev *dev)
+{
+ struct udraw *udraw = input_get_drvdata(dev);
+
+ hid_hw_close(udraw->hdev);
+}
+
+static struct input_dev *allocate_and_setup(struct hid_device *hdev,
+ const char *name)
+{
+ struct input_dev *input_dev;
+
+ input_dev = devm_input_allocate_device(&hdev->dev);
+ if (!input_dev)
+ return NULL;
+
+ input_dev->name = name;
+ input_dev->phys = hdev->phys;
+ input_dev->dev.parent = &hdev->dev;
+ input_dev->open = udraw_open;
+ input_dev->close = udraw_close;
+ input_dev->uniq = hdev->uniq;
+ input_dev->id.bustype = hdev->bus;
+ input_dev->id.vendor = hdev->vendor;
+ input_dev->id.product = hdev->product;
+ input_dev->id.version = hdev->version;
+ input_set_drvdata(input_dev, hid_get_drvdata(hdev));
+
+ return input_dev;
+}
+
+static bool udraw_setup_touch(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Touchpad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->touch_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_pen(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Pen");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+ input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+ input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+ input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+ input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, MAX_PRESSURE, 0, 0);
+
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_TOOL_PEN, input_dev->keybit);
+
+ set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ udraw->pen_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_accel(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Accelerometer");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_ABS);
+
+ /* 1G accel is reported as ~256, so clamp to 2G */
+ input_set_abs_params(input_dev, ABS_X, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -512, 512, 0, 0);
+ input_set_abs_params(input_dev, ABS_Z, -512, 512, 0, 0);
+
+ set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
+
+ udraw->accel_input_dev = input_dev;
+
+ return true;
+}
+
+static bool udraw_setup_joypad(struct udraw *udraw,
+ struct hid_device *hdev)
+{
+ struct input_dev *input_dev;
+
+ input_dev = allocate_and_setup(hdev, DEVICE_NAME " Joypad");
+ if (!input_dev)
+ return false;
+
+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+
+ set_bit(BTN_SOUTH, input_dev->keybit);
+ set_bit(BTN_NORTH, input_dev->keybit);
+ set_bit(BTN_EAST, input_dev->keybit);
+ set_bit(BTN_WEST, input_dev->keybit);
+ set_bit(BTN_SELECT, input_dev->keybit);
+ set_bit(BTN_START, input_dev->keybit);
+ set_bit(BTN_MODE, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, -127, 127, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, -127, 127, 0, 0);
+
+ udraw->joy_input_dev = input_dev;
+
+ return true;
+}
+
+static int udraw_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct udraw *udraw;
+ int ret;
+
+ udraw = devm_kzalloc(&hdev->dev, sizeof(struct udraw), GFP_KERNEL);
+ if (!udraw)
+ return -ENOMEM;
+
+ udraw->hdev = hdev;
+ udraw->last_two_finger_x = -1;
+ udraw->last_two_finger_y = -1;
+
+ hid_set_drvdata(hdev, udraw);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ if (!udraw_setup_joypad(udraw, hdev) ||
+ !udraw_setup_touch(udraw, hdev) ||
+ !udraw_setup_pen(udraw, hdev) ||
+ !udraw_setup_accel(udraw, hdev)) {
+ hid_err(hdev, "could not allocate interfaces\n");
+ return -ENOMEM;
+ }
+
+ ret = input_register_device(udraw->joy_input_dev) ||
+ input_register_device(udraw->touch_input_dev) ||
+ input_register_device(udraw->pen_input_dev) ||
+ input_register_device(udraw->accel_input_dev);
+	if (ret) {
+		/* the '||' chain above collapses individual errnos to 1 */
+		hid_err(hdev, "failed to register interfaces\n");
+		return -ENODEV;
+	}
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW | HID_CONNECT_DRIVER);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id udraw_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, udraw_devices);
+
+static struct hid_driver udraw_driver = {
+ .name = "hid-udraw",
+ .id_table = udraw_devices,
+ .raw_event = udraw_raw_event,
+ .probe = udraw_probe,
+};
+module_hid_driver(udraw_driver);
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index e2517c11e0ee..842d8416a7a6 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -19,7 +19,6 @@
#include <linux/jiffies.h>
#include "client.h"
#include "hw-ish.h"
-#include "utils.h"
#include "hbm.h"
/* For FW reset flow */
@@ -310,6 +309,7 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
((uint32_t)tv_utc.tv_usec);
ts_format.ts1_source = HOST_SYSTEM_TIME_USEC;
ts_format.ts2_source = HOST_UTC_TIME_USEC;
+ ts_format.reserved = 0;
time_update.primary_host_time = usec_system;
time_update.secondary_host_time = usec_utc;
@@ -427,6 +427,59 @@ static int ipc_send_mng_msg(struct ishtp_device *dev, uint32_t msg_code,
sizeof(uint32_t) + size);
}
+#define WAIT_FOR_FW_RDY 0x1
+#define WAIT_FOR_INPUT_RDY 0x2
+
+/**
+ * timed_wait_for_timeout() - wait for a specific event, with timeout
+ * @dev: ISHTP device pointer
+ * @condition: the condition to wait for
+ * @timeinc: time slice for every wait cycle, in ms
+ * @timeout: timeout, in ms
+ *
+ * This function polls in a loop until the specified event is ready,
+ * sleeping timeinc ms between checks; exceeding timeout ms is a failure.
+ *
+ * Return: 0 on success, else a failure code
+ */
+static int timed_wait_for_timeout(struct ishtp_device *dev, int condition,
+ unsigned int timeinc, unsigned int timeout)
+{
+ bool complete = false;
+ int ret;
+
+ do {
+ if (condition == WAIT_FOR_FW_RDY) {
+ complete = ishtp_fw_is_ready(dev);
+ } else if (condition == WAIT_FOR_INPUT_RDY) {
+ complete = ish_is_input_ready(dev);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!complete) {
+ unsigned long left_time;
+
+ left_time = msleep_interruptible(timeinc);
+ timeout -= (timeinc - left_time);
+ }
+ } while (!complete && timeout > 0);
+
+ if (complete)
+ ret = 0;
+ else
+ ret = -EBUSY;
+
+out:
+ return ret;
+}
+
+#define TIME_SLICE_FOR_FW_RDY_MS 100
+#define TIME_SLICE_FOR_INPUT_RDY_MS 100
+#define TIMEOUT_FOR_FW_RDY_MS 2000
+#define TIMEOUT_FOR_INPUT_RDY_MS 2000
+
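+/*
+ * Typical use, as in the reset handler below: poll every 100 ms for up
+ * to 2 s, e.g.
+ *
+ *	ret = timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ *		TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
+ */
+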
/**
* ish_fw_reset_handler() - FW reset handler
* @dev: ishtp device pointer
@@ -456,8 +509,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
ishtp_reset_handler(dev);
if (!ish_is_input_ready(dev))
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE,
- ish_is_input_ready(dev), (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+ TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
/* ISH FW is dead */
if (!ish_is_input_ready(dev))
@@ -472,8 +525,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
sizeof(uint32_t));
/* Wait for ISH FW's ILUP and ISHTP_READY */
- timed_wait_for_timeout(WAIT_FOR_SEND_SLICE, ishtp_fw_is_ready(dev),
- (2 * HZ));
+ timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
if (!ishtp_fw_is_ready(dev)) {
/* ISH FW is dead */
uint32_t ish_status;
@@ -487,6 +540,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
return 0;
}
+#define TIMEOUT_FOR_HW_RDY_MS 300
+
/**
* ish_fw_reset_work_fn() - FW reset worker function
* @unused: not used
@@ -500,7 +555,7 @@ static void fw_reset_work_fn(struct work_struct *unused)
rv = ish_fw_reset_handler(ishtp_dev);
if (!rv) {
/* ISH is ILUP & ISHTP-ready. Restart ISHTP */
- schedule_timeout(HZ / 3);
+ msleep_interruptible(TIMEOUT_FOR_HW_RDY_MS);
ishtp_dev->recvd_hw_ready = 1;
wake_up_interruptible(&ishtp_dev->wait_hw_ready);
@@ -638,6 +693,58 @@ eoi:
}
/**
+ * ish_disable_dma() - disable dma communication between host and ISHFW
+ * @dev: ishtp device pointer
+ *
+ * Clear the dma enable bit and wait for dma inactive.
+ *
+ * Return: 0 for success else error code.
+ */
+static int ish_disable_dma(struct ishtp_device *dev)
+{
+ unsigned int dma_delay;
+
+ /* Clear the dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
+
+ /* wait for dma inactive */
+ for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
+ _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
+ dma_delay += 5)
+ mdelay(5);
+
+ if (dma_delay >= MAX_DMA_DELAY) {
+ dev_err(dev->devc,
+ "Wait for DMA inactive timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_wakeup() - wake up ISH FW from the waiting-for-host state
+ * @dev: ishtp device pointer
+ *
+ * Set the dma enable bit and send a void message to the FW;
+ * it will wake the FW up from the waiting-for-host state.
+ */
+static void ish_wakeup(struct ishtp_device *dev)
+{
+ /* Set dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
+
+ /*
+ * Send 0 IPC message so that ISH FW wakes up if it was already
+ * asleep.
+ */
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
+
+ /* Flush writes to doorbell and REMAP2 */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+}
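+
+/*
+ * The trailing register read matters: PCI memory writes are posted, and
+ * a read from the device is what forces the doorbell and RMP2 writes to
+ * reach the hardware before execution continues.
+ */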
+
+/**
* _ish_hw_reset() - HW reset
* @dev: ishtp device pointer
*
@@ -649,7 +756,6 @@ static int _ish_hw_reset(struct ishtp_device *dev)
{
struct pci_dev *pdev = dev->pdev;
int rv;
- unsigned int dma_delay;
uint16_t csr;
if (!pdev)
@@ -664,15 +770,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
return -EINVAL;
}
- /* Now trigger reset to FW */
- ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
-
- for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
- _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
- dma_delay += 5)
- mdelay(5);
-
- if (dma_delay >= MAX_DMA_DELAY) {
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
dev_err(&pdev->dev,
"Can't reset - stuck with DMA in-progress\n");
return -EBUSY;
@@ -690,16 +789,8 @@ static int _ish_hw_reset(struct ishtp_device *dev)
csr |= PCI_D0;
pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
- ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
-
- /*
- * Send 0 IPC message so that ISH FW wakes up if it was already
- * asleep
- */
- ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
-
- /* Flush writes to doorbell and REMAP2 */
- ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+ /* Now we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
return 0;
}
@@ -758,16 +849,9 @@ static int _ish_ipc_reset(struct ishtp_device *dev)
int ish_hw_start(struct ishtp_device *dev)
{
ish_set_host_rdy(dev);
- /* After that we can enable ISH DMA operation */
- ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
- /*
- * Send 0 IPC message so that ISH FW wakes up if it was already
- * asleep
- */
- ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
- /* Flush write to doorbell */
- ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+ /* After that we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
set_host_ready(dev);
@@ -876,6 +960,21 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
*/
void ish_device_disable(struct ishtp_device *dev)
{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (!pdev)
+ return;
+
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
+ dev_err(&pdev->dev,
+ "Can't reset - stuck with DMA in-progress\n");
+ return;
+ }
+
+ /* Put ISH to D3hot state for power saving */
+ pci_set_power_state(pdev, PCI_D3hot);
+
dev->dev_state = ISHTP_DEV_DISABLED;
ish_clr_host_rdy(dev);
}
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 42f0beeb09fd..20d647d2dd2c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -146,7 +146,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
/* request and enable interrupt */
- ret = request_irq(pdev->irq, ish_irq_handler, IRQF_NO_SUSPEND,
+ ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED,
KBUILD_MODNAME, dev);
if (ret) {
dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
@@ -202,6 +202,7 @@ static void ish_remove(struct pci_dev *pdev)
kfree(ishtp_dev);
}
+#ifdef CONFIG_PM
static struct device *ish_resume_device;
/**
@@ -293,7 +294,6 @@ static int ish_resume(struct device *device)
return 0;
}
-#ifdef CONFIG_PM
static const struct dev_pm_ops ish_pm_ops = {
.suspend = ish_suspend,
.resume = ish_resume,
@@ -301,7 +301,7 @@ static const struct dev_pm_ops ish_pm_ops = {
#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
#else
#define ISHTP_ISH_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM */
static struct pci_driver ish_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/hid/intel-ish-hid/ipc/utils.h b/drivers/hid/intel-ish-hid/ipc/utils.h
deleted file mode 100644
index 5a82123dc7b4..000000000000
--- a/drivers/hid/intel-ish-hid/ipc/utils.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Utility macros of ISH
- *
- * Copyright (c) 2014-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-#ifndef UTILS__H
-#define UTILS__H
-
-#define WAIT_FOR_SEND_SLICE (HZ / 10)
-#define WAIT_FOR_CONNECT_SLICE (HZ / 10)
-
-/*
- * Waits for specified event when a thread that triggers event can't signal
- * Also, waits *at_least* `timeinc` after condition is satisfied
- */
-#define timed_wait_for(timeinc, condition) \
- do { \
- int completed = 0; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- completed = (condition); \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- } while (!(completed)); \
- } while (0)
-
-
-/*
- * Waits for specified event when a thread that triggers event
- * can't signal with timeout (use whenever we may hang)
- */
-#define timed_wait_for_timeout(timeinc, condition, timeout) \
- do { \
- int t = timeout; \
- do { \
- unsigned long j; \
- int done = 0; \
- \
- for (j = jiffies, done = 0; !done; ) { \
- schedule_timeout(timeinc); \
- if (time_is_before_eq_jiffies(j + timeinc)) \
- done = 1; \
- } \
- t -= timeinc; \
- if (t <= 0) \
- break; \
- } while (!(condition)); \
- } while (0)
-
-#endif /* UTILS__H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 256521509d20..f4cbc744e657 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -585,14 +585,7 @@ int ishtp_bus_new_client(struct ishtp_device *dev)
*/
i = dev->fw_client_presentation_num - 1;
device_uuid = dev->fw_clients[i].props.protocol_name;
- dev_name = kasprintf(GFP_KERNEL,
- "{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}",
- device_uuid.b[3], device_uuid.b[2], device_uuid.b[1],
- device_uuid.b[0], device_uuid.b[5], device_uuid.b[4],
- device_uuid.b[7], device_uuid.b[6], device_uuid.b[8],
- device_uuid.b[9], device_uuid.b[10], device_uuid.b[11],
- device_uuid.b[12], device_uuid.b[13], device_uuid.b[14],
- device_uuid.b[15]);
+ dev_name = kasprintf(GFP_KERNEL, "{%pUL}", device_uuid.b);
if (!dev_name)
return -ENOMEM;
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 74bffee60774..59460b66e689 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -378,11 +378,10 @@ static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
list_for_each_entry(cl, &dev->cl_list, link) {
if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
cl->state = ISHTP_CL_DISCONNECTED;
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
@@ -431,11 +430,10 @@ static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
cl->state = ISHTP_CL_DISCONNECTED;
cl->status = -ENODEV;
}
+ wake_up_interruptible(&cl->wait_ctrl_res);
break;
}
}
- if (cl)
- wake_up_interruptible(&cl->wait_ctrl_res);
spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ae83af649a60..333108ef18cf 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1459,7 +1459,7 @@ static int hid_post_reset(struct usb_interface *intf)
rdesc = kmalloc(hid->dev_rsize, GFP_KERNEL);
if (!rdesc) {
dbg_hid("couldn't allocate rdesc memory (post_reset)\n");
- return 1;
+ return -ENOMEM;
}
status = hid_get_class_descriptor(dev,
interface->desc.bInterfaceNumber,
@@ -1467,13 +1467,13 @@ static int hid_post_reset(struct usb_interface *intf)
if (status < 0) {
dbg_hid("reading report descriptor failed (post_reset)\n");
kfree(rdesc);
- return 1;
+ return status;
}
status = memcmp(rdesc, hid->dev_rdesc, hid->dev_rsize);
kfree(rdesc);
if (status != 0) {
dbg_hid("report descriptor changed\n");
- return 1;
+ return -EPERM;
}
/* No need to do another reset or clear a halted endpoint */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0a0eca5da47d..b3e01c82af05 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -56,12 +56,14 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
@@ -80,6 +82,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
@@ -99,8 +103,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index b4800ea891cb..d303e413306d 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -210,7 +210,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
void wacom_wac_usage_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage);
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value);
void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
void wacom_battery_work(struct work_struct *work);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5e7a5648e708..b9779bcbd140 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -122,6 +122,7 @@ static void wacom_feature_mapping(struct hid_device *hdev,
struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
u8 *data;
int ret;
+ int n;
switch (usage->hid) {
case HID_DG_CONTACTMAX:
@@ -159,22 +160,48 @@ static void wacom_feature_mapping(struct hid_device *hdev,
case HID_UP_DIGITIZER:
if (field->report->id == 0x0B &&
- (field->application == WACOM_G9_DIGITIZER ||
- field->application == WACOM_G11_DIGITIZER)) {
+ (field->application == WACOM_HID_G9_PEN ||
+ field->application == WACOM_HID_G11_PEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
- case WACOM_G9_PAGE:
- case WACOM_G11_PAGE:
+ case WACOM_HID_WD_DATAMODE:
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 2;
+ break;
+
+ case WACOM_HID_UP_G9:
+ case WACOM_HID_UP_G11:
if (field->report->id == 0x03 &&
- (field->application == WACOM_G9_TOUCHSCREEN ||
- field->application == WACOM_G11_TOUCHSCREEN)) {
+ (field->application == WACOM_HID_G9_TOUCHSCREEN ||
+ field->application == WACOM_HID_G11_TOUCHSCREEN)) {
wacom->wacom_wac.mode_report = field->report->id;
wacom->wacom_wac.mode_value = 0;
}
break;
+ case WACOM_HID_WD_OFFSETLEFT:
+ case WACOM_HID_WD_OFFSETTOP:
+ case WACOM_HID_WD_OFFSETRIGHT:
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ /* read manually */
+ n = hid_report_len(field->report);
+ data = hid_alloc_report_buf(field->report, GFP_KERNEL);
+ if (!data)
+ break;
+ data[0] = field->report->id;
+ ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
+ data, n, WAC_CMD_RETRIES);
+ if (ret == n) {
+ ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT,
+ data, n, 0);
+ } else {
+ hid_warn(hdev, "%s: could not retrieve sensor offsets\n",
+ __func__);
+ }
+ kfree(data);
+ break;
}
}
@@ -240,6 +267,30 @@ static void wacom_usage_mapping(struct hid_device *hdev,
features->touch_max = 1;
}
+ /*
+ * ISDv4 devices which predate HID's adoption of the
+	 * HID_DG_BARRELSWITCH2 usage use 0x000D0000 in its
+ * position instead. We can accurately detect if a
+ * usage with that value should be HID_DG_BARRELSWITCH2
+ * based on the surrounding usages, which have remained
+ * constant across generations.
+ */
+ if (features->type == HID_GENERIC &&
+ usage->hid == 0x000D0000 &&
+ field->application == HID_DG_PEN &&
+ field->physical == HID_DG_STYLUS) {
+ int i = usage->usage_index;
+
+ if (i-4 >= 0 && i+1 < field->maxusage &&
+ field->usage[i-4].hid == HID_DG_TIPSWITCH &&
+ field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
+ field->usage[i-2].hid == HID_DG_ERASER &&
+ field->usage[i-1].hid == HID_DG_INVERT &&
+ field->usage[i+1].hid == HID_DG_INRANGE) {
+ usage->hid = HID_DG_BARRELSWITCH2;
+ }
+ }
+
switch (usage->hid) {
case HID_GD_X:
features->x_max = field->logical_maximum;
@@ -689,11 +740,6 @@ static int wacom_add_shared_data(struct hid_device *hdev)
return retval;
}
- if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
- wacom_wac->shared->touch = hdev;
- else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
- wacom_wac->shared->pen = hdev;
-
out:
mutex_unlock(&wacom_udev_list_lock);
return retval;
@@ -1916,6 +1962,19 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
/* shift everything including the terminator */
memmove(gap, gap+1, strlen(gap));
}
+
+ /* strip off excessive prefixing */
+ if (strstr(name, "Wacom Co.,Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co.,Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+ if (strstr(name, "Wacom Co., Ltd. Wacom ") == name) {
+ int n = strlen(name);
+ int x = strlen("Wacom Co., Ltd. ");
+ memmove(name, name+x, n-x+1);
+ }
+
/* get rid of trailing whitespace */
if (name[strlen(name)-1] == ' ')
name[strlen(name)-1] = '\0';
@@ -1977,6 +2036,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
+ error = wacom_add_shared_data(hdev);
+ if (error)
+ goto fail;
+
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
* into debug mode for the touch part.
@@ -2017,9 +2080,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
wacom_update_name(wacom, wireless ? " (WL)" : "");
- error = wacom_add_shared_data(hdev);
- if (error)
- goto fail;
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+ wacom_wac->shared->touch = hdev;
+ else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+ wacom_wac->shared->pen = hdev;
if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1cb79925730d..b1a9a3ca6d56 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -41,6 +41,8 @@ MODULE_PARM_DESC(touch_arbitration, " on (Y) off (N)");
static void wacom_report_numbered_buttons(struct input_dev *input_dev,
int button_count, int mask);
+static int wacom_numbered_button_to_key(int n);
+
/*
* Percent of battery capacity for Graphire.
* 8th value means AC online and show 100% capacity.
@@ -588,6 +590,11 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
return 1;
}
+static int wacom_intuos_id_mangle(int tool_id)
+{
+ return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
+}
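+
+/*
+ * Worked example: the condensed tool id 0x12802 expands to
+ * ((0x12802 & ~0xFFF) << 4) | (0x12802 & 0xFFF) = 0x120000 | 0x802
+ * = 0x120802, the format user-space has always seen via ABS_MISC.
+ */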
+
static int wacom_intuos_get_tool_type(int tool_id)
{
int tool_type;
@@ -595,7 +602,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
switch (tool_id) {
case 0x812: /* Inking pen */
case 0x801: /* Intuos3 Inking pen */
- case 0x120802: /* Intuos4/5 Inking Pen */
+ case 0x12802: /* Intuos4/5 Inking Pen */
case 0x012:
tool_type = BTN_TOOL_PENCIL;
break;
@@ -610,11 +617,11 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
- case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
- case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
- case 0x160802: /* Cintiq 13HD Pro Pen */
- case 0x180802: /* DTH2242 Pen */
- case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x16802: /* Cintiq 13HD Pro Pen */
+ case 0x18802: /* DTH2242 Pen */
+ case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
tool_type = BTN_TOOL_PEN;
break;
@@ -638,6 +645,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
break;
case 0x82a: /* Eraser */
+ case 0x84a:
case 0x85a:
case 0x91a:
case 0xd1a:
@@ -648,12 +656,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
- case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
- case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
- case 0x18080a: /* DTH2242 Eraser */
- case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ case 0x1480a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+ case 0x1090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x1080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+ case 0x1680a: /* Cintiq 13HD Pro Pen Eraser */
+ case 0x1880a: /* DTH2242 Eraser */
+ case 0x1080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
tool_type = BTN_TOOL_RUBBER;
break;
@@ -662,7 +670,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x112:
case 0x913: /* Intuos3 Airbrush */
case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
- case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
+ case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
tool_type = BTN_TOOL_AIRBRUSH;
break;
@@ -693,7 +701,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
(data[6] << 4) + (data[7] >> 4);
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
- ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
+ ((data[7] & 0x0f) << 16) | ((data[8] & 0xf0) << 8);
wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
@@ -923,7 +931,7 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
* don't report events for invalid data
*/
/* older I4 styli don't work with new Cintiqs */
- if ((!((wacom->id[idx] >> 20) & 0x01) &&
+ if ((!((wacom->id[idx] >> 16) & 0x01) &&
(features->type == WACOM_21UX2)) ||
/* Only large Intuos support Lense Cursor */
(wacom->tool[idx] == BTN_TOOL_LENS &&
@@ -1059,7 +1067,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
break;
}
- input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+ input_report_abs(input, ABS_MISC,
+ wacom_intuos_id_mangle(wacom->id[idx])); /* report tool id */
input_report_key(input, wacom->tool[idx], 1);
input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->reporting_data = true;
@@ -1435,11 +1444,59 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
return 0;
}
+static int wacom_equivalent_usage(int usage)
+{
+ if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
+ int subpage = (usage & 0xFF00) << 8;
+ int subusage = (usage & 0xFF);
+
+ if (subpage == WACOM_HID_SP_PAD ||
+ subpage == WACOM_HID_SP_BUTTON ||
+ subpage == WACOM_HID_SP_DIGITIZER ||
+ subpage == WACOM_HID_SP_DIGITIZERINFO ||
+ usage == WACOM_HID_WD_SENSE ||
+ usage == WACOM_HID_WD_SERIALHI ||
+ usage == WACOM_HID_WD_TOOLTYPE ||
+ usage == WACOM_HID_WD_DISTANCE ||
+ usage == WACOM_HID_WD_TOUCHSTRIP ||
+ usage == WACOM_HID_WD_TOUCHSTRIP2 ||
+ usage == WACOM_HID_WD_TOUCHRING ||
+ usage == WACOM_HID_WD_TOUCHRINGSTATUS) {
+ return usage;
+ }
+
+ if (subpage == HID_UP_UNDEFINED)
+ subpage = HID_UP_DIGITIZER;
+
+ return subpage | subusage;
+ }
+
+ return usage;
+}
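+
+/*
+ * Worked example (assuming the Wacom vendor digitizer page is
+ * 0xff0dxxxx): usage 0xff0d0032 lies on none of the pass-through
+ * subpages above, its subpage ((usage & 0xFF00) << 8) is 0, i.e.
+ * HID_UP_UNDEFINED, so it is rewritten to HID_UP_DIGITIZER | 0x32
+ * == HID_DG_INRANGE and treated as the standard usage from then on.
+ */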
+
static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
struct hid_field *field, __u8 type, __u16 code, int fuzz)
{
+ struct wacom *wacom = input_get_drvdata(input);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
int fmin = field->logical_minimum;
int fmax = field->logical_maximum;
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ int resolution_code = code;
+
+	if (equivalent_usage == HID_DG_TWIST)
+		resolution_code = ABS_RZ;
+
+ if (equivalent_usage == HID_GD_X) {
+ fmin += features->offset_left;
+ fmax -= features->offset_right;
+ }
+ if (equivalent_usage == HID_GD_Y) {
+ fmin += features->offset_top;
+ fmax -= features->offset_bottom;
+ }
usage->type = type;
usage->code = code;
@@ -1450,7 +1507,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_ABS:
input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
input_abs_set_res(input, code,
- hidinput_calc_abs_res(field, code));
+ hidinput_calc_abs_res(field, resolution_code));
break;
case EV_KEY:
input_set_capability(input, EV_KEY, code);
@@ -1458,6 +1515,172 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
case EV_MSC:
input_set_capability(input, EV_MSC, code);
break;
+ case EV_SW:
+ input_set_capability(input, EV_SW, code);
+ break;
+ }
+}
+
+static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_X:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Y:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_ACCELEROMETER_Z:
+ __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_BUTTONHOME:
+ case WACOM_HID_WD_BUTTONUP:
+ case WACOM_HID_WD_BUTTONDOWN:
+ case WACOM_HID_WD_BUTTONLEFT:
+ case WACOM_HID_WD_BUTTONRIGHT:
+ case WACOM_HID_WD_BUTTONCENTER:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHONOFF:
+ wacom_map_usage(input, usage, field, EV_SW, SW_MUTE_DEVICE, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHSTRIP2:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_RY, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_TOUCHRING:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+
+ switch (equivalent_usage & 0xfffffff0) {
+ case WACOM_HID_WD_EXPRESSKEY00:
+ wacom_map_usage(input, usage, field, EV_KEY,
+ wacom_numbered_button_to_key(features->numbered_buttons),
+ 0);
+ features->numbered_buttons++;
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ }
+}
+
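Masking off the low nibble before the second switch lets the single WACOM_HID_WD_EXPRESSKEY00 case catch all sixteen consecutive express-key usages. In miniature (values taken from the header additions below):

    static int is_expresskey(unsigned int usage)
    {
        /* 0xff0d0910 is WACOM_HID_WD_EXPRESSKEY00; keys 0..15 occupy
         * 0xff0d0910..0xff0d091f, so clearing bits 3:0 folds the whole
         * block onto one case label. */
        return (usage & 0xfffffff0) == 0xff0d0910;
    }

    /* e.g. is_expresskey(0xff0d0915) is true for hypothetical key #5 */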
+static void wacom_wac_pad_battery_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_BATTERY_LEVEL:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+
+ case WACOM_HID_WD_BATTERY_CHARGING:
+ wacom_wac->hid_data.bat_charging = value;
+ wacom_wac->hid_data.ps_connected = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
+ }
+}
+
+static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct input_dev *input = wacom_wac->pad_input;
+ struct wacom_features *features = &wacom_wac->features;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+ if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ wacom_wac->hid_data.inrange_state |= value;
+ }
+
+ switch (equivalent_usage) {
+ case WACOM_HID_WD_TOUCHRINGSTATUS:
+ break;
+
+ default:
+ features->input_event_flag = true;
+ input_event(input, usage->type, usage->code, value);
+ break;
+ }
+}
+
+static void wacom_wac_pad_pre_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+ wacom_wac->hid_data.inrange_state = 0;
+}
+
+static void wacom_wac_pad_battery_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+
+ if (features->quirks & WACOM_QUIRK_BATTERY) {
+ int capacity = wacom_wac->hid_data.battery_capacity;
+ bool charging = wacom_wac->hid_data.bat_charging;
+ bool connected = wacom_wac->hid_data.bat_connected;
+ bool powered = wacom_wac->hid_data.ps_connected;
+
+ wacom_notify_battery(wacom_wac, capacity, charging,
+ connected, powered);
+ }
+}
+
+static void wacom_wac_pad_report(struct hid_device *hdev,
+ struct hid_report *report)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct input_dev *input = wacom_wac->pad_input;
+ bool active = wacom_wac->hid_data.inrange_state != 0;
+
+ /* report prox for expresskey events */
+ if (wacom_equivalent_usage(report->field[0]->physical) == HID_DG_TABLETFUNCTIONKEY) {
+ features->input_event_flag = true;
+ input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
+ }
+
+ if (features->input_event_flag) {
+ features->input_event_flag = false;
+ input_sync(input);
}
}
@@ -1466,25 +1689,43 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
break;
case HID_GD_Y:
wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4);
break;
+ case WACOM_HID_WD_DISTANCE:
+ case HID_GD_Z:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_DISTANCE, 0);
+ break;
case HID_DG_TIPPRESSURE:
wacom_map_usage(input, usage, field, EV_ABS, ABS_PRESSURE, 0);
break;
case HID_DG_INRANGE:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
break;
+ case HID_DG_BATTERYSTRENGTH:
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ break;
case HID_DG_INVERT:
wacom_map_usage(input, usage, field, EV_KEY,
BTN_TOOL_RUBBER, 0);
break;
+ case HID_DG_TILT_X:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_X, 0);
+ break;
+ case HID_DG_TILT_Y:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_Y, 0);
+ break;
+ case HID_DG_TWIST:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+ break;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
@@ -1498,39 +1739,131 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
case HID_DG_TOOLSERIALNUMBER:
wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
break;
+ case WACOM_HID_WD_SENSE:
+ features->quirks |= WACOM_QUIRK_SENSE;
+ wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
+ break;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
+ set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ break;
+ case WACOM_HID_WD_FINGERWHEEL:
+ wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ break;
}
}
-static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
+static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
struct input_dev *input = wacom_wac->pen_input;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- /* checking which Tool / tip switch to send */
- switch (usage->hid) {
+ switch (equivalent_usage) {
+ case HID_GD_Z:
+ /*
+ * HID_GD_Z "should increase as the control's position is
+ * moved from high to low", while ABS_DISTANCE instead
+ * increases in value as the tool moves from low to high.
+ */
+ value = field->logical_maximum - value;
+ break;
case HID_DG_INRANGE:
wacom_wac->hid_data.inrange_state = value;
- return 0;
+ if (!(features->quirks & WACOM_QUIRK_SENSE))
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case HID_DG_BATTERYSTRENGTH:
+ wacom_wac->hid_data.battery_capacity = value;
+ wacom_wac->hid_data.bat_connected = 1;
+ break;
case HID_DG_INVERT:
wacom_wac->hid_data.invert_state = value;
- return 0;
+ return;
case HID_DG_ERASER:
case HID_DG_TIPSWITCH:
wacom_wac->hid_data.tipswitch |= value;
- return 0;
+ return;
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+ wacom_wac->serial[0] |= value;
+ return;
+ case WACOM_HID_WD_SENSE:
+ wacom_wac->hid_data.sense_state = value;
+ return;
+ case WACOM_HID_WD_SERIALHI:
+ wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ /*
+ * Non-USI EMR devices may contain additional tool type
+ * information here. See WACOM_HID_WD_TOOLTYPE case for
+ * more details.
+ */
+ if (value >> 20 == 1) {
+ wacom_wac->id[0] |= value & 0xFFFFF;
+ }
+ return;
+ case WACOM_HID_WD_TOOLTYPE:
+ /*
+ * Some devices (MobileStudio Pro, and possibly later
+ * devices as well) do not return the complete tool
+ * type in their WACOM_HID_WD_TOOLTYPE usage. Use a
+ * bitwise OR so the complete value can be built
+ * up over time :(
+ */
+ wacom_wac->id[0] |= value;
+ return;
+ case WACOM_HID_WD_OFFSETLEFT:
+ if (features->offset_left && value != features->offset_left)
+ hid_warn(hdev, "%s: overriding exising left offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_left);
+ features->offset_left = value;
+ return;
+ case WACOM_HID_WD_OFFSETRIGHT:
+ if (features->offset_right && value != features->offset_right)
+ hid_warn(hdev, "%s: overriding exising right offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_right);
+ features->offset_right = value;
+ return;
+ case WACOM_HID_WD_OFFSETTOP:
+ if (features->offset_top && value != features->offset_top)
+ hid_warn(hdev, "%s: overriding exising top offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_top);
+ features->offset_top = value;
+ return;
+ case WACOM_HID_WD_OFFSETBOTTOM:
+ if (features->offset_bottom && value != features->offset_bottom)
+ hid_warn(hdev, "%s: overriding exising bottom offset "
+ "%d -> %d\n", __func__, value,
+ features->offset_bottom);
+ features->offset_bottom = value;
+ return;
}
/* send pen events only when touch is up or forced out
* or touch arbitration is off
*/
if (!usage->type || delay_pen_events(wacom_wac))
- return 0;
+ return;
- input_event(input, usage->type, usage->code, value);
+ /* send pen events only when the pen is in/entering/leaving proximity */
+ if (!wacom_wac->hid_data.inrange_state && !wacom_wac->tool[0])
+ return;
- return 0;
+ input_event(input, usage->type, usage->code, value);
}
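The 64-bit serial number is now assembled from two usages: HID_DG_TOOLSERIALNUMBER carries the low 32 bits and WACOM_HID_WD_SERIALHI the high 32. A standalone sketch of the assembly and of the EMR discriminator, using made-up report values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t serial = 0;
        uint32_t lo = 0xdeadbeef;  /* hypothetical HID_DG_TOOLSERIALNUMBER */
        uint32_t hi = 0x00100802;  /* hypothetical WACOM_HID_WD_SERIALHI */

        serial = (serial & ~0xFFFFFFFFULL) | lo;                /* low word  */
        serial = (serial & 0xFFFFFFFF) | ((uint64_t)hi << 32);  /* high word */

        /* hi >> 20 == 1 marks a non-USI EMR pen that also encodes its
         * tool type in the low 20 bits of the high word. */
        if (hi >> 20 == 1)
            printf("EMR tool, type bits 0x%05x\n", hi & 0xFFFFF);
        printf("serial = 0x%016llx\n", (unsigned long long)serial);
        return 0;
    }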
static void wacom_wac_pen_pre_report(struct hid_device *hdev,
@@ -1546,24 +1879,53 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->pen_input;
bool prox = wacom_wac->hid_data.inrange_state;
+ bool range = wacom_wac->hid_data.sense_state;
- if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */
+ if (!wacom_wac->tool[0] && prox) { /* first in prox */
/* Going into proximity select tool */
- wacom_wac->tool[0] = wacom_wac->hid_data.invert_state ?
- BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom_wac->hid_data.invert_state)
+ wacom_wac->tool[0] = BTN_TOOL_RUBBER;
+ else if (wacom_wac->id[0])
+ wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]);
+ else
+ wacom_wac->tool[0] = BTN_TOOL_PEN;
+ }
/* keep pen state for touch events */
- wacom_wac->shared->stylus_in_proximity = prox;
+ wacom_wac->shared->stylus_in_proximity = range;
- if (!delay_pen_events(wacom_wac)) {
+ if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) {
+ int id = wacom_wac->id[0];
+
+ /*
+ * Non-USI EMR tools should have their IDs mangled to
+ * match the legacy behavior of wacom_intuos_general
+ */
+ if (wacom_wac->serial[0] >> 52 == 1)
+ id = wacom_intuos_id_mangle(id);
+
+ /*
+ * To ensure compatibility with xf86-input-wacom, we should
+ * report the BTN_TOOL_* event prior to the ABS_MISC or
+ * MSC_SERIAL events.
+ */
input_report_key(input, BTN_TOUCH,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], prox);
+ if (wacom_wac->serial[0]) {
+ input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+ input_report_abs(input, ABS_MISC, id);
+ }
wacom_wac->hid_data.tipswitch = false;
input_sync(input);
}
+
+ if (!prox) {
+ wacom_wac->tool[0] = 0;
+ wacom_wac->id[0] = 0;
+ }
}
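Note that the two discriminator tests agree: with serial = ((u64)hi << 32) | lo, we have serial >> 52 == hi >> 20, so the hi >> 20 == 1 check in wacom_wac_pen_event() and the serial[0] >> 52 == 1 check above select the same non-USI EMR tools.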
static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
@@ -1573,8 +1935,9 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->touch_input;
unsigned touch_max = wacom_wac->features.touch_max;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
if (touch_max == 1)
wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
@@ -1644,13 +2007,14 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
}
}
-static int wacom_wac_finger_event(struct hid_device *hdev,
+static void wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
- switch (usage->hid) {
+ switch (equivalent_usage) {
case HID_GD_X:
wacom_wac->hid_data.x = value;
break;
@@ -1673,11 +2037,9 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
if (usage->usage_index + 1 == field->report_count) {
- if (usage->hid == wacom_wac->hid_data.last_slot_field)
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
-
- return 0;
}
static void wacom_wac_finger_pre_report(struct hid_device *hdev,
@@ -1762,28 +2124,30 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
/* currently, only direct devices have proper hid report descriptors */
features->device_type |= WACOM_DEVICETYPE_DIRECT;
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_usage_mapping(hdev, field, usage);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_usage_mapping(hdev, field, usage);
+ if (WACOM_PAD_FIELD(field))
+ wacom_wac_pad_usage_mapping(hdev, field, usage);
+ else if (WACOM_PEN_FIELD(field))
+ wacom_wac_pen_usage_mapping(hdev, field, usage);
+ else if (WACOM_FINGER_FIELD(field))
+ wacom_wac_finger_usage_mapping(hdev, field, usage);
}
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct wacom *wacom = hid_get_drvdata(hdev);
if (wacom->wacom_wac.features.type != HID_GENERIC)
- return 0;
-
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_event(hdev, field, usage, value);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_event(hdev, field, usage, value);
+ return;
- return 0;
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_event(hdev, field, usage, value);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_event(hdev, field, usage, value);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_event(hdev, field, usage, value);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_event(hdev, field, usage, value);
}
static void wacom_report_events(struct hid_device *hdev, struct hid_report *report)
@@ -1814,19 +2178,23 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
if (wacom_wac->features.type != HID_GENERIC)
return;
- if (WACOM_PEN_FIELD(field))
+ if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+ wacom_wac_pad_pre_report(hdev, report);
+ else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_pre_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_pre_report(hdev, report);
wacom_report_events(hdev, report);
- if (WACOM_PEN_FIELD(field))
- return wacom_wac_pen_report(hdev, report);
-
- if (WACOM_FINGER_FIELD(field))
- return wacom_wac_finger_report(hdev, report);
+ if (WACOM_PAD_FIELD(field)) {
+ wacom_wac_pad_battery_report(hdev, report);
+ if (wacom->wacom_wac.pad_input)
+ wacom_wac_pad_report(hdev, report);
+ } else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_report(hdev, report);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_report(hdev, report);
}
static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -2399,6 +2767,8 @@ void wacom_setup_device_quirks(struct wacom *wacom)
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
+ if (features->numbered_buttons > 0)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
features->type == DTUS ||
(features->type >= INTUOS3S && features->type <= WACOM_MO)) {
@@ -2448,7 +2818,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
/*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
- * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
+ * (i.e., WACOM_HID_WD_DIGITIZER). We route pen packets back
* out through the HID_GENERIC device created for interface 1,
* so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
*/
@@ -2530,10 +2900,12 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
- input_set_abs_params(input_dev, ABS_X, features->x_min,
- features->x_max, features->x_fuzz, 0);
- input_set_abs_params(input_dev, ABS_Y, features->y_min,
- features->y_max, features->y_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_X, 0 + features->offset_left,
+ features->x_max - features->offset_right,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0 + features->offset_top,
+ features->y_max - features->offset_bottom,
+ features->y_fuzz, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0,
features->pressure_max, features->pressure_fuzz, 0);
@@ -2769,17 +3141,29 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
return 0;
}
+static int wacom_numbered_button_to_key(int n)
+{
+ if (n < 10)
+ return BTN_0 + n;
+ else if (n < 16)
+ return BTN_A + (n-10);
+ else if (n < 18)
+ return BTN_BASE + (n-16);
+ else
+ return 0;
+}
+
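wacom_numbered_button_to_key() centralizes the BTN_0 / BTN_A / BTN_BASE split that was previously duplicated between capability setup and event reporting (both converted below). A standalone sketch of the resulting map, assuming the BTN_* values from <linux/input-event-codes.h>:

    #include <stdio.h>
    #include <linux/input-event-codes.h>  /* BTN_0, BTN_A, BTN_BASE */

    static int to_key(int n)
    {
        if (n < 10)
            return BTN_0 + n;             /* 0..9   -> BTN_0..BTN_9          */
        else if (n < 16)
            return BTN_A + (n - 10);      /* 10..15 -> BTN_A..BTN_Z (gamepad) */
        else if (n < 18)
            return BTN_BASE + (n - 16);   /* 16..17 -> BTN_BASE, BTN_BASE2   */
        else
            return 0;                     /* no mapping past 18 buttons      */
    }

    int main(void)
    {
        int n;

        for (n = 0; n < 19; n++)
            printf("button %2d -> key 0x%03x\n", n, to_key(n));
        return 0;
    }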
static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
int button_count)
{
int i;
- for (i = 0; i < button_count && i < 10; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
- for (i = 10; i < button_count && i < 16; i++)
- __set_bit(BTN_A + (i-10), input_dev->keybit);
- for (i = 16; i < button_count && i < 18; i++)
- __set_bit(BTN_BASE + (i-16), input_dev->keybit);
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ __set_bit(key, input_dev->keybit);
+ }
}
static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
@@ -2881,12 +3265,12 @@ static void wacom_report_numbered_buttons(struct input_dev *input_dev,
for (i = 0; i < wacom->led.count; i++)
wacom_update_led(wacom, button_count, mask, i);
- for (i = 0; i < button_count && i < 10; i++)
- input_report_key(input_dev, BTN_0 + i, mask & (1 << i));
- for (i = 10; i < button_count && i < 16; i++)
- input_report_key(input_dev, BTN_A + (i-10), mask & (1 << i));
- for (i = 16; i < button_count && i < 18; i++)
- input_report_key(input_dev, BTN_BASE + (i-16), mask & (1 << i));
+ for (i = 0; i < button_count; i++) {
+ int key = wacom_numbered_button_to_key(i);
+
+ if (key)
+ input_report_key(input_dev, key, mask & (1 << i));
+ }
}
int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
@@ -2906,8 +3290,12 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
__set_bit(ABS_MISC, input_dev->absbit);
/* kept for making legacy xf86-input-wacom accepting the pad */
- input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_X].minimum ||
+ input_dev->absinfo[ABS_X].maximum)))
+ input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
+ if (!(input_dev->absinfo && (input_dev->absinfo[ABS_Y].minimum ||
+ input_dev->absinfo[ABS_Y].maximum)))
+ input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
/* kept for making udev and libwacom accepting the pad */
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -3027,6 +3415,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
+ case HID_GENERIC:
+ break;
+
default:
/* no pad supported */
return -ENODEV;
@@ -3233,26 +3624,30 @@ static const struct wacom_features wacom_features_0x317 =
INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xF4 =
- { "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
+ { "Wacom Cintiq 24HD", 104480, 65600, 2047, 63,
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xF8 =
- { "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
+ { "Wacom Cintiq 24HD touch", 104480, 65600, 2047, 63, /* Pen */
WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x32A =
- { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x32B =
- { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
+ { "Wacom Cintiq 27QHD touch", 120140, 67920, 2047, 63,
WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C };
static const struct wacom_features wacom_features_0x32C =
{ "Wacom Cintiq 27QHD touch", .type = WACOM_27QHDT,
@@ -3267,13 +3662,15 @@ static const struct wacom_features wacom_features_0xC6 =
{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
static const struct wacom_features wacom_features_0x304 =
- { "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
+ { "Wacom Cintiq 13HD", 59552, 33848, 1023, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x333 =
- { "Wacom Cintiq 13HD touch", 59152, 33448, 2047, 63,
+ { "Wacom Cintiq 13HD touch", 59552, 33848, 2047, 63,
WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x335 };
static const struct wacom_features wacom_features_0x335 =
{ "Wacom Cintiq 13HD touch", .type = WACOM_24HDT, /* Touch */
@@ -3290,42 +3687,50 @@ static const struct wacom_features wacom_features_0xF0 =
{ "Wacom DTU1631", 34623, 19553, 511, 0,
DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xFB =
- { "Wacom DTU1031", 21896, 13760, 511, 0,
+ { "Wacom DTU1031", 22096, 13960, 511, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x32F =
- { "Wacom DTU1031X", 22472, 12728, 511, 0,
+ { "Wacom DTU1031X", 22672, 12928, 511, 0,
DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 0,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x336 =
- { "Wacom DTU1141", 23472, 13203, 1023, 0,
+ { "Wacom DTU1141", 23672, 13403, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x57 =
- { "Wacom DTK2241", 95640, 54060, 2047, 63,
+ { "Wacom DTK2241", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x59 = /* Pen */
- { "Wacom DTH2242", 95640, 54060, 2047, 63,
+ { "Wacom DTH2242", 95840, 54260, 2047, 63,
DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
static const struct wacom_features wacom_features_0x5D = /* Touch */
{ "Wacom DTH2242", .type = WACOM_24HDT,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xCC =
- { "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
+ { "Wacom Cintiq 21UX2", 87200, 65600, 2047, 63,
WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0xFA =
- { "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HD", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
static const struct wacom_features wacom_features_0x5B =
- { "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
+ { "Wacom Cintiq 22HDT", 95840, 54260, 2047, 63,
WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
static const struct wacom_features wacom_features_0x5E =
{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
@@ -3469,18 +3874,20 @@ static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", 12800, 8000, 255, 0,
TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x307 =
- { "Wacom ISDv5 307", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 307", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
static const struct wacom_features wacom_features_0x309 =
{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x30A =
- { "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
+ { "Wacom ISDv5 30A", 59552, 33848, 2047, 63,
CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
static const struct wacom_features wacom_features_0x30C =
{ "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */
@@ -3496,6 +3903,7 @@ static const struct wacom_features wacom_features_0x325 =
{ "Wacom ISDv5 325", 59552, 33848, 2047, 63,
CINTIQ_COMPANION_2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 11,
WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+ WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x326 };
static const struct wacom_features wacom_features_0x326 = /* Touch */
{ "Wacom ISDv5 326", .type = HID_GENERIC, .oVid = USB_VENDOR_ID_WACOM,
@@ -3525,8 +3933,9 @@ static const struct wacom_features wacom_features_0x33E =
INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
.check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x343 =
- { "Wacom DTK1651", 34616, 19559, 1023, 0,
+ { "Wacom DTK1651", 34816, 19759, 1023, 0,
DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_HID_ANY_ID =
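Worked example for the enlarged maxima above: with WACOM_CINTIQ_OFFSET = 400 (as the uniform +400 deltas imply), the 24HD entry grows x_max from 104080 to 104480 while gaining offset_left = offset_right = 400. The ABS_X range programmed by wacom_setup_pen_input_capabilities() is then [0 + offset_left, x_max - offset_right] = [400, 104080], identical to the old [x_min, x_max] range, so existing devices report exactly as before; the change only makes the margins available to the HID_GENERIC offset usages handled earlier in this patch.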
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 324c40b0c119..fb0e50acb10d 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -74,6 +74,7 @@
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
+#define WACOM_QUIRK_SENSE 0x0002
#define WACOM_QUIRK_BATTERY 0x0008
/* device types */
@@ -84,23 +85,66 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_DEVICETYPE_DIRECT 0x0010
-#define WACOM_VENDORDEFINED_PEN 0xff0d0001
-#define WACOM_G9_PAGE 0xff090000
-#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
-#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
-#define WACOM_G11_PAGE 0xff110000
-#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
-#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
+#define WACOM_HID_UP_WACOMDIGITIZER 0xff0d0000
+#define WACOM_HID_SP_PAD 0x00040000
+#define WACOM_HID_SP_BUTTON 0x00090000
+#define WACOM_HID_SP_DIGITIZER 0x000d0000
+#define WACOM_HID_SP_DIGITIZERINFO 0x00100000
+#define WACOM_HID_WD_DIGITIZER (WACOM_HID_UP_WACOMDIGITIZER | 0x01)
+#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
+#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
+#define WACOM_HID_WD_TOOLTYPE (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
+#define WACOM_HID_WD_DISTANCE (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
+#define WACOM_HID_WD_TOUCHSTRIP (WACOM_HID_UP_WACOMDIGITIZER | 0x0136)
+#define WACOM_HID_WD_TOUCHSTRIP2 (WACOM_HID_UP_WACOMDIGITIZER | 0x0137)
+#define WACOM_HID_WD_TOUCHRING (WACOM_HID_UP_WACOMDIGITIZER | 0x0138)
+#define WACOM_HID_WD_TOUCHRINGSTATUS (WACOM_HID_UP_WACOMDIGITIZER | 0x0139)
+#define WACOM_HID_WD_ACCELEROMETER_X (WACOM_HID_UP_WACOMDIGITIZER | 0x0401)
+#define WACOM_HID_WD_ACCELEROMETER_Y (WACOM_HID_UP_WACOMDIGITIZER | 0x0402)
+#define WACOM_HID_WD_ACCELEROMETER_Z (WACOM_HID_UP_WACOMDIGITIZER | 0x0403)
+#define WACOM_HID_WD_BATTERY_CHARGING (WACOM_HID_UP_WACOMDIGITIZER | 0x0404)
+#define WACOM_HID_WD_BATTERY_LEVEL (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
+#define WACOM_HID_WD_EXPRESSKEY00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
+#define WACOM_HID_WD_EXPRESSKEYCAP00 (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
+#define WACOM_HID_WD_BUTTONHOME (WACOM_HID_UP_WACOMDIGITIZER | 0x0990)
+#define WACOM_HID_WD_BUTTONUP (WACOM_HID_UP_WACOMDIGITIZER | 0x0991)
+#define WACOM_HID_WD_BUTTONDOWN (WACOM_HID_UP_WACOMDIGITIZER | 0x0992)
+#define WACOM_HID_WD_BUTTONLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0993)
+#define WACOM_HID_WD_BUTTONRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0994)
+#define WACOM_HID_WD_BUTTONCENTER (WACOM_HID_UP_WACOMDIGITIZER | 0x0995)
+#define WACOM_HID_WD_TOUCHONOFF (WACOM_HID_UP_WACOMDIGITIZER | 0x0996)
+#define WACOM_HID_WD_FINGERWHEEL (WACOM_HID_UP_WACOMDIGITIZER | 0x0d03)
+#define WACOM_HID_WD_OFFSETLEFT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d30)
+#define WACOM_HID_WD_OFFSETTOP (WACOM_HID_UP_WACOMDIGITIZER | 0x0d31)
+#define WACOM_HID_WD_OFFSETRIGHT (WACOM_HID_UP_WACOMDIGITIZER | 0x0d32)
+#define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
+#define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
+#define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
+#define WACOM_HID_UP_G9 0xff090000
+#define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02)
+#define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11)
+#define WACOM_HID_UP_G11 0xff110000
+#define WACOM_HID_G11_PEN (WACOM_HID_UP_G11 | 0x02)
+#define WACOM_HID_G11_TOUCHSCREEN (WACOM_HID_UP_G11 | 0x11)
+
+#define WACOM_PAD_FIELD(f) (((f)->physical == HID_DG_TABLETFUNCTIONKEY) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERFNKEYS) || \
+ ((f)->physical == WACOM_HID_WD_DIGITIZERINFO))
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_PEN) || \
((f)->application == HID_DG_PEN) || \
((f)->application == HID_DG_DIGITIZER) || \
- ((f)->application == WACOM_VENDORDEFINED_PEN))
+ ((f)->application == WACOM_HID_WD_DIGITIZER) || \
+ ((f)->application == WACOM_HID_G9_PEN) || \
+ ((f)->application == WACOM_HID_G11_PEN))
#define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \
((f)->physical == HID_DG_FINGER) || \
- ((f)->application == HID_DG_TOUCHSCREEN))
+ ((f)->application == HID_DG_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G9_TOUCHSCREEN) || \
+ ((f)->application == WACOM_HID_G11_TOUCHSCREEN))
enum {
PENPARTNER = 0,
@@ -167,8 +211,10 @@ struct wacom_features {
int x_resolution;
int y_resolution;
int numbered_buttons;
- int x_min;
- int y_min;
+ int offset_left;
+ int offset_right;
+ int offset_top;
+ int offset_bottom;
int device_type;
int x_phy;
int y_phy;
@@ -186,6 +232,7 @@ struct wacom_features {
int pktlen;
bool check_for_hid_type;
int hid_type;
+ bool input_event_flag;
};
struct wacom_shared {
@@ -202,6 +249,7 @@ struct wacom_shared {
struct hid_data {
__s16 inputmode; /* InputMode HID feature, -1 if non-existent */
__s16 inputmode_index; /* InputMode HID feature index in the report */
+ bool sense_state;
bool inrange_state;
bool invert_state;
bool tipswitch;
@@ -217,6 +265,10 @@ struct hid_data {
int last_slot_field;
int num_expected;
int num_received;
+ int battery_capacity;
+ int bat_charging;
+ int bat_connected;
+ int ps_connected;
};
struct wacom_remote_data {
@@ -234,7 +286,7 @@ struct wacom_wac {
unsigned char data[WACOM_PKGLEN_MAX];
int tool[2];
int id[2];
- __u32 serial[2];
+ __u64 serial[2];
bool reporting_data;
struct wacom_features features;
struct wacom_shared *shared;
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 4aa3cb63fd41..bcd06306f3e8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -314,10 +314,14 @@ static void heartbeat_onchannelcallback(void *context)
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
struct icmsg_negotiate *negop = NULL;
- vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ while (1) {
+
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
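The heartbeat handler now drains the channel rather than reading a single packet per callback: if the host queues more than one heartbeat message, any packet left unanswered could presumably let the host conclude the guest has stopped responding. The drain pattern, reduced to its core (fragment of the hunk above; vmbus_recvpacket() reports 0 in recvlen once the ring buffer is empty):

    while (1) {
        vmbus_recvpacket(channel, hbeat_txf_buf, PAGE_SIZE,
                         &recvlen, &requestid);
        if (!recvlen)
            break;            /* ring buffer drained */
        /* ... validate and answer this heartbeat message ... */
    }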
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a259e18d22d5..0276d2ef06ee 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -961,7 +961,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
{
int ret = 0;
- dev_set_name(&child_device_obj->device, "vmbus-%pUl",
+ dev_set_name(&child_device_obj->device, "%pUl",
child_device_obj->channel->offermsg.offer.if_instance.b);
child_device_obj->device.bus = &hv_bus;
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 98114cef1e43..2fe1828bd10b 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -194,10 +194,10 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
* 0.5'C per two measurement cycles thus ignore possible
* but unlikely aliasing error on lsb reading. --Grant
*/
- data->temp = ((i2c_smbus_read_byte_data(client,
+ data->temp = (i2c_smbus_read_byte_data(client,
ADM9240_REG_TEMP) << 8) |
i2c_smbus_read_byte_data(client,
- ADM9240_REG_TEMP_CONF)) / 128;
+ ADM9240_REG_TEMP_CONF);
for (i = 0; i < 2; i++) { /* read fans */
data->fan[i] = i2c_smbus_read_byte_data(client,
@@ -263,7 +263,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
- return sprintf(buf, "%d\n", data->temp * 500); /* 9-bit value */
+ return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
}
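Worked example for the deferred scaling: a 25.5 degC reading returns 0x19 (25) from the temperature register and the half-degree flag (bit 7) in TEMP_CONF, so data->temp now holds the raw 0x1980. Scaling at display time gives 0x1980 / 128 = 51 half-degree steps and 51 * 500 = 25500, the milli-degree unit hwmon sysfs expects. Keeping the raw 16-bit value also lets sub-zero readings sign-extend correctly before the division, assuming data->temp is a signed 16-bit field.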
static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index adae6848ffb2..a74c075a30ec 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -536,8 +536,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
GFP_KERNEL);
- if (!hwdev->groups)
- return ERR_PTR(-ENOMEM);
+ if (!hwdev->groups) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
attrs = __hwmon_create_attrs(dev, drvdata, chip);
if (IS_ERR(attrs)) {
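The early return above leaked whatever __hwmon_device_register() had already acquired (at minimum the hwdev allocation unwound by the free_hwmon label); routing the failure through the function's existing label fixes that. The general shape of the pattern, as a self-contained sketch with hypothetical helpers:

    #include <errno.h>
    #include <stdlib.h>

    struct res { int dummy; };

    static int setup(struct res **pa, struct res **pb)
    {
        int err;

        *pa = malloc(sizeof(**pa));
        if (!*pa)
            return -ENOMEM;       /* nothing held yet: plain return is fine */

        *pb = malloc(sizeof(**pb));
        if (!*pb) {
            err = -ENOMEM;
            goto free_a;          /* *pa is already held: must unwind it */
        }
        return 0;

    free_a:
        free(*pa);
        *pa = NULL;
        return err;
    }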
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index bef84e085973..c1b9275978f9 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -268,11 +268,13 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
long *val)
{
struct max31790_data *data = max31790_update_device(dev);
- u8 fan_config = data->fan_config[channel];
+ u8 fan_config;
if (IS_ERR(data))
return PTR_ERR(data);
+ fan_config = data->fan_config[channel];
+
switch (attr) {
case hwmon_pwm_input:
*val = data->pwm[channel] >> 8;
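The bug class fixed here is dereference-before-check: max31790_update_device() returns an ERR_PTR-encoded pointer, so reading data->fan_config before the IS_ERR() test dereferences a small invalid address on failure. The safe ordering in miniature (get_foo() and struct foo are hypothetical stand-ins):

    #include <linux/err.h>

    struct foo { int field; };
    struct foo *get_foo(void);    /* hypothetical ERR_PTR-returning helper */

    static int use_foo(void)
    {
        struct foo *p = get_foo();
        int v;

        if (IS_ERR(p))
            return PTR_ERR(p);    /* decode the errno; never dereference */

        v = p->field;             /* safe only after the IS_ERR() screen */
        return v;
    }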
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index d223650a97e4..11edabf425ae 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -59,7 +59,6 @@ config I2C_CHARDEV
config I2C_MUX
tristate "I2C bus multiplexing support"
- depends on HAS_IOMEM
help
Say Y here if you want the I2C core to support the ability to
handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 5c3993b26129..d252276feadf 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,12 +79,12 @@ config I2C_AMD8111
config I2C_HIX5HD2
tristate "Hix5hd2 high-speed I2C driver"
- depends on ARCH_HIX5HD2 || COMPILE_TEST
+ depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
help
- Say Y here to include support for high-speed I2C controller in the
- Hisilicon based hix5hd2 SoCs.
+ Say Y here to include support for the high-speed I2C controller
+ used in HiSilicon hix5hd2 SoCs.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called i2c-hix5hd2.
config I2C_I801
@@ -589,10 +589,10 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
help
Say Y here if you want to use the IIC bus controller on
- the Freescale i.MX/MXC or Layerscape processors.
+ the Freescale i.MX/MXC, Layerscape or ColdFire processors.
This driver can also be built as a module. If so, the module
will be called i2c-imx.
@@ -836,7 +836,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
depends on HAS_DMA
- depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+ depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Renesas SH-Mobile processor.
@@ -956,6 +956,17 @@ config I2C_OCTEON
This driver can also be built as a module. If so, the module
will be called i2c-octeon.
+config I2C_THUNDERX
+ tristate "Cavium ThunderX I2C bus support"
+ depends on 64BIT && PCI && (ARM64 || COMPILE_TEST)
+ select I2C_SMBUS
+ help
+ Say yes if you want to support the I2C serial bus on the Cavium
+ ThunderX SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-thunderx.
+
config I2C_XILINX
tristate "Xilinx I2C Controller"
depends on HAS_IOMEM
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 37f2819b4560..29764cc20a44 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -91,7 +91,10 @@ obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o
obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_WMT) += i2c-wmt.o
+i2c-octeon-objs := i2c-octeon-core.o i2c-octeon-platdrv.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
+i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o
+obj-$(CONFIG_I2C_THUNDERX) += i2c-thunderx.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
obj-$(CONFIG_I2C_XLR) += i2c-xlr.o
obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 6c7113d990f8..274908cd1fde 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -378,11 +378,8 @@ static int amd756_probe(struct pci_dev *pdev, const struct pci_device_id *id)
amd756_ioport);
error = i2c_add_adapter(&amd756_smbus);
- if (error) {
- dev_err(&pdev->dev,
- "Adapter registration failed, module not inserted\n");
+ if (error)
goto out_err;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 1bb97f658b47..0b86c6173e07 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1122,8 +1122,6 @@ static int at91_twi_probe(struct platform_device *pdev)
rc = i2c_add_numbered_adapter(&dev->adapter);
if (rc) {
- dev_err(dev->dev, "Adapter %s registration failed\n",
- dev->adapter.name);
clk_disable_unprepare(dev->clk);
pm_runtime_disable(dev->dev);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index c335cc7852f9..4351a9343058 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -545,7 +545,11 @@ static int axxia_i2c_probe(struct platform_device *pdev)
return ret;
}
- clk_prepare_enable(idev->i2c_clk);
+ ret = clk_prepare_enable(idev->i2c_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return ret;
+ }
i2c_set_adapdata(&idev->adapter, idev);
strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
@@ -560,7 +564,7 @@ static int axxia_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&idev->adapter);
if (ret) {
- dev_err(&pdev->dev, "failed to add adapter\n");
+ clk_disable_unprepare(idev->i2c_clk);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 95f7cac76f89..326b3db02c48 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -488,13 +488,7 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
- ret = i2c_add_adapter(adap);
- if (ret) {
- dev_err(iproc_i2c->device, "failed to add adapter\n");
- return ret;
- }
-
- return 0;
+ return i2c_add_adapter(adap);
}
static int bcm_iproc_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 258cb9a40ab3..4e489a9d16fb 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -858,10 +858,8 @@ static int bcm_kona_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
rc = i2c_add_adapter(adap);
- if (rc) {
- dev_err(dev->device, "failed to add adapter\n");
+ if (rc)
return rc;
- }
dev_info(dev->device, "device registered successfully\n");
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 025686d41640..29d00c4f7824 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -685,10 +685,8 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
write_CONTROL(iface, read_CONTROL(iface) | TWI_ENA);
rc = i2c_add_numbered_adapter(p_adap);
- if (rc < 0) {
- dev_err(&pdev->dev, "Can't add i2c adapter!\n");
+ if (rc < 0)
goto out_error;
- }
platform_set_drvdata(pdev, iface);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 385b57bfcb38..0652281662a8 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -648,10 +648,8 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
rc = i2c_add_adapter(adap);
- if (rc) {
- dev_err(dev->device, "failed to add adapter\n");
+ if (rc)
goto probe_errorout;
- }
dev_info(dev->device, "%s@%dhz registered in %s mode\n",
int_name ? int_name : " ", dev->clk_freq_hz,
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 3c16a2f7c673..686971263bef 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -963,10 +963,8 @@ static int cdns_i2c_probe(struct platform_device *pdev)
}
ret = i2c_add_adapter(&id->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "reg adap failed: %d\n", ret);
+ if (ret < 0)
goto err_clk_dis;
- }
/*
* Cadence I2C controller has a bug wherein it generates
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index ee57c1e865e2..d89bde2c5da2 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -665,10 +665,8 @@ static int cpm_i2c_probe(struct platform_device *ofdev)
cpm->adap.nr = (data && len == 4) ? be32_to_cpup(data) : -1;
result = i2c_add_numbered_adapter(&cpm->adap);
- if (result < 0) {
- dev_err(&ofdev->dev, "Unable to register with I2C\n");
+ if (result < 0)
goto out_shut;
- }
dev_dbg(&ofdev->dev, "hw routines for %s registered.\n",
cpm->adap.name);
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 2d5ff86398d0..9b36a7b3befd 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -281,10 +281,8 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->adap.retries = I2C_MAX_RETRIES;
err = i2c_add_adapter(&bus->adap);
- if (err) {
- dev_err(dev, "cannot register i2c adapter\n");
+ if (err)
return err;
- }
platform_set_drvdata(pdev, bus);
return err;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index a8bdcb5292f5..9e7ef5cf5d49 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -846,10 +846,8 @@ static int davinci_i2c_probe(struct platform_device *pdev)
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
- if (r) {
- dev_err(&pdev->dev, "failure adding adapter\n");
+ if (r)
goto err_unuse_clocks;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index fcd973d5131e..11e866d05368 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -42,6 +42,8 @@
#define DW_IC_SS_SCL_LCNT 0x18
#define DW_IC_FS_SCL_HCNT 0x1c
#define DW_IC_FS_SCL_LCNT 0x20
+#define DW_IC_HS_SCL_HCNT 0x24
+#define DW_IC_HS_SCL_LCNT 0x28
#define DW_IC_INTR_STAT 0x2c
#define DW_IC_INTR_MASK 0x30
#define DW_IC_RAW_INTR_STAT 0x34
@@ -89,12 +91,20 @@
DW_IC_INTR_TX_ABRT | \
DW_IC_INTR_STOP_DET)
-#define DW_IC_STATUS_ACTIVITY 0x1
+#define DW_IC_STATUS_ACTIVITY 0x1
+#define DW_IC_STATUS_TFE BIT(2)
+#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
+
+#define DW_IC_SDA_HOLD_RX_SHIFT 16
+#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
#define DW_IC_ERR_TX_ABRT 0x1
#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
+#define DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3))
+#define DW_IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2)
+
/*
* status codes
*/
@@ -252,10 +262,15 @@ static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
{
+ dw_writel(dev, enable, DW_IC_ENABLE);
+}
+
+static void __i2c_dw_enable_and_wait(struct dw_i2c_dev *dev, bool enable)
+{
int timeout = 100;
do {
- dw_writel(dev, enable, DW_IC_ENABLE);
+ __i2c_dw_enable(dev, enable);
if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable)
return;
@@ -282,6 +297,28 @@ static unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
return dev->get_clk_rate_khz(dev);
}
+static int i2c_dw_acquire_lock(struct dw_i2c_dev *dev)
+{
+ int ret;
+
+ if (!dev->acquire_lock)
+ return 0;
+
+ ret = dev->acquire_lock(dev);
+ if (!ret)
+ return 0;
+
+ dev_err(dev->dev, "couldn't acquire bus ownership\n");
+
+ return ret;
+}
+
+static void i2c_dw_release_lock(struct dw_i2c_dev *dev)
+{
+ if (dev->release_lock)
+ dev->release_lock(dev);
+}
+
/**
* i2c_dw_init() - initialize the designware i2c master hardware
* @dev: device private data
@@ -293,17 +330,13 @@ static unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
int i2c_dw_init(struct dw_i2c_dev *dev)
{
u32 hcnt, lcnt;
- u32 reg;
+ u32 reg, comp_param1;
u32 sda_falling_time, scl_falling_time;
int ret;
- if (dev->acquire_lock) {
- ret = dev->acquire_lock(dev);
- if (ret) {
- dev_err(dev->dev, "couldn't acquire bus ownership\n");
- return ret;
- }
- }
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
reg = dw_readl(dev, DW_IC_COMP_TYPE);
if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
@@ -315,13 +348,14 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
} else if (reg != DW_IC_COMP_TYPE_VALUE) {
dev_err(dev->dev, "Unknown Synopsys component type: "
"0x%08x\n", reg);
- if (dev->release_lock)
- dev->release_lock(dev);
+ i2c_dw_release_lock(dev);
return -ENODEV;
}
+ comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1);
+
/* Disable the adapter */
- __i2c_dw_enable(dev, false);
+ __i2c_dw_enable_and_wait(dev, false);
/* set standard and fast speed deviders for high/low periods */
@@ -347,8 +381,11 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
- /* Set SCL timing parameters for fast-mode */
- if (dev->fs_hcnt && dev->fs_lcnt) {
+ /* Set SCL timing parameters for fast-mode or fast-mode plus */
+ if ((dev->clk_freq == 1000000) && dev->fp_hcnt && dev->fp_lcnt) {
+ hcnt = dev->fp_hcnt;
+ lcnt = dev->fp_lcnt;
+ } else if (dev->fs_hcnt && dev->fs_lcnt) {
hcnt = dev->fs_hcnt;
lcnt = dev->fs_lcnt;
} else {
@@ -366,15 +403,40 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+ if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
+ DW_IC_CON_SPEED_HIGH) {
+ if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
+ != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
+ dev_err(dev->dev, "High Speed not supported!\n");
+ dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
+ dev->master_cfg |= DW_IC_CON_SPEED_FAST;
+ } else if (dev->hs_hcnt && dev->hs_lcnt) {
+ hcnt = dev->hs_hcnt;
+ lcnt = dev->hs_lcnt;
+ dw_writel(dev, hcnt, DW_IC_HS_SCL_HCNT);
+ dw_writel(dev, lcnt, DW_IC_HS_SCL_LCNT);
+ dev_dbg(dev->dev, "HighSpeed-mode HCNT:LCNT = %d:%d\n",
+ hcnt, lcnt);
+ }
+ }
+
/* Configure SDA Hold Time if required */
reg = dw_readl(dev, DW_IC_COMP_VERSION);
if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
- if (dev->sda_hold_time) {
- dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- } else {
+ if (!dev->sda_hold_time) {
/* Keep previous hold time setting if no one set it */
dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
}
+ /*
+ * Workaround to avoid losing TX arbitration when an I2C slave
+ * pulls SDA down "too quickly" after the falling edge of SCL,
+ * by enabling a non-zero SDA RX hold. The specification says it
+ * extends the incoming SDA low-to-high transition while SCL is
+ * high, but it appears to help with the above issue as well.
+ */
+ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
+ dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+ dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
} else {
dev_warn(dev->dev,
"Hardware too old to adjust SDA hold time.\n");
@@ -387,8 +449,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
/* configure the i2c master */
dw_writel(dev, dev->master_cfg , DW_IC_CON);
- if (dev->release_lock)
- dev->release_lock(dev);
+ i2c_dw_release_lock(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(i2c_dw_init);
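DW_IC_SDA_HOLD now carries two fields: bits 15:0 hold SDA after the falling edge of SCL on transmit, and bits 23:16 (DW_IC_SDA_HOLD_RX_MASK) do the same on receive; the workaround above seeds the RX field with 1 whenever it was left at zero. The register assembly in isolation (hold counts hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define SDA_HOLD_RX_SHIFT 16
    #define SDA_HOLD_RX_MASK  (0xffu << SDA_HOLD_RX_SHIFT)  /* GENMASK(23, 16) */

    int main(void)
    {
        uint32_t sda_hold = 0x0024;  /* hypothetical TX hold from firmware */

        if (!(sda_hold & SDA_HOLD_RX_MASK))
            sda_hold |= 1u << SDA_HOLD_RX_SHIFT;  /* minimal non-zero RX hold */

        printf("IC_SDA_HOLD = 0x%08x\n", sda_hold);  /* prints 0x00010024 */
        return 0;
    }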
@@ -415,27 +477,45 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
- u32 ic_con, ic_tar = 0;
+ u32 ic_tar = 0;
+ bool enabled;
- /* Disable the adapter */
- __i2c_dw_enable(dev, false);
+ enabled = dw_readl(dev, DW_IC_ENABLE_STATUS) & 1;
+
+ if (enabled) {
+ u32 ic_status;
+
+ /*
+ * Only disable the adapter if IC_TAR and IC_CON can't be
+ * dynamically updated
+ */
+ ic_status = dw_readl(dev, DW_IC_STATUS);
+ if (!dev->dynamic_tar_update_enabled ||
+ (ic_status & DW_IC_STATUS_MST_ACTIVITY) ||
+ !(ic_status & DW_IC_STATUS_TFE)) {
+ __i2c_dw_enable_and_wait(dev, false);
+ enabled = false;
+ }
+ }
/* if the slave address is ten bit address, enable 10BITADDR */
- ic_con = dw_readl(dev, DW_IC_CON);
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
- ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ if (dev->dynamic_tar_update_enabled) {
/*
* If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
- * mode has to be enabled via bit 12 of IC_TAR register.
- * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
- * detected from registers.
+ * mode has to be enabled via bit 12 of IC_TAR register,
+ * otherwise bit 4 of IC_CON is used.
*/
- ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_tar = DW_IC_TAR_10BITADDR_MASTER;
} else {
- ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
- }
+ u32 ic_con = dw_readl(dev, DW_IC_CON);
- dw_writel(dev, ic_con, DW_IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ else
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ dw_writel(dev, ic_con, DW_IC_CON);
+ }
/*
* Set the slave (target) address and enable 10-bit addressing mode
@@ -446,8 +526,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
/* enforce disabled interrupts (due to HW issues) */
i2c_dw_disable_int(dev);
- /* Enable the adapter */
- __i2c_dw_enable(dev, true);
+ if (!enabled)
+ __i2c_dw_enable(dev, true);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
@@ -628,7 +708,8 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
}
/*
- * Prepare controller for a transaction and call i2c_dw_xfer_msg
+ * Prepare controller for a transaction and start transfer by calling
+ * i2c_dw_xfer_init()
*/
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
@@ -651,13 +732,9 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev->abort_source = 0;
dev->rx_outstanding = 0;
- if (dev->acquire_lock) {
- ret = dev->acquire_lock(dev);
- if (ret) {
- dev_err(dev->dev, "couldn't acquire bus ownership\n");
- goto done_nolock;
- }
- }
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ goto done_nolock;
ret = i2c_dw_wait_bus_not_busy(dev);
if (ret < 0)
@@ -675,16 +752,6 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
goto done;
}
- /*
- * We must disable the adapter before returning and signaling the end
- * of the current transfer. Otherwise the hardware might continue
- * generating interrupts which in turn causes a race condition with
- * the following transfer. Needs some more investigation if the
- * additional interrupts are a hardware bug or this driver doesn't
- * handle them correctly yet.
- */
- __i2c_dw_enable(dev, false);
-
if (dev->msg_err) {
ret = dev->msg_err;
goto done;
@@ -704,8 +771,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
ret = -EIO;
done:
- if (dev->release_lock)
- dev->release_lock(dev);
+ i2c_dw_release_lock(dev);
done_nolock:
pm_runtime_mark_last_busy(dev->dev);
@@ -822,9 +888,19 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
*/
tx_aborted:
- if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
+ if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
+ || dev->msg_err) {
+ /*
+ * We must disable interrupts before returning and signaling
+ * the end of the current transfer. Otherwise the hardware
+ * might continue generating interrupts for non-existent
+ * transfers.
+ */
+ i2c_dw_disable_int(dev);
+ dw_readl(dev, DW_IC_CLR_INTR);
+
complete(&dev->cmd_complete);
- else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+ } else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
/* workaround to trigger pending interrupt */
stat = dw_readl(dev, DW_IC_INTR_MASK);
i2c_dw_disable_int(dev);
@@ -837,7 +913,7 @@ tx_aborted:
void i2c_dw_disable(struct dw_i2c_dev *dev)
{
/* Disable controller */
- __i2c_dw_enable(dev, false);
+ __i2c_dw_enable_and_wait(dev, false);
/* Disable all interrupts */
dw_writel(dev, 0, DW_IC_INTR_MASK);
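i2c_dw_disable() now waits until the controller actually reports the disabled state instead of fire-and-forget. The helper is defined outside this hunk; a plausible sketch, assuming it polls the IC_ENABLE_STATUS register (register name and retry budget are assumptions here):

static void __i2c_dw_enable_and_wait(struct dw_i2c_dev *dev, bool enable)
{
	int timeout = 100;

	do {
		__i2c_dw_enable(dev, enable);

		/* bit 0 of IC_ENABLE_STATUS mirrors the requested state */
		if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable)
			return;

		/* roughly ten SCL periods at 400 kHz between retries */
		usleep_range(25, 250);
	} while (timeout--);

	dev_warn(dev->dev, "timeout in %sabling adapter\n",
		 enable ? "en" : "dis");
}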
@@ -861,6 +937,7 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
int r;
+ u32 reg;
init_completion(&dev->cmd_complete);
@@ -868,6 +945,26 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
if (r)
return r;
+ r = i2c_dw_acquire_lock(dev);
+ if (r)
+ return r;
+
+ /*
+ * Test if dynamic TAR update is enabled in this controller by writing
+ * to the IC_10BITADDR_MASTER field in IC_CON: when dynamic TAR update
+ * is enabled this field is read-only, so the write should not stick.
+ */
+ reg = dw_readl(dev, DW_IC_CON);
+ dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);
+
+ if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) ==
+ (reg & DW_IC_CON_10BITADDR_MASTER)) {
+ dev->dynamic_tar_update_enabled = true;
+ dev_dbg(dev->dev, "Dynamic TAR update enabled");
+ }
+
+ i2c_dw_release_lock(dev);
+
snprintf(adap->name, sizeof(adap->name),
"Synopsys DesignWare I2C adapter");
adap->retries = 3;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 38493a7142ad..0d44d2ae7d4c 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -26,6 +26,7 @@
#define DW_IC_CON_MASTER 0x1
#define DW_IC_CON_SPEED_STD 0x2
#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_SPEED_HIGH 0x6
#define DW_IC_CON_SPEED_MASK 0x6
#define DW_IC_CON_10BITADDR_MASTER 0x10
#define DW_IC_CON_RESTART_EN 0x20
@@ -57,10 +58,15 @@
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
* @rx_outstanding: current master-rx elements in tx fifo
+ * @clk_freq: bus clock frequency
* @ss_hcnt: standard speed HCNT value
* @ss_lcnt: standard speed LCNT value
* @fs_hcnt: fast speed HCNT value
* @fs_lcnt: fast speed LCNT value
+ * @fp_hcnt: fast plus HCNT value
+ * @fp_lcnt: fast plus LCNT value
+ * @hs_hcnt: high speed HCNT value
+ * @hs_lcnt: high speed LCNT value
* @acquire_lock: function to acquire a hardware lock on the bus
* @release_lock: function to release a hardware lock on the bus
* @pm_runtime_disabled: true if pm runtime is disabled
@@ -96,6 +102,7 @@ struct dw_i2c_dev {
unsigned int tx_fifo_depth;
unsigned int rx_fifo_depth;
int rx_outstanding;
+ u32 clk_freq;
u32 sda_hold_time;
u32 sda_falling_time;
u32 scl_falling_time;
@@ -103,9 +110,14 @@ struct dw_i2c_dev {
u16 ss_lcnt;
u16 fs_hcnt;
u16 fs_lcnt;
+ u16 fp_hcnt;
+ u16 fp_lcnt;
+ u16 hs_hcnt;
+ u16 hs_lcnt;
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_runtime_disabled;
+ bool dynamic_tar_update_enabled;
};
#define ACCESS_SWAP 0x00000001
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index d656657b805c..0b42a12171f3 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -107,6 +107,8 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, NULL);
dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
&dev->sda_hold_time);
+ dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, NULL);
+ dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, NULL);
id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
if (id && id->driver_data)
@@ -155,7 +157,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct resource *mem;
int irq, r;
- u32 clk_freq, ht = 0;
+ u32 acpi_speed, ht = 0;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -175,10 +177,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
/* fast mode by default because of legacy reasons */
- clk_freq = 400000;
+ dev->clk_freq = 400000;
if (pdata) {
- clk_freq = pdata->i2c_scl_freq;
+ dev->clk_freq = pdata->i2c_scl_freq;
} else {
device_property_read_u32(&pdev->dev, "i2c-sda-hold-time-ns",
&ht);
@@ -187,17 +189,24 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "i2c-scl-falling-time-ns",
&dev->scl_falling_time);
device_property_read_u32(&pdev->dev, "clock-frequency",
- &clk_freq);
+ &dev->clk_freq);
}
+ acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+ if (acpi_speed)
+ dev->clk_freq = acpi_speed;
+
if (has_acpi_companion(&pdev->dev))
dw_i2c_acpi_configure(pdev);
/*
- * Only standard mode at 100kHz and fast mode at 400kHz are supported.
+ * Only standard mode at 100kHz, fast mode at 400kHz,
+ * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported.
*/
- if (clk_freq != 100000 && clk_freq != 400000) {
- dev_err(&pdev->dev, "Only 100kHz and 400kHz supported");
+ if (dev->clk_freq != 100000 && dev->clk_freq != 400000
+ && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
+ dev_err(&pdev->dev,
+ "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
return -EINVAL;
}
@@ -212,12 +221,20 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK;
- if (clk_freq == 100000)
- dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
- DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_STD;
- else
- dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
- DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
+
+ dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
+ DW_IC_CON_RESTART_EN;
+
+ switch (dev->clk_freq) {
+ case 100000:
+ dev->master_cfg |= DW_IC_CON_SPEED_STD;
+ break;
+ case 3400000:
+ dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
+ break;
+ default:
+ dev->master_cfg |= DW_IC_CON_SPEED_FAST;
+ }
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (!i2c_dw_plat_prepare_clk(dev, true)) {
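Note that the SPEED_* values added above share one two-bit field in IC_CON (hence DW_IC_CON_SPEED_MASK == 0x6), which is why the switch only ever ORs a single value into master_cfg. A short illustration of reprogramming the speed on an already-configured device, using only the defines from this patch:

/* bits 2:1 of IC_CON select the speed mode:
 * 01b (0x2) = standard (100 kHz),
 * 10b (0x4) = fast / fast plus (400 kHz / 1 MHz),
 * 11b (0x6) = high speed (3.4 MHz)
 */
u32 cfg = dev->master_cfg;

cfg &= ~DW_IC_CON_SPEED_MASK;	/* clear the old speed bits */
cfg |= DW_IC_CON_SPEED_HIGH;	/* select 3.4 MHz operation */
dev->master_cfg = cfg;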
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 9604024e0eb0..50813a24c541 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -347,7 +347,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
- clk_unprepare(i2c->clk);
+ clk_disable_unprepare(i2c->clk);
return ret;
}
@@ -368,6 +368,7 @@ static const struct of_device_id dc_i2c_match[] = {
{ .compatible = "cnxt,cx92755-i2c" },
{ },
};
+MODULE_DEVICE_TABLE(of, dc_i2c_match);
static struct platform_driver dc_i2c_driver = {
.probe = dc_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index b19a310bf9b3..f718ee4e3332 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -487,10 +487,8 @@ static int diolan_u2c_probe(struct usb_interface *interface,
/* and finally attach to i2c layer */
ret = i2c_add_adapter(&dev->adapter);
- if (ret < 0) {
- dev_err(&interface->dev, "failed to add I2C adapter\n");
+ if (ret < 0)
goto error_free;
- }
dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n");
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index f2eb4f76591f..8acda2aa1558 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -228,10 +228,8 @@ static int dln2_i2c_probe(struct platform_device *pdev)
/* and finally attach to i2c layer */
ret = i2c_add_adapter(&dln2->adapter);
- if (ret < 0) {
- dev_err(dev, "failed to add I2C adapter: %d\n", ret);
+ if (ret < 0)
goto out_disable;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
index e253598d764c..aa336ba89aa3 100644
--- a/drivers/i2c/busses/i2c-efm32.c
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -438,7 +438,6 @@ static int efm32_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&ddata->adapter);
if (ret) {
- dev_err(&pdev->dev, "failed to add i2c adapter (%d)\n", ret);
free_irq(ddata->irq, ddata);
err_disable_clk:
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index c0e3ada02876..bea607149972 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -796,10 +796,8 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
exynos5_i2c_reset(i2c);
ret = i2c_add_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+ if (ret < 0)
goto err_clk;
- }
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 7c6966434ee7..ae7f3180f7e8 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -478,10 +478,8 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
pm_runtime_enable(priv->dev);
ret = i2c_add_adapter(&priv->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+ if (ret < 0)
goto err_runtime;
- }
return ret;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 26298af73232..eb3627f35d12 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -64,6 +64,7 @@
* Broxton (SOC) 0x5ad4 32 hard yes yes yes
* Lewisburg (PCH) 0xa1a3 32 hard yes yes yes
* Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes
+ * Kaby Lake PCH-H (PCH) 0xa2a3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -145,6 +146,7 @@
#define SMBHSTCFG_HST_EN 1
#define SMBHSTCFG_SMB_SMI_EN 2
#define SMBHSTCFG_I2C_EN 4
+#define SMBHSTCFG_SPD_WD 0x10
/* TCO configuration bits for TCOCTL */
#define TCOCTL_EN 0x0100
@@ -226,6 +228,7 @@
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
+#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
struct i801_mux_config {
char *gpio_chip;
@@ -863,9 +866,16 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
block = 1;
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- /* NB: page 240 of ICH5 datasheet shows that the R/#W
- * bit should be cleared here, even when reading */
- outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
+ /*
+ * NB: page 240 of ICH5 datasheet shows that the R/#W
+ * bit should be cleared here, even when reading.
+ * However if SPD Write Disable is set (Lynx Point and later),
+ * the read will fail if we don't set the R/#W bit.
+ */
+ outb_p(((addr & 0x7f) << 1) |
+ ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ?
+ (read_write & 0x01) : 0),
+ SMBHSTADD(priv));
if (read_write == I2C_SMBUS_READ) {
/* NB: page 240 of ICH5 datasheet also shows
* that DATA1 is the cmd field when reading */
@@ -1006,6 +1016,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS) },
{ 0, }
};
@@ -1482,6 +1493,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
+ case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
priv->features |= FEATURE_SMBUS_PEC;
@@ -1569,6 +1581,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Disable SMBus interrupt feature if SMBus using SMI# */
priv->features &= ~FEATURE_IRQ;
}
+ if (temp & SMBHSTCFG_SPD_WD)
+ dev_info(&dev->dev, "SPD Write Disable is set\n");
/* Clear special mode bits */
if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
@@ -1615,7 +1629,6 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"SMBus I801 adapter at %04lx", priv->smba);
err = i2c_add_adapter(&priv->adapter);
if (err) {
- dev_err(&dev->dev, "Failed to add SMBus adapter\n");
i801_acpi_remove(priv);
return err;
}
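The address-byte computation above packs two things into SMBHSTADD: the 7-bit slave address in bits 7:1 and the R/#W bit in bit 0, which must now be set for reads when SPD Write Disable is active. A standalone sketch of the encoding (the helper name is hypothetical, for illustration only):

/* hypothetical helper mirroring the SMBHSTADD encoding used above */
static u8 i801_xfer_addr(u8 addr, char read_write, bool spd_wd)
{
	u8 val = (addr & 0x7f) << 1;	/* 7-bit address in bits 7:1 */

	/* with SPD Write Disable, reads must assert the R/#W bit */
	if (spd_wd && read_write == I2C_SMBUS_READ)
		val |= 0x01;

	return val;
}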
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index cdaa7be2cd1b..412b91d255ad 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -751,10 +751,8 @@ static int iic_probe(struct platform_device *ofdev)
adap->timeout = HZ;
ret = i2c_add_adapter(adap);
- if (ret < 0) {
- dev_err(&ofdev->dev, "failed to register i2c adapter\n");
+ if (ret < 0)
goto error_cleanup;
- }
dev_info(&ofdev->dev, "using %s mode\n",
dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index ea20425b6972..db8e8b40569d 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1394,10 +1394,8 @@ static int img_i2c_probe(struct platform_device *pdev)
goto disable_clk;
ret = i2c_add_numbered_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add adapter\n");
+ if (ret < 0)
goto disable_clk;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1844bc9f7cd5..47fc1f1acff7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -984,11 +984,24 @@ static void i2c_imx_unprepare_recovery(struct i2c_adapter *adap)
pinctrl_select_state(i2c_imx->pinctrl, i2c_imx->pinctrl_pins_default);
}
-static void i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
+/*
+ * We switch SCL and SDA to their GPIO function and do some bitbanging
+ * for bus recovery. These alternative pinmux settings can be
+ * described in the device tree by a separate pinctrl state "gpio". If
+ * this state is missing, it is not a big problem; the only implication
+ * that we can't do bus recovery.
+ */
+static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
struct platform_device *pdev)
{
struct i2c_bus_recovery_info *rinfo = &i2c_imx->rinfo;
+ i2c_imx->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!i2c_imx->pinctrl || IS_ERR(i2c_imx->pinctrl)) {
+ dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n");
+ return PTR_ERR(i2c_imx->pinctrl);
+ }
+
i2c_imx->pinctrl_pins_default = pinctrl_lookup_state(i2c_imx->pinctrl,
PINCTRL_STATE_DEFAULT);
i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
@@ -996,12 +1009,15 @@ static void i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0);
rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0);
- if (!gpio_is_valid(rinfo->sda_gpio) ||
- !gpio_is_valid(rinfo->scl_gpio) ||
- IS_ERR(i2c_imx->pinctrl_pins_default) ||
- IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
+ if (rinfo->sda_gpio == -EPROBE_DEFER ||
+ rinfo->scl_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(rinfo->sda_gpio) ||
+ !gpio_is_valid(rinfo->scl_gpio) ||
+ IS_ERR(i2c_imx->pinctrl_pins_default) ||
+ IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
dev_dbg(&pdev->dev, "recovery information incomplete\n");
- return;
+ return 0;
}
dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
@@ -1011,6 +1027,8 @@ static void i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
rinfo->recover_bus = i2c_generic_gpio_recovery;
i2c_imx->adapter.bus_recovery_info = rinfo;
+
+ return 0;
}
static u32 i2c_imx_func(struct i2c_adapter *adapter)
@@ -1081,12 +1099,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
return ret;
}
- i2c_imx->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR(i2c_imx->pinctrl)) {
- ret = PTR_ERR(i2c_imx->pinctrl);
- goto clk_disable;
- }
-
/* Request IRQ */
ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
pdev->name, i2c_imx);
@@ -1125,14 +1137,16 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx, IMX_I2C_I2CR);
imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
- i2c_imx_init_recovery_info(i2c_imx, pdev);
+ /* Init optional bus recovery function */
+ ret = i2c_imx_init_recovery_info(i2c_imx, pdev);
+ /* Give it another chance if the pinctrl driver is not ready yet */
+ if (ret == -EPROBE_DEFER)
+ goto rpm_disable;
/* Add I2C adapter */
ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
- if (ret < 0) {
- dev_err(&pdev->dev, "registration failed\n");
+ if (ret < 0)
goto rpm_disable;
- }
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index c2f25f19d76f..0cf1379f4e80 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -288,10 +288,8 @@ static int smbus_sch_probe(struct platform_device *dev)
"SMBus SCH adapter at %04x", sch_smba);
retval = i2c_add_adapter(&sch_adapter);
- if (retval) {
- dev_err(&dev->dev, "Couldn't register adapter!\n");
+ if (retval)
sch_smba = 0;
- }
return retval;
}
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 1c8707710098..f573448d2132 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -922,10 +922,8 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
err = i2c_add_adapter(&priv->adapter);
- if (err) {
- dev_err(&pdev->dev, "Failed to add SMBus iSMT adapter\n");
+ if (err)
return -ENODEV;
- }
return 0;
}
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index cd9872594fe2..30132c3957cd 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -729,6 +729,7 @@ static const struct of_device_id jz4780_i2c_of_matches[] = {
{ .compatible = "ingenic,jz4780-i2c", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches);
static int jz4780_i2c_probe(struct platform_device *pdev)
{
@@ -798,10 +799,8 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
goto err;
ret = i2c_add_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add bus\n");
+ if (ret < 0)
goto err;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 586a15205e61..9b1fef455a89 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -432,10 +432,8 @@ static int i2c_lpc2k_probe(struct platform_device *pdev)
i2c->adap.dev.of_node = pdev->dev.of_node;
ret = i2c_add_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add adapter!\n");
+ if (ret < 0)
goto fail_clk;
- }
dev_info(&pdev->dev, "LPC2K I2C adapter\n");
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 76e28980904f..2aa61bbbd307 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -453,7 +453,6 @@ static int meson_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0) {
- dev_err(&pdev->dev, "can't register adapter\n");
clk_unprepare(i2c->clk);
return ret;
}
@@ -473,6 +472,7 @@ static int meson_i2c_remove(struct platform_device *pdev)
static const struct of_device_id meson_i2c_match[] = {
{ .compatible = "amlogic,meson6-i2c" },
+ { .compatible = "amlogic,meson-gxbb-i2c" },
{ },
};
MODULE_DEVICE_TABLE(of, meson_i2c_match);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 48ecffecc0ed..565a49a0c564 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -737,10 +737,8 @@ static int fsl_i2c_probe(struct platform_device *op)
i2c->adap.dev.of_node = of_node_get(op->dev.of_node);
result = i2c_add_adapter(&i2c->adap);
- if (result < 0) {
- dev_err(i2c->dev, "failed to add adapter\n");
+ if (result < 0)
goto fail_add;
- }
return result;
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index d9373e60be8a..4a7d9bc2142b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -786,10 +786,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c->adap, i2c);
ret = i2c_add_adapter(&i2c->adap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add i2c bus to i2c core\n");
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 033846cdf266..5738556b6aac 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -868,7 +868,6 @@ static int mxs_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, i2c);
err = i2c_add_numbered_adapter(adap);
if (err) {
- dev_err(dev, "Failed to add adapter (%d)\n", err);
writel(MXS_I2C_CTRL0_SFTRST,
i2c->regs + MXS_I2C_CTRL0_SET);
return err;
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 42fcc9458432..374b35e7e450 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -366,7 +366,6 @@ static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
error = i2c_add_adapter(&smbus->adapter);
if (error) {
- dev_err(&smbus->adapter.dev, "Failed to register adapter.\n");
release_region(smbus->base, smbus->size);
return error;
}
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index bcd17e8cbcb4..da6609d62848 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1046,10 +1046,8 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
adap->name, dev->virtbase);
ret = i2c_add_adapter(adap);
- if (ret) {
- dev_err(&adev->dev, "failed to add adapter\n");
+ if (ret)
goto err_no_adap;
- }
pm_runtime_put(&adev->dev);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index ac88a524143e..34f1889a4073 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -494,10 +494,8 @@ static int ocores_i2c_probe(struct platform_device *pdev)
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add adapter\n");
+ if (ret)
goto err_clk;
- }
/* add in known devices to the bus */
if (pdata) {
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon-core.c
index 30ae35146723..419b54bfc7c7 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -4,280 +4,127 @@
*
* Portions Copyright (C) 2010 - 2016 Cavium, Inc.
*
- * This is a driver for the i2c adapter in Cavium Networks' OCTEON processors.
+ * This file contains the shared part of the driver for the i2c adapter in
+ * Cavium Networks' OCTEON processors and ThunderX SOCs.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-#include <linux/atomic.h>
-#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/of.h>
-
-#include <asm/octeon/octeon.h>
-
-#define DRV_NAME "i2c-octeon"
-
-/* Register offsets */
-#define SW_TWSI 0x00
-#define TWSI_INT 0x10
-#define SW_TWSI_EXT 0x18
-
-/* Controller command patterns */
-#define SW_TWSI_V BIT_ULL(63) /* Valid bit */
-#define SW_TWSI_EIA BIT_ULL(61) /* Extended internal address */
-#define SW_TWSI_R BIT_ULL(56) /* Result or read bit */
-#define SW_TWSI_SOVR BIT_ULL(55) /* Size override */
-#define SW_TWSI_SIZE_SHIFT 52
-#define SW_TWSI_ADDR_SHIFT 40
-#define SW_TWSI_IA_SHIFT 32 /* Internal address */
-
-/* Controller opcode word (bits 60:57) */
-#define SW_TWSI_OP_SHIFT 57
-#define SW_TWSI_OP_7 (0ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_7_IA (1ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_10 (2ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_10_IA (3ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_TWSI_CLK (4ULL << SW_TWSI_OP_SHIFT)
-#define SW_TWSI_OP_EOP (6ULL << SW_TWSI_OP_SHIFT) /* Extended opcode */
-
-/* Controller extended opcode word (bits 34:32) */
-#define SW_TWSI_EOP_SHIFT 32
-#define SW_TWSI_EOP_TWSI_DATA (SW_TWSI_OP_EOP | 1ULL << SW_TWSI_EOP_SHIFT)
-#define SW_TWSI_EOP_TWSI_CTL (SW_TWSI_OP_EOP | 2ULL << SW_TWSI_EOP_SHIFT)
-#define SW_TWSI_EOP_TWSI_CLKCTL (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
-#define SW_TWSI_EOP_TWSI_STAT (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
-#define SW_TWSI_EOP_TWSI_RST (SW_TWSI_OP_EOP | 7ULL << SW_TWSI_EOP_SHIFT)
-
-/* Controller command and status bits */
-#define TWSI_CTL_CE 0x80 /* High level controller enable */
-#define TWSI_CTL_ENAB 0x40 /* Bus enable */
-#define TWSI_CTL_STA 0x20 /* Master-mode start, HW clears when done */
-#define TWSI_CTL_STP 0x10 /* Master-mode stop, HW clears when done */
-#define TWSI_CTL_IFLG 0x08 /* HW event, SW writes 0 to ACK */
-#define TWSI_CTL_AAK 0x04 /* Assert ACK */
-
-/* Status values */
-#define STAT_ERROR 0x00
-#define STAT_START 0x08
-#define STAT_REP_START 0x10
-#define STAT_TXADDR_ACK 0x18
-#define STAT_TXADDR_NAK 0x20
-#define STAT_TXDATA_ACK 0x28
-#define STAT_TXDATA_NAK 0x30
-#define STAT_LOST_ARB_38 0x38
-#define STAT_RXADDR_ACK 0x40
-#define STAT_RXADDR_NAK 0x48
-#define STAT_RXDATA_ACK 0x50
-#define STAT_RXDATA_NAK 0x58
-#define STAT_SLAVE_60 0x60
-#define STAT_LOST_ARB_68 0x68
-#define STAT_SLAVE_70 0x70
-#define STAT_LOST_ARB_78 0x78
-#define STAT_SLAVE_80 0x80
-#define STAT_SLAVE_88 0x88
-#define STAT_GENDATA_ACK 0x90
-#define STAT_GENDATA_NAK 0x98
-#define STAT_SLAVE_A0 0xA0
-#define STAT_SLAVE_A8 0xA8
-#define STAT_LOST_ARB_B0 0xB0
-#define STAT_SLAVE_LOST 0xB8
-#define STAT_SLAVE_NAK 0xC0
-#define STAT_SLAVE_ACK 0xC8
-#define STAT_AD2W_ACK 0xD0
-#define STAT_AD2W_NAK 0xD8
-#define STAT_IDLE 0xF8
-
-/* TWSI_INT values */
-#define TWSI_INT_ST_INT BIT_ULL(0)
-#define TWSI_INT_TS_INT BIT_ULL(1)
-#define TWSI_INT_CORE_INT BIT_ULL(2)
-#define TWSI_INT_ST_EN BIT_ULL(4)
-#define TWSI_INT_TS_EN BIT_ULL(5)
-#define TWSI_INT_CORE_EN BIT_ULL(6)
-#define TWSI_INT_SDA_OVR BIT_ULL(8)
-#define TWSI_INT_SCL_OVR BIT_ULL(9)
-#define TWSI_INT_SDA BIT_ULL(10)
-#define TWSI_INT_SCL BIT_ULL(11)
-
-#define I2C_OCTEON_EVENT_WAIT 80 /* microseconds */
-
-struct octeon_i2c {
- wait_queue_head_t queue;
- struct i2c_adapter adap;
- int irq;
- int hlc_irq; /* For cn7890 only */
- u32 twsi_freq;
- int sys_freq;
- void __iomem *twsi_base;
- struct device *dev;
- bool hlc_enabled;
- bool broken_irq_mode;
- bool broken_irq_check;
- void (*int_enable)(struct octeon_i2c *);
- void (*int_disable)(struct octeon_i2c *);
- void (*hlc_int_enable)(struct octeon_i2c *);
- void (*hlc_int_disable)(struct octeon_i2c *);
- atomic_t int_enable_cnt;
- atomic_t hlc_int_enable_cnt;
-};
-
-static void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
-{
- __raw_writeq(val, addr);
- __raw_readq(addr); /* wait for write to land */
-}
-
-/**
- * octeon_i2c_reg_write - write an I2C core register
- * @i2c: The struct octeon_i2c
- * @eop_reg: Register selector
- * @data: Value to be written
- *
- * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
- */
-static void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
-{
- u64 tmp;
- __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI);
- do {
- tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
- } while ((tmp & SW_TWSI_V) != 0);
-}
-
-#define octeon_i2c_ctl_write(i2c, val) \
- octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CTL, val)
-#define octeon_i2c_data_write(i2c, val) \
- octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_DATA, val)
+#include "i2c-octeon-core.h"
-/**
- * octeon_i2c_reg_read - read lower bits of an I2C core register
- * @i2c: The struct octeon_i2c
- * @eop_reg: Register selector
- *
- * Returns the data.
- *
- * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
- */
-static u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
+/* interrupt service routine */
+irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
{
- u64 tmp;
+ struct octeon_i2c *i2c = dev_id;
- __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI);
- do {
- tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
- } while ((tmp & SW_TWSI_V) != 0);
+ i2c->int_disable(i2c);
+ wake_up(&i2c->queue);
- return tmp & 0xFF;
+ return IRQ_HANDLED;
}
-#define octeon_i2c_ctl_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
-#define octeon_i2c_data_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
-#define octeon_i2c_stat_read(i2c) \
- octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
-
-/**
- * octeon_i2c_read_int - read the TWSI_INT register
- * @i2c: The struct octeon_i2c
- *
- * Returns the value of the register.
- */
-static u64 octeon_i2c_read_int(struct octeon_i2c *i2c)
+static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
{
- return __raw_readq(i2c->twsi_base + TWSI_INT);
+ return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG);
}
-/**
- * octeon_i2c_write_int - write the TWSI_INT register
- * @i2c: The struct octeon_i2c
- * @data: Value to be written
- */
-static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
+static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
{
- octeon_i2c_writeq_flush(data, i2c->twsi_base + TWSI_INT);
-}
+ if (octeon_i2c_test_iflg(i2c))
+ return true;
-/**
- * octeon_i2c_int_enable - enable the CORE interrupt
- * @i2c: The struct octeon_i2c
- *
- * The interrupt will be asserted when there is non-STAT_IDLE state in
- * the SW_TWSI_EOP_TWSI_STAT register.
- */
-static void octeon_i2c_int_enable(struct octeon_i2c *i2c)
-{
- octeon_i2c_write_int(i2c, TWSI_INT_CORE_EN);
-}
+ if (*first) {
+ *first = false;
+ return false;
+ }
-/* disable the CORE interrupt */
-static void octeon_i2c_int_disable(struct octeon_i2c *i2c)
-{
- /* clear TS/ST/IFLG events */
- octeon_i2c_write_int(i2c, 0);
+ /*
+ * IRQ has signaled an event but IFLG hasn't changed.
+ * Sleep and retry once.
+ */
+ usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
+ return octeon_i2c_test_iflg(i2c);
}
/**
- * octeon_i2c_int_enable78 - enable the CORE interrupt
+ * octeon_i2c_wait - wait for the IFLG to be set
* @i2c: The struct octeon_i2c
*
- * The interrupt will be asserted when there is non-STAT_IDLE state in the
- * SW_TWSI_EOP_TWSI_STAT register.
+ * Returns 0 on success, otherwise a negative errno.
*/
-static void octeon_i2c_int_enable78(struct octeon_i2c *i2c)
-{
- atomic_inc_return(&i2c->int_enable_cnt);
- enable_irq(i2c->irq);
-}
-
-static void __octeon_i2c_irq_disable(atomic_t *cnt, int irq)
+static int octeon_i2c_wait(struct octeon_i2c *i2c)
{
- int count;
+ long time_left;
+ bool first = true;
/*
- * The interrupt can be disabled in two places, but we only
- * want to make the disable_irq_nosync() call once, so keep
- * track with the atomic variable.
+ * Some chip revisions don't assert the irq in the interrupt
+ * controller. So we must poll for the IFLG change.
*/
- count = atomic_dec_if_positive(cnt);
- if (count >= 0)
- disable_irq_nosync(irq);
+ if (i2c->broken_irq_mode) {
+ u64 end = get_jiffies_64() + i2c->adap.timeout;
+
+ while (!octeon_i2c_test_iflg(i2c) &&
+ time_before64(get_jiffies_64(), end))
+ usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
+
+ return octeon_i2c_test_iflg(i2c) ? 0 : -ETIMEDOUT;
+ }
+
+ i2c->int_enable(i2c);
+ time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_ready(i2c, &first),
+ i2c->adap.timeout);
+ i2c->int_disable(i2c);
+
+ if (i2c->broken_irq_check && !time_left &&
+ octeon_i2c_test_iflg(i2c)) {
+ dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
+ i2c->broken_irq_mode = true;
+ return 0;
+ }
+
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
}
-/* disable the CORE interrupt */
-static void octeon_i2c_int_disable78(struct octeon_i2c *i2c)
+static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
{
- __octeon_i2c_irq_disable(&i2c->int_enable_cnt, i2c->irq);
+ return (__raw_readq(i2c->twsi_base + SW_TWSI(i2c)) & SW_TWSI_V) == 0;
}
-/**
- * octeon_i2c_hlc_int_enable78 - enable the ST interrupt
- * @i2c: The struct octeon_i2c
- *
- * The interrupt will be asserted when there is non-STAT_IDLE state in
- * the SW_TWSI_EOP_TWSI_STAT register.
- */
-static void octeon_i2c_hlc_int_enable78(struct octeon_i2c *i2c)
+static bool octeon_i2c_hlc_test_ready(struct octeon_i2c *i2c, bool *first)
{
- atomic_inc_return(&i2c->hlc_int_enable_cnt);
- enable_irq(i2c->hlc_irq);
+ /* check if valid bit is cleared */
+ if (octeon_i2c_hlc_test_valid(i2c))
+ return true;
+
+ if (*first) {
+ *first = false;
+ return false;
+ }
+
+ /*
+ * IRQ has signaled an event but valid bit isn't cleared.
+ * Sleep and retry once.
+ */
+ usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
+ return octeon_i2c_hlc_test_valid(i2c);
}
-/* disable the ST interrupt */
-static void octeon_i2c_hlc_int_disable78(struct octeon_i2c *i2c)
+static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
{
- __octeon_i2c_irq_disable(&i2c->hlc_int_enable_cnt, i2c->hlc_irq);
+ /* clear ST/TS events, listen for neither */
+ octeon_i2c_write_int(i2c, TWSI_INT_ST_INT | TWSI_INT_TS_INT);
}
/*
@@ -321,83 +168,41 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
}
-/* interrupt service routine */
-static irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
-{
- struct octeon_i2c *i2c = dev_id;
-
- i2c->int_disable(i2c);
- wake_up(&i2c->queue);
-
- return IRQ_HANDLED;
-}
-
-/* HLC interrupt service routine */
-static irqreturn_t octeon_i2c_hlc_isr78(int irq, void *dev_id)
-{
- struct octeon_i2c *i2c = dev_id;
-
- i2c->hlc_int_disable(i2c);
- wake_up(&i2c->queue);
-
- return IRQ_HANDLED;
-}
-
-static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
-{
- return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG);
-}
-
-static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
-{
- if (octeon_i2c_test_iflg(i2c))
- return true;
-
- if (*first) {
- *first = false;
- return false;
- }
-
- /*
- * IRQ has signaled an event but IFLG hasn't changed.
- * Sleep and retry once.
- */
- usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_test_iflg(i2c);
-}
-
/**
- * octeon_i2c_wait - wait for the IFLG to be set
+ * octeon_i2c_hlc_wait - wait for an HLC operation to complete
* @i2c: The struct octeon_i2c
*
- * Returns 0 on success, otherwise a negative errno.
+ * Returns 0 on success, otherwise -ETIMEDOUT.
*/
-static int octeon_i2c_wait(struct octeon_i2c *i2c)
+static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
{
- long time_left;
- bool first = 1;
+ bool first = true;
+ int time_left;
/*
- * Some chip revisions don't assert the irq in the interrupt
- * controller. So we must poll for the IFLG change.
+ * Some cn38xx boards don't assert the irq in the interrupt
+ * controller. So we must poll for the valid bit change.
*/
if (i2c->broken_irq_mode) {
u64 end = get_jiffies_64() + i2c->adap.timeout;
- while (!octeon_i2c_test_iflg(i2c) &&
+ while (!octeon_i2c_hlc_test_valid(i2c) &&
time_before64(get_jiffies_64(), end))
usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_test_iflg(i2c) ? 0 : -ETIMEDOUT;
+ return octeon_i2c_hlc_test_valid(i2c) ? 0 : -ETIMEDOUT;
}
- i2c->int_enable(i2c);
- time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_ready(i2c, &first),
+ i2c->hlc_int_enable(i2c);
+ time_left = wait_event_timeout(i2c->queue,
+ octeon_i2c_hlc_test_ready(i2c, &first),
i2c->adap.timeout);
- i2c->int_disable(i2c);
+ i2c->hlc_int_disable(i2c);
+ if (!time_left)
+ octeon_i2c_hlc_int_clear(i2c);
if (i2c->broken_irq_check && !time_left &&
- octeon_i2c_test_iflg(i2c)) {
+ octeon_i2c_hlc_test_valid(i2c)) {
dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
i2c->broken_irq_mode = true;
return 0;
@@ -405,13 +210,21 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
if (!time_left)
return -ETIMEDOUT;
-
return 0;
}
static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
{
- u8 stat = octeon_i2c_stat_read(i2c);
+ u8 stat;
+
+ /*
+ * This is ugly... in HLC mode the status is not in the status register
+ * but in the lower 8 bits of SW_TWSI.
+ */
+ if (i2c->hlc_enabled)
+ stat = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+ else
+ stat = octeon_i2c_stat_read(i2c);
switch (stat) {
/* Everything is fine */
@@ -470,83 +283,157 @@ static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
}
}
-static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
+static int octeon_i2c_recovery(struct octeon_i2c *i2c)
{
- return (__raw_readq(i2c->twsi_base + SW_TWSI) & SW_TWSI_V) == 0;
+ int ret;
+
+ ret = i2c_recover_bus(&i2c->adap);
+ if (ret)
+ /* recover failed, try hardware re-init */
+ ret = octeon_i2c_init_lowlevel(i2c);
+ return ret;
}
-static bool octeon_i2c_hlc_test_ready(struct octeon_i2c *i2c, bool *first)
+/**
+ * octeon_i2c_start - send START to the bus
+ * @i2c: The struct octeon_i2c
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_start(struct octeon_i2c *i2c)
{
- /* check if valid bit is cleared */
- if (octeon_i2c_hlc_test_valid(i2c))
- return true;
+ int ret;
+ u8 stat;
- if (*first) {
- *first = false;
- return false;
- }
+ octeon_i2c_hlc_disable(i2c);
- /*
- * IRQ has signaled an event but valid bit isn't cleared.
- * Sleep and retry once.
- */
- usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
- return octeon_i2c_hlc_test_valid(i2c);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
+ ret = octeon_i2c_wait(i2c);
+ if (ret)
+ goto error;
+
+ stat = octeon_i2c_stat_read(i2c);
+ if (stat == STAT_START || stat == STAT_REP_START)
+ /* START successful, bail out */
+ return 0;
+
+error:
+ /* START failed, try to recover */
+ ret = octeon_i2c_recovery(i2c);
+ return (ret) ? ret : -EAGAIN;
}
-static void octeon_i2c_hlc_int_enable(struct octeon_i2c *i2c)
+/* send STOP to the bus */
+static void octeon_i2c_stop(struct octeon_i2c *i2c)
{
- octeon_i2c_write_int(i2c, TWSI_INT_ST_EN);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STP);
}
-static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
+/**
+ * octeon_i2c_read - receive data from the bus via low-level controller
+ * @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the location to store the data
+ * @rlength: Length of the data
+ * @recv_len: flag for length byte
+ *
+ * The address is sent over the bus, then the data is read.
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
+ u8 *data, u16 *rlength, bool recv_len)
{
- /* clear ST/TS events, listen for neither */
- octeon_i2c_write_int(i2c, TWSI_INT_ST_INT | TWSI_INT_TS_INT);
+ int i, result, length = *rlength;
+ bool final_read = false;
+
+ octeon_i2c_data_write(i2c, (target << 1) | 1);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+
+ result = octeon_i2c_wait(i2c);
+ if (result)
+ return result;
+
+ /* address OK? */
+ result = octeon_i2c_check_status(i2c, false);
+ if (result)
+ return result;
+
+ for (i = 0; i < length; i++) {
+ /*
+ * For the last byte to receive TWSI_CTL_AAK must not be set.
+ *
+ * A special case is I2C_M_RECV_LEN where we don't know the
+ * additional length yet. If recv_len is set we assume we're
+ * not reading the final byte and therefore need to set
+ * TWSI_CTL_AAK.
+ */
+ if ((i + 1 == length) && !(recv_len && i == 0))
+ final_read = true;
+
+ /* clear iflg to allow next event */
+ if (final_read)
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+ else
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_AAK);
+
+ result = octeon_i2c_wait(i2c);
+ if (result)
+ return result;
+
+ data[i] = octeon_i2c_data_read(i2c, &result);
+ if (result)
+ return result;
+ if (recv_len && i == 0) {
+ if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+ return -EPROTO;
+ length += data[i];
+ }
+
+ result = octeon_i2c_check_status(i2c, final_read);
+ if (result)
+ return result;
+ }
+ *rlength = length;
+ return 0;
}
/**
- * octeon_i2c_hlc_wait - wait for an HLC operation to complete
+ * octeon_i2c_write - send data to the bus via low-level controller
* @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the data to be sent
+ * @length: Length of the data
*
- * Returns 0 on success, otherwise -ETIMEDOUT.
+ * The address is sent over the bus, then the data.
+ *
+ * Returns 0 on success, otherwise a negative errno.
*/
-static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
+static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
+ const u8 *data, int length)
{
- bool first = 1;
- int time_left;
+ int i, result;
- /*
- * Some cn38xx boards don't assert the irq in the interrupt
- * controller. So we must poll for the valid bit change.
- */
- if (i2c->broken_irq_mode) {
- u64 end = get_jiffies_64() + i2c->adap.timeout;
+ octeon_i2c_data_write(i2c, target << 1);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
- while (!octeon_i2c_hlc_test_valid(i2c) &&
- time_before64(get_jiffies_64(), end))
- usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
+ result = octeon_i2c_wait(i2c);
+ if (result)
+ return result;
- return octeon_i2c_hlc_test_valid(i2c) ? 0 : -ETIMEDOUT;
- }
+ for (i = 0; i < length; i++) {
+ result = octeon_i2c_check_status(i2c, false);
+ if (result)
+ return result;
- i2c->hlc_int_enable(i2c);
- time_left = wait_event_timeout(i2c->queue,
- octeon_i2c_hlc_test_ready(i2c, &first),
- i2c->adap.timeout);
- i2c->hlc_int_disable(i2c);
- if (!time_left)
- octeon_i2c_hlc_int_clear(i2c);
+ octeon_i2c_data_write(i2c, data[i]);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
- if (i2c->broken_irq_check && !time_left &&
- octeon_i2c_hlc_test_valid(i2c)) {
- dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
- i2c->broken_irq_mode = true;
- return 0;
+ result = octeon_i2c_wait(i2c);
+ if (result)
+ return result;
}
- if (!time_left)
- return -ETIMEDOUT;
return 0;
}
@@ -570,20 +457,20 @@ static int octeon_i2c_hlc_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
else
cmd |= SW_TWSI_OP_7;
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
- return -EAGAIN;
+ return octeon_i2c_check_status(i2c, false);
for (i = 0, j = msgs[0].len - 1; i < msgs[0].len && i < 4; i++, j--)
msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
if (msgs[0].len > 4) {
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT(i2c));
for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--)
msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
}
@@ -620,19 +507,17 @@ static int octeon_i2c_hlc_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--)
ext |= (u64)msgs[0].buf[j] << (8 * i);
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c));
}
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
- return -EAGAIN;
-
- ret = octeon_i2c_check_status(i2c, false);
+ return octeon_i2c_check_status(i2c, false);
err:
return ret;
@@ -663,27 +548,27 @@ static int octeon_i2c_hlc_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs
cmd |= SW_TWSI_EIA;
ext = (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT;
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c));
} else {
cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
}
octeon_i2c_hlc_int_clear(i2c);
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
- return -EAGAIN;
+ return octeon_i2c_check_status(i2c, false);
for (i = 0, j = msgs[1].len - 1; i < msgs[1].len && i < 4; i++, j--)
msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
if (msgs[1].len > 4) {
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT(i2c));
for (i = 0; i < msgs[1].len - 4 && i < 4; i++, j--)
msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
}
@@ -730,27 +615,85 @@ static int octeon_i2c_hlc_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msg
set_ext = true;
}
if (set_ext)
- octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT(i2c));
octeon_i2c_hlc_int_clear(i2c);
- octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI(i2c));
ret = octeon_i2c_hlc_wait(i2c);
if (ret)
goto err;
- cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
if ((cmd & SW_TWSI_R) == 0)
- return -EAGAIN;
-
- ret = octeon_i2c_check_status(i2c, false);
+ return octeon_i2c_check_status(i2c, false);
err:
return ret;
}
+/**
+ * octeon_i2c_xfer - The driver's master_xfer function
+ * @adap: Pointer to the i2c_adapter structure
+ * @msgs: Pointer to the messages to be processed
+ * @num: Length of the MSGS array
+ *
+ * Returns the number of messages processed, or a negative errno on failure.
+ */
+int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ int i, ret = 0;
+
+ if (num == 1) {
+ if (msgs[0].len > 0 && msgs[0].len <= 8) {
+ if (msgs[0].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_write(i2c, msgs);
+ goto out;
+ }
+ } else if (num == 2) {
+ if ((msgs[0].flags & I2C_M_RD) == 0 &&
+ (msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
+ msgs[0].len > 0 && msgs[0].len <= 2 &&
+ msgs[1].len > 0 && msgs[1].len <= 8 &&
+ msgs[0].addr == msgs[1].addr) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_comp_write(i2c, msgs);
+ goto out;
+ }
+ }
+
+ for (i = 0; ret == 0 && i < num; i++) {
+ struct i2c_msg *pmsg = &msgs[i];
+
+ /* zero-length messages are not supported */
+ if (!pmsg->len) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ ret = octeon_i2c_start(i2c);
+ if (ret)
+ return ret;
+
+ if (pmsg->flags & I2C_M_RD)
+ ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
+ &pmsg->len, pmsg->flags & I2C_M_RECV_LEN);
+ else
+ ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
+ pmsg->len);
+ }
+ octeon_i2c_stop(i2c);
+out:
+ return (ret != 0) ? ret : num;
+}
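For reference, the two-message fast path above matches the classic register-read shape. A minimal sketch of a transaction that would be routed to octeon_i2c_hlc_comp_read() (device and register addresses are made up for illustration):

/* read 4 bytes from register 0x10 of a device at address 0x50;
 * msgs[0] is a 1-byte write, msgs[1] an 8-byte-or-less read,
 * so both satisfy the HLC limits checked above
 */
u8 reg = 0x10, buf[4];
struct i2c_msg msgs[2] = {
	{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
	{ .addr = 0x50, .flags = I2C_M_RD, .len = 4, .buf = buf  },
};
/* octeon_i2c_xfer(adap, msgs, 2) takes the HLC combined-read path */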
+
/* calculate and set clock divisors */
-static void octeon_i2c_set_clock(struct octeon_i2c *i2c)
+void octeon_i2c_set_clock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
@@ -791,7 +734,7 @@ static void octeon_i2c_set_clock(struct octeon_i2c *i2c)
octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
}
-static int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
+int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
{
u8 status = 0;
int tries;
@@ -818,219 +761,6 @@ static int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
return 0;
}
-static int octeon_i2c_recovery(struct octeon_i2c *i2c)
-{
- int ret;
-
- ret = i2c_recover_bus(&i2c->adap);
- if (ret)
- /* recover failed, try hardware re-init */
- ret = octeon_i2c_init_lowlevel(i2c);
- return ret;
-}
-
-/**
- * octeon_i2c_start - send START to the bus
- * @i2c: The struct octeon_i2c
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_start(struct octeon_i2c *i2c)
-{
- int ret;
- u8 stat;
-
- octeon_i2c_hlc_disable(i2c);
-
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
- ret = octeon_i2c_wait(i2c);
- if (ret)
- goto error;
-
- stat = octeon_i2c_stat_read(i2c);
- if (stat == STAT_START || stat == STAT_REP_START)
- /* START successful, bail out */
- return 0;
-
-error:
- /* START failed, try to recover */
- ret = octeon_i2c_recovery(i2c);
- return (ret) ? ret : -EAGAIN;
-}
-
-/* send STOP to the bus */
-static void octeon_i2c_stop(struct octeon_i2c *i2c)
-{
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STP);
-}
-
-/**
- * octeon_i2c_write - send data to the bus via low-level controller
- * @i2c: The struct octeon_i2c
- * @target: Target address
- * @data: Pointer to the data to be sent
- * @length: Length of the data
- *
- * The address is sent over the bus, then the data.
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
- const u8 *data, int length)
-{
- int i, result;
-
- octeon_i2c_data_write(i2c, target << 1);
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
-
- result = octeon_i2c_wait(i2c);
- if (result)
- return result;
-
- for (i = 0; i < length; i++) {
- result = octeon_i2c_check_status(i2c, false);
- if (result)
- return result;
-
- octeon_i2c_data_write(i2c, data[i]);
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
-
- result = octeon_i2c_wait(i2c);
- if (result)
- return result;
- }
-
- return 0;
-}
-
-/**
- * octeon_i2c_read - receive data from the bus via low-level controller
- * @i2c: The struct octeon_i2c
- * @target: Target address
- * @data: Pointer to the location to store the data
- * @rlength: Length of the data
- * @recv_len: flag for length byte
- *
- * The address is sent over the bus, then the data is read.
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
- u8 *data, u16 *rlength, bool recv_len)
-{
- int i, result, length = *rlength;
- bool final_read = false;
-
- octeon_i2c_data_write(i2c, (target << 1) | 1);
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
-
- result = octeon_i2c_wait(i2c);
- if (result)
- return result;
-
- /* address OK ? */
- result = octeon_i2c_check_status(i2c, false);
- if (result)
- return result;
-
- for (i = 0; i < length; i++) {
- /*
- * For the last byte to receive TWSI_CTL_AAK must not be set.
- *
- * A special case is I2C_M_RECV_LEN where we don't know the
- * additional length yet. If recv_len is set we assume we're
- * not reading the final byte and therefore need to set
- * TWSI_CTL_AAK.
- */
- if ((i + 1 == length) && !(recv_len && i == 0))
- final_read = true;
-
- /* clear iflg to allow next event */
- if (final_read)
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
- else
- octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_AAK);
-
- result = octeon_i2c_wait(i2c);
- if (result)
- return result;
-
- data[i] = octeon_i2c_data_read(i2c);
- if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
- return -EPROTO;
- length += data[i];
- }
-
- result = octeon_i2c_check_status(i2c, final_read);
- if (result)
- return result;
- }
- *rlength = length;
- return 0;
-}
-
-/**
- * octeon_i2c_xfer - The driver's master_xfer function
- * @adap: Pointer to the i2c_adapter structure
- * @msgs: Pointer to the messages to be processed
- * @num: Length of the MSGS array
- *
- * Returns the number of messages processed, or a negative errno on failure.
- */
-static int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct octeon_i2c *i2c = i2c_get_adapdata(adap);
- int i, ret = 0;
-
- if (num == 1) {
- if (msgs[0].len > 0 && msgs[0].len <= 8) {
- if (msgs[0].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_write(i2c, msgs);
- goto out;
- }
- } else if (num == 2) {
- if ((msgs[0].flags & I2C_M_RD) == 0 &&
- (msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
- msgs[0].len > 0 && msgs[0].len <= 2 &&
- msgs[1].len > 0 && msgs[1].len <= 8 &&
- msgs[0].addr == msgs[1].addr) {
- if (msgs[1].flags & I2C_M_RD)
- ret = octeon_i2c_hlc_comp_read(i2c, msgs);
- else
- ret = octeon_i2c_hlc_comp_write(i2c, msgs);
- goto out;
- }
- }
-
- for (i = 0; ret == 0 && i < num; i++) {
- struct i2c_msg *pmsg = &msgs[i];
-
- /* zero-length messages are not supported */
- if (!pmsg->len) {
- ret = -EOPNOTSUPP;
- break;
- }
-
- ret = octeon_i2c_start(i2c);
- if (ret)
- return ret;
-
- if (pmsg->flags & I2C_M_RD)
- ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
- &pmsg->len, pmsg->flags & I2C_M_RECV_LEN);
- else
- ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
- pmsg->len);
- }
- octeon_i2c_stop(i2c);
-out:
- return (ret != 0) ? ret : num;
-}
-
static int octeon_i2c_get_scl(struct i2c_adapter *adap)
{
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
@@ -1044,7 +774,7 @@ static void octeon_i2c_set_scl(struct i2c_adapter *adap, int val)
{
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
- octeon_i2c_write_int(i2c, TWSI_INT_SCL_OVR);
+ octeon_i2c_write_int(i2c, val ? 0 : TWSI_INT_SCL_OVR);
}
static int octeon_i2c_get_sda(struct i2c_adapter *adap)
@@ -1060,13 +790,14 @@ static void octeon_i2c_prepare_recovery(struct i2c_adapter *adap)
{
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ octeon_i2c_hlc_disable(i2c);
+
/*
- * The stop resets the state machine, does not _transmit_ STOP unless
- * engine was active.
+ * Bring the control register to a good state regardless
+ * of the HLC state.
*/
- octeon_i2c_stop(i2c);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
- octeon_i2c_hlc_disable(i2c);
octeon_i2c_write_int(i2c, 0);
}
@@ -1074,10 +805,19 @@ static void octeon_i2c_unprepare_recovery(struct i2c_adapter *adap)
{
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ /*
+ * Generate STOP to finish the unfinished transaction.
+ * Can't generate STOP via the TWSI CTL register
+ * since it could bring the TWSI controller into an inoperable state.
+ */
+ octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR | TWSI_INT_SCL_OVR);
+ udelay(5);
+ octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR);
+ udelay(5);
octeon_i2c_write_int(i2c, 0);
}
-static struct i2c_bus_recovery_info octeon_i2c_recovery_info = {
+struct i2c_bus_recovery_info octeon_i2c_recovery_info = {
.recover_bus = i2c_generic_scl_recovery,
.get_scl = octeon_i2c_get_scl,
.set_scl = octeon_i2c_set_scl,
@@ -1085,171 +825,3 @@ static struct i2c_bus_recovery_info octeon_i2c_recovery_info = {
.prepare_recovery = octeon_i2c_prepare_recovery,
.unprepare_recovery = octeon_i2c_unprepare_recovery,
};
-
-static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_SMBUS_BLOCK_PROC_CALL;
-}
-
-static const struct i2c_algorithm octeon_i2c_algo = {
- .master_xfer = octeon_i2c_xfer,
- .functionality = octeon_i2c_functionality,
-};
-
-static struct i2c_adapter octeon_i2c_ops = {
- .owner = THIS_MODULE,
- .name = "OCTEON adapter",
- .algo = &octeon_i2c_algo,
-};
-
-static int octeon_i2c_probe(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- int irq, result = 0, hlc_irq = 0;
- struct resource *res_mem;
- struct octeon_i2c *i2c;
- bool cn78xx_style;
-
- cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-twsi");
- if (cn78xx_style) {
- hlc_irq = platform_get_irq(pdev, 0);
- if (hlc_irq < 0)
- return hlc_irq;
-
- irq = platform_get_irq(pdev, 2);
- if (irq < 0)
- return irq;
- } else {
- /* All adaptors have an irq. */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
- }
-
- i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
- if (!i2c) {
- result = -ENOMEM;
- goto out;
- }
- i2c->dev = &pdev->dev;
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c->twsi_base = devm_ioremap_resource(&pdev->dev, res_mem);
- if (IS_ERR(i2c->twsi_base)) {
- result = PTR_ERR(i2c->twsi_base);
- goto out;
- }
-
- /*
- * "clock-rate" is a legacy binding, the official binding is
- * "clock-frequency". Try the official one first and then
- * fall back if it doesn't exist.
- */
- if (of_property_read_u32(node, "clock-frequency", &i2c->twsi_freq) &&
- of_property_read_u32(node, "clock-rate", &i2c->twsi_freq)) {
- dev_err(i2c->dev,
- "no I2C 'clock-rate' or 'clock-frequency' property\n");
- result = -ENXIO;
- goto out;
- }
-
- i2c->sys_freq = octeon_get_io_clock_rate();
-
- init_waitqueue_head(&i2c->queue);
-
- i2c->irq = irq;
-
- if (cn78xx_style) {
- i2c->hlc_irq = hlc_irq;
-
- i2c->int_enable = octeon_i2c_int_enable78;
- i2c->int_disable = octeon_i2c_int_disable78;
- i2c->hlc_int_enable = octeon_i2c_hlc_int_enable78;
- i2c->hlc_int_disable = octeon_i2c_hlc_int_disable78;
-
- irq_set_status_flags(i2c->irq, IRQ_NOAUTOEN);
- irq_set_status_flags(i2c->hlc_irq, IRQ_NOAUTOEN);
-
- result = devm_request_irq(&pdev->dev, i2c->hlc_irq,
- octeon_i2c_hlc_isr78, 0,
- DRV_NAME, i2c);
- if (result < 0) {
- dev_err(i2c->dev, "failed to attach interrupt\n");
- goto out;
- }
- } else {
- i2c->int_enable = octeon_i2c_int_enable;
- i2c->int_disable = octeon_i2c_int_disable;
- i2c->hlc_int_enable = octeon_i2c_hlc_int_enable;
- i2c->hlc_int_disable = octeon_i2c_int_disable;
- }
-
- result = devm_request_irq(&pdev->dev, i2c->irq,
- octeon_i2c_isr, 0, DRV_NAME, i2c);
- if (result < 0) {
- dev_err(i2c->dev, "failed to attach interrupt\n");
- goto out;
- }
-
- if (OCTEON_IS_MODEL(OCTEON_CN38XX))
- i2c->broken_irq_check = true;
-
- result = octeon_i2c_init_lowlevel(i2c);
- if (result) {
- dev_err(i2c->dev, "init low level failed\n");
- goto out;
- }
-
- octeon_i2c_set_clock(i2c);
-
- i2c->adap = octeon_i2c_ops;
- i2c->adap.timeout = msecs_to_jiffies(2);
- i2c->adap.retries = 5;
- i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
- i2c->adap.dev.parent = &pdev->dev;
- i2c->adap.dev.of_node = node;
- i2c_set_adapdata(&i2c->adap, i2c);
- platform_set_drvdata(pdev, i2c);
-
- result = i2c_add_adapter(&i2c->adap);
- if (result < 0) {
- dev_err(i2c->dev, "failed to add adapter\n");
- goto out;
- }
- dev_info(i2c->dev, "probed\n");
- return 0;
-
-out:
- return result;
-};
-
-static int octeon_i2c_remove(struct platform_device *pdev)
-{
- struct octeon_i2c *i2c = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&i2c->adap);
- return 0;
-};
-
-static const struct of_device_id octeon_i2c_match[] = {
- { .compatible = "cavium,octeon-3860-twsi", },
- { .compatible = "cavium,octeon-7890-twsi", },
- {},
-};
-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
-
-static struct platform_driver octeon_i2c_driver = {
- .probe = octeon_i2c_probe,
- .remove = octeon_i2c_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = octeon_i2c_match,
- },
-};
-
-module_platform_driver(octeon_i2c_driver);
-
-MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
-MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
-MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
new file mode 100644
index 000000000000..1db7c835a454
--- /dev/null
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -0,0 +1,216 @@
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+/* Controller command patterns */
+#define SW_TWSI_V BIT_ULL(63) /* Valid bit */
+#define SW_TWSI_EIA BIT_ULL(61) /* Extended internal address */
+#define SW_TWSI_R BIT_ULL(56) /* Result or read bit */
+#define SW_TWSI_SOVR BIT_ULL(55) /* Size override */
+#define SW_TWSI_SIZE_SHIFT 52
+#define SW_TWSI_ADDR_SHIFT 40
+#define SW_TWSI_IA_SHIFT 32 /* Internal address */
+
+/* Controller opcode word (bits 60:57) */
+#define SW_TWSI_OP_SHIFT 57
+#define SW_TWSI_OP_7 (0ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_7_IA (1ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_10 (2ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_10_IA (3ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_TWSI_CLK (4ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_EOP (6ULL << SW_TWSI_OP_SHIFT) /* Extended opcode */
+
+/* Controller extended opcode word (bits 34:32) */
+#define SW_TWSI_EOP_SHIFT 32
+#define SW_TWSI_EOP_TWSI_DATA (SW_TWSI_OP_EOP | 1ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_CTL (SW_TWSI_OP_EOP | 2ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_CLKCTL (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT) /* write-only */
+#define SW_TWSI_EOP_TWSI_STAT (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT) /* read-only, same selector */
+#define SW_TWSI_EOP_TWSI_RST (SW_TWSI_OP_EOP | 7ULL << SW_TWSI_EOP_SHIFT)
+
+/* Controller command and status bits */
+#define TWSI_CTL_CE 0x80 /* High level controller enable */
+#define TWSI_CTL_ENAB 0x40 /* Bus enable */
+#define TWSI_CTL_STA 0x20 /* Master-mode start, HW clears when done */
+#define TWSI_CTL_STP 0x10 /* Master-mode stop, HW clears when done */
+#define TWSI_CTL_IFLG 0x08 /* HW event, SW writes 0 to ACK */
+#define TWSI_CTL_AAK 0x04 /* Assert ACK */
+
+/* Status values */
+#define STAT_ERROR 0x00
+#define STAT_START 0x08
+#define STAT_REP_START 0x10
+#define STAT_TXADDR_ACK 0x18
+#define STAT_TXADDR_NAK 0x20
+#define STAT_TXDATA_ACK 0x28
+#define STAT_TXDATA_NAK 0x30
+#define STAT_LOST_ARB_38 0x38
+#define STAT_RXADDR_ACK 0x40
+#define STAT_RXADDR_NAK 0x48
+#define STAT_RXDATA_ACK 0x50
+#define STAT_RXDATA_NAK 0x58
+#define STAT_SLAVE_60 0x60
+#define STAT_LOST_ARB_68 0x68
+#define STAT_SLAVE_70 0x70
+#define STAT_LOST_ARB_78 0x78
+#define STAT_SLAVE_80 0x80
+#define STAT_SLAVE_88 0x88
+#define STAT_GENDATA_ACK 0x90
+#define STAT_GENDATA_NAK 0x98
+#define STAT_SLAVE_A0 0xA0
+#define STAT_SLAVE_A8 0xA8
+#define STAT_LOST_ARB_B0 0xB0
+#define STAT_SLAVE_LOST 0xB8
+#define STAT_SLAVE_NAK 0xC0
+#define STAT_SLAVE_ACK 0xC8
+#define STAT_AD2W_ACK 0xD0
+#define STAT_AD2W_NAK 0xD8
+#define STAT_IDLE 0xF8
+
+/* TWSI_INT values */
+#define TWSI_INT_ST_INT BIT_ULL(0)
+#define TWSI_INT_TS_INT BIT_ULL(1)
+#define TWSI_INT_CORE_INT BIT_ULL(2)
+#define TWSI_INT_ST_EN BIT_ULL(4)
+#define TWSI_INT_TS_EN BIT_ULL(5)
+#define TWSI_INT_CORE_EN BIT_ULL(6)
+#define TWSI_INT_SDA_OVR BIT_ULL(8)
+#define TWSI_INT_SCL_OVR BIT_ULL(9)
+#define TWSI_INT_SDA BIT_ULL(10)
+#define TWSI_INT_SCL BIT_ULL(11)
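
Note the split in the TWSI_INT bit layout: the *_OVR bits (8 and 9) are open-drain overrides that hold the respective line low while set, whereas the SDA/SCL bits (10 and 11) read back the current pin state. The generic bus recovery callbacks and the STOP generation in unprepare_recovery rely on exactly this split.
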
+
+#define I2C_OCTEON_EVENT_WAIT 80 /* microseconds */
+
+/* Register offsets */
+struct octeon_i2c_reg_offset {
+ unsigned int sw_twsi;
+ unsigned int twsi_int;
+ unsigned int sw_twsi_ext;
+};
+
+#define SW_TWSI(x) (x->roff.sw_twsi)
+#define TWSI_INT(x) (x->roff.twsi_int)
+#define SW_TWSI_EXT(x) (x->roff.sw_twsi_ext)
+
+struct octeon_i2c {
+ wait_queue_head_t queue;
+ struct i2c_adapter adap;
+ struct octeon_i2c_reg_offset roff;
+ struct clk *clk;
+ int irq;
+ int hlc_irq; /* For cn7890 only */
+ u32 twsi_freq;
+ int sys_freq;
+ void __iomem *twsi_base;
+ struct device *dev;
+ bool hlc_enabled;
+ bool broken_irq_mode;
+ bool broken_irq_check;
+ void (*int_enable)(struct octeon_i2c *);
+ void (*int_disable)(struct octeon_i2c *);
+ void (*hlc_int_enable)(struct octeon_i2c *);
+ void (*hlc_int_disable)(struct octeon_i2c *);
+ atomic_t int_enable_cnt;
+ atomic_t hlc_int_enable_cnt;
+#if IS_ENABLED(CONFIG_I2C_THUNDERX)
+ struct msix_entry i2c_msix;
+#endif
+ struct i2c_smbus_alert_setup alert_data;
+ struct i2c_client *ara;
+};
+
+static inline void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
+{
+ __raw_writeq(val, addr);
+ __raw_readq(addr); /* wait for write to land */
+}
+
+/**
+ * octeon_i2c_reg_write - write an I2C core register
+ * @i2c: The struct octeon_i2c
+ * @eop_reg: Register selector
+ * @data: Value to be written
+ *
+ * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
+ */
+static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
+{
+ u64 tmp;
+
+ __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
+
+ readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, !(tmp & SW_TWSI_V),
+ I2C_OCTEON_EVENT_WAIT, i2c->adap.timeout);
+}
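
As a worked example of the command-word encoding (illustration only): writing TWSI_CTL_ENAB (0x40) to the CTL register composes

    SW_TWSI_V                   /* bit 63, valid/busy flag */
    | SW_TWSI_OP_EOP            /* 6ULL << 57, extended opcode */
    | 2ULL << SW_TWSI_EOP_SHIFT /* EOP selector for TWSI_CTL */
    | 0x40                      /* data byte */

which is 0x8C00000200000040. The controller clears bit 63 once the access completes; that is the condition the readq_poll_timeout() above waits for.
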
+
+#define octeon_i2c_ctl_write(i2c, val) \
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CTL, val)
+#define octeon_i2c_data_write(i2c, val) \
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_DATA, val)
+
+/**
+ * octeon_i2c_reg_read - read lower bits of an I2C core register
+ * @i2c: The struct octeon_i2c
+ * @eop_reg: Register selector
+ * @error: Location to store the poll result; may be NULL
+ *
+ * Returns the register data (lower 8 bits).
+ *
+ * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
+ */
+static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
+ int *error)
+{
+ u64 tmp;
+ int ret;
+
+ __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
+
+ ret = readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp,
+ !(tmp & SW_TWSI_V), I2C_OCTEON_EVENT_WAIT,
+ i2c->adap.timeout);
+ if (error)
+ *error = ret;
+ return tmp & 0xFF;
+}
+
+#define octeon_i2c_ctl_read(i2c) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL)
+#define octeon_i2c_data_read(i2c, error) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error)
+#define octeon_i2c_stat_read(i2c) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
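
A hypothetical caller consumes the error pointer of the data-read variant like this (sketch only):

    int error, data;

    data = octeon_i2c_data_read(i2c, &error);
    if (error)
            return error;   /* SW_TWSI poll timed out */
    /* only the low 8 bits of data are meaningful */
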
+
+/**
+ * octeon_i2c_read_int - read the TWSI_INT register
+ * @i2c: The struct octeon_i2c
+ *
+ * Returns the value of the register.
+ */
+static inline u64 octeon_i2c_read_int(struct octeon_i2c *i2c)
+{
+ return __raw_readq(i2c->twsi_base + TWSI_INT(i2c));
+}
+
+/**
+ * octeon_i2c_write_int - write the TWSI_INT register
+ * @i2c: The struct octeon_i2c
+ * @data: Value to be written
+ */
+static inline void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
+{
+ octeon_i2c_writeq_flush(data, i2c->twsi_base + TWSI_INT(i2c));
+}
+
+/* Prototypes */
+irqreturn_t octeon_i2c_isr(int irq, void *dev_id);
+int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
+int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c);
+void octeon_i2c_set_clock(struct octeon_i2c *i2c);
+extern struct i2c_bus_recovery_info octeon_i2c_recovery_info;
diff --git a/drivers/i2c/busses/i2c-octeon-platdrv.c b/drivers/i2c/busses/i2c-octeon-platdrv.c
new file mode 100644
index 000000000000..917524ce6890
--- /dev/null
+++ b/drivers/i2c/busses/i2c-octeon-platdrv.c
@@ -0,0 +1,286 @@
+/*
+ * (C) Copyright 2009-2010
+ * Nokia Siemens Networks, michael.lawnick.ext@nsn.com
+ *
+ * Portions Copyright (C) 2010 - 2016 Cavium, Inc.
+ *
+ * This is a driver for the i2c adapter in Cavium Networks' OCTEON processors.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/octeon/octeon.h>
+#include "i2c-octeon-core.h"
+
+#define DRV_NAME "i2c-octeon"
+
+/**
+ * octeon_i2c_int_enable - enable the CORE interrupt
+ * @i2c: The struct octeon_i2c
+ *
+ * The interrupt will be asserted when the SW_TWSI_EOP_TWSI_STAT
+ * register contains a state other than STAT_IDLE.
+ */
+static void octeon_i2c_int_enable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_write_int(i2c, TWSI_INT_CORE_EN);
+}
+
+/* disable the CORE interrupt */
+static void octeon_i2c_int_disable(struct octeon_i2c *i2c)
+{
+ /* clear TS/ST/IFLG events */
+ octeon_i2c_write_int(i2c, 0);
+}
+
+/**
+ * octeon_i2c_int_enable78 - enable the CORE interrupt
+ * @i2c: The struct octeon_i2c
+ *
+ * The interrupt will be asserted when the SW_TWSI_EOP_TWSI_STAT
+ * register contains a state other than STAT_IDLE.
+ */
+static void octeon_i2c_int_enable78(struct octeon_i2c *i2c)
+{
+ atomic_inc_return(&i2c->int_enable_cnt);
+ enable_irq(i2c->irq);
+}
+
+static void __octeon_i2c_irq_disable(atomic_t *cnt, int irq)
+{
+ int count;
+
+ /*
+ * The interrupt can be disabled in two places, but we only
+ * want to make the disable_irq_nosync() call once, so keep
+ * track with the atomic variable.
+ */
+ count = atomic_dec_if_positive(cnt);
+ if (count >= 0)
+ disable_irq_nosync(irq);
+}
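
atomic_dec_if_positive() returns the decremented value and refuses to take the counter below zero, so disable_irq_nosync() runs at most once per prior enable even when two disable paths race.
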
+
+/* disable the CORE interrupt */
+static void octeon_i2c_int_disable78(struct octeon_i2c *i2c)
+{
+ __octeon_i2c_irq_disable(&i2c->int_enable_cnt, i2c->irq);
+}
+
+/**
+ * octeon_i2c_hlc_int_enable78 - enable the ST interrupt
+ * @i2c: The struct octeon_i2c
+ *
+ * The interrupt will be asserted when the SW_TWSI_EOP_TWSI_STAT
+ * register contains a state other than STAT_IDLE.
+ */
+static void octeon_i2c_hlc_int_enable78(struct octeon_i2c *i2c)
+{
+ atomic_inc_return(&i2c->hlc_int_enable_cnt);
+ enable_irq(i2c->hlc_irq);
+}
+
+/* disable the ST interrupt */
+static void octeon_i2c_hlc_int_disable78(struct octeon_i2c *i2c)
+{
+ __octeon_i2c_irq_disable(&i2c->hlc_int_enable_cnt, i2c->hlc_irq);
+}
+
+/* HLC interrupt service routine */
+static irqreturn_t octeon_i2c_hlc_isr78(int irq, void *dev_id)
+{
+ struct octeon_i2c *i2c = dev_id;
+
+ i2c->hlc_int_disable(i2c);
+ wake_up(&i2c->queue);
+
+ return IRQ_HANDLED;
+}
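
For context, a hedged sketch of the waiting side that lives in the shared core file (not part of this hunk; the wake-up condition shown is an assumption for illustration):

    static int example_octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
    {
            long time_left;

            i2c->hlc_int_enable(i2c);
            time_left = wait_event_timeout(i2c->queue,
                                           octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG,
                                           i2c->adap.timeout);
            i2c->hlc_int_disable(i2c);

            return time_left ? 0 : -ETIMEDOUT;
    }
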
+
+static void octeon_i2c_hlc_int_enable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_write_int(i2c, TWSI_INT_ST_EN);
+}
+
+static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
+}
+
+static const struct i2c_algorithm octeon_i2c_algo = {
+ .master_xfer = octeon_i2c_xfer,
+ .functionality = octeon_i2c_functionality,
+};
+
+static struct i2c_adapter octeon_i2c_ops = {
+ .owner = THIS_MODULE,
+ .name = "OCTEON adapter",
+ .algo = &octeon_i2c_algo,
+};
+
+static int octeon_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int irq, result = 0, hlc_irq = 0;
+ struct resource *res_mem;
+ struct octeon_i2c *i2c;
+ bool cn78xx_style;
+
+ cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-twsi");
+ if (cn78xx_style) {
+ hlc_irq = platform_get_irq(pdev, 0);
+ if (hlc_irq < 0)
+ return hlc_irq;
+
+ irq = platform_get_irq(pdev, 2);
+ if (irq < 0)
+ return irq;
+ } else {
+ /* All adapters have an IRQ. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ }
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c) {
+ result = -ENOMEM;
+ goto out;
+ }
+ i2c->dev = &pdev->dev;
+
+ i2c->roff.sw_twsi = 0x00;
+ i2c->roff.twsi_int = 0x10;
+ i2c->roff.sw_twsi_ext = 0x18;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c->twsi_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(i2c->twsi_base)) {
+ result = PTR_ERR(i2c->twsi_base);
+ goto out;
+ }
+
+ /*
+ * "clock-rate" is a legacy binding, the official binding is
+ * "clock-frequency". Try the official one first and then
+ * fall back if it doesn't exist.
+ */
+ if (of_property_read_u32(node, "clock-frequency", &i2c->twsi_freq) &&
+ of_property_read_u32(node, "clock-rate", &i2c->twsi_freq)) {
+ dev_err(i2c->dev,
+ "no I2C 'clock-rate' or 'clock-frequency' property\n");
+ result = -ENXIO;
+ goto out;
+ }
+
+ i2c->sys_freq = octeon_get_io_clock_rate();
+
+ init_waitqueue_head(&i2c->queue);
+
+ i2c->irq = irq;
+
+ if (cn78xx_style) {
+ i2c->hlc_irq = hlc_irq;
+
+ i2c->int_enable = octeon_i2c_int_enable78;
+ i2c->int_disable = octeon_i2c_int_disable78;
+ i2c->hlc_int_enable = octeon_i2c_hlc_int_enable78;
+ i2c->hlc_int_disable = octeon_i2c_hlc_int_disable78;
+
+ irq_set_status_flags(i2c->irq, IRQ_NOAUTOEN);
+ irq_set_status_flags(i2c->hlc_irq, IRQ_NOAUTOEN);
+
+ result = devm_request_irq(&pdev->dev, i2c->hlc_irq,
+ octeon_i2c_hlc_isr78, 0,
+ DRV_NAME, i2c);
+ if (result < 0) {
+ dev_err(i2c->dev, "failed to attach interrupt\n");
+ goto out;
+ }
+ } else {
+ i2c->int_enable = octeon_i2c_int_enable;
+ i2c->int_disable = octeon_i2c_int_disable;
+ i2c->hlc_int_enable = octeon_i2c_hlc_int_enable;
+ i2c->hlc_int_disable = octeon_i2c_int_disable;
+ }
+
+ result = devm_request_irq(&pdev->dev, i2c->irq,
+ octeon_i2c_isr, 0, DRV_NAME, i2c);
+ if (result < 0) {
+ dev_err(i2c->dev, "failed to attach interrupt\n");
+ goto out;
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ i2c->broken_irq_check = true;
+
+ result = octeon_i2c_init_lowlevel(i2c);
+ if (result) {
+ dev_err(i2c->dev, "init low level failed\n");
+ goto out;
+ }
+
+ octeon_i2c_set_clock(i2c);
+
+ i2c->adap = octeon_i2c_ops;
+ i2c->adap.timeout = msecs_to_jiffies(2);
+ i2c->adap.retries = 5;
+ i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
+ i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = node;
+ i2c_set_adapdata(&i2c->adap, i2c);
+ platform_set_drvdata(pdev, i2c);
+
+ result = i2c_add_adapter(&i2c->adap);
+ if (result < 0)
+ goto out;
+ dev_info(i2c->dev, "probed\n");
+ return 0;
+
+out:
+ return result;
+}
+
+static int octeon_i2c_remove(struct platform_device *pdev)
+{
+ struct octeon_i2c *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adap);
+ return 0;
+}
+
+static const struct of_device_id octeon_i2c_match[] = {
+ { .compatible = "cavium,octeon-3860-twsi", },
+ { .compatible = "cavium,octeon-7890-twsi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_i2c_match);
+
+static struct platform_driver octeon_i2c_driver = {
+ .probe = octeon_i2c_probe,
+ .remove = octeon_i2c_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = octeon_i2c_match,
+ },
+};
+
+module_platform_driver(octeon_i2c_driver);
+
+MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
+MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index ab1279b8e240..c7da0c42baee 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1425,10 +1425,8 @@ omap_i2c_probe(struct platform_device *pdev)
/* i2c device drivers may be active on return from add_adapter() */
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
- if (r) {
- dev_err(omap->dev, "failure adding adapter\n");
+ if (r)
goto err_unuse_clocks;
- }
dev_info(omap->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
major, minor, omap->speed);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 23d1c167b5d7..c2268cdf38e8 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -694,7 +694,6 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
retval = i2c_add_adapter(adap);
if (retval) {
- dev_err(&dev->dev, "Couldn't register adapter!\n");
kfree(adapdata);
kfree(adap);
release_region(smba, SMBIOSIZE);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 2c40edbf6224..217c78711d65 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -329,10 +329,8 @@ static int pmcmsptwi_probe(struct platform_device *pldev)
i2c_set_adapdata(&pmcmsptwi_adapter, &pmcmsptwi_data);
rc = i2c_add_adapter(&pmcmsptwi_adapter);
- if (rc) {
- dev_err(&pldev->dev, "Unable to register I2C adapter\n");
+ if (rc)
goto ret_unmap;
- }
return 0;
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 7ea67aa46fb7..fd5f9d2bf6d9 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -714,10 +714,8 @@ static int i2c_pnx_probe(struct platform_device *pdev)
/* Register this adapter with the I2C subsystem */
ret = i2c_add_numbered_adapter(&alg_data->adapter);
- if (ret < 0) {
- dev_err(&pdev->dev, "I2C: Failed to add bus\n");
+ if (ret < 0)
goto out_clock;
- }
dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
alg_data->adapter.name, res->start, alg_data->irq);
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index 82b6f02544da..0c8b1571886d 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -212,11 +212,8 @@ static int puv3_i2c_probe(struct platform_device *pdev)
adapter->nr = pdev->id;
rc = i2c_add_numbered_adapter(adapter);
- if (rc) {
- dev_err(&pdev->dev, "Adapter '%s' registration failed\n",
- adapter->name);
+ if (rc)
goto fail_add_adapter;
- }
dev_info(&pdev->dev, "PKUnity v3 i2c bus adapter.\n");
return 0;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 0d351954db02..e28b825b0433 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1292,10 +1292,8 @@ static int i2c_pxa_probe(struct platform_device *dev)
#endif
ret = i2c_add_numbered_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&dev->dev, "failed to add bus: %d\n", ret);
+ if (ret < 0)
goto ereqirq;
- }
platform_set_drvdata(dev, i2c);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 9bd849dacee8..726615e54f2a 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -802,6 +802,7 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
{ .compatible = "renesas,i2c-r8a7793", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,i2c-r8a7794", .data = (void *)I2C_RCAR_GEN2 },
{ .compatible = "renesas,i2c-r8a7795", .data = (void *)I2C_RCAR_GEN3 },
+ { .compatible = "renesas,i2c-r8a7796", .data = (void *)I2C_RCAR_GEN3 },
{},
};
MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
@@ -875,10 +876,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
ret = i2c_add_numbered_adapter(adap);
- if (ret < 0) {
- dev_err(dev, "reg adap failed: %d\n", ret);
+ if (ret < 0)
goto out_pm_disable;
- }
dev_info(dev, "probed\n");
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index d7e3af671543..6263ea82d6ac 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -383,10 +383,8 @@ static int riic_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(adap);
- if (ret) {
- dev_err(&pdev->dev, "failed to add adapter\n");
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, riic);
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 5c5b7cada8be..df220666d627 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -58,7 +58,7 @@ enum {
#define REG_CON_LASTACK BIT(5) /* 1: send NACK after last received byte */
#define REG_CON_ACTACK BIT(6) /* 1: stop if NACK is received */
-#define REG_CON_TUNING_MASK GENMASK(15, 8)
+#define REG_CON_TUNING_MASK GENMASK_ULL(15, 8)
#define REG_CON_SDA_CFG(cfg) ((cfg) << 8)
#define REG_CON_STA_CFG(cfg) ((cfg) << 12)
@@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
t_calc->div_low--;
t_calc->div_high--;
+ /* Use a tuning value of 0, which leaves the CON register unchanged */
+ t_calc->tuning = 0;
/* Maximum divider supported by hw is 0xffff */
if (t_calc->div_low > 0xffff) {
t_calc->div_low = 0xffff;
@@ -742,7 +744,7 @@ static int rk3x_i2c_v1_calc_timings(unsigned long clk_rate,
struct i2c_timings *t,
struct rk3x_i2c_calced_timings *t_calc)
{
- unsigned long min_low_ns, min_high_ns, min_total_ns;
+ unsigned long min_low_ns, min_high_ns;
unsigned long min_setup_start_ns, min_setup_data_ns;
unsigned long min_setup_stop_ns, max_hold_data_ns;
@@ -793,7 +795,6 @@ static int rk3x_i2c_v1_calc_timings(unsigned long clk_rate,
/* These are the min dividers needed for min hold times. */
min_div_for_hold = (min_low_div + min_high_div);
- min_total_ns = min_low_ns + min_high_ns;
/*
* This is the maximum divider so we don't go over the maximum.
@@ -1312,10 +1313,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
rk3x_i2c_adapt_div(i2c, clk_rate);
ret = i2c_add_adapter(&i2c->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not register adapter\n");
+ if (ret < 0)
goto err_clk_notifier;
- }
dev_info(&pdev->dev, "Initialized RK3xxx I2C bus at %p\n", i2c->regs);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 38dc1cacfd8b..499af26e736e 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1215,7 +1215,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to add bus to i2c core\n");
pm_runtime_disable(&pdev->dev);
s3c24xx_i2c_deregister_cpufreq(i2c);
clk_unprepare(i2c->clk);
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 24968384b401..c2005c789d2b 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -510,10 +510,8 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
}
ret = i2c_add_numbered_adapter(&id->adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "reg adap failed: %d\n", ret);
+ if (ret < 0)
goto out4;
- }
platform_set_drvdata(pdev, id);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 05b1eeab9cf5..192f36f00e4d 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -981,7 +981,6 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
ret = i2c_add_numbered_adapter(adap);
if (ret < 0) {
sh_mobile_i2c_release_dma(pd);
- dev_err(&dev->dev, "cannot add numbered adapter\n");
return ret;
}
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 792a42bdd335..95e81d0f72b4 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -387,10 +387,8 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
writel(regval, siic->base + SIRFSOC_I2C_SDA_DELAY);
err = i2c_add_numbered_adapter(adap);
- if (err < 0) {
- dev_err(&pdev->dev, "Can't add new i2c adapter\n");
+ if (err < 0)
goto out;
- }
clk_disable(clk);
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 944ec4205084..1371547ce1a3 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -874,10 +874,8 @@ static int st_i2c_probe(struct platform_device *pdev)
init_completion(&i2c_dev->complete);
ret = i2c_add_adapter(adap);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add adapter\n");
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, i2c_dev);
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 460c134832ac..dc63236b45b2 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -920,11 +920,8 @@ static int stu300_probe(struct platform_device *pdev)
/* i2c device drivers may be active on return from add_adapter() */
ret = i2c_add_numbered_adapter(adap);
- if (ret) {
- dev_err(&pdev->dev, "failure adding ST Micro DDC "
- "I2C adapter\n");
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, dev);
dev_info(&pdev->dev, "ST DDC I2C @ %p, irq %d\n",
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index b126dbaa47e3..4af9bbae20df 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -28,6 +28,9 @@
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/reset.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
#include <asm/unaligned.h>
@@ -36,21 +39,21 @@
#define I2C_CNFG 0x000
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
-#define I2C_CNFG_PACKET_MODE_EN (1<<10)
-#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
-#define I2C_CNFG_MULTI_MASTER_MODE (1<<17)
+#define I2C_CNFG_PACKET_MODE_EN BIT(10)
+#define I2C_CNFG_NEW_MASTER_FSM BIT(11)
+#define I2C_CNFG_MULTI_MASTER_MODE BIT(17)
#define I2C_STATUS 0x01C
#define I2C_SL_CNFG 0x020
-#define I2C_SL_CNFG_NACK (1<<1)
-#define I2C_SL_CNFG_NEWSL (1<<2)
+#define I2C_SL_CNFG_NACK BIT(1)
+#define I2C_SL_CNFG_NEWSL BIT(2)
#define I2C_SL_ADDR1 0x02c
#define I2C_SL_ADDR2 0x030
#define I2C_TX_FIFO 0x050
#define I2C_RX_FIFO 0x054
#define I2C_PACKET_TRANSFER_STATUS 0x058
#define I2C_FIFO_CONTROL 0x05c
-#define I2C_FIFO_CONTROL_TX_FLUSH (1<<1)
-#define I2C_FIFO_CONTROL_RX_FLUSH (1<<0)
+#define I2C_FIFO_CONTROL_TX_FLUSH BIT(1)
+#define I2C_FIFO_CONTROL_RX_FLUSH BIT(0)
#define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5
#define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2
#define I2C_FIFO_STATUS 0x060
@@ -60,26 +63,26 @@
#define I2C_FIFO_STATUS_RX_SHIFT 0
#define I2C_INT_MASK 0x064
#define I2C_INT_STATUS 0x068
-#define I2C_INT_PACKET_XFER_COMPLETE (1<<7)
-#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6)
-#define I2C_INT_TX_FIFO_OVERFLOW (1<<5)
-#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4)
-#define I2C_INT_NO_ACK (1<<3)
-#define I2C_INT_ARBITRATION_LOST (1<<2)
-#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
-#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
+#define I2C_INT_PACKET_XFER_COMPLETE BIT(7)
+#define I2C_INT_ALL_PACKETS_XFER_COMPLETE BIT(6)
+#define I2C_INT_TX_FIFO_OVERFLOW BIT(5)
+#define I2C_INT_RX_FIFO_UNDERFLOW BIT(4)
+#define I2C_INT_NO_ACK BIT(3)
+#define I2C_INT_ARBITRATION_LOST BIT(2)
+#define I2C_INT_TX_FIFO_DATA_REQ BIT(1)
+#define I2C_INT_RX_FIFO_DATA_REQ BIT(0)
#define I2C_CLK_DIVISOR 0x06c
#define I2C_CLK_DIVISOR_STD_FAST_MODE_SHIFT 16
#define I2C_CLK_MULTIPLIER_STD_FAST_MODE 8
#define DVC_CTRL_REG1 0x000
-#define DVC_CTRL_REG1_INTR_EN (1<<10)
+#define DVC_CTRL_REG1_INTR_EN BIT(10)
#define DVC_CTRL_REG2 0x004
#define DVC_CTRL_REG3 0x008
-#define DVC_CTRL_REG3_SW_PROG (1<<26)
-#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30)
+#define DVC_CTRL_REG3_SW_PROG BIT(26)
+#define DVC_CTRL_REG3_I2C_DONE_INTR_EN BIT(30)
#define DVC_STATUS 0x00c
-#define DVC_STATUS_I2C_DONE_INTR (1<<30)
+#define DVC_STATUS_I2C_DONE_INTR BIT(30)
#define I2C_ERR_NONE 0x00
#define I2C_ERR_NO_ACK 0x01
@@ -89,26 +92,28 @@
#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
#define PACKET_HEADER0_PACKET_ID_SHIFT 16
#define PACKET_HEADER0_CONT_ID_SHIFT 12
-#define PACKET_HEADER0_PROTOCOL_I2C (1<<4)
-
-#define I2C_HEADER_HIGHSPEED_MODE (1<<22)
-#define I2C_HEADER_CONT_ON_NAK (1<<21)
-#define I2C_HEADER_SEND_START_BYTE (1<<20)
-#define I2C_HEADER_READ (1<<19)
-#define I2C_HEADER_10BIT_ADDR (1<<18)
-#define I2C_HEADER_IE_ENABLE (1<<17)
-#define I2C_HEADER_REPEAT_START (1<<16)
-#define I2C_HEADER_CONTINUE_XFER (1<<15)
+#define PACKET_HEADER0_PROTOCOL_I2C BIT(4)
+
+#define I2C_HEADER_HIGHSPEED_MODE BIT(22)
+#define I2C_HEADER_CONT_ON_NAK BIT(21)
+#define I2C_HEADER_SEND_START_BYTE BIT(20)
+#define I2C_HEADER_READ BIT(19)
+#define I2C_HEADER_10BIT_ADDR BIT(18)
+#define I2C_HEADER_IE_ENABLE BIT(17)
+#define I2C_HEADER_REPEAT_START BIT(16)
+#define I2C_HEADER_CONTINUE_XFER BIT(15)
#define I2C_HEADER_MASTER_ADDR_SHIFT 12
#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
#define I2C_CONFIG_LOAD 0x08C
-#define I2C_MSTR_CONFIG_LOAD (1 << 0)
-#define I2C_SLV_CONFIG_LOAD (1 << 1)
-#define I2C_TIMEOUT_CONFIG_LOAD (1 << 2)
+#define I2C_MSTR_CONFIG_LOAD BIT(0)
+#define I2C_SLV_CONFIG_LOAD BIT(1)
+#define I2C_TIMEOUT_CONFIG_LOAD BIT(2)
#define I2C_CLKEN_OVERRIDE 0x090
-#define I2C_MST_CORE_CLKEN_OVR (1 << 0)
+#define I2C_MST_CORE_CLKEN_OVR BIT(0)
+
+#define I2C_CONFIG_LOAD_TIMEOUT 1000000
/*
 * msg_end_type: The bus control which needs to be sent at the end of a transfer.
@@ -191,9 +196,11 @@ struct tegra_i2c_dev {
u16 clk_divisor_non_hs_mode;
bool is_suspended;
bool is_multimaster_mode;
+ spinlock_t xfer_lock;
};
-static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
+ unsigned long reg)
{
writel(val, i2c_dev->base + reg);
}
@@ -244,15 +251,17 @@ static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
{
- u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
- int_mask &= ~mask;
+ u32 int_mask;
+
+ int_mask = i2c_readl(i2c_dev, I2C_INT_MASK) & ~mask;
i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
}
static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
{
- u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
- int_mask |= mask;
+ u32 int_mask;
+
+ int_mask = i2c_readl(i2c_dev, I2C_INT_MASK) | mask;
i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
}
@@ -260,6 +269,7 @@ static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
{
unsigned long timeout = jiffies + HZ;
u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+
val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
@@ -385,7 +395,8 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
*/
static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
{
- u32 val = 0;
+ u32 val;
+
val = dvc_readl(i2c_dev, DVC_CTRL_REG3);
val |= DVC_CTRL_REG3_SW_PROG;
val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN;
@@ -396,9 +407,15 @@ static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
}
-static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
+static int tegra_i2c_runtime_resume(struct device *dev)
{
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
+
+ ret = pinctrl_pm_select_default_state(i2c_dev->dev);
+ if (ret)
+ return ret;
+
if (!i2c_dev->hw->has_single_clk_source) {
ret = clk_enable(i2c_dev->fast_clk);
if (ret < 0) {
@@ -407,32 +424,66 @@ static inline int tegra_i2c_clock_enable(struct tegra_i2c_dev *i2c_dev)
return ret;
}
}
+
ret = clk_enable(i2c_dev->div_clk);
if (ret < 0) {
dev_err(i2c_dev->dev,
"Enabling div clk failed, err %d\n", ret);
clk_disable(i2c_dev->fast_clk);
+ return ret;
}
- return ret;
+
+ return 0;
}
-static inline void tegra_i2c_clock_disable(struct tegra_i2c_dev *i2c_dev)
+static int tegra_i2c_runtime_suspend(struct device *dev)
{
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+
clk_disable(i2c_dev->div_clk);
if (!i2c_dev->hw->has_single_clk_source)
clk_disable(i2c_dev->fast_clk);
+
+ return pinctrl_pm_select_idle_state(i2c_dev->dev);
+}
+
+static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
+{
+ unsigned long reg_offset;
+ void __iomem *addr;
+ u32 val;
+ int err;
+
+ if (i2c_dev->hw->has_config_load_reg) {
+ reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_CONFIG_LOAD);
+ addr = i2c_dev->base + reg_offset;
+ i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
+ if (in_interrupt())
+ err = readl_poll_timeout_atomic(addr, val, val == 0,
+ 1000, I2C_CONFIG_LOAD_TIMEOUT);
+ else
+ err = readl_poll_timeout(addr, val, val == 0,
+ 1000, I2C_CONFIG_LOAD_TIMEOUT);
+
+ if (err) {
+ dev_warn(i2c_dev->dev,
+ "timeout waiting for config load\n");
+ return err;
+ }
+ }
+
+ return 0;
}
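
The in_interrupt() split matters because readl_poll_timeout() sleeps between reads (usleep_range()) and must not run in atomic context, while readl_poll_timeout_atomic() busy-waits with udelay(). tegra_i2c_disable_packet_mode() below reaches this helper from the ISR, hence the atomic variant.
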
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val;
- int err = 0;
+ int err;
u32 clk_divisor;
- unsigned long timeout = jiffies + HZ;
- err = tegra_i2c_clock_enable(i2c_dev);
+ err = pm_runtime_get_sync(i2c_dev->dev);
if (err < 0) {
- dev_err(i2c_dev->dev, "Clock enable failed %d\n", err);
+ dev_err(i2c_dev->dev, "runtime resume failed %d\n", err);
return err;
}
@@ -460,54 +511,59 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (!i2c_dev->is_dvc) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
+
sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL;
i2c_writel(i2c_dev, sl_cfg, I2C_SL_CNFG);
i2c_writel(i2c_dev, 0xfc, I2C_SL_ADDR1);
i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2);
-
}
val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
- if (tegra_i2c_flush_fifos(i2c_dev))
- err = -ETIMEDOUT;
+ err = tegra_i2c_flush_fifos(i2c_dev);
+ if (err)
+ goto err;
if (i2c_dev->is_multimaster_mode && i2c_dev->hw->has_slcg_override_reg)
i2c_writel(i2c_dev, I2C_MST_CORE_CLKEN_OVR, I2C_CLKEN_OVERRIDE);
- if (i2c_dev->hw->has_config_load_reg) {
- i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
- while (i2c_readl(i2c_dev, I2C_CONFIG_LOAD) != 0) {
- if (time_after(jiffies, timeout)) {
- dev_warn(i2c_dev->dev,
- "timeout waiting for config load\n");
- err = -ETIMEDOUT;
- goto err;
- }
- msleep(1);
- }
- }
+ err = tegra_i2c_wait_for_config_load(i2c_dev);
+ if (err)
+ goto err;
if (i2c_dev->irq_disabled) {
- i2c_dev->irq_disabled = 0;
+ i2c_dev->irq_disabled = false;
enable_irq(i2c_dev->irq);
}
err:
- tegra_i2c_clock_disable(i2c_dev);
+ pm_runtime_put(i2c_dev->dev);
return err;
}
+static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 cnfg;
+
+ cnfg = i2c_readl(i2c_dev, I2C_CNFG);
+ if (cnfg & I2C_CNFG_PACKET_MODE_EN)
+ i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
+
+ return tegra_i2c_wait_for_config_load(i2c_dev);
+}
+
static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
{
u32 status;
const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
struct tegra_i2c_dev *i2c_dev = dev_id;
+ unsigned long flags;
status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+ spin_lock_irqsave(&i2c_dev->xfer_lock, flags);
if (status == 0) {
dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
@@ -517,12 +573,13 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
if (!i2c_dev->irq_disabled) {
disable_irq_nosync(i2c_dev->irq);
- i2c_dev->irq_disabled = 1;
+ i2c_dev->irq_disabled = true;
}
goto err;
}
if (unlikely(status & status_err)) {
+ tegra_i2c_disable_packet_mode(i2c_dev);
if (status & I2C_INT_NO_ACK)
i2c_dev->msg_err |= I2C_ERR_NO_ACK;
if (status & I2C_INT_ARBITRATION_LOST)
@@ -552,7 +609,7 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
BUG_ON(i2c_dev->msg_buf_remaining);
complete(&i2c_dev->msg_complete);
}
- return IRQ_HANDLED;
+ goto done;
err:
/* An error occurred, mask all interrupts */
tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
@@ -563,6 +620,8 @@ err:
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
complete(&i2c_dev->msg_complete);
+done:
+ spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags);
return IRQ_HANDLED;
}
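
The new xfer_lock closes a race between the ISR and tegra_i2c_xfer_msg(): the error interrupts are unmasked and the packet header programmed under the same spinlock that now serializes the whole ISR body, so the ISR can neither observe a half-initialized transfer nor complete() it before the interrupt masks are final.
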
@@ -572,6 +631,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
u32 packet_header;
u32 int_mask;
unsigned long time_left;
+ unsigned long flags;
tegra_i2c_flush_fifos(i2c_dev);
@@ -584,6 +644,11 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
i2c_dev->msg_read = (msg->flags & I2C_M_RD);
reinit_completion(&i2c_dev->msg_complete);
+ spin_lock_irqsave(&i2c_dev->xfer_lock, flags);
+
+ int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ tegra_i2c_unmask_irq(i2c_dev, int_mask);
+
packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
PACKET_HEADER0_PROTOCOL_I2C |
(i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
@@ -613,14 +678,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (!(msg->flags & I2C_M_RD))
tegra_i2c_fill_tx_fifo(i2c_dev);
- int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
if (i2c_dev->hw->has_per_pkt_xfer_complete_irq)
int_mask |= I2C_INT_PACKET_XFER_COMPLETE;
if (msg->flags & I2C_M_RD)
int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
else if (i2c_dev->msg_buf_remaining)
int_mask |= I2C_INT_TX_FIFO_DATA_REQ;
+
tegra_i2c_unmask_irq(i2c_dev, int_mask);
+ spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags);
dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
i2c_readl(i2c_dev, I2C_INT_MASK));
@@ -643,9 +709,10 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
return 0;
/*
- * NACK interrupt is generated before the I2C controller generates the
- * STOP condition on the bus. So wait for 2 clock periods before resetting
- * the controller so that STOP condition has been delivered properly.
+ * NACK interrupt is generated before the I2C controller generates
+ * the STOP condition on the bus. So wait for 2 clock periods
+ * before resetting the controller so that the STOP condition has
+ * been delivered properly.
*/
if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
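
At the standard-mode rate of 100 kHz, for example, this works out to DIV_ROUND_UP(2 * 1000000, 100000) = 20 usec; at 400 kHz fast mode it is 5 usec.
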
@@ -670,14 +737,15 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (i2c_dev->is_suspended)
return -EBUSY;
- ret = tegra_i2c_clock_enable(i2c_dev);
+ ret = pm_runtime_get_sync(i2c_dev->dev);
if (ret < 0) {
- dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret);
+ dev_err(i2c_dev->dev, "runtime resume failed %d\n", ret);
return ret;
}
for (i = 0; i < num; i++) {
enum msg_end_type end_type = MSG_END_STOP;
+
if (i < (num - 1)) {
if (msgs[i + 1].flags & I2C_M_NOSTART)
end_type = MSG_END_CONTINUE;
@@ -688,7 +756,9 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (ret)
break;
}
- tegra_i2c_clock_disable(i2c_dev);
+
+ pm_runtime_put(i2c_dev->dev);
+
return ret ?: i;
}
@@ -825,7 +895,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
div_clk = devm_clk_get(&pdev->dev, "div-clk");
if (IS_ERR(div_clk)) {
- dev_err(&pdev->dev, "missing controller clock");
+ dev_err(&pdev->dev, "missing controller clock\n");
return PTR_ERR(div_clk);
}
@@ -843,27 +913,22 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->rst = devm_reset_control_get(&pdev->dev, "i2c");
if (IS_ERR(i2c_dev->rst)) {
- dev_err(&pdev->dev, "missing controller reset");
+ dev_err(&pdev->dev, "missing controller reset\n");
return PTR_ERR(i2c_dev->rst);
}
tegra_i2c_parse_dt(i2c_dev);
- i2c_dev->hw = &tegra20_i2c_hw;
-
- if (pdev->dev.of_node) {
- i2c_dev->hw = of_device_get_match_data(&pdev->dev);
- i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
- "nvidia,tegra20-i2c-dvc");
- } else if (pdev->id == 3) {
- i2c_dev->is_dvc = 1;
- }
+ i2c_dev->hw = of_device_get_match_data(&pdev->dev);
+ i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
+ "nvidia,tegra20-i2c-dvc");
init_completion(&i2c_dev->msg_complete);
+ spin_lock_init(&i2c_dev->xfer_lock);
if (!i2c_dev->hw->has_single_clk_source) {
fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
if (IS_ERR(fast_clk)) {
- dev_err(&pdev->dev, "missing fast clock");
+ dev_err(&pdev->dev, "missing fast clock\n");
return PTR_ERR(fast_clk);
}
i2c_dev->fast_clk = fast_clk;
@@ -900,18 +965,27 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto unprepare_fast_clk;
}
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_i2c_runtime_resume(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime resume failed\n");
+ goto unprepare_div_clk;
+ }
+ }
+
if (i2c_dev->is_multimaster_mode) {
ret = clk_enable(i2c_dev->div_clk);
if (ret < 0) {
dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
ret);
- goto unprepare_div_clk;
+ goto disable_rpm;
}
}
ret = tegra_i2c_init(i2c_dev);
if (ret) {
- dev_err(&pdev->dev, "Failed to initialize i2c controller");
+ dev_err(&pdev->dev, "Failed to initialize i2c controller\n");
goto disable_div_clk;
}
@@ -925,17 +999,15 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
i2c_dev->adapter.owner = THIS_MODULE;
i2c_dev->adapter.class = I2C_CLASS_DEPRECATED;
- strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter",
+ strlcpy(i2c_dev->adapter.name, dev_name(&pdev->dev),
sizeof(i2c_dev->adapter.name));
i2c_dev->adapter.dev.parent = &pdev->dev;
i2c_dev->adapter.nr = pdev->id;
i2c_dev->adapter.dev.of_node = pdev->dev.of_node;
ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+ if (ret)
goto disable_div_clk;
- }
return 0;
@@ -943,6 +1015,11 @@ disable_div_clk:
if (i2c_dev->is_multimaster_mode)
clk_disable(i2c_dev->div_clk);
+disable_rpm:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_i2c_runtime_suspend(&pdev->dev);
+
unprepare_div_clk:
clk_unprepare(i2c_dev->div_clk);
@@ -956,11 +1033,16 @@ unprepare_fast_clk:
static int tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
i2c_del_adapter(&i2c_dev->adapter);
if (i2c_dev->is_multimaster_mode)
clk_disable(i2c_dev->div_clk);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_i2c_runtime_suspend(&pdev->dev);
+
clk_unprepare(i2c_dev->div_clk);
if (!i2c_dev->hw->has_single_clk_source)
clk_unprepare(i2c_dev->fast_clk);
@@ -988,20 +1070,19 @@ static int tegra_i2c_resume(struct device *dev)
i2c_lock_adapter(&i2c_dev->adapter);
ret = tegra_i2c_init(i2c_dev);
-
- if (ret) {
- i2c_unlock_adapter(&i2c_dev->adapter);
- return ret;
- }
-
- i2c_dev->is_suspended = false;
+ if (!ret)
+ i2c_dev->is_suspended = false;
i2c_unlock_adapter(&i2c_dev->adapter);
- return 0;
+ return ret;
}
-static SIMPLE_DEV_PM_OPS(tegra_i2c_pm, tegra_i2c_suspend, tegra_i2c_resume);
+static const struct dev_pm_ops tegra_i2c_pm = {
+ SET_RUNTIME_PM_OPS(tegra_i2c_runtime_suspend, tegra_i2c_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume)
+};
#define TEGRA_I2C_PM (&tegra_i2c_pm)
#else
#define TEGRA_I2C_PM NULL
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
new file mode 100644
index 000000000000..bba5b429f69c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -0,0 +1,259 @@
+/*
+ * Cavium ThunderX i2c driver.
+ *
+ * Copyright (C) 2015,2016 Cavium Inc.
+ * Authors: Fred Martin <fmartin@caviumnetworks.com>
+ * Jan Glauber <jglauber@cavium.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+
+#include "i2c-octeon-core.h"
+
+#define DRV_NAME "i2c-thunderx"
+
+#define PCI_DEVICE_ID_THUNDER_TWSI 0xa012
+
+#define SYS_FREQ_DEFAULT 700000000
+
+#define TWSI_INT_ENA_W1C 0x1028
+#define TWSI_INT_ENA_W1S 0x1030
+
+/*
+ * Enable the CORE interrupt.
+ * The interrupt will be asserted when the SW_TWSI_EOP_TWSI_STAT
+ * register contains a state other than STAT_IDLE.
+ */
+static void thunder_i2c_int_enable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_writeq_flush(TWSI_INT_CORE_INT,
+ i2c->twsi_base + TWSI_INT_ENA_W1S);
+}
+
+/*
+ * Disable the CORE interrupt.
+ */
+static void thunder_i2c_int_disable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_writeq_flush(TWSI_INT_CORE_INT,
+ i2c->twsi_base + TWSI_INT_ENA_W1C);
+}
+
+static void thunder_i2c_hlc_int_enable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_writeq_flush(TWSI_INT_ST_INT | TWSI_INT_TS_INT,
+ i2c->twsi_base + TWSI_INT_ENA_W1S);
+}
+
+static void thunder_i2c_hlc_int_disable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_writeq_flush(TWSI_INT_ST_INT | TWSI_INT_TS_INT,
+ i2c->twsi_base + TWSI_INT_ENA_W1C);
+}
+
+static u32 thunderx_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
+}
+
+static const struct i2c_algorithm thunderx_i2c_algo = {
+ .master_xfer = octeon_i2c_xfer,
+ .functionality = thunderx_i2c_functionality,
+};
+
+static struct i2c_adapter thunderx_i2c_ops = {
+ .owner = THIS_MODULE,
+ .name = "ThunderX adapter",
+ .algo = &thunderx_i2c_algo,
+};
+
+static void thunder_i2c_clock_enable(struct device *dev, struct octeon_i2c *i2c)
+{
+ int ret;
+
+ i2c->clk = clk_get(dev, NULL);
+ if (IS_ERR(i2c->clk)) {
+ i2c->clk = NULL;
+ goto skip;
+ }
+
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ goto skip;
+ i2c->sys_freq = clk_get_rate(i2c->clk);
+
+skip:
+ if (!i2c->sys_freq)
+ i2c->sys_freq = SYS_FREQ_DEFAULT;
+}
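
The clock is treated as strictly optional here: if clk_get() fails, the enable fails, or the rate reads back as zero, sys_freq falls back to SYS_FREQ_DEFAULT (700 MHz). This keeps the driver usable on firmware, such as ACPI, that does not describe the TWSI input clock.
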
+
+static void thunder_i2c_clock_disable(struct device *dev, struct clk *clk)
+{
+ if (!clk)
+ return;
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+}
+
+static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
+ struct device_node *node)
+{
+ u32 type;
+
+ if (!node)
+ return -EINVAL;
+
+ i2c->alert_data.irq = irq_of_parse_and_map(node, 0);
+ if (!i2c->alert_data.irq)
+ return -EINVAL;
+
+ type = irqd_get_trigger_type(irq_get_irq_data(i2c->alert_data.irq));
+ i2c->alert_data.alert_edge_triggered =
+ (type & IRQ_TYPE_LEVEL_MASK) ? 1 : 0;
+
+ i2c->ara = i2c_setup_smbus_alert(&i2c->adap, &i2c->alert_data);
+ if (!i2c->ara)
+ return -ENODEV;
+ return 0;
+}
+
+static int thunder_i2c_smbus_setup(struct octeon_i2c *i2c,
+ struct device_node *node)
+{
+ /* TODO: ACPI support */
+ if (!acpi_disabled)
+ return -EOPNOTSUPP;
+
+ return thunder_i2c_smbus_setup_of(i2c, node);
+}
+
+static void thunder_i2c_smbus_remove(struct octeon_i2c *i2c)
+{
+ if (i2c->ara)
+ i2c_unregister_device(i2c->ara);
+}
+
+static int thunder_i2c_probe_pci(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct octeon_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ i2c->roff.sw_twsi = 0x1000;
+ i2c->roff.twsi_int = 0x1010;
+ i2c->roff.sw_twsi_ext = 0x1018;
+
+ i2c->dev = dev;
+ pci_set_drvdata(pdev, i2c);
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret)
+ return ret;
+
+ i2c->twsi_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!i2c->twsi_base)
+ return -EINVAL;
+
+ thunder_i2c_clock_enable(dev, i2c);
+ ret = device_property_read_u32(dev, "clock-frequency", &i2c->twsi_freq);
+ if (ret)
+ i2c->twsi_freq = 100000;
+
+ init_waitqueue_head(&i2c->queue);
+
+ i2c->int_enable = thunder_i2c_int_enable;
+ i2c->int_disable = thunder_i2c_int_disable;
+ i2c->hlc_int_enable = thunder_i2c_hlc_int_enable;
+ i2c->hlc_int_disable = thunder_i2c_hlc_int_disable;
+
+ ret = pci_enable_msix(pdev, &i2c->i2c_msix, 1);
+ if (ret)
+ goto error;
+
+ ret = devm_request_irq(dev, i2c->i2c_msix.vector, octeon_i2c_isr, 0,
+ DRV_NAME, i2c);
+ if (ret)
+ goto error;
+
+ ret = octeon_i2c_init_lowlevel(i2c);
+ if (ret)
+ goto error;
+
+ octeon_i2c_set_clock(i2c);
+
+ i2c->adap = thunderx_i2c_ops;
+ i2c->adap.retries = 5;
+ i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
+ i2c->adap.dev.parent = dev;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name),
+ "Cavium ThunderX i2c adapter at %s", dev_name(dev));
+ i2c_set_adapdata(&i2c->adap, i2c);
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret)
+ goto error;
+
+ dev_info(i2c->dev, "Probed. Set system clock to %u\n", i2c->sys_freq);
+
+ ret = thunder_i2c_smbus_setup(i2c, pdev->dev.of_node);
+ if (ret)
+ dev_info(dev, "SMBUS alert not active on this bus\n");
+
+ return 0;
+
+error:
+ thunder_i2c_clock_disable(dev, i2c->clk);
+ return ret;
+}
+
+static void thunder_i2c_remove_pci(struct pci_dev *pdev)
+{
+ struct octeon_i2c *i2c = pci_get_drvdata(pdev);
+
+ thunder_i2c_smbus_remove(i2c);
+ thunder_i2c_clock_disable(&pdev->dev, i2c->clk);
+ i2c_del_adapter(&i2c->adap);
+}
+
+static const struct pci_device_id thunder_i2c_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_TWSI) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, thunder_i2c_pci_id_table);
+
+static struct pci_driver thunder_i2c_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = thunder_i2c_pci_id_table,
+ .probe = thunder_i2c_probe_pci,
+ .remove = thunder_i2c_remove_pci,
+};
+
+module_pci_driver(thunder_i2c_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fred Martin <fmartin@caviumnetworks.com>");
+MODULE_DESCRIPTION("I2C-Bus adapter for Cavium ThunderX SOC");
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index aeead0d27d10..db9105e52c79 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -348,14 +349,19 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
dev_dbg(&adap->dev, "complete\n");
if (unlikely(priv->flags & UNIPHIER_FI2C_DEFER_STOP_COMP)) {
- u32 status = readl(priv->membase + UNIPHIER_FI2C_SR);
-
- if (!(status & UNIPHIER_FI2C_SR_STS) ||
- status & UNIPHIER_FI2C_SR_BB) {
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(priv->membase + UNIPHIER_FI2C_SR,
+ status,
+ (status & UNIPHIER_FI2C_SR_STS) &&
+ !(status & UNIPHIER_FI2C_SR_BB),
+ 1, 20);
+ if (ret) {
dev_err(&adap->dev,
"stop condition was not completed.\n");
uniphier_fi2c_recover(priv);
- return -EBUSY;
+ return ret;
}
}
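
Instead of sampling UNIPHIER_FI2C_SR once, the driver now gives the STOP condition up to 20 usec (polled at 1 usec intervals) to complete, i.e. for the STS bit to assert while BB deasserts, and propagates the poll's -ETIMEDOUT rather than the previous blanket -EBUSY.
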
@@ -455,54 +461,25 @@ static struct i2c_bus_recovery_info uniphier_fi2c_bus_recovery_info = {
.unprepare_recovery = uniphier_fi2c_unprepare_recovery,
};
-static int uniphier_fi2c_clk_init(struct device *dev,
- struct uniphier_fi2c_priv *priv)
+static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv,
+ u32 bus_speed, unsigned long clk_rate)
{
- struct device_node *np = dev->of_node;
- unsigned long clk_rate;
- u32 bus_speed, clk_count;
- int ret;
-
- if (of_property_read_u32(np, "clock-frequency", &bus_speed))
- bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
-
- if (!bus_speed) {
- dev_err(dev, "clock-frequency should not be zero\n");
- return -EINVAL;
- }
-
- if (bus_speed > UNIPHIER_FI2C_MAX_SPEED)
- bus_speed = UNIPHIER_FI2C_MAX_SPEED;
-
- /* Get input clk rate through clk driver */
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to get clock\n");
- return PTR_ERR(priv->clk);
- }
+ u32 tmp;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
+ tmp = readl(priv->membase + UNIPHIER_FI2C_CR);
+ tmp |= UNIPHIER_FI2C_CR_MST;
+ writel(tmp, priv->membase + UNIPHIER_FI2C_CR);
uniphier_fi2c_reset(priv);
- clk_count = clk_rate / bus_speed;
+ tmp = clk_rate / bus_speed;
- writel(clk_count, priv->membase + UNIPHIER_FI2C_CYC);
- writel(clk_count / 2, priv->membase + UNIPHIER_FI2C_LCTL);
- writel(clk_count / 2, priv->membase + UNIPHIER_FI2C_SSUT);
- writel(clk_count / 16, priv->membase + UNIPHIER_FI2C_DSUT);
+ writel(tmp, priv->membase + UNIPHIER_FI2C_CYC);
+ writel(tmp / 2, priv->membase + UNIPHIER_FI2C_LCTL);
+ writel(tmp / 2, priv->membase + UNIPHIER_FI2C_SSUT);
+ writel(tmp / 16, priv->membase + UNIPHIER_FI2C_DSUT);
uniphier_fi2c_prepare_operation(priv);
-
- return 0;
}
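
As a worked example of the divider setup: with a hypothetical 50 MHz input clock and a 400 kHz bus, CYC = 125, LCTL = SSUT = 62 and DSUT = 7 clock cycles.
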
static int uniphier_fi2c_probe(struct platform_device *pdev)
@@ -510,8 +487,9 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uniphier_fi2c_priv *priv;
struct resource *regs;
- int irq;
- int ret;
+ u32 bus_speed;
+ unsigned long clk_rate;
+ int irq, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -528,6 +506,31 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
return irq;
}
+ if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
+ bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
+
+ if (!bus_speed || bus_speed > UNIPHIER_FI2C_MAX_SPEED) {
+ dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
+ return -EINVAL;
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ clk_rate = clk_get_rate(priv->clk);
+ if (!clk_rate) {
+ dev_err(dev, "input clock rate should not be zero\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
init_completion(&priv->comp);
priv->adap.owner = THIS_MODULE;
priv->adap.algo = &uniphier_fi2c_algo;
@@ -538,9 +541,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
- ret = uniphier_fi2c_clk_init(dev, priv);
- if (ret)
- goto err;
+ uniphier_fi2c_hw_init(priv, bus_speed, clk_rate);
ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0,
pdev->name, priv);
@@ -550,11 +551,6 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
}
ret = i2c_add_adapter(&priv->adap);
- if (ret) {
- dev_err(dev, "failed to add I2C adapter\n");
- goto err;
- }
-
err:
if (ret)
clk_disable_unprepare(priv->clk);
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 475a5eb514e2..56e92af46ddc 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -316,50 +316,15 @@ static struct i2c_bus_recovery_info uniphier_i2c_bus_recovery_info = {
.unprepare_recovery = uniphier_i2c_unprepare_recovery,
};
-static int uniphier_i2c_clk_init(struct device *dev,
- struct uniphier_i2c_priv *priv)
+static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv,
+ u32 bus_speed, unsigned long clk_rate)
{
- struct device_node *np = dev->of_node;
- unsigned long clk_rate;
- u32 bus_speed;
- int ret;
-
- if (of_property_read_u32(np, "clock-frequency", &bus_speed))
- bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
-
- if (!bus_speed) {
- dev_err(dev, "clock-frequency should not be zero\n");
- return -EINVAL;
- }
-
- if (bus_speed > UNIPHIER_I2C_MAX_SPEED)
- bus_speed = UNIPHIER_I2C_MAX_SPEED;
-
- /* Get input clk rate through clk driver */
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to get clock\n");
- return PTR_ERR(priv->clk);
- }
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(priv->clk);
- if (!clk_rate) {
- dev_err(dev, "input clock rate should not be zero\n");
- return -EINVAL;
- }
-
uniphier_i2c_reset(priv, true);
writel((clk_rate / bus_speed / 2 << 16) | (clk_rate / bus_speed),
priv->membase + UNIPHIER_I2C_CLK);
uniphier_i2c_reset(priv, false);
-
- return 0;
}
static int uniphier_i2c_probe(struct platform_device *pdev)
@@ -367,8 +332,9 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uniphier_i2c_priv *priv;
struct resource *regs;
- int irq;
- int ret;
+ u32 bus_speed;
+ unsigned long clk_rate;
+ int irq, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -385,6 +351,31 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
return irq;
}
+ if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed))
+ bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
+
+ if (!bus_speed || bus_speed > UNIPHIER_I2C_MAX_SPEED) {
+ dev_err(dev, "invalid clock-frequency %d\n", bus_speed);
+ return -EINVAL;
+ }
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ clk_rate = clk_get_rate(priv->clk);
+ if (!clk_rate) {
+ dev_err(dev, "input clock rate should not be zero\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
init_completion(&priv->comp);
priv->adap.owner = THIS_MODULE;
priv->adap.algo = &uniphier_i2c_algo;
@@ -395,9 +386,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&priv->adap, priv);
platform_set_drvdata(pdev, priv);
- ret = uniphier_i2c_clk_init(dev, priv);
- if (ret)
- goto err;
+ uniphier_i2c_hw_init(priv, bus_speed, clk_rate);
ret = devm_request_irq(dev, irq, uniphier_i2c_interrupt, 0, pdev->name,
priv);
@@ -407,11 +396,6 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
}
ret = i2c_add_adapter(&priv->adap);
- if (ret) {
- dev_err(dev, "failed to add I2C adapter\n");
- goto err;
- }
-
err:
if (ret)
clk_disable_unprepare(priv->clk);
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index e1e3a85596c5..fbd0fd59f312 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -432,10 +432,8 @@ static int wmt_i2c_probe(struct platform_device *pdev)
}
err = i2c_add_adapter(adap);
- if (err) {
- dev_err(&pdev->dev, "failed to add adapter\n");
+ if (err)
return err;
- }
platform_set_drvdata(pdev, i2c_dev);
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 4233f5695352..05cf192ef1ac 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
struct mbox_chan *mbox_chan;
struct mbox_client mbox_client;
struct completion rd_complete;
- u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
};
@@ -418,7 +418,6 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adapter, ctx);
rc = i2c_add_adapter(adapter);
if (rc) {
- dev_err(&pdev->dev, "Adapter registeration failed\n");
mbox_free_channel(ctx->mbox_chan);
return rc;
}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 74f54f2f471f..66bce3b311a1 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -804,7 +804,6 @@ static int xiic_i2c_probe(struct platform_device *pdev)
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
if (ret) {
- dev_err(&pdev->dev, "Failed to add adapter\n");
xiic_deinit(i2c);
goto err_clk_dis;
}
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 55a7bef1b2e1..e29ff37a43bd 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -400,10 +400,8 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&priv->adapter, priv);
err = i2c_add_adapter(&priv->adapter);
- if (err) {
- dev_err(&pdev->dev, "failed to add I2C adapter!\n");
+ if (err)
return err;
- }
platform_set_drvdata(pdev, priv);
dev_dbg(&pdev->dev, "I2C bus:%d added\n", priv->adapter.nr);
@@ -428,6 +426,7 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = {
{ .compatible = "netlogic,xlp980-i2c", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 613c3a4f2c51..ad17d88d8573 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -358,6 +358,7 @@ static const struct of_device_id xlr_i2c_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids);
static int xlr_i2c_probe(struct platform_device *pdev)
{
@@ -432,10 +433,8 @@ static int xlr_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&priv->adap, priv);
ret = i2c_add_numbered_adapter(&priv->adap);
- if (ret < 0) {
- dev_err(&priv->adap.dev, "Failed to add i2c bus.\n");
+ if (ret < 0)
return ret;
- }
platform_set_drvdata(pdev, priv);
dev_info(&priv->adap.dev, "Added I2C Bus.\n");
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index da3a02ef4a31..b432b64e307a 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -88,7 +88,7 @@ void i2c_transfer_trace_unreg(void)
}
#if defined(CONFIG_ACPI)
-struct acpi_i2c_handler_data {
+struct i2c_acpi_handler_data {
struct acpi_connection_info info;
struct i2c_adapter *adapter;
};
@@ -103,15 +103,18 @@ struct gsb_buffer {
};
} __packed;
-struct acpi_i2c_lookup {
+struct i2c_acpi_lookup {
struct i2c_board_info *info;
acpi_handle adapter_handle;
acpi_handle device_handle;
+ acpi_handle search_handle;
+ u32 speed;
+ u32 min_speed;
};
-static int acpi_i2c_fill_info(struct acpi_resource *ares, void *data)
+static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data)
{
- struct acpi_i2c_lookup *lookup = data;
+ struct i2c_acpi_lookup *lookup = data;
struct i2c_board_info *info = lookup->info;
struct acpi_resource_i2c_serialbus *sb;
acpi_status status;
@@ -130,19 +133,18 @@ static int acpi_i2c_fill_info(struct acpi_resource *ares, void *data)
return 1;
info->addr = sb->slave_address;
+ lookup->speed = sb->connection_speed;
if (sb->access_mode == ACPI_I2C_10BIT_MODE)
info->flags |= I2C_CLIENT_TEN;
return 1;
}
-static int acpi_i2c_get_info(struct acpi_device *adev,
- struct i2c_board_info *info,
- acpi_handle *adapter_handle)
+static int i2c_acpi_do_lookup(struct acpi_device *adev,
+ struct i2c_acpi_lookup *lookup)
{
+ struct i2c_board_info *info = lookup->info;
struct list_head resource_list;
- struct resource_entry *entry;
- struct acpi_i2c_lookup lookup;
int ret;
if (acpi_bus_get_status(adev) || !adev->status.present ||
@@ -150,24 +152,58 @@ static int acpi_i2c_get_info(struct acpi_device *adev,
return -EINVAL;
memset(info, 0, sizeof(*info));
- info->fwnode = acpi_fwnode_handle(adev);
-
- memset(&lookup, 0, sizeof(lookup));
- lookup.device_handle = acpi_device_handle(adev);
- lookup.info = info;
+ lookup->device_handle = acpi_device_handle(adev);
/* Look up for I2cSerialBus resource */
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_i2c_fill_info, &lookup);
+ i2c_acpi_fill_info, lookup);
acpi_dev_free_resource_list(&resource_list);
if (ret < 0 || !info->addr)
return -EINVAL;
- *adapter_handle = lookup.adapter_handle;
+ return 0;
+}
+
+static int i2c_acpi_get_info(struct acpi_device *adev,
+ struct i2c_board_info *info,
+ struct i2c_adapter *adapter,
+ acpi_handle *adapter_handle)
+{
+ struct list_head resource_list;
+ struct resource_entry *entry;
+ struct i2c_acpi_lookup lookup;
+ int ret;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.info = info;
+
+ ret = i2c_acpi_do_lookup(adev, &lookup);
+ if (ret)
+ return ret;
+
+ if (adapter) {
+ /* The adapter must match the one in I2cSerialBus() connector */
+ if (ACPI_HANDLE(&adapter->dev) != lookup.adapter_handle)
+ return -ENODEV;
+ } else {
+ struct acpi_device *adapter_adev;
+
+ /* The adapter must be present */
+ if (acpi_bus_get_device(lookup.adapter_handle, &adapter_adev))
+ return -ENODEV;
+ if (acpi_bus_get_status(adapter_adev) ||
+ !adapter_adev->status.present)
+ return -ENODEV;
+ }
+
+ info->fwnode = acpi_fwnode_handle(adev);
+ if (adapter_handle)
+ *adapter_handle = lookup.adapter_handle;
/* Then fill IRQ number if any */
+ INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (ret < 0)
return -EINVAL;
@@ -186,7 +222,7 @@ static int acpi_i2c_get_info(struct acpi_device *adev,
return 0;
}
-static void acpi_i2c_register_device(struct i2c_adapter *adapter,
+static void i2c_acpi_register_device(struct i2c_adapter *adapter,
struct acpi_device *adev,
struct i2c_board_info *info)
{
@@ -201,39 +237,35 @@ static void acpi_i2c_register_device(struct i2c_adapter *adapter,
}
}
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
void *data, void **return_value)
{
struct i2c_adapter *adapter = data;
struct acpi_device *adev;
- acpi_handle adapter_handle;
struct i2c_board_info info;
if (acpi_bus_get_device(handle, &adev))
return AE_OK;
- if (acpi_i2c_get_info(adev, &info, &adapter_handle))
+ if (i2c_acpi_get_info(adev, &info, adapter, NULL))
return AE_OK;
- if (adapter_handle != ACPI_HANDLE(&adapter->dev))
- return AE_OK;
-
- acpi_i2c_register_device(adapter, adev, &info);
+ i2c_acpi_register_device(adapter, adev, &info);
return AE_OK;
}
-#define ACPI_I2C_MAX_SCAN_DEPTH 32
+#define I2C_ACPI_MAX_SCAN_DEPTH 32
/**
- * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * i2c_acpi_register_devices - enumerate I2C slave devices behind adapter
* @adap: pointer to adapter
*
* Enumerate all I2C slave devices behind this adapter by walking the ACPI
* namespace. When a device is found it will be added to the Linux device
* model and bound to the corresponding ACPI handle.
*/
-static void acpi_i2c_register_devices(struct i2c_adapter *adap)
+static void i2c_acpi_register_devices(struct i2c_adapter *adap)
{
acpi_status status;
@@ -241,14 +273,72 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
return;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_I2C_MAX_SCAN_DEPTH,
- acpi_i2c_add_device, NULL,
+ I2C_ACPI_MAX_SCAN_DEPTH,
+ i2c_acpi_add_device, NULL,
adap, NULL);
if (ACPI_FAILURE(status))
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
-static int acpi_i2c_match_adapter(struct device *dev, void *data)
+static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_acpi_lookup *lookup = data;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (i2c_acpi_do_lookup(adev, lookup))
+ return AE_OK;
+
+ if (lookup->search_handle != lookup->adapter_handle)
+ return AE_OK;
+
+ if (lookup->speed <= lookup->min_speed)
+ lookup->min_speed = lookup->speed;
+
+ return AE_OK;
+}
+
+/**
+ * i2c_acpi_find_bus_speed - find I2C bus speed from ACPI
+ * @dev: The device owning the bus
+ *
+ * Find the I2C bus speed by walking the ACPI namespace for all I2C slave
+ * devices connected to this bus and using the speed of the slowest device.
+ *
+ * Returns the speed in Hz or zero
+ */
+u32 i2c_acpi_find_bus_speed(struct device *dev)
+{
+ struct i2c_acpi_lookup lookup;
+ struct i2c_board_info dummy;
+ acpi_status status;
+
+ if (!has_acpi_companion(dev))
+ return 0;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.search_handle = ACPI_HANDLE(dev);
+ lookup.min_speed = UINT_MAX;
+ lookup.info = &dummy;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ I2C_ACPI_MAX_SCAN_DEPTH,
+ i2c_acpi_lookup_speed, NULL,
+ &lookup, NULL);
+
+ if (ACPI_FAILURE(status)) {
+ dev_warn(dev, "unable to find I2C bus speed from ACPI\n");
+ return 0;
+ }
+
+ return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
+}
+EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
+
+static int i2c_acpi_match_adapter(struct device *dev, void *data)
{
struct i2c_adapter *adapter = i2c_verify_adapter(dev);
@@ -258,29 +348,29 @@ static int acpi_i2c_match_adapter(struct device *dev, void *data)
return ACPI_HANDLE(dev) == (acpi_handle)data;
}
-static int acpi_i2c_match_device(struct device *dev, void *data)
+static int i2c_acpi_match_device(struct device *dev, void *data)
{
return ACPI_COMPANION(dev) == data;
}
-static struct i2c_adapter *acpi_i2c_find_adapter_by_handle(acpi_handle handle)
+static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
{
struct device *dev;
dev = bus_find_device(&i2c_bus_type, NULL, handle,
- acpi_i2c_match_adapter);
+ i2c_acpi_match_adapter);
return dev ? i2c_verify_adapter(dev) : NULL;
}
-static struct i2c_client *acpi_i2c_find_client_by_adev(struct acpi_device *adev)
+static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = bus_find_device(&i2c_bus_type, NULL, adev, acpi_i2c_match_device);
+ dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
return dev ? i2c_verify_client(dev) : NULL;
}
-static int acpi_i2c_notify(struct notifier_block *nb, unsigned long value,
+static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
void *arg)
{
struct acpi_device *adev = arg;
@@ -291,20 +381,20 @@ static int acpi_i2c_notify(struct notifier_block *nb, unsigned long value,
switch (value) {
case ACPI_RECONFIG_DEVICE_ADD:
- if (acpi_i2c_get_info(adev, &info, &adapter_handle))
+ if (i2c_acpi_get_info(adev, &info, NULL, &adapter_handle))
break;
- adapter = acpi_i2c_find_adapter_by_handle(adapter_handle);
+ adapter = i2c_acpi_find_adapter_by_handle(adapter_handle);
if (!adapter)
break;
- acpi_i2c_register_device(adapter, adev, &info);
+ i2c_acpi_register_device(adapter, adev, &info);
break;
case ACPI_RECONFIG_DEVICE_REMOVE:
if (!acpi_device_enumerated(adev))
break;
- client = acpi_i2c_find_client_by_adev(adev);
+ client = i2c_acpi_find_client_by_adev(adev);
if (!client)
break;
@@ -317,10 +407,10 @@ static int acpi_i2c_notify(struct notifier_block *nb, unsigned long value,
}
static struct notifier_block i2c_acpi_notifier = {
- .notifier_call = acpi_i2c_notify,
+ .notifier_call = i2c_acpi_notify,
};
#else /* CONFIG_ACPI */
-static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
extern struct notifier_block i2c_acpi_notifier;
#endif /* CONFIG_ACPI */
@@ -386,12 +476,12 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
}
static acpi_status
-acpi_i2c_space_handler(u32 function, acpi_physical_address command,
+i2c_acpi_space_handler(u32 function, acpi_physical_address command,
u32 bits, u64 *value64,
void *handler_context, void *region_context)
{
struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
- struct acpi_i2c_handler_data *data = handler_context;
+ struct i2c_acpi_handler_data *data = handler_context;
struct acpi_connection_info *info = &data->info;
struct acpi_resource_i2c_serialbus *sb;
struct i2c_adapter *adapter = data->adapter;
@@ -510,10 +600,10 @@ acpi_i2c_space_handler(u32 function, acpi_physical_address command,
}
-static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+static int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
{
acpi_handle handle;
- struct acpi_i2c_handler_data *data;
+ struct i2c_acpi_handler_data *data;
acpi_status status;
if (!adapter->dev.parent)
@@ -524,7 +614,7 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
if (!handle)
return -ENODEV;
- data = kzalloc(sizeof(struct acpi_i2c_handler_data),
+ data = kzalloc(sizeof(struct i2c_acpi_handler_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -538,7 +628,7 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
status = acpi_install_address_space_handler(handle,
ACPI_ADR_SPACE_GSBUS,
- &acpi_i2c_space_handler,
+ &i2c_acpi_space_handler,
NULL,
data);
if (ACPI_FAILURE(status)) {
@@ -552,10 +642,10 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
return 0;
}
-static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+static void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter)
{
acpi_handle handle;
- struct acpi_i2c_handler_data *data;
+ struct i2c_acpi_handler_data *data;
acpi_status status;
if (!adapter->dev.parent)
@@ -568,7 +658,7 @@ static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
acpi_remove_address_space_handler(handle,
ACPI_ADR_SPACE_GSBUS,
- &acpi_i2c_space_handler);
+ &i2c_acpi_space_handler);
status = acpi_bus_get_private_data(handle, (void **)&data);
if (ACPI_SUCCESS(status))
@@ -577,10 +667,10 @@ static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
acpi_bus_detach_private_data(handle);
}
#else /* CONFIG_ACPI_I2C_OPREGION */
-static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+static inline void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter)
{ }
-static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+static inline int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
{ return 0; }
#endif /* CONFIG_ACPI_I2C_OPREGION */
@@ -853,7 +943,7 @@ static int i2c_device_probe(struct device *dev)
status = 0;
if (status)
- dev_warn(&client->dev, "failed to set up wakeup irq");
+ dev_warn(&client->dev, "failed to set up wakeup irq\n");
}
dev_dbg(dev, "probe\n");
@@ -1208,8 +1298,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
return client;
out_err:
- dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
- "(%d)\n", client->name, client->addr, status);
+ dev_err(&adap->dev,
+ "Failed to register i2c client %s at 0x%02x (%d)\n",
+ client->name, client->addr, status);
out_err_silent:
kfree(client);
return NULL;
@@ -1335,21 +1426,19 @@ static void i2c_adapter_dev_release(struct device *dev)
complete(&adap->dev_released);
}
-/*
- * This function is only needed for mutex_lock_nested, so it is never
- * called unless locking correctness checking is enabled. Thus we
- * make it inline to avoid a compiler warning. That's what gcc ends up
- * doing anyway.
- */
-static inline unsigned int i2c_adapter_depth(struct i2c_adapter *adapter)
+unsigned int i2c_adapter_depth(struct i2c_adapter *adapter)
{
unsigned int depth = 0;
while ((adapter = i2c_parent_is_i2c_adapter(adapter)))
depth++;
+ WARN_ONCE(depth >= MAX_LOCKDEP_SUBCLASSES,
+ "adapter depth exceeds lockdep subclass limit\n");
+
return depth;
}
+EXPORT_SYMBOL_GPL(i2c_adapter_depth);
/*
* Let users instantiate I2C devices through sysfs. This can be used when
@@ -1591,7 +1680,8 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
static void of_i2c_register_devices(struct i2c_adapter *adap)
{
- struct device_node *node;
+ struct device_node *bus, *node;
+ struct i2c_client *client;
/* Only register child devices if the adapter has a node pointer set */
if (!adap->dev.of_node)
@@ -1599,11 +1689,24 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
- for_each_available_child_of_node(adap->dev.of_node, node) {
+ bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus");
+ if (!bus)
+ bus = of_node_get(adap->dev.of_node);
+
+ for_each_available_child_of_node(bus, node) {
if (of_node_test_and_set_flag(node, OF_POPULATED))
continue;
- of_i2c_register_device(adap, node);
+
+ client = of_i2c_register_device(adap, node);
+ if (IS_ERR(client)) {
+ dev_warn(&adap->dev,
+ "Failed to create I2C device for %s\n",
+ node->full_name);
+ of_node_clear_flag(node, OF_POPULATED);
+ }
}
+
+ of_node_put(bus);
}
static int of_dev_node_match(struct device *dev, void *data)
@@ -1678,8 +1781,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
if (driver->attach_adapter) {
dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n",
driver->driver.name);
- dev_warn(&adap->dev, "Please use another way to instantiate "
- "your i2c_client\n");
+ dev_warn(&adap->dev,
+ "Please use another way to instantiate your i2c_client\n");
/* We ignore the return code; if it fails, too bad */
driver->attach_adapter(adap);
}
@@ -1691,6 +1794,12 @@ static int __process_new_adapter(struct device_driver *d, void *data)
return i2c_do_add_adapter(to_i2c_driver(d), data);
}
+static const struct i2c_lock_operations i2c_adapter_lock_ops = {
+ .lock_bus = i2c_adapter_lock_bus,
+ .trylock_bus = i2c_adapter_trylock_bus,
+ .unlock_bus = i2c_adapter_unlock_bus,
+};
+
static int i2c_register_adapter(struct i2c_adapter *adap)
{
int res = -EINVAL;
@@ -1710,11 +1819,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
goto out_list;
}
- if (!adap->lock_bus) {
- adap->lock_bus = i2c_adapter_lock_bus;
- adap->trylock_bus = i2c_adapter_trylock_bus;
- adap->unlock_bus = i2c_adapter_unlock_bus;
- }
+ if (!adap->lock_ops)
+ adap->lock_ops = &i2c_adapter_lock_ops;
rt_mutex_init(&adap->bus_lock);
rt_mutex_init(&adap->mux_lock);
@@ -1752,8 +1858,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
- acpi_i2c_register_devices(adap);
- acpi_i2c_install_space_handler(adap);
+ i2c_acpi_register_devices(adap);
+ i2c_acpi_install_space_handler(adap);
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
@@ -1925,7 +2031,7 @@ void i2c_del_adapter(struct i2c_adapter *adap)
return;
}
- acpi_i2c_remove_space_handler(adap);
+ i2c_acpi_remove_space_handler(adap);
/* Tell drivers about this removal */
mutex_lock(&core_lock);
bus_for_each_drv(&i2c_bus_type, NULL, adap,
@@ -2073,6 +2179,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -2083,7 +2190,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
pr_debug("driver [%s] registered\n", driver->driver.name);
- INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
i2c_for_each_dev(driver, __process_new_driver);
@@ -2201,6 +2307,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%s'\n",
rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(client));
}
break;
@@ -2451,15 +2558,16 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (adap->algo->master_xfer) {
#ifdef DEBUG
for (ret = 0; ret < num; ret++) {
- dev_dbg(&adap->dev, "master_xfer[%d] %c, addr=0x%02x, "
- "len=%d%s\n", ret, (msgs[ret].flags & I2C_M_RD)
- ? 'R' : 'W', msgs[ret].addr, msgs[ret].len,
+ dev_dbg(&adap->dev,
+ "master_xfer[%d] %c, addr=0x%02x, len=%d%s\n",
+ ret, (msgs[ret].flags & I2C_M_RD) ? 'R' : 'W',
+ msgs[ret].addr, msgs[ret].len,
(msgs[ret].flags & I2C_M_RECV_LEN) ? "+" : "");
}
#endif
if (in_atomic() || irqs_disabled()) {
- ret = adap->trylock_bus(adap, I2C_LOCK_SEGMENT);
+ ret = i2c_trylock_bus(adap, I2C_LOCK_SEGMENT);
if (!ret)
/* I2C activity is ongoing. */
return -EAGAIN;
@@ -2619,9 +2727,9 @@ static int i2c_detect_address(struct i2c_client *temp_client,
/* Consistency check */
if (info.type[0] == '\0') {
- dev_err(&adapter->dev, "%s detection function provided "
- "no name for 0x%x\n", driver->driver.name,
- addr);
+ dev_err(&adapter->dev,
+ "%s detection function provided no name for 0x%x\n",
+ driver->driver.name, addr);
} else {
struct i2c_client *client;
@@ -2659,9 +2767,8 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
/* Warn that the adapter lost class based instantiation */
if (adapter->class == I2C_CLASS_DEPRECATED) {
dev_dbg(&adapter->dev,
- "This adapter dropped support for I2C classes and "
- "won't auto-detect %s devices anymore. If you need it, check "
- "'Documentation/i2c/instantiating-devices' for alternatives.\n",
+ "This adapter dropped support for I2C classes and won't auto-detect %s devices anymore. "
+ "If you need it, check 'Documentation/i2c/instantiating-devices' for alternatives.\n",
driver->driver.name);
return 0;
}
@@ -2677,8 +2784,9 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
temp_client->adapter = adapter;
for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
- dev_dbg(&adapter->dev, "found normal entry for adapter %d, "
- "addr 0x%02x\n", adap_id, address_list[i]);
+ dev_dbg(&adapter->dev,
+ "found normal entry for adapter %d, addr 0x%02x\n",
+ adap_id, address_list[i]);
temp_client->addr = address_list[i];
err = i2c_detect_address(temp_client, driver);
if (unlikely(err))
@@ -2710,15 +2818,16 @@ i2c_new_probed_device(struct i2c_adapter *adap,
for (i = 0; addr_list[i] != I2C_CLIENT_END; i++) {
/* Check address validity */
if (i2c_check_7bit_addr_validity_strict(addr_list[i]) < 0) {
- dev_warn(&adap->dev, "Invalid 7-bit address "
- "0x%02x\n", addr_list[i]);
+ dev_warn(&adap->dev, "Invalid 7-bit address 0x%02x\n",
+ addr_list[i]);
continue;
}
/* Check address availability (7 bit, no need to encode flags) */
if (i2c_check_addr_busy(adap, addr_list[i])) {
- dev_dbg(&adap->dev, "Address 0x%02x already in "
- "use, not probing\n", addr_list[i]);
+ dev_dbg(&adap->dev,
+ "Address 0x%02x already in use, not probing\n",
+ addr_list[i]);
continue;
}
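The i2c_acpi_find_bus_speed() helper exported above lets an adapter driver cap its bus speed at the slowest ACPI-declared slave. A hedged usage sketch, assuming the declaration lands in <linux/i2c.h> alongside this change; my_pick_bus_speed and my_default_speed are hypothetical:

#include <linux/i2c.h>

/* Called from an adapter driver's probe() to pick the safest bus speed */
static u32 my_pick_bus_speed(struct device *dev, u32 my_default_speed)
{
	u32 acpi_speed = i2c_acpi_find_bus_speed(dev);

	/* zero means ACPI gave no answer; keep the driver default then */
	return acpi_speed ? acpi_speed : my_default_speed;
}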
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 8eee98634cda..83768e85a919 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -159,7 +159,7 @@ static int i2c_mux_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
return 0; /* mux_lock not locked, failure */
if (!(flags & I2C_LOCK_ROOT_ADAPTER))
return 1; /* we only want mux_lock, success */
- if (parent->trylock_bus(parent, flags))
+ if (i2c_trylock_bus(parent, flags))
return 1; /* parent locked too, success */
rt_mutex_unlock(&parent->mux_lock);
return 0; /* parent not locked, failure */
@@ -193,7 +193,7 @@ static int i2c_parent_trylock_bus(struct i2c_adapter *adapter,
if (!rt_mutex_trylock(&parent->mux_lock))
return 0; /* mux_lock not locked, failure */
- if (parent->trylock_bus(parent, flags))
+ if (i2c_trylock_bus(parent, flags))
return 1; /* parent locked too, success */
rt_mutex_unlock(&parent->mux_lock);
return 0; /* parent not locked, failure */
@@ -255,6 +255,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
muxc->dev = dev;
if (flags & I2C_MUX_LOCKED)
muxc->mux_locked = true;
+ if (flags & I2C_MUX_ARBITRATOR)
+ muxc->arbitrator = true;
+ if (flags & I2C_MUX_GATE)
+ muxc->gate = true;
muxc->select = select;
muxc->deselect = deselect;
muxc->max_adapters = max_adapters;
@@ -263,6 +267,18 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
}
EXPORT_SYMBOL_GPL(i2c_mux_alloc);
+static const struct i2c_lock_operations i2c_mux_lock_ops = {
+ .lock_bus = i2c_mux_lock_bus,
+ .trylock_bus = i2c_mux_trylock_bus,
+ .unlock_bus = i2c_mux_unlock_bus,
+};
+
+static const struct i2c_lock_operations i2c_parent_lock_ops = {
+ .lock_bus = i2c_parent_lock_bus,
+ .trylock_bus = i2c_parent_trylock_bus,
+ .unlock_bus = i2c_parent_unlock_bus,
+};
+
int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
u32 force_nr, u32 chan_id,
unsigned int class)
@@ -312,15 +328,10 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
priv->adap.retries = parent->retries;
priv->adap.timeout = parent->timeout;
priv->adap.quirks = parent->quirks;
- if (muxc->mux_locked) {
- priv->adap.lock_bus = i2c_mux_lock_bus;
- priv->adap.trylock_bus = i2c_mux_trylock_bus;
- priv->adap.unlock_bus = i2c_mux_unlock_bus;
- } else {
- priv->adap.lock_bus = i2c_parent_lock_bus;
- priv->adap.trylock_bus = i2c_parent_trylock_bus;
- priv->adap.unlock_bus = i2c_parent_unlock_bus;
- }
+ if (muxc->mux_locked)
+ priv->adap.lock_ops = &i2c_mux_lock_ops;
+ else
+ priv->adap.lock_ops = &i2c_parent_lock_ops;
/* Sanity check on class */
if (i2c_mux_parent_classes(parent) & class)
@@ -335,18 +346,42 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
* nothing if !CONFIG_OF.
*/
if (muxc->dev->of_node) {
- struct device_node *child;
+ struct device_node *dev_node = muxc->dev->of_node;
+ struct device_node *mux_node, *child = NULL;
u32 reg;
- for_each_child_of_node(muxc->dev->of_node, child) {
- ret = of_property_read_u32(child, "reg", &reg);
- if (ret)
- continue;
- if (chan_id == reg) {
- priv->adap.dev.of_node = child;
- break;
+ if (muxc->arbitrator)
+ mux_node = of_get_child_by_name(dev_node, "i2c-arb");
+ else if (muxc->gate)
+ mux_node = of_get_child_by_name(dev_node, "i2c-gate");
+ else
+ mux_node = of_get_child_by_name(dev_node, "i2c-mux");
+
+ if (mux_node) {
+ /* A "reg" property indicates an old-style DT entry */
+ if (!of_property_read_u32(mux_node, "reg", &reg)) {
+ of_node_put(mux_node);
+ mux_node = NULL;
+ }
+ }
+
+ if (!mux_node)
+ mux_node = of_node_get(dev_node);
+ else if (muxc->arbitrator || muxc->gate)
+ child = of_node_get(mux_node);
+
+ if (!child) {
+ for_each_child_of_node(mux_node, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret)
+ continue;
+ if (chan_id == reg)
+ break;
}
}
+
+ priv->adap.dev.of_node = child;
+ of_node_put(mux_node);
}
/*
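With the new I2C_MUX_ARBITRATOR and I2C_MUX_GATE flags, i2c_mux_alloc() records the topology type, and the OF code above then looks for an "i2c-arb", "i2c-gate" or "i2c-mux" subnode when wiring child adapters. A minimal sketch of a gate-style caller; my_gate_probe() and the select/deselect callbacks are hypothetical:

#include <linux/i2c-mux.h>

static int my_gate_probe(struct device *dev, struct i2c_adapter *parent,
			 int (*select)(struct i2c_mux_core *, u32),
			 int (*deselect)(struct i2c_mux_core *, u32))
{
	struct i2c_mux_core *muxc;

	/* one downstream channel, no private data, marked as a gate */
	muxc = i2c_mux_alloc(parent, dev, 1, 0, I2C_MUX_GATE,
			     select, deselect);
	if (!muxc)
		return -ENOMEM;

	/* force_nr = 0 (dynamic adapter number), chan_id = 0, class = 0 */
	return i2c_mux_add_adapter(muxc, 0, 0, 0);
}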
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index e280c8ecc0b5..96de9ce5669b 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
config I2C_MUX_REG
tristate "Register-based I2C multiplexer"
+ depends on HAS_IOMEM
help
If you say yes to this option, support will be included for a
register based I2C multiplexer. This driver provides access to
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index a90bbc4037dd..86fc2d4c081b 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -130,7 +130,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
return -EINVAL;
}
- muxc = i2c_mux_alloc(NULL, dev, 1, sizeof(*arb), 0,
+ muxc = i2c_mux_alloc(NULL, dev, 1, sizeof(*arb), I2C_MUX_ARBITRATOR,
i2c_arbitrator_select, i2c_arbitrator_deselect);
if (!muxc)
return -ENOMEM;
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index b3893f6282ba..3e6fe1760d82 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -69,10 +69,28 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
goto err_with_revert;
}
- p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
+ /*
+	 * Check if there are pinctrl states at all. Note: we can't use
+	 * devm_pinctrl_get_select() because we need to distinguish between
+	 * -ENODEV from devm_pinctrl_get() and from pinctrl_lookup_state().
+ */
+ p = devm_pinctrl_get(adap->dev.parent);
if (IS_ERR(p)) {
ret = PTR_ERR(p);
- goto err_with_put;
+		/* continue if there are simply no pinctrl states (e.g. i2c-gpio), otherwise exit */
+ if (ret != -ENODEV)
+ goto err_with_put;
+ } else {
+ /* there are states. check and use them */
+ struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name);
+
+ if (IS_ERR(s)) {
+ ret = PTR_ERR(s);
+ goto err_with_put;
+ }
+ ret = pinctrl_select_state(p, s);
+ if (ret < 0)
+ goto err_with_put;
}
priv->chan[new_chan].parent_adap = adap;
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 3cb8af635db5..4ea7e691afc7 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -85,6 +85,13 @@ static const struct i2c_device_id pca9541_id[] = {
MODULE_DEVICE_TABLE(i2c, pca9541_id);
+#ifdef CONFIG_OF
+static const struct of_device_id pca9541_of_match[] = {
+ { .compatible = "nxp,pca9541" },
+ {}
+};
+#endif
+
/*
* Write to chip register. Don't use i2c_transfer()/i2c_smbus_xfer()
* as they will try to lock the adapter a second time.
@@ -349,7 +356,8 @@ static int pca9541_probe(struct i2c_client *client,
force = 0;
if (pdata)
force = pdata->modes[0].adap_id;
- muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data), 0,
+ muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data),
+ I2C_MUX_ARBITRATOR,
pca9541_select_chan, pca9541_release_chan);
if (!muxc)
return -ENOMEM;
@@ -382,6 +390,7 @@ static int pca9541_remove(struct i2c_client *client)
static struct i2c_driver pca9541_driver = {
.driver = {
.name = "pca9541",
+ .of_match_table = of_match_ptr(pca9541_of_match),
},
.probe = pca9541_probe,
.remove = pca9541_remove,
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 3278ebf1cc5c..8bc3d36d2837 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -42,6 +42,7 @@
#include <linux/i2c/pca954x.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -58,14 +59,6 @@ enum pca_type {
pca_9548,
};
-struct pca954x {
- enum pca_type type;
-
- u8 last_chan; /* last register value */
- u8 deselect;
- struct i2c_client *client;
-};
-
struct chip_desc {
u8 nchans;
u8 enable; /* used for muxes only */
@@ -75,6 +68,14 @@ struct chip_desc {
} muxtype;
};
+struct pca954x {
+ const struct chip_desc *chip;
+
+ u8 last_chan; /* last register value */
+ u8 deselect;
+ struct i2c_client *client;
+};
+
/* Provide specs for the PCA954x types we know about */
static const struct chip_desc chips[] = {
[pca_9540] = {
@@ -119,6 +120,20 @@ static const struct i2c_device_id pca954x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca954x_id);
+#ifdef CONFIG_OF
+static const struct of_device_id pca954x_of_match[] = {
+ { .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
+ { .compatible = "nxp,pca9542", .data = &chips[pca_9542] },
+ { .compatible = "nxp,pca9543", .data = &chips[pca_9543] },
+ { .compatible = "nxp,pca9544", .data = &chips[pca_9544] },
+ { .compatible = "nxp,pca9545", .data = &chips[pca_9545] },
+ { .compatible = "nxp,pca9546", .data = &chips[pca_9546] },
+ { .compatible = "nxp,pca9547", .data = &chips[pca_9547] },
+ { .compatible = "nxp,pca9548", .data = &chips[pca_9548] },
+ {}
+};
+#endif
+
/* Write to mux register. Don't use i2c_transfer()/i2c_smbus_xfer()
for this as they will try to lock adapter a second time */
static int pca954x_reg_write(struct i2c_adapter *adap,
@@ -151,7 +166,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
- const struct chip_desc *chip = &chips[data->type];
+ const struct chip_desc *chip = data->chip;
u8 regval;
int ret = 0;
@@ -197,6 +212,7 @@ static int pca954x_probe(struct i2c_client *client,
int num, force, class;
struct i2c_mux_core *muxc;
struct pca954x *data;
+ const struct of_device_id *match;
int ret;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
@@ -226,14 +242,19 @@ static int pca954x_probe(struct i2c_client *client,
return -ENODEV;
}
- data->type = id->driver_data;
+ match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
+ if (match)
+ data->chip = of_device_get_match_data(&client->dev);
+ else
+ data->chip = &chips[id->driver_data];
+
data->last_chan = 0; /* force the first selection */
idle_disconnect_dt = of_node &&
of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
/* Now create an adapter for each channel */
- for (num = 0; num < chips[data->type].nchans; num++) {
+ for (num = 0; num < data->chip->nchans; num++) {
bool idle_disconnect_pd = false;
force = 0; /* dynamic adap number */
@@ -247,9 +268,9 @@ static int pca954x_probe(struct i2c_client *client,
/* discard unconfigured channels */
break;
idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
- data->deselect |= (idle_disconnect_pd
- || idle_disconnect_dt) << num;
}
+ data->deselect |= (idle_disconnect_pd ||
+ idle_disconnect_dt) << num;
ret = i2c_mux_add_adapter(muxc, force, num, class);
@@ -263,7 +284,7 @@ static int pca954x_probe(struct i2c_client *client,
dev_info(&client->dev,
"registered %d multiplexed busses for I2C %s %s\n",
- num, chips[data->type].muxtype == pca954x_ismux
+ num, data->chip->muxtype == pca954x_ismux
? "mux" : "switch", client->name);
return 0;
@@ -299,6 +320,7 @@ static struct i2c_driver pca954x_driver = {
.driver = {
.name = "pca954x",
.pm = &pca954x_pm,
+ .of_match_table = of_match_ptr(pca954x_of_match),
},
.probe = pca954x_probe,
.remove = pca954x_remove,
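The pca954x probe change above prefers OF match data over the legacy i2c_device_id table index. A hedged sketch of that lookup order, with hypothetical names (my_get_chip_data, my_of_match):

#include <linux/i2c.h>
#include <linux/of_device.h>

static const void *my_get_chip_data(struct i2c_client *client,
				    const struct i2c_device_id *id,
				    const struct of_device_id *my_of_match,
				    const void *chips, size_t elem_size)
{
	/* DT-instantiated device: the match table carries the chip data */
	if (of_match_device(my_of_match, &client->dev))
		return of_device_get_match_data(&client->dev);

	/* legacy path: index the chip array by the i2c_device_id payload */
	return (const char *)chips + id->driver_data * elem_size;
}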
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 67ec58f9ef99..4466a2f969d7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -863,8 +863,8 @@ static struct cpuidle_state dnv_cstates[] = {
*
* Must be called under local_irq_disable().
*/
-static int intel_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int intel_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index];
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index da3fb069ec5c..ce69048c88e9 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -743,8 +743,8 @@ static int st_accel_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = adata->current_fullscale->gain;
+ *val = adata->current_fullscale->gain / 1000000;
+ *val2 = adata->current_fullscale->gain % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = adata->odr;
@@ -763,9 +763,13 @@ static int st_accel_write_raw(struct iio_dev *indio_dev,
int err;
switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- err = st_sensors_set_fullscale_by_gain(indio_dev, val2);
+ case IIO_CHAN_INFO_SCALE: {
+ int gain;
+
+ gain = val * 1000000 + val2;
+ err = st_sensors_set_fullscale_by_gain(indio_dev, gain);
break;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
if (val2)
return -EINVAL;
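The IIO_CHAN_INFO_SCALE fix above splits the gain, which st_accel stores in micro-units, into the integer and fractional parts IIO_VAL_INT_PLUS_MICRO expects. A standalone arithmetic check (the gain value, 1 g in m/s^2, is illustrative):

#include <stdio.h>

int main(void)
{
	int gain = 9806650;		/* 9.806650 in micro-units */
	int val = gain / 1000000;	/* integer part: 9 */
	int val2 = gain % 1000000;	/* micro part: 806650 */

	printf("%d.%06d\n", val, val2);	/* prints 9.806650 */
	return 0;
}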
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7edcf3238620..99c051490eff 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -437,6 +437,8 @@ config STX104
config TI_ADC081C
tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADC081C,
ADC101C and ADC121C ADC chips.
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index bd321b305a0a..ef761a508630 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -213,13 +213,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
struct device *dev = &data->client->dev;
int ret;
unsigned int val;
+ __be16 rval;
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
if (ret)
return ret;
- dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
- be16_to_cpu(val) % 100);
+ val = be16_to_cpu(rval);
+ dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
if (ret)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index dc33c1dd5191..b5beea53d6f6 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -30,26 +30,26 @@ static struct {
u32 usage_id;
int unit; /* 0 for default others from HID sensor spec */
int scale_val0; /* scale, whole number */
- int scale_val1; /* scale, fraction in micros */
+ int scale_val1; /* scale, fraction in nanos */
} unit_conversion[] = {
- {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650},
+ {HID_USAGE_SENSOR_ACCEL_3D, 0, 9, 806650000},
{HID_USAGE_SENSOR_ACCEL_3D,
HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0},
{HID_USAGE_SENSOR_ACCEL_3D,
- HID_USAGE_SENSOR_UNITS_G, 9, 806650},
+ HID_USAGE_SENSOR_UNITS_G, 9, 806650000},
- {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293},
{HID_USAGE_SENSOR_GYRO_3D,
HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0},
{HID_USAGE_SENSOR_GYRO_3D,
- HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453},
+ HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND, 0, 17453293},
- {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000},
+ {HID_USAGE_SENSOR_COMPASS_3D, 0, 0, 1000000},
{HID_USAGE_SENSOR_COMPASS_3D, HID_USAGE_SENSOR_UNITS_GAUSS, 1, 0},
- {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453},
+ {HID_USAGE_SENSOR_INCLINOMETER_3D, 0, 0, 17453293},
{HID_USAGE_SENSOR_INCLINOMETER_3D,
- HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453},
+ HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
{HID_USAGE_SENSOR_INCLINOMETER_3D,
HID_USAGE_SENSOR_UNITS_RADIANS, 1, 0},
@@ -57,7 +57,7 @@ static struct {
{HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
{HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
- {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
+ {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000000},
};
static int pow_10(unsigned power)
@@ -266,15 +266,15 @@ EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
/*
 * This function applies the unit exponent to the scale.
* For example:
- * 9.806650 ->exp:2-> val0[980]val1[665000]
- * 9.000806 ->exp:2-> val0[900]val1[80600]
- * 0.174535 ->exp:2-> val0[17]val1[453500]
- * 1.001745 ->exp:0-> val0[1]val1[1745]
- * 1.001745 ->exp:2-> val0[100]val1[174500]
- * 1.001745 ->exp:4-> val0[10017]val1[450000]
- * 9.806650 ->exp:-2-> val0[0]val1[98066]
+ * 9.806650000 ->exp:2-> val0[980]val1[665000000]
+ * 9.000806000 ->exp:2-> val0[900]val1[80600000]
+ * 0.174535293 ->exp:2-> val0[17]val1[453529300]
+ * 1.001745329 ->exp:0-> val0[1]val1[1745329]
+ * 1.001745329 ->exp:2-> val0[100]val1[174532900]
+ * 1.001745329 ->exp:4-> val0[10017]val1[453290000]
+ * 9.806650000 ->exp:-2-> val0[0]val1[98066500]
*/
-static void adjust_exponent_micro(int *val0, int *val1, int scale0,
+static void adjust_exponent_nano(int *val0, int *val1, int scale0,
int scale1, int exp)
{
int i;
@@ -285,32 +285,32 @@ static void adjust_exponent_micro(int *val0, int *val1, int scale0,
if (exp > 0) {
*val0 = scale0 * pow_10(exp);
res = 0;
- if (exp > 6) {
+ if (exp > 9) {
*val1 = 0;
return;
}
for (i = 0; i < exp; ++i) {
- x = scale1 / pow_10(5 - i);
+ x = scale1 / pow_10(8 - i);
res += (pow_10(exp - 1 - i) * x);
- scale1 = scale1 % pow_10(5 - i);
+ scale1 = scale1 % pow_10(8 - i);
}
*val0 += res;
*val1 = scale1 * pow_10(exp);
} else if (exp < 0) {
exp = abs(exp);
- if (exp > 6) {
+ if (exp > 9) {
*val0 = *val1 = 0;
return;
}
*val0 = scale0 / pow_10(exp);
rem = scale0 % pow_10(exp);
res = 0;
- for (i = 0; i < (6 - exp); ++i) {
- x = scale1 / pow_10(5 - i);
- res += (pow_10(5 - exp - i) * x);
- scale1 = scale1 % pow_10(5 - i);
+ for (i = 0; i < (9 - exp); ++i) {
+ x = scale1 / pow_10(8 - i);
+ res += (pow_10(8 - exp - i) * x);
+ scale1 = scale1 % pow_10(8 - i);
}
- *val1 = rem * pow_10(6 - exp) + res;
+ *val1 = rem * pow_10(9 - exp) + res;
} else {
*val0 = scale0;
*val1 = scale1;
@@ -332,14 +332,14 @@ int hid_sensor_format_scale(u32 usage_id,
unit_conversion[i].unit == attr_info->units) {
exp = hid_sensor_convert_exponent(
attr_info->unit_expo);
- adjust_exponent_micro(val0, val1,
+ adjust_exponent_nano(val0, val1,
unit_conversion[i].scale_val0,
unit_conversion[i].scale_val1, exp);
break;
}
}
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_NANO;
}
EXPORT_SYMBOL(hid_sensor_format_scale);
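Moving the tables from micro to nano matters for constants like the degree-to-radian factor pi/180 = 0.0174532925...: micro precision truncates it to 0.017453 while nano keeps 0.017453293. A quick standalone check of the rounding error, using the gyro entries from the table above:

#include <stdio.h>

int main(void)
{
	double exact = 3.141592653589793 / 180.0;	/* pi/180 */
	double micro = 17453e-6;	/* old micro-precision table entry */
	double nano  = 17453293e-9;	/* new nano-precision table entry */

	/* micro error ~2.9e-7, nano error ~5e-10 */
	printf("err(micro)=%e err(nano)=%e\n", exact - micro, exact - nano);
	return 0;
}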
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 285a64a589d7..975a1f19f747 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -612,7 +612,7 @@ EXPORT_SYMBOL(st_sensors_sysfs_sampling_frequency_avail);
ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i, len = 0;
+ int i, len = 0, q, r;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
@@ -621,8 +621,10 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
if (sdata->sensor_settings->fs.fs_avl[i].num == 0)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- sdata->sensor_settings->fs.fs_avl[i].gain);
+ q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000;
+ r = sdata->sensor_settings->fs.fs_avl[i].gain % 1000000;
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
}
mutex_unlock(&indio_dev->mlock);
buf[len - 1] = '\n';
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index b98b9d94d184..a97e802ca523 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -335,6 +335,7 @@ static struct platform_driver hid_dev_rot_platform_driver = {
.id_table = hid_dev_rot_ids,
.driver = {
.name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
},
.probe = hid_dev_rot_probe,
.remove = hid_dev_rot_remove,
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 39dd2026ccc9..f962f31a5eb2 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -123,22 +123,26 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
{
unsigned int storage_bytes = data->chip->read_size;
unsigned int shift = chan->scan_type.shift + (chan->address * 8);
- unsigned int buf;
+ __be16 buf16;
+ __be32 buf32;
int ret;
- ret = spi_read(data->spi, (void *) &buf, storage_bytes);
- if (ret)
- return ret;
-
switch (storage_bytes) {
case 2:
- *val = be16_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf16, storage_bytes);
+ *val = be16_to_cpu(buf16);
break;
case 4:
- *val = be32_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
+ *val = be32_to_cpu(buf32);
break;
+ default:
+ ret = -EINVAL;
}
+ if (ret)
+ return ret;
+
/* check to be sure this is a valid reading */
if (*val & data->chip->status_bit)
return -EINVAL;
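The maxim_thermocouple fix above reads into correctly sized big-endian buffers before converting; reading a 16-bit word into a plain unsigned int and byte-swapping part of it mixes in uninitialized bytes. A hedged sketch of the corrected pattern (my_read_be16 is hypothetical):

#include <linux/spi/spi.h>

static int my_read_be16(struct spi_device *spi, int *val)
{
	__be16 raw;	/* exactly the width the device sends */
	int ret;

	ret = spi_read(spi, &raw, sizeof(raw));
	if (ret)
		return ret;

	*val = be16_to_cpu(raw);	/* convert after a full-width read */
	return 0;
}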
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index e9b7dc037ff8..fb3fb89640e5 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -74,6 +74,7 @@ source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/hw/ocrdma/Kconfig"
source "drivers/infiniband/hw/usnic/Kconfig"
+source "drivers/infiniband/hw/hns/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
@@ -88,4 +89,6 @@ source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
+source "drivers/infiniband/hw/qedr/Kconfig"
+
endif # INFINIBAND
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 1374541a4528..0f58f46dbad7 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel);
struct resolve_cb_context {
struct rdma_dev_addr *addr;
struct completion comp;
+ int status;
};
static void resolve_cb(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context)
{
- memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
- rdma_dev_addr));
+ if (!status)
+ memcpy(((struct resolve_cb_context *)context)->addr,
+ addr, sizeof(struct rdma_dev_addr));
+ ((struct resolve_cb_context *)context)->status = status;
complete(&((struct resolve_cb_context *)context)->comp);
}
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
wait_for_completion(&ctx.comp);
+ ret = ctx.status;
+ if (ret)
+ return ret;
+
memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
if (!dev)
@@ -800,7 +807,7 @@ static struct notifier_block nb = {
int addr_init(void)
{
- addr_wq = create_singlethread_workqueue("ib_addr");
+ addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
if (!addr_wq)
return -ENOMEM;
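resolve_cb() now records the resolver status in the completion context instead of unconditionally copying the address, so rdma_addr_find_l2_eth_by_grh() can fail cleanly on resolution errors. A minimal sketch of the pattern with hypothetical names (my_cb_ctx, my_cb, my_wait):

#include <linux/completion.h>

struct my_cb_ctx {
	struct completion comp;
	int status;
};

static void my_cb(int status, void *context)
{
	struct my_cb_ctx *ctx = context;

	ctx->status = status;	/* publish the result first */
	complete(&ctx->comp);	/* then wake the waiter */
}

static int my_wait(struct my_cb_ctx *ctx)
{
	wait_for_completion(&ctx->comp);
	return ctx->status;	/* propagate the callback's verdict */
}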
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c99525512b34..71c7c4c328ef 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -80,6 +80,8 @@ static struct ib_cm {
__be32 random_id_operand;
struct list_head timewait_list;
struct workqueue_struct *wq;
+ /* Sync on cm change port state */
+ spinlock_t state_lock;
} cm;
/* Counter indexes ordered by attribute ID */
@@ -161,6 +163,8 @@ struct cm_port {
struct ib_mad_agent *mad_agent;
struct kobject port_obj;
u8 port_num;
+ struct list_head cm_priv_prim_list;
+ struct list_head cm_priv_altr_list;
struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};
@@ -241,6 +245,12 @@ struct cm_id_private {
u8 service_timeout;
u8 target_ack_delay;
+ struct list_head prim_list;
+ struct list_head altr_list;
+ /* Indicates that the send port mad is registered and av is set */
+ int prim_send_port_not_ready;
+ int altr_send_port_not_ready;
+
struct list_head work_list;
atomic_t work_count;
};
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
struct ib_mad_agent *mad_agent;
struct ib_mad_send_buf *m;
struct ib_ah *ah;
+ struct cm_av *av;
+ unsigned long flags, flags2;
+ int ret = 0;
+ /* don't let the port to be released till the agent is down */
+ spin_lock_irqsave(&cm.state_lock, flags2);
+ spin_lock_irqsave(&cm.lock, flags);
+ if (!cm_id_priv->prim_send_port_not_ready)
+ av = &cm_id_priv->av;
+ else if (!cm_id_priv->altr_send_port_not_ready &&
+ (cm_id_priv->alt_av.port))
+ av = &cm_id_priv->alt_av;
+ else {
+ pr_info("%s: not valid CM id\n", __func__);
+ ret = -ENODEV;
+ spin_unlock_irqrestore(&cm.lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&cm.lock, flags);
+ /* Make sure the port haven't released the mad yet */
mad_agent = cm_id_priv->av.port->mad_agent;
- ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
- if (IS_ERR(ah))
- return PTR_ERR(ah);
+ if (!mad_agent) {
+ pr_info("%s: not a valid MAD agent\n", __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+ ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
+ if (IS_ERR(ah)) {
+ ret = PTR_ERR(ah);
+ goto out;
+ }
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
- cm_id_priv->av.pkey_index,
+ av->pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) {
ib_destroy_ah(ah);
- return PTR_ERR(m);
+ ret = PTR_ERR(m);
+ goto out;
}
/* Timeout set by caller if response is expected. */
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
atomic_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
- return 0;
+
+out:
+ spin_unlock_irqrestore(&cm.state_lock, flags2);
+ return ret;
}
static int cm_alloc_response_msg(struct cm_port *port,
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
+ struct cm_id_private *cm_id_priv)
{
struct cm_device *cm_dev;
struct cm_port *port = NULL;
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
&av->ah_attr);
av->timeout = path->packet_life_time + 1;
- return 0;
+ spin_lock_irqsave(&cm.lock, flags);
+ if (&cm_id_priv->av == av)
+ list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
+ else if (&cm_id_priv->alt_av == av)
+ list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
+ else
+ ret = -EINVAL;
+
+ spin_unlock_irqrestore(&cm.lock, flags);
+
+ return ret;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
spin_lock_init(&cm_id_priv->lock);
init_completion(&cm_id_priv->comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
+ INIT_LIST_HEAD(&cm_id_priv->prim_list);
+ INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
atomic_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
@@ -892,6 +945,15 @@ retest:
break;
}
+ spin_lock_irq(&cm.lock);
+ if (!list_empty(&cm_id_priv->altr_list) &&
+ (!cm_id_priv->altr_send_port_not_ready))
+ list_del(&cm_id_priv->altr_list);
+ if (!list_empty(&cm_id_priv->prim_list) &&
+ (!cm_id_priv->prim_send_port_not_ready))
+ list_del(&cm_id_priv->prim_list);
+ spin_unlock_irq(&cm.lock);
+
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+ cm_id_priv);
if (ret)
goto error1;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path,
- &cm_id_priv->alt_av);
+ &cm_id_priv->alt_av, cm_id_priv);
if (ret)
goto error1;
}
@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
dev_put(gid_attr.ndev);
}
work->path[0].gid_type = gid_attr.gid_type;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
+ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ cm_id_priv);
}
if (ret) {
int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
goto rejected;
}
if (req_msg->alt_local_lid) {
- ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
+ ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
+ cm_id_priv);
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
goto out;
}
- ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+ ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+ cm_id_priv);
if (ret)
goto out;
cm_id_priv->alt_av.timeout =
@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
- cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
+ cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+ cm_id_priv);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
+ ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
if (ret)
goto out;
@@ -3468,7 +3535,9 @@ out:
static int cm_migrate(struct ib_cm_id *cm_id)
{
struct cm_id_private *cm_id_priv;
+ struct cm_av tmp_av;
unsigned long flags;
+ int tmp_send_port_not_ready;
int ret = 0;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
(cm_id->lap_state == IB_CM_LAP_UNINIT ||
cm_id->lap_state == IB_CM_LAP_IDLE)) {
cm_id->lap_state = IB_CM_LAP_IDLE;
+ /* Swap address vector */
+ tmp_av = cm_id_priv->av;
cm_id_priv->av = cm_id_priv->alt_av;
+ cm_id_priv->alt_av = tmp_av;
+ /* Swap port send ready state */
+ tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
+ cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
+ cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
} else
ret = -EINVAL;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
port->cm_dev = cm_dev;
port->port_num = i;
+ INIT_LIST_HEAD(&port->cm_priv_prim_list);
+ INIT_LIST_HEAD(&port->cm_priv_altr_list);
+
ret = cm_create_port_fs(port);
if (ret)
goto error1;
@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
struct cm_device *cm_dev = client_data;
struct cm_port *port;
+ struct cm_id_private *cm_id_priv;
+ struct ib_mad_agent *cur_mad_agent;
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_CM_SUP
};
@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+ /* Mark all the cm_id's as not valid */
+ spin_lock_irq(&cm.lock);
+ list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
+ cm_id_priv->altr_send_port_not_ready = 1;
+ list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
+ cm_id_priv->prim_send_port_not_ready = 1;
+ spin_unlock_irq(&cm.lock);
/*
 * We flush the queue here after going_down is set; this verifies
 * that no new work will be queued in the recv handler. After that
 * we can call unregister_mad_agent.
*/
flush_workqueue(cm.wq);
- ib_unregister_mad_agent(port->mad_agent);
+ spin_lock_irq(&cm.state_lock);
+ cur_mad_agent = port->mad_agent;
+ port->mad_agent = NULL;
+ spin_unlock_irq(&cm.state_lock);
+ ib_unregister_mad_agent(cur_mad_agent);
cm_remove_port_fs(port);
}
+
device_unregister(cm_dev->device);
kfree(cm_dev);
}
@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
INIT_LIST_HEAD(&cm.device_list);
rwlock_init(&cm.device_lock);
spin_lock_init(&cm.lock);
+ spin_lock_init(&cm.state_lock);
cm.listen_service_table = RB_ROOT;
cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
cm.remote_id_table = RB_ROOT;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5f65a78b27c9..2a6fc47a1dfb 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1094,47 +1094,47 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
}
}
-static void cma_save_ip4_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip4_info(struct sockaddr_in *src_addr,
+ struct sockaddr_in *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in *ip4;
-
if (src_addr) {
- ip4 = (struct sockaddr_in *)src_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
- ip4->sin_port = local_port;
+ *src_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
+ .sin_port = local_port,
+ };
}
if (dst_addr) {
- ip4 = (struct sockaddr_in *)dst_addr;
- ip4->sin_family = AF_INET;
- ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
- ip4->sin_port = hdr->port;
+ *dst_addr = (struct sockaddr_in) {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = hdr->src_addr.ip4.addr,
+ .sin_port = hdr->port,
+ };
}
}
-static void cma_save_ip6_info(struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
+static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr,
struct cma_hdr *hdr,
__be16 local_port)
{
- struct sockaddr_in6 *ip6;
-
if (src_addr) {
- ip6 = (struct sockaddr_in6 *)src_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->dst_addr.ip6;
- ip6->sin6_port = local_port;
+ *src_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->dst_addr.ip6,
+ .sin6_port = local_port,
+ };
}
if (dst_addr) {
- ip6 = (struct sockaddr_in6 *)dst_addr;
- ip6->sin6_family = AF_INET6;
- ip6->sin6_addr = hdr->src_addr.ip6;
- ip6->sin6_port = hdr->port;
+ *dst_addr = (struct sockaddr_in6) {
+ .sin6_family = AF_INET6,
+ .sin6_addr = hdr->src_addr.ip6,
+ .sin6_port = hdr->port,
+ };
}
}
@@ -1159,10 +1159,12 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
switch (cma_get_ip_ver(hdr)) {
case 4:
- cma_save_ip4_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip4_info((struct sockaddr_in *)src_addr,
+ (struct sockaddr_in *)dst_addr, hdr, port);
break;
case 6:
- cma_save_ip6_info(src_addr, dst_addr, hdr, port);
+ cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
+ (struct sockaddr_in6 *)dst_addr, hdr, port);
break;
default:
return -EAFNOSUPPORT;
@@ -2436,6 +2438,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+ unsigned long supported_gids,
+ enum ib_gid_type default_gid)
+{
+ if ((network_type == RDMA_NETWORK_IPV4 ||
+ network_type == RDMA_NETWORK_IPV6) &&
+ test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+ return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+ return default_gid;
+}
+
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2461,6 +2475,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->num_paths = 1;
if (addr->dev_addr.bound_dev_if) {
+ unsigned long supported_gids;
+
ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
if (!ndev) {
ret = -ENODEV;
@@ -2484,7 +2500,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->path_rec->net = &init_net;
route->path_rec->ifindex = ndev->ifindex;
- route->path_rec->gid_type = id_priv->gid_type;
+ supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+ id_priv->id.port_num);
+ route->path_rec->gid_type =
+ cma_route_gid_type(addr->dev_addr.network,
+ supported_gids,
+ id_priv->gid_type);
}
if (!ndev) {
ret = -ENODEV;
@@ -4369,7 +4390,7 @@ static int __init cma_init(void)
{
int ret;
- cma_wq = create_singlethread_workqueue("rdma_cm");
+ cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
if (!cma_wq)
return -ENOMEM;
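The cma.c rewrite above replaces cast-and-assign field stores with a single compound-literal assignment per sockaddr. Besides dropping the casts, assigning a designated-initializer literal zero-fills every member that is not named, so fields such as sin_zero cannot carry stale contents. A standalone userspace illustration of the idiom; the port is an arbitrary example value (4791 happens to be the RoCEv2 UDP port):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	struct sockaddr_in sa;

	/* Unnamed members (e.g. sin_zero) are implicitly zeroed. */
	sa = (struct sockaddr_in) {
		.sin_family = AF_INET,
		.sin_port   = htons(4791),
		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
	};
	printf("%s:%u\n", inet_ntoa(sa.sin_addr), (unsigned)ntohs(sa.sin_port));
	return 0;
}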
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 357624f8b9d3..5495e22839a7 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -1160,7 +1160,7 @@ static int __init iw_cm_init(void)
if (ret)
pr_err("iw_cm: couldn't register netlink callbacks\n");
- iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
+ iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
if (!iwcm_wq)
return -ENOMEM;
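This is one instance of a conversion applied across the series (ib_cm, rdma_cm, iw_cm, ib_mad, ib_mcast, ib_sa, ucma, iw_cxgb3, iw_cxgb4): create_singlethread_workqueue() becomes alloc_ordered_workqueue(name, WQ_MEM_RECLAIM). Both forms execute at most one work item at a time, so ordering is preserved; WQ_MEM_RECLAIM additionally guarantees a rescuer thread, so the queue keeps making forward progress under memory pressure (several of these queues sit on I/O completion and reclaim paths). A minimal sketch of the new form:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	/* Ordered (one item at a time) and usable on the reclaim path. */
	my_wq = alloc_ordered_workqueue("my_wq", WQ_MEM_RECLAIM);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}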
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 2d49228f28b2..40cbd6bdb73b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3160,7 +3160,7 @@ static int ib_mad_port_open(struct ib_device *device,
goto error3;
}
- port_priv->pd = ib_alloc_pd(device);
+ port_priv->pd = ib_alloc_pd(device, 0);
if (IS_ERR(port_priv->pd)) {
dev_err(&device->dev, "Couldn't create ib_mad PD\n");
ret = PTR_ERR(port_priv->pd);
@@ -3177,7 +3177,7 @@ static int ib_mad_port_open(struct ib_device *device,
goto error7;
snprintf(name, sizeof name, "ib_mad%d", port_num);
- port_priv->wq = create_singlethread_workqueue(name);
+ port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!port_priv->wq) {
ret = -ENOMEM;
goto error8;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 51c79b2fb0b8..e51b739f6ea3 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -873,7 +873,7 @@ int mcast_init(void)
{
int ret;
- mcast_wq = create_singlethread_workqueue("ib_mcast");
+ mcast_wq = alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
if (!mcast_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index b9bf7aa055e7..81b742ca1639 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -2015,7 +2015,7 @@ int ib_sa_init(void)
goto err2;
}
- ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
+ ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
if (!ib_nl_wq) {
ret = -ENOMEM;
goto err3;
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 15defefecb4f..c1fb545e8d78 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1193,7 +1193,7 @@ static ssize_t set_node_desc(struct device *device,
if (!dev->modify_device)
return -EIO;
- memcpy(desc.node_desc, buf, min_t(int, count, 64));
+ memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2825ece91d3c..9520154f1d7c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1638,7 +1638,8 @@ static int ucma_open(struct inode *inode, struct file *filp)
if (!file)
return -ENOMEM;
- file->close_wq = create_singlethread_workqueue("ucma_close_id");
+ file->close_wq = alloc_ordered_workqueue("ucma_close_id",
+ WQ_MEM_RECLAIM);
if (!file->close_wq) {
kfree(file);
return -ENOMEM;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c68746ce6624..84b4eff90395 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
+ unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -174,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
cur_base = addr & PAGE_MASK;
- if (npages == 0) {
+ if (npages == 0 || npages > UINT_MAX) {
ret = -EINVAL;
goto out;
}
@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (ret)
goto out;
+ if (!umem->writable)
+ gup_flags |= FOLL_FORCE;
+
need_release = 1;
sg_list_start = umem->sg_head.sgl;
@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
+ gup_flags, page_list, vma_list);
if (ret < 0)
goto out;
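The two umem hunks switch ib_umem_get() and ib_umem_odp_map_dma_pages() from get_user_pages()'s old (write, force) integer pair to a single FOLL_* bitmask, matching the get_user_pages() signature used in this kernel. Note the inversion preserved from the old code: force was !umem->writable, which becomes FOLL_FORCE for read-only pins. A sketch under that assumption:

#include <linux/mm.h>

static long pin_pages(unsigned long start, unsigned long npages,
		      bool writable, struct page **pages)
{
	unsigned int gup_flags = FOLL_WRITE;

	if (!writable)
		gup_flags |= FOLL_FORCE;	/* read-only pin, as in ib_umem_get() */

	/* Post-conversion signature: one flags bitmask, no (write, force) pair. */
	return get_user_pages(start, npages, gup_flags, pages, NULL);
}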
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 75077a018675..1f0fe3217f23 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
u64 off;
int j, k, ret = 0, start_idx, npages = 0;
u64 base_virt_addr;
+ unsigned int flags = 0;
if (access_mask == 0)
return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
goto out_put_task;
}
+ if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ flags |= FOLL_WRITE;
+
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
k = start_idx;
@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT,
- 0, local_page_list, NULL);
+ flags, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index f6647318138d..cb3f515a2285 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -571,7 +571,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
pd->device = ib_dev;
pd->uobject = uobj;
- pd->local_mr = NULL;
+ pd->__internal_mr = NULL;
atomic_set(&pd->usecnt, 0);
uobj->object = pd;
@@ -3078,51 +3078,102 @@ out_put:
return ret ? ret : in_len;
}
+static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
+{
+ /* Returns user space filter size, includes padding */
+ return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
+}
+
+static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
+ u16 ib_real_filter_sz)
+{
+ /*
+	 * User-space filter structures must be 64-bit aligned; otherwise this
+	 * check may pass even though we won't handle additional new attributes.
+ */
+
+ if (kern_filter_size > ib_real_filter_sz) {
+ if (memchr_inv(kern_spec_filter +
+ ib_real_filter_sz, 0,
+ kern_filter_size - ib_real_filter_sz))
+ return -EINVAL;
+ return ib_real_filter_sz;
+ }
+ return kern_filter_size;
+}
+
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
+ ssize_t actual_filter_sz;
+ ssize_t kern_filter_sz;
+ ssize_t ib_filter_sz;
+ void *kern_spec_mask;
+ void *kern_spec_val;
+
if (kern_spec->reserved)
return -EINVAL;
ib_spec->type = kern_spec->type;
+ kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
+ /* User flow spec size must be aligned to 4 bytes */
+ if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
+ return -EINVAL;
+
+ kern_spec_val = (void *)kern_spec +
+ sizeof(struct ib_uverbs_flow_spec_hdr);
+ kern_spec_mask = kern_spec_val + kern_filter_sz;
+
switch (ib_spec->type) {
case IB_FLOW_SPEC_ETH:
- ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
- if (ib_spec->eth.size != kern_spec->eth.size)
+ ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
return -EINVAL;
- memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
- sizeof(struct ib_flow_eth_filter));
- memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
- sizeof(struct ib_flow_eth_filter));
+ ib_spec->size = sizeof(struct ib_flow_spec_eth);
+ memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV4:
- ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
- if (ib_spec->ipv4.size != kern_spec->ipv4.size)
+ ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
return -EINVAL;
- memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
- sizeof(struct ib_flow_ipv4_filter));
- memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
- sizeof(struct ib_flow_ipv4_filter));
+ ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
+ memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV6:
- ib_spec->ipv6.size = sizeof(struct ib_flow_spec_ipv6);
- if (ib_spec->ipv6.size != kern_spec->ipv6.size)
+ ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
+ return -EINVAL;
+ ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
+ memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
+
+ if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
+ (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
return -EINVAL;
- memcpy(&ib_spec->ipv6.val, &kern_spec->ipv6.val,
- sizeof(struct ib_flow_ipv6_filter));
- memcpy(&ib_spec->ipv6.mask, &kern_spec->ipv6.mask,
- sizeof(struct ib_flow_ipv6_filter));
break;
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
- ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
- if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
+ ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
+ actual_filter_sz = spec_filter_size(kern_spec_mask,
+ kern_filter_sz,
+ ib_filter_sz);
+ if (actual_filter_sz <= 0)
return -EINVAL;
- memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
- sizeof(struct ib_flow_tcp_udp_filter));
- memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
- sizeof(struct ib_flow_tcp_udp_filter));
+ ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
+ memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
+ memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
break;
default:
return -EINVAL;
@@ -3654,7 +3705,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
goto err_uobj;
}
- flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
+ flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
+ sizeof(union ib_flow_spec), GFP_KERNEL);
if (!flow_attr) {
err = -ENOMEM;
goto err_put;
@@ -4173,6 +4225,23 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
resp.device_cap_flags_ex = attr.device_cap_flags;
resp.response_length += sizeof(resp.device_cap_flags_ex);
+
+ if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
+ goto end;
+
+ resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
+ resp.rss_caps.max_rwq_indirection_tables =
+ attr.rss_caps.max_rwq_indirection_tables;
+ resp.rss_caps.max_rwq_indirection_table_size =
+ attr.rss_caps.max_rwq_indirection_table_size;
+
+ resp.response_length += sizeof(resp.rss_caps);
+
+ if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
+ goto end;
+
+ resp.max_wq_type_rq = attr.max_wq_type_rq;
+ resp.response_length += sizeof(resp.max_wq_type_rq);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;
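spec_filter_size() above implements the usual forward-compatibility contract for variable-size user structures: user space may hand in a filter larger than the struct this kernel knows, but only if every byte beyond the known size is zero (checked with memchr_inv()); otherwise the request is rejected instead of silently ignoring attributes. A generic sketch of that check:

#include <linux/errno.h>
#include <linux/string.h>

static ssize_t clamp_user_struct(const void *ubuf, size_t user_sz,
				 size_t kern_sz)
{
	if (user_sz > kern_sz) {
		/* Any nonzero trailing byte means the caller asked for
		 * an attribute this kernel cannot honor. */
		if (memchr_inv(ubuf + kern_sz, 0, user_sz - kern_sz))
			return -EINVAL;
		return kern_sz;
	}
	return user_sz;
}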
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 0012fa58c105..44b1104eb168 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
container_of(uobj, struct ib_uqp_object, uevent.uobject);
idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
- if (qp != qp->real_qp) {
- ib_close_qp(qp);
- } else {
+ if (qp == qp->real_qp)
ib_uverbs_detach_umcast(qp, uqp);
- ib_destroy_qp(qp);
- }
+ ib_destroy_qp(qp);
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index f2b776efab3a..83687646da68 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -227,9 +227,11 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
* Every PD has a local_dma_lkey which can be used as the lkey value for local
* memory operations.
*/
-struct ib_pd *ib_alloc_pd(struct ib_device *device)
+struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
+ const char *caller)
{
struct ib_pd *pd;
+ int mr_access_flags = 0;
pd = device->alloc_pd(device, NULL, NULL);
if (IS_ERR(pd))
@@ -237,26 +239,46 @@ struct ib_pd *ib_alloc_pd(struct ib_device *device)
pd->device = device;
pd->uobject = NULL;
- pd->local_mr = NULL;
+ pd->__internal_mr = NULL;
atomic_set(&pd->usecnt, 0);
+ pd->flags = flags;
if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
pd->local_dma_lkey = device->local_dma_lkey;
- else {
+ else
+ mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
+
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ pr_warn("%s: enabling unsafe global rkey\n", caller);
+ mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
+ }
+
+ if (mr_access_flags) {
struct ib_mr *mr;
- mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+ mr = pd->device->get_dma_mr(pd, mr_access_flags);
if (IS_ERR(mr)) {
ib_dealloc_pd(pd);
- return (struct ib_pd *)mr;
+ return ERR_CAST(mr);
}
- pd->local_mr = mr;
- pd->local_dma_lkey = pd->local_mr->lkey;
+ mr->device = pd->device;
+ mr->pd = pd;
+ mr->uobject = NULL;
+ mr->need_inval = false;
+
+ pd->__internal_mr = mr;
+
+ if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
+ pd->local_dma_lkey = pd->__internal_mr->lkey;
+
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+ pd->unsafe_global_rkey = pd->__internal_mr->rkey;
}
+
return pd;
}
-EXPORT_SYMBOL(ib_alloc_pd);
+EXPORT_SYMBOL(__ib_alloc_pd);
/**
* ib_dealloc_pd - Deallocates a protection domain.
@@ -270,10 +292,10 @@ void ib_dealloc_pd(struct ib_pd *pd)
{
int ret;
- if (pd->local_mr) {
- ret = ib_dereg_mr(pd->local_mr);
+ if (pd->__internal_mr) {
+ ret = pd->device->dereg_mr(pd->__internal_mr);
WARN_ON(ret);
- pd->local_mr = NULL;
+ pd->__internal_mr = NULL;
}
/* uverbs manipulates usecnt with proper locking, while the kabi
@@ -821,7 +843,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (ret) {
pr_err("failed to init MR pool ret= %d\n", ret);
ib_destroy_qp(qp);
- qp = ERR_PTR(ret);
+ return ERR_PTR(ret);
}
}
@@ -1391,29 +1413,6 @@ EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
- struct ib_mr *mr;
- int err;
-
- err = ib_check_mr_access(mr_access_flags);
- if (err)
- return ERR_PTR(err);
-
- mr = pd->device->get_dma_mr(pd, mr_access_flags);
-
- if (!IS_ERR(mr)) {
- mr->device = pd->device;
- mr->pd = pd;
- mr->uobject = NULL;
- atomic_inc(&pd->usecnt);
- mr->need_inval = false;
- }
-
- return mr;
-}
-EXPORT_SYMBOL(ib_get_dma_mr);
-
int ib_dereg_mr(struct ib_mr *mr)
{
struct ib_pd *pd = mr->pd;
@@ -1812,13 +1811,13 @@ EXPORT_SYMBOL(ib_set_vf_guid);
*
* Constraints:
* - The first sg element is allowed to have an offset.
- * - Each sg element must be aligned to page_size (or physically
- * contiguous to the previous element). In case an sg element has a
- * non contiguous offset, the mapping prefix will not include it.
+ * - Each sg element must either be aligned to page_size or virtually
+ * contiguous to the previous element. In case an sg element has a
+ * non-contiguous offset, the mapping prefix will not include it.
* - The last sg element is allowed to have length less than page_size.
* - If sg_nents total byte length exceeds the mr max_num_sge * page_size
* then only max_num_sg entries will be mapped.
- * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, non of these
+ * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
* constraints holds and the page_size argument is ignored.
*
* Returns the number of sg elements that were mapped to the memory region.
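__ib_alloc_pd() now allocates the internal DMA MR only when some access flag is actually needed, and it propagates an allocation failure with ERR_CAST() instead of the old open-coded (struct ib_pd *)mr cast. A self-contained sketch of that error-pointer idiom, with hypothetical foo/bar types:

#include <linux/err.h>
#include <linux/errno.h>

struct foo { int x; };
struct bar { int y; };

static struct bar *make_bar(void)
{
	return ERR_PTR(-ENOMEM);	/* stand-in for a failing allocation */
}

static struct foo *make_foo(void)
{
	struct bar *b = make_bar();

	if (IS_ERR(b))
		return ERR_CAST(b);	/* keep the encoded errno, fix the type */

	return NULL;			/* construction elided for brevity */
}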
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index c0c7cf8af3f4..e7a5ed9f6f3f 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -9,3 +9,5 @@ obj-$(CONFIG_INFINIBAND_NES) += nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
+obj-$(CONFIG_INFINIBAND_HNS) += hns/
+obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 8e77dc543dd1..b3e11329801d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -36,7 +36,7 @@
#include "cxgb3_offload.h"
#include "iwch_provider.h"
-#include "iwch_user.h"
+#include <rdma/cxgb3-abi.h>
#include "iwch.h"
#include "iwch_cm.h"
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 04bbf172abde..65ee64400deb 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -2258,7 +2258,7 @@ int __init iwch_cm_init(void)
{
skb_queue_head_init(&rxq);
- workq = create_singlethread_workqueue("iw_cxgb3");
+ workq = alloc_ordered_workqueue("iw_cxgb3", WQ_MEM_RECLAIM);
if (!workq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 3edb80644b53..cba57bb53dba 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -58,7 +58,7 @@
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
-#include "iwch_user.h"
+#include <rdma/cxgb3-abi.h>
#include "common.h"
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
@@ -1396,6 +1396,7 @@ int iwch_register_device(struct iwch_dev *dev)
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
(1ull << IB_USER_VERBS_CMD_POST_RECV);
dev->ibdev.node_type = RDMA_NODE_RNIC;
+ BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
dev->ibdev.num_comp_vectors = 1;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 71c8867ef66b..f1510cc76d2d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -4235,7 +4235,7 @@ int __init c4iw_cm_init(void)
spin_lock_init(&timeout_lock);
skb_queue_head_init(&rxq);
- workq = create_singlethread_workqueue("iw_cxgb4");
+ workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
if (!workq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ac926c942fee..19c6477af19f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -721,6 +721,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
}
} else {
switch (CQE_OPCODE(&cqe)) {
@@ -746,6 +747,11 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
break;
case FW_RI_FAST_REGISTER:
wc->opcode = IB_WC_REG_MR;
+
+ /* Invalidate the MR if the fastreg failed */
+ if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
+ c4iw_invalidate_mr(qhp->rhp,
+ CQE_WRID_FR_STAG(&cqe));
break;
default:
printk(KERN_ERR MOD "Unexpected opcode %d "
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index cdcf3eeb6f4a..4788e1a46fde 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -58,7 +58,7 @@
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
-#include "user.h"
+#include <rdma/cxgb4-abi.h>
#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"
@@ -999,6 +999,6 @@ extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_drain_rq(struct ib_qp *qp);
void c4iw_drain_sq(struct ib_qp *qp);
-
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 0b91b0f4df71..410408f886c1 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -695,7 +695,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_NSMR;
mhp->attr.stag = stag;
- mhp->attr.state = 1;
+ mhp->attr.state = 0;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
@@ -770,3 +770,15 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
kfree(mhp);
return 0;
}
+
+void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
+{
+ struct c4iw_mr *mhp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rhp->lock, flags);
+ mhp = get_mhp(rhp, rkey >> 8);
+ if (mhp)
+ mhp->attr.state = 0;
+ spin_unlock_irqrestore(&rhp->lock, flags);
+}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index df127ce6b6ec..645e606a17c5 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -563,6 +563,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
(1ull << IB_USER_VERBS_CMD_POST_SEND) |
(1ull << IB_USER_VERBS_CMD_POST_RECV);
dev->ibdev.node_type = RDMA_NODE_RNIC;
+ BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
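Both the cxgb3 (above) and cxgb4 register paths gain the same guard: the memcpy() of a string literal into the fixed-size node_desc array is now preceded by BUILD_BUG_ON(), turning a would-be overflow into a compile-time failure against the IB_DEVICE_NODE_DESC_MAX bound. A generic sketch, with an assumed bound of 64 for illustration:

#include <linux/bug.h>
#include <linux/string.h>

#define NODE_DESC_MAX	64		/* assumed bound, for illustration */
#define MY_DESC		"my rnic"

static void set_desc(char dst[NODE_DESC_MAX])
{
	/* sizeof() counts the NUL, so the copy below can never overflow. */
	BUILD_BUG_ON(sizeof(MY_DESC) > NODE_DESC_MAX);
	memcpy(dst, MY_DESC, sizeof(MY_DESC));
}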
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 690435229be7..b7ac97b27c88 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -609,10 +609,42 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
return 0;
}
+static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
+ struct ib_reg_wr *wr, struct c4iw_mr *mhp,
+ u8 *len16)
+{
+ __be64 *p = (__be64 *)fr->pbl;
+
+ fr->r2 = cpu_to_be32(0);
+ fr->stag = cpu_to_be32(mhp->ibmr.rkey);
+
+ fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
+ FW_RI_TPTE_STAGSTATE_V(1) |
+ FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
+ FW_RI_TPTE_PDID_V(mhp->attr.pdid));
+ fr->tpte.locread_to_qpid = cpu_to_be32(
+ FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
+ FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
+ FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
+ fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
+ PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
+ fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
+ fr->tpte.len_hi = cpu_to_be32(0);
+ fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
+ fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
+ fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
+
+ p[0] = cpu_to_be64((u64)mhp->mpl[0]);
+ p[1] = cpu_to_be64((u64)mhp->mpl[1]);
+
+ *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
+}
+
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_reg_wr *wr, u8 *len16, bool dsgl_supported)
+ struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
+ bool dsgl_supported)
{
- struct c4iw_mr *mhp = to_c4iw_mr(wr->mr);
struct fw_ri_immd *imdp;
__be64 *p;
int i;
@@ -674,8 +706,7 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
return 0;
}
-static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
- u8 *len16)
+static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
wqe->inv.r2 = 0;
@@ -762,11 +793,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -EINVAL;
}
num_wrs = t4_sq_avail(&qhp->wq);
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -ENOMEM;
}
while (wr) {
@@ -805,10 +838,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ_WITH_INV:
fw_opcode = FW_RI_RDMA_READ_WR;
swsqe->opcode = FW_RI_READ_REQ;
- if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+ if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
+ c4iw_invalidate_mr(qhp->rhp,
+ wr->sg_list[0].lkey);
fw_flags = FW_RI_RDMA_READ_INVALIDATE;
- else
+ } else {
fw_flags = 0;
+ }
err = build_rdma_read(wqe, wr, &len16);
if (err)
break;
@@ -816,18 +852,33 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (!qhp->wq.sq.oldest_read)
qhp->wq.sq.oldest_read = swsqe;
break;
- case IB_WR_REG_MR:
- fw_opcode = FW_RI_FR_NSMR_WR;
+ case IB_WR_REG_MR: {
+ struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
+
swsqe->opcode = FW_RI_FAST_REGISTER;
- err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
- qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
+ if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
+ !mhp->attr.state && mhp->mpl_len <= 2) {
+ fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
+ build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
+ mhp, &len16);
+ } else {
+ fw_opcode = FW_RI_FR_NSMR_WR;
+ err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
+ mhp, &len16,
+ qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
+ if (err)
+ break;
+ }
+ mhp->attr.state = 1;
break;
+ }
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
fw_opcode = FW_RI_INV_LSTAG_WR;
swsqe->opcode = FW_RI_LOCAL_INV;
err = build_inv_stag(wqe, wr, &len16);
+ c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
break;
default:
PDBG("%s post of type=%d TBD!\n", __func__,
@@ -885,11 +936,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -EINVAL;
}
num_wrs = t4_rq_avail(&qhp->wq);
if (num_wrs == 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
+ *bad_wr = wr;
return -ENOMEM;
}
while (wr) {
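The qp.c hunks also fix a verbs contract: when post_send()/post_receive() fail before consuming any work request, *bad_wr must point at the first WR not posted, so the caller knows where the list stopped. A sketch of the convention, with post_one() as a hypothetical per-WR helper:

#include <rdma/ib_verbs.h>

static int post_one(struct ib_qp *qp, struct ib_send_wr *wr)
{
	return 0;	/* stand-in for building and posting one WR */
}

static int post_list(struct ib_qp *qp, struct ib_send_wr *wr,
		     struct ib_send_wr **bad_wr)
{
	int err;

	while (wr) {
		err = post_one(qp, wr);
		if (err) {
			*bad_wr = wr;	/* first WR that was not consumed */
			return err;
		}
		wr = wr->next;
	}
	return 0;
}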
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 02173f4315fa..862381aa83c8 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -95,6 +95,7 @@ union t4_wr {
struct fw_ri_rdma_read_wr read;
struct fw_ri_bind_mw_wr bind;
struct fw_ri_fr_nsmr_wr fr;
+ struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
struct fw_ri_inv_lstag_wr inv;
struct t4_status_page status;
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
@@ -170,7 +171,7 @@ struct t4_cqe {
__be32 msn;
} rcqe;
struct {
- u32 nada1;
+ u32 stag;
u16 nada2;
u16 cidx;
} scqe;
@@ -232,6 +233,7 @@ struct t4_cqe {
/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
+#define CQE_WRID_FR_STAG(x) (be32_to_cpu((x)->u.scqe.stag))
/* generic accessor macros */
#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 1e26669793c3..010c709ba3bb 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -669,6 +669,18 @@ struct fw_ri_fr_nsmr_wr {
#define FW_RI_FR_NSMR_WR_DCACPU_G(x) \
(((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M)
+struct fw_ri_fr_nsmr_tpte_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __u32 r2;
+ __u32 stag;
+ struct fw_ri_tpte tpte;
+ __u64 pbl[2];
+};
+
struct fw_ri_inv_lstag_wr {
__u8 opcode;
__u8 flags;
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index a26a9a0bfc41..67ea85a56945 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -775,75 +775,3 @@ void hfi1_put_proc_affinity(int cpu)
}
mutex_unlock(&affinity->lock);
}
-
-int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
- size_t count)
-{
- struct hfi1_affinity_node *entry;
- cpumask_var_t mask;
- int ret, i;
-
- mutex_lock(&node_affinity.lock);
- entry = node_affinity_lookup(dd->node);
-
- if (!entry) {
- ret = -EINVAL;
- goto unlock;
- }
-
- ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
- if (!ret) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = cpulist_parse(buf, mask);
- if (ret)
- goto out;
-
- if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
- dd_dev_warn(dd, "Invalid CPU mask\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* reset the SDMA interrupt affinity details */
- init_cpu_mask_set(&entry->def_intr);
- cpumask_copy(&entry->def_intr.mask, mask);
-
- /* Reassign the affinity for each SDMA interrupt. */
- for (i = 0; i < dd->num_msix_entries; i++) {
- struct hfi1_msix_entry *msix;
-
- msix = &dd->msix_entries[i];
- if (msix->type != IRQ_SDMA)
- continue;
-
- ret = get_irq_affinity(dd, msix);
-
- if (ret)
- break;
- }
-out:
- free_cpumask_var(mask);
-unlock:
- mutex_unlock(&node_affinity.lock);
- return ret ? ret : strnlen(buf, PAGE_SIZE);
-}
-
-int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
-{
- struct hfi1_affinity_node *entry;
-
- mutex_lock(&node_affinity.lock);
- entry = node_affinity_lookup(dd->node);
-
- if (!entry) {
- mutex_unlock(&node_affinity.lock);
- return -EINVAL;
- }
-
- cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
- mutex_unlock(&node_affinity.lock);
- return strnlen(buf, PAGE_SIZE);
-}
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index b89ea3c0ee1a..42e63316afd1 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -102,10 +102,6 @@ int hfi1_get_proc_affinity(int);
/* Release a CPU used by a user process. */
void hfi1_put_proc_affinity(int);
-int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf);
-int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
- size_t count);
-
struct hfi1_affinity_node {
int node;
struct cpu_mask_set def_intr;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9bf5f23544d4..24d0820873cf 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6301,19 +6301,8 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
/* leave shared count at zero for both global and VL15 */
write_global_credit(dd, vau, vl15buf, 0);
- /* We may need some credits for another VL when sending packets
- * with the snoop interface. Dividing it down the middle for VL15
- * and VL0 should suffice.
- */
- if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
- write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
- << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
- write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
- << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
- } else {
- write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
- << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
- }
+ write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
+ << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
}
/*
@@ -9915,9 +9904,6 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
u32 mask = ~((1U << ppd->lmc) - 1);
u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
- if (dd->hfi1_snoop.mode_flag)
- dd_dev_info(dd, "Set lid/lmc while snooping");
-
c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
@@ -12112,7 +12098,7 @@ static void update_synth_timer(unsigned long opaque)
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
-#define C_MAX_NAME 13 /* 12 chars + one for /0 */
+#define C_MAX_NAME 16 /* 15 chars + one for \0 */
static int init_cntrs(struct hfi1_devdata *dd)
{
int i, rcv_ctxts, j;
@@ -14463,7 +14449,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
* Any error printing is already done by the init code.
* On return, we have the chip mapped.
*/
- ret = hfi1_pcie_ddinit(dd, pdev, ent);
+ ret = hfi1_pcie_ddinit(dd, pdev);
if (ret < 0)
goto bail_free;
@@ -14691,6 +14677,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_free_cntrs;
+ init_completion(&dd->user_comp);
+
+	/* The user refcount starts with one to indicate an active device */
+ atomic_set(&dd->user_refcount, 1);
+
goto bail;
bail_free_rcverr:
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 92345259a8f4..043fd21dc5f3 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -320,6 +320,9 @@
/* DC_DC8051_CFG_MODE.GENERAL bits */
#define DISABLE_SELF_GUID_CHECK 0x2
+/* Bad L2 frame error code */
+#define BAD_L2_ERR 0x6
+
/*
* Eager buffer minimum and maximum sizes supported by the hardware.
* All power-of-two sizes in between are supported as well.
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 6563e4d38b80..c5efff29c147 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -599,7 +599,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
dd->rhf_offset;
struct rvt_qp *qp;
struct ib_header *hdr;
- struct ib_other_headers *ohdr;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
@@ -615,18 +614,21 @@ static void __prescan_rxq(struct hfi1_packet *packet)
if (etype != RHF_RCV_TYPE_IB)
goto next;
- hdr = hfi1_get_msgheader(dd, rhf_addr);
+ packet->hdr = hfi1_get_msgheader(dd, rhf_addr);
+ hdr = packet->hdr;
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
+ if (lnh == HFI1_LRH_BTH) {
+ packet->ohdr = &hdr->u.oth;
+ } else if (lnh == HFI1_LRH_GRH) {
+ packet->ohdr = &hdr->u.l.oth;
+ packet->rcv_flags |= HFI1_HAS_GRH;
+ } else {
goto next; /* just in case */
+ }
- bth1 = be32_to_cpu(ohdr->bth[1]);
+ bth1 = be32_to_cpu(packet->ohdr->bth[1]);
is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
if (!is_ecn)
@@ -646,7 +648,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
/* turn off BECN, FECN */
bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
- ohdr->bth[1] = cpu_to_be32(bth1);
+ packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
update_ps_mdata(&mdata, rcd);
}
@@ -1360,12 +1362,25 @@ int process_receive_ib(struct hfi1_packet *packet)
int process_receive_bypass(struct hfi1_packet *packet)
{
+ struct hfi1_devdata *dd = packet->rcd->dd;
+
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
- dd_dev_err(packet->rcd->dd,
+ dd_dev_err(dd,
"Bypass packets are not supported in normal operation. Dropping\n");
- incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors);
+ incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
+ if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) {
+ u64 *flits = packet->ebuf;
+
+ if (flits && !(packet->rhf & RHF_LEN_ERR)) {
+ dd->err_info_rcvport.packet_flit1 = flits[0];
+ dd->err_info_rcvport.packet_flit2 =
+ packet->tlen > sizeof(flits[0]) ? flits[1] : 0;
+ }
+ dd->err_info_rcvport.status_and_code |=
+ (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
+ }
return RHF_RCV_CONTINUE;
}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 677efa0e8cd6..bd786b7bd30b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -172,6 +172,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
struct hfi1_devdata,
user_cdev);
+ if (!atomic_inc_not_zero(&dd->user_refcount))
+ return -ENXIO;
+
/* Just take a ref now. Not all opens result in a context assign */
kobject_get(&dd->kobj);
@@ -183,11 +186,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
atomic_inc(&fd->mm->mm_count);
- }
+ fp->private_data = fd;
+ } else {
+ fp->private_data = NULL;
+
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
- fp->private_data = fd;
+ return -ENOMEM;
+ }
- return fd ? 0 : -ENOMEM;
+ return 0;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -798,6 +807,10 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
done:
mmdrop(fdata->mm);
kobject_put(&dd->kobj);
+
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
kfree(fdata);
return 0;
}
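file_ops.c here, together with the user_refcount/user_comp fields added in chip.c above and the wait_for_clients() added in init.c below, forms a refcount-plus-completion handshake so the driver can wait out user space before tearing down: the count starts at 1 (the "device present" reference), open() takes a reference with atomic_inc_not_zero() so it fails once removal has begun, and remove_one() drops the initial reference and sleeps until the last close() completes it. A condensed sketch of the whole pattern:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/errno.h>

struct dev_state {
	atomic_t user_refcount;
	struct completion user_comp;
};

static void dev_state_init(struct dev_state *d)
{
	atomic_set(&d->user_refcount, 1);	/* "device present" ref */
	init_completion(&d->user_comp);
}

static int user_open(struct dev_state *d)
{
	if (!atomic_inc_not_zero(&d->user_refcount))
		return -ENXIO;			/* removal in progress */
	return 0;
}

static void user_close(struct dev_state *d)
{
	if (atomic_dec_and_test(&d->user_refcount))
		complete(&d->user_comp);
}

static void device_remove(struct dev_state *d)
{
	user_close(d);				/* drop the initial ref */
	wait_for_completion(&d->user_comp);	/* wait out open files */
}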
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 7eef11b316ff..cc87fd4e534b 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -367,26 +367,6 @@ struct hfi1_packet {
u8 etype;
};
-/*
- * Private data for snoop/capture support.
- */
-struct hfi1_snoop_data {
- int mode_flag;
- struct cdev cdev;
- struct device *class_dev;
- /* protect snoop data */
- spinlock_t snoop_lock;
- struct list_head queue;
- wait_queue_head_t waitq;
- void *filter_value;
- int (*filter_callback)(void *hdr, void *data, void *value);
- u64 dcc_cfg; /* saved value of DCC Cfg register */
-};
-
-/* snoop mode_flag values */
-#define HFI1_PORT_SNOOP_MODE 1U
-#define HFI1_PORT_CAPTURE_MODE 2U
-
struct rvt_sge_state;
/*
@@ -613,8 +593,6 @@ struct hfi1_pportdata {
struct mutex hls_lock;
u32 host_link_state;
- spinlock_t sdma_alllock ____cacheline_aligned_in_smp;
-
u32 lstate; /* logical link state */
/* these are the "32 bit" regs */
@@ -1104,8 +1082,6 @@ struct hfi1_devdata {
char *portcntrnames;
size_t portcntrnameslen;
- struct hfi1_snoop_data hfi1_snoop;
-
struct err_info_rcvport err_info_rcvport;
struct err_info_constraint err_info_rcv_constraint;
struct err_info_constraint err_info_xmit_constraint;
@@ -1141,8 +1117,8 @@ struct hfi1_devdata {
rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
/*
- * Handlers for outgoing data so that snoop/capture does not
- * have to have its hooks in the send path
+	 * These pointers make it possible to substitute different send
+	 * engines simply by changing a pointer value.
*/
send_routine process_pio_send;
send_routine process_dma_send;
@@ -1174,6 +1150,10 @@ struct hfi1_devdata {
spinlock_t aspm_lock;
/* Number of verbs contexts which have disabled ASPM */
atomic_t aspm_disabled_cnt;
+ /* Keeps track of user space clients */
+ atomic_t user_refcount;
+ /* Used to wait for outstanding user space clients before dev removal */
+ struct completion user_comp;
struct hfi1_affinity *affinity;
struct rhashtable sdma_rht;
@@ -1221,8 +1201,6 @@ struct hfi1_devdata *hfi1_lookup(int unit);
extern u32 hfi1_cpulist_count;
extern unsigned long *hfi1_cpulist;
-extern unsigned int snoop_drop_send;
-extern unsigned int snoop_force_capture;
int hfi1_init(struct hfi1_devdata *, int);
int hfi1_count_units(int *npresentp, int *nupp);
int hfi1_count_active_units(void);
@@ -1557,13 +1535,6 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
-int snoop_recv_handler(struct hfi1_packet *packet);
-int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
- u64 pbc, const void *from, size_t count);
int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
@@ -1763,8 +1734,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
void hfi1_pcie_cleanup(struct pci_dev *);
-int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *,
- const struct pci_device_id *);
+int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
void hfi1_pcie_flr(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *);
@@ -1799,8 +1769,6 @@ int kdeth_process_expected(struct hfi1_packet *packet);
int kdeth_process_eager(struct hfi1_packet *packet);
int process_receive_invalid(struct hfi1_packet *packet);
-extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8];
-
void update_sge(struct rvt_sge_state *ss, u32 length);
/* global module parameter variables */
@@ -1827,9 +1795,6 @@ extern struct mutex hfi1_mutex;
#define DRIVER_NAME "hfi1"
#define HFI1_USER_MINOR_BASE 0
#define HFI1_TRACE_MINOR 127
-#define HFI1_DIAGPKT_MINOR 128
-#define HFI1_DIAG_MINOR_BASE 129
-#define HFI1_SNOOP_CAPTURE_BASE 200
#define HFI1_NMINORS 255
#define PCI_VENDOR_ID_INTEL 0x8086
@@ -1848,7 +1813,13 @@ extern struct mutex hfi1_mutex;
static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
u16 ctxt_type)
{
- u64 base_sc_integrity =
+ u64 base_sc_integrity;
+
+ /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+ if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+ return 0;
+
+ base_sc_integrity =
SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
@@ -1863,7 +1834,6 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
@@ -1872,18 +1842,23 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
else
base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
- if (is_ax(dd))
- /* turn off send-side job key checks - A0 */
- return base_sc_integrity &
- ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ /* turn on send-side job key checks if !A0 */
+ if (!is_ax(dd))
+ base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
return base_sc_integrity;
}
static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
{
- u64 base_sdma_integrity =
+ u64 base_sdma_integrity;
+
+ /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
+ if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
+ return 0;
+
+ base_sdma_integrity =
SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
@@ -1895,14 +1870,18 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
- if (is_ax(dd))
- /* turn off send-side job key checks - A0 */
- return base_sdma_integrity &
- ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+ if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
+ base_sdma_integrity |=
+ SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;
+
+ /* turn on send-side job key checks if !A0 */
+ if (!is_ax(dd))
+ base_sdma_integrity |=
+ SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
+
return base_sdma_integrity;
}
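The reworked hfi1_pkt_default_send_ctxt_mask() and hfi1_pkt_base_sdma_integrity() invert the old style: instead of building a full mask and clearing bits afterwards for A0 silicon or STATIC_RATE_CTRL, they return 0 early when NO_INTEGRITY is set and OR in each conditional bit explicitly. A sketch with hypothetical bit names:

#include <linux/bitops.h>
#include <linux/types.h>

#define CHECK_PKT_LEN		BIT(0)	/* hypothetical check bits */
#define CHECK_OPCODE		BIT(1)
#define CHECK_STATIC_RATE	BIT(2)
#define CHECK_JOB_KEY		BIT(3)

static u64 build_check_mask(bool no_integrity, bool a0_hw, bool static_rate)
{
	u64 mask;

	if (no_integrity)
		return 0;			/* all checks off, wholesale */

	mask = CHECK_PKT_LEN | CHECK_OPCODE;	/* unconditional checks */
	if (!static_rate)
		mask |= CHECK_STATIC_RATE;
	if (!a0_hw)
		mask |= CHECK_JOB_KEY;		/* A0 cannot do this check */
	return mask;
}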
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 60db61536fed..e3b5bc93bc70 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -144,6 +144,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
struct hfi1_ctxtdata *rcd;
ppd = dd->pport + (i % dd->num_pports);
+
+ /* dd->rcd[i] gets assigned inside the callee */
rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
if (!rcd) {
dd_dev_err(dd,
@@ -169,8 +171,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
if (!rcd->sc) {
dd_dev_err(dd,
"Unable to allocate kernel send context, failing\n");
- dd->rcd[rcd->ctxt] = NULL;
- hfi1_free_ctxtdata(dd, rcd);
goto nomem;
}
@@ -178,9 +178,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
if (ret < 0) {
dd_dev_err(dd,
"Failed to setup kernel receive context, failing\n");
- sc_free(rcd->sc);
- dd->rcd[rcd->ctxt] = NULL;
- hfi1_free_ctxtdata(dd, rcd);
ret = -EFAULT;
goto bail;
}
@@ -196,6 +193,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
nomem:
ret = -ENOMEM;
bail:
+ if (dd->rcd) {
+ for (i = 0; i < dd->num_rcv_contexts; ++i)
+ hfi1_free_ctxtdata(dd, dd->rcd[i]);
+ }
kfree(dd->rcd);
dd->rcd = NULL;
return ret;
@@ -216,7 +217,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
dd->num_rcv_contexts - dd->first_user_ctxt)
kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
(dd->num_rcv_contexts - dd->first_user_ctxt));
- rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
+ rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
if (rcd) {
u32 rcvtids, max_entries;
@@ -261,13 +262,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
}
rcd->eager_base = base * dd->rcv_entries.group_size;
- /* Validate and initialize Rcv Hdr Q variables */
- if (rcvhdrcnt % HDRQ_INCREMENT) {
- dd_dev_err(dd,
- "ctxt%u: header queue count %d must be divisible by %lu\n",
- rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
- goto bail;
- }
rcd->rcvhdrq_cnt = rcvhdrcnt;
rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
/*
@@ -506,7 +500,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
mutex_init(&ppd->hls_lock);
- spin_lock_init(&ppd->sdma_alllock);
spin_lock_init(&ppd->qsfp_info.qsfp_lock);
ppd->qsfp_info.ppd = ppd;
@@ -1399,28 +1392,43 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
hfi1_free_devdata(dd);
}
+static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
+{
+ if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
+ hfi1_early_err(dev, "Receive header queue count too small\n");
+ return -EINVAL;
+ }
+
+ if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
+ hfi1_early_err(dev,
+ "Receive header queue count cannot be greater than %u\n",
+ HFI1_MAX_HDRQ_EGRBUF_CNT);
+ return -EINVAL;
+ }
+
+ if (thecnt % HDRQ_INCREMENT) {
+ hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
+ thecnt, HDRQ_INCREMENT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0, j, pidx, initfail;
- struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
+ struct hfi1_devdata *dd;
struct hfi1_pportdata *ppd;
/* First, lock the non-writable module parameters */
HFI1_CAP_LOCK();
/* Validate some global module parameters */
- if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(&pdev->dev, "Header queue count too small\n");
- ret = -EINVAL;
- goto bail;
- }
- if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(&pdev->dev,
- "Receive header queue count cannot be greater than %u\n",
- HFI1_MAX_HDRQ_EGRBUF_CNT);
- ret = -EINVAL;
+ ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
+ if (ret)
goto bail;
- }
+
/* use the encoding function as a sanitization check */
if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
@@ -1461,26 +1469,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto bail;
- /*
- * Do device-specific initialization, function table setup, dd
- * allocation, etc.
- */
- switch (ent->device) {
- case PCI_DEVICE_ID_INTEL0:
- case PCI_DEVICE_ID_INTEL1:
- dd = hfi1_init_dd(pdev, ent);
- break;
- default:
+ if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
+ ent->device == PCI_DEVICE_ID_INTEL1)) {
hfi1_early_err(&pdev->dev,
"Failing on unknown Intel deviceid 0x%x\n",
ent->device);
ret = -ENODEV;
+ goto clean_bail;
}
- if (IS_ERR(dd))
+ /*
+ * Do device-specific initialization, function table setup, dd
+ * allocation, etc.
+ */
+ dd = hfi1_init_dd(pdev, ent);
+
+ if (IS_ERR(dd)) {
ret = PTR_ERR(dd);
- if (ret)
goto clean_bail; /* error already printed */
+ }
ret = create_workqueues(dd);
if (ret)
@@ -1538,12 +1545,31 @@ bail:
return ret;
}
+static void wait_for_clients(struct hfi1_devdata *dd)
+{
+ /*
+	 * Drop the device init reference and complete the device if there
+	 * are no clients, or wait for the active clients to finish.
+ */
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+
+ wait_for_completion(&dd->user_comp);
+}
+
static void remove_one(struct pci_dev *pdev)
{
struct hfi1_devdata *dd = pci_get_drvdata(pdev);
/* close debugfs files before ib unregister */
hfi1_dbg_ibdev_exit(&dd->verbs_dev);
+
+ /* remove the /dev hfi1 interface */
+ hfi1_device_remove(dd);
+
+ /* wait for existing user space clients to finish */
+ wait_for_clients(dd);
+
/* unregister from IB core */
hfi1_unregister_ib_device(dd);
@@ -1558,8 +1584,6 @@ static void remove_one(struct pci_dev *pdev)
/* wait until all of our (qsfp) queue_work() calls complete */
flush_workqueue(ib_wq);
- hfi1_device_remove(dd);
-
postinit_cleanup(dd);
}
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 89c68da1c273..4ac8f330c5cb 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -157,8 +157,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev)
* fields required to re-initialize after a chip reset, or for
* various other purposes
*/
-int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
{
unsigned long len;
resource_size_t addr;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 50a3a36d9363..d89b8745d4c1 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -668,19 +668,12 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
void set_pio_integrity(struct send_context *sc)
{
struct hfi1_devdata *dd = sc->dd;
- u64 reg = 0;
u32 hw_context = sc->hw_context;
int type = sc->type;
- /*
- * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if
- * we're snooping.
- */
- if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
- dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
- reg = hfi1_pkt_default_send_ctxt_mask(dd, type);
-
- write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
+ write_kctxt_csr(dd, hw_context,
+ SC(CHECK_ENABLE),
+ hfi1_pkt_default_send_ctxt_mask(dd, type));
}
static u32 get_buffers_allocated(struct send_context *sc)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 8bc5013f39a1..83198a8a8797 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -89,7 +89,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_WAIT_RNR;
- qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
+ priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
add_timer(&priv->s_rnr_timer);
}
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index fd39bcaa062d..9cbe52d21077 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -2009,11 +2009,6 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
}
-#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
-(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
-
-#define SET_STATIC_RATE_CONTROL_SMASK(r) \
-(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
/*
* set_sdma_integrity
*
@@ -2022,19 +2017,9 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
static void set_sdma_integrity(struct sdma_engine *sde)
{
struct hfi1_devdata *dd = sde->dd;
- u64 reg;
-
- if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
- return;
-
- reg = hfi1_pkt_base_sdma_integrity(dd);
-
- if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
- CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
- else
- SET_STATIC_RATE_CONTROL_SMASK(reg);
- write_sde_csr(sde, SD(CHECK_ENABLE), reg);
+ write_sde_csr(sde, SD(CHECK_ENABLE),
+ hfi1_pkt_base_sdma_integrity(dd));
}
static void init_sdma_regs(
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index edba22461a9c..919a5474e651 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -49,7 +49,6 @@
#include "hfi.h"
#include "mad.h"
#include "trace.h"
-#include "affinity.h"
/*
* Start of per-port congestion control structures and support code
@@ -623,27 +622,6 @@ static ssize_t show_tempsense(struct device *device,
return ret;
}
-static ssize_t show_sdma_affinity(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- return hfi1_get_sdma_affinity(dd, buf);
-}
-
-static ssize_t store_sdma_affinity(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- return hfi1_set_sdma_affinity(dd, buf, count);
-}
-
/*
* end of per-unit (or driver, in some cases, but replicated
* per unit) functions
@@ -658,8 +636,6 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity,
- store_sdma_affinity);
static struct device_attribute *hfi1_attributes[] = {
&dev_attr_hw_rev,
@@ -670,7 +646,6 @@ static struct device_attribute *hfi1_attributes[] = {
&dev_attr_boardversion,
&dev_attr_tempsense,
&dev_attr_chip_reset,
- &dev_attr_sdma_affinity,
};
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 11e02b228922..f77e59fb43fe 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -253,66 +253,6 @@ TRACE_EVENT(hfi1_mmu_invalidate,
)
);
-#define SNOOP_PRN \
- "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
- "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
-
-TRACE_EVENT(snoop_capture,
- TP_PROTO(struct hfi1_devdata *dd,
- int hdr_len,
- struct ib_header *hdr,
- int data_len,
- void *data),
- TP_ARGS(dd, hdr_len, hdr, data_len, data),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, slid)
- __field(u16, dlid)
- __field(u32, qpn)
- __field(u8, opcode)
- __field(u8, sl)
- __field(u16, pkey)
- __field(u32, hdr_len)
- __field(u32, data_len)
- __field(u8, lnh)
- __dynamic_array(u8, raw_hdr, hdr_len)
- __dynamic_array(u8, raw_pkt, data_len)
- ),
- TP_fast_assign(
- struct ib_other_headers *ohdr;
-
- __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- DD_DEV_ASSIGN(dd);
- __entry->slid = be16_to_cpu(hdr->lrh[3]);
- __entry->dlid = be16_to_cpu(hdr->lrh[1]);
- __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->hdr_len = hdr_len;
- __entry->data_len = data_len;
- memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
- memcpy(__get_dynamic_array(raw_pkt), data, data_len);
- ),
- TP_printk(
- "[%s] " SNOOP_PRN,
- __get_str(dev),
- __entry->slid,
- __entry->dlid,
- __entry->qpn,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->sl,
- __entry->pkey,
- __entry->hdr_len,
- __entry->data_len
- )
-);
-
#endif /* __HFI1_TRACE_RX_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a761f804111e..77697d690f3e 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1144,7 +1144,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
rb_node = hfi1_mmu_rb_extract(pq->handler,
(unsigned long)iovec->iov.iov_base,
iovec->iov.iov_len);
- if (rb_node && !IS_ERR(rb_node))
+ if (rb_node)
node = container_of(rb_node, struct sdma_mmu_node, rb);
else
rb_node = NULL;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index f2f6b5a78e0e..4b7a16ceb362 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1441,7 +1441,8 @@ static int modify_device(struct ib_device *device,
}
if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
- memcpy(device->node_desc, device_modify->node_desc, 64);
+ memcpy(device->node_desc, device_modify->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
for (i = 0; i < dd->num_pports; i++) {
struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
new file mode 100644
index 000000000000..e1a6e055cd60
--- /dev/null
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -0,0 +1,10 @@
+config INFINIBAND_HNS
+ tristate "HNS RoCE Driver"
+ depends on NET_VENDOR_HISILICON
+ depends on ARM64 && HNS && HNS_DSAF && HNS_ENET
+ ---help---
+ This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
+ is used in the Hisilicon Hi1610 and further ICT SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hns-roce.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
new file mode 100644
index 000000000000..7e8ebd24dcae
--- /dev/null
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Hisilicon RoCE drivers.
+#
+
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
+hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
+ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
+ hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
new file mode 100644
index 000000000000..24f79ee39fdf
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include "hns_roce_device.h"
+
+#define HNS_ROCE_PORT_NUM_SHIFT 24
+#define HNS_ROCE_VLAN_SL_BIT_MASK 7
+#define HNS_ROCE_VLAN_SL_SHIFT 13
+
+struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_gid_attr gid_attr;
+ struct hns_roce_ah *ah;
+ u16 vlan_tag = 0xffff;
+ struct in6_addr in6;
+ union ib_gid sgid;
+ int ret;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ /* Get mac address */
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(ah_attr->grh.dgid.raw));
+ if (rdma_is_multicast_addr(&in6))
+ rdma_get_mcast_mac(&in6, ah->av.mac);
+ else
+ memcpy(ah->av.mac, ah_attr->dmac, sizeof(ah_attr->dmac));
+
+ /* Get source gid */
+ ret = ib_get_cached_gid(ibpd->device, ah_attr->port_num,
+ ah_attr->grh.sgid_index, &sgid, &gid_attr);
+ if (ret) {
+ dev_err(dev, "get sgid failed! ret = %d\n", ret);
+ kfree(ah);
+ return ERR_PTR(ret);
+ }
+
+ if (gid_attr.ndev) {
+ if (is_vlan_dev(gid_attr.ndev))
+ vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
+ dev_put(gid_attr.ndev);
+ }
+
+ if (vlan_tag < 0x1000)
+ vlan_tag |= (ah_attr->sl & HNS_ROCE_VLAN_SL_BIT_MASK) <<
+ HNS_ROCE_VLAN_SL_SHIFT;
+
+ ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn | (ah_attr->port_num <<
+ HNS_ROCE_PORT_NUM_SHIFT));
+ ah->av.gid_index = ah_attr->grh.sgid_index;
+ ah->av.vlan = cpu_to_le16(vlan_tag);
+ dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
+ ah->av.vlan);
+
+ if (ah_attr->static_rate)
+ ah->av.stat_rate = IB_RATE_10_GBPS;
+
+ memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, HNS_ROCE_GID_SIZE);
+ ah->av.sl_tclass_flowlabel = cpu_to_le32(ah_attr->sl <<
+ HNS_ROCE_SL_SHIFT);
+
+ return &ah->ibah;
+}
+
+int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct hns_roce_ah *ah = to_hr_ah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+
+ ah_attr->sl = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
+ HNS_ROCE_SL_SHIFT;
+ ah_attr->port_num = le32_to_cpu(ah->av.port_pd) >>
+ HNS_ROCE_PORT_NUM_SHIFT;
+ ah_attr->static_rate = ah->av.stat_rate;
+ ah_attr->ah_flags = IB_AH_GRH;
+ ah_attr->grh.traffic_class = le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
+ HNS_ROCE_TCLASS_SHIFT;
+ ah_attr->grh.flow_label = le32_to_cpu(ah->av.sl_tclass_flowlabel) &
+ HNS_ROCE_FLOW_LABLE_MASK;
+ ah_attr->grh.hop_limit = ah->av.hop_limit;
+ ah_attr->grh.sgid_index = ah->av.gid_index;
+ memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, HNS_ROCE_GID_SIZE);
+
+ return 0;
+}
+
+int hns_roce_destroy_ah(struct ib_ah *ah)
+{
+ kfree(to_hr_ah(ah));
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
new file mode 100644
index 000000000000..863a17a2de40
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include "hns_roce_device.h"
+
+int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
+{
+ int ret = 0;
+
+ spin_lock(&bitmap->lock);
+ *obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
+ if (*obj >= bitmap->max) {
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ *obj = find_first_zero_bit(bitmap->table, bitmap->max);
+ }
+
+ if (*obj < bitmap->max) {
+ set_bit(*obj, bitmap->table);
+ bitmap->last = (*obj + 1);
+ if (bitmap->last == bitmap->max)
+ bitmap->last = 0;
+ *obj |= bitmap->top;
+ } else {
+ ret = -1;
+ }
+
+ spin_unlock(&bitmap->lock);
+
+ return ret;
+}
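
The allocator above resumes scanning at "last", and when it runs past "max" it rotates "top" under "mask" before retrying from bit 0; the rotated "top" is then OR-ed into the returned handle, so a reused bit index still yields a fresh object number. A minimal user-space sketch of the same wrap-around scheme, with plain bit operations standing in for the kernel bitmap helpers, reserved_top taken as zero, and all names hypothetical:

#include <stdio.h>

#define MAX_BITS 8UL                     /* like bitmap->max, a power of two */

struct toy_bitmap {
	unsigned long table;             /* one word covers all 8 bits */
	unsigned long last;              /* where the next scan starts */
	unsigned long top;               /* rotating high-order prefix */
	unsigned long mask;              /* bounds top within the handle space */
};

/* Mirrors hns_roce_bitmap_alloc(): find a zero bit, set it, OR in top. */
static int toy_alloc(struct toy_bitmap *bm, unsigned long *obj)
{
	unsigned long i;

	for (i = bm->last; i < MAX_BITS; i++)
		if (!(bm->table & (1UL << i)))
			break;
	if (i >= MAX_BITS) {
		/* Wrapped: rotate top, then retry from bit 0. */
		bm->top = (bm->top + MAX_BITS) & bm->mask;
		for (i = 0; i < MAX_BITS; i++)
			if (!(bm->table & (1UL << i)))
				break;
	}
	if (i >= MAX_BITS)
		return -1;               /* bitmap exhausted */
	bm->table |= 1UL << i;
	bm->last = (i + 1) % MAX_BITS;
	*obj = i | bm->top;
	return 0;
}

int main(void)
{
	struct toy_bitmap bm = { .mask = MAX_BITS * 4 - 1 };
	unsigned long obj;
	int i;

	for (i = 0; i < 10 && !toy_alloc(&bm, &obj); i++)
		printf("obj = %lu\n", obj);
	return 0;
}

The rotation matters for handles such as CQNs or QPNs: a freed slot comes back with a different upper part, which helps catch use of stale handles.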
+
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
+{
+ hns_roce_bitmap_free_range(bitmap, obj, 1);
+}
+
+int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
+ int align, unsigned long *obj)
+{
+ int ret = 0;
+ int i;
+
+ if (likely(cnt == 1 && align == 1))
+ return hns_roce_bitmap_alloc(bitmap, obj);
+
+ spin_lock(&bitmap->lock);
+
+ *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->last, cnt, align - 1);
+ if (*obj >= bitmap->max) {
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
+ cnt, align - 1);
+ }
+
+ if (*obj < bitmap->max) {
+ for (i = 0; i < cnt; i++)
+ set_bit(*obj + i, bitmap->table);
+
+ if (*obj == bitmap->last) {
+ bitmap->last = (*obj + cnt);
+ if (bitmap->last >= bitmap->max)
+ bitmap->last = 0;
+ }
+ *obj |= bitmap->top;
+ } else {
+ ret = -1;
+ }
+
+ spin_unlock(&bitmap->lock);
+
+ return ret;
+}
+
+void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
+ unsigned long obj, int cnt)
+{
+ int i;
+
+ obj &= bitmap->max + bitmap->reserved_top - 1;
+
+ spin_lock(&bitmap->lock);
+ for (i = 0; i < cnt; i++)
+ clear_bit(obj + i, bitmap->table);
+
+ bitmap->last = min(bitmap->last, obj);
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ spin_unlock(&bitmap->lock);
+}
+
+int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
+ u32 reserved_bot, u32 reserved_top)
+{
+ u32 i;
+
+ if (num != roundup_pow_of_two(num))
+ return -EINVAL;
+
+ bitmap->last = 0;
+ bitmap->top = 0;
+ bitmap->max = num - reserved_top;
+ bitmap->mask = mask;
+ bitmap->reserved_top = reserved_top;
+ spin_lock_init(&bitmap->lock);
+ bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
+ GFP_KERNEL);
+ if (!bitmap->table)
+ return -ENOMEM;
+
+ for (i = 0; i < reserved_bot; ++i)
+ set_bit(i, bitmap->table);
+
+ return 0;
+}
+
+void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
+{
+ kfree(bitmap->table);
+}
+
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
+ struct hns_roce_buf *buf)
+{
+ int i;
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 bits_per_long = BITS_PER_LONG;
+
+ if (buf->nbufs == 1) {
+ dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
+ } else {
+ if (bits_per_long == 64)
+ vunmap(buf->direct.buf);
+
+ for (i = 0; i < buf->nbufs; ++i)
+ if (buf->page_list[i].buf)
+ dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
+ buf->page_list[i].buf,
+ buf->page_list[i].map);
+ kfree(buf->page_list);
+ }
+}
+
+int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
+ struct hns_roce_buf *buf)
+{
+ int i = 0;
+ dma_addr_t t;
+ struct page **pages;
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 bits_per_long = BITS_PER_LONG;
+
+ /* SQ/RQ buf is less than one page; SQ + RQ = 8K */
+ if (size <= max_direct) {
+ buf->nbufs = 1;
+ /* Npages calculated by page_size */
+ buf->npages = 1 << get_order(size);
+ buf->page_shift = PAGE_SHIFT;
+ /* The MTT PA must be recorded with 4K alignment; t is 4K aligned */
+ buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
+
+ buf->direct.map = t;
+
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
+
+ memset(buf->direct.buf, 0, size);
+ } else {
+ buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->npages = buf->nbufs;
+ buf->page_shift = PAGE_SHIFT;
+ buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
+ GFP_KERNEL);
+
+ if (!buf->page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->nbufs; ++i) {
+ buf->page_list[i].buf = dma_alloc_coherent(dev,
+ PAGE_SIZE, &t,
+ GFP_KERNEL);
+
+ if (!buf->page_list[i].buf)
+ goto err_free;
+
+ buf->page_list[i].map = t;
+ memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+ }
+ if (bits_per_long == 64) {
+ pages = kmalloc_array(buf->nbufs, sizeof(*pages),
+ GFP_KERNEL);
+ if (!pages)
+ goto err_free;
+
+ for (i = 0; i < buf->nbufs; ++i)
+ pages[i] = virt_to_page(buf->page_list[i].buf);
+
+ buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
+ PAGE_KERNEL);
+ kfree(pages);
+ if (!buf->direct.buf)
+ goto err_free;
+ }
+ }
+
+ return 0;
+
+err_free:
+ hns_roce_buf_free(hr_dev, size, buf);
+ return -ENOMEM;
+}
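
In the direct branch above, the loop halves the effective page size (doubling npages at each step) until the DMA address is aligned to it, so that npages pages of 1 << page_shift bytes still exactly tile the buffer for the MTT. A standalone sketch of that alignment walk, starting from a deliberately oversized trial shift so the loop is visible (the address is made up):

#include <stdio.h>

int main(void)
{
	/* Pretend the DMA address of a 16 KiB buffer came back 4 KiB-aligned. */
	unsigned long t = 0x12345000;
	unsigned int page_shift = 14;    /* trial: one 16 KiB "page" */
	unsigned int npages = 1;

	/* Same loop as hns_roce_buf_alloc(): shrink until t is aligned. */
	while (t & ((1UL << page_shift) - 1)) {
		--page_shift;
		npages *= 2;
	}
	/* Prints: page_shift=12 npages=4 */
	printf("page_shift=%u npages=%u\n", page_shift, npages);
	return 0;
}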
+
+void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_cleanup_qp_table(hr_dev);
+ hns_roce_cleanup_cq_table(hr_dev);
+ hns_roce_cleanup_mr_table(hr_dev);
+ hns_roce_cleanup_pd_table(hr_dev);
+ hns_roce_cleanup_uar_table(hr_dev);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
new file mode 100644
index 000000000000..2a0b6c05da5f
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/dmapool.h>
+#include <linux/platform_device.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+
+#define CMD_POLL_TOKEN 0xffff
+#define CMD_MAX_NUM 32
+#define STATUS_MASK 0xff
+#define CMD_TOKEN_MASK 0x1f
+#define GO_BIT_TIMEOUT_MSECS 10000
+
+enum {
+ HCR_TOKEN_OFFSET = 0x14,
+ HCR_STATUS_OFFSET = 0x18,
+ HCR_GO_BIT = 15,
+};
+
+static int cmd_pending(struct hns_roce_dev *hr_dev)
+{
+ u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
+
+ return (!!(status & (1 << HCR_GO_BIT)));
+}
+
+/* This function must be called with "hcr_mutex" held */
+static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
+ u64 in_param, u64 out_param,
+ u32 in_modifier, u8 op_modifier, u16 op,
+ u16 token, int event)
+{
+ struct hns_roce_cmdq *cmd = &hr_dev->cmd;
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 __iomem *hcr = (u32 *)cmd->hcr;
+ int ret = -EAGAIN;
+ unsigned long end;
+ u32 val = 0;
+
+ end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
+ while (cmd_pending(hr_dev)) {
+ if (time_after(jiffies, end)) {
+ dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
+ (int)end);
+ goto out;
+ }
+ cond_resched();
+ }
+
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
+ op);
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
+ ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
+ roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
+ roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
+ ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
+
+ __raw_writeq(cpu_to_le64(in_param), hcr + 0);
+ __raw_writeq(cpu_to_le64(out_param), hcr + 2);
+ __raw_writel(cpu_to_le32(in_modifier), hcr + 4);
+ /* Memory barrier */
+ wmb();
+
+ __raw_writel(cpu_to_le32(val), hcr + 5);
+
+ mmiowb();
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier,
+ u8 op_modifier, u16 op, u16 token,
+ int event)
+{
+ struct hns_roce_cmdq *cmd = &hr_dev->cmd;
+ int ret = -EAGAIN;
+
+ mutex_lock(&cmd->hcr_mutex);
+ ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op, token,
+ event);
+ mutex_unlock(&cmd->hcr_mutex);
+
+ return ret;
+}
+
+/* This must be called with "poll_sem" held */
+static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, unsigned long in_modifier,
+ u8 op_modifier, u16 op,
+ unsigned long timeout)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u8 __iomem *hcr = hr_dev->cmd.hcr;
+ unsigned long end = 0;
+ u32 status = 0;
+ int ret;
+
+ ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ CMD_POLL_TOKEN, 0);
+ if (ret) {
+ dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
+ goto out;
+ }
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (cmd_pending(hr_dev) && time_before(jiffies, end))
+ cond_resched();
+
+ if (cmd_pending(hr_dev)) {
+ dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ status = le32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET));
+ if ((status & STATUS_MASK) != 0x1) {
+ dev_err(dev, "mailbox status 0x%x!\n", status);
+ ret = -EBUSY;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, unsigned long in_modifier,
+ u8 op_modifier, u16 op, unsigned long timeout)
+{
+ int ret;
+
+ down(&hr_dev->cmd.poll_sem);
+ ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,
+ op_modifier, op, timeout);
+ up(&hr_dev->cmd.poll_sem);
+
+ return ret;
+}
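
Both the post and poll paths hinge on cmd_pending(), which reads the GO bit back from the HCR: the hardware clears it when the mailbox command retires, so the driver spins against a jiffies deadline rather than sleeping on an interrupt. Note also the wmb() in __hns_roce_cmd_mbox_post_hw(), which ensures the parameter words are visible to the device before the word carrying the GO bit is written. A user-space sketch of the same deadline-bounded poll, with a stubbed register in place of the CSR read and all names hypothetical:

#include <stdio.h>
#include <time.h>

#define GO_BIT (1u << 15)

static volatile unsigned int fake_hcr = GO_BIT;  /* stands in for the CSR */

static int cmd_pending(void)
{
	return !!(fake_hcr & GO_BIT);            /* hw clears GO when done */
}

static long elapsed_ms(const struct timespec *s, const struct timespec *n)
{
	return (n->tv_sec - s->tv_sec) * 1000 +
	       (n->tv_nsec - s->tv_nsec) / 1000000;
}

static int poll_go_bit(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (cmd_pending()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (elapsed_ms(&start, &now) > timeout_ms)
			return -1;               /* -ETIMEDOUT in the driver */
		fake_hcr &= ~GO_BIT;             /* simulate hw completing */
	}
	return 0;
}

int main(void)
{
	printf("poll result: %d\n", poll_go_bit(10));   /* prints 0 */
	return 0;
}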
+
+void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
+ u64 out_param)
+{
+ struct hns_roce_cmd_context
+ *context = &hr_dev->cmd.context[token & hr_dev->cmd.token_mask];
+
+ if (token != context->token)
+ return;
+
+ context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
+ context->out_param = out_param;
+ complete(&context->done);
+}
+
+/* This is only called in event mode ("use_events"), with "event_sem" held */
+static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, unsigned long in_modifier,
+ u8 op_modifier, u16 op,
+ unsigned long timeout)
+{
+ struct hns_roce_cmdq *cmd = &hr_dev->cmd;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cmd_context *context;
+ int ret = 0;
+
+ spin_lock(&cmd->context_lock);
+ WARN_ON(cmd->free_head < 0);
+ context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
+ cmd->free_head = context->next;
+ spin_unlock(&cmd->context_lock);
+
+ init_completion(&context->done);
+
+ ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ context->token, 1);
+ if (ret)
+ goto out;
+
+ /*
+ * wait_for_completion_timeout() returns 0 on timeout; otherwise it
+ * returns the number of jiffies left before the timeout would have
+ * expired.
+ */
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
+ dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = context->result;
+ if (ret) {
+ dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
+ goto out;
+ }
+
+out:
+ spin_lock(&cmd->context_lock);
+ context->next = cmd->free_head;
+ cmd->free_head = context - cmd->context;
+ spin_unlock(&cmd->context_lock);
+
+ return ret;
+}
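
Event-mode commands draw a context from a singly linked free list guarded by context_lock, and the token is bumped by token_mask + 1 on every reuse, so a stale completion aimed at a previous occupant of the same slot fails the "token != context->token" check in hns_roce_cmd_event(). A compact single-threaded sketch of that slot/token discipline, with a pool of 4 slots instead of the driver's 32 and hypothetical names:

#include <stdio.h>

#define NCTX 4
#define TOKEN_MASK (NCTX - 1)   /* low bits select the slot */

struct ctx {
	unsigned short token;
	int next;
};

static struct ctx ctxs[NCTX];
static int free_head;

static void pool_init(void)
{
	int i;

	for (i = 0; i < NCTX; i++) {
		ctxs[i].token = i;
		ctxs[i].next = i + 1;
	}
	ctxs[NCTX - 1].next = -1;
	free_head = 0;
}

/* Take a slot and advance its token past all previously issued tokens. */
static unsigned short ctx_get(void)
{
	struct ctx *c = &ctxs[free_head];

	c->token += TOKEN_MASK + 1;
	free_head = c->next;
	return c->token;
}

static void ctx_put(unsigned short token)
{
	int slot = token & TOKEN_MASK;

	ctxs[slot].next = free_head;
	free_head = slot;
}

/* Mirrors the check in hns_roce_cmd_event(). */
static int event_matches(unsigned short token)
{
	return ctxs[token & TOKEN_MASK].token == token;
}

int main(void)
{
	unsigned short t1, t2;

	pool_init();
	t1 = ctx_get();
	ctx_put(t1);
	t2 = ctx_get();                 /* same slot, new token */
	printf("stale ok=%d fresh ok=%d\n", event_matches(t1), event_matches(t2));
	return 0;
}

Running this prints "stale ok=0 fresh ok=1": the recycled slot rejects the old token while accepting the fresh one.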
+
+static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, unsigned long in_modifier,
+ u8 op_modifier, u16 op, unsigned long timeout)
+{
+ int ret = 0;
+
+ down(&hr_dev->cmd.event_sem);
+ ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op, timeout);
+ up(&hr_dev->cmd.event_sem);
+
+ return ret;
+}
+
+int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
+ unsigned long in_modifier, u8 op_modifier, u16 op,
+ unsigned long timeout)
+{
+ if (hr_dev->cmd.use_events)
+ return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
+ else
+ return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+ in_modifier, op_modifier, op,
+ timeout);
+}
+
+int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ mutex_init(&hr_dev->cmd.hcr_mutex);
+ sema_init(&hr_dev->cmd.poll_sem, 1);
+ hr_dev->cmd.use_events = 0;
+ hr_dev->cmd.toggle = 1;
+ hr_dev->cmd.max_cmds = CMD_MAX_NUM;
+ hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
+ hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
+ HNS_ROCE_MAILBOX_SIZE,
+ HNS_ROCE_MAILBOX_SIZE, 0);
+ if (!hr_dev->cmd.pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
+{
+ dma_pool_destroy(hr_dev->cmd.pool);
+}
+
+int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
+ int i;
+
+ hr_cmd->context = kmalloc(hr_cmd->max_cmds *
+ sizeof(struct hns_roce_cmd_context),
+ GFP_KERNEL);
+ if (!hr_cmd->context)
+ return -ENOMEM;
+
+ for (i = 0; i < hr_cmd->max_cmds; ++i) {
+ hr_cmd->context[i].token = i;
+ hr_cmd->context[i].next = i + 1;
+ }
+
+ hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
+ hr_cmd->free_head = 0;
+
+ sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
+ spin_lock_init(&hr_cmd->context_lock);
+
+ hr_cmd->token_mask = CMD_TOKEN_MASK;
+ hr_cmd->use_events = 1;
+
+ down(&hr_cmd->poll_sem);
+
+ return 0;
+}
+
+void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
+ int i;
+
+ hr_cmd->use_events = 0;
+
+ for (i = 0; i < hr_cmd->max_cmds; ++i)
+ down(&hr_cmd->event_sem);
+
+ kfree(hr_cmd->context);
+ up(&hr_cmd->poll_sem);
+}
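
hns_roce_cmd_use_polling() flips back to polling by downing event_sem once per context: every in-flight event-mode command holds one unit, so acquiring all max_cmds units both blocks new submissions and waits for outstanding ones to drain before the contexts are freed. A sketch of this counted-drain idiom with POSIX semaphores (names hypothetical; link with -pthread):

#include <semaphore.h>
#include <stdio.h>

#define MAX_CMDS 4

int main(void)
{
	sem_t event_sem;
	int i;

	sem_init(&event_sem, 0, MAX_CMDS);   /* one unit per command slot */

	sem_wait(&event_sem);                /* a command in flight holds one */
	sem_post(&event_sem);                /* ...and releases it when done */

	/* Quiesce: acquire every unit, so nothing can be in flight. */
	for (i = 0; i < MAX_CMDS; i++)
		sem_wait(&event_sem);
	printf("drained; safe to free contexts\n");
	return 0;
}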
+
+struct hns_roce_cmd_mailbox
+ *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+
+ mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
+ if (!mailbox)
+ return ERR_PTR(-ENOMEM);
+
+ mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
+ &mailbox->dma);
+ if (!mailbox->buf) {
+ kfree(mailbox);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mailbox;
+}
+
+void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox)
+{
+ if (!mailbox)
+ return;
+
+ dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
+ kfree(mailbox);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
new file mode 100644
index 000000000000..e3997d312c55
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_CMD_H
+#define _HNS_ROCE_CMD_H
+
+#define HNS_ROCE_MAILBOX_SIZE 4096
+
+enum {
+ /* TPT commands */
+ HNS_ROCE_CMD_SW2HW_MPT = 0xd,
+ HNS_ROCE_CMD_HW2SW_MPT = 0xf,
+
+ /* CQ commands */
+ HNS_ROCE_CMD_SW2HW_CQ = 0x16,
+ HNS_ROCE_CMD_HW2SW_CQ = 0x17,
+
+ /* QP/EE commands */
+ HNS_ROCE_CMD_RST2INIT_QP = 0x19,
+ HNS_ROCE_CMD_INIT2RTR_QP = 0x1a,
+ HNS_ROCE_CMD_RTR2RTS_QP = 0x1b,
+ HNS_ROCE_CMD_RTS2RTS_QP = 0x1c,
+ HNS_ROCE_CMD_2ERR_QP = 0x1e,
+ HNS_ROCE_CMD_RTS2SQD_QP = 0x1f,
+ HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
+ HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
+ HNS_ROCE_CMD_2RST_QP = 0x21,
+ HNS_ROCE_CMD_QUERY_QP = 0x22,
+};
+
+enum {
+ HNS_ROCE_CMD_TIME_CLASS_A = 10000,
+ HNS_ROCE_CMD_TIME_CLASS_B = 10000,
+ HNS_ROCE_CMD_TIME_CLASS_C = 10000,
+};
+
+struct hns_roce_cmd_mailbox {
+ void *buf;
+ dma_addr_t dma;
+};
+
+int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
+ unsigned long in_modifier, u8 op_modifier, u16 op,
+ unsigned long timeout);
+
+struct hns_roce_cmd_mailbox
+ *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
+void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox);
+
+#endif /* _HNS_ROCE_CMD_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
new file mode 100644
index 000000000000..297016103aa7
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_COMMON_H
+#define _HNS_ROCE_COMMON_H
+
+#ifndef assert
+#define assert(cond)
+#endif
+
+#define roce_write(dev, reg, val) writel((val), (dev)->reg_base + (reg))
+#define roce_read(dev, reg) readl((dev)->reg_base + (reg))
+#define roce_raw_write(value, addr) \
+ __raw_writel((__force u32)cpu_to_le32(value), (addr))
+
+#define roce_get_field(origin, mask, shift) \
+ (((origin) & (mask)) >> (shift))
+
+#define roce_get_bit(origin, shift) \
+ roce_get_field((origin), (1ul << (shift)), (shift))
+
+#define roce_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= (((u32)(val) << (shift)) & (mask)); \
+ } while (0)
+
+#define roce_set_bit(origin, shift, val) \
+ roce_set_field((origin), (1ul << (shift)), (shift), (val))
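
roce_set_field() clears the masked bits and ORs in the shifted value; roce_get_field() reverses the operation. A small compile-and-run example that composes a mailbox word the way __hns_roce_cmd_mbox_post_hw() does, reusing the macro definitions verbatim; the field layout follows the ROCEE_MB6 definitions later in this header, and the short macro names are hypothetical:

#include <stdio.h>

typedef unsigned int u32;

#define roce_get_field(origin, mask, shift) \
	(((origin) & (mask)) >> (shift))

#define roce_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= (((u32)(val) << (shift)) & (mask)); \
	} while (0)

/* Same layout as ROCEE_MB6: cmd in bits 0-7, token in bits 16-31. */
#define MB_CMD_S   0
#define MB_CMD_M   (((1UL << 8) - 1) << MB_CMD_S)
#define MB_TOKEN_S 16
#define MB_TOKEN_M (((1UL << 16) - 1) << MB_TOKEN_S)

int main(void)
{
	u32 val = 0;

	roce_set_field(val, MB_CMD_M, MB_CMD_S, 0x16);     /* SW2HW_CQ */
	roce_set_field(val, MB_TOKEN_M, MB_TOKEN_S, 0xffff);
	/* Prints: val=0xffff0016 cmd=0x16 token=0xffff */
	printf("val=0x%08x cmd=0x%lx token=0x%lx\n", val,
	       roce_get_field(val, MB_CMD_M, MB_CMD_S),
	       roce_get_field(val, MB_TOKEN_M, MB_TOKEN_S));
	return 0;
}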
+
+#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
+#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
+
+#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5
+
+#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6
+
+#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10
+#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \
+ (((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S)
+
+#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16
+
+#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0
+#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \
+ (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S)
+
+#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24
+#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \
+ (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S)
+
+#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0
+#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \
+ (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S)
+
+#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24
+#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \
+ (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S)
+
+#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0
+#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \
+ (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S)
+
+#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16
+#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \
+ (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S)
+
+#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0
+#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \
+ (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S)
+
+#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16
+#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \
+ (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S)
+
+#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0
+#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \
+ (((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S)
+
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \
+ (((1UL << 15) - 1) << \
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S)
+
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \
+ (((1UL << 4) - 1) << \
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S)
+
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20
+
+#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21
+
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S)
+
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S)
+
+#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0
+#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S)
+
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5
+#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S)
+
+#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0
+#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S)
+
+#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8
+#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \
+ (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S)
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \
+ (((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S)
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \
+ (((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S)
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \
+ (((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S)
+
+#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31
+
+#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0
+#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \
+ (((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S)
+
+#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0
+#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \
+ (((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S)
+
+#define ROCEE_MB6_ROCEE_MB_CMD_S 0
+#define ROCEE_MB6_ROCEE_MB_CMD_M \
+ (((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S)
+
+#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8
+#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \
+ (((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S)
+
+#define ROCEE_MB6_ROCEE_MB_EVENT_S 14
+
+#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15
+
+#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16
+#define ROCEE_MB6_ROCEE_MB_TOKEN_M \
+ (((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S)
+
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \
+ (((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S)
+
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \
+ (((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S)
+
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \
+ (((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S)
+
+#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31
+
+#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0
+#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \
+ (((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S)
+
+#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16
+#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \
+ (((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S)
+
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \
+ (((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S)
+
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \
+ (((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S)
+
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17
+
+#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0
+#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \
+ (((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S)
+
+#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16
+#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \
+ (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S)
+
+#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0
+#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \
+ (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S)
+
+#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16
+#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1
+#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0
+
+#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0
+#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1
+
+#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0
+
+#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0
+#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \
+ (((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
+
+#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0
+#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
+ (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
+
+#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
+#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
+ (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
+
+/*************ROCEE_REG DEFINITION****************/
+#define ROCEE_VENDOR_ID_REG 0x0
+#define ROCEE_VENDOR_PART_ID_REG 0x4
+
+#define ROCEE_HW_VERSION_REG 0x8
+
+#define ROCEE_SYS_IMAGE_GUID_L_REG 0xC
+#define ROCEE_SYS_IMAGE_GUID_H_REG 0x10
+
+#define ROCEE_PORT_GID_L_0_REG 0x50
+#define ROCEE_PORT_GID_ML_0_REG 0x54
+#define ROCEE_PORT_GID_MH_0_REG 0x58
+#define ROCEE_PORT_GID_H_0_REG 0x5C
+
+#define ROCEE_BT_CMD_H_REG 0x204
+
+#define ROCEE_SMAC_L_0_REG 0x240
+#define ROCEE_SMAC_H_0_REG 0x244
+
+#define ROCEE_QP1C_CFG3_0_REG 0x27C
+
+#define ROCEE_CAEP_AEQE_CONS_IDX_REG 0x3AC
+#define ROCEE_CAEP_CEQC_CONS_IDX_0_REG 0x3BC
+
+#define ROCEE_ECC_UCERR_ALM1_REG 0xB38
+#define ROCEE_ECC_UCERR_ALM2_REG 0xB3C
+#define ROCEE_ECC_CERR_ALM1_REG 0xB44
+#define ROCEE_ECC_CERR_ALM2_REG 0xB48
+
+#define ROCEE_ACK_DELAY_REG 0x14
+#define ROCEE_GLB_CFG_REG 0x18
+
+#define ROCEE_DMAE_USER_CFG1_REG 0x40
+#define ROCEE_DMAE_USER_CFG2_REG 0x44
+
+#define ROCEE_DB_SQ_WL_REG 0x154
+#define ROCEE_DB_OTHERS_WL_REG 0x158
+#define ROCEE_RAQ_WL_REG 0x15C
+#define ROCEE_WRMS_POL_TIME_INTERVAL_REG 0x160
+#define ROCEE_EXT_DB_SQ_REG 0x164
+#define ROCEE_EXT_DB_SQ_H_REG 0x168
+#define ROCEE_EXT_DB_OTH_REG 0x16C
+
+#define ROCEE_EXT_DB_OTH_H_REG 0x170
+#define ROCEE_EXT_DB_SQ_WL_EMPTY_REG 0x174
+#define ROCEE_EXT_DB_SQ_WL_REG 0x178
+#define ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG 0x17C
+#define ROCEE_EXT_DB_OTHERS_WL_REG 0x180
+#define ROCEE_EXT_RAQ_REG 0x184
+#define ROCEE_EXT_RAQ_H_REG 0x188
+
+#define ROCEE_CAEP_CE_INTERVAL_CFG_REG 0x190
+#define ROCEE_CAEP_CE_BURST_NUM_CFG_REG 0x194
+#define ROCEE_BT_CMD_L_REG 0x200
+
+#define ROCEE_MB1_REG 0x210
+#define ROCEE_DB_SQ_L_0_REG 0x230
+#define ROCEE_DB_OTHERS_L_0_REG 0x238
+#define ROCEE_QP1C_CFG0_0_REG 0x270
+
+#define ROCEE_CAEP_AEQC_AEQE_SHIFT_REG 0x3A0
+#define ROCEE_CAEP_CEQC_SHIFT_0_REG 0x3B0
+#define ROCEE_CAEP_CE_IRQ_MASK_0_REG 0x3C0
+#define ROCEE_CAEP_CEQ_ALM_OVF_0_REG 0x3C4
+#define ROCEE_CAEP_AE_MASK_REG 0x6C8
+#define ROCEE_CAEP_AE_ST_REG 0x6CC
+
+#define ROCEE_SDB_ISSUE_PTR_REG 0x758
+#define ROCEE_SDB_SEND_PTR_REG 0x75C
+#define ROCEE_SDB_INV_CNT_REG 0x9A4
+#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
+#define ROCEE_ECC_CERR_ALM0_REG 0xB40
+
+#endif /* _HNS_ROCE_COMMON_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
new file mode 100644
index 000000000000..097365932b09
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_user.h"
+#include "hns_roce_common.h"
+
+static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
+{
+ struct ib_cq *ibcq = &hr_cq->ib_cq;
+
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
+}
+
+static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
+ enum hns_roce_event event_type)
+{
+ struct hns_roce_dev *hr_dev;
+ struct ib_event event;
+ struct ib_cq *ibcq;
+
+ ibcq = &hr_cq->ib_cq;
+ hr_dev = to_hr_dev(ibcq->device);
+
+ if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+ dev_err(&hr_dev->pdev->dev,
+ "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
+ event_type, hr_cq->cqn);
+ return;
+ }
+
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.event = IB_EVENT_CQ_ERR;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+}
+
+static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long cq_num)
+{
+ return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
+ HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
+}
+
+static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
+ struct hns_roce_mtt *hr_mtt,
+ struct hns_roce_uar *hr_uar,
+ struct hns_roce_cq *hr_cq, int vector)
+{
+ struct hns_roce_cmd_mailbox *mailbox = NULL;
+ struct hns_roce_cq_table *cq_table = NULL;
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t dma_handle;
+ u64 *mtts = NULL;
+ int ret = 0;
+
+ cq_table = &hr_dev->cq_table;
+
+ /* Get the physical address of cq buf */
+ mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ hr_mtt->first_seg, &dma_handle);
+ if (!mtts) {
+ dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
+ return -EINVAL;
+ }
+
+ if (vector >= hr_dev->caps.num_comp_vectors) {
+ dev_err(dev, "CQ alloc.Invalid vector.\n");
+ return -EINVAL;
+ }
+ hr_cq->vector = vector;
+
+ ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
+ if (ret == -1) {
+ dev_err(dev, "CQ alloc.Failed to alloc index.\n");
+ return -ENOMEM;
+ }
+
+ /* Get CQC memory HEM(Hardware Entry Memory) table */
+ ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
+ if (ret) {
+ dev_err(dev, "CQ alloc.Failed to get context mem.\n");
+ goto err_out;
+ }
+
+ /* Insert the CQ into the radix tree, keyed by its CQN */
+ spin_lock_irq(&cq_table->lock);
+ ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
+ spin_unlock_irq(&cq_table->lock);
+ if (ret) {
+ dev_err(dev, "CQ alloc.Failed to radix_tree_insert.\n");
+ goto err_put;
+ }
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ret = PTR_ERR(mailbox);
+ goto err_radix;
+ }
+
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
+ nent, vector);
+
+ /* Send mailbox to hw */
+ ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret) {
+ dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
+ goto err_radix;
+ }
+
+ hr_cq->cons_index = 0;
+ hr_cq->uar = hr_uar;
+
+ atomic_set(&hr_cq->refcount, 1);
+ init_completion(&hr_cq->free);
+
+ return 0;
+
+err_radix:
+ spin_lock_irq(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, hr_cq->cqn);
+ spin_unlock_irq(&cq_table->lock);
+
+err_put:
+ hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
+
+err_out:
+ hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+ return ret;
+}
+
+static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long cq_num)
+{
+ return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
+ mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
+ HNS_ROCE_CMD_TIME_CLASS_A);
+}
+
+static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret;
+
+ ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
+ if (ret)
+ dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
+ hr_cq->cqn);
+
+ /* Wait for any in-flight interrupt handling to finish */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+ /* Wait until all references, including ours, have been dropped */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
+
+ spin_lock_irq(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, hr_cq->cqn);
+ spin_unlock_irq(&cq_table->lock);
+
+ hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
+ hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+}
+
+static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
+ struct ib_ucontext *context,
+ struct hns_roce_cq_buf *buf,
+ struct ib_umem **umem, u64 buf_addr, int cqe)
+{
+ int ret;
+
+ *umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(*umem))
+ return PTR_ERR(*umem);
+
+ ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
+ ilog2((unsigned int)(*umem)->page_size),
+ &buf->hr_mtt);
+ if (ret)
+ goto err_buf;
+
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
+ if (ret)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+
+err_buf:
+ ib_umem_release(*umem);
+ return ret;
+}
+
+static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq_buf *buf, u32 nent)
+{
+ int ret;
+
+ ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
+ PAGE_SIZE * 2, &buf->hr_buf);
+ if (ret)
+ goto out;
+
+ ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
+ buf->hr_buf.page_shift, &buf->hr_mtt);
+ if (ret)
+ goto err_buf;
+
+ ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
+ if (ret)
+ goto err_mtt;
+
+ return 0;
+
+err_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+
+err_buf:
+ hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
+ &buf->hr_buf);
+out:
+ return ret;
+}
+
+static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq_buf *buf, int cqe)
+{
+ hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
+ &buf->hr_buf);
+}
+
+struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_ib_create_cq ucmd;
+ struct hns_roce_cq *hr_cq = NULL;
+ struct hns_roce_uar *uar = NULL;
+ int vector = attr->comp_vector;
+ int cq_entries = attr->cqe;
+ int ret = 0;
+
+ if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
+ dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
+ cq_entries, hr_dev->caps.max_cqes);
+ return ERR_PTR(-EINVAL);
+ }
+
+ hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
+ if (!hr_cq)
+ return ERR_PTR(-ENOMEM);
+
+ /* The v1 engine requires at least HNS_ROCE_MIN_CQE_NUM entries */
+ if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
+ cq_entries = HNS_ROCE_MIN_CQE_NUM;
+
+ cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1;
+ spin_lock_init(&hr_cq->lock);
+
+ if (context) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ dev_err(dev, "Failed to copy_from_udata.\n");
+ ret = -EFAULT;
+ goto err_cq;
+ }
+
+ /* Get user space address, write it into mtt table */
+ ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
+ &hr_cq->umem, ucmd.buf_addr,
+ cq_entries);
+ if (ret) {
+ dev_err(dev, "Failed to get_cq_umem.\n");
+ goto err_cq;
+ }
+
+ /* Get user space parameters */
+ uar = &to_hr_ucontext(context)->uar;
+ } else {
+ /* Init mtt table and write the buf address into it */
+ ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
+ cq_entries);
+ if (ret) {
+ dev_err(dev, "Failed to alloc_cq_buf.\n");
+ goto err_cq;
+ }
+
+ uar = &hr_dev->priv_uar;
+ hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
+ 0x1000 * uar->index;
+ }
+
+ /* Allocate cq index, fill cq_context */
+ ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
+ hr_cq, vector);
+ if (ret) {
+ dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
+ goto err_mtt;
+ }
+
+ /* Hook up the completion and async event handlers for the new CQ */
+ hr_cq->comp = hns_roce_ib_cq_comp;
+ hr_cq->event = hns_roce_ib_cq_event;
+ hr_cq->cq_depth = cq_entries;
+
+ if (context) {
+ if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
+ ret = -EFAULT;
+ goto err_cqc;
+ }
+ }
+
+ return &hr_cq->ib_cq;
+
+err_cqc:
+ hns_roce_free_cq(hr_dev, hr_cq);
+
+err_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ if (context)
+ ib_umem_release(hr_cq->umem);
+ else
+ hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
+ hr_cq->ib_cq.cqe);
+
+err_cq:
+ kfree(hr_cq);
+ return ERR_PTR(ret);
+}
+
+int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+
+ hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+
+ if (ib_cq->uobject)
+ ib_umem_release(hr_cq->umem);
+ else
+ /* Free the buffer backing the CQ */
+ hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+
+ kfree(hr_cq);
+
+ return 0;
+}
+
+void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cq *cq;
+
+ cq = radix_tree_lookup(&hr_dev->cq_table.tree,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!cq) {
+ dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+ return;
+ }
+
+ cq->comp(cq);
+}
+
+void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cq *cq;
+
+ cq = radix_tree_lookup(&cq_table->tree,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (cq)
+ atomic_inc(&cq->refcount);
+
+ if (!cq) {
+ dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ return;
+ }
+
+ cq->event(cq, (enum hns_roce_event)event_type);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
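
hns_roce_cq_event() takes a reference before dispatching and drops it afterwards, while hns_roce_free_cq() drops the allocation-time reference and then blocks in wait_for_completion(); whichever path drops the last reference fires complete(&cq->free), so the CQ cannot be freed while an event handler still holds it. A minimal single-threaded sketch of that last-reference-completes pattern using C11 atomics, with the completion reduced to a flag and all names hypothetical:

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
	int freed_ok;          /* stands in for the struct completion */
};

static void obj_put(struct obj *o)
{
	/* The last reference signals the waiter, like complete(&cq->free). */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		o->freed_ok = 1;
}

int main(void)
{
	struct obj cq = { 0 };

	atomic_init(&cq.refcount, 1);        /* initial ref from allocation */
	atomic_fetch_add(&cq.refcount, 1);   /* event handler takes a ref */
	obj_put(&cq);                        /* teardown drops the initial ref */
	printf("after teardown put: freed_ok=%d\n", cq.freed_ok);   /* still 0 */
	obj_put(&cq);                        /* event handler finishes */
	printf("after handler put:  freed_ok=%d\n", cq.freed_ok);   /* now 1 */
	return 0;
}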
+
+int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+
+ spin_lock_init(&cq_table->lock);
+ INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+
+ return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
+ hr_dev->caps.num_cqs - 1,
+ hr_dev->caps.reserved_cqs, 0);
+}
+
+void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
new file mode 100644
index 000000000000..341731553a60
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -0,0 +1,728 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_DEVICE_H
+#define _HNS_ROCE_DEVICE_H
+
+#include <rdma/ib_verbs.h>
+
+#define DRV_NAME "hns_roce"
+
+#define MAC_ADDR_OCTET_NUM 6
+#define HNS_ROCE_MAX_MSG_LEN 0x80000000
+
+#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
+
+#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
+
+#define HNS_ROCE_BA_SIZE (32 * 4096)
+
+/* Hardware specification only for v1 engine */
+#define HNS_ROCE_MIN_CQE_NUM 0x40
+#define HNS_ROCE_MIN_WQE_NUM 0x20
+
+/* Hardware specification only for v1 engine */
+#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
+#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
+
+#define HNS_ROCE_MAX_IRQ_NUM 34
+
+#define HNS_ROCE_COMP_VEC_NUM 32
+
+#define HNS_ROCE_AEQE_VEC_NUM 1
+#define HNS_ROCE_AEQE_OF_VEC_NUM 1
+
+/* 4G/4K = 1M */
+#define HNS_ROCE_SL_SHIFT 28
+#define HNS_ROCE_TCLASS_SHIFT 20
+#define HNS_ROCE_FLOW_LABLE_MASK 0xfffff
+
+#define HNS_ROCE_MAX_PORTS 6
+#define HNS_ROCE_MAX_GID_NUM 16
+#define HNS_ROCE_GID_SIZE 16
+
+#define MR_TYPE_MR 0x00
+#define MR_TYPE_DMA 0x03
+
+#define PKEY_ID 0xffff
+#define GUID_LEN 8
+#define NODE_DESC_SIZE 64
+#define DB_REG_OFFSET 0x1000
+
+#define SERV_TYPE_RC 0
+#define SERV_TYPE_RD 1
+#define SERV_TYPE_UC 2
+#define SERV_TYPE_UD 3
+
+#define PAGES_SHIFT_8 8
+#define PAGES_SHIFT_16 16
+#define PAGES_SHIFT_24 24
+#define PAGES_SHIFT_32 32
+
+enum hns_roce_qp_state {
+ HNS_ROCE_QP_STATE_RST,
+ HNS_ROCE_QP_STATE_INIT,
+ HNS_ROCE_QP_STATE_RTR,
+ HNS_ROCE_QP_STATE_RTS,
+ HNS_ROCE_QP_STATE_SQD,
+ HNS_ROCE_QP_STATE_ERR,
+ HNS_ROCE_QP_NUM_STATE,
+};
+
+enum hns_roce_event {
+ HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
+ HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
+ HNS_ROCE_EVENT_TYPE_COMM_EST = 0x03,
+ HNS_ROCE_EVENT_TYPE_SQ_DRAINED = 0x04,
+ HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
+ HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
+ HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
+ HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH = 0x08,
+ HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH = 0x09,
+ HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR = 0x0a,
+ HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR = 0x0b,
+ HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW = 0x0c,
+ HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID = 0x0d,
+ HNS_ROCE_EVENT_TYPE_PORT_CHANGE = 0x0f,
+	/* 0x10 and 0x11 are unused in the current application */
+ HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
+ HNS_ROCE_EVENT_TYPE_MB = 0x13,
+ HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW = 0x14,
+};
+
+/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
+enum {
+ HNS_ROCE_LWQCE_QPC_ERROR = 1,
+ HNS_ROCE_LWQCE_MTU_ERROR = 2,
+ HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR = 3,
+ HNS_ROCE_LWQCE_WQE_ADDR_ERROR = 4,
+ HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR = 5,
+ HNS_ROCE_LWQCE_SL_ERROR = 6,
+ HNS_ROCE_LWQCE_PORT_ERROR = 7,
+};
+
+/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
+enum {
+ HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
+ HNS_ROCE_LAVWQE_LENGTH_ERROR = 2,
+ HNS_ROCE_LAVWQE_VA_ERROR = 3,
+ HNS_ROCE_LAVWQE_PD_ERROR = 4,
+ HNS_ROCE_LAVWQE_RW_ACC_ERROR = 5,
+ HNS_ROCE_LAVWQE_KEY_STATE_ERROR = 6,
+ HNS_ROCE_LAVWQE_MR_OPERATION_ERROR = 7,
+};
+
+/* DOORBELL overflow subtype */
+enum {
+ HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
+ HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF = 2,
+ HNS_ROCE_DB_SUBTYPE_ODB_OVF = 3,
+ HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF = 4,
+ HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP = 5,
+ HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP = 6,
+};
+
+enum {
+ /* RQ&SRQ related operations */
+ HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
+ HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
+};
+
+#define HNS_ROCE_CMD_SUCCESS 1
+
+#define HNS_ROCE_PORT_DOWN 0
+#define HNS_ROCE_PORT_UP 1
+
+#define HNS_ROCE_MTT_ENTRY_PER_SEG 8
+
+#define PAGE_ADDR_SHIFT 12
+
+struct hns_roce_uar {
+ u64 pfn;
+ unsigned long index;
+};
+
+struct hns_roce_ucontext {
+ struct ib_ucontext ibucontext;
+ struct hns_roce_uar uar;
+};
+
+struct hns_roce_pd {
+ struct ib_pd ibpd;
+ unsigned long pdn;
+};
+
+struct hns_roce_bitmap {
+	/* The last bit found set while traversing the bitmap */
+ unsigned long last;
+ unsigned long top;
+ unsigned long max;
+ unsigned long reserved_top;
+ unsigned long mask;
+ spinlock_t lock;
+ unsigned long *table;
+};
+
+/*
+ * The bitmap for a given order holds 1 << (max_order - order) bits:
+ * order 0 has the largest bitmap and order max_order a single bit.
+ * Each bit tracks the free/used state of one buddy block of that order.
+ * Initially all bits are 0 except the single max_order bit, which is 1.
+ * Bit = 1 means idle and available; bit = 0 means not available.
+ */
+struct hns_roce_buddy {
+ /* Members point to every order level bitmap */
+ unsigned long **bits;
+ /* Represent to avail bits of the order level bitmap */
+ u32 *num_free;
+ int max_order;
+ spinlock_t lock;
+};
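+/*
+ * Example of the sizing rule above: with max_order = 3 the order-0
+ * bitmap has 1 << (3 - 0) = 8 bits, order 1 has 4, order 2 has 2 and
+ * order 3 a single bit, so one order-3 block covers eight order-0
+ * units.
+ */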
+
+/* For Hardware Entry Memory */
+struct hns_roce_hem_table {
+	/* HEM type: 0 = qpc, 1 = mtpt, 2 = cqc, 3 = srqc, 4 = mtt, 5 = irrl */
+	u32	type;
+	/* Number of elements in the HEM array */
+	unsigned long	num_hem;
+	/* Total number of objects recorded in the HEM entries */
+	unsigned long	num_obj;
+	/* Single obj size */
+	unsigned long	obj_size;
+ int lowmem;
+ struct mutex mutex;
+ struct hns_roce_hem **hem;
+};
+
+struct hns_roce_mtt {
+ unsigned long first_seg;
+ int order;
+ int page_shift;
+};
+
+/* Only support 4K page size for mr register */
+#define MR_SIZE_4K 0
+
+struct hns_roce_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+	u64			iova; /* MR's virtual original addr */
+ u64 size; /* Address range of MR */
+ u32 key; /* Key of MR */
+ u32 pd; /* PD num of MR */
+	u32			access; /* Access permission of MR */
+	int			enabled; /* MR's active status */
+	int			type; /* MR's register type */
+	u64			*pbl_buf; /* MR's PBL space */
+ dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
+};
+
+struct hns_roce_mr_table {
+ struct hns_roce_bitmap mtpt_bitmap;
+ struct hns_roce_buddy mtt_buddy;
+ struct hns_roce_hem_table mtt_table;
+ struct hns_roce_hem_table mtpt_table;
+};
+
+struct hns_roce_wq {
+ u64 *wrid; /* Work request ID */
+ spinlock_t lock;
+ int wqe_cnt; /* WQE num */
+ u32 max_post;
+ int max_gs;
+ int offset;
+	int		wqe_shift; /* log2 of WQE size */
+ u32 head;
+ u32 tail;
+ void __iomem *db_reg_l;
+};
+
+struct hns_roce_buf_list {
+ void *buf;
+ dma_addr_t map;
+};
+
+struct hns_roce_buf {
+ struct hns_roce_buf_list direct;
+ struct hns_roce_buf_list *page_list;
+ int nbufs;
+ u32 npages;
+ int page_shift;
+};
+
+struct hns_roce_cq_buf {
+ struct hns_roce_buf hr_buf;
+ struct hns_roce_mtt hr_mtt;
+};
+
+struct hns_roce_cq {
+ struct ib_cq ib_cq;
+ struct hns_roce_cq_buf hr_buf;
+ spinlock_t lock;
+ struct ib_umem *umem;
+ void (*comp)(struct hns_roce_cq *);
+ void (*event)(struct hns_roce_cq *, enum hns_roce_event);
+
+ struct hns_roce_uar *uar;
+ u32 cq_depth;
+ u32 cons_index;
+ void __iomem *cq_db_l;
+ void __iomem *tptr_addr;
+ unsigned long cqn;
+ u32 vector;
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct hns_roce_srq {
+ struct ib_srq ibsrq;
+ int srqn;
+};
+
+struct hns_roce_uar_table {
+ struct hns_roce_bitmap bitmap;
+};
+
+struct hns_roce_qp_table {
+ struct hns_roce_bitmap bitmap;
+ spinlock_t lock;
+ struct hns_roce_hem_table qp_table;
+ struct hns_roce_hem_table irrl_table;
+};
+
+struct hns_roce_cq_table {
+ struct hns_roce_bitmap bitmap;
+ spinlock_t lock;
+ struct radix_tree_root tree;
+ struct hns_roce_hem_table table;
+};
+
+struct hns_roce_raq_table {
+ struct hns_roce_buf_list *e_raq_buf;
+};
+
+struct hns_roce_av {
+ __le32 port_pd;
+ u8 gid_index;
+ u8 stat_rate;
+ u8 hop_limit;
+ __le32 sl_tclass_flowlabel;
+ u8 dgid[HNS_ROCE_GID_SIZE];
+ u8 mac[6];
+ __le16 vlan;
+};
+
+struct hns_roce_ah {
+ struct ib_ah ibah;
+ struct hns_roce_av av;
+};
+
+struct hns_roce_cmd_context {
+ struct completion done;
+ int result;
+ int next;
+ u64 out_param;
+ u16 token;
+};
+
+struct hns_roce_cmdq {
+ struct dma_pool *pool;
+ u8 __iomem *hcr;
+ struct mutex hcr_mutex;
+ struct semaphore poll_sem;
+	/*
+	 * Event mode: serializes access to the command registers and
+	 * keeps the number of outstanding commands within max_cmds.
+	 */
+ struct semaphore event_sem;
+ int max_cmds;
+ spinlock_t context_lock;
+ int free_head;
+ struct hns_roce_cmd_context *context;
+	/*
+	 * Mask derived from max_cmds rounded up to a power of two,
+	 * used to keep command tokens within range.
+	 */
+ u16 token_mask;
+	/*
+	 * Whether the command interface uses event mode.  It runs in
+	 * poll mode until the command event queue is ready, may then
+	 * switch to event mode, and switches back to poll mode
+	 * (non-event mode) when the device is closed.
+	 */
+ u8 use_events;
+ u8 toggle;
+};
+
+struct hns_roce_dev;
+
+struct hns_roce_qp {
+ struct ib_qp ibqp;
+ struct hns_roce_buf hr_buf;
+ struct hns_roce_wq rq;
+ __le64 doorbell_qpn;
+ __le32 sq_signal_bits;
+ u32 sq_next_wqe;
+ int sq_max_wqes_per_wr;
+ int sq_spare_wqes;
+ struct hns_roce_wq sq;
+
+ struct ib_umem *umem;
+ struct hns_roce_mtt mtt;
+ u32 buff_size;
+ struct mutex mutex;
+ u8 port;
+ u8 phy_port;
+ u8 sl;
+ u8 resp_depth;
+ u8 state;
+ u32 access_flags;
+ u32 pkey_index;
+ void (*event)(struct hns_roce_qp *,
+ enum hns_roce_event);
+ unsigned long qpn;
+
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct hns_roce_sqp {
+ struct hns_roce_qp hr_qp;
+};
+
+struct hns_roce_ib_iboe {
+ spinlock_t lock;
+ struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
+ struct notifier_block nb;
+ struct notifier_block nb_inet;
+	/* 16 GIDs are shared by the 6 ports in the v1 engine. */
+ union ib_gid gid_table[HNS_ROCE_MAX_GID_NUM];
+ u8 phy_port[HNS_ROCE_MAX_PORTS];
+};
+
+struct hns_roce_eq {
+ struct hns_roce_dev *hr_dev;
+ void __iomem *doorbell;
+
+	int			type_flag; /* HNS_ROCE_AEQ or HNS_ROCE_CEQ */
+ int eqn;
+ u32 entries;
+ int log_entries;
+ int eqe_size;
+ int irq;
+ int log_page_size;
+ int cons_index;
+ struct hns_roce_buf_list *buf_list;
+};
+
+struct hns_roce_eq_table {
+ struct hns_roce_eq *eq;
+ void __iomem **eqc_base;
+};
+
+struct hns_roce_caps {
+ u8 num_ports;
+ int gid_table_len[HNS_ROCE_MAX_PORTS];
+ int pkey_table_len[HNS_ROCE_MAX_PORTS];
+ int local_ca_ack_delay;
+ int num_uars;
+ u32 phy_num_uars;
+ u32 max_sq_sg; /* 2 */
+ u32 max_sq_inline; /* 32 */
+ u32 max_rq_sg; /* 2 */
+ int num_qps; /* 256k */
+ u32 max_wqes; /* 16k */
+ u32 max_sq_desc_sz; /* 64 */
+ u32 max_rq_desc_sz; /* 64 */
+ int max_qp_init_rdma;
+ int max_qp_dest_rdma;
+ int num_cqs;
+ int max_cqes;
+ int reserved_cqs;
+ int num_aeq_vectors; /* 1 */
+ int num_comp_vectors; /* 32 ceq */
+ int num_other_vectors;
+ int num_mtpts;
+ u32 num_mtt_segs;
+ int reserved_mrws;
+ int reserved_uars;
+ int num_pds;
+ int reserved_pds;
+ u32 mtt_entry_sz;
+ u32 cq_entry_sz;
+ u32 page_size_cap;
+ u32 reserved_lkey;
+ int mtpt_entry_sz;
+ int qpc_entry_sz;
+ int irrl_entry_sz;
+ int cqc_entry_sz;
+ int aeqe_depth;
+ int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
+ enum ib_mtu max_mtu;
+};
+
+struct hns_roce_hw {
+ int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
+ void (*hw_profile)(struct hns_roce_dev *hr_dev);
+ int (*hw_init)(struct hns_roce_dev *hr_dev);
+ void (*hw_exit)(struct hns_roce_dev *hr_dev);
+ void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ union ib_gid *gid);
+ void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+ void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
+ enum ib_mtu mtu);
+ int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
+ unsigned long mtpt_idx);
+ void (*write_cqc)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
+ dma_addr_t dma_handle, int nent, u32 vector);
+ int (*clear_hem)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj);
+ int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+ int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state);
+ int (*destroy_qp)(struct ib_qp *ibqp);
+ int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+ int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
+ int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+ int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+ void *priv;
+};
+
+struct hns_roce_dev {
+ struct ib_device ib_dev;
+ struct platform_device *pdev;
+ struct hns_roce_uar priv_uar;
+ const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
+ spinlock_t sm_lock;
+ spinlock_t bt_cmd_lock;
+ struct hns_roce_ib_iboe iboe;
+
+ int irq[HNS_ROCE_MAX_IRQ_NUM];
+ u8 __iomem *reg_base;
+ struct hns_roce_caps caps;
+ struct radix_tree_root qp_table_tree;
+
+ unsigned char dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
+ u64 sys_image_guid;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_rev;
+ void __iomem *priv_addr;
+
+ struct hns_roce_cmdq cmd;
+ struct hns_roce_bitmap pd_bitmap;
+ struct hns_roce_uar_table uar_table;
+ struct hns_roce_mr_table mr_table;
+ struct hns_roce_cq_table cq_table;
+ struct hns_roce_qp_table qp_table;
+ struct hns_roce_eq_table eq_table;
+
+ int cmd_mod;
+ int loop_idc;
+ struct hns_roce_hw *hw;
+};
+
+static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
+{
+ return container_of(ib_dev, struct hns_roce_dev, ib_dev);
+}
+
+static inline struct hns_roce_ucontext
+ *to_hr_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
+}
+
+static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct hns_roce_pd, ibpd);
+}
+
+static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct hns_roce_ah, ibah);
+}
+
+static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct hns_roce_mr, ibmr);
+}
+
+static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct hns_roce_qp, ibqp);
+}
+
+static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
+{
+ return container_of(ib_cq, struct hns_roce_cq, ib_cq);
+}
+
+static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct hns_roce_srq, ibsrq);
+}
+
+static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
+{
+ return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
+}
+
+static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
+{
+ __raw_writeq(*(u64 *) val, dest);
+}
+
+static inline struct hns_roce_qp
+ *__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
+{
+ return radix_tree_lookup(&hr_dev->qp_table_tree,
+ qpn & (hr_dev->caps.num_qps - 1));
+}
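+/*
+ * The mask above relies on caps.num_qps being a power of two; e.g.
+ * with num_qps = 0x40000 (256k), looking up qpn 0x41005 probes radix
+ * tree index 0x41005 & 0x3ffff = 0x1005.
+ */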
+
+static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
+{
+ u32 bits_per_long_val = BITS_PER_LONG;
+
+ if (bits_per_long_val == 64 || buf->nbufs == 1)
+ return (char *)(buf->direct.buf) + offset;
+ else
+ return (char *)(buf->page_list[offset >> PAGE_SHIFT].buf) +
+ (offset & (PAGE_SIZE - 1));
+}
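+/*
+ * Worked example for the page_list path above (32-bit kernel,
+ * multi-buffer map, 4K pages): offset 5000 resolves to
+ * page_list[5000 >> 12] = page_list[1] at byte 5000 & 0xfff = 904.
+ * 64-bit kernels and single-buffer maps use the contiguous direct
+ * mapping instead.
+ */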
+
+int hns_roce_init_uar_table(struct hns_roce_dev *dev);
+int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
+void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
+void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);
+
+int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
+void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
+void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
+ u64 out_param);
+int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
+void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
+
+int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
+ struct hns_roce_mtt *mtt);
+void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt);
+int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);
+
+int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
+
+void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
+
+int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
+int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
+			 u32 reserved_bot, u32 reserved_top);
+void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
+void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
+int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
+ int align, unsigned long *obj);
+void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
+ unsigned long obj, int cnt);
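+/*
+ * Typical lifetime of the bitmap helpers above (a sketch; the mask
+ * argument is assumed to be num - 1 for a power-of-two num):
+ *
+ *	hns_roce_bitmap_init(&bitmap, num, num - 1, reserved_bot, 0);
+ *	hns_roce_bitmap_alloc(&bitmap, &obj);
+ *	...
+ *	hns_roce_bitmap_free(&bitmap, obj);
+ *	hns_roce_bitmap_cleanup(&bitmap);
+ */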
+
+struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
+int hns_roce_destroy_ah(struct ib_ah *ah);
+
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int hns_roce_dealloc_pd(struct ib_pd *pd);
+
+struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+int hns_roce_dereg_mr(struct ib_mr *ibmr);
+
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
+ struct hns_roce_buf *buf);
+int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
+ struct hns_roce_buf *buf);
+
+int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, struct ib_umem *umem);
+
+struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
+void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
+ struct ib_cq *ib_cq);
+enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
+void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
+ struct hns_roce_cq *recv_cq);
+void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
+ struct hns_roce_cq *recv_cq);
+void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
+void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
+void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
+ int cnt);
+__be32 send_ieth(struct ib_send_wr *wr);
+int to_hr_qp_type(int qp_type);
+
+struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
+
+void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
+void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
+void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
+int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
+
+extern struct hns_roce_hw hns_roce_hw_v1;
+
+#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
new file mode 100644
index 000000000000..21e21b03cfb5
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -0,0 +1,758 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_eq.h"
+
+static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
+{
+ roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
+ (req_not << eq->log_entries), eq->doorbell);
+ /* Memory barrier */
+ mb();
+}
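+/*
+ * Doorbell value sketch for the write above: the low 16 bits carry the
+ * consumer index and bit log_entries carries the rearm request, e.g.
+ * for a 64-entry EQ (log_entries = 6), cons_index 10 with req_not = 1
+ * yields (10 & 0xffff) | (1 << 6) = 0x4a.
+ */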
+
+static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
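+/*
+ * AEQEs are spread over HNS_ROCE_BA_SIZE (128K) buffers; e.g. with the
+ * 16-byte AEQE, wrapped entry index 9375 sits at byte offset 150000,
+ * i.e. in buf_list[1] at offset 150000 % 131072 = 18928.
+ */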
+
+static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);
+
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
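+/*
+ * Sketch of the owner-bit handshake above: hardware flips the owner
+ * bit each time it wraps the queue, so an entry is valid when the bit
+ * differs from the parity of the software sweep.  E.g. with
+ * entries = 64, cons_index 0..63 has (cons_index & 64) == 0 and
+ * accepts owner == 1, while the second sweep (64..127) accepts
+ * owner == 0.
+ */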
+
+static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_warn(dev, "QP %d, WQE shift error\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SL_ERROR:
+ dev_warn(dev, "QP %d, SL error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_PORT_ERROR:
+ dev_warn(dev, "QP %d, port error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Access Violation Work Queue Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int phy_port;
+ int qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ phy_port = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+ if (qpn <= 1)
+ qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqn;
+
+ cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_cq_event(hr_dev, cqn, event_type);
+}
+
+static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
+ dev_warn(dev, "SDB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
+ dev_warn(dev, "SDB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
+ dev_warn(dev, "ODB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
+ dev_warn(dev, "ODB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqes_found = 0;
+ int event_type;
+
+ while ((aeqe = next_aeqe_sw(eq))) {
+ dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ /* Memory barrier */
+ rmb();
+
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "PATH MIG not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "COMMUNICATION established\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "SQ DRAINED not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "PATH MIG failed\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ dev_warn(dev, "SRQ not support!\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
+ dev_warn(dev, "port change.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+			hns_roce_cmd_event(hr_dev,
+					   le16_to_cpu(aeqe->event.cmd.token),
+					   aeqe->event.cmd.status,
+					   le64_to_cpu(aeqe->event.cmd.out_param));
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ hns_roce_db_overflow_handle(hr_dev, aeqe);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ 0x%lx overflow.\n",
+ roce_get_field(aeqe->event.ce_event.ceqe,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
+ break;
+ default:
+ dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+		}
+
+ eq->cons_index++;
+ aeqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
+ dev_warn(dev, "cons_index overflow, set back to zero\n"
+ );
+ eq->cons_index = 0;
+ }
+ }
+
+ eq_set_cons_index(eq, 0);
+
+ return aeqes_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);
+
+ return (!!(roce_get_bit(ceqe->ceqe.comp,
+ HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+ int ceqes_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw(eq))) {
+ /* Memory barrier */
+ rmb();
+ cqn = roce_get_field(ceqe->ceqe.comp,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
+ dev_warn(&eq->hr_dev->pdev->dev,
+ "cons_index overflow, set back to zero\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ eq_set_cons_index(eq, 0);
+
+ return ceqes_found;
+}
+
+static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = &eq->hr_dev->pdev->dev;
+ int eqovf_found = 0;
+ u32 caepaemask_val;
+ u32 cealmovf_val;
+ u32 caepaest_val;
+ u32 aeshift_val;
+ u32 ceshift_val;
+ u32 cemask_val;
+ int i = 0;
+
+	/*
+	 * AEQ overflow, ECC multi-bit error and CEQ overflow alarms
+	 * must be handled by clearing the interrupt: mask the irq,
+	 * clear the irq state, then cancel the mask.
+	 */
+ aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
+
+ if (roce_get_bit(aeshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "AEQ overflow!\n");
+
+ /* Set mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
+ roce_set_bit(caepaest_val,
+ ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
+
+ /* Clear mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+ }
+
+ /* CEQ almost overflow */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ i * CEQ_REG_OFFSET);
+
+ if (roce_get_bit(ceshift_val,
+ ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
+ eqovf_found++;
+
+ /* Set mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ cealmovf_val = roce_read(hr_dev,
+ ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cealmovf_val,
+ ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
+ 1);
+ roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET, cealmovf_val);
+
+ /* Clear mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+ }
+ }
+
+ /* ECC multi-bit error alarm */
+ dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
+
+ dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
+
+ return eqovf_found;
+}
+
+static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+{
+ int eqes_found = 0;
+
+ if (likely(eq->type_flag == HNS_ROCE_CEQ))
+		/* CEQ irq routine; CEQ is a pulse irq and needs no clearing */
+ eqes_found = hns_roce_ceq_int(hr_dev, eq);
+ else if (likely(eq->type_flag == HNS_ROCE_AEQ))
+		/* AEQ irq routine; AEQ is a pulse irq and needs no clearing */
+ eqes_found = hns_roce_aeq_int(hr_dev, eq);
+ else
+ /* AEQ queue overflow irq */
+ eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);
+
+ return eqes_found;
+}
+
+static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
+{
+ int int_work = 0;
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+
+ int_work = hns_roce_eq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
+ int enable_flag)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
+ u32 val;
+
+ val = readl(eqc);
+
+ if (enable_flag)
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_VALID);
+ else
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ writel(val, eqc);
+}
+
+static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t tmp_dma_addr;
+ u32 eqconsindx_val = 0;
+ u32 eqcuridx_val = 0;
+ u32 eqshift_val = 0;
+ int num_bas = 0;
+ int ret;
+ int i;
+
+ num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+
+ if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
+ dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
+ (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
+ num_bas);
+ return -EINVAL;
+ }
+
+ eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
+ if (!eq->buf_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bas; ++i) {
+ eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
+ &tmp_dma_addr,
+ GFP_KERNEL);
+ if (!eq->buf_list[i].buf) {
+ ret = -ENOMEM;
+ goto err_out_free_pages;
+ }
+
+ eq->buf_list[i].map = tmp_dma_addr;
+ memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
+ }
+ eq->cons_index = 0;
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
+ eq->log_entries);
+ writel(eqshift_val, eqc);
+
+	/* Configure eq extended address bits 12~44 */
+ writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);
+
+	/*
+	 * Configure eq extended address bits 45~49.
+	 * The shift of 44 = 32 + 12: the address is shifted by 12
+	 * because of the 4K page size, and by another 32 to obtain the
+	 * high 32-bit value handed to the hardware.
+	 */
+ roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
+ eq->buf_list[0].map >> 44);
+ roce_set_field(eqcuridx_val,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
+ writel(eqcuridx_val, (u8 *)eqc + 8);
+
+ /* Configure eq consumer index */
+ roce_set_field(eqconsindx_val,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
+ writel(eqconsindx_val, (u8 *)eqc + 0xc);
+
+ return 0;
+
+err_out_free_pages:
+ for (i = i - 1; i >= 0; i--)
+ dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
+ eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+ return ret;
+}
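+/*
+ * Address-split example for the register writes in
+ * hns_roce_create_eq(): for buf_list[0].map = 0x100012345000,
+ * (u32)(map >> 12) = 0x00012345 goes into the extended-address word
+ * and map >> 44 = 0x1 lands in the BT_H field.
+ */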
+
+static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ int i = 0;
+ int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+
+ if (!eq->buf_list)
+ return;
+
+ for (i = 0; i < npages; ++i)
+ dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
+ eq->buf_list[i].buf, eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+}
+
+static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
+{
+ int i = 0;
+ u32 aemask_val;
+ int masken = 0;
+
+ /* AEQ INT */
+ aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ masken);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
+
+ /* CEQ INT */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ /* IRQ mask */
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, masken);
+ }
+}
+
+static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
+{
+ /* Configure ce int interval */
+ roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_INTERVAL);
+
+ /* Configure ce int burst num */
+ roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
+}
+
+int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_eq *eq = NULL;
+ int eq_num = 0;
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
+ GFP_KERNEL);
+ if (!eq_table->eqc_base) {
+ ret = -ENOMEM;
+ goto err_eqc_base_alloc_fail;
+ }
+
+ for (i = 0; i < eq_num; i++) {
+ eq = &eq_table->eq[i];
+ eq->hr_dev = hr_dev;
+ eq->eqn = i;
+ eq->irq = hr_dev->irq[i];
+ eq->log_page_size = PAGE_SHIFT;
+
+ if (i < hr_dev->caps.num_comp_vectors) {
+ /* CEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ HNS_ROCE_CEQC_REG_OFFSET * i;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
+ HNS_ROCE_CEQC_REG_OFFSET * i;
+ eq->entries = hr_dev->caps.ceqe_depth[i];
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = sizeof(struct hns_roce_ceqe);
+ } else {
+ /* AEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_AEQE_CONS_IDX_REG;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = sizeof(struct hns_roce_aeqe);
+ }
+ }
+
+ /* Disable irq */
+ hns_roce_int_mask_en(hr_dev);
+
+ /* Configure CE irq interval and burst num */
+ hns_roce_ce_int_default_cfg(hr_dev);
+
+ for (i = 0; i < eq_num; i++) {
+ ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
+ if (ret) {
+ dev_err(dev, "eq create failed\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ for (j = 0; j < eq_num; j++) {
+ ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
+ 0, hr_dev->irq_names[j], eq_table->eq + j);
+ if (ret) {
+ dev_err(dev, "request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ for (i = 0; i < eq_num; i++)
+ hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);
+
+ return 0;
+
+err_request_irq_fail:
+ for (j = j - 1; j >= 0; j--)
+ free_irq(eq_table->eq[j].irq, eq_table->eq + j);
+
+err_create_eq_fail:
+ for (i = i - 1; i >= 0; i--)
+ hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
+
+ kfree(eq_table->eqc_base);
+
+err_eqc_base_alloc_fail:
+ kfree(eq_table->eq);
+
+ return ret;
+}
+
+void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ int i;
+ int eq_num;
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ for (i = 0; i < eq_num; i++) {
+ /* Disable EQ */
+ hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);
+
+ free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+
+ hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+
+ kfree(eq_table->eqc_base);
+ kfree(eq_table->eq);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
new file mode 100644
index 000000000000..c6d212d12e03
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_EQ_H
+#define _HNS_ROCE_EQ_H
+
+#define HNS_ROCE_CEQ 1
+#define HNS_ROCE_AEQ 2
+
+#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
+#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
+#define HNS_ROCE_CEQC_REG_OFFSET 0x18
+
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
+
+#define HNS_ROCE_INT_MASK_DISABLE 0
+#define HNS_ROCE_INT_MASK_ENABLE 1
+
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
+#define CONS_INDEX_MASK 0xffff
+
+#define CEQ_REG_OFFSET 0x18
+
+enum {
+ HNS_ROCE_EQ_STAT_INVALID = 0,
+ HNS_ROCE_EQ_STAT_VALID = 2,
+};
+
+struct hns_roce_aeqe {
+ u32 asyn;
+ union {
+ struct {
+ u32 qp;
+ u32 rsv0;
+ u32 rsv1;
+ } qp_event;
+
+ struct {
+ u32 cq;
+ u32 rsv0;
+ u32 rsv1;
+ } cq_event;
+
+ struct {
+ u32 port;
+ u32 rsv0;
+ u32 rsv1;
+ } port_event;
+
+ struct {
+ u32 ceqe;
+ u32 rsv0;
+ u32 rsv1;
+ } ce_event;
+
+ struct {
+ __le64 out_param;
+ __le16 token;
+ u8 status;
+ u8 rsv0;
+ } __packed cmd;
+ } event;
+};
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M \
+ (((1UL << 8) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M \
+ (((1UL << 7) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)
+
+#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
+ (((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \
+ (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
+
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
+ (((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
+
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M \
+ (((1UL << 5) - 1) << HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)
+
+struct hns_roce_ceqe {
+ union {
+ int comp;
+ } ceqe;
+};
+
+#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
+
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M \
+ (((1UL << 16) - 1) << HNS_ROCE_CEQE_CEQE_COMP_CQN_S)
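+/*
+ * These masks follow the driver's mask-and-shift pattern; assuming
+ * roce_get_field() expands to (val & M) >> S, the CQN of a completion
+ * event is recovered as
+ *
+ *	cqn = (ceqe->ceqe.comp & HNS_ROCE_CEQE_CEQE_COMP_CQN_M) >>
+ *	      HNS_ROCE_CEQE_CEQE_COMP_CQN_S;
+ */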
+
+#endif /* _HNS_ROCE_EQ_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
new file mode 100644
index 000000000000..250d8f280390
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_common.h"
+
+#define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
+#define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
+
+#define DMA_ADDR_T_SHIFT 12
+#define BT_BA_SHIFT 32
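+/*
+ * Sizing example: the 128K (1 << 17) chunk packs 512 objects of 256
+ * bytes each, so a table of 64k such objects needs 128 HEM chunks.
+ */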
+
+struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
+ gfp_t gfp_mask)
+{
+ struct hns_roce_hem_chunk *chunk = NULL;
+ struct hns_roce_hem *hem;
+ struct scatterlist *mem;
+ int order;
+ void *buf;
+
+ WARN_ON(gfp_mask & __GFP_HIGHMEM);
+
+ hem = kmalloc(sizeof(*hem),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!hem)
+ return NULL;
+
+ hem->refcount = 0;
+ INIT_LIST_HEAD(&hem->chunk_list);
+
+ order = get_order(HNS_ROCE_HEM_ALLOC_SIZE);
+
+ while (npages > 0) {
+ if (!chunk) {
+ chunk = kmalloc(sizeof(*chunk),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!chunk)
+ goto fail;
+
+ sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
+ chunk->npages = 0;
+ chunk->nsg = 0;
+ list_add_tail(&chunk->list, &hem->chunk_list);
+ }
+
+ while (1 << order > npages)
+ --order;
+
+		/*
+		 * Allocate the memory in one go.  On failure, do not
+		 * fall back to smaller allocations; just return failure.
+		 */
+ mem = &chunk->mem[chunk->npages];
+ buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
+ &sg_dma_address(mem), gfp_mask);
+ if (!buf)
+ goto fail;
+
+ sg_set_buf(mem, buf, PAGE_SIZE << order);
+ WARN_ON(mem->offset);
+ sg_dma_len(mem) = PAGE_SIZE << order;
+
+ ++chunk->npages;
+ ++chunk->nsg;
+ npages -= 1 << order;
+ }
+
+ return hem;
+
+fail:
+ hns_roce_free_hem(hr_dev, hem);
+ return NULL;
+}
+
+void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
+{
+ struct hns_roce_hem_chunk *chunk, *tmp;
+ int i;
+
+ if (!hem)
+ return;
+
+ list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
+ for (i = 0; i < chunk->npages; ++i)
+ dma_free_coherent(&hr_dev->pdev->dev,
+ chunk->mem[i].length,
+ lowmem_page_address(sg_page(&chunk->mem[i])),
+ sg_dma_address(&chunk->mem[i]));
+ kfree(chunk);
+ }
+
+ kfree(hem);
+}
+
+static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ spinlock_t *lock = &hr_dev->bt_cmd_lock;
+ unsigned long end = 0;
+ unsigned long flags;
+ struct hns_roce_hem_iter iter;
+ void __iomem *bt_cmd;
+ u32 bt_cmd_h_val = 0;
+ u32 bt_cmd_val[2];
+ u32 bt_cmd_l = 0;
+ u64 bt_ba = 0;
+ int ret = 0;
+
+	/* Find the HEM (Hardware Entry Memory) entry */
+ unsigned long i = (obj & (table->num_obj - 1)) /
+ (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+ break;
+ case HEM_TYPE_MTPT:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
+ HEM_TYPE_MTPT);
+ break;
+ case HEM_TYPE_CQC:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+ break;
+ case HEM_TYPE_SRQC:
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
+ HEM_TYPE_SRQC);
+ break;
+ default:
+ return ret;
+ }
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+ roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+ roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+	/* Currently only a single chunk is iterated over */
+ for (hns_roce_hem_first(table->hem[i], &iter);
+ !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
+ bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;
+
+ spin_lock_irqsave(lock, flags);
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+ end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+ if (!(time_before(jiffies, end))) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(lock, flags);
+ return -EBUSY;
+ }
+ } else {
+ break;
+ }
+ msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+ }
+
+ bt_cmd_l = (u32)bt_ba;
+ roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
+ bt_ba >> BT_BA_SHIFT);
+
+ bt_cmd_val[0] = bt_cmd_l;
+ bt_cmd_val[1] = bt_cmd_h_val;
+ hns_roce_write64_k(bt_cmd_val,
+ hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+ spin_unlock_irqrestore(lock, flags);
+ }
+
+ return ret;
+}
+
+int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret = 0;
+ unsigned long i;
+
+ i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
+ table->obj_size);
+
+ mutex_lock(&table->mutex);
+
+ if (table->hem[i]) {
+ ++table->hem[i]->refcount;
+ goto out;
+ }
+
+ table->hem[i] = hns_roce_alloc_hem(hr_dev,
+ HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+ (table->lowmem ? GFP_KERNEL :
+ GFP_HIGHUSER) | __GFP_NOWARN);
+ if (!table->hem[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+	/* Set HEM base address (128K/page, PA) in hardware */
+ if (hns_roce_set_hem(hr_dev, table, obj)) {
+ ret = -ENODEV;
+ dev_err(dev, "set HEM base address to HW failed.\n");
+ goto out;
+ }
+
+ ++table->hem[i]->refcount;
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
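+/*
+ * Index example for the computation above: with obj_size = 256 (512
+ * objects per 128K chunk) and num_obj = 0x10000, obj 1500 maps to
+ * table->hem[(1500 & 0xffff) / 512] = table->hem[2].
+ */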
+
+void hns_roce_table_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ unsigned long i;
+
+ i = (obj & (table->num_obj - 1)) /
+ (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
+
+ mutex_lock(&table->mutex);
+
+ if (--table->hem[i]->refcount == 0) {
+ /* Clear HEM base address */
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
+ }
+
+ mutex_unlock(&table->mutex);
+}
+
+void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
+ dma_addr_t *dma_handle)
+{
+ struct hns_roce_hem_chunk *chunk;
+ unsigned long idx;
+ int i;
+ int offset, dma_offset;
+ struct hns_roce_hem *hem;
+ struct page *page = NULL;
+
+ if (!table->lowmem)
+ return NULL;
+
+ mutex_lock(&table->mutex);
+ idx = (obj & (table->num_obj - 1)) * table->obj_size;
+ hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
+ dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
+
+ if (!hem)
+ goto out;
+
+ list_for_each_entry(chunk, &hem->chunk_list, list) {
+ for (i = 0; i < chunk->npages; ++i) {
+ if (dma_handle && dma_offset >= 0) {
+ if (sg_dma_len(&chunk->mem[i]) >
+ (u32)dma_offset)
+ *dma_handle = sg_dma_address(
+ &chunk->mem[i]) + dma_offset;
+ dma_offset -= sg_dma_len(&chunk->mem[i]);
+ }
+
+ if (chunk->mem[i].length > (u32)offset) {
+ page = sg_page(&chunk->mem[i]);
+ goto out;
+ }
+ offset -= chunk->mem[i].length;
+ }
+ }
+
+out:
+ mutex_unlock(&table->mutex);
+ return page ? lowmem_page_address(page) + offset : NULL;
+}
+
+int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long start, unsigned long end)
+{
+ unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
+ unsigned long i = 0;
+ int ret = 0;
+
+ /* Allocate MTT entry memory according to chunk(128K) */
+ for (i = start; i <= end; i += inc) {
+ ret = hns_roce_table_get(hr_dev, table, i);
+ if (ret)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (i > start) {
+ i -= inc;
+ hns_roce_table_put(hr_dev, table, i);
+ }
+ return ret;
+}
+
+void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long start, unsigned long end)
+{
+ unsigned long i;
+
+ for (i = start; i <= end;
+ i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
+ hns_roce_table_put(hr_dev, table, i);
+}
+
+int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, u32 type,
+ unsigned long obj_size, unsigned long nobj,
+ int use_lowmem)
+{
+ unsigned long obj_per_chunk;
+ unsigned long num_hem;
+
+ obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
+ num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
+ if (!table->hem)
+ return -ENOMEM;
+
+ table->type = type;
+ table->num_hem = num_hem;
+ table->num_obj = nobj;
+ table->obj_size = obj_size;
+ table->lowmem = use_lowmem;
+ mutex_init(&table->mutex);
+
+ return 0;
+}
+
+void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ unsigned long i;
+
+ for (i = 0; i < table->num_hem; ++i)
+ if (table->hem[i]) {
+ if (hr_dev->hw->clear_hem(hr_dev, table,
+ i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
+ dev_err(dev, "Clear HEM base address failed.\n");
+
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ }
+
+ kfree(table->hem);
+}
+
+void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
new file mode 100644
index 000000000000..435748858252
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_HEM_H
+#define _HNS_ROCE_HEM_H
+
+#define HW_SYNC_TIMEOUT_MSECS 500
+#define HW_SYNC_SLEEP_TIME_INTERVAL 20
+#define BT_CMD_SYNC_SHIFT 31
+
+enum {
+ /* MAP HEM(Hardware Entry Memory) */
+ HEM_TYPE_QPC = 0,
+ HEM_TYPE_MTPT,
+ HEM_TYPE_CQC,
+ HEM_TYPE_SRQC,
+
+ /* UNMAP HEM */
+ HEM_TYPE_MTT,
+ HEM_TYPE_IRRL,
+};
+
+#define HNS_ROCE_HEM_CHUNK_LEN \
+ ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
+ (sizeof(struct scatterlist)))
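+/*
+ * Keeps struct hns_roce_hem_chunk within 256 bytes; e.g. on a 64-bit
+ * kernel with sizeof(struct scatterlist) == 32 this yields
+ * (256 - 16 - 8) / 32 = 7 scatterlist entries per chunk.
+ */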
+
+enum {
+ HNS_ROCE_HEM_PAGE_SHIFT = 12,
+ HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
+};
+
+struct hns_roce_hem_chunk {
+ struct list_head list;
+ int npages;
+ int nsg;
+ struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
+};
+
+struct hns_roce_hem {
+ struct list_head chunk_list;
+ int refcount;
+};
+
+struct hns_roce_hem_iter {
+ struct hns_roce_hem *hem;
+ struct hns_roce_hem_chunk *chunk;
+ int page_idx;
+};
+
+void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
+int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj);
+void hns_roce_table_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj);
+void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
+ dma_addr_t *dma_handle);
+int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long start, unsigned long end);
+void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long start, unsigned long end);
+int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, u32 type,
+ unsigned long obj_size, unsigned long nobj,
+ int use_lowmem);
+void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table);
+void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
+
+static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
+ struct hns_roce_hem_iter *iter)
+{
+ iter->hem = hem;
+ iter->chunk = list_empty(&hem->chunk_list) ? NULL :
+ list_entry(hem->chunk_list.next,
+ struct hns_roce_hem_chunk, list);
+ iter->page_idx = 0;
+}
+
+static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
+{
+ return !iter->chunk;
+}
+
+static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
+{
+ if (++iter->page_idx >= iter->chunk->nsg) {
+ if (iter->chunk->list.next == &iter->hem->chunk_list) {
+ iter->chunk = NULL;
+ return;
+ }
+
+ iter->chunk = list_entry(iter->chunk->list.next,
+ struct hns_roce_hem_chunk, list);
+ iter->page_idx = 0;
+ }
+}
+
+static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
+{
+ return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+}
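+
+/*
+ * Sketch of the intended iteration pattern over every mapped page of
+ * a HEM object (use_dma_page() is a hypothetical consumer):
+ *
+ *	struct hns_roce_hem_iter iter;
+ *
+ *	for (hns_roce_hem_first(hem, &iter); !hns_roce_hem_last(&iter);
+ *	     hns_roce_hem_next(&iter))
+ *		use_dma_page(hns_roce_hem_addr(&iter));
+ */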
+
+#endif /* _HNS_ROCE_HEM_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
new file mode 100644
index 000000000000..71232e5fabf6
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -0,0 +1,2921 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_hw_v1.h"
+
+static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
+{
+ dseg->lkey = cpu_to_le32(sg->lkey);
+ dseg->addr = cpu_to_le64(sg->addr);
+ dseg->len = cpu_to_le32(sg->length);
+}
+
+static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
+ u32 rkey)
+{
+ rseg->raddr = cpu_to_le64(remote_addr);
+ rseg->rkey = cpu_to_le32(rkey);
+ rseg->len = 0;
+}
+
+int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+ struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
+ struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
+ struct hns_roce_wqe_data_seg *dseg = NULL;
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_sq_db sq_db;
+ int ps_opcode = 0, i = 0;
+ unsigned long flags = 0;
+ void *wqe = NULL;
+ u32 doorbell[2];
+ int nreq = 0;
+ u32 ind = 0;
+ int ret = 0;
+
+ if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_RC)) {
+ dev_err(dev, "un-supported QP type\n");
+ *bad_wr = NULL;
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+ ind = qp->sq_next_wqe;
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->sq.max_gs)) {
+ dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, qp->sq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
+ qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
+ wr->wr_id;
+
+ /* Process the UD (GSI) and RC type WQEs separately */
+ if (ibqp->qp_type == IB_QPT_GSI) {
+ ud_sq_wqe = wqe;
+ roce_set_field(ud_sq_wqe->dmac_h,
+ UD_SEND_WQE_U32_4_DMAC_0_M,
+ UD_SEND_WQE_U32_4_DMAC_0_S,
+ ah->av.mac[0]);
+ roce_set_field(ud_sq_wqe->dmac_h,
+ UD_SEND_WQE_U32_4_DMAC_1_M,
+ UD_SEND_WQE_U32_4_DMAC_1_S,
+ ah->av.mac[1]);
+ roce_set_field(ud_sq_wqe->dmac_h,
+ UD_SEND_WQE_U32_4_DMAC_2_M,
+ UD_SEND_WQE_U32_4_DMAC_2_S,
+ ah->av.mac[2]);
+ roce_set_field(ud_sq_wqe->dmac_h,
+ UD_SEND_WQE_U32_4_DMAC_3_M,
+ UD_SEND_WQE_U32_4_DMAC_3_S,
+ ah->av.mac[3]);
+
+ roce_set_field(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_DMAC_4_M,
+ UD_SEND_WQE_U32_8_DMAC_4_S,
+ ah->av.mac[4]);
+ roce_set_field(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_DMAC_5_M,
+ UD_SEND_WQE_U32_8_DMAC_5_S,
+ ah->av.mac[5]);
+ roce_set_field(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
+ UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
+ HNS_ROCE_WQE_OPCODE_SEND);
+ roce_set_field(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
+ UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
+ 2);
+ roce_set_bit(ud_sq_wqe->u32_8,
+ UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
+ 1);
+
+ ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
+ cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
+ (wr->send_flags & IB_SEND_SOLICITED ?
+ cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
+ ((wr->opcode == IB_WR_SEND_WITH_IMM) ?
+ cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);
+
+ roce_set_field(ud_sq_wqe->u32_16,
+ UD_SEND_WQE_U32_16_DEST_QP_M,
+ UD_SEND_WQE_U32_16_DEST_QP_S,
+ ud_wr(wr)->remote_qpn);
+ roce_set_field(ud_sq_wqe->u32_16,
+ UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
+ UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
+ ah->av.stat_rate);
+
+ roce_set_field(ud_sq_wqe->u32_36,
+ UD_SEND_WQE_U32_36_FLOW_LABEL_M,
+ UD_SEND_WQE_U32_36_FLOW_LABEL_S, 0);
+ roce_set_field(ud_sq_wqe->u32_36,
+ UD_SEND_WQE_U32_36_PRIORITY_M,
+ UD_SEND_WQE_U32_36_PRIORITY_S,
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_SL_SHIFT);
+ roce_set_field(ud_sq_wqe->u32_36,
+ UD_SEND_WQE_U32_36_SGID_INDEX_M,
+ UD_SEND_WQE_U32_36_SGID_INDEX_S,
+ hns_get_gid_index(hr_dev, qp->phy_port,
+ ah->av.gid_index));
+
+ roce_set_field(ud_sq_wqe->u32_40,
+ UD_SEND_WQE_U32_40_HOP_LIMIT_M,
+ UD_SEND_WQE_U32_40_HOP_LIMIT_S,
+ ah->av.hop_limit);
+ roce_set_field(ud_sq_wqe->u32_40,
+ UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
+ UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 0);
+
+ memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
+
+ ud_sq_wqe->va0_l = (u32)wr->sg_list[0].addr;
+ ud_sq_wqe->va0_h = (wr->sg_list[0].addr) >> 32;
+ ud_sq_wqe->l_key0 = wr->sg_list[0].lkey;
+
+ ud_sq_wqe->va1_l = (u32)wr->sg_list[1].addr;
+ ud_sq_wqe->va1_h = (wr->sg_list[1].addr) >> 32;
+ ud_sq_wqe->l_key1 = wr->sg_list[1].lkey;
+ ind++;
+ } else if (ibqp->qp_type == IB_QPT_RC) {
+ ctrl = wqe;
+ memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
+ for (i = 0; i < wr->num_sge; i++)
+ ctrl->msg_length += wr->sg_list[i].length;
+
+ ctrl->sgl_pa_h = 0;
+ ctrl->flag = 0;
+ ctrl->imm_data = send_ieth(wr);
+
+ /* Ctrl field: set the sig, solic, imm and fence type bits */
+ /* The SO bit is left unset, pending conforming application scenarios */
+ ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
+ cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
+ (wr->send_flags & IB_SEND_SOLICITED ?
+ cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
+ ((wr->opcode == IB_WR_SEND_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
+ cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
+ (wr->send_flags & IB_SEND_FENCE ?
+ (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
+
+ wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
+ break;
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
+ break;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ case IB_WR_SEND_WITH_IMM:
+ ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
+ break;
+ case IB_WR_LOCAL_INV:
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_LSO:
+ default:
+ ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
+ break;
+ }
+ ctrl->flag |= cpu_to_le32(ps_opcode);
+ wqe += sizeof(struct hns_roce_wqe_raddr_seg);
+
+ dseg = wqe;
+ if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+ if (ctrl->msg_length >
+ hr_dev->caps.max_sq_inline) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ dev_err(dev, "inline len(1-%d)=%d, illegal",
+ ctrl->msg_length,
+ hr_dev->caps.max_sq_inline);
+ goto out;
+ }
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(wqe, ((void *) (uintptr_t)
+ wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ wqe += wr->sg_list[i].length;
+ }
+ ctrl->flag |= HNS_ROCE_WQE_INLINE;
+ } else {
+ /* Non-inline case: build one data segment per SGE */
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_seg(dseg + i, wr->sg_list + i);
+
+ ctrl->flag |= cpu_to_le32(wr->num_sge <<
+ HNS_ROCE_WQE_SGE_NUM_BIT);
+ }
+ ind++;
+ }
+ }
+
+out:
+ /* Set DB return */
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+ /* Ensure the WQE contents are visible before ringing the doorbell */
+ wmb();
+
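+ /*
+ * The SQ head programmed into the doorbell below is taken modulo
+ * twice the queue depth; keeping one extra wrap bit is a common
+ * ring-buffer convention that presumably lets the hardware tell a
+ * full queue from an empty one.
+ */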
+ sq_db.u32_4 = 0;
+ sq_db.u32_8 = 0;
+ roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
+ SQ_DOORBELL_U32_4_SQ_HEAD_S,
+ (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
+ roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
+ SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
+ roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
+ SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
+ roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
+
+ doorbell[0] = sq_db.u32_4;
+ doorbell[1] = sq_db.u32_8;
+
+ hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
+ qp->sq_next_wqe = ind;
+ }
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return ret;
+}
+
+int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ int ret = 0;
+ int nreq = 0;
+ int ind = 0;
+ int i = 0;
+ u32 reg_val = 0;
+ unsigned long flags = 0;
+ struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
+ struct hns_roce_wqe_data_seg *scat = NULL;
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_rq_db rq_db;
+ uint32_t doorbell[2] = {0};
+
+ spin_lock_irqsave(&hr_qp->rq.lock, flags);
+ ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+ dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, hr_qp->rq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ ctrl = get_recv_wqe(hr_qp, ind);
+
+ roce_set_field(ctrl->rwqe_byte_12,
+ RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
+ RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
+ wr->num_sge);
+
+ scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);
+
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_seg(scat + i, wr->sg_list + i);
+
+ hr_qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ hr_qp->rq.head += nreq;
+ /* Ensure the WQE contents are visible before updating the head */
+ wmb();
+
+ if (ibqp->qp_type == IB_QPT_GSI) {
+ /* SW update GSI rq header */
+ reg_val = roce_read(to_hr_dev(ibqp->device),
+ ROCEE_QP1C_CFG3_0_REG +
+ QP1C_CFGN_OFFSET * hr_qp->phy_port);
+ roce_set_field(reg_val,
+ ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
+ ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
+ hr_qp->rq.head);
+ roce_write(to_hr_dev(ibqp->device),
+ ROCEE_QP1C_CFG3_0_REG +
+ QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
+ } else {
+ rq_db.u32_4 = 0;
+ rq_db.u32_8 = 0;
+
+ roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
+ RQ_DOORBELL_U32_4_RQ_HEAD_S,
+ hr_qp->rq.head);
+ roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
+ RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+ roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
+ RQ_DOORBELL_U32_8_CMD_S, 1);
+ roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
+ 1);
+
+ doorbell[0] = rq_db.u32_4;
+ doorbell[1] = rq_db.u32_8;
+
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ }
+ }
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+
+ return ret;
+}
+
+static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
+ int sdb_mode, int odb_mode)
+{
+ u32 val;
+
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
+ roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+}
+
+static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
+ u32 odb_mode)
+{
+ u32 val;
+
+ /* Configure SDB/ODB extend mode */
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ roce_set_bit(val, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
+ roce_set_bit(val, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+}
+
+static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
+ u32 sdb_alful)
+{
+ u32 val;
+
+ /* Configure SDB */
+ val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
+ roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
+ ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
+ roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
+ ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
+ roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
+}
+
+static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
+ u32 odb_alful)
+{
+ u32 val;
+
+ /* Configure ODB */
+ val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
+ roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
+ ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
+ roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
+ ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
+ roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
+}
+
+static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
+ u32 ext_sdb_alful)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+ dma_addr_t sdb_dma_addr;
+ u32 val;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
+ /* Configure extend SDB threshold */
+ roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
+ roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
+
+ /* Configure extend SDB base addr */
+ sdb_dma_addr = db->ext_db->sdb_buf_list->map;
+ roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));
+
+ /* Configure extend SDB depth */
+ val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
+ roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
+ ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
+ db->ext_db->esdb_dep);
+ /*
+ * 44 = 32 + 12: when passing the address to hardware, shift right
+ * by 12 because of the 4K page size, and by a further 32 to obtain
+ * the high 32 bits of the value handed to hardware.
+ */
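+ /*
+ * Worked example: for a DMA address of 0x123456789000,
+ * (u32)(addr >> 12) == 0x23456789 carries bits [43:12] into the low
+ * register above, while addr >> 44 == 0x1 supplies bits [47:44] here.
+ */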
+ roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
+ ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
+ roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
+
+ dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
+ dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n",
+ ext_sdb_alept, ext_sdb_alful);
+}
+
+static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
+ u32 ext_odb_alful)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+ dma_addr_t odb_dma_addr;
+ u32 val;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
+ /* Configure extend ODB threshold */
+ roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
+ roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
+
+ /* Configure extend ODB base addr */
+ odb_dma_addr = db->ext_db->odb_buf_list->map;
+ roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));
+
+ /* Configure extend ODB depth */
+ val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
+ roce_set_field(val, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
+ ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
+ db->ext_db->eodb_dep);
+ /* Same 44 = 32 + 12 address split as for the SDB base above */
+ roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
+ ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
+ odb_dma_addr >> 44);
+ roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
+
+ dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
+ dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
+ ext_odb_alept, ext_odb_alful);
+}
+
+static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
+ u32 odb_ext_mod)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+ dma_addr_t sdb_dma_addr;
+ dma_addr_t odb_dma_addr;
+ int ret = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
+ db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
+ if (!db->ext_db)
+ return -ENOMEM;
+
+ if (sdb_ext_mod) {
+ db->ext_db->sdb_buf_list = kmalloc(
+ sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
+ if (!db->ext_db->sdb_buf_list) {
+ ret = -ENOMEM;
+ goto ext_sdb_buf_fail_out;
+ }
+
+ db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
+ HNS_ROCE_V1_EXT_SDB_SIZE,
+ &sdb_dma_addr, GFP_KERNEL);
+ if (!db->ext_db->sdb_buf_list->buf) {
+ ret = -ENOMEM;
+ goto alloc_sq_db_buf_fail;
+ }
+ db->ext_db->sdb_buf_list->map = sdb_dma_addr;
+
+ db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
+ hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
+ HNS_ROCE_V1_EXT_SDB_ALFUL);
+ } else {
+ hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
+ HNS_ROCE_V1_SDB_ALFUL);
+ }
+
+ if (odb_ext_mod) {
+ db->ext_db->odb_buf_list = kmalloc(
+ sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
+ if (!db->ext_db->odb_buf_list) {
+ ret = -ENOMEM;
+ goto ext_odb_buf_fail_out;
+ }
+
+ db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
+ HNS_ROCE_V1_EXT_ODB_SIZE,
+ &odb_dma_addr, GFP_KERNEL);
+ if (!db->ext_db->odb_buf_list->buf) {
+ ret = -ENOMEM;
+ goto alloc_otr_db_buf_fail;
+ }
+ db->ext_db->odb_buf_list->map = odb_dma_addr;
+
+ db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
+ hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
+ HNS_ROCE_V1_EXT_ODB_ALFUL);
+ } else {
+ hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
+ HNS_ROCE_V1_ODB_ALFUL);
+ }
+
+ hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);
+
+ return 0;
+
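+/*
+ * The error labels below unwind in reverse allocation order: each
+ * label frees exactly what was successfully set up before the failing
+ * step.
+ */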
+alloc_otr_db_buf_fail:
+ kfree(db->ext_db->odb_buf_list);
+
+ext_odb_buf_fail_out:
+ if (sdb_ext_mod) {
+ dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
+ db->ext_db->sdb_buf_list->buf,
+ db->ext_db->sdb_buf_list->map);
+ }
+
+alloc_sq_db_buf_fail:
+ if (sdb_ext_mod)
+ kfree(db->ext_db->sdb_buf_list);
+
+ext_sdb_buf_fail_out:
+ kfree(db->ext_db);
+ return ret;
+}
+
+static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+ u32 sdb_ext_mod;
+ u32 odb_ext_mod;
+ u32 sdb_evt_mod;
+ u32 odb_evt_mod;
+ int ret = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
+ memset(db, 0, sizeof(*db));
+
+ /* Default DB mode */
+ sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
+ odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
+ sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
+ odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;
+
+ db->sdb_ext_mod = sdb_ext_mod;
+ db->odb_ext_mod = odb_ext_mod;
+
+ /* Init extend DB */
+ ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
+ if (ret) {
+ dev_err(dev, "Failed in extend DB configuration.\n");
+ return ret;
+ }
+
+ hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);
+
+ return 0;
+}
+
+static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_db_table *db;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ db = &priv->db_table;
+
+ if (db->sdb_ext_mod) {
+ dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
+ db->ext_db->sdb_buf_list->buf,
+ db->ext_db->sdb_buf_list->map);
+ kfree(db->ext_db->sdb_buf_list);
+ }
+
+ if (db->odb_ext_mod) {
+ dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
+ db->ext_db->odb_buf_list->buf,
+ db->ext_db->odb_buf_list->map);
+ kfree(db->ext_db->odb_buf_list);
+ }
+
+ kfree(db->ext_db);
+}
+
+static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ int raq_shift = 0;
+ dma_addr_t addr;
+ u32 val;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_raq_table *raq;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ raq = &priv->raq_table;
+
+ raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
+ if (!raq->e_raq_buf)
+ return -ENOMEM;
+
+ raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
+ &addr, GFP_KERNEL);
+ if (!raq->e_raq_buf->buf) {
+ ret = -ENOMEM;
+ goto err_dma_alloc_raq;
+ }
+ raq->e_raq_buf->map = addr;
+
+ /* Configure the extended RAQ base address (48 bit, 4K aligned) */
+ roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);
+
+ /* Configure raq_shift */
+ raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
+ val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
+ roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
+ ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
+ /*
+ * 44 = 32 + 12: when passing the address to hardware, shift right
+ * by 12 because of the 4K page size, and by a further 32 to obtain
+ * the high 32 bits of the value handed to hardware.
+ */
+ roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
+ ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
+ raq->e_raq_buf->map >> 44);
+ roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
+ dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);
+
+ /* Configure raq threshold */
+ val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
+ roce_set_field(val, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
+ ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
+ HNS_ROCE_V1_EXT_RAQ_WF);
+ roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
+ dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);
+
+ /* Enable extend raq */
+ val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
+ roce_set_field(val,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
+ POL_TIME_INTERVAL_VAL);
+ roce_set_bit(val, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
+ roce_set_field(val,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
+ 2);
+ roce_set_bit(val,
+ ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
+ roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
+ dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);
+
+ /* Enable raq drop */
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ roce_set_bit(val, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+ dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);
+
+ return 0;
+
+err_dma_alloc_raq:
+ kfree(raq->e_raq_buf);
+ return ret;
+}
+
+static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ struct hns_roce_raq_table *raq;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ raq = &priv->raq_table;
+
+ dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
+ raq->e_raq_buf->map);
+ kfree(raq->e_raq_buf);
+}
+
+static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
+{
+ u32 val;
+
+ if (enable_flag) {
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ /* Open all ports */
+ roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
+ ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
+ ALL_PORT_VAL_OPEN);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+ } else {
+ val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
+ /* Close all ports */
+ roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
+ ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
+ roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
+ }
+}
+
+static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ int ret;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.qpc_buf.buf)
+ return -ENOMEM;
+
+ priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.mtpt_buf.buf) {
+ ret = -ENOMEM;
+ goto err_failed_alloc_mtpt_buf;
+ }
+
+ priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.cqc_buf.buf) {
+ ret = -ENOMEM;
+ goto err_failed_alloc_cqc_buf;
+ }
+
+ return 0;
+
+err_failed_alloc_cqc_buf:
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+err_failed_alloc_mtpt_buf:
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+
+ return ret;
+}
+
+static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+}
+
+/**
+ * hns_roce_v1_reset - reset RoCE
+ * @hr_dev: RoCE device struct pointer
+ * @dereset: true -- deassert (drop) reset, false -- assert reset
+ *
+ * Returns 0 on success, a negative errno on failure.
+ */
+int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
+{
+ struct device_node *dsaf_node;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ /* check if this is DT/ACPI case */
+ if (dev_of_node(dev)) {
+ dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
+ if (!dsaf_node) {
+ dev_err(dev, "could not find dsaf-handle\n");
+ return -EINVAL;
+ }
+ fwnode = &dsaf_node->fwnode;
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ struct acpi_reference_args args;
+
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "dsaf-handle", 0, &args);
+ if (ret) {
+ dev_err(dev, "could not find dsaf-handle\n");
+ return ret;
+ }
+ fwnode = acpi_fwnode_handle(args.adev);
+ } else {
+ dev_err(dev, "cannot read data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ ret = hns_dsaf_roce_reset(fwnode, false);
+ if (ret)
+ return ret;
+
+ if (dereset) {
+ msleep(SLEEP_TIME_INTERVAL);
+ ret = hns_dsaf_roce_reset(fwnode, true);
+ }
+
+ return ret;
+}
+
+void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
+{
+ int i = 0;
+ struct hns_roce_caps *caps = &hr_dev->caps;
+
+ hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
+ hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
+ ROCEE_VENDOR_PART_ID_REG));
+ hr_dev->hw_rev = le32_to_cpu(roce_read(hr_dev, ROCEE_HW_VERSION_REG));
+
+ hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
+ ROCEE_SYS_IMAGE_GUID_L_REG)) |
+ ((u64)le32_to_cpu(roce_read(hr_dev,
+ ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
+
+ caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
+ caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
+ caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
+ caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
+ caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
+ caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
+ caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
+ caps->num_uars = HNS_ROCE_V1_UAR_NUM;
+ caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
+ caps->num_aeq_vectors = HNS_ROCE_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_AEQE_OF_VEC_NUM;
+ caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
+ caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
+ caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
+ caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
+ caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
+ caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
+ caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
+ caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
+ caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
+ caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
+ caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
+ caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
+ caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
+ caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
+ caps->reserved_lkey = 0;
+ caps->reserved_pds = 0;
+ caps->reserved_mrws = 1;
+ caps->reserved_uars = 0;
+ caps->reserved_cqs = 0;
+
+ for (i = 0; i < caps->num_ports; i++)
+ caps->pkey_table_len[i] = 1;
+
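+ /*
+ * E.g. with num_ports == 6 and HNS_ROCE_V1_GID_NUM == 16:
+ * 16 % 6 == 4, so ports 0-3 each get 16 / 6 + 1 == 3 GIDs and
+ * ports 4-5 each get 16 / 6 == 2, covering all 16 entries.
+ */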
+ for (i = 0; i < caps->num_ports; i++) {
+ /* Six ports share 16 GIDs in the v1 engine */
+ if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
+ caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
+ caps->num_ports;
+ else
+ caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
+ caps->num_ports + 1;
+ }
+
+ for (i = 0; i < caps->num_comp_vectors; i++)
+ caps->ceqe_depth[i] = HNS_ROCE_V1_NUM_COMP_EQE;
+
+ caps->aeqe_depth = HNS_ROCE_V1_NUM_ASYNC_EQE;
+ caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
+ ROCEE_ACK_DELAY_REG));
+ caps->max_mtu = IB_MTU_2048;
+}
+
+int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ u32 val;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ /* DMAE user config */
+ val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
+ roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
+ ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
+ roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
+ ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
+ 1 << PAGES_SHIFT_16);
+ roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
+
+ val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
+ roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
+ ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
+ roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
+ ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
+ 1 << PAGES_SHIFT_16);
+
+ ret = hns_roce_db_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "doorbell init failed!\n");
+ return ret;
+ }
+
+ ret = hns_roce_raq_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "raq init failed!\n");
+ goto error_failed_raq_init;
+ }
+
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
+
+ ret = hns_roce_bt_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "bt init failed!\n");
+ goto error_failed_bt_init;
+ }
+
+ return 0;
+
+error_failed_bt_init:
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+ hns_roce_raq_free(hr_dev);
+
+error_failed_raq_init:
+ hns_roce_db_free(hr_dev);
+ return ret;
+}
+
+void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bt_free(hr_dev);
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+ hns_roce_raq_free(hr_dev);
+ hns_roce_db_free(hr_dev);
+}
+
+void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ union ib_gid *gid)
+{
+ u32 *p = NULL;
+ u8 gid_idx = 0;
+
+ gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
+
+ p = (u32 *)&gid->raw[0];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+
+ p = (u32 *)&gid->raw[4];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+
+ p = (u32 *)&gid->raw[8];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+
+ p = (u32 *)&gid->raw[0xc];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+}
+
+void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
+{
+ u32 reg_smac_l;
+ u16 reg_smac_h;
+ u16 *p_h;
+ u32 *p;
+ u32 val;
+
+ p = (u32 *)(&addr[0]);
+ reg_smac_l = *p;
+ roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
+ PHY_PORT_OFFSET * phy_port);
+
+ val = roce_read(hr_dev,
+ ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
+ p_h = (u16 *)(&addr[4]);
+ reg_smac_h = *p_h;
+ roce_set_field(val, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
+ ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
+ roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
+ val);
+}
+
+void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
+ enum ib_mtu mtu)
+{
+ u32 val;
+
+ val = roce_read(hr_dev,
+ ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
+ roce_set_field(val, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
+ ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
+ roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
+ val);
+}
+
+int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+ unsigned long mtpt_idx)
+{
+ struct hns_roce_v1_mpt_entry *mpt_entry;
+ struct scatterlist *sg;
+ u64 *pages;
+ int entry;
+ int i;
+
+ /* MPT filled into mailbox buf */
+ mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
+ MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
+ roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
+ MPT_BYTE_4_KEY_S, mr->key);
+ roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
+ MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
+ (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
+ roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
+ MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
+ (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
+ (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
+ (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
+ 0);
+ roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
+
+ roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
+ MPT_BYTE_12_PBL_ADDR_H_S, 0);
+ roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
+ MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
+
+ mpt_entry->virt_addr_l = (u32)mr->iova;
+ mpt_entry->virt_addr_h = (u32)(mr->iova >> 32);
+ mpt_entry->length = (u32)mr->size;
+
+ roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
+ MPT_BYTE_28_PD_S, mr->pd);
+ roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
+ MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
+ roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
+ MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
+
+ /* A DMA memory region requires no page translation entries */
+ if (mr->type == MR_TYPE_DMA)
+ return 0;
+
+ pages = (u64 *) __get_free_page(GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+ pages[i] = ((u64)sg_dma_address(sg)) >> 12;
+
+ /* Record at most the first 7 page addresses directly in the MTPT */
+ if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
+ break;
+ i++;
+ }
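+ /*
+ * Pages beyond the first 7 are presumably reached through the PBL,
+ * whose DMA address is programmed via pbl_addr_l/mpt_byte_12 below.
+ */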
+
+ /* Register user mr */
+ for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
+ switch (i) {
+ case 0:
+ mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
+ roce_set_field(mpt_entry->mpt_byte_36,
+ MPT_BYTE_36_PA0_H_M,
+ MPT_BYTE_36_PA0_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
+ break;
+ case 1:
+ roce_set_field(mpt_entry->mpt_byte_36,
+ MPT_BYTE_36_PA1_L_M,
+ MPT_BYTE_36_PA1_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_40,
+ MPT_BYTE_40_PA1_H_M,
+ MPT_BYTE_40_PA1_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
+ break;
+ case 2:
+ roce_set_field(mpt_entry->mpt_byte_40,
+ MPT_BYTE_40_PA2_L_M,
+ MPT_BYTE_40_PA2_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_44,
+ MPT_BYTE_44_PA2_H_M,
+ MPT_BYTE_44_PA2_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
+ break;
+ case 3:
+ roce_set_field(mpt_entry->mpt_byte_44,
+ MPT_BYTE_44_PA3_L_M,
+ MPT_BYTE_44_PA3_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_48,
+ MPT_BYTE_48_PA3_H_M,
+ MPT_BYTE_48_PA3_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_8)));
+ break;
+ case 4:
+ mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
+ roce_set_field(mpt_entry->mpt_byte_56,
+ MPT_BYTE_56_PA4_H_M,
+ MPT_BYTE_56_PA4_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
+ break;
+ case 5:
+ roce_set_field(mpt_entry->mpt_byte_56,
+ MPT_BYTE_56_PA5_L_M,
+ MPT_BYTE_56_PA5_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_60,
+ MPT_BYTE_60_PA5_H_M,
+ MPT_BYTE_60_PA5_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
+ break;
+ case 6:
+ roce_set_field(mpt_entry->mpt_byte_60,
+ MPT_BYTE_60_PA6_L_M,
+ MPT_BYTE_60_PA6_L_S,
+ cpu_to_le32((u32)(pages[i])));
+ roce_set_field(mpt_entry->mpt_byte_64,
+ MPT_BYTE_64_PA6_H_M,
+ MPT_BYTE_64_PA6_H_S,
+ cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
+ break;
+ default:
+ break;
+ }
+ }
+
+ free_page((unsigned long) pages);
+
+ mpt_entry->pbl_addr_l = (u32)(mr->pbl_dma_addr);
+
+ roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
+ MPT_BYTE_12_PBL_ADDR_H_S,
+ ((u32)(mr->pbl_dma_addr >> 32)));
+
+ return 0;
+}
+
+static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
+{
+ return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
+ n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+}
+
+static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
+{
+ struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
+
+ /*
+ * A CQE is software-owned only while its owner bit is the
+ * inverse of the cons_index wrap bit
+ */
+ return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
+ !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
+}
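+
+/*
+ * Ownership convention: the producer presumably toggles the CQE owner
+ * bit on every pass through the ring, while n & (ib_cq.cqe + 1)
+ * extracts the consumer's wrap bit, so a CQE is software-owned
+ * exactly while the two bits differ.
+ */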
+
+static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
+{
+ return get_sw_cqe(hr_cq, hr_cq->cons_index);
+}
+
+void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
+{
+ u32 doorbell[2];
+
+ doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
+ doorbell[1] = 0;
+ roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
+
+ hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+}
+
+static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ struct hns_roce_cqe *cqe, *dest;
+ u32 prod_index;
+ int nfreed = 0;
+ u8 owner_bit;
+
+ for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
+ ++prod_index) {
+ if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
+ break;
+ }
+
+ /*
+ * Now backwards through the CQ, removing CQ entries
+ * that match our QP by overwriting them with next entries.
+ */
+ while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
+ cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
+ if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
+ CQE_BYTE_16_LOCAL_QPN_S) &
+ HNS_ROCE_CQE_QPN_MASK) == qpn) {
+ /* The v1 engine does not support SRQ */
+ ++nfreed;
+ } else if (nfreed) {
+ dest = get_cqe(hr_cq, (prod_index + nfreed) &
+ hr_cq->ib_cq.cqe);
+ owner_bit = roce_get_bit(dest->cqe_byte_4,
+ CQE_BYTE_4_OWNER_S);
+ memcpy(dest, cqe, sizeof(*cqe));
+ roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
+ owner_bit);
+ }
+ }
+
+ if (nfreed) {
+ hr_cq->cons_index += nfreed;
+ /*
+ * Make sure update of buffer contents is done before
+ * updating consumer index.
+ */
+ wmb();
+
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
+ }
+}
+
+static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ spin_lock_irq(&hr_cq->lock);
+ __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
+ spin_unlock_irq(&hr_cq->lock);
+}
+
+void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
+ dma_addr_t dma_handle, int nent, u32 vector)
+{
+ struct hns_roce_cq_context *cq_context = NULL;
+ void __iomem *tptr_addr;
+
+ cq_context = mb_buf;
+ memset(cq_context, 0, sizeof(*cq_context));
+
+ tptr_addr = NULL;
+ hr_dev->priv_addr = tptr_addr;
+ hr_cq->tptr_addr = tptr_addr;
+
+ /* Register cq_context members */
+ roce_set_field(cq_context->cqc_byte_4,
+ CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
+ CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
+ roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
+ CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
+ cq_context->cqc_byte_4 = cpu_to_le32(cq_context->cqc_byte_4);
+
+ cq_context->cq_bt_l = (u32)dma_handle;
+ cq_context->cq_bt_l = cpu_to_le32(cq_context->cq_bt_l);
+
+ roce_set_field(cq_context->cqc_byte_12,
+ CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
+ CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
+ ((u64)dma_handle >> 32));
+ roce_set_field(cq_context->cqc_byte_12,
+ CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
+ CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
+ ilog2((unsigned int)nent));
+ roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
+ CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
+ cq_context->cqc_byte_12 = cpu_to_le32(cq_context->cqc_byte_12);
+
+ cq_context->cur_cqe_ba0_l = (u32)(mtts[0]);
+ cq_context->cur_cqe_ba0_l = cpu_to_le32(cq_context->cur_cqe_ba0_l);
+
+ roce_set_field(cq_context->cqc_byte_20,
+ CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
+ CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
+ cpu_to_le32((mtts[0]) >> 32));
+ /* Field owned by dedicated hardware; set it to 0 directly */
+ roce_set_field(cq_context->cqc_byte_20,
+ CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
+ CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
+ /*
+ * 44 = 32 + 12: when passing the address to hardware, shift right
+ * by 12 because of the 4K page size, and by a further 32 to obtain
+ * the high 32 bits of the value handed to hardware.
+ */
+ roce_set_field(cq_context->cqc_byte_20,
+ CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
+ CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
+ (u64)tptr_addr >> 44);
+ cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
+
+ cq_context->cqe_tptr_addr_l = (u32)((u64)tptr_addr >> 12);
+
+ roce_set_field(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
+ CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
+ roce_set_bit(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
+ roce_set_bit(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
+ roce_set_bit(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
+ roce_set_bit(cq_context->cqc_byte_32,
+ CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
+ 0);
+ /* The initial value of the CQ's ci is 0 */
+ roce_set_field(cq_context->cqc_byte_32,
+ CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
+ CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
+ cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
+}
+
+int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ u32 notification_flag;
+ u32 doorbell[2];
+ int ret = 0;
+
+ notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
+ IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
+ /*
+ * flags = 0: notification flag = 1, notify on the next completion
+ * flags = 1: notification flag = 0, notify on solicited only
+ */
+ doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
+ roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
+ roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
+ ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
+ hr_cq->cqn | notification_flag);
+
+ hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+
+ return ret;
+}
+
+static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
+ struct hns_roce_qp **cur_qp, struct ib_wc *wc)
+{
+ int qpn;
+ int is_send;
+ u16 wqe_ctr;
+ u32 status;
+ u32 opcode;
+ struct hns_roce_cqe *cqe;
+ struct hns_roce_qp *hr_qp;
+ struct hns_roce_wq *wq;
+ struct hns_roce_wqe_ctrl_seg *sq_wqe;
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ struct device *dev = &hr_dev->pdev->dev;
+
+ /* Find the CQE at the current consumer index */
+ cqe = next_cqe_sw(hr_cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ ++hr_cq->cons_index;
+ /* Do not read the CQE contents before the ownership check */
+ rmb();
+ /* 0->SQ, 1->RQ */
+ is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
+
+ /* Local_qpn in a UD CQE is always 1, so the real QPN must be computed */
+ if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
+ CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
+ qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
+ CQE_BYTE_20_PORT_NUM_S) +
+ roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
+ CQE_BYTE_16_LOCAL_QPN_S) *
+ HNS_ROCE_MAX_PORTS;
+ } else {
+ qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
+ CQE_BYTE_16_LOCAL_QPN_S);
+ }
+
+ if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
+ hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (unlikely(!hr_qp)) {
+ dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
+ hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
+ return -EINVAL;
+ }
+
+ *cur_qp = hr_qp;
+ }
+
+ wc->qp = &(*cur_qp)->ibqp;
+ wc->vendor_err = 0;
+
+ status = roce_get_field(cqe->cqe_byte_4,
+ CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
+ CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
+ HNS_ROCE_CQE_STATUS_MASK;
+ switch (status) {
+ case HNS_ROCE_CQE_SUCCESS:
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
+ wc->status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
+ wc->status = IB_WC_BAD_RESP_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
+ wc->status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
+ wc->status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
+ wc->status = IB_WC_REM_OP_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
+ wc->status = IB_WC_RETRY_EXC_ERR;
+ break;
+ case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
+ wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ /* CQE status error, directly return */
+ if (wc->status != IB_WC_SUCCESS)
+ return 0;
+
+ if (is_send) {
+ /* The CQE corresponds to the SQ */
+ sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
+ CQE_BYTE_4_WQE_INDEX_M,
+ CQE_BYTE_4_WQE_INDEX_S) &
+ ((*cur_qp)->sq.wqe_cnt - 1));
+ switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
+ case HNS_ROCE_WQE_OPCODE_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case HNS_ROCE_WQE_OPCODE_RDMA_READ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+ break;
+ case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
+ wc->opcode = IB_WC_LOCAL_INV;
+ break;
+ case HNS_ROCE_WQE_OPCODE_UD_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+ wc->wc_flags = (sq_wqe->flag & HNS_ROCE_WQE_IMM ?
+ IB_WC_WITH_IMM : 0);
+
+ wq = &(*cur_qp)->sq;
+ if ((*cur_qp)->sq_signal_bits) {
+ /*
+ * If sq_signal_bits is set, first advance the tail
+ * pointer to the WQE that this CQE corresponds to
+ */
+ wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
+ CQE_BYTE_4_WQE_INDEX_M,
+ CQE_BYTE_4_WQE_INDEX_S);
+ wq->tail += (wqe_ctr - (u16)wq->tail) &
+ (wq->wqe_cnt - 1);
+ }
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+ } else {
+ /* The CQE corresponds to the RQ */
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+ opcode = roce_get_field(cqe->cqe_byte_4,
+ CQE_BYTE_4_OPERATION_TYPE_M,
+ CQE_BYTE_4_OPERATION_TYPE_S) &
+ HNS_ROCE_CQE_OPCODE_MASK;
+ switch (opcode) {
+ case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = le32_to_cpu(cqe->immediate_data);
+ break;
+ case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
+ if (roce_get_bit(cqe->cqe_byte_4,
+ CQE_BYTE_4_IMM_INDICATOR_S)) {
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = le32_to_cpu(
+ cqe->immediate_data);
+ } else {
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+ }
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ /* Update tail pointer, record wr_id */
+ wq = &(*cur_qp)->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+ wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
+ CQE_BYTE_20_SL_S);
+ wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
+ CQE_BYTE_20_REMOTE_QPN_M,
+ CQE_BYTE_20_REMOTE_QPN_S);
+ wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
+ CQE_BYTE_20_GRH_PRESENT_S) ?
+ IB_WC_GRH : 0);
+ wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
+ CQE_BYTE_28_P_KEY_IDX_M,
+ CQE_BYTE_28_P_KEY_IDX_S);
+ }
+
+ return 0;
+}
+
+int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ struct hns_roce_qp *cur_qp = NULL;
+ unsigned long flags;
+ int npolled;
+ int ret = 0;
+
+ spin_lock_irqsave(&hr_cq->lock, flags);
+
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
+ if (ret)
+ break;
+ }
+
+ if (npolled)
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
+
+ spin_unlock_irqrestore(&hr_cq->lock, flags);
+
+ if (ret == 0 || ret == -EAGAIN)
+ return npolled;
+
+ return ret;
+}
+
+int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ unsigned long end = 0, flags = 0;
+ uint32_t bt_cmd_val[2] = {0};
+ void __iomem *bt_cmd;
+ u64 bt_ba = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+ bt_ba = priv->bt_table.qpc_buf.map >> 12;
+ break;
+ case HEM_TYPE_MTPT:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
+ bt_ba = priv->bt_table.mtpt_buf.map >> 12;
+ break;
+ case HEM_TYPE_CQC:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+ bt_ba = priv->bt_table.cqc_buf.map >> 12;
+ break;
+ case HEM_TYPE_SRQC:
+ dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
+ return -EINVAL;
+ default:
+ return 0;
+ }
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+ roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+ roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+ spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+ end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
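+ /*
+ * Poll until hardware clears the sync bit (bit 31 of BT_CMD_H),
+ * i.e. until any previous base-address command has presumably been
+ * consumed, giving up after HW_SYNC_TIMEOUT_MSECS.
+ */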
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+ if (!(time_before(jiffies, end))) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+ flags);
+ return -EBUSY;
+ }
+ } else {
+ break;
+ }
+ /* bt_cmd_lock is held with IRQs disabled, so we must not sleep */
+ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
+ }
+
+ bt_cmd_val[0] = (uint32_t)bt_ba;
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
+ hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
+
+ return 0;
+}
+
+static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt,
+ enum hns_roce_qp_state cur_state,
+ enum hns_roce_qp_state new_state,
+ struct hns_roce_qp_context *context,
+ struct hns_roce_qp *hr_qp)
+{
+ static const u16
+ op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
+ [HNS_ROCE_QP_STATE_RST] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
+ },
+ [HNS_ROCE_QP_STATE_INIT] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ /* Note: In v1 engine, HW doesn't support INIT2INIT.
+ * We use the RST2INIT cmd instead of INIT2INIT.
+ */
+ [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
+ [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
+ },
+ [HNS_ROCE_QP_STATE_RTR] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
+ },
+ [HNS_ROCE_QP_STATE_RTS] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
+ [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
+ },
+ [HNS_ROCE_QP_STATE_SQD] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
+ [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
+ },
+ [HNS_ROCE_QP_STATE_ERR] = {
+ [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
+ [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
+ }
+ };
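+ /*
+ * op[cur_state][new_state] yields the mailbox command for each legal
+ * transition; a zero entry marks the transition as unsupported and
+ * is rejected below.
+ */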
+
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret = 0;
+
+ if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
+ new_state >= HNS_ROCE_QP_NUM_STATE ||
+ !op[cur_state][new_state]) {
+ dev_err(dev, "[modify_qp]not support state %d to %d\n",
+ cur_state, new_state);
+ return -EINVAL;
+ }
+
+ if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
+ return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
+ HNS_ROCE_CMD_2RST_QP,
+ HNS_ROCE_CMD_TIME_CLASS_A);
+
+ if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
+ return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
+ HNS_ROCE_CMD_2ERR_QP,
+ HNS_ROCE_CMD_TIME_CLASS_A);
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, context, sizeof(*context));
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
+ op[cur_state][new_state],
+ HNS_ROCE_CMD_TIME_CLASS_C);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_sqp_context *context;
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t dma_handle = 0;
+ int rq_pa_start;
+ u32 reg_val;
+ u64 *mtts;
+ u32 *addr;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ /* Search QP buf's MTTs */
+ mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ hr_qp->mtt.first_seg, &dma_handle);
+ if (!mtts) {
+ dev_err(dev, "qp buf pa find failed\n");
+ goto out;
+ }
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ roce_set_field(context->qp1c_bytes_4,
+ QP1C_BYTES_4_SQ_WQE_SHIFT_M,
+ QP1C_BYTES_4_SQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(context->qp1c_bytes_4,
+ QP1C_BYTES_4_RQ_WQE_SHIFT_M,
+ QP1C_BYTES_4_RQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
+ QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
+
+ context->sq_rq_bt_l = (u32)(dma_handle);
+ roce_set_field(context->qp1c_bytes_12,
+ QP1C_BYTES_12_SQ_RQ_BT_H_M,
+ QP1C_BYTES_12_SQ_RQ_BT_H_S,
+ ((u32)(dma_handle >> 32)));
+
+ roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
+ QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
+ roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
+ QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
+ roce_set_bit(context->qp1c_bytes_16,
+ QP1C_BYTES_16_SIGNALING_TYPE_S,
+ hr_qp->sq_signal_bits);
+ roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
+ 1);
+ roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
+ 1);
+ roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
+ 0);
+
+ roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
+ QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
+ roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
+ QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
+
+ rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
+ context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
+
+ roce_set_field(context->qp1c_bytes_28,
+ QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
+ QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
+ (mtts[rq_pa_start]) >> 32);
+ roce_set_field(context->qp1c_bytes_28,
+ QP1C_BYTES_28_RQ_CUR_IDX_M,
+ QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
+
+ roce_set_field(context->qp1c_bytes_32,
+ QP1C_BYTES_32_RX_CQ_NUM_M,
+ QP1C_BYTES_32_RX_CQ_NUM_S,
+ to_hr_cq(ibqp->recv_cq)->cqn);
+ roce_set_field(context->qp1c_bytes_32,
+ QP1C_BYTES_32_TX_CQ_NUM_M,
+ QP1C_BYTES_32_TX_CQ_NUM_S,
+ to_hr_cq(ibqp->send_cq)->cqn);
+
+ context->cur_sq_wqe_ba_l = (u32)mtts[0];
+
+ roce_set_field(context->qp1c_bytes_40,
+ QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
+ QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
+ (mtts[0]) >> 32);
+ roce_set_field(context->qp1c_bytes_40,
+ QP1C_BYTES_40_SQ_CUR_IDX_M,
+ QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
+
+ /* Copy context to QP1C register */
+ addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->phy_port * sizeof(*context));
+
+ writel(context->qp1c_bytes_4, addr);
+ writel(context->sq_rq_bt_l, addr + 1);
+ writel(context->qp1c_bytes_12, addr + 2);
+ writel(context->qp1c_bytes_16, addr + 3);
+ writel(context->qp1c_bytes_20, addr + 4);
+ writel(context->cur_rq_wqe_ba_l, addr + 5);
+ writel(context->qp1c_bytes_28, addr + 6);
+ writel(context->qp1c_bytes_32, addr + 7);
+ writel(context->cur_sq_wqe_ba_l, addr + 8);
+ writel(context->qp1c_bytes_40, addr + 9);
+ }
+
+ /* Modify QP1C status */
+ reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->phy_port * sizeof(*context));
+ roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
+ ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
+ roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->phy_port * sizeof(*context), reg_val);
+
+ hr_qp->state = new_state;
+ if (new_state == IB_QPS_RESET) {
+ hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
+ ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+ if (ibqp->send_cq != ibqp->recv_cq)
+ hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
+ hr_qp->qpn, NULL);
+
+ hr_qp->rq.head = 0;
+ hr_qp->rq.tail = 0;
+ hr_qp->sq.head = 0;
+ hr_qp->sq.tail = 0;
+ hr_qp->sq_next_wqe = 0;
+ }
+
+ kfree(context);
+ return 0;
+
+out:
+ kfree(context);
+ return -EINVAL;
+}
+
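+/*
+ * Modify the state of a regular QP. The context for the requested
+ * transition is built up field by field and handed to hardware through
+ * hns_roce_v1_qp_modify(); an INIT-to-INIT transition additionally
+ * re-rings the RQ doorbell (see the comment near the doorbell write
+ * below).
+ */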
+static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_qp_context *context;
+ dma_addr_t dma_handle_2 = 0;
+ dma_addr_t dma_handle = 0;
+ u32 doorbell[2] = {0};
+ int rq_pa_start = 0;
+ u64 *mtts_2 = NULL;
+ int ret = -EINVAL;
+ u64 *mtts = NULL;
+ int port;
+ u8 *dmac;
+ u8 *smac;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ /* Search qp buf's mtts */
+ mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ hr_qp->mtt.first_seg, &dma_handle);
+ if (mtts == NULL) {
+ dev_err(dev, "qp buf pa find failed\n");
+ goto out;
+ }
+
+ /* Search IRRL's mtts */
+ mtts_2 = hns_roce_table_find(&hr_dev->qp_table.irrl_table, hr_qp->qpn,
+ &dma_handle_2);
+ if (mtts_2 == NULL) {
+ dev_err(dev, "qp irrl_table find failed\n");
+ goto out;
+ }
+
+ /*
+ * Reset to init
+ * Mandatory params:
+ * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
+ * Optional params: none
+ */
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
+ to_hr_qp_type(hr_qp->ibqp.qp_type));
+
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ );
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ );
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
+ QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
+ QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_PD_M,
+ QP_CONTEXT_QPC_BYTES_4_PD_S,
+ to_hr_pd(ibqp->pd)->pdn);
+ hr_qp->access_flags = attr->qp_access_flags;
+ roce_set_field(context->qpc_bytes_8,
+ QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
+ QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
+ to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(context->qpc_bytes_8,
+ QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
+ QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
+ to_hr_cq(ibqp->recv_cq)->cqn);
+
+ if (ibqp->srq)
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
+ QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
+ to_hr_srq(ibqp->srq)->srqn);
+
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
+ attr->pkey_index);
+ hr_qp->pkey_index = attr->pkey_index;
+ roce_set_field(context->qpc_bytes_16,
+ QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
+ QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
+
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
+ to_hr_qp_type(hr_qp->ibqp.qp_type));
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
+ !!(attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ));
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
+ !!(attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE));
+ } else {
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
+ !!(hr_qp->access_flags &
+ IB_ACCESS_REMOTE_READ));
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
+ !!(hr_qp->access_flags &
+ IB_ACCESS_REMOTE_WRITE));
+ }
+
+ roce_set_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
+ QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
+ QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTES_4_PD_M,
+ QP_CONTEXT_QPC_BYTES_4_PD_S,
+ to_hr_pd(ibqp->pd)->pdn);
+
+ roce_set_field(context->qpc_bytes_8,
+ QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
+ QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
+ to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(context->qpc_bytes_8,
+ QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
+ QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
+ to_hr_cq(ibqp->recv_cq)->cqn);
+
+ if (ibqp->srq)
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
+ QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
+ to_hr_srq(ibqp->srq)->srqn);
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
+ attr->pkey_index);
+ else
+ roce_set_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
+ hr_qp->pkey_index);
+
+ roce_set_field(context->qpc_bytes_16,
+ QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
+ QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ if ((attr_mask & IB_QP_ALT_PATH) ||
+ (attr_mask & IB_QP_ACCESS_FLAGS) ||
+ (attr_mask & IB_QP_PKEY_INDEX) ||
+ (attr_mask & IB_QP_QKEY)) {
+ dev_err(dev, "INIT2RTR attr_mask error\n");
+ goto out;
+ }
+
+ dmac = (u8 *)attr->ah_attr.dmac;
+
+ context->sq_rq_bt_l = (u32)(dma_handle);
+ roce_set_field(context->qpc_bytes_24,
+ QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
+ QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
+ ((u32)(dma_handle >> 32)));
+ roce_set_bit(context->qpc_bytes_24,
+ QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
+ 1);
+ roce_set_field(context->qpc_bytes_24,
+ QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
+ QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
+ attr->min_rnr_timer);
+ context->irrl_ba_l = (u32)(dma_handle_2);
+ roce_set_field(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
+ QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
+ ((u32)(dma_handle_2 >> 32)) &
+ QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
+ roce_set_field(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
+ QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
+ 1);
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
+ hr_qp->sq_signal_bits);
+
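+ /*
+ * Mark the QP for loopback when the destination MAC matches
+ * the MAC of any local port.
+ */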
+ for (port = 0; port < hr_dev->caps.num_ports; port++) {
+ smac = (u8 *)hr_dev->dev_addr[port];
+ dev_dbg(dev, "smac: %2x: %2x: %2x: %2x: %2x: %2x\n",
+ smac[0], smac[1], smac[2], smac[3], smac[4],
+ smac[5]);
+ if ((dmac[0] == smac[0]) && (dmac[1] == smac[1]) &&
+ (dmac[2] == smac[2]) && (dmac[3] == smac[3]) &&
+ (dmac[4] == smac[4]) && (dmac[5] == smac[5])) {
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S,
+ 1);
+ break;
+ }
+ }
+
+ if (hr_dev->loop_idc == 0x1)
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
+
+ roce_set_bit(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
+ attr->ah_attr.ah_flags);
+ roce_set_field(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
+ QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
+ ilog2((unsigned int)attr->max_dest_rd_atomic));
+
+ roce_set_field(context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
+ attr->dest_qp_num);
+
+ /* Configure GID index */
+ roce_set_field(context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
+ hns_get_gid_index(hr_dev,
+ attr->ah_attr.port_num - 1,
+ attr->ah_attr.grh.sgid_index));
+
+ memcpy(&(context->dmac_l), dmac, 4);
+
+ roce_set_field(context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
+ QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
+ *((u16 *)(&dmac[4])));
+ roce_set_field(context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
+ QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
+ attr->ah_attr.static_rate);
+ roce_set_field(context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
+ attr->ah_attr.grh.hop_limit);
+
+ roce_set_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
+ attr->ah_attr.grh.flow_label);
+ roce_set_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
+ attr->ah_attr.grh.traffic_class);
+ roce_set_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_MTU_M,
+ QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
+
+ memcpy(context->dgid, attr->ah_attr.grh.dgid.raw,
+ sizeof(attr->ah_attr.grh.dgid.raw));
+
+ dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
+ roce_get_field(context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
+ QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
+
+ roce_set_field(context->qpc_bytes_68,
+ QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
+ QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
+ hr_qp->rq.head);
+ roce_set_field(context->qpc_bytes_68,
+ QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
+
+ rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
+ context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
+
+ roce_set_field(context->qpc_bytes_76,
+ QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
+ QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
+ mtts[rq_pa_start] >> 32);
+ roce_set_field(context->qpc_bytes_76,
+ QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
+ QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
+
+ context->rx_rnr_time = 0;
+
+ roce_set_field(context->qpc_bytes_84,
+ QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
+ QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
+ attr->rq_psn - 1);
+ roce_set_field(context->qpc_bytes_84,
+ QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
+ QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
+
+ roce_set_field(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
+ attr->rq_psn);
+ roce_set_bit(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
+ roce_set_bit(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
+ roce_set_field(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
+ 0);
+ roce_set_field(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
+ 0);
+
+ context->dma_length = 0;
+ context->r_key = 0;
+ context->va_l = 0;
+ context->va_h = 0;
+
+ roce_set_field(context->qpc_bytes_108,
+ QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
+ QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
+ roce_set_bit(context->qpc_bytes_108,
+ QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
+ roce_set_bit(context->qpc_bytes_108,
+ QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
+
+ roce_set_field(context->qpc_bytes_112,
+ QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
+ QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
+ roce_set_field(context->qpc_bytes_112,
+ QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
+ QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
+
+ /* For chip resp ack */
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
+ hr_qp->phy_port);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_SL_M,
+ QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
+ hr_qp->sl = attr->ah_attr.sl;
+ } else if (cur_state == IB_QPS_RTR &&
+ new_state == IB_QPS_RTS) {
+ /* If any optional param is set, return an error */
+ if ((attr_mask & IB_QP_ALT_PATH) ||
+ (attr_mask & IB_QP_ACCESS_FLAGS) ||
+ (attr_mask & IB_QP_QKEY) ||
+ (attr_mask & IB_QP_PATH_MIG_STATE) ||
+ (attr_mask & IB_QP_CUR_STATE) ||
+ (attr_mask & IB_QP_MIN_RNR_TIMER)) {
+ dev_err(dev, "RTR2RTS attr_mask error\n");
+ goto out;
+ }
+
+ context->rx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
+
+ roce_set_field(context->qpc_bytes_120,
+ QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
+ QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
+ (mtts[0]) >> 32);
+
+ roce_set_field(context->qpc_bytes_124,
+ QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
+ QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
+ roce_set_field(context->qpc_bytes_124,
+ QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
+ QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
+
+ roce_set_field(context->qpc_bytes_128,
+ QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
+ QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
+ attr->sq_psn);
+ roce_set_bit(context->qpc_bytes_128,
+ QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
+ roce_set_field(context->qpc_bytes_128,
+ QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
+ 0);
+ roce_set_bit(context->qpc_bytes_128,
+ QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
+
+ roce_set_field(context->qpc_bytes_132,
+ QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
+ QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
+ roce_set_field(context->qpc_bytes_132,
+ QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
+ QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
+
+ roce_set_field(context->qpc_bytes_136,
+ QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
+ QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
+ attr->sq_psn);
+ roce_set_field(context->qpc_bytes_136,
+ QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
+ QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
+ attr->sq_psn);
+
+ roce_set_field(context->qpc_bytes_140,
+ QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
+ QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
+ (attr->sq_psn >> SQ_PSN_SHIFT));
+ roce_set_field(context->qpc_bytes_140,
+ QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
+ QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
+ roce_set_bit(context->qpc_bytes_140,
+ QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
+
+ roce_set_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
+ QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
+ roce_set_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
+ attr->retry_cnt);
+ roce_set_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
+ QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
+ attr->rnr_retry);
+ roce_set_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_LSN_M,
+ QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
+
+ context->rnr_retry = 0;
+
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
+ QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
+ attr->retry_cnt);
+ if (attr->timeout < 0x12) {
+ dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n",
+ attr->timeout);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+ 0x12);
+ } else {
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+ attr->timeout);
+ }
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
+ QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
+ attr->rnr_retry);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
+ hr_qp->phy_port);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_SL_M,
+ QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
+ hr_qp->sl = attr->ah_attr.sl;
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
+ QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
+ ilog2((unsigned int)attr->max_rd_atomic));
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
+ context->pkt_use_len = 0;
+
+ roce_set_field(context->qpc_bytes_164,
+ QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
+ QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
+ roce_set_field(context->qpc_bytes_164,
+ QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
+ QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
+
+ roce_set_field(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
+ QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
+ attr->sq_psn);
+ roce_set_field(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
+ QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
+ roce_set_field(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
+ QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
+ roce_set_bit(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
+ roce_set_bit(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
+ roce_set_bit(context->qpc_bytes_168,
+ QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
+ context->sge_use_len = 0;
+
+ roce_set_field(context->qpc_bytes_176,
+ QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
+ roce_set_field(context->qpc_bytes_176,
+ QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
+ 0);
+ roce_set_field(context->qpc_bytes_180,
+ QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
+ roce_set_field(context->qpc_bytes_180,
+ QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
+ QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
+
+ context->tx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
+
+ roce_set_field(context->qpc_bytes_188,
+ QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
+ QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
+ (mtts[0]) >> 32);
+ roce_set_bit(context->qpc_bytes_188,
+ QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
+ roce_set_field(context->qpc_bytes_188,
+ QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
+ 0);
+ } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
+ dev_err(dev, "not support this status migration\n");
+ goto out;
+ }
+
+ /* Every state migration must update the QP state field */
+ roce_set_field(context->qpc_bytes_144,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, attr->qp_state);
+
+ /* SW pass context to HW */
+ ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
+ to_hns_roce_state(cur_state),
+ to_hns_roce_state(new_state), context,
+ hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_qp_modify failed\n");
+ goto out;
+ }
+
+ /*
+ * The driver uses rst2init in place of init2init, so the
+ * hardware must be told to refresh the RQ head via a doorbell.
+ */
+ if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ /* Ensure the context update is visible before ringing the doorbell */
+ wmb();
+
+ roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
+ RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
+ roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
+ RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+ roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
+ RQ_DOORBELL_U32_8_CMD_S, 1);
+ roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
+
+ if (ibqp->uobject) {
+ hr_qp->rq.db_reg_l = hr_dev->reg_base +
+ ROCEE_DB_OTHERS_L_0_REG +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+ }
+
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ }
+
+ hr_qp->state = new_state;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ hr_qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT) {
+ hr_qp->port = attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
+
+ if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+ hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
+ ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+ if (ibqp->send_cq != ibqp->recv_cq)
+ hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
+ hr_qp->qpn, NULL);
+
+ hr_qp->rq.head = 0;
+ hr_qp->rq.tail = 0;
+ hr_qp->sq.head = 0;
+ hr_qp->sq.tail = 0;
+ hr_qp->sq_next_wqe = 0;
+ }
+out:
+ kfree(context);
+ return ret;
+}
+
+int hns_roce_v1_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
+ return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
+ new_state);
+ else
+ return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
+ new_state);
+}
+
+static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
+{
+ switch (state) {
+ case HNS_ROCE_QP_STATE_RST:
+ return IB_QPS_RESET;
+ case HNS_ROCE_QP_STATE_INIT:
+ return IB_QPS_INIT;
+ case HNS_ROCE_QP_STATE_RTR:
+ return IB_QPS_RTR;
+ case HNS_ROCE_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case HNS_ROCE_QP_STATE_SQD:
+ return IB_QPS_SQD;
+ case HNS_ROCE_QP_STATE_ERR:
+ return IB_QPS_ERR;
+ default:
+ return IB_QPS_ERR;
+ }
+}
+
+static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_qp_context *hr_context)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
+ HNS_ROCE_CMD_QUERY_QP,
+ HNS_ROCE_CMD_TIME_CLASS_A);
+ if (!ret)
+ memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
+ else
+ dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
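+/*
+ * Query QP attributes. A QP in RESET is reported from the driver's
+ * cached state without touching hardware; otherwise the QPC is fetched
+ * through a mailbox command and decoded field by field.
+ */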
+int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_qp_context *context;
+ int tmp_qp_state = 0;
+ int ret = 0;
+ int state;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ mutex_lock(&hr_qp->mutex);
+
+ if (hr_qp->state == IB_QPS_RESET) {
+ qp_attr->qp_state = IB_QPS_RESET;
+ goto done;
+ }
+
+ ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
+ if (ret) {
+ dev_err(dev, "query qpc error\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ state = roce_get_field(context->qpc_bytes_144,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
+ QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
+ tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
+ if (tmp_qp_state == -1) {
+ dev_err(dev, "to_ib_qp_state error\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ hr_qp->state = (u8)tmp_qp_state;
+ qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
+ qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_MTU_M,
+ QP_CONTEXT_QPC_BYTES_48_MTU_S);
+ qp_attr->path_mig_state = IB_MIG_ARMED;
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD)
+ qp_attr->qkey = QKEY_VAL;
+
+ qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
+ QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
+ qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
+ QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
+ QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
+ qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
+ qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
+ ((roce_get_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
+ ((roce_get_bit(context->qpc_bytes_4,
+ QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
+ hr_qp->ibqp.qp_type == IB_QPT_UC) {
+ qp_attr->ah_attr.sl = roce_get_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_SL_M,
+ QP_CONTEXT_QPC_BYTES_156_SL_S);
+ qp_attr->ah_attr.grh.flow_label = roce_get_field(
+ context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
+ QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
+ qp_attr->ah_attr.grh.sgid_index = roce_get_field(
+ context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
+ qp_attr->ah_attr.grh.hop_limit = roce_get_field(
+ context->qpc_bytes_44,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
+ QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
+ qp_attr->ah_attr.grh.traffic_class = roce_get_field(
+ context->qpc_bytes_48,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
+ QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
+
+ memcpy(qp_attr->ah_attr.grh.dgid.raw, context->dgid,
+ sizeof(qp_attr->ah_attr.grh.dgid.raw));
+ }
+
+ qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
+ QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
+ qp_attr->port_num = (u8)roce_get_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
+ QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) + 1;
+ qp_attr->sq_draining = 0;
+ qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
+ QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
+ qp_attr->max_dest_rd_atomic = roce_get_field(context->qpc_bytes_32,
+ QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
+ QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
+ qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
+ QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
+ QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
+ qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
+ qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
+ qp_attr->rnr_retry = context->rnr_retry;
+
+done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+
+ if (!ibqp->uobject) {
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+ } else {
+ qp_attr->cap.max_send_wr = 0;
+ qp_attr->cap.max_send_sge = 0;
+ }
+
+ qp_init_attr->cap = qp_attr->cap;
+
+out:
+ mutex_unlock(&hr_qp->mutex);
+ kfree(context);
+ return ret;
+}
+
+static void hns_roce_v1_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ int is_user)
+{
+ u32 sdbinvcnt;
+ unsigned long end = 0;
+ u32 sdbinvcnt_val;
+ u32 sdbsendptr_val;
+ u32 sdbisusepr_val;
+ struct hns_roce_cq *send_cq, *recv_cq;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
+ if (hr_qp->state != IB_QPS_RESET) {
+ /*
+ * Set the QP to ERR and wait for the hardware
+ * to finish processing all doorbells.
+ */
+ if (hns_roce_v1_qp_modify(hr_dev, NULL,
+ to_hns_roce_state(
+ (enum ib_qp_state)hr_qp->state),
+ HNS_ROCE_QP_STATE_ERR, NULL,
+ hr_qp))
+ dev_err(dev, "modify QP %06lx to ERR failed.\n",
+ hr_qp->qpn);
+
+ /* Record issued doorbell */
+ sdbisusepr_val = roce_read(hr_dev,
+ ROCEE_SDB_ISSUE_PTR_REG);
+ /*
+ * Poll the doorbell processing status until the
+ * hardware has consumed every issued doorbell.
+ */
+ end = msecs_to_jiffies(
+ HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS) + jiffies;
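+ /*
+ * The (short) casts in the poll conditions below compare the
+ * ring pointers modulo 2^16, presumably so the comparison
+ * tolerates counter wrap while hardware catches up.
+ */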
+ do {
+ sdbsendptr_val = roce_read(hr_dev,
+ ROCEE_SDB_SEND_PTR_REG);
+ if (!time_before(jiffies, end)) {
+ dev_err(dev, "destroy qp(0x%lx) timeout!!!",
+ hr_qp->qpn);
+ break;
+ }
+ } while ((short)(roce_get_field(sdbsendptr_val,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) -
+ roce_get_field(sdbisusepr_val,
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
+ ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
+ ) < 0);
+
+ /* Record the current doorbell invalidate count */
+ sdbinvcnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+
+ /* Poll the invalidate count until hardware has retired the doorbells */
+ do {
+ sdbinvcnt_val = roce_read(hr_dev,
+ ROCEE_SDB_INV_CNT_REG);
+ if (!time_before(jiffies, end)) {
+ dev_err(dev, "destroy qp(0x%lx) timeout!!!",
+ hr_qp->qpn);
+ dev_err(dev, "SdbInvCnt = 0x%x\n",
+ sdbinvcnt_val);
+ break;
+ }
+ } while ((short)(roce_get_field(sdbinvcnt_val,
+ ROCEE_SDB_INV_CNT_SDB_INV_CNT_M,
+ ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) -
+ (sdbinvcnt + SDB_INV_CNT_OFFSET)) < 0);
+
+ /* Modify qp to reset before destroying qp */
+ if (hns_roce_v1_qp_modify(hr_dev, NULL,
+ to_hns_roce_state(
+ (enum ib_qp_state)hr_qp->state),
+ HNS_ROCE_QP_STATE_RST, NULL, hr_qp))
+ dev_err(dev, "modify QP %06lx to RESET failed.\n",
+ hr_qp->qpn);
+ }
+ }
+
+ send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
+ recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+
+ hns_roce_lock_cqs(send_cq, recv_cq);
+
+ if (!is_user) {
+ __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
+ to_hr_srq(hr_qp->ibqp.srq) : NULL);
+ if (send_cq != recv_cq)
+ __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
+ }
+
+ hns_roce_qp_remove(hr_dev, hr_qp);
+
+ hns_roce_unlock_cqs(send_cq, recv_cq);
+
+ hns_roce_qp_free(hr_dev, hr_qp);
+
+ /* Not a special QP: release its QPN */
+ if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
+ (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
+ (hr_qp->ibqp.qp_type == IB_QPT_UD))
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+
+ hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
+
+ if (is_user) {
+ ib_umem_release(hr_qp->umem);
+ } else {
+ kfree(hr_qp->sq.wrid);
+ kfree(hr_qp->rq.wrid);
+ hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+ }
+}
+
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ hns_roce_v1_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ kfree(hr_to_hr_sqp(hr_qp));
+ else
+ kfree(hr_qp);
+
+ return 0;
+}
+
+struct hns_roce_v1_priv hr_v1_priv;
+
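+/* v1 hardware operation table registered with the hns_roce core driver. */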
+struct hns_roce_hw hns_roce_hw_v1 = {
+ .reset = hns_roce_v1_reset,
+ .hw_profile = hns_roce_v1_profile,
+ .hw_init = hns_roce_v1_init,
+ .hw_exit = hns_roce_v1_exit,
+ .set_gid = hns_roce_v1_set_gid,
+ .set_mac = hns_roce_v1_set_mac,
+ .set_mtu = hns_roce_v1_set_mtu,
+ .write_mtpt = hns_roce_v1_write_mtpt,
+ .write_cqc = hns_roce_v1_write_cqc,
+ .clear_hem = hns_roce_v1_clear_hem,
+ .modify_qp = hns_roce_v1_modify_qp,
+ .query_qp = hns_roce_v1_query_qp,
+ .destroy_qp = hns_roce_v1_destroy_qp,
+ .post_send = hns_roce_v1_post_send,
+ .post_recv = hns_roce_v1_post_recv,
+ .req_notify_cq = hns_roce_v1_req_notify_cq,
+ .poll_cq = hns_roce_v1_poll_cq,
+ .priv = &hr_v1_priv,
+};
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
new file mode 100644
index 000000000000..539b0a3b92b0
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -0,0 +1,990 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_HW_V1_H
+#define _HNS_ROCE_HW_V1_H
+
+#define CQ_STATE_VALID 2
+
+#define HNS_ROCE_V1_MAX_PD_NUM 0x8000
+#define HNS_ROCE_V1_MAX_CQ_NUM 0x10000
+#define HNS_ROCE_V1_MAX_CQE_NUM 0x8000
+
+#define HNS_ROCE_V1_MAX_QP_NUM 0x40000
+#define HNS_ROCE_V1_MAX_WQE_NUM 0x4000
+
+#define HNS_ROCE_V1_MAX_MTPT_NUM 0x80000
+
+#define HNS_ROCE_V1_MAX_MTT_SEGS 0x100000
+
+#define HNS_ROCE_V1_MAX_QP_INIT_RDMA 128
+#define HNS_ROCE_V1_MAX_QP_DEST_RDMA 128
+
+#define HNS_ROCE_V1_MAX_SQ_DESC_SZ 64
+#define HNS_ROCE_V1_MAX_RQ_DESC_SZ 64
+#define HNS_ROCE_V1_SG_NUM 2
+#define HNS_ROCE_V1_INLINE_SIZE 32
+
+#define HNS_ROCE_V1_UAR_NUM 256
+#define HNS_ROCE_V1_PHY_UAR_NUM 8
+
+#define HNS_ROCE_V1_GID_NUM 16
+
+#define HNS_ROCE_V1_NUM_COMP_EQE 0x8000
+#define HNS_ROCE_V1_NUM_ASYNC_EQE 0x400
+
+#define HNS_ROCE_V1_QPC_ENTRY_SIZE 256
+#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
+#define HNS_ROCE_V1_CQC_ENTRY_SIZE 64
+#define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64
+#define HNS_ROCE_V1_MTT_ENTRY_SIZE 64
+
+#define HNS_ROCE_V1_CQE_ENTRY_SIZE 32
+#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000
+
+#define HNS_ROCE_V1_EXT_RAQ_WF 8
+#define HNS_ROCE_V1_RAQ_ENTRY 64
+#define HNS_ROCE_V1_RAQ_DEPTH 32768
+#define HNS_ROCE_V1_RAQ_SIZE (HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH)
+
+#define HNS_ROCE_V1_SDB_DEPTH 0x400
+#define HNS_ROCE_V1_ODB_DEPTH 0x400
+
+#define HNS_ROCE_V1_DB_RSVD 0x80
+
+#define HNS_ROCE_V1_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
+#define HNS_ROCE_V1_SDB_ALFUL (HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_V1_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
+#define HNS_ROCE_V1_ODB_ALFUL (HNS_ROCE_V1_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+
+#define HNS_ROCE_V1_EXT_SDB_DEPTH 0x4000
+#define HNS_ROCE_V1_EXT_ODB_DEPTH 0x4000
+#define HNS_ROCE_V1_EXT_SDB_ENTRY 16
+#define HNS_ROCE_V1_EXT_ODB_ENTRY 16
+#define HNS_ROCE_V1_EXT_SDB_SIZE \
+ (HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY)
+#define HNS_ROCE_V1_EXT_ODB_SIZE \
+ (HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY)
+
+#define HNS_ROCE_V1_EXT_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
+#define HNS_ROCE_V1_EXT_SDB_ALFUL \
+ (HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_V1_EXT_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
+#define HNS_ROCE_V1_EXT_ODB_ALFUL \
+ (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+
+#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
+
+#define HNS_ROCE_ODB_POLL_MODE 0
+
+#define HNS_ROCE_SDB_NORMAL_MODE 0
+#define HNS_ROCE_SDB_EXTEND_MODE 1
+
+#define HNS_ROCE_ODB_EXTEND_MODE 1
+
+#define KEY_VALID 0x02
+
+#define HNS_ROCE_CQE_QPN_MASK 0x3ffff
+#define HNS_ROCE_CQE_STATUS_MASK 0x1f
+#define HNS_ROCE_CQE_OPCODE_MASK 0xf
+
+#define HNS_ROCE_CQE_SUCCESS 0x00
+#define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR 0x01
+#define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR 0x02
+#define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR 0x03
+#define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR 0x04
+#define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR 0x05
+#define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR 0x06
+#define HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR 0x07
+#define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR 0x08
+#define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR 0x09
+#define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR 0x0a
+#define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR 0x0b
+#define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR 0x0c
+
+#define QP1C_CFGN_OFFSET 0x28
+#define PHY_PORT_OFFSET 0x8
+#define MTPT_IDX_SHIFT 16
+#define ALL_PORT_VAL_OPEN 0x3f
+#define POL_TIME_INTERVAL_VAL 0x80
+#define SLEEP_TIME_INTERVAL 20
+#define SQ_PSN_SHIFT 8
+#define QKEY_VAL 0x80010000
+#define SDB_INV_CNT_OFFSET 8
+
+struct hns_roce_cq_context {
+ u32 cqc_byte_4;
+ u32 cq_bt_l;
+ u32 cqc_byte_12;
+ u32 cur_cqe_ba0_l;
+ u32 cqc_byte_20;
+ u32 cqe_tptr_addr_l;
+ u32 cur_cqe_ba1_l;
+ u32 cqc_byte_32;
+};
+
+#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0
+#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \
+ (((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S)
+
+#define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16
+#define CQ_CONTEXT_CQC_BYTE_4_CQN_M \
+ (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S)
+
+#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0
+#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \
+ (((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S)
+
+#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20
+#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \
+ (((1UL << 4) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S)
+
+#define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24
+#define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \
+ (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S)
+
+#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0
+#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \
+ (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S)
+
+#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16
+#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \
+ (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S)
+
+#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8
+#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \
+ (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S)
+
+#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0
+#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \
+ (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S)
+
+#define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9
+
+#define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8
+#define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14
+#define CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15
+
+#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16
+#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \
+ (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S)
+
+struct hns_roce_cqe {
+ u32 cqe_byte_4;
+ union {
+ u32 r_key;
+ u32 immediate_data;
+ };
+ u32 byte_cnt;
+ u32 cqe_byte_16;
+ u32 cqe_byte_20;
+ u32 s_mac_l;
+ u32 cqe_byte_28;
+ u32 reserved;
+};
+
+#define CQE_BYTE_4_OWNER_S 7
+#define CQE_BYTE_4_SQ_RQ_FLAG_S 14
+
+#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8
+#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \
+ (((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S)
+
+#define CQE_BYTE_4_WQE_INDEX_S 16
+#define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S)
+
+#define CQE_BYTE_4_OPERATION_TYPE_S 0
+#define CQE_BYTE_4_OPERATION_TYPE_M \
+ (((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S)
+
+#define CQE_BYTE_4_IMM_INDICATOR_S 15
+
+#define CQE_BYTE_16_LOCAL_QPN_S 0
+#define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S)
+
+#define CQE_BYTE_20_PORT_NUM_S 26
+#define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S)
+
+#define CQE_BYTE_20_SL_S 24
+#define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S)
+
+#define CQE_BYTE_20_REMOTE_QPN_S 0
+#define CQE_BYTE_20_REMOTE_QPN_M \
+ (((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S)
+
+#define CQE_BYTE_20_GRH_PRESENT_S 29
+
+#define CQE_BYTE_28_P_KEY_IDX_S 16
+#define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S)
+
+#define CQ_DB_REQ_NOT_SOL 0
+#define CQ_DB_REQ_NOT (1 << 16)
+
+struct hns_roce_v1_mpt_entry {
+ u32 mpt_byte_4;
+ u32 pbl_addr_l;
+ u32 mpt_byte_12;
+ u32 virt_addr_l;
+ u32 virt_addr_h;
+ u32 length;
+ u32 mpt_byte_28;
+ u32 pa0_l;
+ u32 mpt_byte_36;
+ u32 mpt_byte_40;
+ u32 mpt_byte_44;
+ u32 mpt_byte_48;
+ u32 pa4_l;
+ u32 mpt_byte_56;
+ u32 mpt_byte_60;
+ u32 mpt_byte_64;
+};
+
+#define MPT_BYTE_4_KEY_STATE_S 0
+#define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S)
+
+#define MPT_BYTE_4_KEY_S 8
+#define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S)
+
+#define MPT_BYTE_4_PAGE_SIZE_S 16
+#define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S)
+
+#define MPT_BYTE_4_MW_TYPE_S 20
+
+#define MPT_BYTE_4_MW_BIND_ENABLE_S 21
+
+#define MPT_BYTE_4_OWN_S 22
+
+#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24
+#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \
+ (((1UL << 2) - 1) << MPT_BYTE_4_MEMORY_LOCATION_TYPE_S)
+
+#define MPT_BYTE_4_REMOTE_ATOMIC_S 26
+#define MPT_BYTE_4_LOCAL_WRITE_S 27
+#define MPT_BYTE_4_REMOTE_WRITE_S 28
+#define MPT_BYTE_4_REMOTE_READ_S 29
+#define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30
+#define MPT_BYTE_4_ADDRESS_TYPE_S 31
+
+#define MPT_BYTE_12_PBL_ADDR_H_S 0
+#define MPT_BYTE_12_PBL_ADDR_H_M \
+ (((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S)
+
+#define MPT_BYTE_12_MW_BIND_COUNTER_S 17
+#define MPT_BYTE_12_MW_BIND_COUNTER_M \
+ (((1UL << 15) - 1) << MPT_BYTE_12_MW_BIND_COUNTER_S)
+
+#define MPT_BYTE_28_PD_S 0
+#define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S)
+
+#define MPT_BYTE_28_L_KEY_IDX_L_S 16
+#define MPT_BYTE_28_L_KEY_IDX_L_M \
+ (((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S)
+
+#define MPT_BYTE_36_PA0_H_S 0
+#define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S)
+
+#define MPT_BYTE_36_PA1_L_S 8
+#define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S)
+
+#define MPT_BYTE_40_PA1_H_S 0
+#define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S)
+
+#define MPT_BYTE_40_PA2_L_S 16
+#define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S)
+
+#define MPT_BYTE_44_PA2_H_S 0
+#define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S)
+
+#define MPT_BYTE_44_PA3_L_S 24
+#define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S)
+
+#define MPT_BYTE_48_PA3_H_S 0
+#define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S)
+
+#define MPT_BYTE_56_PA4_H_S 0
+#define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S)
+
+#define MPT_BYTE_56_PA5_L_S 8
+#define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S)
+
+#define MPT_BYTE_60_PA5_H_S 0
+#define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S)
+
+#define MPT_BYTE_60_PA6_L_S 16
+#define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S)
+
+#define MPT_BYTE_64_PA6_H_S 0
+#define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S)
+
+#define MPT_BYTE_64_L_KEY_IDX_H_S 24
+#define MPT_BYTE_64_L_KEY_IDX_H_M \
+ (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S)
+
+struct hns_roce_wqe_ctrl_seg {
+ __be32 sgl_pa_h;
+ __be32 flag;
+ __be32 imm_data;
+ __be32 msg_length;
+};
+
+struct hns_roce_wqe_data_seg {
+ __be64 addr;
+ __be32 lkey;
+ __be32 len;
+};
+
+struct hns_roce_wqe_raddr_seg {
+ __be32 rkey;
+ __be32 len; /* reserved */
+ __be64 raddr;
+};
+
+struct hns_roce_rq_wqe_ctrl {
+ u32 rwqe_byte_4;
+ u32 rocee_sgl_ba_l;
+ u32 rwqe_byte_12;
+ u32 reserved[5];
+};
+
+#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16
+#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \
+ (((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S)
+
+#define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000
+
+#define GID_LEN 16
+
+struct hns_roce_ud_send_wqe {
+ u32 dmac_h;
+ u32 u32_8;
+ u32 immediate_data;
+
+ u32 u32_16;
+ union {
+ unsigned char dgid[GID_LEN];
+ struct {
+ u32 u32_20;
+ u32 u32_24;
+ u32 u32_28;
+ u32 u32_32;
+ };
+ };
+
+ u32 u32_36;
+ u32 u32_40;
+
+ u32 va0_l;
+ u32 va0_h;
+ u32 l_key0;
+
+ u32 va1_l;
+ u32 va1_h;
+ u32 l_key1;
+};
+
+#define UD_SEND_WQE_U32_4_DMAC_0_S 0
+#define UD_SEND_WQE_U32_4_DMAC_0_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S)
+
+#define UD_SEND_WQE_U32_4_DMAC_1_S 8
+#define UD_SEND_WQE_U32_4_DMAC_1_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S)
+
+#define UD_SEND_WQE_U32_4_DMAC_2_S 16
+#define UD_SEND_WQE_U32_4_DMAC_2_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S)
+
+#define UD_SEND_WQE_U32_4_DMAC_3_S 24
+#define UD_SEND_WQE_U32_4_DMAC_3_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S)
+
+#define UD_SEND_WQE_U32_8_DMAC_4_S 0
+#define UD_SEND_WQE_U32_8_DMAC_4_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S)
+
+#define UD_SEND_WQE_U32_8_DMAC_5_S 8
+#define UD_SEND_WQE_U32_8_DMAC_5_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
+
+#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
+#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \
+ (((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
+
+#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24
+#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \
+ (((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S)
+
+#define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31
+
+#define UD_SEND_WQE_U32_16_DEST_QP_S 0
+#define UD_SEND_WQE_U32_16_DEST_QP_M \
+ (((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S)
+
+#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24
+#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S)
+
+#define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0
+#define UD_SEND_WQE_U32_36_FLOW_LABEL_M \
+ (((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S)
+
+#define UD_SEND_WQE_U32_36_PRIORITY_S 20
+#define UD_SEND_WQE_U32_36_PRIORITY_M \
+ (((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S)
+
+#define UD_SEND_WQE_U32_36_SGID_INDEX_S 24
+#define UD_SEND_WQE_U32_36_SGID_INDEX_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S)
+
+#define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0
+#define UD_SEND_WQE_U32_40_HOP_LIMIT_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S)
+
+#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8
+#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \
+ (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S)
+
+struct hns_roce_sqp_context {
+ u32 qp1c_bytes_4;
+ u32 sq_rq_bt_l;
+ u32 qp1c_bytes_12;
+ u32 qp1c_bytes_16;
+ u32 qp1c_bytes_20;
+ u32 qp1c_bytes_28;
+ u32 cur_rq_wqe_ba_l;
+ u32 qp1c_bytes_32;
+ u32 cur_sq_wqe_ba_l;
+ u32 qp1c_bytes_40;
+};
+
+#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
+#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \
+ (((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
+
+#define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12
+#define QP1C_BYTES_4_RQ_WQE_SHIFT_M \
+ (((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S)
+
+#define QP1C_BYTES_4_PD_S 16
+#define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S)
+
+#define QP1C_BYTES_12_SQ_RQ_BT_H_S 0
+#define QP1C_BYTES_12_SQ_RQ_BT_H_M \
+ (((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S)
+
+#define QP1C_BYTES_16_RQ_HEAD_S 0
+#define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S)
+
+#define QP1C_BYTES_16_PORT_NUM_S 16
+#define QP1C_BYTES_16_PORT_NUM_M \
+ (((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S)
+
+#define QP1C_BYTES_16_SIGNALING_TYPE_S 27
+#define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28
+#define QP1C_BYTES_16_RQ_BA_FLG_S 29
+#define QP1C_BYTES_16_SQ_BA_FLG_S 30
+#define QP1C_BYTES_16_QP1_ERR_S 31
+
+#define QP1C_BYTES_20_SQ_HEAD_S 0
+#define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S)
+
+#define QP1C_BYTES_20_PKEY_IDX_S 16
+#define QP1C_BYTES_20_PKEY_IDX_M \
+ (((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S)
+
+#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0
+#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S)
+
+#define QP1C_BYTES_28_RQ_CUR_IDX_S 16
+#define QP1C_BYTES_28_RQ_CUR_IDX_M \
+ (((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S)
+
+#define QP1C_BYTES_32_TX_CQ_NUM_S 0
+#define QP1C_BYTES_32_TX_CQ_NUM_M \
+ (((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S)
+
+#define QP1C_BYTES_32_RX_CQ_NUM_S 16
+#define QP1C_BYTES_32_RX_CQ_NUM_M \
+ (((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S)
+
+#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0
+#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S)
+
+#define QP1C_BYTES_40_SQ_CUR_IDX_S 16
+#define QP1C_BYTES_40_SQ_CUR_IDX_M \
+ (((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S)
+
+#define HNS_ROCE_WQE_INLINE (1UL<<31)
+#define HNS_ROCE_WQE_SE (1UL<<30)
+
+#define HNS_ROCE_WQE_SGE_NUM_BIT 24
+#define HNS_ROCE_WQE_IMM (1UL<<23)
+#define HNS_ROCE_WQE_FENCE (1UL<<21)
+#define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20)
+
+#define HNS_ROCE_WQE_OPCODE_SEND (0<<16)
+#define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16)
+#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16)
+#define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16)
+#define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16)
+#define HNS_ROCE_WQE_OPCODE_MASK (15<<16)
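+
+/*
+ * A send WQE's ctrl->flag word is built by OR-ing exactly one OPCODE
+ * value with the modifier bits above, e.g. (sketch only; endianness
+ * conversion is left to the WQE builder):
+ *
+ *   ctrl->flag = HNS_ROCE_WQE_OPCODE_SEND | HNS_ROCE_WQE_CQ_NOTIFY;
+ */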
+
+struct hns_roce_qp_context {
+ u32 qpc_bytes_4;
+ u32 qpc_bytes_8;
+ u32 qpc_bytes_12;
+ u32 qpc_bytes_16;
+ u32 sq_rq_bt_l;
+ u32 qpc_bytes_24;
+ u32 irrl_ba_l;
+ u32 qpc_bytes_32;
+ u32 qpc_bytes_36;
+ u32 dmac_l;
+ u32 qpc_bytes_44;
+ u32 qpc_bytes_48;
+ u8 dgid[16];
+ u32 qpc_bytes_68;
+ u32 cur_rq_wqe_ba_l;
+ u32 qpc_bytes_76;
+ u32 rx_rnr_time;
+ u32 qpc_bytes_84;
+ u32 qpc_bytes_88;
+ union {
+ u32 rx_sge_len;
+ u32 dma_length;
+ };
+ union {
+ u32 rx_sge_num;
+ u32 rx_send_pktn;
+ u32 r_key;
+ };
+ u32 va_l;
+ u32 va_h;
+ u32 qpc_bytes_108;
+ u32 qpc_bytes_112;
+ u32 rx_cur_sq_wqe_ba_l;
+ u32 qpc_bytes_120;
+ u32 qpc_bytes_124;
+ u32 qpc_bytes_128;
+ u32 qpc_bytes_132;
+ u32 qpc_bytes_136;
+ u32 qpc_bytes_140;
+ u32 qpc_bytes_144;
+ u32 qpc_bytes_148;
+ union {
+ u32 rnr_retry;
+ u32 ack_time;
+ };
+ u32 qpc_bytes_156;
+ u32 pkt_use_len;
+ u32 qpc_bytes_164;
+ u32 qpc_bytes_168;
+ union {
+ u32 sge_use_len;
+ u32 pa_use_len;
+ };
+ u32 qpc_bytes_176;
+ u32 qpc_bytes_180;
+ u32 tx_cur_sq_wqe_ba_l;
+ u32 qpc_bytes_188;
+ u32 rvd21;
+};
+
+#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0
+#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S)
+
+#define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3
+#define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4
+#define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5
+#define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6
+#define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7
+
+#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8
+#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \
+ (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S)
+
+#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12
+#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \
+ (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S)
+
+#define QP_CONTEXT_QPC_BYTES_4_PD_S 16
+#define QP_CONTEXT_QPC_BYTES_4_PD_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S)
+
+#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0
+#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S)
+
+#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16
+#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S)
+
+#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0
+#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S)
+
+#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0
+#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S)
+
+#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0
+#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \
+ (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18
+#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)
+
+#define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23
+
+#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0
+#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \
+ (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18
+#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S)
+
+#define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20
+#define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21
+#define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22
+#define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23
+
+#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24
+#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S)
+
+#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0
+#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S)
+
+#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24
+#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0
+#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16
+#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S)
+
+#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24
+#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S)
+
+#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0
+#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \
+ (((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S)
+
+#define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20
+#define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S)
+
+#define QP_CONTEXT_QPC_BYTES_48_MTU_S 28
+#define QP_CONTEXT_QPC_BYTES_48_MTU_M \
+ (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S)
+
+#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0
+#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S)
+
+#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0
+#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8
+#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24
+#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S)
+
+#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0
+#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24
+#define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25
+
+#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26
+#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \
+ (((1UL << 2) - 1) << \
+ QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S)
+
+#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29
+#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S)
+
+#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24
+#define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25
+
+#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24
+#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S)
+
+#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0
+#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0
+#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16
+#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S)
+
+#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0
+#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24
+
+#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25
+#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S)
+
+#define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27
+
+#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24
+#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S)
+
+#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24
+#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S)
+
+#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0
+#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16
+#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31
+
+#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0
+#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S)
+
+#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0
+#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S)
+
+#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2
+#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S)
+
+#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5
+#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S)
+
+#define QP_CONTEXT_QPC_BYTES_148_LSN_S 8
+#define QP_CONTEXT_QPC_BYTES_148_LSN_M \
+ (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0
+#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3
+#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8
+#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11
+#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \
+ (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_SL_S 14
+#define QP_CONTEXT_QPC_BYTES_156_SL_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16
+#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S)
+
+#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24
+#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S)
+
+#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24
+#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \
+ (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S)
+
+#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0
+#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \
+ (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S)
+
+#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24
+#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S)
+
+#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26
+#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \
+ (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S)
+
+#define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28
+#define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29
+#define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30
+
+#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0
+#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0
+#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S)
+
+#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S)
+
+#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0
+#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \
+ (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S)
+
+#define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8
+
+#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16
+#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
+ (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
+
+struct hns_roce_rq_db {
+ u32 u32_4;
+ u32 u32_8;
+};
+
+#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0
+#define RQ_DOORBELL_U32_4_RQ_HEAD_M \
+ (((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S)
+
+#define RQ_DOORBELL_U32_8_QPN_S 0
+#define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S)
+
+#define RQ_DOORBELL_U32_8_CMD_S 28
+#define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S)
+
+#define RQ_DOORBELL_U32_8_HW_SYNC_S 31
+
+struct hns_roce_sq_db {
+ u32 u32_4;
+ u32 u32_8;
+};
+
+#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0
+#define SQ_DOORBELL_U32_4_SQ_HEAD_M \
+ (((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
+
+#define SQ_DOORBELL_U32_4_PORT_S 18
+#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
+
+#define SQ_DOORBELL_U32_8_QPN_S 0
+#define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S)
+
+#define SQ_DOORBELL_HW_SYNC_S 31
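+
+/*
+ * A sketch of how a doorbell is assembled from the fields above,
+ * assuming the roce_set_field()/roce_set_bit() helpers declared in
+ * hns_roce_common.h (the exact ringing sequence is hardware specific):
+ *
+ * struct hns_roce_sq_db sq_db = { 0 };
+ *
+ * roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
+ * SQ_DOORBELL_U32_4_SQ_HEAD_S, sq_head);
+ * roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
+ * SQ_DOORBELL_U32_8_QPN_S, qpn);
+ * roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
+ */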
+
+struct hns_roce_ext_db {
+ int esdb_dep;
+ int eodb_dep;
+ struct hns_roce_buf_list *sdb_buf_list;
+ struct hns_roce_buf_list *odb_buf_list;
+};
+
+struct hns_roce_db_table {
+ int sdb_ext_mod;
+ int odb_ext_mod;
+ struct hns_roce_ext_db *ext_db;
+};
+
+struct hns_roce_bt_table {
+ struct hns_roce_buf_list qpc_buf;
+ struct hns_roce_buf_list mtpt_buf;
+ struct hns_roce_buf_list cqc_buf;
+};
+
+struct hns_roce_v1_priv {
+ struct hns_roce_db_table db_table;
+ struct hns_roce_raq_table raq_table;
+ struct hns_roce_bt_table bt_table;
+};
+
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
+#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
new file mode 100644
index 000000000000..764e35a54457
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -0,0 +1,1168 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/acpi.h>
+#include <linux/of_platform.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_user.h"
+#include "hns_roce_hem.h"
+
+/**
+ * hns_roce_addrconf_ifid_eui48 - Get default gid.
+ * @eui: eui buffer (8 bytes, gid[8..15]).
+ * @vlan_id: VLAN ID, or 0xffff if none.
+ * @dev: net device.
+ * Description:
+ * Convert the MAC address to a GID:
+ * gid[0..7] = fe80 0000 0000 0000
+ * gid[8] = mac[0] ^ 2
+ * gid[9] = mac[1]
+ * gid[10] = mac[2]
+ * gid[11] = ff (or the VLAN ID high byte when a VLAN ID < 0x1000 is set)
+ * gid[12] = fe (or the VLAN ID low byte)
+ * gid[13] = mac[3]
+ * gid[14] = mac[4]
+ * gid[15] = mac[5]
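+ * e.g. MAC 00:11:22:33:44:55 with no VLAN gives the default gid
+ * fe80:0000:0000:0000:0211:22ff:fe33:4455.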
+ */
+static void hns_roce_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+ struct net_device *dev)
+{
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+ if (vlan_id < 0x1000) {
+ eui[3] = vlan_id >> 8;
+ eui[4] = vlan_id & 0xff;
+ } else {
+ eui[3] = 0xff;
+ eui[4] = 0xfe;
+ }
+ eui[0] ^= 2;
+}
+
+static void hns_roce_make_default_gid(struct net_device *dev, union ib_gid *gid)
+{
+ memset(gid, 0, sizeof(*gid));
+ gid->raw[0] = 0xFE;
+ gid->raw[1] = 0x80;
+ hns_roce_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
+
+/**
+ * hns_get_gid_index - Get gid index.
+ * @hr_dev: pointer to structure hns_roce_dev.
+ * @port: port, value range: 0 ~ MAX
+ * @gid_index: gid_index, value range: 0 ~ MAX
+ * Description:
+ * N ports share the gid table; entries are interleaved as follows:
+ * GID[0][0], GID[1][0], ..., GID[N - 1][0],
+ * GID[0][1], GID[1][1], ..., GID[N - 1][1],
+ * and so on.
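+ * For example, with N = 2 ports, gid_index 3 of port 1 maps to table
+ * slot 3 * 2 + 1 = 7.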
+ */
+int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+{
+ return gid_index * hr_dev->caps.num_ports + port;
+}
+
+static int hns_roce_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ union ib_gid *gid)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u8 gid_idx = 0;
+
+ if (gid_index >= hr_dev->caps.gid_table_len[port]) {
+ dev_err(dev, "gid_index %d illegal, port %d gid range: 0~%d\n",
+ gid_index, port, hr_dev->caps.gid_table_len[port] - 1);
+ return -EINVAL;
+ }
+
+ gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
+
+ if (!memcmp(gid, &hr_dev->iboe.gid_table[gid_idx], sizeof(*gid)))
+ return -EINVAL;
+
+ memcpy(&hr_dev->iboe.gid_table[gid_idx], gid, sizeof(*gid));
+
+ hr_dev->hw->set_gid(hr_dev, port, gid_index, gid);
+
+ return 0;
+}
+
+static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+{
+ u8 phy_port;
+ u32 i = 0;
+
+ if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
+ return;
+
+ for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
+ hr_dev->dev_addr[port][i] = addr[i];
+
+ phy_port = hr_dev->iboe.phy_port[port];
+ hr_dev->hw->set_mac(hr_dev, phy_port, addr);
+}
+
+static void hns_roce_set_mtu(struct hns_roce_dev *hr_dev, u8 port, int mtu)
+{
+ u8 phy_port = hr_dev->iboe.phy_port[port];
+ enum ib_mtu tmp;
+
+ tmp = iboe_get_mtu(mtu);
+ if (!tmp)
+ tmp = IB_MTU_256;
+
+ hr_dev->hw->set_mtu(hr_dev, phy_port, tmp);
+}
+
+static void hns_roce_update_gids(struct hns_roce_dev *hr_dev, int port)
+{
+ struct ib_event event;
+
+ /* Refresh gid in ib_cache */
+ event.device = &hr_dev->ib_dev;
+ event.element.port_num = port + 1;
+ event.event = IB_EVENT_GID_CHANGE;
+ ib_dispatch_event(&event);
+}
+
+static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
+ unsigned long event)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct net_device *netdev;
+ unsigned long flags;
+ union ib_gid gid;
+ int ret = 0;
+
+ netdev = hr_dev->iboe.netdevs[port];
+ if (!netdev) {
+ dev_err(dev, "port(%d) can't find netdev\n", port);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ case NETDEV_REGISTER:
+ case NETDEV_CHANGEADDR:
+ hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
+ hns_roce_make_default_gid(netdev, &gid);
+ ret = hns_roce_set_gid(hr_dev, port, 0, &gid);
+ if (!ret)
+ hns_roce_update_gids(hr_dev, port);
+ break;
+ case NETDEV_DOWN:
+ /*
+ * The v1 engine only supports closing all ports together,
+ * so nothing is done per port here.
+ */
+ break;
+ default:
+ dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
+ break;
+ }
+
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+ return ret;
+}
+
+static int hns_roce_netdev_event(struct notifier_block *self,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct hns_roce_ib_iboe *iboe = NULL;
+ struct hns_roce_dev *hr_dev = NULL;
+ u8 port = 0;
+ int ret = 0;
+
+ hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
+ iboe = &hr_dev->iboe;
+
+ for (port = 0; port < hr_dev->caps.num_ports; port++) {
+ if (dev == iboe->netdevs[port]) {
+ ret = handle_en_event(hr_dev, port, event);
+ if (ret)
+ return NOTIFY_DONE;
+ break;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void hns_roce_addr_event(int event, struct net_device *event_netdev,
+ struct hns_roce_dev *hr_dev, union ib_gid *gid)
+{
+ struct hns_roce_ib_iboe *iboe = NULL;
+ int gid_table_len = 0;
+ unsigned long flags;
+ union ib_gid zgid;
+ u8 gid_idx = 0;
+ u8 port = 0;
+ int i = 0;
+ int free;
+ struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
+ rdma_vlan_dev_real_dev(event_netdev) :
+ event_netdev;
+
+ if (event != NETDEV_UP && event != NETDEV_DOWN)
+ return;
+
+ iboe = &hr_dev->iboe;
+ while (port < hr_dev->caps.num_ports) {
+ if (real_dev == iboe->netdevs[port])
+ break;
+ port++;
+ }
+
+ if (port >= hr_dev->caps.num_ports) {
+ dev_dbg(&hr_dev->pdev->dev, "can't find netdev\n");
+ return;
+ }
+
+ memset(zgid.raw, 0, sizeof(zgid.raw));
+ free = -1;
+ gid_table_len = hr_dev->caps.gid_table_len[port];
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+ for (i = 0; i < gid_table_len; i++) {
+ gid_idx = hns_get_gid_index(hr_dev, port, i);
+ if (!memcmp(gid->raw, iboe->gid_table[gid_idx].raw,
+ sizeof(gid->raw)))
+ break;
+ if (free < 0 && !memcmp(zgid.raw,
+ iboe->gid_table[gid_idx].raw, sizeof(zgid.raw)))
+ free = i;
+ }
+
+ if (i >= gid_table_len) {
+ if (free < 0) {
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+ dev_dbg(&hr_dev->pdev->dev,
+ "gid_index overflow, port(%d)\n", port);
+ return;
+ }
+ if (!hns_roce_set_gid(hr_dev, port, free, gid))
+ hns_roce_update_gids(hr_dev, port);
+ } else if (event == NETDEV_DOWN) {
+ if (!hns_roce_set_gid(hr_dev, port, i, &zgid))
+ hns_roce_update_gids(hr_dev, port);
+ }
+
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+}
+
+static int hns_roce_inet_event(struct notifier_block *self, unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct hns_roce_dev *hr_dev;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ union ib_gid gid;
+
+ ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
+
+ hr_dev = container_of(self, struct hns_roce_dev, iboe.nb_inet);
+
+ hns_roce_addr_event(event, dev, hr_dev, &gid);
+
+ return NOTIFY_DONE;
+}
+
+static int hns_roce_setup_mtu_gids(struct hns_roce_dev *hr_dev)
+{
+ struct in_ifaddr *ifa_list = NULL;
+ union ib_gid gid = {{0} };
+ u32 ipaddr = 0;
+ int index = 0;
+ int ret = 0;
+ u8 i = 0;
+
+ for (i = 0; i < hr_dev->caps.num_ports; i++) {
+ hns_roce_set_mtu(hr_dev, i,
+ ib_mtu_enum_to_int(hr_dev->caps.max_mtu));
+ hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
+
+ if (hr_dev->iboe.netdevs[i]->ip_ptr) {
+ ifa_list = hr_dev->iboe.netdevs[i]->ip_ptr->ifa_list;
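+ /* gid index 0 holds the default gid; IP-derived gids start at 1 */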
+ index = 1;
+ while (ifa_list) {
+ ipaddr = ifa_list->ifa_address;
+ ipv6_addr_set_v4mapped(ipaddr,
+ (struct in6_addr *)&gid);
+ ret = hns_roce_set_gid(hr_dev, i, index, &gid);
+ if (ret)
+ break;
+ index++;
+ ifa_list = ifa_list->ifa_next;
+ }
+ hns_roce_update_gids(hr_dev, i);
+ }
+ }
+
+ return ret;
+}
+
+static int hns_roce_query_device(struct ib_device *ib_dev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+ memset(props, 0, sizeof(*props));
+
+ props->sys_image_guid = hr_dev->sys_image_guid;
+ props->max_mr_size = (u64)(~(0ULL));
+ props->page_size_cap = hr_dev->caps.page_size_cap;
+ props->vendor_id = hr_dev->vendor_id;
+ props->vendor_part_id = hr_dev->vendor_part_id;
+ props->hw_ver = hr_dev->hw_rev;
+ props->max_qp = hr_dev->caps.num_qps;
+ props->max_qp_wr = hr_dev->caps.max_wqes;
+ props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_RC_RNR_NAK_GEN;
+ props->max_sge = hr_dev->caps.max_sq_sg;
+ props->max_sge_rd = 1;
+ props->max_cq = hr_dev->caps.num_cqs;
+ props->max_cqe = hr_dev->caps.max_cqes;
+ props->max_mr = hr_dev->caps.num_mtpts;
+ props->max_pd = hr_dev->caps.num_pds;
+ props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
+ props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->max_pkeys = 1;
+ props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
+
+ return 0;
+}
+
+static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
+ u8 port_num)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct net_device *ndev;
+
+ if (port_num < 1 || port_num > hr_dev->caps.num_ports)
+ return NULL;
+
+ rcu_read_lock();
+
+ ndev = hr_dev->iboe.netdevs[port_num - 1];
+ if (ndev)
+ dev_hold(ndev);
+
+ rcu_read_unlock();
+ return ndev;
+}
+
+static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
+ struct ib_port_attr *props)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct net_device *net_dev;
+ unsigned long flags;
+ enum ib_mtu mtu;
+ u8 port;
+
+ assert(port_num > 0);
+ port = port_num - 1;
+
+ memset(props, 0, sizeof(*props));
+
+ props->max_mtu = hr_dev->caps.max_mtu;
+ props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP |
+ IB_PORT_BOOT_MGMT_SUP;
+ props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
+ props->pkey_tbl_len = 1;
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = 1;
+
+ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+ net_dev = hr_dev->iboe.netdevs[port];
+ if (!net_dev) {
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+ dev_err(dev, "find netdev %d failed!\r\n", port);
+ return -EINVAL;
+ }
+
+ mtu = iboe_get_mtu(net_dev->mtu);
+ props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
+ props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3;
+
+ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+ return 0;
+}
+
+static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
+ union ib_gid *gid)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct device *dev = &hr_dev->pdev->dev;
+ u8 gid_idx = 0;
+ u8 port;
+
+ if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
+ index >= hr_dev->caps.gid_table_len[port_num - 1]) {
+ dev_err(dev,
+ "port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
+ port_num, index, hr_dev->caps.num_ports,
+ hr_dev->caps.gid_table_len[port_num - 1] - 1);
+ return -EINVAL;
+ }
+
+ port = port_num - 1;
+ gid_idx = hns_get_gid_index(hr_dev, port, index);
+ if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
+ dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
+ port_num, index, HNS_ROCE_MAX_GID_NUM);
+ return -EINVAL;
+ }
+
+ memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
+ HNS_ROCE_GID_SIZE);
+
+ return 0;
+}
+
+static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
+ u16 *pkey)
+{
+ *pkey = PKEY_ID;
+
+ return 0;
+}
+
+static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
+ struct ib_device_modify *props)
+{
+ unsigned long flags;
+
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
+ memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
+ spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
+ }
+
+ return 0;
+}
+
+static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
+ struct ib_port_modify *props)
+{
+ return 0;
+}
+
+static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
+ struct ib_udata *udata)
+{
+ int ret = 0;
+ struct hns_roce_ucontext *context;
+ struct hns_roce_ib_alloc_ucontext_resp resp;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+ resp.qp_tab_size = hr_dev->caps.num_qps;
+
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hns_roce_uar_alloc(hr_dev, &context->uar);
+ if (ret)
+ goto error_fail_uar_alloc;
+
+ ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (ret)
+ goto error_fail_copy_to_udata;
+
+ return &context->ibucontext;
+
+error_fail_copy_to_udata:
+ hns_roce_uar_free(hr_dev, &context->uar);
+
+error_fail_uar_alloc:
+ kfree(context);
+
+ return ERR_PTR(ret);
+}
+
+static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+
+ hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
+ kfree(context);
+
+ return 0;
+}
+
+static int hns_roce_mmap(struct ib_ucontext *context,
+ struct vm_area_struct *vma)
+{
+ if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
+ return -EINVAL;
+
+ if (vma->vm_pgoff == 0) {
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ to_hr_ucontext(context)->uar.pfn,
+ PAGE_SIZE, vma->vm_page_prot))
+ return -EAGAIN;
+
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int ret;
+
+ ret = hns_roce_query_port(ib_dev, port_num, &attr);
+ if (ret)
+ return ret;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
+
+ unregister_inetaddr_notifier(&iboe->nb_inet);
+ unregister_netdevice_notifier(&iboe->nb);
+ ib_unregister_device(&hr_dev->ib_dev);
+}
+
+static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ struct hns_roce_ib_iboe *iboe = NULL;
+ struct ib_device *ib_dev = NULL;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ iboe = &hr_dev->iboe;
+ spin_lock_init(&iboe->lock);
+
+ ib_dev = &hr_dev->ib_dev;
+ strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
+
+ ib_dev->owner = THIS_MODULE;
+ ib_dev->node_type = RDMA_NODE_IB_CA;
+ ib_dev->dma_device = dev;
+
+ ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
+ ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
+ ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
+ ib_dev->uverbs_abi_ver = 1;
+ ib_dev->uverbs_cmd_mask =
+ (1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ULL << IB_USER_VERBS_CMD_REG_MR) |
+ (1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
+
+ /* HCA, device and port queries */
+ ib_dev->modify_device = hns_roce_modify_device;
+ ib_dev->query_device = hns_roce_query_device;
+ ib_dev->query_port = hns_roce_query_port;
+ ib_dev->modify_port = hns_roce_modify_port;
+ ib_dev->get_link_layer = hns_roce_get_link_layer;
+ ib_dev->get_netdev = hns_roce_get_netdev;
+ ib_dev->query_gid = hns_roce_query_gid;
+ ib_dev->query_pkey = hns_roce_query_pkey;
+ ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
+ ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext;
+ ib_dev->mmap = hns_roce_mmap;
+
+ /* PD */
+ ib_dev->alloc_pd = hns_roce_alloc_pd;
+ ib_dev->dealloc_pd = hns_roce_dealloc_pd;
+
+ /* AH */
+ ib_dev->create_ah = hns_roce_create_ah;
+ ib_dev->query_ah = hns_roce_query_ah;
+ ib_dev->destroy_ah = hns_roce_destroy_ah;
+
+ /* QP */
+ ib_dev->create_qp = hns_roce_create_qp;
+ ib_dev->modify_qp = hns_roce_modify_qp;
+ ib_dev->query_qp = hr_dev->hw->query_qp;
+ ib_dev->destroy_qp = hr_dev->hw->destroy_qp;
+ ib_dev->post_send = hr_dev->hw->post_send;
+ ib_dev->post_recv = hr_dev->hw->post_recv;
+
+ /* CQ */
+ ib_dev->create_cq = hns_roce_ib_create_cq;
+ ib_dev->destroy_cq = hns_roce_ib_destroy_cq;
+ ib_dev->req_notify_cq = hr_dev->hw->req_notify_cq;
+ ib_dev->poll_cq = hr_dev->hw->poll_cq;
+
+ /* MR */
+ ib_dev->get_dma_mr = hns_roce_get_dma_mr;
+ ib_dev->reg_user_mr = hns_roce_reg_user_mr;
+ ib_dev->dereg_mr = hns_roce_dereg_mr;
+
+ /* OTHERS */
+ ib_dev->get_port_immutable = hns_roce_port_immutable;
+
+ ret = ib_register_device(ib_dev, NULL);
+ if (ret) {
+ dev_err(dev, "ib_register_device failed!\n");
+ return ret;
+ }
+
+ ret = hns_roce_setup_mtu_gids(hr_dev);
+ if (ret) {
+ dev_err(dev, "roce_setup_mtu_gids failed!\n");
+ goto error_failed_setup_mtu_gids;
+ }
+
+ iboe->nb.notifier_call = hns_roce_netdev_event;
+ ret = register_netdevice_notifier(&iboe->nb);
+ if (ret) {
+ dev_err(dev, "register_netdevice_notifier failed!\n");
+ goto error_failed_setup_mtu_gids;
+ }
+
+ iboe->nb_inet.notifier_call = hns_roce_inet_event;
+ ret = register_inetaddr_notifier(&iboe->nb_inet);
+ if (ret) {
+ dev_err(dev, "register inet addr notifier failed!\n");
+ goto error_failed_register_inetaddr_notifier;
+ }
+
+ return 0;
+
+error_failed_register_inetaddr_notifier:
+ unregister_netdevice_notifier(&iboe->nb);
+
+error_failed_setup_mtu_gids:
+ ib_unregister_device(ib_dev);
+
+ return ret;
+}
+
+static const struct of_device_id hns_roce_of_match[] = {
+ { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hns_roce_of_match);
+
+static const struct acpi_device_id hns_roce_acpi_match[] = {
+ { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
+
+static int hns_roce_node_match(struct device *dev, void *fwnode)
+{
+ return dev->fwnode == fwnode;
+}
+
+static struct
+platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ /* get the 'device' corresponding to the matching 'fwnode' */
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, hns_roce_node_match);
+ /* get the platform device */
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
+{
+ int i;
+ int ret;
+ u8 phy_port;
+ int port_cnt = 0;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct device_node *net_node;
+ struct net_device *netdev = NULL;
+ struct platform_device *pdev = NULL;
+ struct resource *res;
+
+ /* check if we are compatible with the underlying SoC */
+ if (dev_of_node(dev)) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(hns_roce_of_match, dev->of_node);
+ if (!of_id) {
+ dev_err(dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+ hr_dev->hw = (struct hns_roce_hw *)of_id->data;
+ if (!hr_dev->hw) {
+ dev_err(dev, "couldn't get H/W specific DT data!\n");
+ return -ENXIO;
+ }
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
+ if (!acpi_id) {
+ dev_err(dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+ hr_dev->hw = (struct hns_roce_hw *) acpi_id->driver_data;
+ if (!hr_dev->hw) {
+ dev_err(dev, "couldn't get H/W specific ACPI data!\n");
+ return -ENXIO;
+ }
+ } else {
+ dev_err(dev, "can't read compatibility data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ /* get the mapped register base address */
+ res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
+ hr_dev->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hr_dev->reg_base))
+ return PTR_ERR(hr_dev->reg_base);
+
+ /* read the node_guid of IB device from the DT or ACPI */
+ ret = device_property_read_u8_array(dev, "node-guid",
+ (u8 *)&hr_dev->ib_dev.node_guid,
+ GUID_LEN);
+ if (ret) {
+ dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+ return ret;
+ }
+
+ /* get the RoCE associated ethernet ports or netdevices */
+ for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
+ if (dev_of_node(dev)) {
+ net_node = of_parse_phandle(dev->of_node, "eth-handle",
+ i);
+ if (!net_node)
+ continue;
+ pdev = of_find_device_by_node(net_node);
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ struct acpi_reference_args args;
+ struct fwnode_handle *fwnode;
+
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "eth-handle",
+ i, &args);
+ if (ret)
+ continue;
+ fwnode = acpi_fwnode_handle(args.adev);
+ pdev = hns_roce_find_pdev(fwnode);
+ } else {
+ dev_err(dev, "cannot read data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ if (pdev) {
+ netdev = platform_get_drvdata(pdev);
+ phy_port = (u8)i;
+ if (netdev) {
+ hr_dev->iboe.netdevs[port_cnt] = netdev;
+ hr_dev->iboe.phy_port[port_cnt] = phy_port;
+ } else {
+ dev_err(dev, "no netdev found with pdev %s\n",
+ pdev->name);
+ return -ENODEV;
+ }
+ port_cnt++;
+ }
+ }
+
+ if (port_cnt == 0) {
+ dev_err(dev, "unable to get eth-handle for available ports!\n");
+ return -EINVAL;
+ }
+
+ hr_dev->caps.num_ports = port_cnt;
+
+ /* cmd issue mode: 0 is poll, 1 is event */
+ hr_dev->cmd_mod = 1;
+ hr_dev->loop_idc = 0;
+
+ /* read the interrupt names from the DT or ACPI */
+ ret = device_property_read_string_array(dev, "interrupt-names",
+ hr_dev->irq_names,
+ HNS_ROCE_MAX_IRQ_NUM);
+ if (ret < 0) {
+ dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
+ return ret;
+ }
+
+ /* fetch the interrupt numbers */
+ for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
+ hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
+ if (hr_dev->irq[i] <= 0) {
+ dev_err(dev, "platform get of irq[=%d] failed!\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
+ HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
+ hr_dev->caps.num_mtt_segs, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init MTT context memory, aborting.\n");
+ return ret;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
+ HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
+ hr_dev->caps.num_mtpts, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
+ goto err_unmap_mtt;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
+ HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
+ hr_dev->caps.num_qps, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init QP context memory, aborting.\n");
+ goto err_unmap_dmpt;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
+ HEM_TYPE_IRRL,
+ hr_dev->caps.irrl_entry_sz *
+ hr_dev->caps.max_qp_init_rdma,
+ hr_dev->caps.num_qps, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
+ goto err_unmap_qp;
+ }
+
+ ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
+ HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
+ hr_dev->caps.num_cqs, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init CQ context memory, aborting.\n");
+ goto err_unmap_irrl;
+ }
+
+ return 0;
+
+err_unmap_irrl:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
+
+err_unmap_qp:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
+
+err_unmap_dmpt:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
+
+err_unmap_mtt:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+
+ return ret;
+}
+
+/**
+ * hns_roce_setup_hca - setup host channel adapter
+ * @hr_dev: pointer to hns roce device
+ * Return: 0 on success, negative errno on failure.
+ */
+static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ spin_lock_init(&hr_dev->sm_lock);
+ spin_lock_init(&hr_dev->bt_cmd_lock);
+
+ ret = hns_roce_init_uar_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to initialize uar table. aborting\n");
+ return ret;
+ }
+
+ ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
+ if (ret) {
+ dev_err(dev, "Failed to allocate priv_uar.\n");
+ goto err_uar_table_free;
+ }
+
+ ret = hns_roce_init_pd_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init protected domain table.\n");
+ goto err_uar_alloc_free;
+ }
+
+ ret = hns_roce_init_mr_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init memory region table.\n");
+ goto err_pd_table_free;
+ }
+
+ ret = hns_roce_init_cq_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init completion queue table.\n");
+ goto err_mr_table_free;
+ }
+
+ ret = hns_roce_init_qp_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init queue pair table.\n");
+ goto err_cq_table_free;
+ }
+
+ return 0;
+
+err_cq_table_free:
+ hns_roce_cleanup_cq_table(hr_dev);
+
+err_mr_table_free:
+ hns_roce_cleanup_mr_table(hr_dev);
+
+err_pd_table_free:
+ hns_roce_cleanup_pd_table(hr_dev);
+
+err_uar_alloc_free:
+ hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
+
+err_uar_table_free:
+ hns_roce_cleanup_uar_table(hr_dev);
+ return ret;
+}
+
+/**
+ * hns_roce_probe - RoCE platform driver probe entry point
+ * @pdev: pointer to platform device
+ * Return: 0 on success, negative errno on failure.
+ */
+static int hns_roce_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct hns_roce_dev *hr_dev;
+ struct device *dev = &pdev->dev;
+
+ hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+ if (!hr_dev)
+ return -ENOMEM;
+
+ memset((u8 *)hr_dev + sizeof(struct ib_device), 0,
+ sizeof(struct hns_roce_dev) - sizeof(struct ib_device));
+
+ hr_dev->pdev = pdev;
+ platform_set_drvdata(pdev, hr_dev);
+
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+ dev_err(dev, "No usable DMA addressing mode\n");
+ ret = -EIO;
+ goto error_failed_get_cfg;
+ }
+
+ ret = hns_roce_get_cfg(hr_dev);
+ if (ret) {
+ dev_err(dev, "Get Configuration failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ ret = hr_dev->hw->reset(hr_dev, true);
+ if (ret) {
+ dev_err(dev, "Reset RoCE engine failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ hr_dev->hw->hw_profile(hr_dev);
+
+ ret = hns_roce_cmd_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "cmd init failed!\n");
+ goto error_failed_cmd_init;
+ }
+
+ ret = hns_roce_init_eq_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "eq init failed!\n");
+ goto error_failed_eq_table;
+ }
+
+ if (hr_dev->cmd_mod) {
+ ret = hns_roce_cmd_use_events(hr_dev);
+ if (ret) {
+ dev_err(dev, "Switch to event-driven cmd failed!\n");
+ goto error_failed_use_event;
+ }
+ }
+
+ ret = hns_roce_init_hem(hr_dev);
+ if (ret) {
+ dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
+ goto error_failed_init_hem;
+ }
+
+ ret = hns_roce_setup_hca(hr_dev);
+ if (ret) {
+ dev_err(dev, "setup hca failed!\n");
+ goto error_failed_setup_hca;
+ }
+
+ ret = hr_dev->hw->hw_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "hw_init failed!\n");
+ goto error_failed_engine_init;
+ }
+
+ ret = hns_roce_register_device(hr_dev);
+ if (ret)
+ goto error_failed_register_device;
+
+ return 0;
+
+error_failed_register_device:
+ hr_dev->hw->hw_exit(hr_dev);
+
+error_failed_engine_init:
+ hns_roce_cleanup_bitmap(hr_dev);
+
+error_failed_setup_hca:
+ hns_roce_cleanup_hem(hr_dev);
+
+error_failed_init_hem:
+ if (hr_dev->cmd_mod)
+ hns_roce_cmd_use_polling(hr_dev);
+
+error_failed_use_event:
+ hns_roce_cleanup_eq_table(hr_dev);
+
+error_failed_eq_table:
+ hns_roce_cmd_cleanup(hr_dev);
+
+error_failed_cmd_init:
+ ret = hr_dev->hw->reset(hr_dev, false);
+ if (ret)
+ dev_err(&hr_dev->pdev->dev, "roce_engine reset fail\n");
+
+error_failed_get_cfg:
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return ret;
+}
+
+/**
+ * hns_roce_remove - remove RoCE device
+ * @pdev: pointer to platform device
+ */
+static int hns_roce_remove(struct platform_device *pdev)
+{
+ struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
+
+ hns_roce_unregister_device(hr_dev);
+ hr_dev->hw->hw_exit(hr_dev);
+ hns_roce_cleanup_bitmap(hr_dev);
+ hns_roce_cleanup_hem(hr_dev);
+
+ if (hr_dev->cmd_mod)
+ hns_roce_cmd_use_polling(hr_dev);
+
+ hns_roce_cleanup_eq_table(hr_dev);
+ hns_roce_cmd_cleanup(hr_dev);
+ hr_dev->hw->reset(hr_dev, false);
+
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return 0;
+}
+
+static struct platform_driver hns_roce_driver = {
+ .probe = hns_roce_probe,
+ .remove = hns_roce_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hns_roce_of_match,
+ .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
+ },
+};
+
+module_platform_driver(hns_roce_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_DESCRIPTION("HNS RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
new file mode 100644
index 000000000000..fb87883ead34
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+
+static u32 hw_index_to_key(unsigned long ind)
+{
+ return (u32)(ind >> 24) | (ind << 8);
+}
+
+static unsigned long key_to_hw_index(u32 key)
+{
+ return (key << 24) | (key >> 8);
+}
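+
+/*
+ * The MR key is the 32-bit MPT index rotated left by eight bits, and
+ * key_to_hw_index() is the inverse rotation: e.g. index 0x1 gives key
+ * 0x100, and key 0x100 maps back to index 0x1.
+ */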
+
+static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
+{
+ return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
+ HNS_ROCE_CMD_SW2HW_MPT,
+ HNS_ROCE_CMD_TIME_CLASS_B);
+}
+
+static int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
+{
+ return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
+ mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
+ HNS_ROCE_CMD_TIME_CLASS_B);
+}
+
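+/*
+ * Buddy allocator for MTT segments, in the style of mlx4: bits[o] marks
+ * which blocks of 2^o segments are free. Allocation takes the smallest
+ * free block of at least the requested order and splits it, handing the
+ * unused halves back to the lower orders; freeing re-coalesces a block
+ * with its buddy (seg ^ 1) for as long as that buddy is also free.
+ */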
+static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
+ unsigned long *seg)
+{
+ int o;
+ u32 m;
+
+ spin_lock(&buddy->lock);
+
+ for (o = order; o <= buddy->max_order; ++o) {
+ if (buddy->num_free[o]) {
+ m = 1 << (buddy->max_order - o);
+ *seg = find_first_bit(buddy->bits[o], m);
+ if (*seg < m)
+ goto found;
+ }
+ }
+ spin_unlock(&buddy->lock);
+ return -1;
+
+ found:
+ clear_bit(*seg, buddy->bits[o]);
+ --buddy->num_free[o];
+
+ while (o > order) {
+ --o;
+ *seg <<= 1;
+ set_bit(*seg ^ 1, buddy->bits[o]);
+ ++buddy->num_free[o];
+ }
+
+ spin_unlock(&buddy->lock);
+
+ *seg <<= order;
+ return 0;
+}
+
+static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
+ int order)
+{
+ seg >>= order;
+
+ spin_lock(&buddy->lock);
+
+ while (test_bit(seg ^ 1, buddy->bits[order])) {
+ clear_bit(seg ^ 1, buddy->bits[order]);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+
+ set_bit(seg, buddy->bits[order]);
+ ++buddy->num_free[order];
+
+ spin_unlock(&buddy->lock);
+}
+
+static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
+{
+ int i, s;
+
+ buddy->max_order = max_order;
+ spin_lock_init(&buddy->lock);
+
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits),
+ GFP_KERNEL);
+ buddy->num_free = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->num_free), GFP_KERNEL);
+ if (!buddy->bits || !buddy->num_free)
+ goto err_out;
+
+ for (i = 0; i <= buddy->max_order; ++i) {
+ s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+ buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
+ if (!buddy->bits[i])
+ goto err_out_free;
+
+ bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+ }
+
+ set_bit(0, buddy->bits[buddy->max_order]);
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free:
+ for (i = 0; i <= buddy->max_order; ++i)
+ kfree(buddy->bits[i]);
+
+err_out:
+ kfree(buddy->bits);
+ kfree(buddy->num_free);
+ return -ENOMEM;
+}
+
+static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
+{
+ int i;
+
+ for (i = 0; i <= buddy->max_order; ++i)
+ kfree(buddy->bits[i]);
+
+ kfree(buddy->bits);
+ kfree(buddy->num_free);
+}
+
+static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
+ unsigned long *seg)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+ int ret = 0;
+
+ ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
+ if (ret == -1)
+ return -1;
+
+ if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
+ *seg + (1 << order) - 1)) {
+ hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
+ return -1;
+ }
+
+ return 0;
+}
+
+int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
+ struct hns_roce_mtt *mtt)
+{
+ int ret = 0;
+ int i;
+
+ /* A page count of zero corresponds to a DMA memory region */
+ if (!npages) {
+ mtt->order = -1;
+ mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
+ return 0;
+ }
+
+ /* Note: if page_shift is zero, FAST memory register */
+ mtt->page_shift = page_shift;
+
+ /* Compute the buddy order covering the required MTT entries */
+ for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
+ i <<= 1)
+ ++mtt->order;
+
+ /* Allocate MTT entry */
+ ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
+ if (ret == -1)
+ return -ENOMEM;
+
+ return 0;
+}
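+
+/*
+ * Illustration, assuming HNS_ROCE_MTT_ENTRY_PER_SEG is 8: for
+ * npages = 64 the loop above doubles i through 8, 16 and 32 to 64, so
+ * mtt->order = 3 and 1 << 3 = 8 MTT segments are reserved.
+ */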
+
+void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+
+ if (mtt->order < 0)
+ return;
+
+ hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
+ hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
+ mtt->first_seg + (1 << mtt->order) - 1);
+}
+
+static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
+ u64 size, u32 access, int npages,
+ struct hns_roce_mr *mr)
+{
+ unsigned long index = 0;
+ int ret = 0;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ /* Allocate a key for mr from mr_table */
+ ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
+ if (ret == -1)
+ return -ENOMEM;
+
+ mr->iova = iova; /* MR va starting addr */
+ mr->size = size; /* MR addr range */
+ mr->pd = pd; /* MR's protection domain */
+ mr->access = access; /* MR access permit */
+ mr->enabled = 0; /* MR active status */
+ mr->key = hw_index_to_key(index); /* MR key */
+
+ if (size == ~0ull) {
+ mr->type = MR_TYPE_DMA;
+ mr->pbl_buf = NULL;
+ mr->pbl_dma_addr = 0;
+ } else {
+ mr->type = MR_TYPE_MR;
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int npages = 0;
+ int ret;
+
+ if (mr->enabled) {
+ ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
+ & (hr_dev->caps.num_mtpts - 1));
+ if (ret)
+ dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ }
+
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+ dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
+ mr->pbl_dma_addr);
+ }
+
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+ key_to_hw_index(mr->key));
+}
+
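+/*
+ * Enabling an MR is a three-step handshake with the hardware: reserve
+ * HEM backing for the MPT entry, fill a command mailbox with the MPT
+ * contents via the hw-specific write_mtpt(), then issue SW2HW_MPT to
+ * pass ownership of the entry to the hardware (hns_roce_mr_free()
+ * undoes this with HW2SW_MPT).
+ */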
+static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr)
+{
+ int ret;
+ unsigned long mtpt_idx = key_to_hw_index(mr->key);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+
+ /* Prepare HEM entry memory */
+ ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+ if (ret)
+ return ret;
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ret = PTR_ERR(mailbox);
+ goto err_table;
+ }
+
+ ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ if (ret) {
+ dev_err(dev, "Write mtpt fail!\n");
+ goto err_page;
+ }
+
+ ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ if (ret) {
+ dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ goto err_page;
+ }
+
+ mr->enabled = 1;
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_page:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+err_table:
+ hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+ return ret;
+}
+
+static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, u32 start_index,
+ u32 npages, u64 *page_list)
+{
+ u32 i = 0;
+ __le64 *mtts = NULL;
+ dma_addr_t dma_handle;
+ u32 s = start_index * sizeof(u64);
+
+ /* All MTTs must fit in the same page */
+ if (start_index / (PAGE_SIZE / sizeof(u64)) !=
+ (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
+ return -EINVAL;
+
+ if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
+ return -EINVAL;
+
+ mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
+ &dma_handle);
+ if (!mtts)
+ return -ENOMEM;
+
+ /* Save the page frame number; the low 12 address bits are zero */
+ for (i = 0; i < npages; ++i)
+ mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
+
+ return 0;
+}
+
+static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, u32 start_index,
+ u32 npages, u64 *page_list)
+{
+ int chunk;
+ int ret;
+
+ if (mtt->order < 0)
+ return -EINVAL;
+
+ while (npages > 0) {
+ chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
+
+ ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
+ page_list);
+ if (ret)
+ return ret;
+
+ npages -= chunk;
+ start_index += chunk;
+ page_list += chunk;
+ }
+
+ return 0;
+}
+
+int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
+{
+ u32 i = 0;
+ int ret = 0;
+ u64 *page_list = NULL;
+
+ page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->npages; ++i) {
+ if (buf->nbufs == 1)
+ page_list[i] = buf->direct.map + (i << buf->page_shift);
+ else
+ page_list[i] = buf->page_list[i].map;
+ }
+ ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);
+
+ kfree(page_list);
+
+ return ret;
+}
+
+int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+ int ret = 0;
+
+ ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
+ hr_dev->caps.num_mtpts,
+ hr_dev->caps.num_mtpts - 1,
+ hr_dev->caps.reserved_mrws, 0);
+ if (ret)
+ return ret;
+
+ ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
+ ilog2(hr_dev->caps.num_mtt_segs));
+ if (ret)
+ goto err_buddy;
+
+ return 0;
+
+err_buddy:
+ hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
+ return ret;
+}
+
+void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+
+ hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+ hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
+}
+
+struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ int ret = 0;
+ struct hns_roce_mr *mr = NULL;
+
+ mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ if (mr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate memory region key */
+ ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
+ ~0ULL, acc, 0, mr);
+ if (ret)
+ goto err_free;
+
+ ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
+ if (ret)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+err_mr:
+ hns_roce_mr_free(to_hr_dev(pd->device), mr);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt, struct ib_umem *umem)
+{
+ struct scatterlist *sg;
+ int i, k, entry;
+ int ret = 0;
+ u64 *pages;
+ u32 n;
+ int len;
+
+ pages = (u64 *) __get_free_page(GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ i = n = 0;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ len = sg_dma_len(sg) >> mtt->page_shift;
+ for (k = 0; k < len; ++k) {
+ pages[i++] = sg_dma_address(sg) + umem->page_size * k;
+ if (i == PAGE_SIZE / sizeof(u64)) {
+ ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
+ pages);
+ if (ret)
+ goto out;
+ n += i;
+ i = 0;
+ }
+ }
+ }
+
+ if (i)
+ ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
+
+out:
+ free_page((unsigned long) pages);
+ return ret;
+}
+
+static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
+ struct ib_umem *umem)
+{
+ int i = 0;
+ int entry;
+ struct scatterlist *sg;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> PAGE_ADDR_SHIFT;
+ i++;
+ }
+
+ /* Ensure the PBL writes are visible before the MR is enabled */
+ mb();
+
+ return 0;
+}
+
+struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_mr *mr = NULL;
+ int ret = 0;
+ int n = 0;
+
+ mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->umem = ib_umem_get(pd->uobject->context, start, length,
+ access_flags, 0);
+ if (IS_ERR(mr->umem)) {
+ ret = PTR_ERR(mr->umem);
+ goto err_free;
+ }
+
+ n = ib_umem_page_count(mr->umem);
+ if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
+ dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
+ mr->umem->page_size);
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
+ dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
+ length);
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
+ access_flags, n, mr);
+ if (ret)
+ goto err_umem;
+
+ ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
+ if (ret)
+ goto err_mr;
+
+ ret = hns_roce_mr_enable(hr_dev, mr);
+ if (ret)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+
+ return &mr->ibmr;
+
+err_mr:
+ hns_roce_mr_free(hr_dev, mr);
+
+err_umem:
+ ib_umem_release(mr->umem);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+int hns_roce_dereg_mr(struct ib_mr *ibmr)
+{
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ hns_roce_mr_free(to_hr_dev(ibmr->device), mr);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
new file mode 100644
index 000000000000..05db7d59812a
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include "hns_roce_device.h"
+
+static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
+{
+ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
+}
+
+static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
+{
+ hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+}
+
+int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
+{
+ return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
+ hr_dev->caps.num_pds - 1,
+ hr_dev->caps.reserved_pds, 0);
+}
+
+void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
+}
+
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_pd *pd;
+ int ret;
+
+ pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
+ if (ret) {
+ kfree(pd);
+ dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
+ return ERR_PTR(ret);
+ }
+
+ if (context) {
+ if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
+ hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
+ dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
+ kfree(pd);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ return &pd->ibpd;
+}
+
+int hns_roce_dealloc_pd(struct ib_pd *pd)
+{
+ hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+ kfree(to_hr_pd(pd));
+
+ return 0;
+}
+
+int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
+{
+ struct resource *res;
+ int ret = 0;
+
+ /* Use a bitmap to manage the UAR index */
+ ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->index);
+ if (ret == -1)
+ return -ENOMEM;
+
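+ /*
+ * Fold the allocated index onto the physical UAR range
+ * 1..phy_num_uars-1; index 0 maps to itself.
+ */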
+ if (uar->index > 0)
+ uar->index = (uar->index - 1) %
+ (hr_dev->caps.phy_num_uars - 1) + 1;
+
+ res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
+ uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
+
+ return 0;
+}
+
+void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
+{
+ hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index);
+}
+
+int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
+{
+ return hns_roce_bitmap_init(&hr_dev->uar_table.bitmap,
+ hr_dev->caps.num_uars,
+ hr_dev->caps.num_uars - 1,
+ hr_dev->caps.reserved_uars, 0);
+}
+
+void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
new file mode 100644
index 000000000000..e86dd8d06777
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/platform_device.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_user.h"
+
+#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
+
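+/*
+ * Dispatch an async hardware event to the owning QP: look it up under
+ * qp_table->lock and take a reference so the QP cannot be freed while
+ * its handler runs; dropping the last reference completes &qp->free,
+ * which hns_roce_qp_free() waits on.
+ */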
+void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_qp *qp;
+
+ spin_lock(&qp_table->lock);
+
+ qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (qp)
+ atomic_inc(&qp->refcount);
+
+ spin_unlock(&qp_table->lock);
+
+ if (!qp) {
+ dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ return;
+ }
+
+ qp->event(qp, (enum hns_roce_event)event_type);
+
+ if (atomic_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+}
+
+static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
+ enum hns_roce_event type)
+{
+ struct ib_event event;
+ struct ib_qp *ibqp = &hr_qp->ibqp;
+
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ switch (type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n",
+ type, hr_qp->qpn);
+ return;
+ }
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+}
+
+static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
+ int align, unsigned long *base)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+
+ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
+}
+
+enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return HNS_ROCE_QP_STATE_RST;
+ case IB_QPS_INIT:
+ return HNS_ROCE_QP_STATE_INIT;
+ case IB_QPS_RTR:
+ return HNS_ROCE_QP_STATE_RTR;
+ case IB_QPS_RTS:
+ return HNS_ROCE_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return HNS_ROCE_QP_STATE_SQD;
+ case IB_QPS_ERR:
+ return HNS_ROCE_QP_STATE_ERR;
+ default:
+ return HNS_ROCE_QP_NUM_STATE;
+ }
+}
+
+static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ int ret;
+
+ if (!qpn)
+ return -EINVAL;
+
+ hr_qp->qpn = qpn;
+
+ spin_lock_irq(&qp_table->lock);
+ ret = radix_tree_insert(&hr_dev->qp_table_tree,
+ hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (ret) {
+ dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
+ goto err_put_irrl;
+ }
+
+ atomic_set(&hr_qp->refcount, 1);
+ init_completion(&hr_qp->free);
+
+ return 0;
+
+err_put_irrl:
+ return ret;
+}
+
+static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret;
+
+ if (!qpn)
+ return -EINVAL;
+
+ hr_qp->qpn = qpn;
+
+ /* Alloc memory for QPC */
+ ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "QPC table get failed\n");
+ goto err_out;
+ }
+
+ /* Alloc memory for IRRL */
+ ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "IRRL table get failed\n");
+ goto err_put_qp;
+ }
+
+ spin_lock_irq(&qp_table->lock);
+ ret = radix_tree_insert(&hr_dev->qp_table_tree,
+ hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (ret) {
+ dev_err(dev, "QPC radix_tree_insert failed\n");
+ goto err_put_irrl;
+ }
+
+ atomic_set(&hr_qp->refcount, 1);
+ init_completion(&hr_qp->free);
+
+ return 0;
+
+err_put_irrl:
+ hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+
+err_put_qp:
+ hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
+
+err_out:
+ return ret;
+}
+
+void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp_table->lock, flags);
+ radix_tree_delete(&hr_dev->qp_table_tree,
+ hr_qp->qpn & (hr_dev->caps.num_qps - 1));
+ spin_unlock_irqrestore(&qp_table->lock, flags);
+}
+
+void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+
+ if (atomic_dec_and_test(&hr_qp->refcount))
+ complete(&hr_qp->free);
+ wait_for_completion(&hr_qp->free);
+
+ if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
+ hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+ hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
+ }
+}
+
+void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
+ int cnt)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+
+ if (base_qpn < SQP_NUM)
+ return;
+
+ hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+}
+
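+/*
+ * Size the receive queue: validate the caller's capabilities, round
+ * the WQE count up to a power of two so the ring can be indexed with a
+ * simple mask, and report the resulting limits back through @cap.
+ */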
+static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap, int is_user, int has_srq,
+ struct hns_roce_qp *hr_qp)
+{
+ u32 max_cnt;
+ struct device *dev = &hr_dev->pdev->dev;
+
+ /* Check the validity of QP support capacity */
+ if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
+ cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
+ dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
+ cap->max_recv_wr, cap->max_recv_sge);
+ return -EINVAL;
+ }
+
+ /* If an SRQ exists, the RQ-related sizes must be zero */
+ if (has_srq) {
+ if (cap->max_recv_wr) {
+ dev_dbg(dev, "srq no need config max_recv_wr\n");
+ return -EINVAL;
+ }
+
+ hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
+ } else {
+ if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
+ dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
+ return -EINVAL;
+ }
+
+ /* In the v1 engine, parameter verification is handled here */
+ max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
+ cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
+ hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
+
+ if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
+ dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
+ return -EINVAL;
+ }
+
+ max_cnt = max(1U, cap->max_recv_sge);
+ hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
+ /* The WQE size is fixed at 64 bytes */
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+ }
+
+ cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
+ cap->max_recv_sge = hr_qp->rq.max_gs;
+
+ return 0;
+}
+
+static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
+ u8 max_sq_stride = ilog2(roundup_sq_stride);
+
+ /* Sanity check SQ size before proceeding */
+ if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
+ ucmd->log_sq_stride > max_sq_stride ||
+ ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
+ dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
+ return -EINVAL;
+ }
+
+ hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+
+ /* Get the buffer size; SQ and RQ are each aligned to page_size */
+ hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->rq.wqe_shift), PAGE_SIZE) +
+ HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), PAGE_SIZE);
+
+ hr_qp->sq.offset = 0;
+ hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), PAGE_SIZE);
+
+ return 0;
+}
+
+static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap,
+ struct hns_roce_qp *hr_qp)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 max_cnt;
+
+ if (cap->max_send_wr > hr_dev->caps.max_wqes ||
+ cap->max_send_sge > hr_dev->caps.max_sq_sg ||
+ cap->max_inline_data > hr_dev->caps.max_sq_inline) {
+ dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
+ return -EINVAL;
+ }
+
+ hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
+ hr_qp->sq_max_wqes_per_wr = 1;
+ hr_qp->sq_spare_wqes = 0;
+
+ /* In the v1 engine, parameter verification is handled here */
+ max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
+ cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
+ hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
+ if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
+ dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
+ return -EINVAL;
+ }
+
+ /* Get data_seg numbers */
+ max_cnt = max(1U, cap->max_send_sge);
+ hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+
+ /* Get the buffer size; SQ and RQ are each aligned to page_size */
+ hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->rq.wqe_shift), PAGE_SIZE) +
+ HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), PAGE_SIZE);
+ hr_qp->sq.offset = 0;
+ hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), PAGE_SIZE);
+
+ /* Report the WR and SGE counts available for sending */
+ cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
+ cap->max_send_sge = hr_qp->sq.max_gs;
+
+ /* We don't support inline sends for kernel QPs (yet) */
+ cap->max_inline_data = 0;
+
+ return 0;
+}
+
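+/*
+ * Common QP construction: size the work queues, map the user buffer
+ * (or allocate a kernel one) into an MTT, reserve a QPN unless a fixed
+ * special QPN was supplied, and insert the QP into the lookup tree.
+ */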
+static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+ struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, unsigned long sqpn,
+ struct hns_roce_qp *hr_qp)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_ib_create_qp ucmd;
+ unsigned long qpn = 0;
+ int ret = 0;
+
+ mutex_init(&hr_qp->mutex);
+ spin_lock_init(&hr_qp->sq.lock);
+ spin_lock_init(&hr_qp->rq.lock);
+
+ hr_qp->state = IB_QPS_RESET;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
+ else
+ hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+
+ ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
+ !!init_attr->srq, hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_set_rq_size failed\n");
+ goto err_out;
+ }
+
+ if (ib_pd->uobject) {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ dev_err(dev, "ib_copy_from_udata error for create qp\n");
+ ret = -EFAULT;
+ goto err_out;
+ }
+
+ ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
+ if (ret) {
+ dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
+ goto err_out;
+ }
+
+ hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
+ ucmd.buf_addr, hr_qp->buff_size, 0,
+ 0);
+ if (IS_ERR(hr_qp->umem)) {
+ dev_err(dev, "ib_umem_get error for create qp\n");
+ ret = PTR_ERR(hr_qp->umem);
+ goto err_out;
+ }
+
+ ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
+ ilog2((unsigned int)hr_qp->umem->page_size),
+ &hr_qp->mtt);
+ if (ret) {
+ dev_err(dev, "hns_roce_mtt_init error for create qp\n");
+ goto err_buf;
+ }
+
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
+ hr_qp->umem);
+ if (ret) {
+ dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
+ goto err_mtt;
+ }
+ } else {
+ if (init_attr->create_flags &
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
+ dev_err(dev, "init_attr->create_flags error!\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ dev_err(dev, "init_attr->create_flags error!\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ /* Set SQ size */
+ ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
+ hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
+ goto err_out;
+ }
+
+ /* QP doorbell register address */
+ hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+ hr_qp->rq.db_reg_l = hr_dev->reg_base +
+ ROCEE_DB_OTHERS_L_0_REG +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+ /* Allocate QP buf */
+ if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
+ &hr_qp->hr_buf)) {
+ dev_err(dev, "hns_roce_buf_alloc error!\n");
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /* Write MTT */
+ ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
+ hr_qp->hr_buf.page_shift, &hr_qp->mtt);
+ if (ret) {
+ dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
+ goto err_buf;
+ }
+
+ ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
+ &hr_qp->hr_buf);
+ if (ret) {
+ dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
+ goto err_mtt;
+ }
+
+ hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
+ GFP_KERNEL);
+ hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
+ GFP_KERNEL);
+ if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
+ ret = -ENOMEM;
+ goto err_wrid;
+ }
+ }
+
+ if (sqpn) {
+ qpn = sqpn;
+ } else {
+ /* Get QPN */
+ ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
+ if (ret) {
+ dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
+ goto err_wrid;
+ }
+ }
+
+ if ((init_attr->qp_type) == IB_QPT_GSI) {
+ ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_qp_alloc failed!\n");
+ goto err_qpn;
+ }
+ } else {
+ ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_qp_alloc failed!\n");
+ goto err_qpn;
+ }
+ }
+
+ if (sqpn)
+ hr_qp->doorbell_qpn = 1;
+ else
+ hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
+
+ hr_qp->event = hns_roce_ib_qp_event;
+
+ return 0;
+
+err_qpn:
+ if (!sqpn)
+ hns_roce_release_range_qp(hr_dev, qpn, 1);
+
+err_wrid:
+ kfree(hr_qp->sq.wrid);
+ kfree(hr_qp->rq.wrid);
+
+err_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
+
+err_buf:
+ if (ib_pd->uobject)
+ ib_umem_release(hr_qp->umem);
+ else
+ hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+
+err_out:
+ return ret;
+}
+
+struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_sqp *hr_sqp;
+ struct hns_roce_qp *hr_qp;
+ int ret;
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC: {
+ hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
+ if (!hr_qp)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
+ hr_qp);
+ if (ret) {
+ dev_err(dev, "Create RC QP failed\n");
+ kfree(hr_qp);
+ return ERR_PTR(ret);
+ }
+
+ hr_qp->ibqp.qp_num = hr_qp->qpn;
+
+ break;
+ }
+ case IB_QPT_GSI: {
+ /* Userspace is not allowed to create special QPs */
+ if (pd->uobject) {
+ dev_err(dev, "userspace GSI QPs are not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
+ if (!hr_sqp)
+ return ERR_PTR(-ENOMEM);
+
+ hr_qp = &hr_sqp->hr_qp;
+ hr_qp->port = init_attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
+
+ ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
+ hr_qp->ibqp.qp_num, hr_qp);
+ if (ret) {
+ dev_err(dev, "Create GSI QP failed!\n");
+ kfree(hr_sqp);
+ return ERR_PTR(ret);
+ }
+
+ break;
+ }
+ default:{
+ dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return &hr_qp->ibqp;
+}
+
+int to_hr_qp_type(int qp_type)
+{
+ int transport_type;
+
+ if (qp_type == IB_QPT_RC)
+ transport_type = SERV_TYPE_RC;
+ else if (qp_type == IB_QPT_UC)
+ transport_type = SERV_TYPE_UC;
+ else if (qp_type == IB_QPT_UD)
+ transport_type = SERV_TYPE_UD;
+ else if (qp_type == IB_QPT_GSI)
+ transport_type = SERV_TYPE_UD;
+ else
+ transport_type = -1;
+
+ return transport_type;
+}
+
+int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ struct device *dev = &hr_dev->pdev->dev;
+ int ret = -EINVAL;
+ int p;
+ enum ib_mtu active_mtu;
+
+ mutex_lock(&hr_qp->mutex);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
+ new_state = attr_mask & IB_QP_STATE ?
+ attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ dev_err(dev, "ib_modify_qp_is_ok failed\n");
+ goto out;
+ }
+
+ if ((attr_mask & IB_QP_PORT) &&
+ (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
+ dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
+ attr->port_num);
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
+ dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
+ attr->pkey_index);
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+ if (attr->path_mtu > IB_MTU_2048 ||
+ attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > active_mtu) {
+ dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
+ attr->path_mtu);
+ goto out;
+ }
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
+ dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
+ attr->max_rd_atomic);
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
+ dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
+ attr->max_dest_rd_atomic);
+ goto out;
+ }
+
+ if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+ ret = -EPERM;
+ dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
+ new_state);
+ goto out;
+ }
+
+ ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
+ new_state);
+
+out:
+ mutex_unlock(&hr_qp->mutex);
+
+ return ret;
+}
+
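+/*
+ * Take both CQ locks in a globally consistent order (lower CQN first)
+ * so that two paths locking the same pair cannot deadlock; the
+ * __acquire()/__release() annotations keep sparse happy when the send
+ * and recv CQs are the same object.
+ */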
+void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
+{
+ if (send_cq == recv_cq) {
+ spin_lock_irq(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
+ spin_lock_irq(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irq(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+ }
+}
+
+void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
+ struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
+ __releases(&recv_cq->lock)
+{
+ if (send_cq == recv_cq) {
+ __release(&recv_cq->lock);
+ spin_unlock_irq(&send_cq->lock);
+ } else if (send_cq->cqn < recv_cq->cqn) {
+ spin_unlock(&recv_cq->lock);
+ spin_unlock_irq(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+ spin_unlock_irq(&recv_cq->lock);
+ }
+}
+
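+/* Pick the immediate-data or invalidate-rkey word carried by the WR */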
+__be32 send_ieth(struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return cpu_to_le32(wr->ex.imm_data);
+ case IB_WR_SEND_WITH_INV:
+ return cpu_to_le32(wr->ex.invalidate_rkey);
+ default:
+ return 0;
+ }
+}
+
+static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+{
+ return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
+}
+
+void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
+{
+ return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
+}
+
+void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
+{
+ return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
+}
+
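+/*
+ * Overflow check with an optimistic fast path: head - tail is
+ * recomputed under the CQ lock only when the lockless read suggests
+ * the queue is full (completion processing, which advances the tail,
+ * is expected to hold that lock).
+ */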
+bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
+ struct ib_cq *ib_cq)
+{
+ struct hns_roce_cq *hr_cq;
+ u32 cur;
+
+ cur = hr_wq->head - hr_wq->tail;
+ if (likely(cur + nreq < hr_wq->max_post))
+ return 0;
+
+ hr_cq = to_hr_cq(ib_cq);
+ spin_lock(&hr_cq->lock);
+ cur = hr_wq->head - hr_wq->tail;
+ spin_unlock(&hr_cq->lock);
+
+ return cur + nreq >= hr_wq->max_post;
+}
+
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ int reserved_from_top = 0;
+ int ret;
+
+ spin_lock_init(&qp_table->lock);
+ INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
+
+ /* Each port includes two SQPs; six ports make 12 in total */
+ ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
+ hr_dev->caps.num_qps - 1, SQP_NUM,
+ reserved_from_top);
+ if (ret) {
+ dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
+{
+ hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
+}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_user.h b/drivers/infiniband/hw/hns/hns_roce_user.h
index a277c31fcaf7..a28f761a9f65 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_user.h
+++ b/drivers/infiniband/hw/hns/hns_roce_user.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
+ * Copyright (c) 2016 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -29,46 +29,25 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __IWCH_USER_H__
-#define __IWCH_USER_H__
-#define IWCH_UVERBS_ABI_VERSION 1
+#ifndef _HNS_ROCE_USER_H
+#define _HNS_ROCE_USER_H
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-struct iwch_create_cq_req {
- __u64 user_rptr_addr;
+struct hns_roce_ib_create_cq {
+ __u64 buf_addr;
};
-struct iwch_create_cq_resp_v0 {
- __u64 key;
- __u32 cqid;
- __u32 size_log2;
+struct hns_roce_ib_create_qp {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u8 log_sq_bb_count;
+ __u8 log_sq_stride;
+ __u8 sq_no_prefetch;
+ __u8 reserved[5];
};
-struct iwch_create_cq_resp {
- __u64 key;
- __u32 cqid;
- __u32 size_log2;
- __u32 memsize;
- __u32 reserved;
+struct hns_roce_ib_alloc_ucontext_resp {
+ __u32 qp_tab_size;
};
-struct iwch_create_qp_resp {
- __u64 key;
- __u64 db_key;
- __u32 qpid;
- __u32 size_log2;
- __u32 sq_size_log2;
- __u32 rq_size_log2;
-};
-
-struct iwch_reg_user_mr_resp {
- __u32 pbl_addr;
-};
-#endif
+#endif /*_HNS_ROCE_USER_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 7ca0638579c0..85637696f6e9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3166,8 +3166,11 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
spin_lock_init(&cm_core->ht_lock);
spin_lock_init(&cm_core->listen_list_lock);
- cm_core->event_wq = create_singlethread_workqueue("iwewq");
- cm_core->disconn_wq = create_singlethread_workqueue("iwdwq");
+ cm_core->event_wq = alloc_ordered_workqueue("iwewq",
+ WQ_MEM_RECLAIM);
+
+ cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
+ WQ_MEM_RECLAIM);
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 445e230d5ff8..ac2f3cd9478c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1615,7 +1615,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
if (status)
break;
- iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch");
+ iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
i40iw_register_notifiers();
iwdev->init_state = INET_NOTIFIER;
status = i40iw_add_mac_ip(iwdev);
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 5fc623362731..b9bf0759f10a 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
if (vlan_tag < 0x1000)
vlan_tag |= (ah_attr->sl & 7) << 13;
ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
- ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ ah->av.eth.gid_index = ret;
ah->av.eth.vlan = cpu_to_be16(vlan_tag);
ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
if (ah_attr->static_rate) {
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index c74ef2620b85..5e9939045852 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -881,7 +881,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
dev->sriov.alias_guid.ports_guid[i].wq =
- create_singlethread_workqueue(alias_wq_name);
+ alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM);
if (!dev->sriov.alias_guid.ports_guid[i].wq) {
ret = -ENOMEM;
goto err_thread;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5df63dacaaa3..6a0fec357dae 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -37,7 +37,7 @@
#include <linux/slab.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
- goto err_dbmap;
+ goto err_cq_free;
}
return &cq->ibcq;
+err_cq_free:
+ mlx4_cq_free(dev->dev, &cq->mcq);
+
err_dbmap:
if (context)
mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 0f21c3a25552..1672907ff219 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -230,6 +230,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
mad->mad_hdr.method == IB_MGMT_METHOD_SET)
switch (mad->mad_hdr.attr_id) {
case IB_SMP_ATTR_PORT_INFO:
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ return;
pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
lid = be16_to_cpu(pinfo->lid);
@@ -245,6 +247,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
break;
case IB_SMP_ATTR_PKEY_TABLE:
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ return;
if (!mlx4_is_mfunc(dev->dev)) {
mlx4_ib_dispatch_event(dev, port_num,
IB_EVENT_PKEY_CHANGE);
@@ -281,6 +285,8 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
break;
case IB_SMP_ATTR_GUID_INFO:
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ return;
/* paravirtualized master's guid is guid 0 -- does not change */
if (!mlx4_is_master(dev->dev))
mlx4_ib_dispatch_event(dev, port_num,
@@ -296,6 +302,26 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad
}
break;
+ case IB_SMP_ATTR_SL_TO_VL_TABLE:
+ /* cache sl to vl mapping changes for use in
+ * filling QP1 LRH VL field when sending packets
+ */
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
+ dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
+ return;
+ if (!mlx4_is_slave(dev->dev)) {
+ union sl2vl_tbl_to_u64 sl2vl64;
+ int jj;
+
+ for (jj = 0; jj < 8; jj++) {
+ sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
+ pr_debug("port %u, sl2vl[%d] = %02x\n",
+ port_num, jj, sl2vl64.sl8[jj]);
+ }
+ atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
+ }
+ break;
+
default:
break;
}
@@ -345,7 +371,8 @@ static void node_desc_override(struct ib_device *dev,
mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
- memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+ memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
}
}
@@ -805,8 +832,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_FAILURE;
if (!out_mad->mad_hdr.status) {
- if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
- smp_snoop(ibdev, port_num, in_mad, prev_lid);
+ smp_snoop(ibdev, port_num, in_mad, prev_lid);
/* slaves get node desc from FW */
if (!mlx4_is_slave(to_mdev(ibdev)->dev))
node_desc_override(ibdev, out_mad);
@@ -1037,6 +1063,23 @@ static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
}
}
+
+ /* Update the sl to vl table from inside client rereg
+ * only if in secure-host mode (snooping is not possible)
+ * and the sl-to-vl change event is not generated by FW.
+ */
+ if (!mlx4_is_slave(dev->dev) &&
+ dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
+ !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
+ if (mlx4_is_master(dev->dev))
+ /* already in work queue from mlx4_ib_event queueing
+ * mlx4_handle_port_mgmt_change_event, which calls
+ * this procedure. Therefore, call sl2vl_update directly.
+ */
+ mlx4_ib_sl2vl_update(dev, port_num);
+ else
+ mlx4_sched_ib_sl2vl_update_work(dev, port_num);
+ }
mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}
@@ -1176,6 +1219,24 @@ void handle_port_mgmt_change_event(struct work_struct *work)
handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
}
break;
+
+ case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
+ /* cache sl to vl mapping changes for use in
+ * filling QP1 LRH VL field when sending packets
+ */
+ if (!mlx4_is_slave(dev->dev)) {
+ union sl2vl_tbl_to_u64 sl2vl64;
+ int jj;
+
+ for (jj = 0; jj < 8; jj++) {
+ sl2vl64.sl8[jj] =
+ eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
+ pr_debug("port %u, sl2vl[%d] = %02x\n",
+ port, jj, sl2vl64.sl8[jj]);
+ }
+ atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
+ }
+ break;
default:
pr_warn("Unsupported subtype 0x%x for "
"Port Management Change event\n", eqe->subtype);
@@ -1918,7 +1979,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
goto err_buf;
}
- ctx->pd = ib_alloc_pd(ctx->ib_dev);
+ ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
if (IS_ERR(ctx->pd)) {
ret = PTR_ERR(ctx->pd);
pr_err("Couldn't create tunnel PD (%d)\n", ret);
@@ -2091,7 +2152,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
}
snprintf(name, sizeof name, "mlx4_ibt%d", port);
- ctx->wq = create_singlethread_workqueue(name);
+ ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->wq) {
pr_err("Failed to create tunnelling WQ for port %d\n", port);
ret = -ENOMEM;
@@ -2099,7 +2160,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
}
snprintf(name, sizeof name, "mlx4_ibud%d", port);
- ctx->ud_wq = create_singlethread_workqueue(name);
+ ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->ud_wq) {
pr_err("Failed to create up/down WQ for port %d\n", port);
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 87ba9bca4181..b597e8227591 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -55,7 +55,7 @@
#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
#define DRV_NAME MLX4_IB_DRV_NAME
#define DRV_VERSION "2.2-1"
@@ -832,6 +832,66 @@ static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return ret;
}
+static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
+{
+ union sl2vl_tbl_to_u64 sl2vl64;
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
+ int err = -ENOMEM;
+ int jj;
+
+ if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
+ *sl2vl_tbl = 0;
+ return 0;
+ }
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
+ in_mad->attr_mod = 0;
+
+ if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
+ mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
+
+ err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
+ in_mad, out_mad);
+ if (err)
+ goto out;
+
+ for (jj = 0; jj < 8; jj++)
+ sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
+ *sl2vl_tbl = sl2vl64.sl64;
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
+{
+ u64 sl2vl;
+ int i;
+ int err;
+
+ for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
+ if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
+ continue;
+ err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
+ if (err) {
+ pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
+ i, err);
+ sl2vl = 0;
+ }
+ atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
+ }
+}
+
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey, int netw_view)
{
@@ -886,7 +946,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
return -EOPNOTSUPP;
spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
- memcpy(ibdev->node_desc, props->node_desc, 64);
+ memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
/*
@@ -897,7 +957,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
if (IS_ERR(mailbox))
return 0;
- memcpy(mailbox->buf, props->node_desc, 64);
+ memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -1259,7 +1319,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
if (err)
goto err1;
- xrcd->pd = ib_alloc_pd(ibdev);
+ xrcd->pd = ib_alloc_pd(ibdev, 0);
if (IS_ERR(xrcd->pd)) {
err = PTR_ERR(xrcd->pd);
goto err2;
@@ -1361,6 +1421,19 @@ struct mlx4_ib_steering {
union ib_gid gid;
};
+#define LAST_ETH_FIELD vlan_tag
+#define LAST_IB_FIELD sl
+#define LAST_IPV4_FIELD dst_ip
+#define LAST_TCP_UDP_FIELD src_port
+
+/* @field is the last supported field; any bytes set past it are rejected */
+#define FIELDS_NOT_SUPPORTED(filter, field)\
+ memchr_inv((void *)&filter.field +\
+ sizeof(filter.field), 0,\
+ sizeof(filter) -\
+ offsetof(typeof(filter), field) -\
+ sizeof(filter.field))
+
static int parse_flow_attr(struct mlx4_dev *dev,
u32 qp_num,
union ib_flow_spec *ib_spec,
@@ -1370,6 +1443,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
switch (ib_spec->type) {
case IB_FLOW_SPEC_ETH:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
+ return -ENOTSUPP;
+
type = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
ETH_ALEN);
@@ -1379,6 +1455,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
break;
case IB_FLOW_SPEC_IB:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
+ return -ENOTSUPP;
+
type = MLX4_NET_TRANS_RULE_ID_IB;
mlx4_spec->ib.l3_qpn =
cpu_to_be32(qp_num);
@@ -1388,6 +1467,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
case IB_FLOW_SPEC_IPV4:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
+ return -ENOTSUPP;
+
type = MLX4_NET_TRANS_RULE_ID_IPV4;
mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
@@ -1397,6 +1479,9 @@ static int parse_flow_attr(struct mlx4_dev *dev,
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
+ return -ENOTSUPP;
+
type = ib_spec->type == IB_FLOW_SPEC_TCP ?
MLX4_NET_TRANS_RULE_ID_TCP :
MLX4_NET_TRANS_RULE_ID_UDP;
@@ -2000,7 +2085,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (err)
goto out;
- memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+ memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
@@ -2653,6 +2738,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (init_node_data(ibdev))
goto err_map;
+ mlx4_init_sl2vl_tbl(ibdev);
for (i = 0; i < ibdev->num_ports; ++i) {
mutex_init(&ibdev->counters_table[i].mutex);
@@ -3101,6 +3187,47 @@ static void handle_bonded_port_state_event(struct work_struct *work)
ib_dispatch_event(&ibev);
}
+void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
+{
+ u64 sl2vl;
+ int err;
+
+ err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
+ if (err) {
+ pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
+ port, err);
+ sl2vl = 0;
+ }
+ atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
+}
+
+static void ib_sl2vl_update_work(struct work_struct *work)
+{
+ struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
+ struct mlx4_ib_dev *mdev = ew->ib_dev;
+ int port = ew->port;
+
+ mlx4_ib_sl2vl_update(mdev, port);
+
+ kfree(ew);
+}
+
+void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
+ int port)
+{
+ struct ib_event_work *ew;
+
+ ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+ if (ew) {
+ INIT_WORK(&ew->work, ib_sl2vl_update_work);
+ ew->port = port;
+ ew->ib_dev = ibdev;
+ queue_work(wq, &ew->work);
+ } else {
+ pr_err("failed to allocate memory for sl2vl update work\n");
+ }
+}
+
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
enum mlx4_dev_event event, unsigned long param)
{
@@ -3131,10 +3258,14 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_PORT_UP:
if (p > ibdev->num_ports)
return;
- if (mlx4_is_master(dev) &&
+ if (!mlx4_is_slave(dev) &&
rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
IB_LINK_LAYER_INFINIBAND) {
- mlx4_ib_invalidate_all_guid_record(ibdev, p);
+ if (mlx4_is_master(dev))
+ mlx4_ib_invalidate_all_guid_record(ibdev, p);
+ if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
+ !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
+ mlx4_sched_ib_sl2vl_update_work(ibdev, p);
}
ibev.event = IB_EVENT_PORT_ACTIVE;
break;
@@ -3222,7 +3353,7 @@ static int __init mlx4_ib_init(void)
{
int err;
- wq = create_singlethread_workqueue("mlx4_ib");
+ wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
if (!wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 097bfcc4ee99..a21d37f02f35 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1045,7 +1045,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
atomic_set(&ctx->tid, 0);
sprintf(name, "mlx4_ib_mcg%d", ctx->port);
- ctx->mcg_wq = create_singlethread_workqueue(name);
+ ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->mcg_wq)
return -ENOMEM;
@@ -1246,7 +1246,7 @@ void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
int mlx4_ib_mcg_init(void)
{
- clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
+ clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM);
if (!clean_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 686ab48ff644..35141f451e5c 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -570,6 +570,7 @@ struct mlx4_ib_dev {
struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2];
struct ib_ah *sm_ah[MLX4_MAX_PORTS];
spinlock_t sm_lock;
+ atomic64_t sl2vl[MLX4_MAX_PORTS];
struct mlx4_ib_sriov sriov;
struct mutex cap_mask_mutex;
@@ -600,6 +601,7 @@ struct ib_event_work {
struct work_struct work;
struct mlx4_ib_dev *ib_dev;
struct mlx4_eqe ib_eqe;
+ int port;
};
struct mlx4_ib_qp_tunnel_init_attr {
@@ -883,4 +885,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
u8 port_num, int index);
+void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
+ int port);
+
+void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
+
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 7fb9629bd12b..570bc866b1d6 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -47,7 +47,7 @@
#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
@@ -2405,6 +2405,22 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
return 0;
}
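+/*
+ * The cached sl2vl table packs sixteen 4-bit VL values into one u64,
+ * two per byte: an even SL sits in the high nibble of sl8[sl >> 1] and
+ * an odd SL in the low nibble (e.g. sl = 3 reads the low nibble of
+ * sl8[1]).
+ */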
+static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num)
+{
+ union sl2vl_tbl_to_u64 tmp_vltab;
+ u8 vl;
+
+ if (sl > 15)
+ return 0xf;
+ tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]);
+ vl = tmp_vltab.sl8[sl >> 1];
+ if (sl & 1)
+ vl &= 0x0f;
+ else
+ vl >>= 4;
+ return vl;
+}
+
#define MLX4_ROCEV2_QP1_SPORT 0xC000
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
@@ -2590,7 +2606,12 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
}
} else {
- sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+ sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 :
+ sl_to_vl(to_mdev(ib_dev),
+ sqp->ud_header.lrh.service_level,
+ sqp->qp.port);
+ if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
+ return -EINVAL;
if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 0597f3eef5d0..7dd3f267f06b 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -37,7 +37,7 @@
#include <linux/vmalloc.h>
#include "mlx4_ib.h"
-#include "user.h"
+#include <rdma/mlx4-abi.h>
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
deleted file mode 100644
index 07e6769ef43b..000000000000
--- a/drivers/infiniband/hw/mlx4/user.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX4_IB_USER_H
-#define MLX4_IB_USER_H
-
-#include <linux/types.h>
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-
-#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION 3
-#define MLX4_IB_UVERBS_ABI_VERSION 4
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mlx4_ib_alloc_ucontext_resp_v3 {
- __u32 qp_tab_size;
- __u16 bf_reg_size;
- __u16 bf_regs_per_page;
-};
-
-struct mlx4_ib_alloc_ucontext_resp {
- __u32 dev_caps;
- __u32 qp_tab_size;
- __u16 bf_reg_size;
- __u16 bf_regs_per_page;
- __u32 cqe_size;
-};
-
-struct mlx4_ib_alloc_pd_resp {
- __u32 pdn;
- __u32 reserved;
-};
-
-struct mlx4_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
-};
-
-struct mlx4_ib_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct mlx4_ib_resize_cq {
- __u64 buf_addr;
-};
-
-struct mlx4_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
-};
-
-struct mlx4_ib_create_srq_resp {
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mlx4_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
- __u8 log_sq_bb_count;
- __u8 log_sq_stride;
- __u8 sq_no_prefetch;
- __u8 reserved[5];
-};
-
-#endif /* MLX4_IB_USER_H */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5de9a65f53bc..fcd04b881ec1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -35,7 +35,6 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
-#include "user.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
@@ -933,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
if (err)
goto err_create;
} else {
- /* for now choose 64 bytes till we have a proper interface */
- cqe_size = 64;
+ cqe_size = cache_line_size() == 128 ? 128 : 64;
err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
&index, &inlen);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 364aab9f3c9e..39e58489dcc2 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -394,7 +394,7 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
if (err)
goto out;
- memcpy(node_desc, out_mad->data, 64);
+ memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
kfree(in_mad);
kfree(out_mad);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 551aa0e789aa..32b09f059c84 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -53,7 +53,6 @@
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
-#include "user.h"
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
@@ -106,13 +105,42 @@ static int mlx5_netdev_event(struct notifier_block *this,
struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
roce.nb);
- if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER))
- return NOTIFY_DONE;
+ switch (event) {
+ case NETDEV_REGISTER:
+ case NETDEV_UNREGISTER:
+ write_lock(&ibdev->roce.netdev_lock);
+ if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
+ ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
+ NULL : ndev;
+ write_unlock(&ibdev->roce.netdev_lock);
+ break;
+
+ case NETDEV_UP:
+ case NETDEV_DOWN: {
+ struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ struct net_device *upper = NULL;
+
+ if (lag_ndev) {
+ upper = netdev_master_upper_dev_get(lag_ndev);
+ dev_put(lag_ndev);
+ }
+
+ if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
+ && ibdev->ib_active) {
+ struct ib_event ibev = {0};
+
+ ibev.device = &ibdev->ib_dev;
+ ibev.event = (event == NETDEV_UP) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ ibev.element.port_num = 1;
+ ib_dispatch_event(&ibev);
+ }
+ break;
+ }
- write_lock(&ibdev->roce.netdev_lock);
- if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
- ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev;
- write_unlock(&ibdev->roce.netdev_lock);
+ default:
+ break;
+ }
return NOTIFY_DONE;
}
@@ -123,6 +151,10 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
struct mlx5_ib_dev *ibdev = to_mdev(device);
struct net_device *ndev;
+ ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ if (ndev)
+ return ndev;
+
/* Ensure ndev does not disappear before we invoke dev_hold()
*/
read_lock(&ibdev->roce.netdev_lock);
@@ -138,7 +170,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
- struct net_device *ndev;
+ struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
u16 qkey_viol_cntr;
@@ -162,6 +194,17 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
if (!ndev)
return 0;
+ if (mlx5_lag_is_active(dev->mdev)) {
+ rcu_read_lock();
+ upper = netdev_master_upper_dev_get_rcu(ndev);
+ if (upper) {
+ dev_put(ndev);
+ ndev = upper;
+ dev_hold(ndev);
+ }
+ rcu_read_unlock();
+ }
+
if (netif_running(ndev) && netif_carrier_ok(ndev)) {
props->state = IB_PORT_ACTIVE;
props->phys_state = 5;
@@ -429,7 +472,7 @@ static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
}
struct mlx5_reg_node_desc {
- u8 desc[64];
+ u8 desc[IB_DEVICE_NODE_DESC_MAX];
};
static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
@@ -532,6 +575,26 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.tso_caps);
}
}
+
+ if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
+ resp.rss_caps.rx_hash_function =
+ MLX5_RX_HASH_FUNC_TOEPLITZ;
+ resp.rss_caps.rx_hash_fields_mask =
+ MLX5_RX_HASH_SRC_IPV4 |
+ MLX5_RX_HASH_DST_IPV4 |
+ MLX5_RX_HASH_SRC_IPV6 |
+ MLX5_RX_HASH_DST_IPV6 |
+ MLX5_RX_HASH_SRC_PORT_TCP |
+ MLX5_RX_HASH_DST_PORT_TCP |
+ MLX5_RX_HASH_SRC_PORT_UDP |
+ MLX5_RX_HASH_DST_PORT_UDP;
+ resp.response_length += sizeof(resp.rss_caps);
+ }
+ } else {
+ if (field_avail(typeof(resp), tso_caps, uhw->outlen))
+ resp.response_length += sizeof(resp.tso_caps);
+ if (field_avail(typeof(resp), rss_caps, uhw->outlen))
+ resp.response_length += sizeof(resp.rss_caps);
}
if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
@@ -595,6 +658,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (!mlx5_core_is_pf(mdev))
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
+ if (mlx5_ib_port_link_layer(ibdev, 1) ==
+ IB_LINK_LAYER_ETHERNET) {
+ props->rss_caps.max_rwq_indirection_tables =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
+ props->rss_caps.max_rwq_indirection_table_size =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
+ props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
+ props->max_wq_type_rq =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -846,13 +920,13 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
* If possible, pass node desc to FW, so it can generate
* a 144 trap. If cmd fails, just ignore.
*/
- memcpy(&in, props->node_desc, 64);
+ memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
if (err)
return err;
- memcpy(ibdev->node_desc, props->node_desc, 64);
+ memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
return err;
}
@@ -945,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
- resp.cache_line_size = L1_CACHE_BYTES;
+ resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
@@ -1395,28 +1469,77 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
return 0;
}
-static bool outer_header_zero(u32 *match_criteria)
+enum {
+ MATCH_CRITERIA_ENABLE_OUTER_BIT,
+ MATCH_CRITERIA_ENABLE_MISC_BIT,
+ MATCH_CRITERIA_ENABLE_INNER_BIT
+};
+
+#define HEADER_IS_ZERO(match_criteria, headers) \
+ !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
+ 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
+
+static u8 get_match_criteria_enable(u32 *match_criteria)
{
- int size = MLX5_ST_SZ_BYTES(fte_match_param);
- char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
- outer_headers);
+ u8 match_criteria_enable;
+
+ match_criteria_enable =
+ (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
+ MATCH_CRITERIA_ENABLE_OUTER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
+ MATCH_CRITERIA_ENABLE_MISC_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
+ MATCH_CRITERIA_ENABLE_INNER_BIT;
- return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
- outer_headers_c + 1,
- size - 1);
+ return match_criteria_enable;
}
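
get_match_criteria_enable() above turns three zero-checks into a bitmask, one bit per header block of the match buffer. Below is a minimal userspace sketch of that (!is_zero) << BIT idiom; the block offsets and sizes are invented for illustration and are not the real fte_match_param layout:

/* Each criteria-enable bit is set iff the corresponding header block in
 * the match buffer contains at least one non-zero byte. Hypothetical
 * 32-byte blocks stand in for the hardware layout.
 */
#include <stddef.h>
#include <stdio.h>

enum { OUTER_BIT, MISC_BIT, INNER_BIT };

static int block_is_zero(const unsigned char *buf, size_t off, size_t len)
{
	while (len--)
		if (buf[off + len])
			return 0;
	return 1;
}

int main(void)
{
	unsigned char match[96] = { 0 };	/* three 32-byte blocks */
	unsigned char enable;

	match[40] = 0xff;			/* touch the "misc" block only */

	enable  = (!block_is_zero(match,  0, 32)) << OUTER_BIT;
	enable |= (!block_is_zero(match, 32, 32)) << MISC_BIT;
	enable |= (!block_is_zero(match, 64, 32)) << INNER_BIT;

	printf("match_criteria_enable = 0x%x\n", enable);	/* prints 0x2 */
	return 0;
}
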
+static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
+{
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
+}
+
+static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
+{
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
+}
+
+#define LAST_ETH_FIELD vlan_tag
+#define LAST_IB_FIELD sl
+#define LAST_IPV4_FIELD tos
+#define LAST_IPV6_FIELD traffic_class
+#define LAST_TCP_UDP_FIELD src_port
+
+/* 'field' names the last supported field in the filter; the macro
+ * reports whether anything past it is non-zero (i.e. unsupported).
+ */
+#define FIELDS_NOT_SUPPORTED(filter, field)\
+ memchr_inv((void *)&filter.field +\
+ sizeof(filter.field), 0,\
+ sizeof(filter) -\
+ offsetof(typeof(filter), field) -\
+ sizeof(filter.field))
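
FIELDS_NOT_SUPPORTED() rejects a filter whose mask sets anything past the last field this kernel knows how to offload, which is why the cases below return -ENOTSUPP instead of the old blanket size check. A self-contained sketch, with my_memchr_inv() standing in for the kernel's memchr_inv() and a made-up filter struct:

/* Report whether any byte after the last supported field is non-zero. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct eth_filter {
	unsigned char dst_mac[6];
	unsigned char src_mac[6];
	unsigned short ether_type;
	unsigned short vlan_tag;	/* last supported field */
	unsigned short future_field;	/* unknown to this "kernel" */
};

/* Like the kernel's memchr_inv(): first byte differing from c, or NULL. */
static const void *my_memchr_inv(const void *p, int c, size_t n)
{
	const unsigned char *s = p;
	size_t i;

	for (i = 0; i < n; i++)
		if (s[i] != (unsigned char)c)
			return s + i;
	return NULL;
}

#define FIELDS_NOT_SUPPORTED(filter, field)				\
	my_memchr_inv((const char *)&(filter).field +			\
		      sizeof((filter).field), 0,			\
		      sizeof(filter) -					\
		      offsetof(typeof(filter), field) -		\
		      sizeof((filter).field))

int main(void)
{
	struct eth_filter mask;

	memset(&mask, 0, sizeof(mask));
	mask.future_field = 1;		/* userspace set an unknown field */

	if (FIELDS_NOT_SUPPORTED(mask, vlan_tag))
		printf("mask sets fields past the last supported one\n");
	return 0;
}
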
+
static int parse_flow_attr(u32 *match_c, u32 *match_v,
- union ib_flow_spec *ib_spec)
+ const union ib_flow_spec *ib_spec)
{
void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers);
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters);
+ void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ misc_parameters);
+
switch (ib_spec->type) {
case IB_FLOW_SPEC_ETH:
- if (ib_spec->size != sizeof(ib_spec->eth))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
+ return -ENOTSUPP;
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
dmac_47_16),
@@ -1463,8 +1586,8 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
ethertype, ntohs(ib_spec->eth.val.ether_type));
break;
case IB_FLOW_SPEC_IPV4:
- if (ib_spec->size != sizeof(ib_spec->ipv4))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
+ return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
ethertype, 0xffff);
@@ -1487,10 +1610,16 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.val.dst_ip,
sizeof(ib_spec->ipv4.val.dst_ip));
+
+ set_tos(outer_headers_c, outer_headers_v,
+ ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
+
+ set_proto(outer_headers_c, outer_headers_v,
+ ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
break;
case IB_FLOW_SPEC_IPV6:
- if (ib_spec->size != sizeof(ib_spec->ipv6))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
+ return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
ethertype, 0xffff);
@@ -1513,10 +1642,26 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.val.dst_ip,
sizeof(ib_spec->ipv6.val.dst_ip));
+
+ set_tos(outer_headers_c, outer_headers_v,
+ ib_spec->ipv6.mask.traffic_class,
+ ib_spec->ipv6.val.traffic_class);
+
+ set_proto(outer_headers_c, outer_headers_v,
+ ib_spec->ipv6.mask.next_hdr,
+ ib_spec->ipv6.val.next_hdr);
+
+ MLX5_SET(fte_match_set_misc, misc_params_c,
+ outer_ipv6_flow_label,
+ ntohl(ib_spec->ipv6.mask.flow_label));
+ MLX5_SET(fte_match_set_misc, misc_params_v,
+ outer_ipv6_flow_label,
+ ntohl(ib_spec->ipv6.val.flow_label));
break;
case IB_FLOW_SPEC_TCP:
- if (ib_spec->size != sizeof(ib_spec->tcp_udp))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+ LAST_TCP_UDP_FIELD))
+ return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
0xff);
@@ -1534,8 +1679,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
case IB_FLOW_SPEC_UDP:
- if (ib_spec->size != sizeof(ib_spec->tcp_udp))
- return -EINVAL;
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+ LAST_TCP_UDP_FIELD))
+ return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
0xff);
@@ -1582,7 +1728,7 @@ static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
is_multicast_ether_addr(eth_spec->val.dst_mac);
}
-static bool is_valid_attr(struct ib_flow_attr *flow_attr)
+static bool is_valid_attr(const struct ib_flow_attr *flow_attr)
{
union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
bool has_ipv4_spec = false;
@@ -1626,12 +1772,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
list_for_each_entry_safe(iter, tmp, &handler->list, list) {
mlx5_del_flow_rule(iter->rule);
+ put_flow_table(dev, iter->prio, true);
list_del(&iter->list);
kfree(iter);
}
mlx5_del_flow_rule(handler->rule);
- put_flow_table(dev, &dev->flow_db.prios[handler->prio], true);
+ put_flow_table(dev, handler->prio, true);
mutex_unlock(&dev->flow_db.lock);
kfree(handler);
@@ -1647,10 +1794,16 @@ static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
return priority;
}
+enum flow_table_type {
+ MLX5_IB_FT_RX,
+ MLX5_IB_FT_TX
+};
+
#define MLX5_FS_MAX_TYPES 10
#define MLX5_FS_MAX_ENTRIES 32000UL
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
- struct ib_flow_attr *flow_attr)
+ struct ib_flow_attr *flow_attr,
+ enum flow_table_type ft_type)
{
bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
struct mlx5_flow_namespace *ns = NULL;
@@ -1681,6 +1834,19 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
&num_entries,
&num_groups);
prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
+ } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ allow_sniffer_and_nic_rx_shared_tir))
+ return ERR_PTR(-ENOTSUPP);
+
+ ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
+ MLX5_FLOW_NAMESPACE_SNIFFER_RX :
+ MLX5_FLOW_NAMESPACE_SNIFFER_TX);
+
+ prio = &dev->flow_db.sniffer[ft_type];
+ priority = 0;
+ num_entries = 1;
+ num_groups = 1;
}
if (!ns)
@@ -1706,13 +1872,13 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
- struct ib_flow_attr *flow_attr,
+ const struct ib_flow_attr *flow_attr,
struct mlx5_flow_destination *dst)
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
struct mlx5_flow_spec *spec;
- void *ib_flow = flow_attr + 1;
+ const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
u32 action;
int err = 0;
@@ -1738,9 +1904,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
ib_flow += ((union ib_flow_spec *)ib_flow)->size;
}
- /* Outer header support only */
- spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
- << 0;
+ spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
handler->rule = mlx5_add_flow_rule(ft, spec,
@@ -1753,7 +1917,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
goto free;
}
- handler->prio = ft_prio - dev->flow_db.prios;
+ ft_prio->refcount++;
+ handler->prio = ft_prio;
ft_prio->flow_table = ft;
free:
@@ -1777,6 +1942,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de
flow_attr, dst);
if (IS_ERR(handler_dst)) {
mlx5_del_flow_rule(handler->rule);
+ ft_prio->refcount--;
kfree(handler);
handler = handler_dst;
} else {
@@ -1838,6 +2004,8 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
&leftovers_specs[LEFTOVERS_UC].flow_attr,
dst);
if (IS_ERR(handler_ucast)) {
+ mlx5_del_flow_rule(handler->rule);
+ ft_prio->refcount--;
kfree(handler);
handler = handler_ucast;
} else {
@@ -1848,6 +2016,43 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
return handler;
}
+static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_rx,
+ struct mlx5_ib_flow_prio *ft_tx,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_ib_flow_handler *handler_rx;
+ struct mlx5_ib_flow_handler *handler_tx;
+ int err;
+ static const struct ib_flow_attr flow_attr = {
+ .num_of_specs = 0,
+ .size = sizeof(flow_attr)
+ };
+
+ handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
+ if (IS_ERR(handler_rx)) {
+ err = PTR_ERR(handler_rx);
+ goto err;
+ }
+
+ handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
+ if (IS_ERR(handler_tx)) {
+ err = PTR_ERR(handler_tx);
+ goto err_tx;
+ }
+
+ list_add(&handler_tx->list, &handler_rx->list);
+
+ return handler_rx;
+
+err_tx:
+ mlx5_del_flow_rule(handler_rx->rule);
+ ft_rx->refcount--;
+ kfree(handler_rx);
+err:
+ return ERR_PTR(err);
+}
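
create_sniffer_rule() is a textbook case of paired acquisition with reverse-order unwinding: if installing the TX rule fails, the already-installed RX rule (and the refcount it took) must be rolled back. A generic userspace sketch of the same shape, with hypothetical acquire()/release() helpers:

/* Acquire two resources; on failure of the second, release the first. */
#include <errno.h>
#include <stdlib.h>

struct resource { int id; };

static struct resource *acquire(int id)
{
	struct resource *r = malloc(sizeof(*r));

	if (r)
		r->id = id;
	return r;
}

static void release(struct resource *r)
{
	free(r);
}

static int setup_pair(struct resource **rx, struct resource **tx)
{
	int err;

	*rx = acquire(0);
	if (!*rx)
		return -ENOMEM;

	*tx = acquire(1);
	if (!*tx) {
		err = -ENOMEM;
		goto err_rx;		/* unwind in reverse order */
	}
	return 0;

err_rx:
	release(*rx);
	*rx = NULL;
	return err;
}

int main(void)
{
	struct resource *rx, *tx;

	if (!setup_pair(&rx, &tx)) {
		release(tx);
		release(rx);
	}
	return 0;
}
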
+
static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
@@ -1856,6 +2061,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct mlx5_ib_qp *mqp = to_mqp(qp);
struct mlx5_ib_flow_handler *handler = NULL;
struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
struct mlx5_ib_flow_prio *ft_prio;
int err;
@@ -1873,11 +2079,19 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
mutex_lock(&dev->flow_db.lock);
- ft_prio = get_flow_table(dev, flow_attr);
+ ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto unlock;
}
+ if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
+ if (IS_ERR(ft_prio_tx)) {
+ err = PTR_ERR(ft_prio_tx);
+ ft_prio_tx = NULL;
+ goto destroy_ft;
+ }
+ }
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
if (mqp->flags & MLX5_IB_QP_RSS)
@@ -1897,6 +2111,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
handler = create_leftovers_rule(dev, ft_prio, flow_attr,
dst);
+ } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
} else {
err = -EINVAL;
goto destroy_ft;
@@ -1908,7 +2124,6 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
goto destroy_ft;
}
- ft_prio->refcount++;
mutex_unlock(&dev->flow_db.lock);
kfree(dst);
@@ -1916,6 +2131,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
destroy_ft:
put_flow_table(dev, ft_prio, false);
+ if (ft_prio_tx)
+ put_flow_table(dev, ft_prio_tx, false);
unlock:
mutex_unlock(&dev->flow_db.lock);
kfree(dst);
@@ -2094,25 +2311,30 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
{
struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
struct ib_event ibev;
-
+ bool fatal = false;
u8 port = 0;
switch (event) {
case MLX5_DEV_EVENT_SYS_ERROR:
- ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
mlx5_ib_handle_internal_error(ibdev);
+ fatal = true;
break;
case MLX5_DEV_EVENT_PORT_UP:
- ibev.event = IB_EVENT_PORT_ACTIVE;
- port = (u8)param;
- break;
-
case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED:
- ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param;
+
+ /* In RoCE, port up/down events are handled in
+ * mlx5_netdev_event().
+ */
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ return;
+
+ ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
break;
case MLX5_DEV_EVENT_LID_CHANGE:
@@ -2148,6 +2370,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (ibdev->ib_active)
ib_dispatch_event(&ibev);
+
+ if (fatal)
+ ibdev->ib_active = false;
}
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
@@ -2235,7 +2460,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
goto error_0;
}
- pd = ib_alloc_pd(&dev->ib_dev);
+ pd = ib_alloc_pd(&dev->ib_dev, 0);
if (IS_ERR(pd)) {
mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
ret = PTR_ERR(pd);
@@ -2517,30 +2742,88 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
+static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
+ MLX5_FLOW_NAMESPACE_LAG);
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (!ns || !mlx5_lag_is_active(mdev))
+ return 0;
+
+ err = mlx5_cmd_create_vport_lag(mdev);
+ if (err)
+ return err;
+
+ ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_destroy_vport_lag;
+ }
+
+ dev->flow_db.lag_demux_ft = ft;
+ return 0;
+
+err_destroy_vport_lag:
+ mlx5_cmd_destroy_vport_lag(mdev);
+ return err;
+}
+
+static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ if (dev->flow_db.lag_demux_ft) {
+ mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
+ dev->flow_db.lag_demux_ft = NULL;
+
+ mlx5_cmd_destroy_vport_lag(mdev);
+ }
+}
+
+static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
+{
+ if (dev->roce.nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->roce.nb);
+ dev->roce.nb.notifier_call = NULL;
+ }
+}
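
mlx5_remove_roce_notifier() uses the notifier_call pointer itself as a "registered" flag: clearing it after unregistering makes the teardown idempotent, so it is safe to reach from several error paths as well as from normal removal. A compilable sketch of the idiom (the types and the unregister call are stand-ins, not the netdevice notifier API):

#include <stddef.h>

struct notifier { int (*notifier_call)(void *); };

static int dummy_event(void *p) { (void)p; return 0; }

static void fake_unregister(struct notifier *nb) { (void)nb; }

static void remove_notifier(struct notifier *nb)
{
	if (nb->notifier_call) {
		fake_unregister(nb);
		nb->notifier_call = NULL;	/* idempotent from here on */
	}
}

int main(void)
{
	struct notifier nb = { .notifier_call = dummy_event };

	remove_notifier(&nb);
	remove_notifier(&nb);	/* second call is a harmless no-op */
	return 0;
}
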
+
static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
{
int err;
dev->roce.nb.notifier_call = mlx5_netdev_event;
err = register_netdevice_notifier(&dev->roce.nb);
- if (err)
+ if (err) {
+ dev->roce.nb.notifier_call = NULL;
return err;
+ }
err = mlx5_nic_vport_enable_roce(dev->mdev);
if (err)
goto err_unregister_netdevice_notifier;
+ err = mlx5_roce_lag_init(dev);
+ if (err)
+ goto err_disable_roce;
+
return 0;
+err_disable_roce:
+ mlx5_nic_vport_disable_roce(dev->mdev);
+
err_unregister_netdevice_notifier:
- unregister_netdevice_notifier(&dev->roce.nb);
+ mlx5_remove_roce_notifier(dev);
return err;
}
static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
{
+ mlx5_roce_lag_cleanup(dev);
mlx5_nic_vport_disable_roce(dev->mdev);
- unregister_netdevice_notifier(&dev->roce.nb);
}
static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
@@ -2655,6 +2938,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
struct mlx5_ib_dev *dev;
enum rdma_link_layer ll;
int port_type_cap;
+ const char *name;
int err;
int i;
@@ -2687,7 +2971,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
- strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
+ if (!mlx5_lag_is_active(mdev))
+ name = "mlx5_%d";
+ else
+ name = "mlx5_bond_%d";
+
+ strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
@@ -2829,7 +3118,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
}
err = init_node_data(dev);
if (err)
- goto err_dealloc;
+ goto err_free_port;
mutex_init(&dev->flow_db.lock);
mutex_init(&dev->cap_mask_mutex);
@@ -2839,7 +3128,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (ll == IB_LINK_LAYER_ETHERNET) {
err = mlx5_enable_roce(dev);
if (err)
- goto err_dealloc;
+ goto err_free_port;
}
err = create_dev_resources(&dev->devr);
@@ -2889,8 +3178,10 @@ err_rsrc:
destroy_dev_resources(&dev->devr);
err_disable_roce:
- if (ll == IB_LINK_LAYER_ETHERNET)
+ if (ll == IB_LINK_LAYER_ETHERNET) {
mlx5_disable_roce(dev);
+ mlx5_remove_roce_notifier(dev);
+ }
err_free_port:
kfree(dev->port);
@@ -2906,6 +3197,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_dev *dev = context;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
+ mlx5_remove_roce_notifier(dev);
ib_unregister_device(&dev->ib_dev);
mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 67cc7416fdff..7d689903c87c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -44,6 +44,7 @@
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/mlx5-abi.h>
#define mlx5_ib_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
@@ -142,6 +143,7 @@ struct mlx5_ib_pd {
#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)
#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
+#define MLX5_IB_NUM_SNIFFER_FTS 2
struct mlx5_ib_flow_prio {
struct mlx5_flow_table *flow_table;
unsigned int refcount;
@@ -150,12 +152,14 @@ struct mlx5_ib_flow_prio {
struct mlx5_ib_flow_handler {
struct list_head list;
struct ib_flow ibflow;
- unsigned int prio;
+ struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_rule *rule;
};
struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
+ struct mlx5_flow_table *lag_demux_ft;
/* Protect flow steering bypass flow tables
* when add/del flow rules.
* only single add/removal of flow steering rule could be done
@@ -225,7 +229,7 @@ struct mlx5_ib_wq {
struct mlx5_ib_rwq {
struct ib_wq ibwq;
- u32 rqn;
+ struct mlx5_core_qp core_qp;
u32 rq_num_pas;
u32 log_rq_stride;
u32 log_rq_size;
@@ -603,6 +607,7 @@ struct mlx5_roce {
rwlock_t netdev_lock;
struct net_device *netdev;
struct notifier_block nb;
+ atomic_t next_port;
};
struct mlx5_ib_dev {
@@ -621,6 +626,8 @@ struct mlx5_ib_dev {
struct mlx5_ib_resources devr;
struct mlx5_mr_cache cache;
struct timer_list delay_timer;
+ /* Prevents soft lock on massive reg MRs */
+ struct mutex slow_path_mutex;
int fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_odp_caps odp_caps;
@@ -663,6 +670,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}
+static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
+{
+ return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
+}
+
static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
return container_of(mmkey, struct mlx5_ib_mr, mmkey);
@@ -947,4 +959,40 @@ static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
return 0;
}
+
+static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
+ struct mlx5_ib_create_qp *ucmd,
+ int inlen,
+ u32 *user_index)
+{
+ u8 cqe_version = ucontext->cqe_version;
+
+ if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
+ !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ return 0;
+
+ if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
+ !!cqe_version))
+ return -EINVAL;
+
+ return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
+
+static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
+ struct mlx5_ib_create_srq *ucmd,
+ int inlen,
+ u32 *user_index)
+{
+ u8 cqe_version = ucontext->cqe_version;
+
+ if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
+ !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ return 0;
+
+ if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
+ !!cqe_version))
+ return -EINVAL;
+
+ return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
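
Both helpers lean on field_avail() to decide whether the userspace command buffer is long enough to contain uidx, which is how the ABI stays extensible across versions. A userspace sketch of that offset arithmetic follows; the command struct is a simplified stand-in, not the real mlx5 ABI layout:

/* A field is usable only if the caller-supplied length covers it. */
#include <stddef.h>
#include <stdio.h>

struct create_cmd {
	unsigned long long buf_addr;	/* v1 of the ABI ends here */
	unsigned int uidx;		/* added in v2 */
};

#define field_avail(typ, fld, sz) \
	(offsetof(typ, fld) + sizeof(((typ *)0)->fld) <= (sz))

int main(void)
{
	size_t old_inlen = sizeof(unsigned long long);	/* v1 caller */
	size_t new_inlen = sizeof(struct create_cmd);	/* v2 caller */

	printf("v1 caller provides uidx: %d\n",
	       field_avail(struct create_cmd, uidx, old_inlen));	/* 0 */
	printf("v2 caller provides uidx: %d\n",
	       field_avail(struct create_cmd, uidx, new_inlen));	/* 1 */
	return 0;
}
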
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 6f7e34753abc..4e9012463c37 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -40,7 +40,6 @@
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
-#include "user.h"
enum {
MAX_PENDING_REG_MR = 8,
@@ -611,7 +610,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
int err;
int i;
- cache->wq = create_singlethread_workqueue("mkey_cache");
+ mutex_init(&dev->slow_path_mutex);
+ cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
if (!cache->wq) {
mlx5_ib_warn(dev, "failed to create work queue\n");
return -ENOMEM;
@@ -1183,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto error;
}
- if (!mr)
+ if (!mr) {
+ mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
page_shift, access_flags);
+ mutex_unlock(&dev->slow_path_mutex);
+ }
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 34e79e709c67..cacb631a7b0a 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -782,8 +782,8 @@ void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
int __init mlx5_ib_odp_init(void)
{
- mlx5_ib_page_fault_wq =
- create_singlethread_workqueue("mlx5_ib_page_faults");
+ mlx5_ib_page_fault_wq = alloc_ordered_workqueue("mlx5_ib_page_faults",
+ WQ_MEM_RECLAIM);
if (!mlx5_ib_page_fault_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 9529b464fbdc..d1e921816bfe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -35,7 +35,6 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
-#include "user.h"
/* not supported currently */
static int wq_signature;
@@ -53,7 +52,6 @@ enum {
enum {
MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_CACHE_LINE_SIZE = 64,
};
static const u32 mlx5_ib_opcode[] = {
@@ -77,6 +75,17 @@ struct mlx5_wqe_eth_pad {
u8 rsvd0[16];
};
+enum raw_qp_set_mask_map {
+ MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
+};
+
+struct mlx5_modify_raw_qp_param {
+ u16 operation;
+
+ u32 set_mask; /* raw_qp_set_mask_map */
+ u8 rq_q_ctr_id;
+};
+
static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
@@ -1863,7 +1872,8 @@ static void get_cqs(enum ib_qp_type qp_type,
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- u16 operation);
+ const struct mlx5_modify_raw_qp_param *raw_qp_param,
+ u8 lag_tx_affinity);
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
@@ -1888,8 +1898,11 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp);
} else {
- err = modify_raw_packet_qp(dev, qp,
- MLX5_CMD_OP_2RST_QP);
+ struct mlx5_modify_raw_qp_param raw_qp_param = {
+ .operation = MLX5_CMD_OP_2RST_QP
+ };
+
+ err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
}
if (err)
mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
@@ -2038,8 +2051,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
- to_mcq(init_attr->recv_cq)->mcq.cqn,
- to_mcq(init_attr->send_cq)->mcq.cqn);
+ init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
+ init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
qp->trans_qp.xrcdn = xrcdn;
@@ -2153,6 +2166,31 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
return err;
}
+static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
+ struct mlx5_ib_sq *sq, u8 tx_affinity)
+{
+ void *in;
+ void *tisc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
+
+ tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
+
+ err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct ib_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
@@ -2363,8 +2401,9 @@ static int ib_mask_to_mlx5_opt(int ib_mask)
return result;
}
-static int modify_raw_packet_qp_rq(struct mlx5_core_dev *dev,
- struct mlx5_ib_rq *rq, int new_state)
+static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
void *in;
void *rqc;
@@ -2381,7 +2420,17 @@ static int modify_raw_packet_qp_rq(struct mlx5_core_dev *dev,
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(rqc, rqc, state, new_state);
- err = mlx5_core_modify_rq(dev, rq->base.mqp.qpn, in, inlen);
+ if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
+ if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID);
+ MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
+ } else {
+ pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
+ dev->ib_dev.name);
+ }
+ }
+
+ err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
if (err)
goto out;
@@ -2422,7 +2471,8 @@ out:
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- u16 operation)
+ const struct mlx5_modify_raw_qp_param *raw_qp_param,
+ u8 tx_affinity)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
@@ -2431,7 +2481,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
int sq_state;
int err;
- switch (operation) {
+ switch (raw_qp_param->operation) {
case MLX5_CMD_OP_RST2INIT_QP:
rq_state = MLX5_RQC_STATE_RDY;
sq_state = MLX5_SQC_STATE_RDY;
@@ -2448,21 +2498,31 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
case MLX5_CMD_OP_INIT2RTR_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
- /* Nothing to do here... */
- return 0;
+ if (raw_qp_param->set_mask)
+ return -EINVAL;
+ return 0;
default:
WARN_ON(1);
return -EINVAL;
}
if (qp->rq.wqe_cnt) {
- err = modify_raw_packet_qp_rq(dev->mdev, rq, rq_state);
+ err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
if (err)
return err;
}
- if (qp->sq.wqe_cnt)
+ if (qp->sq.wqe_cnt) {
+ if (tx_affinity) {
+ err = modify_raw_packet_tx_affinity(dev->mdev, sq,
+ tx_affinity);
+ if (err)
+ return err;
+ }
+
return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+ }
return 0;
}
@@ -2514,12 +2574,14 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_qp_context *context;
struct mlx5_ib_pd *pd;
+ struct mlx5_ib_port *mibport = NULL;
enum mlx5_qp_state mlx5_cur, mlx5_new;
enum mlx5_qp_optpar optpar;
int sqd_event;
int mlx5_st;
int err;
u16 op;
+ u8 tx_affinity = 0;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
@@ -2549,6 +2611,23 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
}
+ if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+ if ((ibqp->qp_type == IB_QPT_RC) ||
+ (ibqp->qp_type == IB_QPT_UD &&
+ !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
+ (ibqp->qp_type == IB_QPT_UC) ||
+ (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+ (ibqp->qp_type == IB_QPT_XRC_INI) ||
+ (ibqp->qp_type == IB_QPT_XRC_TGT)) {
+ if (mlx5_lag_is_active(dev->mdev)) {
+ tx_affinity = (unsigned int)atomic_add_return(1,
+ &dev->roce.next_port) %
+ MLX5_MAX_PORTS + 1;
+ context->flags |= cpu_to_be32(tx_affinity << 24);
+ }
+ }
+ }
+
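
The tx_affinity computed above round-robins QPs across the LAG ports. Since '%' binds tighter than '+', the expression evaluates as (counter % MLX5_MAX_PORTS) + 1, i.e. a port number in 1..MLX5_MAX_PORTS, never 0. A plain-C sketch with a non-atomic stand-in for atomic_add_return():

#include <stdio.h>

#define NUM_PORTS 2

static unsigned int next_port;

static unsigned int pick_tx_affinity(void)
{
	/* atomic_add_return() returns the new value, hence pre-increment */
	return ++next_port % NUM_PORTS + 1;	/* 2, 1, 2, 1, ... */
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("qp %d -> port %u\n", i, pick_tx_affinity());
	return 0;
}
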
if (is_sqp(ibqp->qp_type)) {
context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
} else if (ibqp->qp_type == IB_QPT_UD ||
@@ -2654,8 +2733,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
- struct mlx5_ib_port *mibport = &dev->port[port_num];
-
+ mibport = &dev->port[port_num];
context->qp_counter_set_usr_page |=
cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
}
@@ -2690,11 +2768,20 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
- err = modify_raw_packet_qp(dev, qp, op);
- else
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+ struct mlx5_modify_raw_qp_param raw_qp_param = {};
+
+ raw_qp_param.operation = op;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
+ raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
+ }
+ err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
+ } else {
err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
&base->mqp);
+ }
+
if (err)
goto out;
@@ -4497,6 +4584,28 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
return 0;
}
+static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
+{
+ struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
+ struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
+ struct ib_event event;
+
+ if (rwq->ibwq.event_handler) {
+ event.device = rwq->ibwq.device;
+ event.element.wq = &rwq->ibwq;
+ switch (type) {
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_WQ_FATAL;
+ break;
+ default:
+ mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
+ return;
+ }
+
+ rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
+ }
+}
+
static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
struct ib_wq_init_attr *init_attr)
{
@@ -4534,7 +4643,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
- err = mlx5_core_create_rq(dev->mdev, in, inlen, &rwq->rqn);
+ err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
kvfree(in);
return err;
}
@@ -4650,7 +4759,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
- rwq->ibwq.wq_num = rwq->rqn;
+ rwq->ibwq.wq_num = rwq->core_qp.qpn;
rwq->ibwq.state = IB_WQS_RESET;
if (udata->outlen) {
resp.response_length = offsetof(typeof(resp), response_length) +
@@ -4660,10 +4769,12 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
goto err_copy;
}
+ rwq->core_qp.event = mlx5_ib_wq_event;
+ rwq->ibwq.event_handler = init_attr->event_handler;
return &rwq->ibwq;
err_copy:
- mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+ mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
destroy_user_rq(pd, rwq);
err:
@@ -4676,7 +4787,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq)
struct mlx5_ib_dev *dev = to_mdev(wq->device);
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
- mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+ mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
destroy_user_rq(wq->pd, rwq);
kfree(rwq);
@@ -4703,6 +4814,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
udata->inlen))
return ERR_PTR(-EOPNOTSUPP);
+ if (init_attr->log_ind_tbl_size >
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
+ mlx5_ib_dbg(dev, "log_ind_tbl_size = %d exceeds the supported maximum %d\n",
+ init_attr->log_ind_tbl_size,
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
+ return ERR_PTR(-EINVAL);
+ }
+
min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
if (udata->outlen && udata->outlen < min_resp_len)
return ERR_PTR(-EINVAL);
@@ -4808,7 +4927,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
MLX5_SET(rqc, rqc, state, wq_state);
- err = mlx5_core_modify_rq(dev->mdev, rwq->rqn, in, inlen);
+ err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
kvfree(in);
if (!err)
rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index ed6ac52355f1..3857dbd9c956 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -38,7 +38,6 @@
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
-#include "user.h"
/* not supported currently */
static int srq_signature;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
deleted file mode 100644
index 188dac4301b5..000000000000
--- a/drivers/infiniband/hw/mlx5/user.h
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_IB_USER_H
-#define MLX5_IB_USER_H
-
-#include <linux/types.h>
-
-#include "mlx5_ib.h"
-
-enum {
- MLX5_QP_FLAG_SIGNATURE = 1 << 0,
- MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
-};
-
-enum {
- MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
-};
-
-enum {
- MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
-};
-
-
-/* Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define MLX5_IB_UVERBS_ABI_VERSION 1
-
-/* Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mlx5_ib_alloc_ucontext_req {
- __u32 total_num_uuars;
- __u32 num_low_latency_uuars;
-};
-
-struct mlx5_ib_alloc_ucontext_req_v2 {
- __u32 total_num_uuars;
- __u32 num_low_latency_uuars;
- __u32 flags;
- __u32 comp_mask;
- __u8 max_cqe_version;
- __u8 reserved0;
- __u16 reserved1;
- __u32 reserved2;
-};
-
-enum mlx5_ib_alloc_ucontext_resp_mask {
- MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
-};
-
-enum mlx5_user_cmds_supp_uhw {
- MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
-};
-
-struct mlx5_ib_alloc_ucontext_resp {
- __u32 qp_tab_size;
- __u32 bf_reg_size;
- __u32 tot_uuars;
- __u32 cache_line_size;
- __u16 max_sq_desc_sz;
- __u16 max_rq_desc_sz;
- __u32 max_send_wqebb;
- __u32 max_recv_wr;
- __u32 max_srq_recv_wr;
- __u16 num_ports;
- __u16 reserved1;
- __u32 comp_mask;
- __u32 response_length;
- __u8 cqe_version;
- __u8 cmds_supp_uhw;
- __u16 reserved2;
- __u64 hca_core_clock_offset;
-};
-
-struct mlx5_ib_alloc_pd_resp {
- __u32 pdn;
-};
-
-struct mlx5_ib_tso_caps {
- __u32 max_tso; /* Maximum tso payload size in bytes */
-
- /* Corresponding bit will be set if qp type from
- * 'enum ib_qp_type' is supported, e.g.
- * supported_qpts |= 1 << IB_QPT_UD
- */
- __u32 supported_qpts;
-};
-
-struct mlx5_ib_query_device_resp {
- __u32 comp_mask;
- __u32 response_length;
- struct mlx5_ib_tso_caps tso_caps;
-};
-
-struct mlx5_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
- __u32 cqe_size;
- __u32 reserved; /* explicit padding (optional on i386) */
-};
-
-struct mlx5_ib_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct mlx5_ib_resize_cq {
- __u64 buf_addr;
- __u16 cqe_size;
- __u16 reserved0;
- __u32 reserved1;
-};
-
-struct mlx5_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
- __u32 flags;
- __u32 reserved0; /* explicit padding (optional on i386) */
- __u32 uidx;
- __u32 reserved1;
-};
-
-struct mlx5_ib_create_srq_resp {
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mlx5_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
- __u32 sq_wqe_count;
- __u32 rq_wqe_count;
- __u32 rq_wqe_shift;
- __u32 flags;
- __u32 uidx;
- __u32 reserved0;
- __u64 sq_buf_addr;
-};
-
-/* RX Hash function flags */
-enum mlx5_rx_hash_function_flags {
- MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
-};
-
-/*
- * RX Hash flags, these flags allows to set which incoming packet's field should
- * participates in RX Hash. Each flag represent certain packet's field,
- * when the flag is set the field that is represented by the flag will
- * participate in RX Hash calculation.
- * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP
- * and *TCP and *UDP flags can't be enabled together on the same QP.
-*/
-enum mlx5_rx_hash_fields {
- MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
- MLX5_RX_HASH_DST_IPV4 = 1 << 1,
- MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
- MLX5_RX_HASH_DST_IPV6 = 1 << 3,
- MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
- MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
- MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
- MLX5_RX_HASH_DST_PORT_UDP = 1 << 7
-};
-
-struct mlx5_ib_create_qp_rss {
- __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
- __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
- __u8 rx_key_len; /* valid only for Toeplitz */
- __u8 reserved[6];
- __u8 rx_hash_key[128]; /* valid only for Toeplitz */
- __u32 comp_mask;
- __u32 reserved1;
-};
-
-struct mlx5_ib_create_qp_resp {
- __u32 uuar_index;
-};
-
-struct mlx5_ib_alloc_mw {
- __u32 comp_mask;
- __u8 num_klms;
- __u8 reserved1;
- __u16 reserved2;
-};
-
-struct mlx5_ib_create_wq {
- __u64 buf_addr;
- __u64 db_addr;
- __u32 rq_wqe_count;
- __u32 rq_wqe_shift;
- __u32 user_index;
- __u32 flags;
- __u32 comp_mask;
- __u32 reserved;
-};
-
-struct mlx5_ib_create_wq_resp {
- __u32 response_length;
- __u32 reserved;
-};
-
-struct mlx5_ib_create_rwq_ind_tbl_resp {
- __u32 response_length;
- __u32 reserved;
-};
-
-struct mlx5_ib_modify_wq {
- __u32 comp_mask;
- __u32 reserved;
-};
-
-static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
- struct mlx5_ib_create_qp *ucmd,
- int inlen,
- u32 *user_index)
-{
- u8 cqe_version = ucontext->cqe_version;
-
- if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
- !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
- return 0;
-
- if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
- !!cqe_version))
- return -EINVAL;
-
- return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
-}
-
-static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
- struct mlx5_ib_create_srq *ucmd,
- int inlen,
- u32 *user_index)
-{
- u8 cqe_version = ucontext->cqe_version;
-
- if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
- !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
- return 0;
-
- if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
- !!cqe_version))
- return -EINVAL;
-
- return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
-}
-#endif /* MLX5_IB_USER_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 712d2a30fbe5..f6474c24f193 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -187,7 +187,7 @@ int __init mthca_catas_init(void)
{
INIT_WORK(&catas_work, catas_reset);
- catas_wq = create_singlethread_workqueue("mthca_catas");
+ catas_wq = alloc_ordered_workqueue("mthca_catas", WQ_MEM_RECLAIM);
if (!catas_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7c3f2fb44ba5..9139405c4810 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -153,7 +153,8 @@ static void node_desc_override(struct ib_device *dev,
mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
mutex_lock(&to_mdev(dev)->cap_mask_mutex);
- memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+ memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
mutex_unlock(&to_mdev(dev)->cap_mask_mutex);
}
}
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 6c00d04b8b28..c6fe89d79248 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+ ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index da2335f7f7c3..358930a41e36 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -46,7 +46,7 @@
#include "mthca_dev.h"
#include "mthca_cmd.h"
-#include "mthca_user.h"
+#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"
static void init_query_mad(struct ib_smp *mad)
@@ -193,7 +193,8 @@ static int mthca_modify_device(struct ib_device *ibdev,
if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
return -ERESTARTSYS;
- memcpy(ibdev->node_desc, props->node_desc, 64);
+ memcpy(ibdev->node_desc, props->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
}
@@ -1138,7 +1139,7 @@ static int mthca_init_node_data(struct mthca_dev *dev)
if (err)
goto out;
- memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+ memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bd9d132f11c7..e7430c9254d3 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -165,7 +165,7 @@ do { \
#include "nes_hw.h"
#include "nes_verbs.h"
#include "nes_context.h"
-#include "nes_user.h"
+#include <rdma/nes-abi.h>
#include "nes_cm.h"
#include "nes_mgt.h"
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 7f0aa23aef9d..57db9b332f44 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2692,12 +2692,12 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core);
nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
- cm_core->event_wq = create_singlethread_workqueue("nesewq");
+ cm_core->event_wq = alloc_ordered_workqueue("nesewq", 0);
if (!cm_core->event_wq)
goto out_free_cmcore;
cm_core->post_event = nes_cm_post_event;
nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
- cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
+ cm_core->disconn_wq = alloc_ordered_workqueue("nesdwq", 0);
if (!cm_core->disconn_wq)
goto out_free_wq;
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
deleted file mode 100644
index 529c421bb15c..000000000000
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef NES_USER_H
-#define NES_USER_H
-
-#include <linux/types.h>
-
-#define NES_ABI_USERSPACE_VER 2
-#define NES_ABI_KERNEL_VER 2
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct nes_alloc_ucontext_req {
- __u32 reserved32;
- __u8 userspace_ver;
- __u8 reserved8[3];
-};
-
-struct nes_alloc_ucontext_resp {
- __u32 max_pds; /* maximum pds allowed for this user process */
- __u32 max_qps; /* maximum qps allowed for this user process */
- __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
- __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
- __u8 kernel_ver;
- __u8 reserved[2];
-};
-
-struct nes_alloc_pd_resp {
- __u32 pd_id;
- __u32 mmap_db_index;
-};
-
-struct nes_create_cq_req {
- __u64 user_cq_buffer;
- __u32 mcrqf;
- __u8 reserved[4];
-};
-
-struct nes_create_qp_req {
- __u64 user_wqe_buffers;
- __u64 user_qp_buffer;
-};
-
-enum iwnes_memreg_type {
- IWNES_MEMREG_TYPE_MEM = 0x0000,
- IWNES_MEMREG_TYPE_QP = 0x0001,
- IWNES_MEMREG_TYPE_CQ = 0x0002,
- IWNES_MEMREG_TYPE_MW = 0x0003,
- IWNES_MEMREG_TYPE_FMR = 0x0004,
- IWNES_MEMREG_TYPE_FMEM = 0x0005,
-};
-
-struct nes_mem_reg_req {
- __u32 reg_type; /* indicates if id is memory, QP or CQ */
- __u32 reserved;
-};
-
-struct nes_create_cq_resp {
- __u32 cq_id;
- __u32 cq_size;
- __u32 mmap_db_index;
- __u32 reserved;
-};
-
-struct nes_create_qp_resp {
- __u32 qp_id;
- __u32 actual_sq_size;
- __u32 actual_rq_size;
- __u32 mmap_sq_db_index;
- __u32 mmap_rq_db_index;
- __u32 nes_drv_opt;
-};
-
-#endif /* NES_USER_H */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
deleted file mode 100644
index 430b1350fe96..000000000000
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* This file is part of the Emulex RoCE Device Driver for
- * RoCE (RDMA over Converged Ethernet) adapters.
- * Copyright (C) 2012-2015 Emulex. All rights reserved.
- * EMULEX and SLI are trademarks of Emulex.
- * www.emulex.com
- *
- * This software is available to you under a choice of one of two licenses.
- * You may choose to be licensed under the terms of the GNU General Public
- * License (GPL) Version 2, available from the file COPYING in the main
- * directory of this source tree, or the BSD license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Contact Information:
- * linux-drivers@emulex.com
- *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
- */
-
-#ifndef __OCRDMA_ABI_H__
-#define __OCRDMA_ABI_H__
-
-#define OCRDMA_ABI_VERSION 2
-#define OCRDMA_BE_ROCE_ABI_VERSION 1
-/* user kernel communication data structures. */
-
-struct ocrdma_alloc_ucontext_resp {
- u32 dev_id;
- u32 wqe_size;
- u32 max_inline_data;
- u32 dpp_wqe_size;
- u64 ah_tbl_page;
- u32 ah_tbl_len;
- u32 rqe_size;
- u8 fw_ver[32];
- /* for future use/new features in progress */
- u64 rsvd1;
- u64 rsvd2;
-};
-
-struct ocrdma_alloc_pd_ureq {
- u64 rsvd1;
-};
-
-struct ocrdma_alloc_pd_uresp {
- u32 id;
- u32 dpp_enabled;
- u32 dpp_page_addr_hi;
- u32 dpp_page_addr_lo;
- u64 rsvd1;
-};
-
-struct ocrdma_create_cq_ureq {
- u32 dpp_cq;
- u32 rsvd; /* pad */
-};
-
-#define MAX_CQ_PAGES 8
-struct ocrdma_create_cq_uresp {
- u32 cq_id;
- u32 page_size;
- u32 num_pages;
- u32 max_hw_cqe;
- u64 page_addr[MAX_CQ_PAGES];
- u64 db_page_addr;
- u32 db_page_size;
- u32 phase_change;
- /* for future use/new features in progress */
- u64 rsvd1;
- u64 rsvd2;
-};
-
-#define MAX_QP_PAGES 8
-#define MAX_UD_AV_PAGES 8
-
-struct ocrdma_create_qp_ureq {
- u8 enable_dpp_cq;
- u8 rsvd;
- u16 dpp_cq_id;
- u32 rsvd1; /* pad */
-};
-
-struct ocrdma_create_qp_uresp {
- u16 qp_id;
- u16 sq_dbid;
- u16 rq_dbid;
- u16 resv0; /* pad */
- u32 sq_page_size;
- u32 rq_page_size;
- u32 num_sq_pages;
- u32 num_rq_pages;
- u64 sq_page_addr[MAX_QP_PAGES];
- u64 rq_page_addr[MAX_QP_PAGES];
- u64 db_page_addr;
- u32 db_page_size;
- u32 dpp_credit;
- u32 dpp_offset;
- u32 num_wqe_allocated;
- u32 num_rqe_allocated;
- u32 db_sq_offset;
- u32 db_rq_offset;
- u32 db_shift;
- u64 rsvd[11];
-} __packed;
-
-struct ocrdma_create_srq_uresp {
- u16 rq_dbid;
- u16 resv0; /* pad */
- u32 resv1;
-
- u32 rq_page_size;
- u32 num_rq_pages;
-
- u64 rq_page_addr[MAX_QP_PAGES];
- u64 db_page_addr;
-
- u32 db_page_size;
- u32 num_rqe_allocated;
- u32 db_rq_offset;
- u32 db_shift;
-
- u64 rsvd2;
- u64 rsvd3;
-};
-
-#endif /* __OCRDMA_ABI_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 07d0c6c5b046..896071502739 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -56,7 +56,7 @@
#include "be_roce.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"
-#include "ocrdma_abi.h"
+#include <rdma/ocrdma-abi.h>
MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
@@ -119,6 +119,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
{
strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
+ BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
dev->ibdev.owner = THIS_MODULE;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 0aa854737e74..6af44f8db3d5 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -51,7 +51,7 @@
#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
-#include "ocrdma_abi.h"
+#include <rdma/ocrdma-abi.h>
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
new file mode 100644
index 000000000000..6c9f3923e838
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -0,0 +1,8 @@
+config INFINIBAND_QEDR
+ tristate "QLogic RoCE driver"
+ depends on 64BIT && QEDE
+ select QED_LL2
+ select QED_RDMA
+ ---help---
+ This driver provides low-level InfiniBand over Ethernet
+ support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile
new file mode 100644
index 000000000000..ba7067c77f2f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
+
+qedr-y := main.o verbs.o qedr_cm.o
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
new file mode 100644
index 000000000000..7b74d09a8217
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -0,0 +1,914 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/netdevice.h>
+#include <linux/iommu.h>
+#include <net/addrconf.h>
+#include <linux/qed/qede_roce.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+
+MODULE_DESCRIPTION("QLogic 40G/100G RoCE Driver");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QEDR_MODULE_VERSION);
+
+#define QEDR_WQ_MULTIPLIER_DFT (3)
+
+void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+ enum ib_event_type type)
+{
+ struct ib_event ibev;
+
+ ibev.device = &dev->ibdev;
+ ibev.element.port_num = port_num;
+ ibev.event = type;
+
+ ib_dispatch_event(&ibev);
+}
+
+static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct qedr_dev *qedr = get_qedr_dev(ibdev);
+ u32 fw_ver = (u32)qedr->attr.fw_ver;
+
+ snprintf(str, str_len, "%d.%d.%d.%d",
+ (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
+ (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
+}
+
+static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
+{
+ struct qedr_dev *qdev;
+
+ qdev = get_qedr_dev(dev);
+ dev_hold(qdev->ndev);
+
+ /* The HW vendor's device driver must guarantee
+ * that this function returns NULL before the net device reaches
+ * NETDEV_UNREGISTER_FINAL state.
+ */
+ return qdev->ndev;
+}
+
+static int qedr_register_device(struct qedr_dev *dev)
+{
+ strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
+
+ dev->ibdev.node_guid = dev->attr.node_guid;
+ memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
+ dev->ibdev.owner = THIS_MODULE;
+ dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;
+
+ dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
+ QEDR_UVERBS(QUERY_DEVICE) |
+ QEDR_UVERBS(QUERY_PORT) |
+ QEDR_UVERBS(ALLOC_PD) |
+ QEDR_UVERBS(DEALLOC_PD) |
+ QEDR_UVERBS(CREATE_COMP_CHANNEL) |
+ QEDR_UVERBS(CREATE_CQ) |
+ QEDR_UVERBS(RESIZE_CQ) |
+ QEDR_UVERBS(DESTROY_CQ) |
+ QEDR_UVERBS(REQ_NOTIFY_CQ) |
+ QEDR_UVERBS(CREATE_QP) |
+ QEDR_UVERBS(MODIFY_QP) |
+ QEDR_UVERBS(QUERY_QP) |
+ QEDR_UVERBS(DESTROY_QP) |
+ QEDR_UVERBS(REG_MR) |
+ QEDR_UVERBS(DEREG_MR) |
+ QEDR_UVERBS(POLL_CQ) |
+ QEDR_UVERBS(POST_SEND) |
+ QEDR_UVERBS(POST_RECV);
+
+ dev->ibdev.phys_port_cnt = 1;
+ dev->ibdev.num_comp_vectors = dev->num_cnq;
+ dev->ibdev.node_type = RDMA_NODE_IB_CA;
+
+ dev->ibdev.query_device = qedr_query_device;
+ dev->ibdev.query_port = qedr_query_port;
+ dev->ibdev.modify_port = qedr_modify_port;
+
+ dev->ibdev.query_gid = qedr_query_gid;
+ dev->ibdev.add_gid = qedr_add_gid;
+ dev->ibdev.del_gid = qedr_del_gid;
+
+ dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
+ dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
+ dev->ibdev.mmap = qedr_mmap;
+
+ dev->ibdev.alloc_pd = qedr_alloc_pd;
+ dev->ibdev.dealloc_pd = qedr_dealloc_pd;
+
+ dev->ibdev.create_cq = qedr_create_cq;
+ dev->ibdev.destroy_cq = qedr_destroy_cq;
+ dev->ibdev.resize_cq = qedr_resize_cq;
+ dev->ibdev.req_notify_cq = qedr_arm_cq;
+
+ dev->ibdev.create_qp = qedr_create_qp;
+ dev->ibdev.modify_qp = qedr_modify_qp;
+ dev->ibdev.query_qp = qedr_query_qp;
+ dev->ibdev.destroy_qp = qedr_destroy_qp;
+
+ dev->ibdev.query_pkey = qedr_query_pkey;
+
+ dev->ibdev.create_ah = qedr_create_ah;
+ dev->ibdev.destroy_ah = qedr_destroy_ah;
+
+ dev->ibdev.get_dma_mr = qedr_get_dma_mr;
+ dev->ibdev.dereg_mr = qedr_dereg_mr;
+ dev->ibdev.reg_user_mr = qedr_reg_user_mr;
+ dev->ibdev.alloc_mr = qedr_alloc_mr;
+ dev->ibdev.map_mr_sg = qedr_map_mr_sg;
+
+ dev->ibdev.poll_cq = qedr_poll_cq;
+ dev->ibdev.post_send = qedr_post_send;
+ dev->ibdev.post_recv = qedr_post_recv;
+
+ dev->ibdev.process_mad = qedr_process_mad;
+ dev->ibdev.get_port_immutable = qedr_port_immutable;
+ dev->ibdev.get_netdev = qedr_get_netdev;
+
+ dev->ibdev.dma_device = &dev->pdev->dev;
+
+ dev->ibdev.get_link_layer = qedr_link_layer;
+ dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
+
+ return ib_register_device(&dev->ibdev, NULL);
+}
+
+/* This function allocates fast-path status block memory */
+static int qedr_alloc_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&dev->pdev->dev,
+ sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
+ if (!sb_virt)
+ return -ENOMEM;
+
+ rc = dev->ops->common->sb_init(dev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_CNQ);
+ if (rc) {
+ pr_err("Status block initialization failed\n");
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qedr_free_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, int sb_id)
+{
+ if (sb_info->sb_virt) {
+ dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+ }
+}
+
+static void qedr_free_resources(struct qedr_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ }
+
+ kfree(dev->cnq_array);
+ kfree(dev->sb_array);
+ kfree(dev->sgid_tbl);
+}
+
+static int qedr_alloc_resources(struct qedr_dev *dev)
+{
+ struct qedr_cnq *cnq;
+ __le16 *cons_pi;
+ u16 n_entries;
+ int i, rc;
+
+ dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
+ GFP_KERNEL);
+ if (!dev->sgid_tbl)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->sgid_lock);
+
+ /* Allocate Status blocks for CNQ */
+ dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
+ GFP_KERNEL);
+ if (!dev->sb_array) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ dev->cnq_array = kcalloc(dev->num_cnq,
+ sizeof(*dev->cnq_array), GFP_KERNEL);
+ if (!dev->cnq_array) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
+
+ /* Allocate CNQ PBLs */
+ n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+ for (i = 0; i < dev->num_cnq; i++) {
+ cnq = &dev->cnq_array[i];
+
+ rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
+ dev->sb_start + i);
+ if (rc)
+ goto err3;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ n_entries,
+ sizeof(struct regpair *),
+ &cnq->pbl);
+ if (rc)
+ goto err4;
+
+ cnq->dev = dev;
+ cnq->sb = &dev->sb_array[i];
+ cons_pi = dev->sb_array[i].sb_virt->pi_array;
+ cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
+ cnq->index = i;
+ sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
+ i, qed_chain_get_cons_idx(&cnq->pbl));
+ }
+
+ return 0;
+err4:
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+err3:
+ for (--i; i >= 0; i--) {
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ }
+ kfree(dev->cnq_array);
+err2:
+ kfree(dev->sb_array);
+err1:
+ kfree(dev->sgid_tbl);
+ return rc;
+}
+
+/* QEDR sysfs interface */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qedr_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+}
+
+static ssize_t show_hca_type(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
+
+static struct device_attribute *qedr_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type
+};
+
+static void qedr_remove_sysfiles(struct qedr_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+ device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
+}
+
+static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
+{
+ struct pci_dev *bridge;
+ u32 val;
+
+ dev->atomic_cap = IB_ATOMIC_NONE;
+
+ bridge = pdev->bus->self;
+ if (!bridge)
+ return;
+
+ /* Check whether we are connected directly or via a switch */
+ while (bridge && bridge->bus->parent) {
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
+ bridge->bus->number, bridge->bus->primary);
+ /* Need to check Atomic Op Routing Supported all the way to
+ * root complex.
+ */
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+ if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
+ pcie_capability_clear_word(pdev,
+ PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ return;
+ }
+ bridge = bridge->bus->parent->self;
+ }
+ bridge = pdev->bus->self;
+
+ /* The path routes AtomicOps; enable requests if the upstream
+ * bridge can also complete 64-bit AtomicOps.
+ */
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+ if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ dev->atomic_cap = IB_ATOMIC_GLOB;
+ } else {
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ }
+}
+
+static const struct qed_rdma_ops *qed_ops;
+
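+/* Rebuild a 64-bit handle from the hi/lo 32-bit words of a firmware
+ * regpair.
+ */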
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+static irqreturn_t qedr_irq_handler(int irq, void *handle)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+ struct qedr_cnq *cnq = handle;
+ struct regpair *cq_handle;
+ struct qedr_cq *cq;
+
+ qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
+
+ qed_sb_update_sb_idx(cnq->sb);
+
+ hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ /* Align protocol-index and chain reads */
+ rmb();
+
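+ /* Each CNQ element carries the handle of a CQ with new completions;
+ * walk the chain and invoke each CQ's completion handler.
+ */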
+ while (sw_comp_cons != hw_comp_cons) {
+ cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
+ cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
+ cq_handle->lo);
+
+ if (!cq) {
+ DP_ERR(cnq->dev,
+ "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
+ cq_handle->hi, cq_handle->lo, sw_comp_cons,
+ hw_comp_cons);
+
+ break;
+ }
+
+ if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
+ DP_ERR(cnq->dev,
+ "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
+ cq_handle->hi, cq_handle->lo, cq);
+ break;
+ }
+
+ cq->arm_flags = 0;
+
+ if (cq->ibcq.comp_handler)
+ (*cq->ibcq.comp_handler)
+ (&cq->ibcq, cq->ibcq.cq_context);
+
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ cnq->n_comp++;
+ }
+
+ qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
+ sw_comp_cons);
+
+ qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);
+
+ return IRQ_HANDLED;
+}
+
+static void qedr_sync_free_irqs(struct qedr_dev *dev)
+{
+ u32 vector;
+ int i;
+
+ for (i = 0; i < dev->int_info.used_cnt; i++) {
+ if (dev->int_info.msix_cnt) {
+ vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+ synchronize_irq(vector);
+ free_irq(vector, &dev->cnq_array[i]);
+ }
+ }
+
+ dev->int_info.used_cnt = 0;
+}
+
+static int qedr_req_msix_irqs(struct qedr_dev *dev)
+{
+ int i, rc = 0;
+
+ if (dev->num_cnq > dev->int_info.msix_cnt) {
+ DP_ERR(dev,
+ "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
+ dev->num_cnq, dev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+ qedr_irq_handler, 0, dev->cnq_array[i].name,
+ &dev->cnq_array[i]);
+ if (rc) {
+ DP_ERR(dev, "Request cnq %d irq failed\n", i);
+ qedr_sync_free_irqs(dev);
+ } else {
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
+ dev->cnq_array[i].name, i,
+ &dev->cnq_array[i]);
+ dev->int_info.used_cnt++;
+ }
+ }
+
+ return rc;
+}
+
+static int qedr_setup_irqs(struct qedr_dev *dev)
+{
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");
+
+ /* Learn Interrupt configuration */
+ rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
+ if (rc < 0)
+ return rc;
+
+ rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
+ if (rc) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
+ return rc;
+ }
+
+ if (dev->int_info.msix_cnt) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
+ dev->int_info.msix_cnt);
+ rc = qedr_req_msix_irqs(dev);
+ if (rc)
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");
+
+ return 0;
+}
+
+static int qedr_set_device_attr(struct qedr_dev *dev)
+{
+ struct qed_rdma_device *qed_attr;
+ struct qedr_device_attr *attr;
+ u32 page_size;
+
+ /* Part 1 - query core capabilities */
+ qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+
+ /* Part 2 - check capabilities */
+ page_size = ~qed_attr->page_size_caps + 1;
+ if (page_size > PAGE_SIZE) {
+ DP_ERR(dev,
+ "Kernel PAGE_SIZE is %lu which is smaller than minimum page size (%u) required by qedr\n",
+ PAGE_SIZE, page_size);
+ return -ENODEV;
+ }
+
+ /* Part 3 - copy and update capabilities */
+ attr = &dev->attr;
+ attr->vendor_id = qed_attr->vendor_id;
+ attr->vendor_part_id = qed_attr->vendor_part_id;
+ attr->hw_ver = qed_attr->hw_ver;
+ attr->fw_ver = qed_attr->fw_ver;
+ attr->node_guid = qed_attr->node_guid;
+ attr->sys_image_guid = qed_attr->sys_image_guid;
+ attr->max_cnq = qed_attr->max_cnq;
+ attr->max_sge = qed_attr->max_sge;
+ attr->max_inline = qed_attr->max_inline;
+ attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
+ attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
+ attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
+ attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
+ attr->max_dev_resp_rd_atomic_resc =
+ qed_attr->max_dev_resp_rd_atomic_resc;
+ attr->max_cq = qed_attr->max_cq;
+ attr->max_qp = qed_attr->max_qp;
+ attr->max_mr = qed_attr->max_mr;
+ attr->max_mr_size = qed_attr->max_mr_size;
+ attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
+ attr->max_mw = qed_attr->max_mw;
+ attr->max_fmr = qed_attr->max_fmr;
+ attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
+ attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
+ attr->max_pd = qed_attr->max_pd;
+ attr->max_ah = qed_attr->max_ah;
+ attr->max_pkey = qed_attr->max_pkey;
+ attr->max_srq = qed_attr->max_srq;
+ attr->max_srq_wr = qed_attr->max_srq_wr;
+ attr->dev_caps = qed_attr->dev_caps;
+ attr->page_size_caps = qed_attr->page_size_caps;
+ attr->dev_ack_delay = qed_attr->dev_ack_delay;
+ attr->reserved_lkey = qed_attr->reserved_lkey;
+ attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
+ attr->max_stats_queues = qed_attr->max_stats_queues;
+
+ return 0;
+}
+
+void qedr_unaffiliated_event(void *context,
+ u8 event_code)
+{
+ pr_err("unaffiliated event not implemented yet\n");
+}
+
+void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
+{
+#define EVENT_TYPE_NOT_DEFINED 0
+#define EVENT_TYPE_CQ 1
+#define EVENT_TYPE_QP 2
+ struct qedr_dev *dev = (struct qedr_dev *)context;
+ union event_ring_data *data = fw_handle;
+ u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
+ data->roce_handle.lo;
+ u8 event_type = EVENT_TYPE_NOT_DEFINED;
+ struct ib_event event;
+ struct ib_cq *ibcq;
+ struct ib_qp *ibqp;
+ struct qedr_cq *cq;
+ struct qedr_qp *qp;
+
+ switch (e_code) {
+ case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
+ case ROCE_ASYNC_EVENT_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_QP_FATAL;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ default:
+ DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
+ roce_handle64);
+ }
+
+ switch (event_type) {
+ case EVENT_TYPE_CQ:
+ cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
+ if (cq) {
+ ibcq = &cq->ibcq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+ } else {
+ WARN(1,
+ "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "CQ event %d on hanlde %p\n", e_code, cq);
+ break;
+ case EVENT_TYPE_QP:
+ qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
+ if (qp) {
+ ibqp = &qp->ibqp;
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+ } else {
+ WARN(1,
+ "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "QP event %d on hanlde %p\n", e_code, qp);
+ break;
+ default:
+ break;
+ }
+}
+
+static int qedr_init_hw(struct qedr_dev *dev)
+{
+ struct qed_rdma_add_user_out_params out_params;
+ struct qed_rdma_start_in_params *in_params;
+ struct qed_rdma_cnq_params *cur_pbl;
+ struct qed_rdma_events events;
+ dma_addr_t p_phys_table;
+ u32 page_cnt;
+ int rc = 0;
+ int i;
+
+ in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
+ if (!in_params) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ in_params->desired_cnq = dev->num_cnq;
+ for (i = 0; i < dev->num_cnq; i++) {
+ cur_pbl = &in_params->cnq_pbl_list[i];
+
+ page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
+ cur_pbl->num_pbl_pages = page_cnt;
+
+ p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
+ cur_pbl->pbl_ptr = (u64)p_phys_table;
+ }
+
+ events.affiliated_event = qedr_affiliated_event;
+ events.unaffiliated_event = qedr_unaffiliated_event;
+ events.context = dev;
+
+ in_params->events = &events;
+ in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
+ in_params->max_mtu = dev->ndev->mtu;
+ ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
+
+ rc = dev->ops->rdma_init(dev->cdev, in_params);
+ if (rc)
+ goto out;
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
+ if (rc)
+ goto out;
+
+ dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
+ dev->db_phys_addr = out_params.dpi_phys_addr;
+ dev->db_size = out_params.dpi_size;
+ dev->dpi = out_params.dpi;
+
+ rc = qedr_set_device_attr(dev);
+out:
+ kfree(in_params);
+ if (rc)
+ DP_ERR(dev, "Init HW Failed rc = %d\n", rc);
+
+ return rc;
+}
+
+void qedr_stop_hw(struct qedr_dev *dev)
+{
+ dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
+ dev->ops->rdma_stop(dev->rdma_ctx);
+}
+
+static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
+ struct net_device *ndev)
+{
+ struct qed_dev_rdma_info dev_info;
+ struct qedr_dev *dev;
+ int rc = 0, i;
+
+ dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev) {
+ pr_err("Unable to allocate ib device\n");
+ return NULL;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");
+
+ dev->pdev = pdev;
+ dev->ndev = ndev;
+ dev->cdev = cdev;
+
+ qed_ops = qed_get_rdma_ops();
+ if (!qed_ops) {
+ DP_ERR(dev, "Failed to get qed roce operations\n");
+ goto init_err;
+ }
+
+ dev->ops = qed_ops;
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto init_err;
+
+ dev->num_hwfns = dev_info.common.num_hwfns;
+ dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
+
+ dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
+ if (!dev->num_cnq) {
+ DP_ERR(dev, "not enough CNQ resources.\n");
+ goto init_err;
+ }
+
+ dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;
+
+ qedr_pci_set_atomic(dev, pdev);
+
+ rc = qedr_alloc_resources(dev);
+ if (rc)
+ goto init_err;
+
+ rc = qedr_init_hw(dev);
+ if (rc)
+ goto alloc_err;
+
+ rc = qedr_setup_irqs(dev);
+ if (rc)
+ goto irq_err;
+
+ rc = qedr_register_device(dev);
+ if (rc) {
+ DP_ERR(dev, "Unable to allocate register device\n");
+ goto reg_err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+ if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
+ goto sysfs_err;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
+ return dev;
+
+sysfs_err:
+ ib_unregister_device(&dev->ibdev);
+reg_err:
+ qedr_sync_free_irqs(dev);
+irq_err:
+ qedr_stop_hw(dev);
+alloc_err:
+ qedr_free_resources(dev);
+init_err:
+ ib_dealloc_device(&dev->ibdev);
+ DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
+
+ return NULL;
+}
+
+static void qedr_remove(struct qedr_dev *dev)
+{
+ /* First unregister with stack to stop all the active traffic
+ * of the registered clients.
+ */
+ qedr_remove_sysfiles(dev);
+ ib_unregister_device(&dev->ibdev);
+
+ qedr_stop_hw(dev);
+ qedr_sync_free_irqs(dev);
+ qedr_free_resources(dev);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static int qedr_close(struct qedr_dev *dev)
+{
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+
+ return 0;
+}
+
+static void qedr_shutdown(struct qedr_dev *dev)
+{
+ qedr_close(dev);
+ qedr_remove(dev);
+}
+
+static void qedr_mac_address_change(struct qedr_dev *dev)
+{
+ union ib_gid *sgid = &dev->sgid_tbl[0];
+ u8 guid[8], mac_addr[6];
+ int rc;
+
+ /* Update SGID */
+ ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
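+ /* Derive the port GUID from the MAC using the modified EUI-64
+ * scheme: flip the universal/local bit of the first octet and
+ * insert 0xff, 0xfe between the two MAC halves.
+ */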
+ guid[0] = mac_addr[0] ^ 2;
+ guid[1] = mac_addr[1];
+ guid[2] = mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = mac_addr[3];
+ guid[6] = mac_addr[4];
+ guid[7] = mac_addr[5];
+ sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ memcpy(&sgid->raw[8], guid, sizeof(guid));
+
+ /* Update LL2 */
+ rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
+ dev->gsi_ll2_mac_address,
+ dev->ndev->dev_addr);
+
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+
+ if (rc)
+ DP_ERR(dev, "Error updating mac filter\n");
+}
+
+/* Events arrive via the NIC driver, which guarantees that all
+ * NIC-specific initialization has completed before the RoCE driver
+ * notifies the stack.
+ */
+static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+{
+ switch (event) {
+ case QEDE_UP:
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+ case QEDE_DOWN:
+ qedr_close(dev);
+ break;
+ case QEDE_CLOSE:
+ qedr_shutdown(dev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qedr_mac_address_change(dev);
+ break;
+ default:
+ pr_err("Event not supported\n");
+ }
+}
+
+static struct qedr_driver qedr_drv = {
+ .name = "qedr_driver",
+ .add = qedr_add,
+ .remove = qedr_remove,
+ .notify = qedr_notify,
+};
+
+static int __init qedr_init_module(void)
+{
+ return qede_roce_register_driver(&qedr_drv);
+}
+
+static void __exit qedr_exit_module(void)
+{
+ qede_roce_unregister_driver(&qedr_drv);
+}
+
+module_init(qedr_init_module);
+module_exit(qedr_exit_module);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
new file mode 100644
index 000000000000..620badd7d4fb
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -0,0 +1,495 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_H__
+#define __QEDR_H__
+
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_roce_if.h>
+#include <linux/qed/qede_roce.h>
+#include "qedr_hsi.h"
+
+#define QEDR_MODULE_VERSION "8.10.10.0"
+#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
+#define DP_NAME(dev) ((dev)->ibdev.name)
+
+#define DP_DEBUG(dev, module, fmt, ...) \
+ pr_debug("(%s) " module ": " fmt, \
+ DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)
+
+#define QEDR_MSG_INIT "INIT"
+#define QEDR_MSG_MISC "MISC"
+#define QEDR_MSG_CQ " CQ"
+#define QEDR_MSG_MR " MR"
+#define QEDR_MSG_RQ " RQ"
+#define QEDR_MSG_SQ " SQ"
+#define QEDR_MSG_QP " QP"
+#define QEDR_MSG_GSI " GSI"
+
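+/* Signature stamped into every CQ; checked in the interrupt handler to
+ * validate CQ handles returned by the firmware.
+ */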
+#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
+
+struct qedr_dev;
+
+struct qedr_cnq {
+ struct qedr_dev *dev;
+ struct qed_chain pbl;
+ struct qed_sb_info *sb;
+ char name[32];
+ u64 n_comp;
+ __le16 *hw_cons_ptr;
+ u8 index;
+};
+
+#define QEDR_MAX_SGID 128
+
+struct qedr_device_attr {
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 fw_ver;
+ u64 node_guid;
+ u64 sys_image_guid;
+ u8 max_cnq;
+ u8 max_sge;
+ u16 max_inline;
+ u32 max_sqe;
+ u32 max_rqe;
+ u8 max_qp_resp_rd_atomic_resc;
+ u8 max_qp_req_rd_atomic_resc;
+ u64 max_dev_resp_rd_atomic_resc;
+ u32 max_cq;
+ u32 max_qp;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_cqe;
+ u32 max_mw;
+ u32 max_fmr;
+ u32 max_mr_mw_fmr_pbl;
+ u64 max_mr_mw_fmr_size;
+ u32 max_pd;
+ u32 max_ah;
+ u8 max_pkey;
+ u32 max_srq;
+ u32 max_srq_wr;
+ u8 max_srq_sge;
+ u8 max_stats_queues;
+ u32 dev_caps;
+
+ u64 page_size_caps;
+ u8 dev_ack_delay;
+ u32 reserved_lkey;
+ u32 bad_pkey_counter;
+ struct qed_rdma_events events;
+};
+
+struct qedr_dev {
+ struct ib_device ibdev;
+ struct qed_dev *cdev;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+
+ enum ib_atomic_cap atomic_cap;
+
+ void *rdma_ctx;
+ struct qedr_device_attr attr;
+
+ const struct qed_rdma_ops *ops;
+ struct qed_int_info int_info;
+
+ struct qed_sb_info *sb_array;
+ struct qedr_cnq *cnq_array;
+ int num_cnq;
+ int sb_start;
+
+ void __iomem *db_addr;
+ u64 db_phys_addr;
+ u32 db_size;
+ u16 dpi;
+
+ union ib_gid *sgid_tbl;
+
+ /* Lock for sgid table */
+ spinlock_t sgid_lock;
+
+ u64 guid;
+
+ u32 dp_module;
+ u8 dp_level;
+ u8 num_hwfns;
+ uint wq_multiplier;
+ u8 gsi_ll2_mac_address[ETH_ALEN];
+ int gsi_qp_created;
+ struct qedr_cq *gsi_sqcq;
+ struct qedr_cq *gsi_rqcq;
+ struct qedr_qp *gsi_qp;
+};
+
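+/* SQ: the maximum SQE count is bounded by the PBL capacity assuming the
+ * worst case where every WQE spans the maximum number of ring elements.
+ */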
+#define QEDR_MAX_SQ_PBL (0x8000)
+#define QEDR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge))
+#define QEDR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE ((QEDR_MAX_SQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_SQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
+/* RQ */
+#define QEDR_MAX_RQ_PBL (0x2000)
+#define QEDR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge))
+#define QEDR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_RQE_ELEMENT_SIZE)
+#define QEDR_MAX_RQE ((QEDR_MAX_RQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_RQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_RQE_ELEMENTS_PER_RQE))
+
+#define QEDR_CQE_SIZE (sizeof(union rdma_cqe))
+#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
+#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
+ sizeof(u64)) - 1)
+#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
+ (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))
+
+#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
+
+#define QEDR_MAX_PORT (1)
+
+#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+#define QEDR_ROCE_PKEY_MAX 1
+#define QEDR_ROCE_PKEY_TABLE_LEN 1
+#define QEDR_ROCE_PKEY_DEFAULT 0xffff
+
+struct qedr_pbl {
+ struct list_head list_entry;
+ void *va;
+ dma_addr_t pa;
+};
+
+struct qedr_ucontext {
+ struct ib_ucontext ibucontext;
+ struct qedr_dev *dev;
+ struct qedr_pd *pd;
+ u64 dpi_addr;
+ u64 dpi_phys_addr;
+ u32 dpi_size;
+ u16 dpi;
+
+ struct list_head mm_head;
+
+ /* Lock to protect mm list */
+ struct mutex mm_list_lock;
+};
+
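+/* Doorbell record: structured view and raw qword, written to the device
+ * as a single 64-bit store.
+ */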
+union db_prod64 {
+ struct rdma_pwm_val32_data data;
+ u64 raw;
+};
+
+enum qedr_cq_type {
+ QEDR_CQ_TYPE_GSI,
+ QEDR_CQ_TYPE_KERNEL,
+ QEDR_CQ_TYPE_USER,
+};
+
+struct qedr_pbl_info {
+ u32 num_pbls;
+ u32 num_pbes;
+ u32 pbl_size;
+ u32 pbe_size;
+ bool two_layered;
+};
+
+struct qedr_userq {
+ struct ib_umem *umem;
+ struct qedr_pbl_info pbl_info;
+ struct qedr_pbl *pbl_tbl;
+ u64 buf_addr;
+ size_t buf_len;
+};
+
+struct qedr_cq {
+ struct ib_cq ibcq;
+
+ enum qedr_cq_type cq_type;
+ u32 sig;
+
+ u16 icid;
+
+ /* Lock to protect completion handler */
+ spinlock_t comp_handler_lock;
+
+ /* Lock to serialize access to the CQ by multiple users */
+ spinlock_t cq_lock;
+ u8 arm_flags;
+ struct qed_chain pbl;
+
+ void __iomem *db_addr;
+ union db_prod64 db;
+
+ u8 pbl_toggle;
+ union rdma_cqe *latest_cqe;
+ union rdma_cqe *toggle_cqe;
+
+ u32 cq_cons;
+
+ struct qedr_userq q;
+};
+
+struct qedr_pd {
+ struct ib_pd ibpd;
+ u32 pd_id;
+ struct qedr_ucontext *uctx;
+};
+
+struct qedr_mm {
+ struct {
+ u64 phy_addr;
+ unsigned long len;
+ } key;
+ struct list_head entry;
+};
+
+union db_prod32 {
+ struct rdma_pwm_val16_data data;
+ u32 raw;
+};
+
+struct qedr_qp_hwq_info {
+ /* WQE Elements */
+ struct qed_chain pbl;
+ u64 p_phys_addr_tbl;
+ u32 max_sges;
+
+ /* WQE */
+ u16 prod;
+ u16 cons;
+ u16 wqe_cons;
+ u16 gsi_cons;
+ u16 max_wr;
+
+ /* DB */
+ void __iomem *db;
+ union db_prod32 db_data;
+};
+
+#define QEDR_INC_SW_IDX(p_info, index) \
+ do { \
+ p_info->index = (p_info->index + 1) & \
+ qed_chain_get_capacity(p_info->pbl); \
+ } while (0)
+
+enum qedr_qp_err_bitmap {
+ QEDR_QP_ERR_SQ_FULL = 1,
+ QEDR_QP_ERR_RQ_FULL = 2,
+ QEDR_QP_ERR_BAD_SR = 4,
+ QEDR_QP_ERR_BAD_RR = 8,
+ QEDR_QP_ERR_SQ_PBL_FULL = 16,
+ QEDR_QP_ERR_RQ_PBL_FULL = 32,
+};
+
+struct qedr_qp {
+ struct ib_qp ibqp; /* must be first */
+ struct qedr_dev *dev;
+
+ struct qedr_qp_hwq_info sq;
+ struct qedr_qp_hwq_info rq;
+
+ u32 max_inline_data;
+
+ /* Lock protecting the QP's queues */
+ spinlock_t q_lock;
+ struct qedr_cq *sq_cq;
+ struct qedr_cq *rq_cq;
+ struct qedr_srq *srq;
+ enum qed_roce_qp_state state;
+ u32 id;
+ struct qedr_pd *pd;
+ enum ib_qp_type qp_type;
+ struct qed_rdma_qp *qed_qp;
+ u32 qp_id;
+ u16 icid;
+ u16 mtu;
+ int sgid_idx;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 qkey;
+ u32 dest_qp_num;
+
+ /* Relevant to qps created from kernel space only (ULPs) */
+ u8 prev_wqe_size;
+ u16 wqe_cons;
+ u32 err_bitmap;
+ bool signaled;
+
+ /* SQ shadow */
+ struct {
+ u64 wr_id;
+ enum ib_wc_opcode opcode;
+ u32 bytes_len;
+ u8 wqe_size;
+ bool signaled;
+ dma_addr_t icrc_mapping;
+ u32 *icrc;
+ struct qedr_mr *mr;
+ } *wqe_wr_id;
+
+ /* RQ shadow */
+ struct {
+ u64 wr_id;
+ struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
+ u8 wqe_size;
+
+ u8 smac[ETH_ALEN];
+ u16 vlan_id;
+ int rc;
+ } *rqe_wr_id;
+
+ /* Relevant to qps created from user space only (applications) */
+ struct qedr_userq usq;
+ struct qedr_userq urq;
+};
+
+struct qedr_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+};
+
+enum qedr_mr_type {
+ QEDR_MR_USER,
+ QEDR_MR_KERNEL,
+ QEDR_MR_DMA,
+ QEDR_MR_FRMR,
+};
+
+struct mr_info {
+ struct qedr_pbl *pbl_table;
+ struct qedr_pbl_info pbl_info;
+ struct list_head free_pbl_list;
+ struct list_head inuse_pbl_list;
+ u32 completed;
+ u32 completed_handled;
+};
+
+struct qedr_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+
+ struct qed_rdma_register_tid_in_params hw_mr;
+ enum qedr_mr_type type;
+
+ struct qedr_dev *dev;
+ struct mr_info info;
+
+ u64 *pages;
+ u32 npages;
+};
+
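+/* Like the generic SET_FIELD() but only ORs the shifted flag in, assuming
+ * the field was already zero.
+ */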
+#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
+
+#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
+ RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
+#define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
+ RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
+#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA)
+
+static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
+{
+ info->cons = (info->cons + 1) % info->max_wr;
+ info->wqe_cons++;
+}
+
+static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
+{
+ info->prod = (info->prod + 1) % info->max_wr;
+}
+
+static inline int qedr_get_dmac(struct qedr_dev *dev,
+ struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+ union ib_gid zero_sgid = { { 0 } };
+ struct in6_addr in6;
+
+ if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
+ DP_ERR(dev, "Local port GID not supported\n");
+ eth_zero_addr(mac_addr);
+ return -EINVAL;
+ }
+
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ ether_addr_copy(mac_addr, ah_attr->dmac);
+
+ return 0;
+}
+
+static inline
+struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct qedr_ucontext, ibucontext);
+}
+
+static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct qedr_dev, ibdev);
+}
+
+static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct qedr_pd, ibpd);
+}
+
+static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct qedr_cq, ibcq);
+}
+
+static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct qedr_qp, ibqp);
+}
+
+static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct qedr_ah, ibah);
+}
+
+static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct qedr_mr, ibmr);
+}
+#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
new file mode 100644
index 000000000000..63890ebb72bd
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -0,0 +1,622 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qedr.h"
+#include "qedr_hsi.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_hsi.h"
+#include "qedr_cm.h"
+
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
+{
+ info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
+}
+
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ dev->gsi_qp_created = 1;
+ dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
+ dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
+ dev->gsi_qp = qp;
+}
+
+void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+ struct qedr_cq *cq = dev->gsi_sqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
+ dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
+ cq->ibcq.comp_handler ? "Yes" : "No");
+
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
+ pkt->header.baddr);
+ kfree(pkt);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+ qedr_inc_sw_gsi_cons(&qp->sq);
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+ }
+}
+
+void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_rx_params *params)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)_dev;
+ struct qedr_cq *cq = dev->gsi_rqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
+ qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
+ qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
+ ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+
+ qedr_inc_sw_gsi_cons(&qp->rq);
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+ }
+}
+
+static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qedr_cq *cq;
+
+ cq = get_qedr_cq(attrs->send_cq);
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+ cq = get_qedr_cq(attrs->recv_cq);
+ /* if a dedicated recv_cq was used, delete it too */
+ if (iparams.icid != cq->icid) {
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+}
+
+static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
+ attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
+ attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_send_wr is too large %d>%d\n",
+ attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp)
+{
+ struct qed_roce_ll2_params ll2_params;
+ int rc;
+
+ rc = qedr_check_gsi_qp_attrs(dev, attrs);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* configure and start LL2 */
+ memset(&ll2_params, 0, sizeof(ll2_params));
+ ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
+ ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
+ ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
+ ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
+ ll2_params.cb_cookie = (void *)dev;
+ ll2_params.mtu = dev->ndev->mtu;
+ ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
+ rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+ if (rc) {
+ DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ /* create QP */
+ qp->ibqp.qp_num = 1;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
+
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id)
+ goto err;
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id)
+ goto err;
+
+ qedr_store_gsi_qp_cq(dev, qp, attrs);
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ /* the GSI CQ is handled by the driver so remove it from the FW */
+ qedr_destroy_gsi_cq(dev, attrs);
+ dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
+ dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
+
+ return &qp->ibqp;
+
+err:
+ kfree(qp->rqe_wr_id);
+
+ rc = dev->ops->roce_ll2_stop(dev->cdev);
+ if (rc)
+ DP_ERR(dev, "create gsi qp: failed destroy on create\n");
+
+ return ERR_PTR(-ENOMEM);
+}
+
+int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+{
+ int rc;
+
+ rc = dev->ops->roce_ll2_stop(dev->cdev);
+ if (rc)
+ DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
+ else
+ DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
+
+ return rc;
+}
+
+#define QEDR_MAX_UD_HEADER_SIZE (100)
+#define QEDR_GSI_QPN (1)
+static inline int qedr_gsi_build_header(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_send_wr *swr,
+ struct ib_ud_header *udh,
+ int *roce_mode)
+{
+ bool has_vlan = false, has_grh_ipv6 = true;
+ struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+ struct ib_global_route *grh = &ah_attr->grh;
+ union ib_gid sgid;
+ int send_size = 0;
+ u16 vlan_id = 0;
+ u16 ether_type;
+ struct ib_gid_attr sgid_attr;
+ int rc;
+ int ip_ver = 0;
+
+ bool has_udp = false;
+ int i;
+
+ send_size = 0;
+ for (i = 0; i < swr->num_sge; ++i)
+ send_size += swr->sg_list[i].length;
+
+ rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
+ grh->sgid_index, &sgid, &sgid_attr);
+ if (rc) {
+ DP_ERR(dev,
+ "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
+ ah_attr->port_num, grh->sgid_index);
+ return rc;
+ }
+
+ if (sgid_attr.ndev) {
+ vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+ if (vlan_id < VLAN_CFI_MASK)
+ has_vlan = true;
+
+ dev_put(sgid_attr.ndev);
+ }
+
+ if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
+ DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
+ ah_attr->grh.sgid_index);
+ return -ENOENT;
+ }
+
+ has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+ if (!has_udp) {
+ /* RoCE v1 */
+ ether_type = ETH_P_ROCE;
+ *roce_mode = ROCE_V1;
+ } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+ /* RoCE v2 IPv4 */
+ ip_ver = 4;
+ ether_type = ETH_P_IP;
+ has_grh_ipv6 = false;
+ *roce_mode = ROCE_V2_IPV4;
+ } else {
+ /* RoCE v2 IPv6 */
+ ip_ver = 6;
+ ether_type = ETH_P_IPV6;
+ *roce_mode = ROCE_V2_IPV6;
+ }
+
+ rc = ib_ud_header_init(send_size, false, true, has_vlan,
+ has_grh_ipv6, ip_ver, has_udp, 0, udh);
+ if (rc) {
+ DP_ERR(dev, "gsi post send: failed to init header\n");
+ return rc;
+ }
+
+ /* ENET + VLAN headers */
+ ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac);
+ ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
+ if (has_vlan) {
+ udh->eth.type = htons(ETH_P_8021Q);
+ udh->vlan.tag = htons(vlan_id);
+ udh->vlan.type = htons(ether_type);
+ } else {
+ udh->eth.type = htons(ether_type);
+ }
+
+ /* BTH */
+ udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
+ udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
+ udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
+ udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+
+ /* DETH */
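+ /* 0x80010000 is the well-known QKEY reserved for GSI (QP1) traffic */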
+ udh->deth.qkey = htonl(0x80010000);
+ udh->deth.source_qpn = htonl(QEDR_GSI_QPN);
+
+ if (has_grh_ipv6) {
+ /* GRH / IPv6 header */
+ udh->grh.traffic_class = grh->traffic_class;
+ udh->grh.flow_label = grh->flow_label;
+ udh->grh.hop_limit = grh->hop_limit;
+ udh->grh.destination_gid = grh->dgid;
+ memcpy(&udh->grh.source_gid.raw, &sgid.raw,
+ sizeof(udh->grh.source_gid.raw));
+ } else {
+ /* IPv4 header */
+ u32 ipv4_addr;
+
+ udh->ip4.protocol = IPPROTO_UDP;
+ udh->ip4.tos = htonl(ah_attr->grh.flow_label);
+ udh->ip4.frag_off = htons(IP_DF);
+ udh->ip4.ttl = ah_attr->grh.hop_limit;
+
+ ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
+ udh->ip4.saddr = ipv4_addr;
+ ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
+ udh->ip4.daddr = ipv4_addr;
+ /* note: checksum is calculated by the device */
+ }
+
+ /* UDP */
+ if (has_udp) {
+ udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
+ udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
+ udh->udp.csum = 0;
+ /* The UDP length field is deliberately left at zero */
+ }
+ return 0;
+}
+
+static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_send_wr *swr,
+ struct qed_roce_ll2_packet **p_packet)
+{
+ u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
+ struct qed_roce_ll2_packet *packet;
+ struct pci_dev *pdev = dev->pdev;
+ int roce_mode, header_size;
+ struct ib_ud_header udh;
+ int i, rc;
+
+ *p_packet = NULL;
+
+ rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
+ if (rc)
+ return rc;
+
+ header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
+
+ packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
+ &packet->header.baddr,
+ GFP_ATOMIC);
+ if (!packet->header.vaddr) {
+ kfree(packet);
+ return -ENOMEM;
+ }
+
+ if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+ else
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+
+ packet->roce_mode = roce_mode;
+ memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+ packet->header.len = header_size;
+ packet->n_seg = swr->num_sge;
+ for (i = 0; i < packet->n_seg; i++) {
+ packet->payload[i].baddr = swr->sg_list[i].addr;
+ packet->payload[i].len = swr->sg_list[i].length;
+ }
+
+ *p_packet = packet;
+
+ return 0;
+}
+
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qed_roce_ll2_packet *pkt = NULL;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_roce_ll2_tx_params params;
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int rc;
+
+ if (qp->state != QED_ROCE_QP_STATE_RTS) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
+ DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
+ wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (wr->opcode != IB_WR_SEND) {
+ DP_ERR(dev,
+ "gsi post send: failed due to unsupported opcode %d\n",
+ wr->opcode);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ memset(&params, 0, sizeof(params));
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
+ if (rc) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ goto err;
+ }
+
+ rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+ if (!rc) {
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+ qedr_inc_sw_prod(&qp->sq);
+ DP_DEBUG(qp->dev, QEDR_MSG_GSI,
+ "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
+ wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+ } else {
+ if (rc == QED_ROCE_TX_HEAD_FAILURE) {
+ /* TX failed while posting header - release resources */
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+ pkt->header.vaddr, pkt->header.baddr);
+ kfree(pkt);
+ } else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
+ /* Nothing to do here: TX failed while posting a
+ * fragment, so the resources are released in the TX
+ * completion callback.
+ */
+ }
+
+ DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
+ rc = -EAGAIN;
+ *bad_wr = wr;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (wr->next) {
+ DP_ERR(dev,
+ "gsi post send: failed second WR. Only one WR may be passed at a time\n");
+ *bad_wr = wr->next;
+ rc = -EINVAL;
+ }
+
+ return rc;
+
+err:
+ *bad_wr = wr;
+ return rc;
+}
+
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_roce_ll2_buffer buf;
+ unsigned long flags;
+ int status = 0;
+ int rc;
+
+ if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
+ (qp->state != QED_ROCE_QP_STATE_RTS)) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ memset(&buf, 0, sizeof(buf));
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ while (wr) {
+ if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
+ wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
+ goto err;
+ }
+
+ buf.baddr = wr->sg_list[0].addr;
+ buf.len = wr->sg_list[0].length;
+
+ rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+ if (rc) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer (rc=%d)\n",
+ rc);
+ goto err;
+ }
+
+ memset(&qp->rqe_wr_id[qp->rq.prod], 0,
+ sizeof(qp->rqe_wr_id[qp->rq.prod]));
+ qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return status;
+err:
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+}
+
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+ int i = 0;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
+ while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+ wc[i].opcode = IB_WC_RECV;
+ wc[i].pkey_index = 0;
+ wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
+ IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
+ /* 0 - currently only one recv sg is supported */
+ wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
+ wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
+ ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
+ wc[i].wc_flags |= IB_WC_WITH_SMAC;
+ if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+ wc[i].wc_flags |= IB_WC_WITH_VLAN;
+ wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+ }
+
+ qedr_inc_sw_cons(&qp->rq);
+ i++;
+ }
+
+ while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+ wc[i].opcode = IB_WC_SEND;
+ wc[i].status = IB_WC_SUCCESS;
+
+ qedr_inc_sw_cons(&qp->sq);
+ i++;
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
+ num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
+ qp->sq.gsi_cons, qp->ibqp.qp_num);
+
+ return i;
+}
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/qedr/qedr_cm.h
index 295f422b9a3a..9ba6e15cd93f 100644
--- a/drivers/infiniband/hw/cxgb4/user.h
+++ b/drivers/infiniband/hw/qedr/qedr_cm.h
@@ -1,5 +1,5 @@
-/*
- * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -17,7 +17,7 @@
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
@@ -29,52 +29,33 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __C4IW_USER_H__
-#define __C4IW_USER_H__
+#ifndef LINUX_QEDR_CM_H_
+#define LINUX_QEDR_CM_H_
-#define C4IW_UVERBS_ABI_VERSION 3
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-struct c4iw_create_cq_resp {
- __u64 key;
- __u64 gts_key;
- __u64 memsize;
- __u32 cqid;
- __u32 size;
- __u32 qid_mask;
- __u32 reserved; /* explicit padding (optional for i386) */
-};
+#define QEDR_GSI_MAX_RECV_WR (4096)
+#define QEDR_GSI_MAX_SEND_WR (4096)
+#define QEDR_GSI_MAX_RECV_SGE (1) /* LL2 FW limitation */
-enum {
- C4IW_QPF_ONCHIP = (1<<0)
-};
+#define ETH_P_ROCE (0x8915)
+#define QEDR_ROCE_V2_UDP_SPORT (0000)
-struct c4iw_create_qp_resp {
- __u64 ma_sync_key;
- __u64 sq_key;
- __u64 rq_key;
- __u64 sq_db_gts_key;
- __u64 rq_db_gts_key;
- __u64 sq_memsize;
- __u64 rq_memsize;
- __u32 sqid;
- __u32 rqid;
- __u32 sq_size;
- __u32 rq_size;
- __u32 qid_mask;
- __u32 flags;
-};
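+/* An IPv4-mapped GID (::ffff:a.b.c.d) carries the IPv4 address in bytes
+ * 12-15 of the GID.
+ */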
+static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
+{
+ return *(u32 *)(void *)&gid[12];
+}
-struct c4iw_alloc_ucontext_resp {
- __u64 status_page_key;
- __u32 status_page_size;
- __u32 reserved; /* explicit padding (optional for i386) */
-};
+/* RDMA CM */
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp);
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
+int qedr_destroy_gsi_qp(struct qedr_dev *dev);
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
new file mode 100644
index 000000000000..66d27521373f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi.h
@@ -0,0 +1,56 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_ROCE__
+#define __QED_HSI_ROCE__
+
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
+
+/* Affiliated asynchronous events / errors enumeration */
+enum roce_async_events_type {
+ ROCE_ASYNC_EVENT_NONE = 0,
+ ROCE_ASYNC_EVENT_COMM_EST = 1,
+ ROCE_ASYNC_EVENT_SQ_DRAINED,
+ ROCE_ASYNC_EVENT_SRQ_LIMIT,
+ ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+ ROCE_ASYNC_EVENT_CQ_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+ ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+ ROCE_ASYNC_EVENT_SRQ_EMPTY,
+ MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
+#endif /* __QED_HSI_ROCE__ */
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
new file mode 100644
index 000000000000..5c98d2055cad
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -0,0 +1,748 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_RDMA__
+#define __QED_HSI_RDMA__
+
+#include <linux/qed/rdma_common.h>
+
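+/* Many fields below are packed into flags/ctrl bytes and described by
+ * MASK/SHIFT pairs: a field is read as ((val >> FIELD_SHIFT) & FIELD_MASK)
+ * and written with the qed GET_FIELD()/SET_FIELD() helpers.
+ */
+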
+/* rdma completion notification queue element */
+struct rdma_cnqe {
+ struct regpair cq_handle;
+};
+
+struct rdma_cqe_responder {
+ struct regpair srq_wr_id;
+ struct regpair qp_handle;
+ __le32 imm_data_or_inv_r_Key;
+ __le32 length;
+ __le32 imm_data_hi;
+ __le16 rq_cons;
+ u8 flags;
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3
+#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1
+#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3
+#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4
+#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5
+#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3
+#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6
+ u8 status;
+};
+
+struct rdma_cqe_requester {
+ __le16 sq_cons;
+ __le16 reserved0;
+ __le32 reserved1;
+ struct regpair qp_handle;
+ struct regpair reserved2;
+ __le32 reserved3;
+ __le16 reserved4;
+ u8 flags;
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3
+#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1
+#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F
+#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3
+ u8 status;
+};
+
+struct rdma_cqe_common {
+ struct regpair reserved0;
+ struct regpair qp_handle;
+ __le16 reserved1[7];
+ u8 flags;
+#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_COMMON_TYPE_MASK 0x3
+#define RDMA_CQE_COMMON_TYPE_SHIFT 1
+#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F
+#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3
+ u8 status;
+};
+
+/* rdma completion queue element */
+union rdma_cqe {
+ struct rdma_cqe_responder resp;
+ struct rdma_cqe_requester req;
+ struct rdma_cqe_common cmn;
+};
+
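+/* All CQE flavors share the flags byte, so the TYPE field of
+ * rdma_cqe_common (RDMA_CQE_COMMON_TYPE_MASK/SHIFT) tells the poller
+ * whether to read a CQE as a requester or a responder completion.
+ */
+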
+/* CQE requester status enumeration */
+enum rdma_cqe_requester_status_enum {
+ RDMA_CQE_REQ_STS_OK,
+ RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+ MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
+};
+
+/* CQE responder status enumeration */
+enum rdma_cqe_responder_status_enum {
+ RDMA_CQE_RESP_STS_OK,
+ RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
+ MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
+};
+
+/* CQE type enumeration */
+enum rdma_cqe_type {
+ RDMA_CQE_TYPE_REQUESTER,
+ RDMA_CQE_TYPE_RESPONDER_RQ,
+ RDMA_CQE_TYPE_RESPONDER_SRQ,
+ RDMA_CQE_TYPE_INVALID,
+ MAX_RDMA_CQE_TYPE
+};
+
+struct rdma_sq_sge {
+ __le32 length;
+ struct regpair addr;
+ __le32 l_key;
+};
+
+struct rdma_rq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 flags;
+#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF
+#define RDMA_RQ_SGE_L_KEY_SHIFT 0
+#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7
+#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
+#define RDMA_RQ_SGE_RESERVED0_MASK 0x7
+#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
+};
+
+struct rdma_srq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 l_key;
+};
+
+/* Rdma doorbell data for SQ and RQ */
+struct rdma_pwm_val16_data {
+ __le16 icid;
+ __le16 value;
+};
+
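+/* The union below lets the driver issue icid and value to the doorbell as
+ * a single aligned 32-bit store (as_dword) instead of two 16-bit writes.
+ */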
+union rdma_pwm_val16_data_union {
+ struct rdma_pwm_val16_data as_struct;
+ __le32 as_dword;
+};
+
+/* Rdma doorbell data for CQ */
+struct rdma_pwm_val32_data {
+ __le16 icid;
+ u8 agg_flags;
+ u8 params;
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
+ __le32 value;
+};
+
+/* DIF Block size options */
+enum rdma_dif_block_size {
+ RDMA_DIF_BLOCK_512 = 0,
+ RDMA_DIF_BLOCK_4096 = 1,
+ MAX_RDMA_DIF_BLOCK_SIZE
+};
+
+/* DIF CRC initial value */
+enum rdma_dif_crc_seed {
+ RDMA_DIF_CRC_SEED_0000 = 0,
+ RDMA_DIF_CRC_SEED_FFFF = 1,
+ MAX_RDMA_DIF_CRC_SEED
+};
+
+/* RDMA DIF Error Result Structure */
+struct rdma_dif_error_result {
+ __le32 error_intervals;
+ __le32 dif_error_1st_interval;
+ u8 flags;
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT 0
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK 0xF
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT 3
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT 7
+ u8 reserved1[55];
+};
+
+/* DIF IO direction */
+enum rdma_dif_io_direction_flg {
+ RDMA_DIF_DIR_RX = 0,
+ RDMA_DIF_DIR_TX = 1,
+ MAX_RDMA_DIF_IO_DIRECTION_FLG
+};
+
+/* RDMA DIF Runt Result Structure */
+struct rdma_dif_runt_result {
+ __le16 guard_tag;
+ __le16 reserved[3];
+};
+
+/* Memory window type enumeration */
+enum rdma_mw_type {
+ RDMA_MW_TYPE_1,
+ RDMA_MW_TYPE_2A,
+ MAX_RDMA_MW_TYPE
+};
+
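+/* Long WQEs are described twice: as a full structure and as a sequence of
+ * 16-byte elements (_1st/_2nd/_3rd), matching how the SQ chain hands out
+ * ring space one 16-byte element at a time.
+ */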
+struct rdma_sq_atomic_wqe {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+/* First element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_1st {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+};
+
+/* Third element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_3rd {
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+struct rdma_sq_bind_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7
+#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F
+#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+};
+
+/* First element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_2nd {
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+};
+
+/* Structure with only the SQ WQE common
+ * fields. Its size is one SQ element (16B).
+ */
+struct rdma_sq_common_wqe {
+ __le32 reserved1[3];
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK 0x7
+#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_fmr_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+ __le32 dif_base_ref_tag;
+ __le16 dif_app_tag;
+ __le16 dif_app_tag_mask;
+ __le16 dif_runt_crc_value;
+ __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
+ __le32 Reserved5;
+};
+
+/* First element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_2nd {
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+};
+
+/* Third element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_3rd {
+ __le32 dif_base_ref_tag;
+ __le16 dif_app_tag;
+ __le16 dif_app_tag_mask;
+ __le16 dif_runt_crc_value;
+ __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
+ __le32 Reserved5;
+};
+
+struct rdma_sq_local_inv_wqe {
+ struct regpair reserved;
+ __le32 inv_l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_rdma_wqe {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F
+#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3
+ u8 reserved2[3];
+};
+
+/* First element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_1st {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x3
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK 0x1F
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT 3
+ u8 reserved2[3];
+};
+
+/* SQ WQE req type enumeration */
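+/* These mirror the IB verbs send opcodes (IB_WR_SEND, IB_WR_RDMA_WRITE,
+ * IB_WR_ATOMIC_CMP_AND_SWP, ...); the driver translates ib_wr_opcode into
+ * one of these values when building a SQ WQE.
+ */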
+enum rdma_sq_req_type {
+ RDMA_SQ_REQ_TYPE_SEND,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_RDMA_WR,
+ RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_RDMA_RD,
+ RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
+ RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
+ RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_FAST_MR,
+ RDMA_SQ_REQ_TYPE_BIND,
+ RDMA_SQ_REQ_TYPE_INVALID,
+ MAX_RDMA_SQ_REQ_TYPE
+};
+
+struct rdma_sq_send_wqe {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_SEND_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ __le32 reserved1[4];
+};
+
+struct rdma_sq_send_wqe_1st {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
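+/* Second element (16 bytes) of send wqe */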
+struct rdma_sq_send_wqe_2st {
+ __le32 reserved1[4];
+};
+
+#endif /* __QED_HSI_RDMA__ */
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
new file mode 100644
index 000000000000..a61514296767
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -0,0 +1,3547 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <linux/iommu.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_cm.h"
+
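+/* Converts a PWM doorbell offset (e.g. the DQ_PWM_OFFSET_* constants) into
+ * the byte offset used against the doorbell BAR.
+ */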
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
+int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+ if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
+ return -EINVAL;
+
+ *pkey = QEDR_ROCE_PKEY_DEFAULT;
+ return 0;
+}
+
+int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *sgid)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ int rc = 0;
+
+ if (!rdma_cap_roce_gid_table(ibdev, port))
+ return -ENODEV;
+
+ rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
+ if (rc == -EAGAIN) {
+ memcpy(sgid, &zgid, sizeof(*sgid));
+ return 0;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
+ sgid->global.interface_id, sgid->global.subnet_prefix);
+
+ return rc;
+}
+
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context)
+{
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > QEDR_MAX_PORT)
+ return -EINVAL;
+
+ if (!context)
+ return -EINVAL;
+
+ return 0;
+}
+
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, void **context)
+{
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > QEDR_MAX_PORT)
+ return -EINVAL;
+
+ if (!context)
+ return -EINVAL;
+
+ return 0;
+}
+
+int qedr_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev,
+ "qedr_query_device called with invalid params rdma_ctx=%p\n",
+ dev->rdma_ctx);
+ return -EINVAL;
+ }
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->fw_ver = qattr->fw_ver;
+ attr->sys_image_guid = qattr->sys_image_guid;
+ attr->max_mr_size = qattr->max_mr_size;
+ attr->page_size_cap = qattr->page_size_caps;
+ attr->vendor_id = qattr->vendor_id;
+ attr->vendor_part_id = qattr->vendor_part_id;
+ attr->hw_ver = qattr->hw_ver;
+ attr->max_qp = qattr->max_qp;
+ attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
+ attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
+ IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+ attr->max_sge = qattr->max_sge;
+ attr->max_sge_rd = qattr->max_sge;
+ attr->max_cq = qattr->max_cq;
+ attr->max_cqe = qattr->max_cqe;
+ attr->max_mr = qattr->max_mr;
+ attr->max_mw = qattr->max_mw;
+ attr->max_pd = qattr->max_pd;
+ attr->atomic_cap = dev->atomic_cap;
+ attr->max_fmr = qattr->max_fmr;
+ attr->max_map_per_fmr = 16;
+ attr->max_qp_init_rd_atom =
+ 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
+ attr->max_qp_rd_atom =
+ min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
+ attr->max_qp_init_rd_atom);
+
+ attr->max_srq = qattr->max_srq;
+ attr->max_srq_sge = qattr->max_srq_sge;
+ attr->max_srq_wr = qattr->max_srq_wr;
+
+ attr->local_ca_ack_delay = qattr->dev_ack_delay;
+ attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
+ attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
+ attr->max_ah = qattr->max_ah;
+
+ return 0;
+}
+
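+/* Per-lane InfiniBand data rates are roughly SDR 2.5, DDR 5, QDR 10,
+ * FDR10 10.3, FDR 14 and EDR 25 Gb/s; combined with a 1X/4X width they
+ * approximate the Ethernet link speed reported by qed.
+ */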
+#define QEDR_SPEED_SDR (1)
+#define QEDR_SPEED_DDR (2)
+#define QEDR_SPEED_QDR (4)
+#define QEDR_SPEED_FDR10 (8)
+#define QEDR_SPEED_FDR (16)
+#define QEDR_SPEED_EDR (32)
+
+static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+ u8 *ib_width)
+{
+ switch (speed) {
+ case 1000:
+ *ib_speed = QEDR_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+ case 10000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 20000:
+ *ib_speed = QEDR_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 25000:
+ *ib_speed = QEDR_SPEED_EDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 40000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 50000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 100000:
+ *ib_speed = QEDR_SPEED_EDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+ *ib_speed = QEDR_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+}
+
+int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+{
+ struct qedr_dev *dev;
+ struct qed_rdma_port *rdma_port;
+
+ dev = get_qedr_dev(ibdev);
+ if (port > 1) {
+ DP_ERR(dev, "invalid_port=0x%x\n", port);
+ return -EINVAL;
+ }
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev, "rdma_ctx is NULL\n");
+ return -EINVAL;
+ }
+
+ rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
+ memset(attr, 0, sizeof(*attr));
+
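+ /* IB PortPhysicalState: 5 = LinkUp, 3 = Disabled */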
+ if (rdma_port->port_state == QED_RDMA_PORT_UP) {
+ attr->state = IB_PORT_ACTIVE;
+ attr->phys_state = 5;
+ } else {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = 3;
+ }
+ attr->max_mtu = IB_MTU_4096;
+ attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
+ attr->lid = 0;
+ attr->lmc = 0;
+ attr->sm_lid = 0;
+ attr->sm_sl = 0;
+ attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
+ attr->gid_tbl_len = QEDR_MAX_SGID;
+ attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+ attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
+ attr->qkey_viol_cntr = 0;
+ get_link_speed_and_width(rdma_port->link_speed,
+ &attr->active_speed, &attr->active_width);
+ attr->max_msg_sz = rdma_port->max_msg_size;
+ attr->max_vl_num = 4;
+
+ return 0;
+}
+
+int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
+ struct ib_port_modify *props)
+{
+ struct qedr_dev *dev;
+
+ dev = get_qedr_dev(ibdev);
+ if (port > 1) {
+ DP_ERR(dev, "invalid_port=0x%x\n", port);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ struct qedr_mm *mm;
+
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm)
+ return -ENOMEM;
+
+ mm->key.phy_addr = phy_addr;
+ /* This function might be called with a length which is not a multiple
+ * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
+ * forces this granularity by increasing the requested size if needed.
+ * When qedr_mmap is called, it will search the list with the updated
+ * length as a key. To prevent search failures, the length is rounded up
+ * in advance to PAGE_SIZE.
+ */
+ mm->key.len = roundup(len, PAGE_SIZE);
+ INIT_LIST_HEAD(&mm->entry);
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_add(&mm->entry, &uctx->mm_head);
+ mutex_unlock(&uctx->mm_list_lock);
+
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+ (unsigned long long)mm->key.phy_addr,
+ (unsigned long)mm->key.len, uctx);
+
+ return 0;
+}
+
+static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ bool found = false;
+ struct qedr_mm *mm;
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_for_each_entry(mm, &uctx->mm_head, entry) {
+ if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ continue;
+
+ found = true;
+ break;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
+ phy_addr, len, uctx, found);
+
+ return found;
+}
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ int rc;
+ struct qedr_ucontext *ctx;
+ struct qedr_alloc_ucontext_resp uresp;
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_add_user_out_params oparams;
+
+ if (!udata)
+ return ERR_PTR(-EFAULT);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
+ if (rc) {
+ DP_ERR(dev,
+ "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
+ rc);
+ goto err;
+ }
+
+ ctx->dpi = oparams.dpi;
+ ctx->dpi_addr = oparams.dpi_addr;
+ ctx->dpi_phys_addr = oparams.dpi_phys_addr;
+ ctx->dpi_size = oparams.dpi_size;
+ INIT_LIST_HEAD(&ctx->mm_head);
+ mutex_init(&ctx->mm_list_lock);
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.db_pa = ctx->dpi_phys_addr;
+ uresp.db_size = ctx->dpi_size;
+ uresp.max_send_wr = dev->attr.max_sqe;
+ uresp.max_recv_wr = dev->attr.max_rqe;
+ uresp.max_srq_wr = dev->attr.max_srq_wr;
+ uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
+ uresp.max_cqes = QEDR_MAX_CQES;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ goto err;
+
+ ctx->dev = dev;
+
+ rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
+ if (rc)
+ goto err;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
+ &ctx->ibucontext);
+ return &ctx->ibucontext;
+
+err:
+ kfree(ctx);
+ return ERR_PTR(rc);
+}
+
+int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
+ struct qedr_mm *mm, *tmp;
+ int status = 0;
+
+ DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
+ uctx);
+ uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
+
+ list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+ mm->key.phy_addr, mm->key.len, uctx);
+ list_del(&mm->entry);
+ kfree(mm);
+ }
+
+ kfree(uctx);
+ return status;
+}
+
+int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
+ struct qedr_dev *dev = get_qedr_dev(context->device);
+ unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
+ u64 unmapped_db = dev->db_phys_addr;
+ unsigned long len = (vma->vm_end - vma->vm_start);
+ int rc = 0;
+ bool found;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
+ vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
+ if (vma->vm_start & (PAGE_SIZE - 1)) {
+ DP_ERR(dev, "Vma_start not page aligned = %ld\n",
+ vma->vm_start);
+ return -EINVAL;
+ }
+
+ found = qedr_search_mmap(ucontext, vm_page, len);
+ if (!found) {
+ DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
+ vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+
+ if ((vm_page >= unmapped_db) && (vm_page < (unmapped_db +
+ dev->db_size))) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+ if (vma->vm_flags & VM_READ) {
+ DP_ERR(dev, "Trying to map doorbell bar for read\n");
+ return -EPERM;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ PAGE_SIZE, vma->vm_page_prot);
+ } else {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
+ rc = remap_pfn_range(vma, vma->vm_start,
+ vma->vm_pgoff, len, vma->vm_page_prot);
+ }
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
+ return rc;
+}
+
+struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_ucontext *uctx = NULL;
+ struct qedr_alloc_pd_uresp uresp;
+ struct qedr_pd *pd;
+ u16 pd_id;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
+ (udata && context) ? "User Lib" : "Kernel");
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev, "invlaid RDMA context\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+
+ uresp.pd_id = pd_id;
+ pd->pd_id = pd_id;
+
+ if (udata && context) {
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
+ uctx = get_qedr_ucontext(context);
+ uctx->pd = pd;
+ pd->uctx = uctx;
+ }
+
+ return &pd->ibpd;
+}
+
+int qedr_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+
+ if (!pd) {
+ pr_err("Invalid PD received in dealloc_pd\n");
+ return -EINVAL;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+
+ kfree(pd);
+
+ return 0;
+}
+
+static void qedr_free_pbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int i;
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ if (!pbl[i].va)
+ continue;
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl[i].va, pbl[i].pa);
+ }
+
+ kfree(pbl);
+}
+
+#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
+#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
+
+#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
+#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
+#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
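+/* A 4KB PBL page holds 512 64-bit PBEs and a 64KB page holds 8192, so a
+ * two-layer table can address up to 8192 * 8192 page buffer entries.
+ */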
+
+static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ gfp_t flags)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct qedr_pbl *pbl_table;
+ dma_addr_t *pbl_main_tbl;
+ dma_addr_t pa;
+ void *va;
+ int i;
+
+ pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
+ if (!pbl_table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
+ &pa, flags);
+ if (!va)
+ goto err;
+
+ memset(va, 0, pbl_info->pbl_size);
+ pbl_table[i].va = va;
+ pbl_table[i].pa = pa;
+ }
+
+ /* Two-layer PBLs: if we have more than one PBL, the first one must be
+ * initialized with physical pointers to all of the rest.
+ */
+ pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
+ for (i = 0; i < pbl_info->num_pbls - 1; i++)
+ pbl_main_tbl[i] = pbl_table[i + 1].pa;
+
+ return pbl_table;
+
+err:
+ for (i--; i >= 0; i--)
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl_table[i].va, pbl_table[i].pa);
+
+ qedr_free_pbl(dev, pbl_info, pbl_table);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ u32 num_pbes, int two_layer_capable)
+{
+ u32 pbl_capacity;
+ u32 pbl_size;
+ u32 num_pbls;
+
+ if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
+ if (num_pbes > MAX_PBES_TWO_LAYER) {
+ DP_ERR(dev, "prepare pbl table: too many pages %d\n",
+ num_pbes);
+ return -EINVAL;
+ }
+
+ /* calculate required pbl page size */
+ pbl_size = MIN_FW_PBL_PAGE_SIZE;
+ pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
+ NUM_PBES_ON_PAGE(pbl_size);
+
+ while (pbl_capacity < num_pbes) {
+ pbl_size *= 2;
+ pbl_capacity = pbl_size / sizeof(u64);
+ pbl_capacity = pbl_capacity * pbl_capacity;
+ }
+
+ num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
+ num_pbls++; /* One more for layer 0, which points to the PBLs */
+ pbl_info->two_layered = true;
+ } else {
+ /* One layered PBL */
+ num_pbls = 1;
+ pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
+ roundup_pow_of_two((num_pbes * sizeof(u64))));
+ pbl_info->two_layered = false;
+ }
+
+ pbl_info->num_pbls = num_pbls;
+ pbl_info->pbl_size = pbl_size;
+ pbl_info->num_pbes = num_pbes;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
+ pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
+
+ return 0;
+}
+
+static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
+ struct qedr_pbl *pbl,
+ struct qedr_pbl_info *pbl_info)
+{
+ int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+ struct qedr_pbl *pbl_tbl;
+ struct scatterlist *sg;
+ struct regpair *pbe;
+ int entry;
+ u32 addr;
+
+ if (!pbl_info->num_pbes)
+ return;
+
+ /* If we have a two-layered PBL, the first PBL points to the rest of
+ * the PBLs and the first entry lies on the second PBL in the table.
+ */
+ if (pbl_info->two_layered)
+ pbl_tbl = &pbl[1];
+ else
+ pbl_tbl = pbl;
+
+ pbe = (struct regpair *)pbl_tbl->va;
+ if (!pbe) {
+ DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
+ return;
+ }
+
+ pbe_cnt = 0;
+
+ shift = ilog2(umem->page_size);
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> shift;
+ for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+ /* store the page address in pbe */
+ pbe->lo = cpu_to_le32(sg_dma_address(sg) +
+ umem->page_size * pg_cnt);
+ addr = upper_32_bits(sg_dma_address(sg) +
+ umem->page_size * pg_cnt);
+ pbe->hi = cpu_to_le32(addr);
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
+
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
+
+ /* If the given pbl is full storing the pbes,
+ * move to next pbl.
+ */
+ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
+ }
+ }
+ }
+}
+
+static int qedr_copy_cq_uresp(struct qedr_dev *dev,
+ struct qedr_cq *cq, struct ib_udata *udata)
+{
+ struct qedr_create_cq_uresp uresp;
+ int rc;
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ uresp.icid = cq->icid;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
+
+ return rc;
+}
+
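+/* Valid CQEs carry a toggle bit that inverts on every lap around the ring;
+ * pbl_toggle holds the polarity currently expected and is flipped whenever
+ * consumption passes the last element (toggle_cqe).
+ */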
+static void consume_cqe(struct qedr_cq *cq)
+{
+ if (cq->latest_cqe == cq->toggle_cqe)
+ cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+
+ cq->latest_cqe = qed_chain_consume(&cq->pbl);
+}
+
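+/* Example: with 32-byte CQEs (sizeof(union rdma_cqe)) and 4KB pages, a
+ * request for 511 entries becomes (511 + 1) * 32 = 16KB, which is already
+ * page aligned, so 512 entries are allocated and one is held back from the
+ * FW (see qedr_init_cq_params()).
+ */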
+static inline int qedr_align_cq_entries(int entries)
+{
+ u64 size, aligned_size;
+
+ /* We allocate an extra entry that we don't report to the FW. */
+ size = (entries + 1) * QEDR_CQE_SIZE;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+
+ return aligned_size / QEDR_CQE_SIZE;
+}
+
+static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
+ struct qedr_dev *dev,
+ struct qedr_userq *q,
+ u64 buf_addr, size_t buf_len,
+ int access, int dmasync)
+{
+ int page_cnt;
+ int rc;
+
+ q->buf_addr = buf_addr;
+ q->buf_len = buf_len;
+ q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
+ if (IS_ERR(q->umem)) {
+ DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
+ PTR_ERR(q->umem));
+ return PTR_ERR(q->umem);
+ }
+
+ page_cnt = ib_umem_page_count(q->umem);
+ rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+ if (rc)
+ goto err0;
+
+ q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+ if (IS_ERR(q->pbl_tbl)) {
+ rc = PTR_ERR(q->pbl_tbl);
+ goto err0;
+ }
+
+ qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+
+ return 0;
+
+err0:
+ ib_umem_release(q->umem);
+
+ return rc;
+}
+
+static inline void qedr_init_cq_params(struct qedr_cq *cq,
+ struct qedr_ucontext *ctx,
+ struct qedr_dev *dev, int vector,
+ int chain_entries, int page_cnt,
+ u64 pbl_ptr,
+ struct qed_rdma_create_cq_in_params
+ *params)
+{
+ memset(params, 0, sizeof(*params));
+ params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
+ params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
+ params->cnq_id = vector;
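+ /* Report one entry less than allocated; see qedr_align_cq_entries() */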
+ params->cq_size = chain_entries - 1;
+ params->dpi = (ctx) ? ctx->dpi : dev->dpi;
+ params->pbl_num_pages = page_cnt;
+ params->pbl_ptr = pbl_ptr;
+ params->pbl_two_level = 0;
+}
+
+static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
+{
+ /* Flush data before signalling doorbell */
+ wmb();
+ cq->db.data.agg_flags = flags;
+ cq->db.data.value = cpu_to_le32(cons);
+ writeq(cq->db.raw, cq->db_addr);
+
+ /* Make sure write would stick */
+ mmiowb();
+}
+
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ unsigned long sflags;
+
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ return 0;
+
+ spin_lock_irqsave(&cq->cq_lock, sflags);
+
+ cq->arm_flags = 0;
+
+ if (flags & IB_CQ_SOLICITED)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
+
+ if (flags & IB_CQ_NEXT_COMP)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
+
+ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+ spin_unlock_irqrestore(&cq->cq_lock, sflags);
+
+ return 0;
+}
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+{
+ struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+ struct qed_rdma_destroy_cq_out_params destroy_oparams;
+ struct qed_rdma_destroy_cq_in_params destroy_iparams;
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_create_cq_in_params params;
+ struct qedr_create_cq_ureq ureq;
+ int vector = attr->comp_vector;
+ int entries = attr->cqe;
+ struct qedr_cq *cq;
+ int chain_entries;
+ int page_cnt;
+ u64 pbl_ptr;
+ u16 icid;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "create_cq: called from %s. entries=%d, vector=%d\n",
+ udata ? "User Lib" : "Kernel", entries, vector);
+
+ if (entries > QEDR_MAX_CQES) {
+ DP_ERR(dev,
+ "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
+ entries, QEDR_MAX_CQES);
+ return ERR_PTR(-EINVAL);
+ }
+
+ chain_entries = qedr_align_cq_entries(entries);
+ chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return ERR_PTR(-ENOMEM);
+
+ if (udata) {
+ memset(&ureq, 0, sizeof(ureq));
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create cq: problem copying data from user space\n");
+ goto err0;
+ }
+
+ if (!ureq.len) {
+ DP_ERR(dev,
+ "create cq: cannot create a cq with 0 entries\n");
+ goto err0;
+ }
+
+ cq->cq_type = QEDR_CQ_TYPE_USER;
+
+ rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
+ ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+ if (rc)
+ goto err0;
+
+ pbl_ptr = cq->q.pbl_tbl->pa;
+ page_cnt = cq->q.pbl_info.num_pbes;
+ } else {
+ cq->cq_type = QEDR_CQ_TYPE_KERNEL;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ chain_entries,
+ sizeof(union rdma_cqe),
+ &cq->pbl);
+ if (rc)
+ goto err1;
+
+ page_cnt = qed_chain_get_page_cnt(&cq->pbl);
+ pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+ }
+
+ qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
+ pbl_ptr, &params);
+
+ rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
+ if (rc)
+ goto err2;
+
+ cq->icid = icid;
+ cq->sig = QEDR_CQ_MAGIC_NUMBER;
+ spin_lock_init(&cq->cq_lock);
+
+ if (ib_ctx) {
+ rc = qedr_copy_cq_uresp(dev, cq, udata);
+ if (rc)
+ goto err3;
+ } else {
+ /* Generate doorbell address. */
+ cq->db_addr = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ cq->db.data.icid = cq->icid;
+ cq->db.data.params = DB_AGG_CMD_SET <<
+ RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+
+ /* point to the very last element, passing it we will toggle */
+ cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
+ cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+ cq->latest_cqe = NULL;
+ consume_cqe(cq);
+ cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
+ cq->icid, cq, params.cq_size);
+
+ return &cq->ibcq;
+
+err3:
+ destroy_iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
+ &destroy_oparams);
+err2:
+ if (udata)
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ else
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+err1:
+ if (udata)
+ ib_umem_release(cq->q.umem);
+err0:
+ kfree(cq);
+ return ERR_PTR(-EINVAL);
+}
+
+int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+ DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
+
+ return 0;
+}
+
+int qedr_destroy_cq(struct ib_cq *ibcq)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+ DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
+
+ /* GSI CQs are handled by the driver, so they don't exist in the FW */
+ if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+
+ if (ibcq->uobject && ibcq->uobject->context) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ ib_umem_release(cq->q.umem);
+ }
+
+ kfree(cq);
+
+ return 0;
+}
+
+static inline int get_gid_info_from_table(struct ib_qp *ibqp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct qed_rdma_modify_qp_in_params
+ *qp_params)
+{
+ enum rdma_network_type nw_type;
+ struct ib_gid_attr gid_attr;
+ union ib_gid gid;
+ u32 ipv4_addr;
+ int rc = 0;
+ int i;
+
+ rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
+ attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
+ if (rc)
+ return rc;
+
+ if (!memcmp(&gid, &zgid, sizeof(gid)))
+ return -ENOENT;
+
+ if (gid_attr.ndev) {
+ qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+
+ dev_put(gid_attr.ndev);
+ nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV6:
+ memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &attr->ah_attr.grh.dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V2_IPV6;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ break;
+ case RDMA_NETWORK_IB:
+ memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &attr->ah_attr.grh.dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V1;
+ break;
+ case RDMA_NETWORK_IPV4:
+ memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
+ memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
+ ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
+ qp_params->sgid.ipv4_addr = ipv4_addr;
+ ipv4_addr =
+ qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
+ qp_params->dgid.ipv4_addr = ipv4_addr;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ qp_params->roce_mode = ROCE_V2_IPV4;
+ break;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
+ qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
+ }
+
+ if (qp_params->vlan_id >= VLAN_CFI_MASK)
+ qp_params->vlan_id = 0;
+
+ return 0;
+}
+
+static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ ib_umem_release(qp->usq.umem);
+}
+
+static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ ib_umem_release(qp->urq.umem);
+}
+
+static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+ kfree(qp->wqe_wr_id);
+}
+
+static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+ kfree(qp->rqe_wr_id);
+}
+
+static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ /* QP0... attrs->qp_type == IB_QPT_GSI */
+ if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: unsupported qp type=0x%x requested\n",
+ attrs->qp_type);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_wr > qattr->max_sqe) {
+ DP_ERR(dev,
+ "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
+ attrs->cap.max_send_wr, qattr->max_sqe);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_inline_data > qattr->max_inline) {
+ DP_ERR(dev,
+ "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
+ attrs->cap.max_inline_data, qattr->max_inline);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
+ attrs->cap.max_send_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
+ attrs->cap.max_recv_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ /* Unprivileged user space cannot create special QP */
+ if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+ DP_ERR(dev,
+ "create qp: userspace can't create special QPs of type=0x%x\n",
+ attrs->qp_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
+ uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ uresp->rq_icid = qp->icid;
+}
+
+static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
+ uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ uresp->sq_icid = qp->icid + 1;
+}
+
+static int qedr_copy_qp_uresp(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_udata *udata)
+{
+ struct qedr_create_qp_uresp uresp;
+ int rc;
+
+ memset(&uresp, 0, sizeof(uresp));
+ qedr_copy_sq_uresp(&uresp, qp);
+ qedr_copy_rq_uresp(&uresp, qp);
+
+ uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+ uresp.qp_id = qp->qp_id;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev,
+ "create qp: failed a copy to user space with qp icid=0x%x.\n",
+ qp->icid);
+
+ return rc;
+}
+
+static void qedr_set_qp_init_params(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qedr_pd *pd,
+ struct ib_qp_init_attr *attrs)
+{
+ qp->pd = pd;
+
+ spin_lock_init(&qp->q_lock);
+
+ qp->qp_type = attrs->qp_type;
+ qp->max_inline_data = attrs->cap.max_inline_data;
+ qp->sq.max_sges = attrs->cap.max_send_sge;
+ qp->state = QED_ROCE_QP_STATE_RESET;
+ qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ qp->sq_cq = get_qedr_cq(attrs->send_cq);
+ qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+ qp->dev = dev;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
+ pd->pd_id, qp->qp_type, qp->max_inline_data,
+ qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+ qp->sq.max_sges, qp->sq_cq->icid);
+ qp->rq.max_sges = attrs->cap.max_recv_sge;
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+ qp->rq.max_sges, qp->rq_cq->icid);
+}
+
+static inline void
+qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
+ struct qedr_create_qp_ureq *ureq)
+{
+ /* QP handle to be written in CQE */
+ params->qp_handle_lo = ureq->qp_handle_lo;
+ params->qp_handle_hi = ureq->qp_handle_hi;
+}
+
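+/* Kernel-space doorbells mirror the offsets reported to user space by
+ * qedr_copy_sq_uresp()/qedr_copy_rq_uresp(): the SQ doorbell uses
+ * icid + 1 while the RQ doorbell uses icid.
+ */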
+static inline void
+qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid + 1;
+}
+
+static inline void
+qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+}
+
+static inline int
+qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
+{
+ /* Allocate driver internal RQ array */
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id)
+ return -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
+
+ return 0;
+}
+
+static inline int
+qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ u32 temp_max_wr;
+
+ /* Allocate driver internal SQ array */
+ temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
+ temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
+
+ /* temp_max_wr is capped at dev->attr.max_sqe, which fits in a
+ * u16, so the cast is safe
+ */
+ qp->sq.max_wr = (u16)temp_max_wr;
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id)
+ return -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
+
+ /* QP handle to be written in CQE */
+ params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
+ params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
+
+ return 0;
+}
+
+static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ u32 n_sq_elems, n_sq_entries;
+ int rc;
+
+ /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
+ * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ n_sq_entries = attrs->cap.max_send_wr;
+ n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
+ n_sq_entries = max_t(u32, n_sq_entries, 1);
+ n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_sq_elems,
+ QEDR_SQE_ELEMENT_SIZE,
+ &qp->sq.pbl);
+ if (rc) {
+ DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_SQ,
+ "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+ qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
+ n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
+ return 0;
+}
+
+static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ u32 n_rq_elems, n_rq_entries;
+ int rc;
+
+ /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
+ * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
+ n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ &qp->rq.pbl);
+
+ if (rc) {
+ DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_RQ,
+ "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+ qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
+ n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
+
+ /* n_rq_entries fits in a u16, so the cast is safe */
+ qp->rq.max_wr = (u16)n_rq_entries;
+
+ return 0;
+}
+
+static inline void
+qedr_init_qp_in_params_sq(struct qedr_dev *dev,
+ struct qedr_pd *pd,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ /* QP handle to be written in an async event */
+ params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
+ params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
+
+ params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
+ params->fmr_and_reserved_lkey = !udata;
+ params->pd = pd->pd_id;
+ params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+ params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+ params->max_sq_sges = 0;
+ params->stats_queue = 0;
+
+ if (udata) {
+ params->sq_num_pages = qp->usq.pbl_info.num_pbes;
+ params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+ } else {
+ params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
+ params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
+ }
+}
+
+static inline void
+qedr_init_qp_in_params_rq(struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+ params->srq_id = 0;
+ params->use_srq = false;
+
+ if (udata) {
+ params->rq_num_pages = qp->urq.pbl_info.num_pbes;
+ params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ } else {
+ params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
+ params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
+ }
+}
+
+static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
+ qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
+ qp->urq.buf_len);
+}
+
+static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
+ struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qedr_create_qp_ureq *ureq)
+{
+ int rc;
+
+ /* SQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
+ ureq->sq_len, 0, 0);
+ if (rc)
+ return rc;
+
+ /* RQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
+ ureq->rq_len, 0, 0);
+
+ if (rc)
+ qedr_cleanup_user_sq(dev, qp);
+ return rc;
+}
+
+static inline int
+qedr_init_kernel_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ int rc;
+
+ rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
+ if (rc) {
+ DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
+ if (rc) {
+ dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+ DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
+ if (rc) {
+ qedr_cleanup_kernel_sq(dev, qp);
+ DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
+ if (rc) {
+ DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
+ qedr_cleanup_kernel_sq(dev, qp);
+ dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+ return rc;
+ }
+
+ return rc;
+}
+
+struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qed_rdma_create_qp_out_params out_params;
+ struct qed_rdma_create_qp_in_params in_params;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct ib_ucontext *ib_ctx = NULL;
+ struct qedr_ucontext *ctx = NULL;
+ struct qedr_create_qp_ureq ureq;
+ struct qedr_qp *qp;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
+ udata ? "user library" : "kernel", pd);
+
+ rc = qedr_check_qp_attrs(ibpd, dev, attrs);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* SRQs are not supported, so reject before allocating the QP */
+ if (attrs->srq)
+ return ERR_PTR(-EINVAL);
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
+ get_qedr_cq(attrs->send_cq),
+ get_qedr_cq(attrs->send_cq)->icid,
+ get_qedr_cq(attrs->recv_cq),
+ get_qedr_cq(attrs->recv_cq)->icid);
+
+ qedr_set_qp_init_params(dev, qp, pd, attrs);
+
+ if (attrs->qp_type == IB_QPT_GSI) {
+ if (udata) {
+ DP_ERR(dev,
+ "create qp: unexpected udata when creating GSI QP\n");
+ goto err0;
+ }
+ return qedr_create_gsi_qp(dev, attrs, qp);
+ }
+
+ memset(&in_params, 0, sizeof(in_params));
+
+ if (udata) {
+ if (!ibpd->uobject || !ibpd->uobject->context)
+ goto err0;
+
+ ib_ctx = ibpd->uobject->context;
+ ctx = get_qedr_ucontext(ib_ctx);
+
+ memset(&ureq, 0, sizeof(ureq));
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create qp: problem copying data from user space\n");
+ goto err0;
+ }
+
+ rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
+ if (rc)
+ goto err0;
+
+ qedr_init_qp_user_params(&in_params, &ureq);
+ } else {
+ rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
+ if (rc)
+ goto err0;
+ }
+
+ qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
+ qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
+
+ qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+ &in_params, &out_params);
+
+ if (!qp->qed_qp)
+ goto err1;
+
+ qp->qp_id = out_params.qp_id;
+ qp->icid = out_params.icid;
+ qp->ibqp.qp_num = qp->qp_id;
+
+ if (udata) {
+ rc = qedr_copy_qp_uresp(dev, qp, udata);
+ if (rc)
+ goto err2;
+
+ qedr_qp_user_print(dev, qp);
+ } else {
+ qedr_init_qp_kernel_doorbell_sq(dev, qp);
+ qedr_init_qp_kernel_doorbell_rq(dev, qp);
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
+ udata ? "user" : "kernel", qp);
+
+ return &qp->ibqp;
+
+err2:
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
+err1:
+ if (udata) {
+ qedr_cleanup_user_sq(dev, qp);
+ qedr_cleanup_user_rq(dev, qp);
+ } else {
+ qedr_cleanup_kernel_sq(dev, qp);
+ qedr_cleanup_kernel_rq(dev, qp);
+ }
+
+err0:
+ kfree(qp);
+
+ return ERR_PTR(-EFAULT);
+}
+
+enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+{
+ switch (qp_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ return IB_QPS_RESET;
+ case QED_ROCE_QP_STATE_INIT:
+ return IB_QPS_INIT;
+ case QED_ROCE_QP_STATE_RTR:
+ return IB_QPS_RTR;
+ case QED_ROCE_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case QED_ROCE_QP_STATE_SQD:
+ return IB_QPS_SQD;
+ case QED_ROCE_QP_STATE_ERR:
+ return IB_QPS_ERR;
+ case QED_ROCE_QP_STATE_SQE:
+ return IB_QPS_SQE;
+ }
+ return IB_QPS_ERR;
+}
+
+enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+{
+ switch (qp_state) {
+ case IB_QPS_RESET:
+ return QED_ROCE_QP_STATE_RESET;
+ case IB_QPS_INIT:
+ return QED_ROCE_QP_STATE_INIT;
+ case IB_QPS_RTR:
+ return QED_ROCE_QP_STATE_RTR;
+ case IB_QPS_RTS:
+ return QED_ROCE_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return QED_ROCE_QP_STATE_SQD;
+ case IB_QPS_ERR:
+ return QED_ROCE_QP_STATE_ERR;
+ default:
+ return QED_ROCE_QP_STATE_ERR;
+ }
+}
+
+static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
+{
+ qed_chain_reset(&qph->pbl);
+ qph->prod = 0;
+ qph->cons = 0;
+ qph->wqe_cons = 0;
+ qph->db_data.data.value = cpu_to_le16(0);
+}
+
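+/* Validate a RoCE QP state transition (RESET -> INIT -> RTR -> RTS,
+ * with the SQD and ERR side states) and perform any side effects it
+ * requires, such as ringing the RQ doorbell on the INIT -> RTR move.
+ * Returns 0 on a legal transition, 1 if the state is unchanged and
+ * -EINVAL on an illegal transition.
+ */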
+static int qedr_update_qp_state(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ enum qed_roce_qp_state new_state)
+{
+ int status = 0;
+
+ if (new_state == qp->state)
+ return 1;
+
+ switch (qp->state) {
+ case QED_ROCE_QP_STATE_RESET:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_INIT:
+ qp->prev_wqe_size = 0;
+ qedr_reset_qp_hwq_info(&qp->sq);
+ qedr_reset_qp_hwq_info(&qp->rq);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_INIT:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTR:
+ /* Update doorbell (in case post_recv was
+ * done before move to RTR)
+ */
+ wmb();
+ writel(qp->rq.db_data.raw, qp->rq.db);
+ /* Make sure write takes effect */
+ mmiowb();
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_RTR:
+ /* RTR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_RTS:
+ /* RTS->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_SQD:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_SQD:
+ /* SQD->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ /* ERR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+
+ return status;
+}
+
+int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+ struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
+ enum ib_qp_state old_qp_state, new_qp_state;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
+ attr->qp_state);
+
+ old_qp_state = qedr_get_ibqp_state(qp->state);
+ if (attr_mask & IB_QP_STATE)
+ new_qp_state = attr->qp_state;
+ else
+ new_qp_state = old_qp_state;
+
+ if (!ib_modify_qp_is_ok
+ (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ DP_ERR(dev,
+ "modify qp: invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+ attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
+ new_qp_state);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* Translate the masks... */
+ if (attr_mask & IB_QP_STATE) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+ qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
+ }
+
+ if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ qp_params.sqd_async = true;
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
+ if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
+ qp_params.incoming_rdma_read_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ;
+ qp_params.incoming_rdma_write_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE;
+ qp_params.incoming_atomic_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_ATOMIC;
+ }
+
+ if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+ if (attr_mask & IB_QP_PATH_MTU) {
+ if (attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > IB_MTU_4096) {
+ pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
+ ib_mtu_enum_to_int(iboe_get_mtu
+ (dev->ndev->mtu)));
+ }
+
+ if (!qp->mtu) {
+ qp->mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
+
+ qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
+ qp_params.flow_label = attr->ah_attr.grh.flow_label;
+ qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
+
+ qp->sgid_idx = attr->ah_attr.grh.sgid_index;
+
+ rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
+ if (rc) {
+ DP_ERR(dev,
+ "modify qp: problems with GID index %d (rc=%d)\n",
+ attr->ah_attr.grh.sgid_index, rc);
+ return rc;
+ }
+
+ rc = qedr_get_dmac(dev, &attr->ah_attr,
+ qp_params.remote_mac_addr);
+ if (rc)
+ return rc;
+
+ qp_params.use_local_mac = true;
+ ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
+ qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
+ qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
+ qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
+ qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
+ qp_params.remote_mac_addr);
+
+ qp_params.mtu = qp->mtu;
+ qp_params.lb_indication = false;
+ }
+
+ if (!qp_params.mtu) {
+ /* Stay with current MTU */
+ if (qp->mtu)
+ qp_params.mtu = qp->mtu;
+ else
+ qp_params.mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
+
+ qp_params.ack_timeout = attr->timeout;
+ if (attr->timeout) {
+ u32 temp;
+
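+ /* IB ack timeout is 4.096 usec * 2^attr->timeout */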
+ temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
+ /* FW requires [msec] */
+ qp_params.ack_timeout = temp;
+ } else {
+ /* Infinite */
+ qp_params.ack_timeout = 0;
+ }
+ }
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
+ qp_params.retry_cnt = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
+ qp_params.rnr_retry_cnt = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
+ qp_params.rq_psn = attr->rq_psn;
+ qp->rq_psn = attr->rq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
+ rc = -EINVAL;
+ DP_ERR(dev,
+ "unsupported max_rd_atomic=%d, supported=%d\n",
+ attr->max_rd_atomic,
+ dev->attr.max_qp_req_rd_atomic_resc);
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
+ qp_params.max_rd_atomic_req = attr->max_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
+ qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
+ qp_params.sq_psn = attr->sq_psn;
+ qp->sq_psn = attr->sq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic >
+ dev->attr.max_qp_resp_rd_atomic_resc) {
+ DP_ERR(dev,
+ "unsupported max_dest_rd_atomic=%d, supported=%d\n",
+ attr->max_dest_rd_atomic,
+ dev->attr.max_qp_resp_rd_atomic_resc);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
+ qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
+
+ qp_params.dest_qp = attr->dest_qp_num;
+ qp->dest_qp_num = attr->dest_qp_num;
+ }
+
+ if (qp->qp_type != IB_QPT_GSI)
+ rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
+ qp->qed_qp, &qp_params);
+
+ if (attr_mask & IB_QP_STATE) {
+ if ((qp->qp_type != IB_QPT_GSI) && (!udata))
+ qedr_update_qp_state(dev, qp, qp_params.new_state);
+ qp->state = qp_params.new_state;
+ }
+
+err:
+ return rc;
+}
+
+static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
+{
+ int ib_qp_acc_flags = 0;
+
+ if (params->incoming_rdma_write_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (params->incoming_rdma_read_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (params->incoming_atomic_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ return ib_qp_acc_flags;
+}
+
+int qedr_query_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *qp_attr,
+ int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct qed_rdma_query_qp_out_params params;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ int rc = 0;
+
+ memset(&params, 0, sizeof(params));
+
+ rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+ if (rc)
+ goto err;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+ qp_attr->path_mig_state = IB_MIG_MIGRATED;
+ qp_attr->rq_psn = params.rq_psn;
+ qp_attr->sq_psn = params.sq_psn;
+ qp_attr->dest_qp_num = params.dest_qp;
+
+ qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
+
+ qp_attr->cap.max_send_wr = qp->sq.max_wr;
+ qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_init_attr->cap = qp_attr->cap;
+
+ memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
+ sizeof(qp_attr->ah_attr.grh.dgid.raw));
+
+ qp_attr->ah_attr.grh.flow_label = params.flow_label;
+ qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
+ qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
+ qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
+
+ qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+ qp_attr->ah_attr.port_num = 1;
+ qp_attr->ah_attr.sl = 0;
+ qp_attr->timeout = params.timeout;
+ qp_attr->rnr_retry = params.rnr_retry;
+ qp_attr->retry_cnt = params.retry_cnt;
+ qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
+ qp_attr->pkey_index = params.pkey_index;
+ qp_attr->port_num = 1;
+ qp_attr->ah_attr.src_path_bits = 0;
+ qp_attr->ah_attr.static_rate = 0;
+ qp_attr->alt_pkey_index = 0;
+ qp_attr->alt_port_num = 0;
+ qp_attr->alt_timeout = 0;
+ memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+
+ qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
+ qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
+ qp_attr->max_rd_atomic = params.max_rd_atomic;
+ qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
+ qp_attr->cap.max_inline_data);
+
+err:
+ return rc;
+}
+
+int qedr_destroy_qp(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ struct ib_qp_attr attr;
+ int attr_mask = 0;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
+ qp, qp->qp_type);
+
+ if (qp->state != QED_ROCE_QP_STATE_RESET &&
+ qp->state != QED_ROCE_QP_STATE_ERR &&
+ qp->state != QED_ROCE_QP_STATE_INIT) {
+ attr.qp_state = IB_QPS_ERR;
+ attr_mask |= IB_QP_STATE;
+
+ /* Change the QP state to ERROR */
+ qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ }
+
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ return rc;
+ } else {
+ qedr_destroy_gsi_qp(dev);
+ }
+
+ if (ibqp->uobject && ibqp->uobject->context) {
+ qedr_cleanup_user_sq(dev, qp);
+ qedr_cleanup_user_rq(dev, qp);
+ } else {
+ qedr_cleanup_kernel_sq(dev, qp);
+ qedr_cleanup_kernel_rq(dev, qp);
+ }
+
+ kfree(qp);
+
+ return rc;
+}
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+ struct qedr_ah *ah;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ ah->attr = *attr;
+
+ return &ah->ibah;
+}
+
+int qedr_destroy_ah(struct ib_ah *ibah)
+{
+ struct qedr_ah *ah = get_qedr_ah(ibah);
+
+ kfree(ah);
+ return 0;
+}
+
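+/* Collect all of an MR's PBLs (the current table plus anything still
+ * on the in-use list) onto the free list and release them.
+ */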
+static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
+{
+ struct qedr_pbl *pbl, *tmp;
+
+ if (info->pbl_table)
+ list_add_tail(&info->pbl_table->list_entry,
+ &info->free_pbl_list);
+
+ if (!list_empty(&info->inuse_pbl_list))
+ list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
+
+ list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
+ list_del(&pbl->list_entry);
+ qedr_free_pbl(dev, &info->pbl_info, pbl);
+ }
+}
+
+static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
+ size_t page_list_len, bool two_layered)
+{
+ struct qedr_pbl *tmp;
+ int rc;
+
+ INIT_LIST_HEAD(&info->free_pbl_list);
+ INIT_LIST_HEAD(&info->inuse_pbl_list);
+
+ rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
+ page_list_len, two_layered);
+ if (rc)
+ goto done;
+
+ info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (!info->pbl_table) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
+ &info->pbl_table->pa);
+
+ /* In the usual case we use 2 PBLs, so we add one to the free
+ * list and allocate another one
+ */
+ tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (!tmp) {
+ DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
+ goto done;
+ }
+
+ list_add_tail(&tmp->list_entry, &info->free_pbl_list);
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
+
+done:
+ if (rc)
+ free_mr_info(dev, info);
+
+ return rc;
+}
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 usr_addr, int acc, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ struct qedr_pd *pd;
+ int rc = -ENOMEM;
+
+ pd = get_qedr_pd(ibpd);
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
+ pd->pd_id, start, len, usr_addr, acc);
+
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->type = QEDR_MR_USER;
+
+ mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ if (IS_ERR(mr->umem)) {
+ rc = -EFAULT;
+ goto err0;
+ }
+
+ rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
+ if (rc)
+ goto err1;
+
+ qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
+ &mr->info.pbl_info);
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+ mr->hw_mr.fbo = ib_umem_offset(mr->umem);
+ mr->hw_mr.length = len;
+ mr->hw_mr.vaddr = usr_addr;
+ mr->hw_mr.zbva = false;
+ mr->hw_mr.phy_mr = false;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
+ mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int qedr_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ib_mr);
+ struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
+ int rc = 0;
+
+ rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
+ if (rc)
+ return rc;
+
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+
+ if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+
+ /* it could be user registered memory. */
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return rc;
+}
+
+struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+{
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ int rc = -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
+ max_page_list_len);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->dev = dev;
+ mr->type = QEDR_MR_FRMR;
+
+ rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
+ if (rc)
+ goto err0;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err0;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = 0;
+ mr->hw_mr.remote_read = 0;
+ mr->hw_mr.remote_write = 0;
+ mr->hw_mr.remote_atomic = 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = 0;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.fbo = 0;
+ mr->hw_mr.length = 0;
+ mr->hw_mr.vaddr = 0;
+ mr->hw_mr.zbva = false;
+ mr->hw_mr.phy_mr = true;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ mr->ibmr.rkey = mr->ibmr.lkey;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
+ return mr;
+
+err1:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
+ enum ib_mr_type mr_type, u32 max_num_sg)
+{
+ struct qedr_mr *mr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = __qedr_alloc_mr(ibpd, max_num_sg);
+
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
+
+ return &mr->ibmr;
+}
+
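+/* Callback for ib_sg_to_pages(): write one page address into the next
+ * free PBE, walking across PBL pages as mr->npages grows.
+ */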
+static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+ struct qedr_pbl *pbl_table;
+ struct regpair *pbe;
+ u32 pbes_in_page;
+
+ if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
+ DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
+ mr->npages, addr);
+
+ pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
+ pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
+ pbe = (struct regpair *)pbl_table->va;
+ pbe += mr->npages % pbes_in_page;
+ pbe->lo = cpu_to_le32((u32)addr);
+ pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
+
+ mr->npages++;
+
+ return 0;
+}
+
+static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
+{
+ int work = info->completed - info->completed_handled - 1;
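+
+ /* The '- 1' keeps one completion in reserve so the PBL of the
+ * most recently completed FMR is never recycled prematurely.
+ */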
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
+ while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
+ struct qedr_pbl *pbl;
+
+ /* Free all page lists that can be freed (all those that were
+ * invalidated), under the assumption that if an FMR completed
+ * successfully, any invalidate operation issued before it has
+ * also completed.
+ */
+ pbl = list_first_entry(&info->inuse_pbl_list,
+ struct qedr_pbl, list_entry);
+ list_del(&pbl->list_entry);
+ list_add_tail(&pbl->list_entry, &info->free_pbl_list);
+ info->completed_handled++;
+ }
+}
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+
+ mr->npages = 0;
+
+ handle_completed_mrs(mr->dev, &mr->info);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
+}
+
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_mr *mr;
+ int rc;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = QEDR_MR_DMA;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ /* index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.dma_mr = true;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
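+/* The work queue is full when advancing prod would make it catch up
+ * with cons; one slot is always left unused to distinguish a full
+ * ring from an empty one.
+ */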
+static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
+{
+ return (((wq->prod + 1) % wq->max_wr) == wq->cons);
+}
+
+static int sge_data_len(struct ib_sge *sg_list, int num_sge)
+{
+ int i, len = 0;
+
+ for (i = 0; i < num_sge; i++)
+ len += sg_list[i].length;
+
+ return len;
+}
+
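+/* Byte-swap each 64-bit word of a WQE segment. Exactly one of the two
+ * conversions is a swap on any host: cpu_to_le64() is a no-op on
+ * little-endian machines and cpu_to_be64() on big-endian ones.
+ */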
+static void swap_wqe_data64(u64 *p)
+{
+ int i;
+
+ for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
+ *p = cpu_to_be64(cpu_to_le64(*p));
+}
+
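+/* Copy the payload of an IB_SEND_INLINE work request directly into SQ
+ * WQE segments, producing additional ring elements as needed and
+ * byte-swapping every completed segment for the FW.
+ */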
+static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
+ struct qedr_qp *qp, u8 *wqe_size,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr, u8 *bits,
+ u8 bit)
+{
+ u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
+ char *seg_prt, *wqe;
+ int i, seg_siz;
+
+ if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
+ DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
+ *bad_wr = wr;
+ return 0;
+ }
+
+ if (!data_size)
+ return data_size;
+
+ *bits |= bit;
+
+ seg_prt = NULL;
+ wqe = NULL;
+ seg_siz = 0;
+
+ /* Copy data inline */
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 len = wr->sg_list[i].length;
+ void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
+
+ while (len > 0) {
+ u32 cur;
+
+ /* New segment required */
+ if (!seg_siz) {
+ wqe = (char *)qed_chain_produce(&qp->sq.pbl);
+ seg_prt = wqe;
+ seg_siz = sizeof(struct rdma_sq_common_wqe);
+ (*wqe_size)++;
+ }
+
+ /* Calculate currently allowed length */
+ cur = min_t(u32, len, seg_siz);
+ memcpy(seg_prt, src, cur);
+
+ /* Update segment variables */
+ seg_prt += cur;
+ seg_siz -= cur;
+
+ /* Update sge variables */
+ src += cur;
+ len -= cur;
+
+ /* Swap fully-completed segments */
+ if (!seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+ }
+ }
+
+ /* Swap the last, possibly incomplete, segment */
+ if (seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+
+ return data_size;
+}
+
+#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->flags = cpu_to_le32(vflags); \
+ } while (0)
+
+#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
+ do { \
+ DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
+ (hdr)->num_sges = num_sge; \
+ } while (0)
+
+#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->l_key = cpu_to_le32(vlkey); \
+ } while (0)
+
+static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
+ struct ib_send_wr *wr)
+{
+ u32 data_size = 0;
+ int i;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
+
+ DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
+ sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
+ sge->length = cpu_to_le32(wr->sg_list[i].length);
+ data_size += wr->sg_list[i].length;
+ }
+
+ if (wqe_size)
+ *wqe_size += wr->num_sge;
+
+ return data_size;
+}
+
+static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_rdma_wqe_1st *rwqe,
+ struct rdma_sq_rdma_wqe_2nd *rwqe2,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
+ DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
+
+ if (wr->send_flags & IB_SEND_INLINE) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
+ bad_wr, &rwqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
+}
+
+static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_send_wqe_1st *swqe,
+ struct rdma_sq_send_wqe_2st *swqe2,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ memset(swqe2, 0, sizeof(*swqe2));
+ if (wr->send_flags & IB_SEND_INLINE) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
+ bad_wr, &swqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
+}
+
+static int qedr_prepare_reg(struct qedr_qp *qp,
+ struct rdma_sq_fmr_wqe_1st *fwqe1,
+ struct ib_reg_wr *wr)
+{
+ struct qedr_mr *mr = get_qedr_mr(wr->mr);
+ struct rdma_sq_fmr_wqe_2nd *fwqe2;
+
+ fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
+ fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
+ fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
+ fwqe1->l_key = wr->key;
+
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
+ !!(wr->access & IB_ACCESS_REMOTE_READ));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
+ !!(wr->access & IB_ACCESS_REMOTE_WRITE));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
+ !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
+ !!(wr->access & IB_ACCESS_LOCAL_WRITE));
+ fwqe2->fmr_ctrl = 0;
+
+ SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
+ ilog2(mr->ibmr.page_size) - 12);
+
+ fwqe2->length_hi = 0;
+ fwqe2->length_lo = mr->ibmr.length;
+ fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
+ fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
+
+ qp->wqe_wr_id[qp->sq.prod].mr = mr;
+
+ return 0;
+}
+
+enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return IB_WC_RDMA_WRITE;
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ return IB_WC_SEND;
+ case IB_WR_RDMA_READ:
+ return IB_WC_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return IB_WC_COMP_SWAP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return IB_WC_FETCH_ADD;
+ case IB_WR_REG_MR:
+ return IB_WC_REG_MR;
+ case IB_WR_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+ default:
+ return IB_WC_SEND;
+ }
+}
+
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+{
+ int wq_is_full, err_wr, pbl_is_full;
+ struct qedr_dev *dev = qp->dev;
+
+ /* prevent SQ overflow and/or processing of a bad WR */
+ err_wr = wr->num_sge > qp->sq.max_sges;
+ wq_is_full = qedr_wq_is_full(&qp->sq);
+ pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
+ QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ if (wq_is_full || err_wr || pbl_is_full) {
+ if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
+ DP_ERR(dev,
+ "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
+ }
+
+ if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
+ DP_ERR(dev,
+ "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
+ }
+
+ if (pbl_is_full &&
+ !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
+ DP_ERR(dev,
+ "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
+ }
+ return false;
+ }
+ return true;
+}
+
+int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct rdma_sq_atomic_wqe_1st *awqe1;
+ struct rdma_sq_atomic_wqe_2nd *awqe2;
+ struct rdma_sq_atomic_wqe_3rd *awqe3;
+ struct rdma_sq_send_wqe_2st *swqe2;
+ struct rdma_sq_local_inv_wqe *iwqe;
+ struct rdma_sq_rdma_wqe_2nd *rwqe2;
+ struct rdma_sq_send_wqe_1st *swqe;
+ struct rdma_sq_rdma_wqe_1st *rwqe;
+ struct rdma_sq_fmr_wqe_1st *fwqe1;
+ struct rdma_sq_common_wqe *wqe;
+ u32 length;
+ int rc = 0;
+ bool comp;
+
+ if (!qedr_can_post_send(qp, wr)) {
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+
+ wqe = qed_chain_produce(&qp->sq.pbl);
+ qp->wqe_wr_id[qp->sq.prod].signaled =
+ !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
+
+ wqe->flags = 0;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
+ !!(wr->send_flags & IB_SEND_SOLICITED));
+ comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
+ !!(wr->send_flags & IB_SEND_FENCE));
+ wqe->prev_wqe_size = qp->prev_wqe_size;
+
+ qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
+
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+
+ swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ swqe->wqe_size = 2;
+ swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_WRITE:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ DP_ERR(dev,
+ "RDMA READ WITH INVALIDATE not supported\n");
+ *bad_wr = wr;
+ rc = -EINVAL;
+ break;
+
+ case IB_WR_RDMA_READ:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
+ awqe1->wqe_size = 4;
+
+ awqe2 = qed_chain_produce(&qp->sq.pbl);
+ DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
+ awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
+
+ awqe3 = qed_chain_produce(&qp->sq.pbl);
+
+ if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->compare_add);
+ } else {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->swap);
+ DMA_REGPAIR_LE(awqe3->cmp_data,
+ atomic_wr(wr)->compare_add);
+ }
+
+ qedr_prepare_sq_sges(qp, NULL, wr);
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
+ qp->prev_wqe_size = awqe1->wqe_size;
+ break;
+
+ case IB_WR_LOCAL_INV:
+ iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
+ iwqe->wqe_size = 1;
+
+ iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
+ iwqe->inv_l_key = wr->ex.invalidate_rkey;
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
+ qp->prev_wqe_size = iwqe->wqe_size;
+ break;
+ case IB_WR_REG_MR:
+ DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
+ wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
+ fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
+ fwqe1->wqe_size = 2;
+
+ rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
+ if (rc) {
+ DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
+ *bad_wr = wr;
+ break;
+ }
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
+ qp->prev_wqe_size = fwqe1->wqe_size;
+ break;
+ default:
+ DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (*bad_wr) {
+ u16 value;
+
+ /* Restore prod to its position before
+ * this WR was processed
+ */
+ value = le16_to_cpu(qp->sq.db_data.data.value);
+ qed_chain_set_prod(&qp->sq.pbl, value, wqe);
+
+ /* Restore prev_wqe_size */
+ qp->prev_wqe_size = wqe->prev_wqe_size;
+ rc = -EINVAL;
+ DP_ERR(dev, "POST SEND FAILED\n");
+ }
+
+ return rc;
+}
+
+int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ unsigned long flags;
+ int rc = 0;
+
+ *bad_wr = NULL;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_send(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+ (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "QP in wrong state! QP icid=0x%x state %d\n",
+ qp->icid, qp->state);
+ return -EINVAL;
+ }
+
+ if (!wr) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ DP_ERR(dev, "Got an empty post send.\n");
+ return -EINVAL;
+ }
+
+ while (wr) {
+ rc = __qedr_post_send(ibqp, wr, bad_wr);
+ if (rc)
+ break;
+
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->sq);
+
+ qp->sq.db_data.data.value++;
+
+ wr = wr->next;
+ }
+
+ /* Trigger doorbell
+ * If there was a failure in the first WR then it will be triggered in
+ * vain. However this is not harmful (as long as the producer value is
+ * unchanged). For performance reasons we avoid checking for this
+ * redundant doorbell.
+ */
+ wmb();
+ writel(qp->sq.db_data.raw, qp->sq.db);
+
+ /* Make sure write sticks */
+ mmiowb();
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return rc;
+}
+
+int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int status = 0;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+ (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ while (wr) {
+ int i;
+
+ if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
+ wr->num_sge > qp->rq.max_sges) {
+ DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
+ qed_chain_get_elem_left_u32(&qp->rq.pbl),
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
+ qp->rq.max_sges);
+ status = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ if (!i)
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
+ wr->num_sge);
+
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
+ wr->sg_list[i].lkey);
+
+ RQ_SGE_SET(rqe, wr->sg_list[i].addr,
+ wr->sg_list[i].length, flags);
+ }
+
+ /* Special case of no SGEs. The FW requires between 1 and 4
+ * SGEs, so in this case we post a single SGE with length zero.
+ * This is needed because an RDMA WRITE with immediate consumes
+ * an RQE.
+ */
+ if (!wr->num_sge) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
+
+ RQ_SGE_SET(rqe, 0, 0, flags);
+ i = 1;
+ }
+
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+ qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ /* Flush all the writes before signalling doorbell */
+ wmb();
+
+ qp->rq.db_data.data.value++;
+
+ writel(qp->rq.db_data.raw, qp->rq.db);
+
+ /* Make sure write sticks */
+ mmiowb();
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return status;
+}
+
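+/* A CQE is owned by the driver when its toggle bit matches the
+ * current pass over the CQ ring; this lets new entries be detected
+ * without consulting a separate producer index.
+ */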
+static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+ return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
+ cq->pbl_toggle;
+}
+
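+/* The qp_handle carried in the CQE is the 64-bit kernel pointer that
+ * was stored in qp_handle_{lo,hi} when the QP was created, so it can
+ * be converted straight back into the owning qedr_qp.
+ */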
+static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+ struct qedr_qp *qp;
+
+ qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
+ resp_cqe->qp_handle.lo,
+ u64);
+ return qp;
+}
+
+static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+ return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
+}
+
+/* Return latest CQE (needs processing) */
+static union rdma_cqe *get_cqe(struct qedr_cq *cq)
+{
+ return cq->latest_cqe;
+}
+
+/* For FMRs we need to increase the completed counter that the FMR
+ * algorithm uses to decide whether a PBL can be freed. This must be
+ * done whether or not the work request was signaled, which is why this
+ * function is called from the condition that checks whether a WR
+ * should be skipped: an unsignaled FMR completion must not be missed.
+ */
+static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
+{
+ if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
+ qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+}
+
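+/* Walk the SQ from the software consumer up to the HW consumer given
+ * in the CQE, generating a WC for every signaled WR (or for all WRs
+ * when 'force' is set, as on an error flush) and consuming the
+ * corresponding ring elements.
+ */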
+static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
+ int force)
+{
+ u16 cnt = 0;
+
+ while (num_entries && qp->sq.wqe_cons != hw_cons) {
+ if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
+ qedr_chk_if_fmr(qp);
+ /* skip WC */
+ goto next_cqe;
+ }
+
+ /* fill WC */
+ wc->status = status;
+ wc->wc_flags = 0;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+
+ wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+ wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
+
+ switch (wc->opcode) {
+ case IB_WC_RDMA_WRITE:
+ wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+ break;
+ case IB_WC_COMP_SWAP:
+ case IB_WC_FETCH_ADD:
+ wc->byte_len = 8;
+ break;
+ case IB_WC_REG_MR:
+ qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+ break;
+ default:
+ break;
+ }
+
+ num_entries--;
+ wc++;
+ cnt++;
+next_cqe:
+ while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
+ qed_chain_consume(&qp->sq.pbl);
+ qedr_inc_sw_cons(&qp->sq);
+ }
+
+ return cnt;
+}
+
+static int qedr_poll_cq_req(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct qedr_cq *cq,
+ int num_entries, struct ib_wc *wc,
+ struct rdma_cqe_requester *req)
+{
+ int cnt = 0;
+
+ switch (req->status) {
+ case RDMA_CQE_REQ_STS_OK:
+ cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+ IB_WC_SUCCESS, 0);
+ break;
+ case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+ IB_WC_WR_FLUSH_ERR, 0);
+ break;
+ default:
+ /* process all WQEs before the consumer */
+ qp->state = QED_ROCE_QP_STATE_ERR;
+ cnt = process_req(dev, qp, cq, num_entries, wc,
+ req->sq_cons - 1, IB_WC_SUCCESS, 0);
+ wc += cnt;
+ /* if we have extra WC fill it with actual error info */
+ if (cnt < num_entries) {
+ enum ib_wc_status wc_status;
+
+ switch (req->status) {
+ case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_BAD_RESP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_LEN_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_PROT_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_MW_BIND_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_OP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_RETRY_EXC_ERR;
+ break;
+ default:
+ DP_ERR(dev,
+ "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_GENERAL_ERR;
+ }
+ cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
+ wc_status, 1);
+ }
+ }
+
+ return cnt;
+}
+
+static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp, u64 wr_id)
+{
+ enum ib_wc_status wc_status = IB_WC_SUCCESS;
+ u8 flags;
+
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+
+ switch (resp->status) {
+ case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
+ wc_status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
+ wc_status = IB_WC_LOC_LEN_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
+ wc_status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
+ wc_status = IB_WC_LOC_PROT_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
+ wc_status = IB_WC_MW_BIND_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
+ wc_status = IB_WC_REM_INV_RD_REQ_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_OK:
+ wc_status = IB_WC_SUCCESS;
+ wc->byte_len = le32_to_cpu(resp->length);
+
+ flags = resp->flags & QEDR_RESP_RDMA_IMM;
+
+ if (flags == QEDR_RESP_RDMA_IMM)
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+
+ if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
+ wc->ex.imm_data =
+ le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ }
+ break;
+ default:
+ wc_status = IB_WC_GENERAL_ERR;
+ DP_ERR(dev, "Invalid CQE status detected\n");
+ }
+
+ /* fill WC */
+ wc->status = wc_status;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+ wc->wr_id = wr_id;
+}
+
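The two equality tests on flags in __process_resp_one() above only work if QEDR_RESP_RDMA_IMM is the union of the RDMA and IMM flag bits, so that masking resp->flags can yield either the IMM bit alone or both bits together. A minimal sketch of that assumed layout (values illustrative, not taken from the qedr headers):

    #define QEDR_RESP_IMM      0x1  /* assumed: immediate data present */
    #define QEDR_RESP_RDMA     0x2  /* assumed: RDMA WRITE payload */
    #define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA)

    flags = resp->flags & QEDR_RESP_RDMA_IMM;
    /* flags == QEDR_RESP_RDMA_IMM -> RDMA WRITE with immediate */
    /* flags == QEDR_RESP_IMM      -> SEND with immediate */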
+static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+
+ __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+
+ while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+ qed_chain_consume(&qp->rq.pbl);
+ qedr_inc_sw_cons(&qp->rq);
+
+ return 1;
+}
+
+static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
+ int num_entries, struct ib_wc *wc, u16 hw_cons)
+{
+ u16 cnt = 0;
+
+ while (num_entries && qp->rq.wqe_cons != hw_cons) {
+ /* fill WC */
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wc_flags = 0;
+ wc->src_qp = qp->id;
+ wc->byte_len = 0;
+ wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+ wc->qp = &qp->ibqp;
+ num_entries--;
+ wc++;
+ cnt++;
+ while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+ qed_chain_consume(&qp->rq.pbl);
+ qedr_inc_sw_cons(&qp->rq);
+ }
+
+ return cnt;
+}
+
+static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+ struct rdma_cqe_responder *resp, int *update)
+{
+ if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
+ consume_cqe(cq);
+ *update |= 1;
+ }
+}
+
+static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc, struct rdma_cqe_responder *resp,
+ int *update)
+{
+ int cnt;
+
+ if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+ cnt = process_resp_flush(qp, cq, num_entries, wc,
+ le16_to_cpu(resp->rq_cons));
+ try_consume_resp_cqe(cq, qp, resp, update);
+ } else {
+ cnt = process_resp_one(dev, qp, cq, wc, resp);
+ consume_cqe(cq);
+ *update |= 1;
+ }
+
+ return cnt;
+}
+
+static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+ struct rdma_cqe_requester *req, int *update)
+{
+ if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
+ consume_cqe(cq);
+ *update |= 1;
+ }
+}
+
+int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ union rdma_cqe *cqe = cq->latest_cqe;
+ u32 old_cons, new_cons;
+ unsigned long flags;
+ int update = 0;
+ int done = 0;
+
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ return qedr_gsi_poll_cq(ibcq, num_entries, wc);
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+ while (num_entries && is_valid_cqe(cq, cqe)) {
+ struct qedr_qp *qp;
+ int cnt = 0;
+
+ /* prevent speculative reads of any field of CQE */
+ rmb();
+
+ qp = cqe_get_qp(cqe);
+ if (!qp) {
+ WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
+ break;
+ }
+
+ wc->qp = &qp->ibqp;
+
+ switch (cqe_get_type(cqe)) {
+ case RDMA_CQE_TYPE_REQUESTER:
+ cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
+ &cqe->req);
+ try_consume_req_cqe(cq, qp, &cqe->req, &update);
+ break;
+ case RDMA_CQE_TYPE_RESPONDER_RQ:
+ cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
+ &cqe->resp, &update);
+ break;
+ case RDMA_CQE_TYPE_INVALID:
+ default:
+ DP_ERR(dev, "Error: invalid CQE type = %d\n",
+ cqe_get_type(cqe));
+ }
+ num_entries -= cnt;
+ wc += cnt;
+ done += cnt;
+
+ cqe = get_cqe(cq);
+ }
+ new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+ cq->cq_cons += new_cons - old_cons;
+
+ if (update)
+ /* doorbell notifies about the latest VALID entry,
+ * but the chain already points to the next INVALID one
+ */
+ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return done;
+}
+
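For reference, a consumer drains a CQ such as this through the core ib_poll_cq() wrapper, which dispatches to the poll_cq verb implemented above. A minimal polling loop, as a sketch (cq is caller state; error handling elided):

    struct ib_wc wc[16];
    int n, i;

    do {
            n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
            for (i = 0; i < n; i++)
                    if (wc[i].status != IB_WC_SUCCESS)
                            pr_err("wr_id %llu completed with status %d\n",
                                   (unsigned long long)wc[i].wr_id,
                                   wc[i].status);
    } while (n > 0);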
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num,
+ const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *mad_hdr,
+ size_t in_mad_size, struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
+ mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
+ mad_hdr->class_specific, mad_hdr->class_version,
+ mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
+ return IB_MAD_RESULT_SUCCESS;
+}
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = qedr_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
new file mode 100644
index 000000000000..a9b5e67bb81e
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -0,0 +1,101 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_VERBS_H__
+#define __QEDR_VERBS_H__
+
+int qedr_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr, struct ib_udata *udata);
+int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int qedr_modify_port(struct ib_device *, u8 port, int mask,
+ struct ib_port_modify *props);
+
+int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
+
+int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
+int qedr_dealloc_ucontext(struct ib_ucontext *);
+
+int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, void **context);
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context);
+struct ib_pd *qedr_alloc_pd(struct ib_device *,
+ struct ib_ucontext *, struct ib_udata *);
+int qedr_dealloc_pd(struct ib_pd *pd);
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *ib_ctx,
+ struct ib_udata *udata);
+int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+int qedr_destroy_cq(struct ib_cq *);
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
+ struct ib_udata *);
+int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *);
+int qedr_destroy_qp(struct ib_qp *ibqp);
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
+int qedr_destroy_ah(struct ib_ah *ibah);
+
+int qedr_dereg_mr(struct ib_mr *);
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *);
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
+int qedr_post_send(struct ib_qp *, struct ib_send_wr *,
+ struct ib_send_wr **bad_wr);
+int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *,
+ struct ib_recv_wr **bad_wr);
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in_mad,
+ size_t in_mad_size, struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable);
+#endif
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index c3edc033f7c4..f1e66efea98a 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -64,7 +64,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_blocks = 0;
- inode->i_atime = CURRENT_TIME;
+ inode->i_atime = current_time(inode);
inode->i_mtime = inode->i_atime;
inode->i_ctime = inode->i_atime;
inode->i_private = data;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f253111e682e..1730aa839a47 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -614,8 +614,8 @@ static int qib_create_workqueues(struct qib_devdata *dd)
snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
dd->unit, pidx);
- ppd->qib_wq =
- create_singlethread_workqueue(wq_name);
+ ppd->qib_wq = alloc_ordered_workqueue(wq_name,
+ WQ_MEM_RECLAIM);
if (!ppd->qib_wq)
goto wq_error;
}
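This hunk (and the matching ipoib conversions further down) replaces create_singlethread_workqueue() with alloc_ordered_workqueue(), which keeps the one-item-at-a-time ordering guarantee while making the rescuer for memory reclaim explicit. The pattern, sketched:

    /* before: ordering implied by a dedicated kthread, no rescuer */
    wq = create_singlethread_workqueue(wq_name);

    /* after: same ordering, plus a rescuer for the reclaim path */
    wq = alloc_ordered_workqueue(wq_name, WQ_MEM_RECLAIM);
    if (!wq)
            return -ENOMEM;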
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2d2b94fd3633..75f08624ac05 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got,
+ FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
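The qib and usnic hunks here track a get_user_pages() interface change: the separate write/force int arguments were folded into a single gup_flags word. The conversion, roughly:

    /* old: get_user_pages(start, nr_pages, write, force, pages, vmas) */
    ret = get_user_pages(start, nr_pages, 1, 1, pages, NULL);

    /* new: booleans folded into one flags argument */
    ret = get_user_pages(start, nr_pages, FOLL_WRITE | FOLL_FORCE,
                         pages, NULL);

write=1 maps to FOLL_WRITE and force=1 to FOLL_FORCE; the usnic hunk below shows the non-constant case, building gup_flags from its writable flag.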
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 876ebb442d38..954f15064514 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1370,7 +1370,8 @@ static int qib_modify_device(struct ib_device *device,
}
if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
- memcpy(device->node_desc, device_modify->node_desc, 64);
+ memcpy(device->node_desc, device_modify->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
for (i = 0; i < dd->num_pports; i++) {
struct qib_ibport *ibp = &dd->pport[i].ibport_data;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index a0b6ebee4d8a..1ccee6ea5bc3 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int i;
int flags;
dma_addr_t pa;
+ unsigned int gup_flags;
if (!can_do_mlock())
return -EPERM;
@@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
flags = IOMMU_READ | IOMMU_CACHE;
flags |= (writable) ? IOMMU_WRITE : 0;
+ gup_flags = FOLL_WRITE;
+ gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;
ret = 0;
@@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
- 1, !writable, page_list, NULL);
+ gup_flags, page_list, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index f2f229efbe64..6d9904a4a0ab 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -129,7 +129,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
if (likely(worker)) {
cq->notify = RVT_CQ_NONE;
cq->triggered++;
- queue_kthread_work(worker, &cq->comptask);
+ kthread_queue_work(worker, &cq->comptask);
}
}
@@ -265,7 +265,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
cq->ibcq.cqe = entries;
cq->notify = RVT_CQ_NONE;
spin_lock_init(&cq->lock);
- init_kthread_work(&cq->comptask, send_complete);
+ kthread_init_work(&cq->comptask, send_complete);
cq->queue = wc;
ret = &cq->ibcq;
@@ -295,7 +295,7 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
struct rvt_dev_info *rdi = cq->rdi;
- flush_kthread_work(&cq->comptask);
+ kthread_flush_work(&cq->comptask);
spin_lock(&rdi->n_cqs_lock);
rdi->n_cqs_allocated--;
spin_unlock(&rdi->n_cqs_lock);
@@ -514,7 +514,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
if (!rdi->worker)
return -ENOMEM;
- init_kthread_worker(rdi->worker);
+ kthread_init_worker(rdi->worker);
task = kthread_create_on_node(
kthread_worker_fn,
rdi->worker,
@@ -547,7 +547,7 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
/* blocks future queuing from send_complete() */
rdi->worker = NULL;
smp_wmb(); /* See rdi_cq_enter */
- flush_kthread_worker(worker);
+ kthread_flush_worker(worker);
kthread_stop(worker->task);
kfree(worker);
}
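The rdmavt hunks above follow a tree-wide rename that gave the kthread worker API a consistent kthread_ prefix; behavior is unchanged. A self-contained sketch of the renamed API (my_work_fn and the thread name are hypothetical):

    static void my_work_fn(struct kthread_work *work)
    {
            pr_info("running on the dedicated worker thread\n");
    }

    struct kthread_worker worker;
    struct kthread_work work;
    struct task_struct *task;

    kthread_init_worker(&worker);
    kthread_init_work(&work, my_work_fn);
    task = kthread_run(kthread_worker_fn, &worker, "my_worker");
    kthread_queue_work(&worker, &work);  /* hand the item to the thread */
    kthread_flush_work(&work);           /* wait for this one item */
    kthread_flush_worker(&worker);       /* drain everything queued */
    kthread_stop(task);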
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
index 33076a5eee2f..f2cefb0d9180 100644
--- a/drivers/infiniband/sw/rdmavt/dma.c
+++ b/drivers/infiniband/sw/rdmavt/dma.c
@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
if (WARN_ON(!valid_dma_direction(direction)))
return BAD_DMA_ADDRESS;
- if (offset + size > PAGE_SIZE)
- return BAD_DMA_ADDRESS;
-
addr = (u64)page_address(page);
if (addr)
addr += offset;
@@ -138,6 +135,21 @@ static void rvt_unmap_sg(struct ib_device *dev,
/* This is a stub, nothing to be done here */
}
+static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ return rvt_map_sg(dev, sgl, nents, direction);
+}
+
+static void rvt_unmap_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ rvt_unmap_sg(dev, sg, nents, direction);
+}
+
static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
size_t size, enum dma_data_direction dir)
{
@@ -177,6 +189,8 @@ struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
.unmap_page = rvt_dma_unmap_page,
.map_sg = rvt_map_sg,
.unmap_sg = rvt_unmap_sg,
+ .map_sg_attrs = rvt_map_sg_attrs,
+ .unmap_sg_attrs = rvt_unmap_sg_attrs,
.sync_single_for_cpu = rvt_sync_single_for_cpu,
.sync_single_for_device = rvt_sync_single_for_device,
.alloc_coherent = rvt_dma_alloc_coherent,
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index ddd59270ff6d..ab6c3c25d7ff 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -358,38 +358,16 @@ static int __init rxe_module_init(void)
/* initialize slab caches for managed objects */
err = rxe_cache_init();
if (err) {
- pr_err("rxe: unable to init object pools\n");
+ pr_err("unable to init object pools\n");
return err;
}
- err = rxe_net_ipv4_init();
- if (err) {
- pr_err("rxe: unable to init ipv4 tunnel\n");
- rxe_cache_exit();
- goto exit;
- }
-
- err = rxe_net_ipv6_init();
- if (err) {
- pr_err("rxe: unable to init ipv6 tunnel\n");
- rxe_cache_exit();
- goto exit;
- }
-
- err = register_netdevice_notifier(&rxe_net_notifier);
- if (err) {
- pr_err("rxe: Failed to rigister netdev notifier\n");
- goto exit;
- }
-
- pr_info("rxe: loaded\n");
+ err = rxe_net_init();
+ if (err)
+ return err;
+ pr_info("loaded\n");
return 0;
-
-exit:
- rxe_release_udp_tunnel(recv_sockets.sk4);
- rxe_release_udp_tunnel(recv_sockets.sk6);
- return err;
}
static void __exit rxe_module_exit(void)
@@ -398,8 +376,8 @@ static void __exit rxe_module_exit(void)
rxe_net_exit();
rxe_cache_exit();
- pr_info("rxe: unloaded\n");
+ pr_info("unloaded\n");
}
-module_init(rxe_module_init);
+late_initcall(rxe_module_init);
module_exit(rxe_module_exit);
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 12c71c549f97..a696af81e4a5 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -34,6 +34,11 @@
#ifndef RXE_H
#define RXE_H
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 5c9474212d4e..604f6fee96bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -39,7 +39,7 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
struct rxe_port *port;
if (attr->port_num != 1) {
- pr_info("rxe: invalid port_num = %d\n", attr->port_num);
+ pr_info("invalid port_num = %d\n", attr->port_num);
return -EINVAL;
}
@@ -47,7 +47,7 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
if (attr->ah_flags & IB_AH_GRH) {
if (attr->grh.sgid_index > port->attr.gid_tbl_len) {
- pr_info("rxe: invalid sgid index = %d\n",
+ pr_info("invalid sgid index = %d\n",
attr->grh.sgid_index);
return -EINVAL;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 1c59ef2c67aa..6c5e29db88e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -567,7 +567,8 @@ int rxe_completer(void *arg)
state = COMPST_GET_ACK;
while (1) {
- pr_debug("state = %s\n", comp_state_name[state]);
+ pr_debug("qp#%d state = %s\n", qp_num(qp),
+ comp_state_name[state]);
switch (state) {
case COMPST_GET_ACK:
skb = skb_dequeue(&qp->resp_pkts);
@@ -709,7 +710,8 @@ int rxe_completer(void *arg)
qp->comp.rnr_retry--;
qp->req.need_retry = 1;
- pr_debug("set rnr nak timer\n");
+ pr_debug("qp#%d set rnr nak timer\n",
+ qp_num(qp));
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
diff --git a/drivers/infiniband/sw/rxe/rxe_dma.c b/drivers/infiniband/sw/rxe/rxe_dma.c
index 7634c1a81b2b..a0f8af5851ae 100644
--- a/drivers/infiniband/sw/rxe/rxe_dma.c
+++ b/drivers/infiniband/sw/rxe/rxe_dma.c
@@ -117,6 +117,21 @@ static void rxe_unmap_sg(struct ib_device *dev,
WARN_ON(!valid_dma_direction(direction));
}
+static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ return rxe_map_sg(dev, sgl, nents, direction);
+}
+
+static void rxe_unmap_sg_attrs(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ rxe_unmap_sg(dev, sg, nents, direction);
+}
+
static void rxe_sync_single_for_cpu(struct ib_device *dev,
u64 addr,
size_t size, enum dma_data_direction dir)
@@ -159,6 +174,8 @@ struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
.unmap_page = rxe_dma_unmap_page,
.map_sg = rxe_map_sg,
.unmap_sg = rxe_unmap_sg,
+ .map_sg_attrs = rxe_map_sg_attrs,
+ .unmap_sg_attrs = rxe_unmap_sg_attrs,
.sync_single_for_cpu = rxe_sync_single_for_cpu,
.sync_single_for_device = rxe_sync_single_for_device,
.alloc_coherent = rxe_dma_alloc_coherent,
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 4a5484ef604f..73849a5a91b3 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -198,7 +198,7 @@ void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
qp->resp.res_head++;
- if (unlikely(qp->resp.res_head == qp->attr.max_rd_atomic))
+ if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
qp->resp.res_head = 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 54b3c7c99eff..c572a4c09359 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -126,7 +126,7 @@ found_it:
ret = remap_vmalloc_range(vma, ip->obj, 0);
if (ret) {
- pr_err("rxe: err %d from remap_vmalloc_range\n", ret);
+ pr_err("err %d from remap_vmalloc_range\n", ret);
goto done;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index f3dab6574504..1869152f1d23 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -39,7 +39,7 @@
*/
static u8 rxe_get_key(void)
{
- static unsigned key = 1;
+ static u32 key = 1;
key = key << 1;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index eedf2f1cafdf..ffff5a54cb34 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -65,7 +65,7 @@ struct rxe_dev *net_to_rxe(struct net_device *ndev)
return found;
}
-struct rxe_dev *get_rxe_by_name(const char* name)
+struct rxe_dev *get_rxe_by_name(const char *name)
{
struct rxe_dev *rxe;
struct rxe_dev *found = NULL;
@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
{
int err;
struct socket *sock;
- struct udp_port_cfg udp_cfg;
- struct udp_tunnel_sock_cfg tnl_cfg;
-
- memset(&udp_cfg, 0, sizeof(udp_cfg));
+ struct udp_port_cfg udp_cfg = {0};
+ struct udp_tunnel_sock_cfg tnl_cfg = {0};
if (ipv6) {
udp_cfg.family = AF_INET6;
@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
return ERR_PTR(err);
}
- tnl_cfg.sk_user_data = NULL;
tnl_cfg.encap_type = 1;
tnl_cfg.encap_rcv = rxe_udp_encap_recv;
- tnl_cfg.encap_destroy = NULL;
/* Setup UDP tunnel */
setup_udp_tunnel_sock(net, sock, &tnl_cfg);
@@ -350,14 +346,14 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
-static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, struct rxe_av *av)
{
struct dst_entry *dst;
bool xnet = false;
__be16 df = htons(IP_DF);
struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
dst = rxe_find_route4(rxe->ndev, saddr, daddr);
if (!dst) {
@@ -376,12 +372,12 @@ static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
return 0;
}
-static int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, struct rxe_av *av)
{
struct dst_entry *dst;
struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
dst = rxe_find_route6(rxe->ndev, saddr, daddr);
if (!dst) {
@@ -408,9 +404,9 @@ static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct rxe_av *av = rxe_get_av(pkt);
if (av->network_type == RDMA_NETWORK_IPV4)
- err = prepare4(rxe, skb, av);
+ err = prepare4(rxe, pkt, skb, av);
else if (av->network_type == RDMA_NETWORK_IPV6)
- err = prepare6(rxe, skb, av);
+ err = prepare6(rxe, pkt, skb, av);
*crc = rxe_icrc_hdr(pkt, skb);
@@ -601,8 +597,7 @@ void rxe_port_up(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
- pr_info("rxe: set %s active\n", rxe->ib_dev.name);
- return;
+ pr_info("set %s active\n", rxe->ib_dev.name);
}
/* Caller must hold net_info_lock */
@@ -615,8 +610,7 @@ void rxe_port_down(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
- pr_info("rxe: set %s down\n", rxe->ib_dev.name);
- return;
+ pr_info("set %s down\n", rxe->ib_dev.name);
}
static int rxe_notify(struct notifier_block *not_blk,
@@ -641,7 +635,7 @@ static int rxe_notify(struct notifier_block *not_blk,
rxe_port_down(rxe);
break;
case NETDEV_CHANGEMTU:
- pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu);
+ pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
rxe_set_mtu(rxe, ndev->mtu);
break;
case NETDEV_REBOOT:
@@ -651,7 +645,7 @@ static int rxe_notify(struct notifier_block *not_blk,
case NETDEV_CHANGENAME:
case NETDEV_FEAT_CHANGE:
default:
- pr_info("rxe: ignoring netdev event = %ld for %s\n",
+ pr_info("ignoring netdev event = %ld for %s\n",
event, ndev->name);
break;
}
@@ -671,7 +665,7 @@ int rxe_net_ipv4_init(void)
htons(ROCE_V2_UDP_DPORT), false);
if (IS_ERR(recv_sockets.sk4)) {
recv_sockets.sk4 = NULL;
- pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
+ pr_err("Failed to create IPv4 UDP tunnel\n");
return -1;
}
@@ -688,7 +682,7 @@ int rxe_net_ipv6_init(void)
htons(ROCE_V2_UDP_DPORT), true);
if (IS_ERR(recv_sockets.sk6)) {
recv_sockets.sk6 = NULL;
- pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+ pr_err("Failed to create IPv6 UDP tunnel\n");
return -1;
}
#endif
@@ -701,3 +695,26 @@ void rxe_net_exit(void)
rxe_release_udp_tunnel(recv_sockets.sk4);
unregister_netdevice_notifier(&rxe_net_notifier);
}
+
+int rxe_net_init(void)
+{
+ int err;
+
+ recv_sockets.sk6 = NULL;
+
+ err = rxe_net_ipv4_init();
+ if (err)
+ return err;
+ err = rxe_net_ipv6_init();
+ if (err)
+ goto err_out;
+ err = register_netdevice_notifier(&rxe_net_notifier);
+ if (err) {
+ pr_err("Failed to register netdev notifier\n");
+ goto err_out;
+ }
+ return 0;
+err_out:
+ rxe_net_exit();
+ return err;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 0daf7f09e5b5..1c06b3bfe1b6 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -49,8 +49,7 @@ void rxe_release_udp_tunnel(struct socket *sk);
struct rxe_dev *rxe_net_add(struct net_device *ndev);
-int rxe_net_ipv4_init(void);
-int rxe_net_ipv6_init(void);
+int rxe_net_init(void);
void rxe_net_exit(void);
#endif /* RXE_NET_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 22ba24f2a2c1..c3e60e4bde6e 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -146,7 +146,7 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
if (qp->resp.resources) {
int i;
- for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
struct resp_res *res = &qp->resp.resources[i];
free_rd_atomic_resource(qp, res);
@@ -174,7 +174,7 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
struct resp_res *res;
if (qp->resp.resources) {
- for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
res = &qp->resp.resources[i];
free_rd_atomic_resource(qp, res);
}
@@ -298,8 +298,8 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
wqe_size = rcv_wqe_size(qp->rq.max_sge);
- pr_debug("max_wr = %d, max_sge = %d, wqe_size = %d\n",
- qp->rq.max_wr, qp->rq.max_sge, wqe_size);
+ pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
+ qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
qp->rq.queue = rxe_queue_init(rxe,
&qp->rq.max_wr,
@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
if (qp->sq.queue) {
__rxe_do_task(&qp->comp.task);
__rxe_do_task(&qp->req.task);
+ rxe_queue_reset(qp->sq.queue);
}
/* cleanup attributes */
@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
{
qp->req.state = QP_STATE_ERROR;
qp->resp.state = QP_STATE_ERROR;
+ qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
rxe_run_task(&qp->resp.task, 1);
@@ -596,14 +598,21 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
+ qp->attr.max_rd_atomic = max_rd_atomic;
+ atomic_set(&qp->req.rd_atomic, max_rd_atomic);
+ }
+
+ if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ int max_dest_rd_atomic =
+ __roundup_pow_of_two(attr->max_dest_rd_atomic);
+
+ qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
+
free_rd_atomic_resources(qp);
- err = alloc_rd_atomic_resources(qp, max_rd_atomic);
+ err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
if (err)
return err;
-
- qp->attr.max_rd_atomic = max_rd_atomic;
- atomic_set(&qp->req.rd_atomic, max_rd_atomic);
}
if (mask & IB_QP_CUR_STATE)
@@ -673,24 +682,27 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
if (mask & IB_QP_RETRY_CNT) {
qp->attr.retry_cnt = attr->retry_cnt;
qp->comp.retry_cnt = attr->retry_cnt;
- pr_debug("set retry count = %d\n", attr->retry_cnt);
+ pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
+ attr->retry_cnt);
}
if (mask & IB_QP_RNR_RETRY) {
qp->attr.rnr_retry = attr->rnr_retry;
qp->comp.rnr_retry = attr->rnr_retry;
- pr_debug("set rnr retry count = %d\n", attr->rnr_retry);
+ pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
+ attr->rnr_retry);
}
if (mask & IB_QP_RQ_PSN) {
qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
qp->resp.psn = qp->attr.rq_psn;
- pr_debug("set resp psn = 0x%x\n", qp->resp.psn);
+ pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
+ qp->resp.psn);
}
if (mask & IB_QP_MIN_RNR_TIMER) {
qp->attr.min_rnr_timer = attr->min_rnr_timer;
- pr_debug("set min rnr timer = 0x%x\n",
+ pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
attr->min_rnr_timer);
}
@@ -698,12 +710,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
qp->req.psn = qp->attr.sq_psn;
qp->comp.psn = qp->attr.sq_psn;
- pr_debug("set req psn = 0x%x\n", qp->req.psn);
- }
-
- if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- qp->attr.max_dest_rd_atomic =
- __roundup_pow_of_two(attr->max_dest_rd_atomic);
+ pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
}
if (mask & IB_QP_PATH_MIG_STATE)
@@ -717,38 +724,38 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
switch (attr->qp_state) {
case IB_QPS_RESET:
- pr_debug("qp state -> RESET\n");
+ pr_debug("qp#%d state -> RESET\n", qp_num(qp));
rxe_qp_reset(qp);
break;
case IB_QPS_INIT:
- pr_debug("qp state -> INIT\n");
+ pr_debug("qp#%d state -> INIT\n", qp_num(qp));
qp->req.state = QP_STATE_INIT;
qp->resp.state = QP_STATE_INIT;
break;
case IB_QPS_RTR:
- pr_debug("qp state -> RTR\n");
+ pr_debug("qp#%d state -> RTR\n", qp_num(qp));
qp->resp.state = QP_STATE_READY;
break;
case IB_QPS_RTS:
- pr_debug("qp state -> RTS\n");
+ pr_debug("qp#%d state -> RTS\n", qp_num(qp));
qp->req.state = QP_STATE_READY;
break;
case IB_QPS_SQD:
- pr_debug("qp state -> SQD\n");
+ pr_debug("qp#%d state -> SQD\n", qp_num(qp));
rxe_qp_drain(qp);
break;
case IB_QPS_SQE:
- pr_warn("qp state -> SQE !!?\n");
+ pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
/* Not possible from modify_qp. */
break;
case IB_QPS_ERR:
- pr_debug("qp state -> ERR\n");
+ pr_debug("qp#%d state -> ERR\n", qp_num(qp));
rxe_qp_error(qp);
break;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 08274254eb88..d14bf496d62d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -84,6 +84,15 @@ err1:
return -EINVAL;
}
+inline void rxe_queue_reset(struct rxe_queue *q)
+{
+ /* the queue is composed of a management header followed by the
+ * memory of the queue itself; see "struct rxe_queue_buf" in rxe_queue.h.
+ * Reset only the queue contents, not the management header.
+ */
+ memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
+}
+
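rxe_queue_reset() relies on the queue buffer being a fixed management header immediately followed by the ring memory. A sketch of the assumed layout (struct rxe_queue_buf abridged here; see rxe_queue.h for the real field list and padding):

    struct rxe_queue_buf {
            __u32 log2_elem_size;
            __u32 index_mask;
            __u32 producer_index;
            __u32 consumer_index;
            __u8  data[0];  /* the ring itself starts here */
    };

    /* hence: wipe only the ring, leave the header intact */
    memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));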
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
int *num_elem,
unsigned int elem_size)
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 239fd609c31e..8c8641c87817 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
size_t buf_size,
struct rxe_mmap_info **ip_p);
+void rxe_queue_reset(struct rxe_queue *q);
+
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
int *num_elem,
unsigned int elem_size);
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 144d2f129fcd..46f062842a9a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -387,7 +387,8 @@ int rxe_rcv(struct sk_buff *skb)
pack_icrc = be32_to_cpu(*icrcp);
calc_icrc = rxe_icrc_hdr(pkt, skb);
- calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt), payload_size(pkt));
+ calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt),
+ payload_size(pkt));
calc_icrc = cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
char saddr[sizeof(struct in6_addr)];
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 13a848a518e8..22bd9630dcd9 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -38,7 +38,7 @@
#include "rxe_queue.h"
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- unsigned opcode);
+ u32 opcode);
static inline void retry_first_write_send(struct rxe_qp *qp,
struct rxe_send_wqe *wqe,
@@ -121,7 +121,7 @@ void rnr_nak_timer(unsigned long data)
{
struct rxe_qp *qp = (struct rxe_qp *)data;
- pr_debug("rnr nak timer fired\n");
+ pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
rxe_run_task(&qp->req.task, 1);
}
@@ -187,7 +187,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
return wqe;
}
-static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
switch (opcode) {
case IB_WR_RDMA_WRITE:
@@ -259,7 +259,7 @@ static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
return -EINVAL;
}
-static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
switch (opcode) {
case IB_WR_RDMA_WRITE:
@@ -311,7 +311,7 @@ static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
}
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- unsigned opcode)
+ u32 opcode)
{
int fits = (wqe->dma.resid <= qp->mtu);
@@ -588,7 +588,7 @@ int rxe_requester(void *arg)
struct rxe_pkt_info pkt;
struct sk_buff *skb;
struct rxe_send_wqe *wqe;
- unsigned mask;
+ enum rxe_hdr_mask mask;
int payload;
int mtu;
int opcode;
@@ -626,7 +626,8 @@ next_wqe:
rmr = rxe_pool_get_index(&rxe->mr_pool,
wqe->wr.ex.invalidate_rkey >> 8);
if (!rmr) {
- pr_err("No mr for key %#x\n", wqe->wr.ex.invalidate_rkey);
+ pr_err("No mr for key %#x\n",
+ wqe->wr.ex.invalidate_rkey);
wqe->state = wqe_state_error;
wqe->status = IB_WC_MW_BIND_ERR;
goto exit;
@@ -695,19 +696,20 @@ next_wqe:
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- goto complete;
+ __rxe_do_task(&qp->comp.task);
+ return 0;
}
payload = mtu;
}
skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
if (unlikely(!skb)) {
- pr_err("Failed allocating skb\n");
+ pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
goto err;
}
if (fill_packet(qp, wqe, &pkt, skb, payload)) {
- pr_debug("Error during fill packet\n");
+ pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
goto err;
}
@@ -744,13 +746,17 @@ err:
wqe->status = IB_WC_LOC_PROT_ERR;
wqe->state = wqe_state_error;
-complete:
- if (qp_type(qp) != IB_QPT_RC) {
- while (rxe_completer(qp) == 0)
- ;
- }
-
- return 0;
+ /*
+ * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
+ * ---------8<---------8<-------------
+ * ...Note that if a completion error occurs, a Work Completion
+ * will always be generated, even if the signaling
+ * indicator requests an Unsignaled Completion.
+ * ---------8<---------8<-------------
+ */
+ wqe->wr.send_flags |= IB_SEND_SIGNALED;
+ __rxe_do_task(&qp->comp.task);
+ return -EAGAIN;
exit:
return -EAGAIN;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 3e0f0f2baace..dd3d88adc003 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -383,7 +383,7 @@ static enum resp_states check_resource(struct rxe_qp *qp,
* too many read/atomic ops, we just
* recycle the responder resource queue
*/
- if (likely(qp->attr.max_rd_atomic > 0))
+ if (likely(qp->attr.max_dest_rd_atomic > 0))
return RESPST_CHK_LENGTH;
else
return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
@@ -749,6 +749,18 @@ static enum resp_states read_reply(struct rxe_qp *qp,
return state;
}
+static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
+ struct rxe_pkt_info *pkt)
+{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ memset(hdr, 0, sizeof(*hdr));
+ if (skb->protocol == htons(ETH_P_IP))
+ memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
+}
+
+/* Executes a new request. A retried request never reaches this function
+ * (sends and writes are discarded, and reads and atomics are retried
+ * elsewhere).
+ */
*/
@@ -761,13 +773,8 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
qp_type(qp) == IB_QPT_SMI ||
qp_type(qp) == IB_QPT_GSI) {
union rdma_network_hdr hdr;
- struct sk_buff *skb = PKT_TO_SKB(pkt);
- memset(&hdr, 0, sizeof(hdr));
- if (skb->protocol == htons(ETH_P_IP))
- memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
- else if (skb->protocol == htons(ETH_P_IPV6))
- memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr.ibgrh));
+ build_rdma_network_hdr(&hdr, pkt);
err = send_data_in(qp, &hdr, sizeof(hdr));
if (err)
@@ -881,7 +888,8 @@ static enum resp_states do_complete(struct rxe_qp *qp,
rmr = rxe_pool_get_index(&rxe->mr_pool,
wc->ex.invalidate_rkey >> 8);
if (unlikely(!rmr)) {
- pr_err("Bad rkey %#x invalidation\n", wc->ex.invalidate_rkey);
+ pr_err("Bad rkey %#x invalidation\n",
+ wc->ex.invalidate_rkey);
return RESPST_ERROR;
}
rmr->state = RXE_MEM_STATE_FREE;
@@ -1208,7 +1216,8 @@ int rxe_responder(void *arg)
}
while (1) {
- pr_debug("state = %s\n", resp_state_name[state]);
+ pr_debug("qp#%d state = %s\n", qp_num(qp),
+ resp_state_name[state]);
switch (state) {
case RESPST_GET_REQ:
state = get_req(qp, &pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index cf8e77800046..d5ed7571128f 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -79,7 +79,7 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
len = sanitize_arg(val, intf, sizeof(intf));
if (!len) {
- pr_err("rxe: add: invalid interface name\n");
+ pr_err("add: invalid interface name\n");
err = -EINVAL;
goto err;
}
@@ -92,20 +92,20 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
}
if (net_to_rxe(ndev)) {
- pr_err("rxe: already configured on %s\n", intf);
+ pr_err("already configured on %s\n", intf);
err = -EINVAL;
goto err;
}
rxe = rxe_net_add(ndev);
if (!rxe) {
- pr_err("rxe: failed to add %s\n", intf);
+ pr_err("failed to add %s\n", intf);
err = -EINVAL;
goto err;
}
rxe_set_port_state(ndev);
- pr_info("rxe: added %s to %s\n", rxe->ib_dev.name, intf);
+ pr_info("added %s to %s\n", rxe->ib_dev.name, intf);
err:
if (ndev)
dev_put(ndev);
@@ -120,7 +120,7 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
len = sanitize_arg(val, intf, sizeof(intf));
if (!len) {
- pr_err("rxe: add: invalid interface name\n");
+ pr_err("remove: invalid interface name\n");
return -EINVAL;
}
@@ -133,7 +133,7 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
rxe = get_rxe_by_name(intf);
if (!rxe) {
- pr_err("rxe: not configured on %s\n", intf);
+ pr_err("not configured on %s\n", intf);
return -EINVAL;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 4552be960c6a..19841c863daf 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -100,10 +100,12 @@ static int rxe_query_port(struct ib_device *dev,
rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
speed = cmd.speed;
} else {
- pr_warn("%s speed is unknown, defaulting to 1000\n", rxe->ndev->name);
+ pr_warn("%s speed is unknown, defaulting to 1000\n",
+ rxe->ndev->name);
speed = 1000;
}
- rxe_eth_speed_to_ib_speed(speed, &attr->active_speed, &attr->active_width);
+ rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
+ &attr->active_width);
mutex_unlock(&rxe->usdev_lock);
return 0;
@@ -761,7 +763,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
}
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
- unsigned mask, u32 length)
+ unsigned int mask, u32 length)
{
int err;
struct rxe_sq *sq = &qp->sq;
@@ -801,26 +803,15 @@ err1:
return err;
}
-static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
{
int err = 0;
- struct rxe_qp *qp = to_rqp(ibqp);
unsigned int mask;
unsigned int length = 0;
int i;
int must_sched;
- if (unlikely(!qp->valid)) {
- *bad_wr = wr;
- return -EINVAL;
- }
-
- if (unlikely(qp->req.state < QP_STATE_READY)) {
- *bad_wr = wr;
- return -EINVAL;
- }
-
while (wr) {
mask = wr_opcode_mask(wr->opcode, qp);
if (unlikely(!mask)) {
@@ -861,6 +852,29 @@ static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
return err;
}
+static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ if (unlikely(!qp->valid)) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ if (unlikely(qp->req.state < QP_STATE_READY)) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ if (qp->is_user) {
+ /* Utilize process context to do protocol processing */
+ rxe_run_task(&qp->req.task, 0);
+ return 0;
+ }
+
+ return rxe_post_send_kernel(qp, wr, bad_wr);
+}
+
static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr)
{
@@ -1133,8 +1147,8 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
- unsigned int *sg_offset)
+static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
{
struct rxe_mem *mr = to_rmr(ibmr);
int n;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 9dbfcc0ab577..da12717a3eb7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -63,6 +63,8 @@ enum ipoib_flush_level {
enum {
IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -134,15 +136,21 @@ struct ipoib_header {
u16 reserved;
};
-struct ipoib_cb {
- struct qdisc_skb_cb qdisc_cb;
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
};
-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
- return (struct ipoib_cb *)skb->cb;
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * only the IPoIB header is present now; make room for a dummy
+ * pseudo header and set the skb fields accordingly
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
}
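The arithmetic behind the new constants: IPOIB_PSEUDO_LEN (20 bytes, one INFINIBAND_ALEN address) plus IPOIB_ENCAP_LEN (4) gives IPOIB_HARD_LEN = 24. On receive, the helper above pushes 20 zeroed bytes in front of the 4-byte encap header, points the mac header at them, then pulls all 24 bytes so skb->data lands on the payload:

    /* after skb_add_pseudo_hdr():
     *
     * mac_header -> +----------------------+
     *               | pseudo header (20 B) |  zeroed on RX; carries the
     *               +----------------------+  destination hwaddr on TX
     *               | ipoib header   (4 B) |  proto + reserved
     * data -------> +----------------------+
     *               | network payload      |
     */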
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
@@ -772,7 +780,13 @@ static inline void ipoib_unregister_debugfs(void) { }
#define ipoib_printk(level, priv, format, arg...) \
printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
#define ipoib_warn(priv, format, arg...) \
- ipoib_printk(KERN_WARNING, priv, format , ## arg)
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ 10 * HZ /*10 seconds */, \
+ 100); \
+ if (__ratelimit(&_rs)) \
+ ipoib_printk(KERN_WARNING, priv, format , ## arg);\
+} while (0)
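The reworked ipoib_warn() stops warning floods: DEFINE_RATELIMIT_STATE() sets up a 10-second window allowing at most 100 prints, and __ratelimit() returns nonzero while that budget lasts. The same pattern in isolation (the state name is hypothetical):

    #include <linux/ratelimit.h>

    static DEFINE_RATELIMIT_STATE(my_rs, 10 * HZ, 100);

    if (__ratelimit(&my_rs))
            pr_warn("queue length limit reached, dropping packet\n");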
extern int ipoib_sendq_size;
extern int ipoib_recvq_size;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 4ad297d3de89..339a1eecdfe3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct sk_buff *skb;
int i;
- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
if (unlikely(!skb))
return NULL;
/*
- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
+ * IPoIB adds an IPOIB_ENCAP_LEN-byte header; this will align the
* IP header to a multiple of 16.
*/
- skb_reserve(skb, 12);
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
- small_skb = dev_alloc_skb(dlen + 12);
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
if (small_skb) {
- skb_reserve(small_skb, 12);
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index be11d5d5b8c1..830fecb6934c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
if (unlikely(!skb))
return NULL;
/*
- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
- * header. So we need 4 more bytes to get to 48 and align the
- * IP header to a multiple of 16.
+ * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, i.e. at
+ * offset 64, which keeps it aligned to a multiple of 16
*/
- skb_reserve(skb, 4);
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cc1c1b062ea5..b58d9dca5c93 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_free(neigh);
goto err_drop;
}
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&neigh->queue, skb);
- else {
+ } else {
ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
skb_queue_len(&neigh->queue));
goto err_drop;
@@ -964,7 +967,7 @@ err_drop:
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_cb *cb)
+ struct ipoib_pseudo_header *phdr)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, cb->hwaddr + 4);
+ path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, cb->hwaddr + 4);
+ path = path_rec_create(dev, phdr->hwaddr + 4);
new_path = 1;
}
if (path) {
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
unsigned long flags;
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
header = (struct ipoib_header *) skb->data;
- if (unlikely(cb->hwaddr[4] == 0xff)) {
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
/* multicast, arrange "if" according to probability */
if ((header->proto != htons(ETH_P_IP)) &&
(header->proto != htons(ETH_P_IPV6)) &&
@@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Add in the P_Key for multicast*/
- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- cb->hwaddr[9] = priv->pkey & 0xff;
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (likely(neigh))
goto send_using_neigh;
- ipoib_mcast_send(dev, cb->hwaddr, skb);
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
return NETDEV_TX_OK;
}
@@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TIPC):
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, cb->hwaddr, dev);
+ neigh_add_path(skb, phdr->hwaddr, dev);
return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
/* for unicast ARP and RARP should always perform path find */
- unicast_arp_send(skb, dev, cb);
+ unicast_arp_send(skb, dev, phdr);
return NETDEV_TX_OK;
default:
/* ethertype not supported by IPoIB */
@@ -1086,11 +1095,13 @@ send_using_neigh:
goto unref;
}
} else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
goto unref;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(*phdr));
spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
unsigned short type,
const void *daddr, const void *saddr, unsigned len)
{
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
/*
* we don't rely on dst_entry structure, always stuff the
- * destination address into skb->cb so we can figure out where
+ * destination address into skb hard header so we can figure out where
* to send the packet later.
*/
- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
- return sizeof *header;
+ return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
@@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev)
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->hard_header_len = IPOIB_ENCAP_LEN;
+ dev->hard_header_len = IPOIB_HARD_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
@@ -2196,7 +2208,8 @@ static int __init ipoib_init_module(void)
* its private workqueue, and we only queue up flush events
* on our global flush workqueue. This avoids the deadlocks.
*/
- ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+ ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
+ WQ_MEM_RECLAIM);
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d3394b6add24..1909dd252c94 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
}
- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb_queue_tail(&mcast->pkt_queue, skb);
- else {
+ } else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index c55ecb2c3736..189dcd1709d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -147,7 +147,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
int ret, size;
int i;
- priv->pd = ib_alloc_pd(priv->ca);
+ priv->pd = ib_alloc_pd(priv->ca, 0);
if (IS_ERR(priv->pd)) {
printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
return -ENODEV;
@@ -157,7 +157,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
* the various IPoIB tasks assume they will never race against
* themselves, so always use a single thread workqueue
*/
- priv->wq = create_singlethread_workqueue("ipoib_wq");
+ priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
if (!priv->wq) {
printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
goto out_free_pd;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0351059783b1..0be6a7c5ddb5 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -374,7 +374,6 @@ struct iser_reg_ops {
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
- struct ib_mr *mr;
struct ib_event_handler event_handler;
struct list_head ig_list;
int refcount;
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 90be56893414..9c3e9ab53a41 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -199,7 +199,11 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
* FIXME: rework the registration code path to differentiate
* rkey/lkey use cases
*/
- reg->rkey = device->mr ? device->mr->rkey : 0;
+
+ if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
+ reg->rkey = device->pd->unsafe_global_rkey;
+ else
+ reg->rkey = 0;
reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 1b4945367e4f..a4b791dfaa1d 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -88,7 +88,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
device->comps_used, ib_dev->name,
ib_dev->num_comp_vectors, max_cqe);
- device->pd = ib_alloc_pd(ib_dev);
+ device->pd = ib_alloc_pd(ib_dev,
+ iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
if (IS_ERR(device->pd))
goto pd_err;
@@ -103,26 +104,13 @@ static int iser_create_device_ib_res(struct iser_device *device)
}
}
- if (!iser_always_reg) {
- int access = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ;
-
- device->mr = ib_get_dma_mr(device->pd, access);
- if (IS_ERR(device->mr))
- goto cq_err;
- }
-
INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
iser_event_handler);
if (ib_register_event_handler(&device->event_handler))
- goto handler_err;
+ goto cq_err;
return 0;
-handler_err:
- if (device->mr)
- ib_dereg_mr(device->mr);
cq_err:
for (i = 0; i < device->comps_used; i++) {
struct iser_comp *comp = &device->comps[i];
@@ -154,14 +142,10 @@ static void iser_free_device_ib_res(struct iser_device *device)
}
(void)ib_unregister_event_handler(&device->event_handler);
- if (device->mr)
- (void)ib_dereg_mr(device->mr);
ib_dealloc_pd(device->pd);
kfree(device->comps);
device->comps = NULL;
-
- device->mr = NULL;
device->pd = NULL;
}
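Across the iser hunks, the driver-private DMA MR is replaced by the core-managed global rkey: passing IB_PD_UNSAFE_GLOBAL_RKEY to ib_alloc_pd() makes the core expose pd->unsafe_global_rkey, so the ib_get_dma_mr()/ib_dereg_mr() bookkeeping and its extra error label disappear. A sketch of the resulting pattern; want_global_rkey is an illustrative stand-in for !iser_always_reg:

/* Sketch of the new PD allocation and rkey selection. */
struct ib_pd *pd;
u32 rkey;

pd = ib_alloc_pd(ib_dev, want_global_rkey ? IB_PD_UNSAFE_GLOBAL_RKEY : 0);
if (IS_ERR(pd))
        return PTR_ERR(pd);

/* Data path that used to read device->mr->rkey: */
if (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
        rkey = pd->unsafe_global_rkey;  /* core-managed all-physical rkey */
else
        rkey = 0;                       /* caller must register memory */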
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index cae9bbcc27e7..6dd43f63238e 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -309,7 +309,7 @@ isert_create_device_ib_res(struct isert_device *device)
if (ret)
goto out;
- device->pd = ib_alloc_pd(ib_dev);
+ device->pd = ib_alloc_pd(ib_dev, 0);
if (IS_ERR(device->pd)) {
ret = PTR_ERR(device->pd);
isert_err("failed to allocate pd, device %p, ret=%d\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 3322ed750172..d980fb458ad4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1262,6 +1262,7 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_pd *pd = target->pd;
struct ib_pool_fmr *fmr;
u64 io_addr = 0;
@@ -1273,9 +1274,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
if (state->npages == 0)
return 0;
- if (state->npages == 1 && target->global_mr) {
+ if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
srp_map_desc(state, state->base_dma_addr, state->dma_len,
- target->global_mr->rkey);
+ pd->unsafe_global_rkey);
goto reset_state;
}
@@ -1315,6 +1316,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
+ struct ib_pd *pd = target->pd;
struct ib_send_wr *bad_wr;
struct ib_reg_wr wr;
struct srp_fr_desc *desc;
@@ -1326,12 +1328,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
- if (sg_nents == 1 && target->global_mr) {
+ if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
sg_dma_len(state->sg) - sg_offset,
- target->global_mr->rkey);
+ pd->unsafe_global_rkey);
if (sg_offset_p)
*sg_offset_p = 0;
return 1;
@@ -1386,7 +1388,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
static int srp_map_sg_entry(struct srp_map_state *state,
struct srp_rdma_ch *ch,
- struct scatterlist *sg, int sg_index)
+ struct scatterlist *sg)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
@@ -1400,7 +1402,9 @@ static int srp_map_sg_entry(struct srp_map_state *state,
while (dma_len) {
unsigned offset = dma_addr & ~dev->mr_page_mask;
- if (state->npages == dev->max_pages_per_mr || offset != 0) {
+
+ if (state->npages == dev->max_pages_per_mr ||
+ (state->npages > 0 && offset != 0)) {
ret = srp_map_finish_fmr(state, ch);
if (ret)
return ret;
@@ -1417,12 +1421,12 @@ static int srp_map_sg_entry(struct srp_map_state *state,
}
/*
- * If the last entry of the MR wasn't a full page, then we need to
+ * If the end of the MR is not on a page boundary then we need to
* close it out and start a new one -- we can only merge at page
* boundaries.
*/
ret = 0;
- if (len != dev->mr_page_size)
+ if ((dma_addr & ~dev->mr_page_mask) != 0)
ret = srp_map_finish_fmr(state, ch);
return ret;
}
@@ -1439,7 +1443,7 @@ static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
for_each_sg(scat, sg, count, i) {
- ret = srp_map_sg_entry(state, ch, sg, i);
+ ret = srp_map_sg_entry(state, ch, sg);
if (ret)
return ret;
}
@@ -1491,7 +1495,7 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
- target->global_mr->rkey);
+ target->pd->unsafe_global_rkey);
}
return 0;
@@ -1591,6 +1595,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_request *req)
{
struct srp_target_port *target = ch->target;
+ struct ib_pd *pd = target->pd;
struct scatterlist *scat;
struct srp_cmd *cmd = req->cmd->buf;
int len, nents, count, ret;
@@ -1626,7 +1631,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
- if (count == 1 && target->global_mr) {
+ if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
/*
* The midlayer only generated a single gather/scatter
* entry, or DMA mapping coalesced everything to a
@@ -1636,7 +1641,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
- buf->key = cpu_to_be32(target->global_mr->rkey);
+ buf->key = cpu_to_be32(pd->unsafe_global_rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
req->nmdesc = 0;
@@ -1709,14 +1714,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
memcpy(indirect_hdr->desc_list, req->indirect_desc,
count * sizeof (struct srp_direct_buf));
- if (!target->global_mr) {
+ if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
idb_len, &idb_rkey);
if (ret < 0)
goto unmap;
req->nmdesc++;
} else {
- idb_rkey = cpu_to_be32(target->global_mr->rkey);
+ idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
}
indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
@@ -3268,8 +3273,8 @@ static ssize_t srp_create_target(struct device *dev,
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
+ target->pd = host->srp_dev->pd;
target->lkey = host->srp_dev->pd->local_dma_lkey;
- target->global_mr = host->srp_dev->global_mr;
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
@@ -3524,6 +3529,7 @@ static void srp_add_one(struct ib_device *device)
struct srp_host *host;
int mr_page_shift, p;
u64 max_pages_per_mr;
+ unsigned int flags = 0;
srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
@@ -3558,6 +3564,10 @@ static void srp_add_one(struct ib_device *device)
srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
}
+ if (never_register || !register_always ||
+ (!srp_dev->has_fmr && !srp_dev->has_fr))
+ flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
+
if (srp_dev->use_fast_reg) {
srp_dev->max_pages_per_mr =
min_t(u32, srp_dev->max_pages_per_mr,
@@ -3573,19 +3583,10 @@ static void srp_add_one(struct ib_device *device)
INIT_LIST_HEAD(&srp_dev->dev_list);
srp_dev->dev = device;
- srp_dev->pd = ib_alloc_pd(device);
+ srp_dev->pd = ib_alloc_pd(device, flags);
if (IS_ERR(srp_dev->pd))
goto free_dev;
- if (never_register || !register_always ||
- (!srp_dev->has_fmr && !srp_dev->has_fr)) {
- srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE);
- if (IS_ERR(srp_dev->global_mr))
- goto err_pd;
- }
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
host = srp_add_port(srp_dev, p);
@@ -3596,9 +3597,6 @@ static void srp_add_one(struct ib_device *device)
ib_set_client_data(device, &srp_client, srp_dev);
return;
-err_pd:
- ib_dealloc_pd(srp_dev->pd);
-
free_dev:
kfree(srp_dev);
}
@@ -3638,8 +3636,6 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
kfree(host);
}
- if (srp_dev->global_mr)
- ib_dereg_mr(srp_dev->global_mr);
ib_dealloc_pd(srp_dev->pd);
kfree(srp_dev);
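ib_srp gets the same treatment, with the policy folded into a flags word computed before PD allocation; once the PD owns the global rkey there is no MR left to unwind, so the err_pd label goes away. Condensed from the hunks above:

/* Sketch of srp_add_one()'s simplified setup; error paths abbreviated. */
unsigned int flags = 0;

if (never_register || !register_always ||
    (!srp_dev->has_fmr && !srp_dev->has_fr))
        flags |= IB_PD_UNSAFE_GLOBAL_RKEY;

srp_dev->pd = ib_alloc_pd(device, flags);
if (IS_ERR(srp_dev->pd))
        goto free_dev;          /* no separate MR to unwind any more */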
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 26bb9b0a7a63..21c69695f9d4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -90,7 +90,6 @@ struct srp_device {
struct list_head dev_list;
struct ib_device *dev;
struct ib_pd *pd;
- struct ib_mr *global_mr;
u64 mr_page_mask;
int mr_page_size;
int mr_max_size;
@@ -179,7 +178,7 @@ struct srp_target_port {
spinlock_t lock;
/* read only in the hot path */
- struct ib_mr *global_mr;
+ struct ib_pd *pd;
struct srp_rdma_ch *ch;
u32 ch_count;
u32 lkey;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 883bbfe08e0e..0b1f69ed2e92 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2480,7 +2480,7 @@ static void srpt_add_one(struct ib_device *device)
init_waitqueue_head(&sdev->ch_releaseQ);
mutex_init(&sdev->mutex);
- sdev->pd = ib_alloc_pd(device);
+ sdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(sdev->pd))
goto free_dev;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 936f07a4e35f..6d7de9bfed9a 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,6 +103,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
6-byte ALPS packet */
#define ALPS_STICK_BITS 0x100 /* separate stick button bits */
#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
+#define ALPS_DUALPOINT_WITH_PRESSURE 0x400 /* device can report trackpoint pressure */
static const struct alps_model_info alps_model_data[] = {
{ { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Toshiba Satellite Pro M10 */
@@ -1156,15 +1157,28 @@ static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte)
{
unsigned char pkt_id = SS4_PACKET_ID_IDLE;
- if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
- (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && byte[5] == 0x00) {
- pkt_id = SS4_PACKET_ID_IDLE;
- } else if (!(byte[3] & 0x10)) {
- pkt_id = SS4_PACKET_ID_ONE;
- } else if (!(byte[3] & 0x20)) {
+ switch (byte[3] & 0x30) {
+ case 0x00:
+ if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
+ (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 &&
+ byte[5] == 0x00) {
+ pkt_id = SS4_PACKET_ID_IDLE;
+ } else {
+ pkt_id = SS4_PACKET_ID_ONE;
+ }
+ break;
+ case 0x10:
+ /* two-finger finger positions */
pkt_id = SS4_PACKET_ID_TWO;
- } else {
+ break;
+ case 0x20:
+ /* stick pointer */
+ pkt_id = SS4_PACKET_ID_STICK;
+ break;
+ case 0x30:
+ /* third and fourth finger positions */
pkt_id = SS4_PACKET_ID_MULTI;
+ break;
}
return pkt_id;
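After the rewrite, bits 4-5 of byte[3] select the packet class up front, and the six-byte idle-signature test only runs inside the 0x00 class. A sketch isolating that classification; the helper name is hypothetical and the idle check is omitted:

/* Same classification as alps_get_pkt_id_ss4_v2(), shown in isolation. */
static unsigned char ss4_classify(const unsigned char *byte)
{
        switch (byte[3] & 0x30) {
        case 0x00: return SS4_PACKET_ID_ONE;    /* or IDLE, see above */
        case 0x10: return SS4_PACKET_ID_TWO;
        case 0x20: return SS4_PACKET_ID_STICK;
        default:   return SS4_PACKET_ID_MULTI;  /* 0x30 */
        }
}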
@@ -1185,7 +1199,13 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
f->mt[0].x = SS4_1F_X_V2(p);
f->mt[0].y = SS4_1F_Y_V2(p);
f->pressure = ((SS4_1F_Z_V2(p)) * 2) & 0x7f;
- f->fingers = 1;
+ /*
+ * When a button is held the device will give us events
+ * with x, y, and pressure of 0. This causes annoying jumps
+ * if a touch is released while the button is held.
+ * Handle this by claiming zero contacts.
+ */
+ f->fingers = f->pressure > 0 ? 1 : 0;
f->first_mp = 0;
f->is_mp = 0;
break;
@@ -1246,16 +1266,40 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
}
break;
+ case SS4_PACKET_ID_STICK:
+ if (!(priv->flags & ALPS_DUALPOINT)) {
+ psmouse_warn(psmouse,
+ "Rejected trackstick packet from non DualPoint device");
+ } else {
+ int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f));
+ int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f));
+ int pressure = (s8)(p[4] & 0x7f);
+
+ input_report_rel(priv->dev2, REL_X, x);
+ input_report_rel(priv->dev2, REL_Y, -y);
+ input_report_abs(priv->dev2, ABS_PRESSURE, pressure);
+ }
+ break;
+
case SS4_PACKET_ID_IDLE:
default:
memset(f, 0, sizeof(struct alps_fields));
break;
}
- f->left = !!(SS4_BTN_V2(p) & 0x01);
- if (!(priv->flags & ALPS_BUTTONPAD)) {
- f->right = !!(SS4_BTN_V2(p) & 0x02);
- f->middle = !!(SS4_BTN_V2(p) & 0x04);
+ /* handle buttons */
+ if (pkt_id == SS4_PACKET_ID_STICK) {
+ f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
+ if (!(priv->flags & ALPS_BUTTONPAD)) {
+ f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
+ f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
+ }
+ } else {
+ f->left = !!(SS4_BTN_V2(p) & 0x01);
+ if (!(priv->flags & ALPS_BUTTONPAD)) {
+ f->right = !!(SS4_BTN_V2(p) & 0x02);
+ f->middle = !!(SS4_BTN_V2(p) & 0x04);
+ }
}
return 0;
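The stick decode above builds a signed 8-bit value from two bytes: bit 0 of one byte supplies the sign bit, the low seven bits of the other supply the magnitude, and the (s8) cast sign-extends. A worked example wrapped in an illustrative helper:

/* decode_s8() is a hypothetical helper mirroring the expressions above. */
static int decode_s8(unsigned char hi, unsigned char low7)
{
        return (s8)(((hi & 1) << 7) | (low7 & 0x7f));
}

/* decode_s8(0x01, 0x7f) -> (s8)0xff == -1  (one unit in the negative axis)
 * decode_s8(0x00, 0x05) -> (s8)0x05 == +5 */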
@@ -1266,6 +1310,7 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
+ struct input_dev *dev2 = priv->dev2;
struct alps_fields *f = &priv->f;
memset(f, 0, sizeof(struct alps_fields));
@@ -1311,6 +1356,13 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
input_report_abs(dev, ABS_PRESSURE, f->pressure);
input_sync(dev);
+
+ if (priv->flags & ALPS_DUALPOINT) {
+ input_report_key(dev2, BTN_LEFT, f->ts_left);
+ input_report_key(dev2, BTN_RIGHT, f->ts_right);
+ input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
+ input_sync(dev2);
+ }
}
static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
@@ -2695,6 +2747,10 @@ static int alps_set_protocol(struct psmouse *psmouse,
if (alps_set_defaults_ss4_v2(psmouse, priv))
return -EIO;
+ if (priv->fw_ver[1] == 0x1)
+ priv->flags |= ALPS_DUALPOINT |
+ ALPS_DUALPOINT_WITH_PRESSURE;
+
break;
}
@@ -2767,6 +2823,9 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
e7[2] == 0x14 && ec[1] == 0x02) {
protocol = &alps_v8_protocol_data;
+ } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
+ e7[2] == 0x28 && ec[1] == 0x01) {
+ protocol = &alps_v8_protocol_data;
} else {
psmouse_dbg(psmouse,
"Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
@@ -2949,6 +3008,10 @@ int alps_init(struct psmouse *psmouse)
input_set_capability(dev2, EV_REL, REL_X);
input_set_capability(dev2, EV_REL, REL_Y);
+ if (priv->flags & ALPS_DUALPOINT_WITH_PRESSURE) {
+ input_set_capability(dev2, EV_ABS, ABS_PRESSURE);
+ input_set_abs_params(dev2, ABS_PRESSURE, 0, 127, 0, 0);
+ }
input_set_capability(dev2, EV_KEY, BTN_LEFT);
input_set_capability(dev2, EV_KEY, BTN_RIGHT);
input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index d37f814dc447..b9417e2d7ad3 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -37,12 +37,14 @@
* or there's button activity.
* SS4_PACKET_ID_TWO: There are two or more fingers on the touchpad
* SS4_PACKET_ID_MULTI: There are three or more fingers on the touchpad
+ * SS4_PACKET_ID_STICK: A stick pointer packet
*/
enum SS4_PACKET_ID {
SS4_PACKET_ID_IDLE = 0,
SS4_PACKET_ID_ONE,
SS4_PACKET_ID_TWO,
SS4_PACKET_ID_MULTI,
+ SS4_PACKET_ID_STICK,
};
#define SS4_COUNT_PER_ELECTRODE 256
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 08e252a42480..db7d1d666ac1 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1134,7 +1134,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* System76 Pangolin 0x250f01 ? 2 hw buttons
* (*) + 3 trackpoint buttons
* (**) + 0 trackpoint buttons
- * Note: Lenovo L430 and Lenovo L430 have the same fw_version/caps
+ * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps
*/
static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
{
@@ -1159,6 +1159,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
},
},
+ {
+ /* Fujitsu H760 also has a middle button */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+ },
+ },
#endif
{ }
};
@@ -1503,10 +1510,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
- /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ /* Fujitsu H760 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
},
},
{
@@ -1517,6 +1524,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
index 54eceb30ede5..a7d39689bbfb 100644
--- a/drivers/input/mouse/focaltech.c
+++ b/drivers/input/mouse/focaltech.c
@@ -43,7 +43,7 @@ int focaltech_detect(struct psmouse *psmouse, bool set_properties)
if (set_properties) {
psmouse->vendor = "FocalTech";
- psmouse->name = "FocalTech Touchpad";
+ psmouse->name = "Touchpad";
}
return 0;
@@ -146,8 +146,8 @@ static void focaltech_report_state(struct psmouse *psmouse)
}
input_mt_report_pointer_emulation(dev, true);
- input_report_key(psmouse->dev, BTN_LEFT, state->pressed);
- input_sync(psmouse->dev);
+ input_report_key(dev, BTN_LEFT, state->pressed);
+ input_sync(dev);
}
static void focaltech_process_touch_packet(struct psmouse *psmouse,
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index f73df2495fed..4c8a55857e00 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -61,3 +61,14 @@ config RMI4_F30
Function 30 provides GPIO and LED support for RMI4 devices. This
includes support for buttons on TouchPads and ClickPads.
+
+config RMI4_F54
+ bool "RMI4 Function 54 (Analog diagnostics)"
+ depends on RMI4_CORE
+ depends on VIDEO_V4L2=y || (RMI4_CORE=m && VIDEO_V4L2=m)
+ select VIDEOBUF2_VMALLOC
+ help
+ Say Y here if you want to add support for RMI4 function 54.
+
+ Function 54 provides access to various diagnostic features in certain
+ RMI4 touch sensors.
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index 95c00a783992..0bafc8502c4b 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -7,6 +7,7 @@ rmi_core-$(CONFIG_RMI4_2D_SENSOR) += rmi_2d_sensor.o
rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
+rmi_core-$(CONFIG_RMI4_F54) += rmi_f54.o
# Transports
obj-$(CONFIG_RMI4_I2C) += rmi_i2c.o
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index a73580654c6b..ef8c747c35e7 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/pm.h>
#include <linux/rmi.h>
@@ -312,6 +311,9 @@ static struct rmi_function_handler *fn_handlers[] = {
#ifdef CONFIG_RMI4_F30
&rmi_f30_handler,
#endif
+#ifdef CONFIG_RMI4_F54
+ &rmi_f54_handler,
+#endif
};
static void __rmi_unregister_function_handlers(int start_idx)
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index c83bce89028b..4a88312fbd25 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -17,7 +17,6 @@
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
-#include <linux/kconfig.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 6e140fa3cce1..8dfbebe9bf86 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -102,4 +102,5 @@ extern struct rmi_function_handler rmi_f01_handler;
extern struct rmi_function_handler rmi_f11_handler;
extern struct rmi_function_handler rmi_f12_handler;
extern struct rmi_function_handler rmi_f30_handler;
+extern struct rmi_function_handler rmi_f54_handler;
#endif
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index fac81fc9bcf6..b5d2dfc23bad 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/kconfig.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index 20c7134b3d3b..f798f427a46f 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -12,7 +12,6 @@
#include <linux/device.h>
#include <linux/input.h>
#include <linux/input/mt.h>
-#include <linux/kconfig.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
new file mode 100644
index 000000000000..cf805b960866
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -0,0 +1,757 @@
+/*
+ * Copyright (c) 2012-2015 Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
+#include "rmi_driver.h"
+
+#define F54_NAME "rmi4_f54"
+
+/* F54 data offsets */
+#define F54_REPORT_DATA_OFFSET 3
+#define F54_FIFO_OFFSET 1
+#define F54_NUM_TX_OFFSET 1
+#define F54_NUM_RX_OFFSET 0
+
+/* F54 commands */
+#define F54_GET_REPORT 1
+#define F54_FORCE_CAL 2
+
+/* Fixed sizes of reports */
+#define F54_QUERY_LEN 27
+
+/* F54 capabilities */
+#define F54_CAP_BASELINE (1 << 2)
+#define F54_CAP_IMAGE8 (1 << 3)
+#define F54_CAP_IMAGE16 (1 << 6)
+
+/**
+ * enum rmi_f54_report_type - RMI4 F54 report types
+ *
+ * @F54_8BIT_IMAGE: Normalized 8-Bit Image Report. The capacitance variance
+ * from baseline for each pixel.
+ *
+ * @F54_16BIT_IMAGE: Normalized 16-Bit Image Report. The capacitance variance
+ * from baseline for each pixel.
+ *
+ * @F54_RAW_16BIT_IMAGE:
+ * Raw 16-Bit Image Report. The raw capacitance for each
+ * pixel.
+ *
+ * @F54_TRUE_BASELINE: True Baseline Report. The baseline capacitance for each
+ * pixel.
+ *
+ * @F54_FULL_RAW_CAP: Full Raw Capacitance Report. The raw capacitance with
+ * low reference set to its minimum value and high
+ * reference set to its maximum value.
+ *
+ * @F54_FULL_RAW_CAP_RX_OFFSET_REMOVED:
+ * Full Raw Capacitance with Receiver Offset Removed
+ * Report. Set the low reference to its minimum value and the
+ * high reference to its maximum value, then report the raw
+ * capacitance for each pixel.
+ */
+enum rmi_f54_report_type {
+ F54_REPORT_NONE = 0,
+ F54_8BIT_IMAGE = 1,
+ F54_16BIT_IMAGE = 2,
+ F54_RAW_16BIT_IMAGE = 3,
+ F54_TRUE_BASELINE = 9,
+ F54_FULL_RAW_CAP = 19,
+ F54_FULL_RAW_CAP_RX_OFFSET_REMOVED = 20,
+ F54_MAX_REPORT_TYPE,
+};
+
+const char *rmi_f54_report_type_names[] = {
+ [F54_REPORT_NONE] = "Unknown",
+ [F54_8BIT_IMAGE] = "Normalized 8-Bit Image",
+ [F54_16BIT_IMAGE] = "Normalized 16-Bit Image",
+ [F54_RAW_16BIT_IMAGE] = "Raw 16-Bit Image",
+ [F54_TRUE_BASELINE] = "True Baseline",
+ [F54_FULL_RAW_CAP] = "Full Raw Capacitance",
+ [F54_FULL_RAW_CAP_RX_OFFSET_REMOVED]
+ = "Full Raw Capacitance RX Offset Removed",
+};
+
+struct rmi_f54_reports {
+ int start;
+ int size;
+};
+
+struct f54_data {
+ struct rmi_function *fn;
+
+ u8 qry[F54_QUERY_LEN];
+ u8 num_rx_electrodes;
+ u8 num_tx_electrodes;
+ u8 capabilities;
+ u16 clock_rate;
+ u8 family;
+
+ enum rmi_f54_report_type report_type;
+ u8 *report_data;
+ int report_size;
+ struct rmi_f54_reports standard_report[2];
+
+ bool is_busy;
+ struct mutex status_mutex;
+ struct mutex data_mutex;
+
+ struct workqueue_struct *workqueue;
+ struct delayed_work work;
+ unsigned long timeout;
+
+ struct completion cmd_done;
+
+ /* V4L2 support */
+ struct v4l2_device v4l2;
+ struct v4l2_pix_format format;
+ struct video_device vdev;
+ struct vb2_queue queue;
+ struct mutex lock;
+ int input;
+ enum rmi_f54_report_type inputs[F54_MAX_REPORT_TYPE];
+};
+
+/*
+ * Basic checks on report_type to ensure we write a valid type
+ * to the sensor.
+ */
+static bool is_f54_report_type_valid(struct f54_data *f54,
+ enum rmi_f54_report_type reptype)
+{
+ switch (reptype) {
+ case F54_8BIT_IMAGE:
+ return f54->capabilities & F54_CAP_IMAGE8;
+ case F54_16BIT_IMAGE:
+ case F54_RAW_16BIT_IMAGE:
+ return f54->capabilities & F54_CAP_IMAGE16;
+ case F54_TRUE_BASELINE:
+ return f54->capabilities & F54_CAP_IMAGE16;
+ case F54_FULL_RAW_CAP:
+ case F54_FULL_RAW_CAP_RX_OFFSET_REMOVED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static enum rmi_f54_report_type rmi_f54_get_reptype(struct f54_data *f54,
+ unsigned int i)
+{
+ if (i >= F54_MAX_REPORT_TYPE)
+ return F54_REPORT_NONE;
+
+ return f54->inputs[i];
+}
+
+static void rmi_f54_create_input_map(struct f54_data *f54)
+{
+ int i = 0;
+ enum rmi_f54_report_type reptype;
+
+ for (reptype = 1; reptype < F54_MAX_REPORT_TYPE; reptype++) {
+ if (!is_f54_report_type_valid(f54, reptype))
+ continue;
+
+ f54->inputs[i++] = reptype;
+ }
+
+ /* Remaining values are zero via kzalloc */
+}
+
+static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
+{
+ struct f54_data *f54 = dev_get_drvdata(&fn->dev);
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error;
+
+ /* Write Report Type into F54_AD_Data0 */
+ if (f54->report_type != report_type) {
+ error = rmi_write(rmi_dev, f54->fn->fd.data_base_addr,
+ report_type);
+ if (error)
+ return error;
+ f54->report_type = report_type;
+ }
+
+ /*
+ * Small delay after disabling interrupts to avoid race condition
+ * in firmware. This value is a bit higher than absolutely necessary.
+ * Should be removed once issue is resolved in firmware.
+ */
+ usleep_range(2000, 3000);
+
+ mutex_lock(&f54->data_mutex);
+
+ error = rmi_write(rmi_dev, fn->fd.command_base_addr, F54_GET_REPORT);
+ if (error < 0)
+ return error;
+
+ init_completion(&f54->cmd_done);
+
+ f54->is_busy = 1;
+ f54->timeout = jiffies + msecs_to_jiffies(100);
+
+ queue_delayed_work(f54->workqueue, &f54->work, 0);
+
+ mutex_unlock(&f54->data_mutex);
+
+ return 0;
+}
+
+static size_t rmi_f54_get_report_size(struct f54_data *f54)
+{
+ u8 rx = f54->num_rx_electrodes;
+ u8 tx = f54->num_tx_electrodes;
+ size_t size;
+
+ switch (rmi_f54_get_reptype(f54, f54->input)) {
+ case F54_8BIT_IMAGE:
+ size = rx * tx;
+ break;
+ case F54_16BIT_IMAGE:
+ case F54_RAW_16BIT_IMAGE:
+ case F54_TRUE_BASELINE:
+ case F54_FULL_RAW_CAP:
+ case F54_FULL_RAW_CAP_RX_OFFSET_REMOVED:
+ size = sizeof(u16) * rx * tx;
+ break;
+ default:
+ size = 0;
+ }
+
+ return size;
+}
+
+static int rmi_f54_get_pixel_fmt(enum rmi_f54_report_type reptype, u32 *pixfmt)
+{
+ int ret = 0;
+
+ switch (reptype) {
+ case F54_8BIT_IMAGE:
+ *pixfmt = V4L2_TCH_FMT_DELTA_TD08;
+ break;
+
+ case F54_16BIT_IMAGE:
+ *pixfmt = V4L2_TCH_FMT_DELTA_TD16;
+ break;
+
+ case F54_RAW_16BIT_IMAGE:
+ case F54_TRUE_BASELINE:
+ case F54_FULL_RAW_CAP:
+ case F54_FULL_RAW_CAP_RX_OFFSET_REMOVED:
+ *pixfmt = V4L2_TCH_FMT_TU16;
+ break;
+
+ case F54_REPORT_NONE:
+ case F54_MAX_REPORT_TYPE:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_file_operations rmi_f54_video_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct f54_data *f54 = q->drv_priv;
+
+ if (*nplanes)
+ return sizes[0] < rmi_f54_get_report_size(f54) ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = rmi_f54_get_report_size(f54);
+
+ return 0;
+}
+
+static void rmi_f54_buffer_queue(struct vb2_buffer *vb)
+{
+ struct f54_data *f54 = vb2_get_drv_priv(vb->vb2_queue);
+ u16 *ptr;
+ enum vb2_buffer_state state;
+ enum rmi_f54_report_type reptype;
+ int ret;
+
+ mutex_lock(&f54->status_mutex);
+
+ reptype = rmi_f54_get_reptype(f54, f54->input);
+ if (reptype == F54_REPORT_NONE) {
+ state = VB2_BUF_STATE_ERROR;
+ goto done;
+ }
+
+ if (f54->is_busy) {
+ state = VB2_BUF_STATE_ERROR;
+ goto done;
+ }
+
+ ret = rmi_f54_request_report(f54->fn, reptype);
+ if (ret) {
+ dev_err(&f54->fn->dev, "Error requesting F54 report\n");
+ state = VB2_BUF_STATE_ERROR;
+ goto done;
+ }
+
+ /* get frame data */
+ mutex_lock(&f54->data_mutex);
+
+ while (f54->is_busy) {
+ mutex_unlock(&f54->data_mutex);
+ if (!wait_for_completion_timeout(&f54->cmd_done,
+ msecs_to_jiffies(1000))) {
+ dev_err(&f54->fn->dev, "Timed out\n");
+ state = VB2_BUF_STATE_ERROR;
+ goto done;
+ }
+ mutex_lock(&f54->data_mutex);
+ }
+
+ ptr = vb2_plane_vaddr(vb, 0);
+ if (!ptr) {
+ dev_err(&f54->fn->dev, "Error acquiring frame ptr\n");
+ state = VB2_BUF_STATE_ERROR;
+ goto data_done;
+ }
+
+ memcpy(ptr, f54->report_data, f54->report_size);
+ vb2_set_plane_payload(vb, 0, rmi_f54_get_report_size(f54));
+ state = VB2_BUF_STATE_DONE;
+
+data_done:
+ mutex_unlock(&f54->data_mutex);
+done:
+ vb2_buffer_done(vb, state);
+ mutex_unlock(&f54->status_mutex);
+}
+
+/* V4L2 structures */
+static const struct vb2_ops rmi_f54_queue_ops = {
+ .queue_setup = rmi_f54_queue_setup,
+ .buf_queue = rmi_f54_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static const struct vb2_queue rmi_f54_queue = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
+ .buf_struct_size = sizeof(struct vb2_buffer),
+ .ops = &rmi_f54_queue_ops,
+ .mem_ops = &vb2_vmalloc_memops,
+ .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
+ .min_buffers_needed = 1,
+};
+
+static int rmi_f54_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct f54_data *f54 = video_drvdata(file);
+
+ strlcpy(cap->driver, F54_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, SYNAPTICS_INPUT_DEVICE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "rmi4:%s", dev_name(&f54->fn->dev));
+
+ return 0;
+}
+
+static int rmi_f54_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct f54_data *f54 = video_drvdata(file);
+ enum rmi_f54_report_type reptype;
+
+ reptype = rmi_f54_get_reptype(f54, i->index);
+ if (reptype == F54_REPORT_NONE)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_TOUCH;
+
+ strlcpy(i->name, rmi_f54_report_type_names[reptype], sizeof(i->name));
+ return 0;
+}
+
+static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
+{
+ struct v4l2_pix_format *f = &f54->format;
+ enum rmi_f54_report_type reptype;
+ int ret;
+
+ reptype = rmi_f54_get_reptype(f54, i);
+ if (reptype == F54_REPORT_NONE)
+ return -EINVAL;
+
+ ret = rmi_f54_get_pixel_fmt(reptype, &f->pixelformat);
+ if (ret)
+ return ret;
+
+ f54->input = i;
+
+ f->width = f54->num_rx_electrodes;
+ f->height = f54->num_tx_electrodes;
+ f->field = V4L2_FIELD_NONE;
+ f->colorspace = V4L2_COLORSPACE_RAW;
+ f->bytesperline = f->width * sizeof(u16);
+ f->sizeimage = f->width * f->height * sizeof(u16);
+
+ return 0;
+}
+
+static int rmi_f54_vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return rmi_f54_set_input(video_drvdata(file), i);
+}
+
+static int rmi_f54_vidioc_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ struct f54_data *f54 = video_drvdata(file);
+
+ *i = f54->input;
+
+ return 0;
+}
+
+static int rmi_f54_vidioc_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct f54_data *f54 = video_drvdata(file);
+
+ f->fmt.pix = f54->format;
+
+ return 0;
+}
+
+static int rmi_f54_vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (fmt->index) {
+ case 0:
+ fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+ break;
+
+ case 1:
+ fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD08;
+ break;
+
+ case 2:
+ fmt->pixelformat = V4L2_TCH_FMT_TU16;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rmi_f54_vidioc_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ a->parm.capture.readbuffers = 1;
+ a->parm.capture.timeperframe.numerator = 1;
+ a->parm.capture.timeperframe.denominator = 10;
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops rmi_f54_video_ioctl_ops = {
+ .vidioc_querycap = rmi_f54_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = rmi_f54_vidioc_enum_fmt,
+ .vidioc_s_fmt_vid_cap = rmi_f54_vidioc_fmt,
+ .vidioc_g_fmt_vid_cap = rmi_f54_vidioc_fmt,
+ .vidioc_try_fmt_vid_cap = rmi_f54_vidioc_fmt,
+ .vidioc_g_parm = rmi_f54_vidioc_g_parm,
+
+ .vidioc_enum_input = rmi_f54_vidioc_enum_input,
+ .vidioc_g_input = rmi_f54_vidioc_g_input,
+ .vidioc_s_input = rmi_f54_vidioc_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct video_device rmi_f54_video_device = {
+ .name = "Synaptics RMI4",
+ .fops = &rmi_f54_video_fops,
+ .ioctl_ops = &rmi_f54_video_ioctl_ops,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING,
+};
+
+static void rmi_f54_work(struct work_struct *work)
+{
+ struct f54_data *f54 = container_of(work, struct f54_data, work.work);
+ struct rmi_function *fn = f54->fn;
+ u8 fifo[2];
+ struct rmi_f54_reports *report;
+ int report_size;
+ u8 command;
+ u8 *data;
+ int error;
+
+ data = f54->report_data;
+ report_size = rmi_f54_get_report_size(f54);
+ if (report_size == 0) {
+ dev_err(&fn->dev, "Bad report size, report type=%d\n",
+ f54->report_type);
+ error = -EINVAL;
+ goto error; /* retry won't help */
+ }
+ f54->standard_report[0].size = report_size;
+ report = f54->standard_report;
+
+ mutex_lock(&f54->data_mutex);
+
+ /*
+ * Need to check if command has completed.
+ * If not try again later.
+ */
+ error = rmi_read(fn->rmi_dev, f54->fn->fd.command_base_addr,
+ &command);
+ if (error) {
+ dev_err(&fn->dev, "Failed to read back command\n");
+ goto error;
+ }
+ if (command & F54_GET_REPORT) {
+ if (time_after(jiffies, f54->timeout)) {
+ dev_err(&fn->dev, "Get report command timed out\n");
+ error = -ETIMEDOUT;
+ }
+ report_size = 0;
+ goto error;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
+
+ report_size = 0;
+ for (; report->size; report++) {
+ fifo[0] = report->start & 0xff;
+ fifo[1] = (report->start >> 8) & 0xff;
+ error = rmi_write_block(fn->rmi_dev,
+ fn->fd.data_base_addr + F54_FIFO_OFFSET,
+ fifo, sizeof(fifo));
+ if (error) {
+ dev_err(&fn->dev, "Failed to set fifo start offset\n");
+ goto abort;
+ }
+
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+ F54_REPORT_DATA_OFFSET, data,
+ report->size);
+ if (error) {
+ dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+ __func__, report->size, error);
+ goto abort;
+ }
+ data += report->size;
+ report_size += report->size;
+ }
+
+abort:
+ f54->report_size = error ? 0 : report_size;
+error:
+ if (error)
+ report_size = 0;
+
+ if (report_size == 0 && !error) {
+ queue_delayed_work(f54->workqueue, &f54->work,
+ msecs_to_jiffies(1));
+ } else {
+ f54->is_busy = false;
+ complete(&f54->cmd_done);
+ }
+
+ mutex_unlock(&f54->data_mutex);
+}
+
+static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits)
+{
+ return 0;
+}
+
+static int rmi_f54_config(struct rmi_function *fn)
+{
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f54_detect(struct rmi_function *fn)
+{
+ int error;
+ struct f54_data *f54;
+
+ f54 = dev_get_drvdata(&fn->dev);
+
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+ &f54->qry, sizeof(f54->qry));
+ if (error) {
+ dev_err(&fn->dev, "%s: Failed to query F54 properties\n",
+ __func__);
+ return error;
+ }
+
+ f54->num_rx_electrodes = f54->qry[0];
+ f54->num_tx_electrodes = f54->qry[1];
+ f54->capabilities = f54->qry[2];
+ f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8);
+ f54->family = f54->qry[5];
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n",
+ f54->num_rx_electrodes);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_tx_electrodes: %d\n",
+ f54->num_tx_electrodes);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 capabilities: 0x%x\n",
+ f54->capabilities);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 clock rate: 0x%x\n",
+ f54->clock_rate);
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 family: 0x%x\n",
+ f54->family);
+
+ f54->is_busy = false;
+
+ return 0;
+}
+
+static int rmi_f54_probe(struct rmi_function *fn)
+{
+ struct f54_data *f54;
+ int ret;
+ u8 rx, tx;
+
+ f54 = devm_kzalloc(&fn->dev, sizeof(struct f54_data), GFP_KERNEL);
+ if (!f54)
+ return -ENOMEM;
+
+ f54->fn = fn;
+ dev_set_drvdata(&fn->dev, f54);
+
+ ret = rmi_f54_detect(fn);
+ if (ret)
+ return ret;
+
+ mutex_init(&f54->data_mutex);
+ mutex_init(&f54->status_mutex);
+
+ rx = f54->num_rx_electrodes;
+ tx = f54->num_tx_electrodes;
+ f54->report_data = devm_kzalloc(&fn->dev,
+ sizeof(u16) * tx * rx,
+ GFP_KERNEL);
+ if (f54->report_data == NULL)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&f54->work, rmi_f54_work);
+
+ f54->workqueue = create_singlethread_workqueue("rmi4-poller");
+ if (!f54->workqueue)
+ return -ENOMEM;
+
+ rmi_f54_create_input_map(f54);
+
+ /* register video device */
+ strlcpy(f54->v4l2.name, F54_NAME, sizeof(f54->v4l2.name));
+ ret = v4l2_device_register(&fn->dev, &f54->v4l2);
+ if (ret) {
+ dev_err(&fn->dev, "Unable to register video dev.\n");
+ goto remove_wq;
+ }
+
+ /* initialize the queue */
+ mutex_init(&f54->lock);
+ f54->queue = rmi_f54_queue;
+ f54->queue.drv_priv = f54;
+ f54->queue.lock = &f54->lock;
+ f54->queue.dev = &fn->dev;
+
+ ret = vb2_queue_init(&f54->queue);
+ if (ret)
+ goto remove_v4l2;
+
+ f54->vdev = rmi_f54_video_device;
+ f54->vdev.v4l2_dev = &f54->v4l2;
+ f54->vdev.lock = &f54->lock;
+ f54->vdev.vfl_dir = VFL_DIR_RX;
+ f54->vdev.queue = &f54->queue;
+ video_set_drvdata(&f54->vdev, f54);
+
+ ret = video_register_device(&f54->vdev, VFL_TYPE_TOUCH, -1);
+ if (ret) {
+ dev_err(&fn->dev, "Unable to register video subdevice.");
+ goto remove_v4l2;
+ }
+
+ return 0;
+
+remove_v4l2:
+ v4l2_device_unregister(&f54->v4l2);
+remove_wq:
+ cancel_delayed_work_sync(&f54->work);
+ flush_workqueue(f54->workqueue);
+ destroy_workqueue(f54->workqueue);
+ return ret;
+}
+
+static void rmi_f54_remove(struct rmi_function *fn)
+{
+ struct f54_data *f54 = dev_get_drvdata(&fn->dev);
+
+ video_unregister_device(&f54->vdev);
+ v4l2_device_unregister(&f54->v4l2);
+}
+
+struct rmi_function_handler rmi_f54_handler = {
+ .driver = {
+ .name = F54_NAME,
+ },
+ .func = 0x54,
+ .probe = rmi_f54_probe,
+ .config = rmi_f54_config,
+ .attention = rmi_f54_attention,
+ .remove = rmi_f54_remove,
+};
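Since the F54 video node enables VB2_READ and wires up vb2_fop_read(), the simplest consumer needs no buffer negotiation at all: select an input (one per valid report type, as laid out by rmi_f54_create_input_map()) and read whole frames. A hedged userspace sketch; the /dev/v4l-touch0 node name and the trimmed error handling are assumptions:

/* Userspace sketch: grab one F54 frame via the read() interface. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int grab_frame(void)
{
        int fd = open("/dev/v4l-touch0", O_RDONLY);
        int input = 0;                  /* first valid report type */
        struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
        char buf[65536];

        ioctl(fd, VIDIOC_S_INPUT, &input);
        ioctl(fd, VIDIOC_G_FMT, &fmt);  /* sizeimage = rx * tx * 2 for u16 data */
        read(fd, buf, fmt.fmt.pix.sizeimage);
        close(fd);
        return 0;
}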
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
index 6f2e0e4f0296..1ebc2c1debae 100644
--- a/drivers/input/rmi4/rmi_i2c.c
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -221,6 +221,21 @@ static const struct of_device_id rmi_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, rmi_i2c_of_match);
#endif
+static void rmi_i2c_regulator_bulk_disable(void *data)
+{
+ struct rmi_i2c_xport *rmi_i2c = data;
+
+ regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
+}
+
+static void rmi_i2c_unregister_transport(void *data)
+{
+ struct rmi_i2c_xport *rmi_i2c = data;
+
+ rmi_unregister_transport_device(&rmi_i2c->xport);
+}
+
static int rmi_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -264,6 +279,12 @@ static int rmi_i2c_probe(struct i2c_client *client,
if (retval < 0)
return retval;
+ retval = devm_add_action_or_reset(&client->dev,
+ rmi_i2c_regulator_bulk_disable,
+ rmi_i2c);
+ if (retval)
+ return retval;
+
of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms",
&rmi_i2c->startup_delay);
@@ -294,6 +315,11 @@ static int rmi_i2c_probe(struct i2c_client *client,
client->addr);
return retval;
}
+ retval = devm_add_action_or_reset(&client->dev,
+ rmi_i2c_unregister_transport,
+ rmi_i2c);
+ if (retval)
+ return retval;
retval = rmi_i2c_init_irq(client);
if (retval < 0)
@@ -304,17 +330,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
return 0;
}
-static int rmi_i2c_remove(struct i2c_client *client)
-{
- struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
-
- rmi_unregister_transport_device(&rmi_i2c->xport);
- regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
- rmi_i2c->supplies);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int rmi_i2c_suspend(struct device *dev)
{
@@ -431,7 +446,6 @@ static struct i2c_driver rmi_i2c_driver = {
},
.id_table = rmi_id,
.probe = rmi_i2c_probe,
- .remove = rmi_i2c_remove,
};
module_i2c_driver(rmi_i2c_driver);
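The rmi_i2c conversion (and the rmi_spi one below) drops .remove() entirely by registering each teardown step with devm_add_action_or_reset(), which runs the action immediately if registration fails and otherwise on unbind, in reverse registration order. A minimal sketch of the idiom with illustrative names:

/* Sketch of the devm teardown idiom used above. */
static void my_teardown(void *data)
{
        struct my_ctx *ctx = data;

        my_unregister(ctx);     /* mirror of the setup step below */
}

static int my_probe(struct device *dev, struct my_ctx *ctx)
{
        int ret = my_register(ctx);

        if (ret)
                return ret;

        /* Runs my_teardown(ctx) now if adding the action fails, and
         * automatically on driver unbind otherwise - no .remove(). */
        return devm_add_action_or_reset(dev, my_teardown, ctx);
}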
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 55bd1b34970c..4ebef607e214 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -396,6 +396,13 @@ static inline int rmi_spi_of_probe(struct spi_device *spi,
}
#endif
+static void rmi_spi_unregister_transport(void *data)
+{
+ struct rmi_spi_xport *rmi_spi = data;
+
+ rmi_unregister_transport_device(&rmi_spi->xport);
+}
+
static int rmi_spi_probe(struct spi_device *spi)
{
struct rmi_spi_xport *rmi_spi;
@@ -464,6 +471,11 @@ static int rmi_spi_probe(struct spi_device *spi)
dev_err(&spi->dev, "failed to register transport.\n");
return retval;
}
+ retval = devm_add_action_or_reset(&spi->dev,
+ rmi_spi_unregister_transport,
+ rmi_spi);
+ if (retval)
+ return retval;
retval = rmi_spi_init_irq(spi);
if (retval < 0)
@@ -473,15 +485,6 @@ static int rmi_spi_probe(struct spi_device *spi)
return 0;
}
-static int rmi_spi_remove(struct spi_device *spi)
-{
- struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
-
- rmi_unregister_transport_device(&rmi_spi->xport);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
@@ -577,7 +580,6 @@ static struct spi_driver rmi_spi_driver = {
},
.id_table = rmi_id,
.probe = rmi_spi_probe,
- .remove = rmi_spi_remove,
};
module_spi_driver(rmi_spi_driver);
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index a5eed2ade53d..34da81c006b6 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -81,7 +81,7 @@ static inline int i8042_platform_init(void)
return -EBUSY;
#endif
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h
index ee1ad27d6ed0..08a1c10a1448 100644
--- a/drivers/input/serio/i8042-ip22io.h
+++ b/drivers/input/serio/i8042-ip22io.h
@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
return -EBUSY;
#endif
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
index f708c75d16f1..1aabea43329e 100644
--- a/drivers/input/serio/i8042-ppcio.h
+++ b/drivers/input/serio/i8042-ppcio.h
@@ -44,7 +44,7 @@ static inline void i8042_write_command(int val)
static inline int i8042_platform_init(void)
{
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index afcd1c1a05b2..6231d63860ee 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -130,7 +130,7 @@ static int __init i8042_platform_init(void)
}
}
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
index 73f5cc124a36..455747552f85 100644
--- a/drivers/input/serio/i8042-unicore32io.h
+++ b/drivers/input/serio/i8042-unicore32io.h
@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
return -EBUSY;
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 68f5f4a0f1e7..073246c7d163 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{ }
};
+/*
+ * On some Asus laptops, just running self tests causes problems.
+ */
+static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
+ },
+ },
+ { }
+};
static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
{
/* MSI Wind U-100 */
@@ -793,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
},
+ {
+ /* Schenker XMG C504 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ },
+ },
{ }
};
@@ -1072,12 +1163,18 @@ static int __init i8042_platform_init(void)
return retval;
#if defined(__ia64__)
- i8042_reset = true;
+ i8042_reset = I8042_RESET_ALWAYS;
#endif
#ifdef CONFIG_X86
- if (dmi_check_system(i8042_dmi_reset_table))
- i8042_reset = true;
+ /* Honor module parameter when value is not default */
+ if (i8042_reset == I8042_RESET_DEFAULT) {
+ if (dmi_check_system(i8042_dmi_reset_table))
+ i8042_reset = I8042_RESET_ALWAYS;
+
+ if (dmi_check_system(i8042_dmi_noselftest_table))
+ i8042_reset = I8042_RESET_NEVER;
+ }
if (dmi_check_system(i8042_dmi_noloop_table))
i8042_noloop = true;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 405252a884dd..89abfdb539ac 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -48,9 +48,39 @@ static bool i8042_unlock;
module_param_named(unlock, i8042_unlock, bool, 0);
MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
-static bool i8042_reset;
-module_param_named(reset, i8042_reset, bool, 0);
-MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
+enum i8042_controller_reset_mode {
+ I8042_RESET_NEVER,
+ I8042_RESET_ALWAYS,
+ I8042_RESET_ON_S2RAM,
+#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM
+};
+static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT;
+static int i8042_set_reset(const char *val, const struct kernel_param *kp)
+{
+ enum i8042_controller_reset_mode *arg = kp->arg;
+ int error;
+ bool reset;
+
+ if (val) {
+ error = kstrtobool(val, &reset);
+ if (error)
+ return error;
+ } else {
+ reset = true;
+ }
+
+ *arg = reset ? I8042_RESET_ALWAYS : I8042_RESET_NEVER;
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_reset_param = {
+ .flags = KERNEL_PARAM_OPS_FL_NOARG,
+ .set = i8042_set_reset,
+};
+#define param_check_reset_param(name, p) \
+ __param_check(name, p, enum i8042_controller_reset_mode)
+module_param_named(reset, i8042_reset, reset_param, 0);
+MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both");
static bool i8042_direct;
module_param_named(direct, i8042_direct, bool, 0);
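The custom kernel_param_ops above keep the old bool syntax working while the backing variable becomes a three-state enum; KERNEL_PARAM_OPS_FL_NOARG lets a bare "i8042.reset" count as true. The resulting semantics, as a worked table derived from the setter above:

/* i8042.reset      -> I8042_RESET_ALWAYS  (NOARG: missing value reads as true)
 * i8042.reset=1    -> I8042_RESET_ALWAYS
 * i8042.reset=0    -> I8042_RESET_NEVER
 * (not given)      -> I8042_RESET_ON_S2RAM, the new default, which is also
 *                     the only state in which the DMI quirk tables may
 *                     override the mode. */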
@@ -1019,7 +1049,7 @@ static int i8042_controller_init(void)
* Reset the controller and reset CRT to the original value set by BIOS.
*/
-static void i8042_controller_reset(bool force_reset)
+static void i8042_controller_reset(bool s2r_wants_reset)
{
i8042_flush();
@@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset)
* Reset the controller if requested.
*/
- if (i8042_reset || force_reset)
+ if (i8042_reset == I8042_RESET_ALWAYS ||
+ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
i8042_controller_selftest();
+ }
/*
* Restore the original control register setting.
@@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void)
* before suspending.
*/
-static int i8042_controller_resume(bool force_reset)
+static int i8042_controller_resume(bool s2r_wants_reset)
{
int error;
@@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset)
if (error)
return error;
- if (i8042_reset || force_reset) {
+ if (i8042_reset == I8042_RESET_ALWAYS ||
+ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
error = i8042_controller_selftest();
if (error)
return error;
@@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev)
static int i8042_pm_resume(struct device *dev)
{
- bool force_reset;
+ bool want_reset;
int i;
for (i = 0; i < I8042_NUM_PORTS; i++) {
@@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev)
* off control to the platform firmware, otherwise we can simply restore
* the mode.
*/
- force_reset = pm_resume_via_firmware();
+ want_reset = pm_resume_via_firmware();
- return i8042_controller_resume(force_reset);
+ return i8042_controller_resume(want_reset);
}
static int i8042_pm_thaw(struct device *dev)
@@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev)
i8042_platform_device = dev;
- if (i8042_reset) {
+ if (i8042_reset == I8042_RESET_ALWAYS) {
error = i8042_controller_selftest();
if (error)
return error;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 507981356921..efca0133e266 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -115,6 +115,15 @@ config TOUCHSCREEN_ATMEL_MXT
To compile this driver as a module, choose M here: the
module will be called atmel_mxt_ts.
+config TOUCHSCREEN_ATMEL_MXT_T37
+ bool "Support T37 Diagnostic Data"
+ depends on TOUCHSCREEN_ATMEL_MXT
+ depends on VIDEO_V4L2=y || (TOUCHSCREEN_ATMEL_MXT=m && VIDEO_V4L2=m)
+ select VIDEOBUF2_VMALLOC
+ help
+ Say Y here if you want support to output data from the T37
+ Diagnostic Data object using a V4L device.
+
config TOUCHSCREEN_AUO_PIXCIR
tristate "AUO in-cell touchscreen using Pixcir ICs"
depends on I2C
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 5af7907d0af4..e5d185fe69b9 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -4,6 +4,7 @@
* Copyright (C) 2010 Samsung Electronics Co.Ltd
* Copyright (C) 2011-2014 Atmel Corporation
* Copyright (C) 2012 Google, Inc.
+ * Copyright (C) 2016 Zodiac Inflight Innovations
*
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
*
@@ -28,6 +29,10 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
/* Firmware files */
#define MXT_FW_NAME "maxtouch.fw"
@@ -99,6 +104,8 @@ struct t7_config {
/* MXT_TOUCH_MULTI_T9 field */
#define MXT_T9_CTRL 0
+#define MXT_T9_XSIZE 3
+#define MXT_T9_YSIZE 4
#define MXT_T9_ORIENT 9
#define MXT_T9_RANGE 18
@@ -119,11 +126,31 @@ struct t9_range {
/* MXT_TOUCH_MULTI_T9 orient */
#define MXT_T9_ORIENT_SWITCH (1 << 0)
+#define MXT_T9_ORIENT_INVERTX (1 << 1)
+#define MXT_T9_ORIENT_INVERTY (1 << 2)
/* MXT_SPT_COMMSCONFIG_T18 */
#define MXT_COMMS_CTRL 0
#define MXT_COMMS_CMD 1
+/* MXT_DEBUG_DIAGNOSTIC_T37 */
+#define MXT_DIAGNOSTIC_PAGEUP 0x01
+#define MXT_DIAGNOSTIC_DELTAS 0x10
+#define MXT_DIAGNOSTIC_REFS 0x11
+#define MXT_DIAGNOSTIC_SIZE 128
+
+#define MXT_FAMILY_1386 160
+#define MXT1386_COLUMNS 3
+#define MXT1386_PAGES_PER_COLUMN 8
+
+struct t37_debug {
+#ifdef CONFIG_TOUCHSCREEN_ATMEL_MXT_T37
+ u8 mode;
+ u8 page;
+ u8 data[MXT_DIAGNOSTIC_SIZE];
+#endif
+};
+
/* Define for MXT_GEN_COMMAND_T6 */
#define MXT_BOOT_VALUE 0xa5
#define MXT_RESET_VALUE 0x01
@@ -133,10 +160,14 @@ struct t9_range {
#define MXT_T100_CTRL 0
#define MXT_T100_CFG1 1
#define MXT_T100_TCHAUX 3
+#define MXT_T100_XSIZE 9
#define MXT_T100_XRANGE 13
+#define MXT_T100_YSIZE 20
#define MXT_T100_YRANGE 24
#define MXT_T100_CFG_SWITCHXY BIT(5)
+#define MXT_T100_CFG_INVERTY BIT(6)
+#define MXT_T100_CFG_INVERTX BIT(7)
#define MXT_T100_TCHAUX_VECT BIT(0)
#define MXT_T100_TCHAUX_AMPL BIT(1)
@@ -205,6 +236,37 @@ struct mxt_object {
u8 num_report_ids;
} __packed;
+struct mxt_dbg {
+ u16 t37_address;
+ u16 diag_cmd_address;
+ struct t37_debug *t37_buf;
+ unsigned int t37_pages;
+ unsigned int t37_nodes;
+
+ struct v4l2_device v4l2;
+ struct v4l2_pix_format format;
+ struct video_device vdev;
+ struct vb2_queue queue;
+ struct mutex lock;
+ int input;
+};
+
+enum v4l_dbg_inputs {
+ MXT_V4L_INPUT_DELTAS,
+ MXT_V4L_INPUT_REFS,
+ MXT_V4L_INPUT_MAX,
+};
+
+static const struct v4l2_file_operations mxt_video_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
/* Each client has this additional data */
struct mxt_data {
struct i2c_client *client;
@@ -216,7 +278,11 @@ struct mxt_data {
unsigned int irq;
unsigned int max_x;
unsigned int max_y;
+ bool invertx;
+ bool inverty;
bool xy_switch;
+ u8 xsize;
+ u8 ysize;
bool in_bootloader;
u16 mem_size;
u8 t100_aux_ampl;
@@ -233,6 +299,7 @@ struct mxt_data {
u8 num_touchids;
u8 multitouch;
struct t7_config t7_cfg;
+ struct mxt_dbg dbg;
/* Cached parameters from object table */
u16 T5_address;
@@ -257,6 +324,11 @@ struct mxt_data {
struct completion crc_completion;
};
+struct mxt_vb2_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
static size_t mxt_obj_size(const struct mxt_object *obj)
{
return obj->size_minus_one + 1;
@@ -1503,6 +1575,11 @@ static void mxt_free_input_device(struct mxt_data *data)
static void mxt_free_object_table(struct mxt_data *data)
{
+#ifdef CONFIG_TOUCHSCREEN_ATMEL_MXT_T37
+ video_unregister_device(&data->dbg.vdev);
+ v4l2_device_unregister(&data->dbg.v4l2);
+#endif
+
kfree(data->object_table);
data->object_table = NULL;
kfree(data->msg_buf);
@@ -1661,6 +1738,18 @@ static int mxt_read_t9_resolution(struct mxt_data *data)
return -EINVAL;
error = __mxt_read_reg(client,
+ object->start_address + MXT_T9_XSIZE,
+ sizeof(data->xsize), &data->xsize);
+ if (error)
+ return error;
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T9_YSIZE,
+ sizeof(data->ysize), &data->ysize);
+ if (error)
+ return error;
+
+ error = __mxt_read_reg(client,
object->start_address + MXT_T9_RANGE,
sizeof(range), &range);
if (error)
@@ -1676,6 +1765,8 @@ static int mxt_read_t9_resolution(struct mxt_data *data)
return error;
data->xy_switch = orient & MXT_T9_ORIENT_SWITCH;
+ data->invertx = orient & MXT_T9_ORIENT_INVERTX;
+ data->inverty = orient & MXT_T9_ORIENT_INVERTY;
return 0;
}
@@ -1710,6 +1801,18 @@ static int mxt_read_t100_config(struct mxt_data *data)
data->max_y = get_unaligned_le16(&range_y);
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_XSIZE,
+ sizeof(data->xsize), &data->xsize);
+ if (error)
+ return error;
+
+ error = __mxt_read_reg(client,
+ object->start_address + MXT_T100_YSIZE,
+ sizeof(data->ysize), &data->ysize);
+ if (error)
+ return error;
+
/* read orientation config */
error = __mxt_read_reg(client,
object->start_address + MXT_T100_CFG1,
@@ -1718,6 +1821,8 @@ static int mxt_read_t100_config(struct mxt_data *data)
return error;
data->xy_switch = cfg & MXT_T100_CFG_SWITCHXY;
+ data->invertx = cfg & MXT_T100_CFG_INVERTX;
+ data->inverty = cfg & MXT_T100_CFG_INVERTY;
/* allocate aux bytes */
error = __mxt_read_reg(client,
@@ -2043,6 +2148,420 @@ recheck:
return 0;
}
+#ifdef CONFIG_TOUCHSCREEN_ATMEL_MXT_T37
+static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x,
+ unsigned int y)
+{
+ struct mxt_info *info = &data->info;
+ struct mxt_dbg *dbg = &data->dbg;
+ unsigned int ofs, page;
+ unsigned int col = 0;
+ unsigned int col_width;
+
+ if (info->family_id == MXT_FAMILY_1386) {
+ col_width = info->matrix_ysize / MXT1386_COLUMNS;
+ col = y / col_width;
+ y = y % col_width;
+ } else {
+ col_width = info->matrix_ysize;
+ }
+
+ ofs = (y + (x * col_width)) * sizeof(u16);
+ page = ofs / MXT_DIAGNOSTIC_SIZE;
+ ofs %= MXT_DIAGNOSTIC_SIZE;
+
+ if (info->family_id == MXT_FAMILY_1386)
+ page += col * MXT1386_PAGES_PER_COLUMN;
+
+ return get_unaligned_le16(&dbg->t37_buf[page].data[ofs]);
+}
+
+static int mxt_convert_debug_pages(struct mxt_data *data, u16 *outbuf)
+{
+ struct mxt_dbg *dbg = &data->dbg;
+ unsigned int x = 0;
+ unsigned int y = 0;
+ unsigned int i, rx, ry;
+
+ for (i = 0; i < dbg->t37_nodes; i++) {
+ /* Handle orientation */
+ rx = data->xy_switch ? y : x;
+ ry = data->xy_switch ? x : y;
+ rx = data->invertx ? (data->xsize - 1 - rx) : rx;
+ ry = data->inverty ? (data->ysize - 1 - ry) : ry;
+
+ outbuf[i] = mxt_get_debug_value(data, rx, ry);
+
+ /* Next value */
+ if (++x >= (data->xy_switch ? data->ysize : data->xsize)) {
+ x = 0;
+ y++;
+ }
+ }
+
+ return 0;
+}
+
+static int mxt_read_diagnostic_debug(struct mxt_data *data, u8 mode,
+ u16 *outbuf)
+{
+ struct mxt_dbg *dbg = &data->dbg;
+ int retries = 0;
+ int page;
+ int ret;
+ u8 cmd = mode;
+ struct t37_debug *p;
+ u8 cmd_poll;
+
+ for (page = 0; page < dbg->t37_pages; page++) {
+ p = dbg->t37_buf + page;
+
+ ret = mxt_write_reg(data->client, dbg->diag_cmd_address,
+ cmd);
+ if (ret)
+ return ret;
+
+ retries = 0;
+ msleep(20);
+wait_cmd:
+ /* Read back command byte */
+ ret = __mxt_read_reg(data->client, dbg->diag_cmd_address,
+ sizeof(cmd_poll), &cmd_poll);
+ if (ret)
+ return ret;
+
+ /* Field is cleared once the command has been processed */
+ if (cmd_poll) {
+ if (retries++ > 100)
+ return -EINVAL;
+
+ msleep(20);
+ goto wait_cmd;
+ }
+
+ /* Read T37 page */
+ ret = __mxt_read_reg(data->client, dbg->t37_address,
+ sizeof(struct t37_debug), p);
+ if (ret)
+ return ret;
+
+ if (p->mode != mode || p->page != page) {
+ dev_err(&data->client->dev, "T37 page mismatch\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&data->client->dev, "%s page:%d retries:%d\n",
+ __func__, page, retries);
+
+ /* For remaining pages, write PAGEUP rather than mode */
+ cmd = MXT_DIAGNOSTIC_PAGEUP;
+ }
+
+ return mxt_convert_debug_pages(data, outbuf);
+}
+
+static int mxt_queue_setup(struct vb2_queue *q,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct mxt_data *data = q->drv_priv;
+ size_t size = data->dbg.t37_nodes * sizeof(u16);
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static void mxt_buffer_queue(struct vb2_buffer *vb)
+{
+ struct mxt_data *data = vb2_get_drv_priv(vb->vb2_queue);
+ u16 *ptr;
+ int ret;
+ u8 mode;
+
+ ptr = vb2_plane_vaddr(vb, 0);
+ if (!ptr) {
+ dev_err(&data->client->dev, "Error acquiring frame ptr\n");
+ goto fault;
+ }
+
+ switch (data->dbg.input) {
+ case MXT_V4L_INPUT_DELTAS:
+ default:
+ mode = MXT_DIAGNOSTIC_DELTAS;
+ break;
+
+ case MXT_V4L_INPUT_REFS:
+ mode = MXT_DIAGNOSTIC_REFS;
+ break;
+ }
+
+ ret = mxt_read_diagnostic_debug(data, mode, ptr);
+ if (ret)
+ goto fault;
+
+ vb2_set_plane_payload(vb, 0, data->dbg.t37_nodes * sizeof(u16));
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ return;
+
+fault:
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+/* V4L2 structures */
+static const struct vb2_ops mxt_queue_ops = {
+ .queue_setup = mxt_queue_setup,
+ .buf_queue = mxt_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static const struct vb2_queue mxt_queue = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
+ .buf_struct_size = sizeof(struct mxt_vb2_buffer),
+ .ops = &mxt_queue_ops,
+ .mem_ops = &vb2_vmalloc_memops,
+ .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
+ .min_buffers_needed = 1,
+};
+
+static int mxt_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct mxt_data *data = video_drvdata(file);
+
+ strlcpy(cap->driver, "atmel_mxt_ts", sizeof(cap->driver));
+ strlcpy(cap->card, "atmel_mxt_ts touch", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "I2C:%s", dev_name(&data->client->dev));
+ return 0;
+}
+
+static int mxt_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index >= MXT_V4L_INPUT_MAX)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_TOUCH;
+
+ switch (i->index) {
+ case MXT_V4L_INPUT_REFS:
+ strlcpy(i->name, "Mutual Capacitance References",
+ sizeof(i->name));
+ break;
+ case MXT_V4L_INPUT_DELTAS:
+ strlcpy(i->name, "Mutual Capacitance Deltas", sizeof(i->name));
+ break;
+ }
+
+ return 0;
+}
+
+static int mxt_set_input(struct mxt_data *data, unsigned int i)
+{
+ struct v4l2_pix_format *f = &data->dbg.format;
+
+ if (i >= MXT_V4L_INPUT_MAX)
+ return -EINVAL;
+
+ if (i == MXT_V4L_INPUT_DELTAS)
+ f->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+ else
+ f->pixelformat = V4L2_TCH_FMT_TU16;
+
+ f->width = data->xy_switch ? data->ysize : data->xsize;
+ f->height = data->xy_switch ? data->xsize : data->ysize;
+ f->field = V4L2_FIELD_NONE;
+ f->colorspace = V4L2_COLORSPACE_RAW;
+ f->bytesperline = f->width * sizeof(u16);
+ f->sizeimage = f->width * f->height * sizeof(u16);
+
+ data->dbg.input = i;
+
+ return 0;
+}
+
+static int mxt_vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ return mxt_set_input(video_drvdata(file), i);
+}
+
+static int mxt_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct mxt_data *data = video_drvdata(file);
+
+ *i = data->dbg.input;
+
+ return 0;
+}
+
+static int mxt_vidioc_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct mxt_data *data = video_drvdata(file);
+
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ f->fmt.pix = data->dbg.format;
+
+ return 0;
+}
+
+static int mxt_vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (fmt->index) {
+ case 0:
+ fmt->pixelformat = V4L2_TCH_FMT_TU16;
+ break;
+
+ case 1:
+ fmt->pixelformat = V4L2_TCH_FMT_DELTA_TD16;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mxt_vidioc_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ a->parm.capture.readbuffers = 1;
+ a->parm.capture.timeperframe.numerator = 1;
+ a->parm.capture.timeperframe.denominator = 10;
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mxt_video_ioctl_ops = {
+ .vidioc_querycap = mxt_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = mxt_vidioc_enum_fmt,
+ .vidioc_s_fmt_vid_cap = mxt_vidioc_fmt,
+ .vidioc_g_fmt_vid_cap = mxt_vidioc_fmt,
+ .vidioc_try_fmt_vid_cap = mxt_vidioc_fmt,
+ .vidioc_g_parm = mxt_vidioc_g_parm,
+
+ .vidioc_enum_input = mxt_vidioc_enum_input,
+ .vidioc_g_input = mxt_vidioc_g_input,
+ .vidioc_s_input = mxt_vidioc_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static const struct video_device mxt_video_device = {
+ .name = "Atmel maxTouch",
+ .fops = &mxt_video_fops,
+ .ioctl_ops = &mxt_video_ioctl_ops,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING,
+};
+
+static void mxt_debug_init(struct mxt_data *data)
+{
+ struct mxt_info *info = &data->info;
+ struct mxt_dbg *dbg = &data->dbg;
+ struct mxt_object *object;
+ int error;
+
+ object = mxt_get_object(data, MXT_GEN_COMMAND_T6);
+ if (!object)
+ goto error;
+
+ dbg->diag_cmd_address = object->start_address + MXT_COMMAND_DIAGNOSTIC;
+
+ object = mxt_get_object(data, MXT_DEBUG_DIAGNOSTIC_T37);
+ if (!object)
+ goto error;
+
+ if (mxt_obj_size(object) != sizeof(struct t37_debug)) {
+ dev_warn(&data->client->dev, "Bad T37 size\n");
+ goto error;
+ }
+
+ dbg->t37_address = object->start_address;
+
+ /* Calculate size of data and allocate buffer */
+ dbg->t37_nodes = data->xsize * data->ysize;
+
+ if (info->family_id == MXT_FAMILY_1386)
+ dbg->t37_pages = MXT1386_COLUMNS * MXT1386_PAGES_PER_COLUMN;
+ else
+ dbg->t37_pages = DIV_ROUND_UP(data->xsize *
+ data->info.matrix_ysize *
+ sizeof(u16),
+ sizeof(dbg->t37_buf->data));
+
+ dbg->t37_buf = devm_kmalloc_array(&data->client->dev, dbg->t37_pages,
+ sizeof(struct t37_debug), GFP_KERNEL);
+ if (!dbg->t37_buf)
+ goto error;
+
+ /* init channel to zero */
+ mxt_set_input(data, 0);
+
+ /* register video device */
+ snprintf(dbg->v4l2.name, sizeof(dbg->v4l2.name), "%s", "atmel_mxt_ts");
+ error = v4l2_device_register(&data->client->dev, &dbg->v4l2);
+ if (error)
+ goto error;
+
+ /* initialize the queue */
+ mutex_init(&dbg->lock);
+ dbg->queue = mxt_queue;
+ dbg->queue.drv_priv = data;
+ dbg->queue.lock = &dbg->lock;
+ dbg->queue.dev = &data->client->dev;
+
+ error = vb2_queue_init(&dbg->queue);
+ if (error)
+ goto error_unreg_v4l2;
+
+ dbg->vdev = mxt_video_device;
+ dbg->vdev.v4l2_dev = &dbg->v4l2;
+ dbg->vdev.lock = &dbg->lock;
+ dbg->vdev.vfl_dir = VFL_DIR_RX;
+ dbg->vdev.queue = &dbg->queue;
+ video_set_drvdata(&dbg->vdev, data);
+
+ error = video_register_device(&dbg->vdev, VFL_TYPE_TOUCH, -1);
+ if (error)
+ goto error_unreg_v4l2;
+
+ return;
+
+error_unreg_v4l2:
+ v4l2_device_unregister(&dbg->v4l2);
+error:
+ dev_warn(&data->client->dev, "Error initializing T37\n");
+}
+#else
+static void mxt_debug_init(struct mxt_data *data)
+{
+}
+#endif
+
static int mxt_configure_objects(struct mxt_data *data,
const struct firmware *cfg)
{
@@ -2070,6 +2589,8 @@ static int mxt_configure_objects(struct mxt_data *data,
dev_warn(dev, "No touch object detected\n");
}
+ mxt_debug_init(data);
+
dev_info(dev,
"Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
info->family_id, info->variant_id, info->version >> 4,
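As a cross-check of the T37 addressing introduced above, the standalone sketch below restates mxt_get_debug_value()'s page math (illustrative only): each node is a little-endian u16, ordinary chips lay nodes out as x * matrix_ysize + y across 128-byte pages, and the mXT1386 family splits the matrix into three column groups of eight pages each.

#include <stdio.h>

#define DIAG_SIZE	128	/* bytes per T37 page */
#define COLS_1386	3
#define PAGES_PER_COL	8

static void node_to_page(unsigned x, unsigned y, unsigned matrix_ysize,
			 int is_1386, unsigned *page, unsigned *ofs)
{
	unsigned col = 0, col_width = matrix_ysize;

	if (is_1386) {
		col_width = matrix_ysize / COLS_1386;
		col = y / col_width;
		y %= col_width;
	}

	*ofs = (y + x * col_width) * 2;	/* two bytes per node */
	*page = *ofs / DIAG_SIZE + (is_1386 ? col * PAGES_PER_COL : 0);
	*ofs %= DIAG_SIZE;
}

int main(void)
{
	unsigned page, ofs;

	/* e.g. node (5,40) on a hypothetical 42-line mXT1386 matrix */
	node_to_page(5, 40, 42, 1, &page, &ofs);
	printf("node (5,40) -> page %u, offset %u\n", page, ofs);
	return 0;
}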
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index fb5fb9140ca9..552a3773f79d 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -157,6 +157,7 @@ struct mip4_ts {
char phys[32];
char product_name[16];
+ char ic_name[4];
unsigned int max_x;
unsigned int max_y;
@@ -263,6 +264,18 @@ static int mip4_query_device(struct mip4_ts *ts)
dev_dbg(&ts->client->dev, "product name: %.*s\n",
(int)sizeof(ts->product_name), ts->product_name);
+ /* IC name */
+ cmd[0] = MIP4_R0_INFO;
+ cmd[1] = MIP4_R1_INFO_IC_NAME;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd),
+ ts->ic_name, sizeof(ts->ic_name));
+ if (error)
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve IC name: %d\n", error);
+ else
+ dev_dbg(&ts->client->dev, "IC name: %.*s\n",
+ (int)sizeof(ts->ic_name), ts->ic_name);
+
/* Firmware version */
error = mip4_get_fw_version(ts);
if (error)
@@ -1326,7 +1339,7 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
* paired with current firmware in the chip.
*/
count = snprintf(buf, PAGE_SIZE, "%.*s\n",
- (int)sizeof(ts->product_name), ts->product_name);
+ (int)sizeof(ts->product_name), ts->product_name);
mutex_unlock(&ts->input->mutex);
@@ -1335,9 +1348,30 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL);
+static ssize_t mip4_sysfs_read_ic_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ size_t count;
+
+ mutex_lock(&ts->input->mutex);
+
+ count = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ (int)sizeof(ts->ic_name), ts->ic_name);
+
+ mutex_unlock(&ts->input->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(ic_name, S_IRUGO, mip4_sysfs_read_ic_name, NULL);
+
static struct attribute *mip4_attrs[] = {
&dev_attr_fw_version.attr,
&dev_attr_hw_version.attr,
+ &dev_attr_ic_name.attr,
&dev_attr_update_fw.attr,
NULL,
};
@@ -1538,6 +1572,6 @@ static struct i2c_driver mip4_driver = {
module_i2c_driver(mip4_driver);
MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen");
-MODULE_VERSION("2016.03.12");
+MODULE_VERSION("2016.09.28");
MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>");
MODULE_LICENSE("GPL");
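Usage note for the ic_name attribute added above (illustrative, not part of the patch): it sits next to fw_version and hw_version in sysfs and reads like any other attribute file. The i2c device path below is hypothetical; substitute the bus and slave address the controller actually uses.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/i2c/devices/1-0048/ic_name", "r");
	char name[8];	/* 4-byte name plus trailing newline */

	if (!f || !fgets(name, sizeof(name), f)) {
		perror("ic_name");
		return 1;
	}
	printf("controller IC: %s", name);
	fclose(f);
	return 0;
}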
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 4ea475775d58..aefb6e11f88a 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -139,6 +139,27 @@ struct sur40_image_header {
#define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */
#define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */
+static const struct v4l2_pix_format sur40_pix_format[] = {
+ {
+ .pixelformat = V4L2_TCH_FMT_TU08,
+ .width = SENSOR_RES_X / 2,
+ .height = SENSOR_RES_Y / 2,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bytesperline = SENSOR_RES_X / 2,
+ .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2),
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_GREY,
+ .width = SENSOR_RES_X / 2,
+ .height = SENSOR_RES_Y / 2,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .bytesperline = SENSOR_RES_X / 2,
+ .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2),
+ }
+};
+
/* master device state */
struct sur40_state {
@@ -149,6 +170,7 @@ struct sur40_state {
struct v4l2_device v4l2;
struct video_device vdev;
struct mutex lock;
+ struct v4l2_pix_format pix_fmt;
struct vb2_queue queue;
struct list_head buf_list;
@@ -169,7 +191,6 @@ struct sur40_buffer {
/* forward declarations */
static const struct video_device sur40_video_device;
-static const struct v4l2_pix_format sur40_video_format;
static const struct vb2_queue sur40_queue;
static void sur40_process_video(struct sur40_state *sur40);
@@ -420,7 +441,7 @@ static void sur40_process_video(struct sur40_state *sur40)
goto err_poll;
}
- if (le32_to_cpu(img->size) != sur40_video_format.sizeimage) {
+ if (le32_to_cpu(img->size) != sur40->pix_fmt.sizeimage) {
dev_err(sur40->dev, "image size mismatch\n");
goto err_poll;
}
@@ -431,7 +452,7 @@ static void sur40_process_video(struct sur40_state *sur40)
result = usb_sg_init(&sgr, sur40->usbdev,
usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0,
- sgt->sgl, sgt->nents, sur40_video_format.sizeimage, 0);
+ sgt->sgl, sgt->nents, sur40->pix_fmt.sizeimage, 0);
if (result < 0) {
dev_err(sur40->dev, "error %d in usb_sg_init\n", result);
goto err_poll;
@@ -586,13 +607,14 @@ static int sur40_probe(struct usb_interface *interface,
if (error)
goto err_unreg_v4l2;
+ sur40->pix_fmt = sur40_pix_format[0];
sur40->vdev = sur40_video_device;
sur40->vdev.v4l2_dev = &sur40->v4l2;
sur40->vdev.lock = &sur40->lock;
sur40->vdev.queue = &sur40->queue;
video_set_drvdata(&sur40->vdev, sur40);
- error = video_register_device(&sur40->vdev, VFL_TYPE_GRABBER, -1);
+ error = video_register_device(&sur40->vdev, VFL_TYPE_TOUCH, -1);
if (error) {
dev_err(&interface->dev,
"Unable to register video subdevice.");
@@ -647,14 +669,16 @@ static int sur40_queue_setup(struct vb2_queue *q,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
+ struct sur40_state *sur40 = vb2_get_drv_priv(q);
+
if (q->num_buffers + *nbuffers < 3)
*nbuffers = 3 - q->num_buffers;
if (*nplanes)
- return sizes[0] < sur40_video_format.sizeimage ? -EINVAL : 0;
+ return sizes[0] < sur40->pix_fmt.sizeimage ? -EINVAL : 0;
*nplanes = 1;
- sizes[0] = sur40_video_format.sizeimage;
+ sizes[0] = sur40->pix_fmt.sizeimage;
return 0;
}
@@ -666,7 +690,7 @@ static int sur40_queue_setup(struct vb2_queue *q,
static int sur40_buffer_prepare(struct vb2_buffer *vb)
{
struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue);
- unsigned long size = sur40_video_format.sizeimage;
+ unsigned long size = sur40->pix_fmt.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
dev_err(&sur40->usbdev->dev, "buffer too small (%lu < %lu)\n",
@@ -741,7 +765,7 @@ static int sur40_vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver));
strlcpy(cap->card, DRIVER_LONG, sizeof(cap->card));
usb_make_path(sur40->usbdev, cap->bus_info, sizeof(cap->bus_info));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
@@ -753,7 +777,7 @@ static int sur40_vidioc_enum_input(struct file *file, void *priv,
{
if (i->index != 0)
return -EINVAL;
- i->type = V4L2_INPUT_TYPE_CAMERA;
+ i->type = V4L2_INPUT_TYPE_TOUCH;
i->std = V4L2_STD_UNKNOWN;
strlcpy(i->name, "In-Cell Sensor", sizeof(i->name));
i->capabilities = 0;
@@ -771,19 +795,70 @@ static int sur40_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
return 0;
}
-static int sur40_vidioc_fmt(struct file *file, void *priv,
+static int sur40_vidioc_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_GREY:
+ f->fmt.pix = sur40_pix_format[1];
+ break;
+
+ default:
+ f->fmt.pix = sur40_pix_format[0];
+ break;
+ }
+
+ return 0;
+}
+
+static int sur40_vidioc_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct sur40_state *sur40 = video_drvdata(file);
+
+ switch (f->fmt.pix.pixelformat) {
+ case V4L2_PIX_FMT_GREY:
+ sur40->pix_fmt = sur40_pix_format[1];
+ break;
+
+ default:
+ sur40->pix_fmt = sur40_pix_format[0];
+ break;
+ }
+
+ f->fmt.pix = sur40->pix_fmt;
+ return 0;
+}
+
+static int sur40_vidioc_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- f->fmt.pix = sur40_video_format;
+ struct sur40_state *sur40 = video_drvdata(file);
+
+ f->fmt.pix = sur40->pix_fmt;
+ return 0;
+}
+
+static int sur40_ioctl_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *p)
+{
+ if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ p->parm.capture.timeperframe.numerator = 1;
+ p->parm.capture.timeperframe.denominator = 60;
+ p->parm.capture.readbuffers = 3;
return 0;
}
static int sur40_vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index != 0)
+ if (f->index >= ARRAY_SIZE(sur40_pix_format))
return -EINVAL;
- f->pixelformat = V4L2_PIX_FMT_GREY;
+
+ f->pixelformat = sur40_pix_format[f->index].pixelformat;
f->flags = 0;
return 0;
}
@@ -791,25 +866,31 @@ static int sur40_vidioc_enum_fmt(struct file *file, void *priv,
static int sur40_vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *f)
{
- if ((f->index != 0) || (f->pixel_format != V4L2_PIX_FMT_GREY))
+ struct sur40_state *sur40 = video_drvdata(file);
+
+ if ((f->index != 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08)
+ && (f->pixel_format != V4L2_PIX_FMT_GREY)))
return -EINVAL;
f->type = V4L2_FRMSIZE_TYPE_DISCRETE;
- f->discrete.width = sur40_video_format.width;
- f->discrete.height = sur40_video_format.height;
+ f->discrete.width = sur40->pix_fmt.width;
+ f->discrete.height = sur40->pix_fmt.height;
return 0;
}
static int sur40_vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *f)
{
- if ((f->index > 1) || (f->pixel_format != V4L2_PIX_FMT_GREY)
- || (f->width != sur40_video_format.width)
- || (f->height != sur40_video_format.height))
- return -EINVAL;
+ struct sur40_state *sur40 = video_drvdata(file);
+
+ if ((f->index > 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08)
+ && (f->pixel_format != V4L2_PIX_FMT_GREY))
+ || (f->width != sur40->pix_fmt.width)
+ || (f->height != sur40->pix_fmt.height))
+ return -EINVAL;
f->type = V4L2_FRMIVAL_TYPE_DISCRETE;
- f->discrete.denominator = 60/(f->index+1);
+ f->discrete.denominator = 60;
f->discrete.numerator = 1;
return 0;
}
@@ -862,13 +943,16 @@ static const struct v4l2_ioctl_ops sur40_video_ioctl_ops = {
.vidioc_querycap = sur40_vidioc_querycap,
.vidioc_enum_fmt_vid_cap = sur40_vidioc_enum_fmt,
- .vidioc_try_fmt_vid_cap = sur40_vidioc_fmt,
- .vidioc_s_fmt_vid_cap = sur40_vidioc_fmt,
- .vidioc_g_fmt_vid_cap = sur40_vidioc_fmt,
+ .vidioc_try_fmt_vid_cap = sur40_vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = sur40_vidioc_s_fmt,
+ .vidioc_g_fmt_vid_cap = sur40_vidioc_g_fmt,
.vidioc_enum_framesizes = sur40_vidioc_enum_framesizes,
.vidioc_enum_frameintervals = sur40_vidioc_enum_frameintervals,
+ .vidioc_g_parm = sur40_ioctl_parm,
+ .vidioc_s_parm = sur40_ioctl_parm,
+
.vidioc_enum_input = sur40_vidioc_enum_input,
.vidioc_g_input = sur40_vidioc_g_input,
.vidioc_s_input = sur40_vidioc_s_input,
@@ -891,16 +975,6 @@ static const struct video_device sur40_video_device = {
.release = video_device_release_empty,
};
-static const struct v4l2_pix_format sur40_video_format = {
- .pixelformat = V4L2_PIX_FMT_GREY,
- .width = SENSOR_RES_X / 2,
- .height = SENSOR_RES_Y / 2,
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .bytesperline = SENSOR_RES_X / 2,
- .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2),
-};
-
/* USB-specific object needed to register this driver with the USB subsystem. */
static struct usb_driver sur40_driver = {
.name = DRIVER_SHORT,
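A short sketch of the format negotiation the sur40 changes above introduce (illustrative only; the /dev/v4l-touch0 path is an assumption now that the device registers as VFL_TYPE_TOUCH): per sur40_vidioc_s_fmt(), requesting V4L2_PIX_FMT_GREY selects the 8-bit greyscale entry, and any other fourcc falls back to the touch format V4L2_TCH_FMT_TU08.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/v4l-touch0", O_RDWR);
	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

	if (fd < 0)
		return 1;

	/* ask for greyscale; anything else would yield TU08 */
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_GREY;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		return 1;

	printf("granted %ux%u, fourcc %.4s\n", fmt.fmt.pix.width,
	       fmt.fmt.pix.height, (char *)&fmt.fmt.pix.pixelformat);
	close(fd);
	return 0;
}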
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d432ca828472..8ee54d71c7eb 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -309,7 +309,7 @@ config ARM_SMMU
config ARM_SMMU_V3
bool "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
- depends on ARM64 && PCI
+ depends on ARM64
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 58fa8cc0262b..754595ee11b6 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -103,7 +103,7 @@ struct flush_queue {
struct flush_queue_entry *entries;
};
-DEFINE_PER_CPU(struct flush_queue, flush_queue);
+static DEFINE_PER_CPU(struct flush_queue, flush_queue);
static atomic_t queue_timer_on;
static struct timer_list queue_timer;
@@ -1361,7 +1361,8 @@ static u64 *alloc_pte(struct protection_domain *domain,
__npte = PM_LEVEL_PDE(level, virt_to_phys(page));
- if (cmpxchg64(pte, __pte, __npte)) {
+ /* pte could have been changed somewhere. */
+ if (cmpxchg64(pte, __pte, __npte) != __pte) {
free_page((unsigned long)page);
continue;
}
@@ -1741,6 +1742,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
free_pagetable(&dom->domain);
+ if (dom->domain.id)
+ domain_id_free(dom->domain.id);
+
kfree(dom);
}
@@ -3649,7 +3653,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
table = irq_lookup_table[devid];
if (table)
- goto out;
+ goto out_unlock;
alias = amd_iommu_alias_table[devid];
table = irq_lookup_table[alias];
@@ -3663,7 +3667,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
/* Nothing there yet, allocate new irq remapping table */
table = kzalloc(sizeof(*table), GFP_ATOMIC);
if (!table)
- goto out;
+ goto out_unlock;
/* Initialize table spin-lock */
spin_lock_init(&table->lock);
@@ -3676,7 +3680,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
if (!table->table) {
kfree(table);
table = NULL;
- goto out;
+ goto out_unlock;
}
if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
@@ -4153,6 +4157,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
}
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
+ ret = index;
goto out_free_parent;
}
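The alloc_pte() hunk above ("pte could have been changed somewhere") fixes a classic compare-and-swap pitfall: cmpxchg64() returns the value that was actually in memory, so success means "old == expected". Treating any non-zero return as failure only works when the expected value is zero. A standalone restatement of the pattern:

#include <stdint.h>
#include <stdio.h>

static int install_pte(uint64_t *p, uint64_t expected, uint64_t npte)
{
	/* returns the prior contents of *p; the swap happened iff
	 * that equals what we expected to find */
	uint64_t old = __sync_val_compare_and_swap(p, expected, npte);

	return old == expected;
}

int main(void)
{
	uint64_t pte = 0;

	printf("first install: %s\n",
	       install_pte(&pte, 0, 0x1000) ? "ok" : "raced");
	printf("stale retry:   %s\n",
	       install_pte(&pte, 0, 0x2000) ? "ok" : "raced");
	return 0;
}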
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index cd1713631a4a..157e93421fb8 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -20,6 +20,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
+#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
@@ -2285,7 +2286,7 @@ static int __init early_amd_iommu_init(void)
* never allocate domain 0 because it's used as the non-allocated and
* error value placeholder
*/
- amd_iommu_pd_alloc_bitmap[0] = 1;
+ __set_bit(0, amd_iommu_pd_alloc_bitmap);
spin_lock_init(&amd_iommu_pd_lock);
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index faa3b4895cf0..7eb60c15c582 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -79,12 +79,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
int status, int tag);
-#ifndef CONFIG_AMD_IOMMU_STATS
-
-static inline void amd_iommu_stats_init(void) { }
-
-#endif /* !CONFIG_AMD_IOMMU_STATS */
-
static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 641e88761319..e6f9b2d745ca 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -30,10 +30,13 @@
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+
#include "io-pgtable.h"
/* MMIO registers */
@@ -123,6 +126,10 @@
#define CR2_RECINVSID (1 << 1)
#define CR2_E2H (1 << 0)
+#define ARM_SMMU_GBPA 0x44
+#define GBPA_ABORT (1 << 20)
+#define GBPA_UPDATE (1 << 31)
+
#define ARM_SMMU_IRQ_CTRL 0x50
#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
@@ -260,6 +267,9 @@
#define STRTAB_STE_1_SHCFG_INCOMING 1UL
#define STRTAB_STE_1_SHCFG_SHIFT 44
+#define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL
+#define STRTAB_STE_1_PRIVCFG_SHIFT 48
+
#define STRTAB_STE_2_S2VMID_SHIFT 0
#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT 32
@@ -606,12 +616,9 @@ struct arm_smmu_device {
struct arm_smmu_strtab_cfg strtab_cfg;
};
-/* SMMU private data for an IOMMU group */
-struct arm_smmu_group {
+/* SMMU private data for each master */
+struct arm_smmu_master_data {
struct arm_smmu_device *smmu;
- struct arm_smmu_domain *domain;
- int num_sids;
- u32 *sids;
struct arm_smmu_strtab_ent ste;
};
@@ -713,19 +720,15 @@ static void queue_inc_prod(struct arm_smmu_queue *q)
writel(q->prod, q->prod_reg);
}
-static bool __queue_cons_before(struct arm_smmu_queue *q, u32 until)
-{
- if (Q_WRP(q, q->cons) == Q_WRP(q, until))
- return Q_IDX(q, q->cons) < Q_IDX(q, until);
-
- return Q_IDX(q, q->cons) >= Q_IDX(q, until);
-}
-
-static int queue_poll_cons(struct arm_smmu_queue *q, u32 until, bool wfe)
+/*
+ * Wait for the SMMU to consume items. If drain is true, wait until the queue
+ * is empty. Otherwise, wait until there is at least one free slot.
+ */
+static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
- while (queue_sync_cons(q), __queue_cons_before(q, until)) {
+ while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
if (ktime_compare(ktime_get(), timeout) > 0)
return -ETIMEDOUT;
@@ -896,8 +899,8 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_ent *ent)
{
- u32 until;
u64 cmd[CMDQ_ENT_DWORDS];
+ unsigned long flags;
bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
struct arm_smmu_queue *q = &smmu->cmdq.q;
@@ -907,20 +910,15 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
return;
}
- spin_lock(&smmu->cmdq.lock);
- while (until = q->prod + 1, queue_insert_raw(q, cmd) == -ENOSPC) {
- /*
- * Keep the queue locked, otherwise the producer could wrap
- * twice and we could see a future consumer pointer that looks
- * like it's behind us.
- */
- if (queue_poll_cons(q, until, wfe))
+ spin_lock_irqsave(&smmu->cmdq.lock, flags);
+ while (queue_insert_raw(q, cmd) == -ENOSPC) {
+ if (queue_poll_cons(q, false, wfe))
dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
}
- if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, until, wfe))
+ if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
- spin_unlock(&smmu->cmdq.lock);
+ spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}
/* Context descriptor manipulation functions */
@@ -1073,7 +1071,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
#ifdef CONFIG_PCI_ATS
STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
- STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
+ STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
+ STRTAB_STE_1_PRIVCFG_UNPRIV <<
+ STRTAB_STE_1_PRIVCFG_SHIFT);
if (smmu->features & ARM_SMMU_FEAT_STALLS)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
@@ -1161,36 +1161,66 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
struct arm_smmu_queue *q = &smmu->evtq.q;
u64 evt[EVTQ_ENT_DWORDS];
- while (!queue_remove_raw(q, evt)) {
- u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;
+ do {
+ while (!queue_remove_raw(q, evt)) {
+ u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;
- dev_info(smmu->dev, "event 0x%02x received:\n", id);
- for (i = 0; i < ARRAY_SIZE(evt); ++i)
- dev_info(smmu->dev, "\t0x%016llx\n",
- (unsigned long long)evt[i]);
- }
+ dev_info(smmu->dev, "event 0x%02x received:\n", id);
+ for (i = 0; i < ARRAY_SIZE(evt); ++i)
+ dev_info(smmu->dev, "\t0x%016llx\n",
+ (unsigned long long)evt[i]);
+
+ }
+
+ /*
+ * Not much we can do on overflow, so scream and pretend we're
+ * trying harder.
+ */
+ if (queue_sync_prod(q) == -EOVERFLOW)
+ dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
+ } while (!queue_empty(q));
/* Sync our overflow flag, as we believe we're up to speed */
q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
return IRQ_HANDLED;
}
-static irqreturn_t arm_smmu_evtq_handler(int irq, void *dev)
-{
- irqreturn_t ret = IRQ_WAKE_THREAD;
- struct arm_smmu_device *smmu = dev;
- struct arm_smmu_queue *q = &smmu->evtq.q;
-
- /*
- * Not much we can do on overflow, so scream and pretend we're
- * trying harder.
- */
- if (queue_sync_prod(q) == -EOVERFLOW)
- dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
- else if (queue_empty(q))
- ret = IRQ_NONE;
+static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
+{
+ u32 sid, ssid;
+ u16 grpid;
+ bool ssv, last;
+
+ sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
+ ssv = evt[0] & PRIQ_0_SSID_V;
+ ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
+ last = evt[0] & PRIQ_0_PRG_LAST;
+ grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;
+
+ dev_info(smmu->dev, "unexpected PRI request received:\n");
+ dev_info(smmu->dev,
+ "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
+ sid, ssid, grpid, last ? "L" : "",
+ evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
+ evt[0] & PRIQ_0_PERM_READ ? "R" : "",
+ evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
+ evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
+ evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);
+
+ if (last) {
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_PRI_RESP,
+ .substream_valid = ssv,
+ .pri = {
+ .sid = sid,
+ .ssid = ssid,
+ .grpid = grpid,
+ .resp = PRI_RESP_DENY,
+ },
+ };
- return ret;
+ arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ }
}
static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
@@ -1199,63 +1229,19 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
struct arm_smmu_queue *q = &smmu->priq.q;
u64 evt[PRIQ_ENT_DWORDS];
- while (!queue_remove_raw(q, evt)) {
- u32 sid, ssid;
- u16 grpid;
- bool ssv, last;
-
- sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
- ssv = evt[0] & PRIQ_0_SSID_V;
- ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
- last = evt[0] & PRIQ_0_PRG_LAST;
- grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;
-
- dev_info(smmu->dev, "unexpected PRI request received:\n");
- dev_info(smmu->dev,
- "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
- sid, ssid, grpid, last ? "L" : "",
- evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
- evt[0] & PRIQ_0_PERM_READ ? "R" : "",
- evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
- evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
- evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);
-
- if (last) {
- struct arm_smmu_cmdq_ent cmd = {
- .opcode = CMDQ_OP_PRI_RESP,
- .substream_valid = ssv,
- .pri = {
- .sid = sid,
- .ssid = ssid,
- .grpid = grpid,
- .resp = PRI_RESP_DENY,
- },
- };
+ do {
+ while (!queue_remove_raw(q, evt))
+ arm_smmu_handle_ppr(smmu, evt);
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- }
- }
+ if (queue_sync_prod(q) == -EOVERFLOW)
+ dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
+ } while (!queue_empty(q));
/* Sync our overflow flag, as we believe we're up to speed */
q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
return IRQ_HANDLED;
}
-static irqreturn_t arm_smmu_priq_handler(int irq, void *dev)
-{
- irqreturn_t ret = IRQ_WAKE_THREAD;
- struct arm_smmu_device *smmu = dev;
- struct arm_smmu_queue *q = &smmu->priq.q;
-
- /* PRIQ overflow indicates a programming error */
- if (queue_sync_prod(q) == -EOVERFLOW)
- dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
- else if (queue_empty(q))
- ret = IRQ_NONE;
-
- return ret;
-}
-
static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
/* We don't actually use CMD_SYNC interrupts for anything */
@@ -1288,15 +1274,11 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
if (active & GERROR_MSI_GERROR_ABT_ERR)
dev_warn(smmu->dev, "GERROR MSI write aborted\n");
- if (active & GERROR_MSI_PRIQ_ABT_ERR) {
+ if (active & GERROR_MSI_PRIQ_ABT_ERR)
dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
- arm_smmu_priq_handler(irq, smmu->dev);
- }
- if (active & GERROR_MSI_EVTQ_ABT_ERR) {
+ if (active & GERROR_MSI_EVTQ_ABT_ERR)
dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
- arm_smmu_evtq_handler(irq, smmu->dev);
- }
if (active & GERROR_MSI_CMDQ_ABT_ERR) {
dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
@@ -1569,6 +1551,8 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
return -ENOMEM;
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ domain->geometry.aperture_end = (1UL << ias) - 1;
+ domain->geometry.force_aperture = true;
smmu_domain->pgtbl_ops = pgtbl_ops;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
@@ -1578,20 +1562,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
return ret;
}
-static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
-{
- struct iommu_group *group;
- struct arm_smmu_group *smmu_group;
-
- group = iommu_group_get(dev);
- if (!group)
- return NULL;
-
- smmu_group = iommu_group_get_iommudata(group);
- iommu_group_put(group);
- return smmu_group;
-}
-
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
__le64 *step;
@@ -1614,27 +1584,17 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
return step;
}
-static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
+static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
int i;
- struct arm_smmu_domain *smmu_domain = smmu_group->domain;
- struct arm_smmu_strtab_ent *ste = &smmu_group->ste;
- struct arm_smmu_device *smmu = smmu_group->smmu;
-
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- ste->s1_cfg = &smmu_domain->s1_cfg;
- ste->s2_cfg = NULL;
- arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
- } else {
- ste->s1_cfg = NULL;
- ste->s2_cfg = &smmu_domain->s2_cfg;
- }
+ struct arm_smmu_master_data *master = fwspec->iommu_priv;
+ struct arm_smmu_device *smmu = master->smmu;
- for (i = 0; i < smmu_group->num_sids; ++i) {
- u32 sid = smmu_group->sids[i];
+ for (i = 0; i < fwspec->num_ids; ++i) {
+ u32 sid = fwspec->ids[i];
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
- arm_smmu_write_strtab_ent(smmu, sid, step, ste);
+ arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
}
return 0;
@@ -1642,13 +1602,11 @@ static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
static void arm_smmu_detach_dev(struct device *dev)
{
- struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
+ struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
- smmu_group->ste.bypass = true;
- if (arm_smmu_install_ste_for_group(smmu_group) < 0)
+ master->ste.bypass = true;
+ if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
dev_warn(dev, "failed to install bypass STE\n");
-
- smmu_group->domain = NULL;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1656,16 +1614,20 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
int ret = 0;
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
+ struct arm_smmu_master_data *master;
+ struct arm_smmu_strtab_ent *ste;
- if (!smmu_group)
+ if (!dev->iommu_fwspec)
return -ENOENT;
+ master = dev->iommu_fwspec->iommu_priv;
+ smmu = master->smmu;
+ ste = &master->ste;
+
/* Already attached to a different domain? */
- if (smmu_group->domain && smmu_group->domain != smmu_domain)
+ if (!ste->bypass)
arm_smmu_detach_dev(dev);
- smmu = smmu_group->smmu;
mutex_lock(&smmu_domain->init_mutex);
if (!smmu_domain->smmu) {
@@ -1684,21 +1646,21 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock;
}
- /* Group already attached to this domain? */
- if (smmu_group->domain)
- goto out_unlock;
+ ste->bypass = false;
+ ste->valid = true;
- smmu_group->domain = smmu_domain;
-
- /*
- * FIXME: This should always be "false" once we have IOMMU-backed
- * DMA ops for all devices behind the SMMU.
- */
- smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA;
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ ste->s1_cfg = &smmu_domain->s1_cfg;
+ ste->s2_cfg = NULL;
+ arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
+ } else {
+ ste->s1_cfg = NULL;
+ ste->s2_cfg = &smmu_domain->s2_cfg;
+ }
- ret = arm_smmu_install_ste_for_group(smmu_group);
+ ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
if (ret < 0)
- smmu_group->domain = NULL;
+ ste->valid = false;
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
@@ -1757,40 +1719,19 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
return ret;
}
-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp)
-{
- *(u32 *)sidp = alias;
- return 0; /* Continue walking */
-}
+static struct platform_driver arm_smmu_driver;
-static void __arm_smmu_release_pci_iommudata(void *data)
+static int arm_smmu_match_node(struct device *dev, void *data)
{
- kfree(data);
+ return dev->of_node == data;
}
-static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev)
+static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
{
- struct device_node *of_node;
- struct platform_device *smmu_pdev;
- struct arm_smmu_device *smmu = NULL;
- struct pci_bus *bus = pdev->bus;
-
- /* Walk up to the root bus */
- while (!pci_is_root_bus(bus))
- bus = bus->parent;
-
- /* Follow the "iommus" phandle from the host controller */
- of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
- if (!of_node)
- return NULL;
-
- /* See if we can find an SMMU corresponding to the phandle */
- smmu_pdev = of_find_device_by_node(of_node);
- if (smmu_pdev)
- smmu = platform_get_drvdata(smmu_pdev);
-
- of_node_put(of_node);
- return smmu;
+ struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
+ np, arm_smmu_match_node);
+ put_device(dev);
+ return dev ? dev_get_drvdata(dev) : NULL;
}
static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
@@ -1803,94 +1744,91 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
return sid < limit;
}
+static struct iommu_ops arm_smmu_ops;
+
static int arm_smmu_add_device(struct device *dev)
{
int i, ret;
- u32 sid, *sids;
- struct pci_dev *pdev;
- struct iommu_group *group;
- struct arm_smmu_group *smmu_group;
struct arm_smmu_device *smmu;
+ struct arm_smmu_master_data *master;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct iommu_group *group;
- /* We only support PCI, for now */
- if (!dev_is_pci(dev))
+ if (!fwspec || fwspec->ops != &arm_smmu_ops)
return -ENODEV;
-
- pdev = to_pci_dev(dev);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- smmu_group = iommu_group_get_iommudata(group);
- if (!smmu_group) {
- smmu = arm_smmu_get_for_pci_dev(pdev);
- if (!smmu) {
- ret = -ENOENT;
- goto out_remove_dev;
- }
-
- smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
- if (!smmu_group) {
- ret = -ENOMEM;
- goto out_remove_dev;
- }
-
- smmu_group->ste.valid = true;
- smmu_group->smmu = smmu;
- iommu_group_set_iommudata(group, smmu_group,
- __arm_smmu_release_pci_iommudata);
+ /*
+ * We _can_ actually withstand dodgy bus code re-calling add_device()
+ * without an intervening remove_device()/of_xlate() sequence, but
+ * we're not going to do so quietly...
+ */
+ if (WARN_ON_ONCE(fwspec->iommu_priv)) {
+ master = fwspec->iommu_priv;
+ smmu = master->smmu;
} else {
- smmu = smmu_group->smmu;
- }
+ smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+ if (!smmu)
+ return -ENODEV;
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
- /* Assume SID == RID until firmware tells us otherwise */
- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
- for (i = 0; i < smmu_group->num_sids; ++i) {
- /* If we already know about this SID, then we're done */
- if (smmu_group->sids[i] == sid)
- goto out_put_group;
+ master->smmu = smmu;
+ fwspec->iommu_priv = master;
}
- /* Check the SID is in range of the SMMU and our stream table */
- if (!arm_smmu_sid_in_range(smmu, sid)) {
- ret = -ERANGE;
- goto out_remove_dev;
- }
+ /* Check the SIDs are in range of the SMMU and our stream table */
+ for (i = 0; i < fwspec->num_ids; i++) {
+ u32 sid = fwspec->ids[i];
- /* Ensure l2 strtab is initialised */
- if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- ret = arm_smmu_init_l2_strtab(smmu, sid);
- if (ret)
- goto out_remove_dev;
- }
+ if (!arm_smmu_sid_in_range(smmu, sid))
+ return -ERANGE;
- /* Resize the SID array for the group */
- smmu_group->num_sids++;
- sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids),
- GFP_KERNEL);
- if (!sids) {
- smmu_group->num_sids--;
- ret = -ENOMEM;
- goto out_remove_dev;
+ /* Ensure l2 strtab is initialised */
+ if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
+ ret = arm_smmu_init_l2_strtab(smmu, sid);
+ if (ret)
+ return ret;
+ }
}
- /* Add the new SID */
- sids[smmu_group->num_sids - 1] = sid;
- smmu_group->sids = sids;
-
-out_put_group:
- iommu_group_put(group);
- return 0;
+ group = iommu_group_get_for_dev(dev);
+ if (!IS_ERR(group))
+ iommu_group_put(group);
-out_remove_dev:
- iommu_group_remove_device(dev);
- iommu_group_put(group);
- return ret;
+ return PTR_ERR_OR_ZERO(group);
}
static void arm_smmu_remove_device(struct device *dev)
{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_master_data *master;
+
+ if (!fwspec || fwspec->ops != &arm_smmu_ops)
+ return;
+
+ master = fwspec->iommu_priv;
+ if (master && master->ste.valid)
+ arm_smmu_detach_dev(dev);
iommu_group_remove_device(dev);
+ kfree(master);
+ iommu_fwspec_free(dev);
+}
+
+static struct iommu_group *arm_smmu_device_group(struct device *dev)
+{
+ struct iommu_group *group;
+
+ /*
+ * We don't support devices sharing stream IDs other than PCI RID
+ * aliases, since the necessary ID-to-device lookup becomes rather
+ * impractical given a potential sparse 32-bit stream ID space.
+ */
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
+ else
+ group = generic_device_group(dev);
+
+ return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
@@ -1937,6 +1875,11 @@ out_unlock:
return ret;
}
+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ return iommu_fwspec_add_ids(dev, args->args, 1);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1948,9 +1891,10 @@ static struct iommu_ops arm_smmu_ops = {
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
- .device_group = pci_device_group,
+ .device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
+ .of_xlate = arm_smmu_of_xlate,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -2151,6 +2095,24 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
1, ARM_SMMU_POLL_TIMEOUT_US);
}
+/* GBPA is "special" */
+static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
+{
+ int ret;
+ u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
+
+ ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ reg &= ~clr;
+ reg |= set;
+ writel_relaxed(reg | GBPA_UPDATE, gbpa);
+ return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+}
+
static void arm_smmu_free_msis(void *data)
{
struct device *dev = data;
@@ -2235,10 +2197,10 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
/* Request interrupt lines */
irq = smmu->evtq.q.irq;
if (irq) {
- ret = devm_request_threaded_irq(smmu->dev, irq,
- arm_smmu_evtq_handler,
+ ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
arm_smmu_evtq_thread,
- 0, "arm-smmu-v3-evtq", smmu);
+ IRQF_ONESHOT,
+ "arm-smmu-v3-evtq", smmu);
if (ret < 0)
dev_warn(smmu->dev, "failed to enable evtq irq\n");
}
@@ -2263,10 +2225,10 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
if (smmu->features & ARM_SMMU_FEAT_PRI) {
irq = smmu->priq.q.irq;
if (irq) {
- ret = devm_request_threaded_irq(smmu->dev, irq,
- arm_smmu_priq_handler,
+ ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
arm_smmu_priq_thread,
- 0, "arm-smmu-v3-priq",
+ IRQF_ONESHOT,
+ "arm-smmu-v3-priq",
smmu);
if (ret < 0)
dev_warn(smmu->dev,
@@ -2296,7 +2258,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
return ret;
}
-static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
+static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
int ret;
u32 reg, enables;
@@ -2397,8 +2359,17 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
return ret;
}
- /* Enable the SMMU interface */
- enables |= CR0_SMMUEN;
+
+ /* Enable the SMMU interface, or ensure bypass */
+ if (!bypass || disable_bypass) {
+ enables |= CR0_SMMUEN;
+ } else {
+ ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
+ if (ret) {
+ dev_err(smmu->dev, "GBPA not responding to update\n");
+ return ret;
+ }
+ }
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
ARM_SMMU_CR0ACK);
if (ret) {
@@ -2597,6 +2568,15 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
struct resource *res;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
+ bool bypass = true;
+ u32 cells;
+
+ if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
+ dev_err(dev, "missing #iommu-cells property\n");
+ else if (cells != 1)
+ dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
+ else
+ bypass = false;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
@@ -2649,7 +2629,33 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, smmu);
/* Reset the device */
- return arm_smmu_device_reset(smmu);
+ ret = arm_smmu_device_reset(smmu, bypass);
+ if (ret)
+ return ret;
+
+ /* And we're up. Go go go! */
+ of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+#ifdef CONFIG_PCI
+ if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
+ pci_request_acs();
+ ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
+#endif
+#ifdef CONFIG_ARM_AMBA
+ if (amba_bustype.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
+#endif
+ if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
+ ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
@@ -2677,22 +2683,14 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void)
{
- struct device_node *np;
- int ret;
-
- np = of_find_matching_node(NULL, arm_smmu_of_match);
- if (!np)
- return 0;
-
- of_node_put(np);
-
- ret = platform_driver_register(&arm_smmu_driver);
- if (ret)
- return ret;
-
- pci_request_acs();
+ static bool registered;
+ int ret = 0;
- return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ if (!registered) {
+ ret = platform_driver_register(&arm_smmu_driver);
+ registered = !ret;
+ }
+ return ret;
}
static void __exit arm_smmu_exit(void)
@@ -2703,6 +2701,20 @@ static void __exit arm_smmu_exit(void)
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
+static int __init arm_smmu_of_init(struct device_node *np)
+{
+ int ret = arm_smmu_init();
+
+ if (ret)
+ return ret;
+
+ if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
+ return -ENODEV;
+
+ return 0;
+}
+IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
+
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
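The reworked queue_poll_cons() above leans on queue_full() and queue_empty(), whose bodies are not in this hunk; the sketch below restates the wrap-bit convention implied by the Q_IDX/Q_WRP helpers (an assumption drawn from this file, with an arbitrary queue size): producer and consumer pointers each carry an index plus one extra wrap bit, so equal indices mean empty when the wrap bits match and full when they differ.

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define LOG2SIZE 4
#define IDX(p)	((p) & ((1u << LOG2SIZE) - 1))
#define WRP(p)	((p) & (1u << LOG2SIZE))

static bool q_empty(uint32_t prod, uint32_t cons)
{
	return IDX(prod) == IDX(cons) && WRP(prod) == WRP(cons);
}

static bool q_full(uint32_t prod, uint32_t cons)
{
	return IDX(prod) == IDX(cons) && WRP(prod) != WRP(cons);
}

int main(void)
{
	uint32_t prod = 0, cons = 0;

	assert(q_empty(prod, cons));
	for (int i = 0; i < (1 << LOG2SIZE); i++)
		prod++;			/* producer fills the queue */
	assert(q_full(prod, cons));	/* same index, wrap bits differ */
	for (int i = 0; i < (1 << LOG2SIZE); i++)
		cons++;			/* consumer drains it again */
	assert(q_empty(prod, cons));
	return 0;
}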
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2db74ebc3240..8f7281444551 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) "arm-smmu: " fmt
+#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
@@ -40,6 +41,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -49,15 +52,9 @@
#include "io-pgtable.h"
-/* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS 128
-
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128
-/* Maximum number of mapping groups per SMMU */
-#define ARM_SMMU_MAX_SMRS 128
-
/* SMMU global address space */
#define ARM_SMMU_GR0(smmu) ((smmu)->base)
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
@@ -165,21 +162,27 @@
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
#define SMR_VALID (1 << 31)
#define SMR_MASK_SHIFT 16
-#define SMR_MASK_MASK 0x7fff
#define SMR_ID_SHIFT 0
-#define SMR_ID_MASK 0x7fff
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
-#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
-#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
-#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
+enum arm_smmu_s2cr_type {
+ S2CR_TYPE_TRANS,
+ S2CR_TYPE_BYPASS,
+ S2CR_TYPE_FAULT,
+};
#define S2CR_PRIVCFG_SHIFT 24
-#define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)
+#define S2CR_PRIVCFG_MASK 0x3
+enum arm_smmu_s2cr_privcfg {
+ S2CR_PRIVCFG_DEFAULT,
+ S2CR_PRIVCFG_DIPAN,
+ S2CR_PRIVCFG_UNPRIV,
+ S2CR_PRIVCFG_PRIV,
+};
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
@@ -217,6 +220,7 @@
#define ARM_SMMU_CB_TTBR0 0x20
#define ARM_SMMU_CB_TTBR1 0x28
#define ARM_SMMU_CB_TTBCR 0x30
+#define ARM_SMMU_CB_CONTEXTIDR 0x34
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_S1_MAIR1 0x3c
#define ARM_SMMU_CB_PAR 0x50
@@ -239,7 +243,6 @@
#define SCTLR_AFE (1 << 2)
#define SCTLR_TRE (1 << 1)
#define SCTLR_M (1 << 0)
-#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
@@ -296,23 +299,35 @@ enum arm_smmu_implementation {
CAVIUM_SMMUV2,
};
+struct arm_smmu_s2cr {
+ struct iommu_group *group;
+ int count;
+ enum arm_smmu_s2cr_type type;
+ enum arm_smmu_s2cr_privcfg privcfg;
+ u8 cbndx;
+};
+
+#define s2cr_init_val (struct arm_smmu_s2cr){ \
+ .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
+}
+
struct arm_smmu_smr {
- u8 idx;
u16 mask;
u16 id;
+ bool valid;
};
struct arm_smmu_master_cfg {
- int num_streamids;
- u16 streamids[MAX_MASTER_STREAMIDS];
- struct arm_smmu_smr *smrs;
-};
-
-struct arm_smmu_master {
- struct device_node *of_node;
- struct rb_node node;
- struct arm_smmu_master_cfg cfg;
+ struct arm_smmu_device *smmu;
+ s16 smendx[];
};
+#define INVALID_SMENDX -1
+#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
+#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
+#define fwspec_smendx(fw, i) \
+ (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
+#define for_each_cfg_sme(fw, i, idx) \
+ for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
struct arm_smmu_device {
struct device *dev;
@@ -346,7 +361,11 @@ struct arm_smmu_device {
atomic_t irptndx;
u32 num_mapping_groups;
- DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+ u16 streamid_mask;
+ u16 smr_mask_mask;
+ struct arm_smmu_smr *smrs;
+ struct arm_smmu_s2cr *s2crs;
+ struct mutex stream_map_mutex;
unsigned long va_size;
unsigned long ipa_size;
@@ -357,9 +376,6 @@ struct arm_smmu_device {
u32 num_context_irqs;
unsigned int *irqs;
- struct list_head list;
- struct rb_root masters;
-
u32 cavium_id_base; /* Specific to Cavium */
};
@@ -397,15 +413,6 @@ struct arm_smmu_domain {
struct iommu_domain domain;
};
-struct arm_smmu_phandle_args {
- struct device_node *np;
- int args_count;
- uint32_t args[MAX_MASTER_STREAMIDS];
-};
-
-static DEFINE_SPINLOCK(arm_smmu_devices_lock);
-static LIST_HEAD(arm_smmu_devices);
-
struct arm_smmu_option_prop {
u32 opt;
const char *prop;
@@ -413,6 +420,8 @@ struct arm_smmu_option_prop {
static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
+static bool using_legacy_binding, using_generic_binding;
+
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
{ 0, NULL},
@@ -444,131 +453,86 @@ static struct device_node *dev_get_dev_node(struct device *dev)
while (!pci_is_root_bus(bus))
bus = bus->parent;
- return bus->bridge->parent->of_node;
+ return of_node_get(bus->bridge->parent->of_node);
}
- return dev->of_node;
+ return of_node_get(dev->of_node);
}
-static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
- struct device_node *dev_node)
+static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
- struct rb_node *node = smmu->masters.rb_node;
-
- while (node) {
- struct arm_smmu_master *master;
-
- master = container_of(node, struct arm_smmu_master, node);
-
- if (dev_node < master->of_node)
- node = node->rb_left;
- else if (dev_node > master->of_node)
- node = node->rb_right;
- else
- return master;
- }
-
- return NULL;
+ *((__be32 *)data) = cpu_to_be32(alias);
+ return 0; /* Continue walking */
}
-static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct device *dev)
+static int __find_legacy_master_phandle(struct device *dev, void *data)
{
- struct arm_smmu_master_cfg *cfg = NULL;
- struct iommu_group *group = iommu_group_get(dev);
-
- if (group) {
- cfg = iommu_group_get_iommudata(group);
- iommu_group_put(group);
- }
-
- return cfg;
+ struct of_phandle_iterator *it = *(void **)data;
+ struct device_node *np = it->node;
+ int err;
+
+ of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
+ "#stream-id-cells", 0)
+ if (it->node == np) {
+ *(void **)data = dev;
+ return 1;
+ }
+ it->node = np;
+ return err == -ENOENT ? 0 : err;
}
-static int insert_smmu_master(struct arm_smmu_device *smmu,
- struct arm_smmu_master *master)
+static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
+
+static int arm_smmu_register_legacy_master(struct device *dev,
+ struct arm_smmu_device **smmu)
{
- struct rb_node **new, *parent;
-
- new = &smmu->masters.rb_node;
- parent = NULL;
- while (*new) {
- struct arm_smmu_master *this
- = container_of(*new, struct arm_smmu_master, node);
-
- parent = *new;
- if (master->of_node < this->of_node)
- new = &((*new)->rb_left);
- else if (master->of_node > this->of_node)
- new = &((*new)->rb_right);
- else
- return -EEXIST;
+ struct device *smmu_dev;
+ struct device_node *np;
+ struct of_phandle_iterator it;
+ void *data = &it;
+ u32 *sids;
+ __be32 pci_sid;
+ int err;
+
+ np = dev_get_dev_node(dev);
+ if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
+ of_node_put(np);
+ return -ENODEV;
}
- rb_link_node(&master->node, parent, new);
- rb_insert_color(&master->node, &smmu->masters);
- return 0;
-}
-
-static int register_smmu_master(struct arm_smmu_device *smmu,
- struct device *dev,
- struct arm_smmu_phandle_args *masterspec)
-{
- int i;
- struct arm_smmu_master *master;
+ it.node = np;
+ err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
+ __find_legacy_master_phandle);
+ smmu_dev = data;
+ of_node_put(np);
+ if (err == 0)
+ return -ENODEV;
+ if (err < 0)
+ return err;
- master = find_smmu_master(smmu, masterspec->np);
- if (master) {
- dev_err(dev,
- "rejecting multiple registrations for master device %s\n",
- masterspec->np->name);
- return -EBUSY;
+ if (dev_is_pci(dev)) {
+ /* "mmu-masters" assumes Stream ID == Requester ID */
+ pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
+ &pci_sid);
+ it.cur = &pci_sid;
+ it.cur_count = 1;
}
- if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
- dev_err(dev,
- "reached maximum number (%d) of stream IDs for master device %s\n",
- MAX_MASTER_STREAMIDS, masterspec->np->name);
- return -ENOSPC;
- }
+ err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
+ &arm_smmu_ops);
+ if (err)
+ return err;
- master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
- if (!master)
+ sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
+ if (!sids)
return -ENOMEM;
- master->of_node = masterspec->np;
- master->cfg.num_streamids = masterspec->args_count;
-
- for (i = 0; i < master->cfg.num_streamids; ++i) {
- u16 streamid = masterspec->args[i];
-
- if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
- (streamid >= smmu->num_mapping_groups)) {
- dev_err(dev,
- "stream ID for master device %s greater than maximum allowed (%d)\n",
- masterspec->np->name, smmu->num_mapping_groups);
- return -ERANGE;
- }
- master->cfg.streamids[i] = streamid;
- }
- return insert_smmu_master(smmu, master);
-}
-
-static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
-{
- struct arm_smmu_device *smmu;
- struct arm_smmu_master *master = NULL;
- struct device_node *dev_node = dev_get_dev_node(dev);
-
- spin_lock(&arm_smmu_devices_lock);
- list_for_each_entry(smmu, &arm_smmu_devices, list) {
- master = find_smmu_master(smmu, dev_node);
- if (master)
- break;
- }
- spin_unlock(&arm_smmu_devices_lock);
-
- return master ? smmu : NULL;
+ *smmu = dev_get_drvdata(smmu_dev);
+ of_phandle_iterator_args(&it, sids, it.cur_count);
+ err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
+ kfree(sids);
+ return err;
}
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -738,7 +702,7 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
- u32 reg;
+ u32 reg, reg2;
u64 reg64;
bool stage1;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
@@ -781,14 +745,22 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
/* TTBRs */
if (stage1) {
- reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-
- reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
- writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
-
- reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
- reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
- writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+ u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
+
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+ reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
+ reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
+ writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
+ } else {
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+ }
} else {
reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
@@ -796,28 +768,36 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
/* TTBCR */
if (stage1) {
- reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
- if (smmu->version > ARM_SMMU_V1) {
- reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
- reg |= TTBCR2_SEP_UPSTREAM;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+ reg = pgtbl_cfg->arm_v7s_cfg.tcr;
+ reg2 = 0;
+ } else {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+ reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+ reg2 |= TTBCR2_SEP_UPSTREAM;
}
+ if (smmu->version > ARM_SMMU_V1)
+ writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
} else {
reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
}
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
/* MAIRs (stage-1 only) */
if (stage1) {
- reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+ reg = pgtbl_cfg->arm_v7s_cfg.prrr;
+ reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
+ } else {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+ reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+ }
writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
- reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
+ writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
}
/* SCTLR */
- reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
if (stage1)
reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
@@ -841,12 +821,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
- /* We're bypassing these SIDs, so don't allocate an actual context */
- if (domain->type == IOMMU_DOMAIN_DMA) {
- smmu_domain->smmu = smmu;
- goto out_unlock;
- }
-
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -880,6 +854,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
*/
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
+ if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
+ !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
+ (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
+ (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
+ cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
(smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
ARM_SMMU_FEAT_FMT_AARCH64_16K |
@@ -899,10 +878,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
oas = smmu->ipa_size;
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
fmt = ARM_64_LPAE_S1;
- } else {
+ } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
fmt = ARM_32_LPAE_S1;
ias = min(ias, 32UL);
oas = min(oas, 40UL);
+ } else {
+ fmt = ARM_V7S;
+ ias = min(ias, 32UL);
+ oas = min(oas, 32UL);
}
break;
case ARM_SMMU_DOMAIN_NESTED:
@@ -958,6 +941,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
/* Update the domain's page sizes to reflect the page table format */
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ domain->geometry.aperture_end = (1UL << ias) - 1;
+ domain->geometry.force_aperture = true;
/* Initialise the context bank with our page table cfg */
arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
@@ -996,7 +981,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
void __iomem *cb_base;
int irq;
- if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
+ if (!smmu)
return;
/*
@@ -1030,8 +1015,8 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
if (!smmu_domain)
return NULL;
- if (type == IOMMU_DOMAIN_DMA &&
- iommu_get_dma_cookie(&smmu_domain->domain)) {
+ if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
+ iommu_get_dma_cookie(&smmu_domain->domain))) {
kfree(smmu_domain);
return NULL;
}
@@ -1055,162 +1040,207 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
kfree(smmu_domain);
}
-static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
- struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
- int i;
- struct arm_smmu_smr *smrs;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ struct arm_smmu_smr *smr = smmu->smrs + idx;
+ u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
- if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
- return 0;
+ if (smr->valid)
+ reg |= SMR_VALID;
+ writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
+}
- if (cfg->smrs)
- return -EEXIST;
+static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
+{
+ struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
+ u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
+ (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
+ (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
- smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
- if (!smrs) {
- dev_err(smmu->dev, "failed to allocate %d SMRs\n",
- cfg->num_streamids);
- return -ENOMEM;
- }
+ writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
+}
- /* Allocate the SMRs on the SMMU */
- for (i = 0; i < cfg->num_streamids; ++i) {
- int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
- smmu->num_mapping_groups);
- if (idx < 0) {
- dev_err(smmu->dev, "failed to allocate free SMR\n");
- goto err_free_smrs;
- }
+static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
+{
+ arm_smmu_write_s2cr(smmu, idx);
+ if (smmu->smrs)
+ arm_smmu_write_smr(smmu, idx);
+}
- smrs[i] = (struct arm_smmu_smr) {
- .idx = idx,
- .mask = 0, /* We don't currently share SMRs */
- .id = cfg->streamids[i],
- };
- }
+static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
+{
+ struct arm_smmu_smr *smrs = smmu->smrs;
+ int i, free_idx = -ENOSPC;
- /* It worked! Now, poke the actual hardware */
- for (i = 0; i < cfg->num_streamids; ++i) {
- u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
- smrs[i].mask << SMR_MASK_SHIFT;
- writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
- }
+ /* Stream indexing is blissfully easy */
+ if (!smrs)
+ return id;
- cfg->smrs = smrs;
- return 0;
+ /* Validating SMRs is... less so */
+ for (i = 0; i < smmu->num_mapping_groups; ++i) {
+ if (!smrs[i].valid) {
+ /*
+ * Note the first free entry we come across, which
+ * we'll claim in the end if nothing else matches.
+ */
+ if (free_idx < 0)
+ free_idx = i;
+ continue;
+ }
+ /*
+ * If the new entry is _entirely_ matched by an existing entry,
+ * then reuse that, with the guarantee that there also cannot
+ * be any subsequent conflicting entries. In normal use we'd
+ * expect simply identical entries for this case, but there's
+ * no harm in accommodating the generalisation.
+ */
+ if ((mask & smrs[i].mask) == mask &&
+ !((id ^ smrs[i].id) & ~smrs[i].mask))
+ return i;
+ /*
+ * If the new entry has any other overlap with an existing one,
+ * though, then there always exists at least one stream ID
+ * which would cause a conflict, and we can't allow that risk.
+ */
+ if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
+ return -EINVAL;
+ }
-err_free_smrs:
- while (--i >= 0)
- __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
- kfree(smrs);
- return -ENOSPC;
+ return free_idx;
}
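
arm_smmu_find_sme() treats an SMR as a masked match: bits set in the mask are wildcards, so an entry (id, mask) claims every stream ID that agrees with id outside the mask. The two bit tests above check, first, whether the new entry is entirely contained by an existing one (safe to reuse), and second, whether any single stream ID could match both (a conflict). A standalone sketch of both predicates, with hypothetical example values:

#include <stdint.h>
#include <stdio.h>

/* 1 if (new_id, new_mask) is entirely covered by existing (id, mask) */
static int smr_contains(uint16_t id, uint16_t mask,
                        uint16_t new_id, uint16_t new_mask)
{
    return (new_mask & mask) == new_mask && !((new_id ^ id) & ~mask);
}

/* 1 if at least one stream ID would match both entries */
static int smr_overlaps(uint16_t id, uint16_t mask,
                        uint16_t new_id, uint16_t new_mask)
{
    return !((new_id ^ id) & ~(mask | new_mask));
}

int main(void)
{
    /* 0x40/0x0f covers IDs 0x40..0x4f; 0x42/0x03 covers 0x40..0x43 */
    printf("contains: %d\n", smr_contains(0x40, 0x0f, 0x42, 0x03)); /* 1 */
    printf("overlaps: %d\n", smr_overlaps(0x40, 0x0f, 0x50, 0x03)); /* 0 */
    return 0;
}
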
-static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
- struct arm_smmu_master_cfg *cfg)
+static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
- int i;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
- struct arm_smmu_smr *smrs = cfg->smrs;
-
- if (!smrs)
- return;
+ if (--smmu->s2crs[idx].count)
+ return false;
- /* Invalidate the SMRs before freeing back to the allocator */
- for (i = 0; i < cfg->num_streamids; ++i) {
- u8 idx = smrs[i].idx;
+ smmu->s2crs[idx] = s2cr_init_val;
+ if (smmu->smrs)
+ smmu->smrs[idx].valid = false;
- writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
- __arm_smmu_free_bitmap(smmu->smr_map, idx);
- }
-
- cfg->smrs = NULL;
- kfree(smrs);
+ return true;
}
-static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_master_cfg *cfg)
+static int arm_smmu_master_alloc_smes(struct device *dev)
{
- int i, ret;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+ struct arm_smmu_device *smmu = cfg->smmu;
+ struct arm_smmu_smr *smrs = smmu->smrs;
+ struct iommu_group *group;
+ int i, idx, ret;
- /*
- * FIXME: This won't be needed once we have IOMMU-backed DMA ops
- * for all devices behind the SMMU. Note that we need to take
- * care configuring SMRs for devices both a platform_device and
- * a PCI device (i.e. a PCI host controller)
- */
- if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
- return 0;
+ mutex_lock(&smmu->stream_map_mutex);
+ /* Figure out a viable stream map entry allocation */
+ for_each_cfg_sme(fwspec, i, idx) {
+ u16 sid = fwspec->ids[i];
+ u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
- /* Devices in an IOMMU group may already be configured */
- ret = arm_smmu_master_configure_smrs(smmu, cfg);
- if (ret)
- return ret == -EEXIST ? 0 : ret;
+ if (idx != INVALID_SMENDX) {
+ ret = -EEXIST;
+ goto out_err;
+ }
+
+ ret = arm_smmu_find_sme(smmu, sid, mask);
+ if (ret < 0)
+ goto out_err;
+
+ idx = ret;
+ if (smrs && smmu->s2crs[idx].count == 0) {
+ smrs[idx].id = sid;
+ smrs[idx].mask = mask;
+ smrs[idx].valid = true;
+ }
+ smmu->s2crs[idx].count++;
+ cfg->smendx[i] = (s16)idx;
+ }
- for (i = 0; i < cfg->num_streamids; ++i) {
- u32 idx, s2cr;
+ group = iommu_group_get_for_dev(dev);
+ if (!group)
+ group = ERR_PTR(-ENOMEM);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ goto out_err;
+ }
+ iommu_group_put(group);
- idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
- s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
- (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
- writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+ /* It worked! Now, poke the actual hardware */
+ for_each_cfg_sme(fwspec, i, idx) {
+ arm_smmu_write_sme(smmu, idx);
+ smmu->s2crs[idx].group = group;
}
+ mutex_unlock(&smmu->stream_map_mutex);
return 0;
+
+out_err:
+ while (i--) {
+ arm_smmu_free_sme(smmu, cfg->smendx[i]);
+ cfg->smendx[i] = INVALID_SMENDX;
+ }
+ mutex_unlock(&smmu->stream_map_mutex);
+ return ret;
}
-static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
- int i;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
- /* An IOMMU group is torn down by the first device to be removed */
- if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
- return;
+ struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+ struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+ int i, idx;
- /*
- * We *must* clear the S2CR first, because freeing the SMR means
- * that it can be re-allocated immediately.
- */
- for (i = 0; i < cfg->num_streamids; ++i) {
- u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
- u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
-
- writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+ mutex_lock(&smmu->stream_map_mutex);
+ for_each_cfg_sme(fwspec, i, idx) {
+ if (arm_smmu_free_sme(smmu, idx))
+ arm_smmu_write_sme(smmu, idx);
+ cfg->smendx[i] = INVALID_SMENDX;
}
-
- arm_smmu_master_free_smrs(smmu, cfg);
+ mutex_unlock(&smmu->stream_map_mutex);
}
-static void arm_smmu_detach_dev(struct device *dev,
- struct arm_smmu_master_cfg *cfg)
+static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+ struct iommu_fwspec *fwspec)
{
- struct iommu_domain *domain = dev->archdata.iommu;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_s2cr *s2cr = smmu->s2crs;
+ enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
+ u8 cbndx = smmu_domain->cfg.cbndx;
+ int i, idx;
- dev->archdata.iommu = NULL;
- arm_smmu_domain_remove_master(smmu_domain, cfg);
+ for_each_cfg_sme(fwspec, i, idx) {
+ if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
+ continue;
+
+ s2cr[idx].type = type;
+ s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
+ s2cr[idx].cbndx = cbndx;
+ arm_smmu_write_s2cr(smmu, idx);
+ }
+ return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct arm_smmu_device *smmu;
- struct arm_smmu_master_cfg *cfg;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- smmu = find_smmu_for_device(dev);
- if (!smmu) {
+ if (!fwspec || fwspec->ops != &arm_smmu_ops) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
return -ENXIO;
}
+ /*
+ * FIXME: The arch/arm DMA API code tries to attach devices to its own
+ * domains between of_xlate() and add_device() - we have no way to cope
+ * with that, so until ARM gets converted to rely on groups and default
+ * domains, just say no (but more politely than by dereferencing NULL).
+ * This should be at least a WARN_ON once that's sorted.
+ */
+ if (!fwspec->iommu_priv)
+ return -ENODEV;
+
+ smmu = fwspec_smmu(fwspec);
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
if (ret < 0)
@@ -1228,18 +1258,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* Looks ok, so add the device to the domain */
- cfg = find_smmu_master_cfg(dev);
- if (!cfg)
- return -ENODEV;
-
- /* Detach the dev from its current domain */
- if (dev->archdata.iommu)
- arm_smmu_detach_dev(dev, cfg);
-
- ret = arm_smmu_domain_add_master(smmu_domain, cfg);
- if (!ret)
- dev->archdata.iommu = domain;
- return ret;
+ return arm_smmu_domain_add_master(smmu_domain, fwspec);
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
@@ -1358,110 +1377,113 @@ static bool arm_smmu_capable(enum iommu_cap cap)
}
}
-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
+static int arm_smmu_match_node(struct device *dev, void *data)
{
- *((u16 *)data) = alias;
- return 0; /* Continue walking */
+ return dev->of_node == data;
}
-static void __arm_smmu_release_pci_iommudata(void *data)
+static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
{
- kfree(data);
+ struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
+ np, arm_smmu_match_node);
+ put_device(dev);
+ return dev ? dev_get_drvdata(dev) : NULL;
}
-static int arm_smmu_init_pci_device(struct pci_dev *pdev,
- struct iommu_group *group)
+static int arm_smmu_add_device(struct device *dev)
{
+ struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
- u16 sid;
- int i;
-
- cfg = iommu_group_get_iommudata(group);
- if (!cfg) {
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ int i, ret;
- iommu_group_set_iommudata(group, cfg,
- __arm_smmu_release_pci_iommudata);
+ if (using_legacy_binding) {
+ ret = arm_smmu_register_legacy_master(dev, &smmu);
+ fwspec = dev->iommu_fwspec;
+ if (ret)
+ goto out_free;
+ } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
+ smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+ } else {
+ return -ENODEV;
}
- if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
- return -ENOSPC;
-
- /*
- * Assume Stream ID == Requester ID for now.
- * We need a way to describe the ID mappings in FDT.
- */
- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
- for (i = 0; i < cfg->num_streamids; ++i)
- if (cfg->streamids[i] == sid)
- break;
-
- /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
- if (i == cfg->num_streamids)
- cfg->streamids[cfg->num_streamids++] = sid;
+ ret = -EINVAL;
+ for (i = 0; i < fwspec->num_ids; i++) {
+ u16 sid = fwspec->ids[i];
+ u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
- return 0;
-}
-
-static int arm_smmu_init_platform_device(struct device *dev,
- struct iommu_group *group)
-{
- struct arm_smmu_device *smmu = find_smmu_for_device(dev);
- struct arm_smmu_master *master;
+ if (sid & ~smmu->streamid_mask) {
+ dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
+ sid, smmu->streamid_mask);
+ goto out_free;
+ }
+ if (mask & ~smmu->smr_mask_mask) {
+ dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
+ mask, smmu->smr_mask_mask);
+ goto out_free;
+ }
+ }
- if (!smmu)
- return -ENODEV;
+ ret = -ENOMEM;
+ cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
+ GFP_KERNEL);
+ if (!cfg)
+ goto out_free;
- master = find_smmu_master(smmu, dev->of_node);
- if (!master)
- return -ENODEV;
+ cfg->smmu = smmu;
+ fwspec->iommu_priv = cfg;
+ while (i--)
+ cfg->smendx[i] = INVALID_SMENDX;
- iommu_group_set_iommudata(group, &master->cfg, NULL);
+ ret = arm_smmu_master_alloc_smes(dev);
+ if (ret)
+ goto out_free;
return 0;
-}
-static int arm_smmu_add_device(struct device *dev)
-{
- struct iommu_group *group;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
- return 0;
+out_free:
+ if (fwspec)
+ kfree(fwspec->iommu_priv);
+ iommu_fwspec_free(dev);
+ return ret;
}
static void arm_smmu_remove_device(struct device *dev)
{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+ if (!fwspec || fwspec->ops != &arm_smmu_ops)
+ return;
+
+ arm_smmu_master_free_smes(fwspec);
iommu_group_remove_device(dev);
+ kfree(fwspec->iommu_priv);
+ iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
- struct iommu_group *group;
- int ret;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+ struct iommu_group *group = NULL;
+ int i, idx;
- if (dev_is_pci(dev))
- group = pci_device_group(dev);
- else
- group = generic_device_group(dev);
+ for_each_cfg_sme(fwspec, i, idx) {
+ if (group && smmu->s2crs[idx].group &&
+ group != smmu->s2crs[idx].group)
+ return ERR_PTR(-EINVAL);
+
+ group = smmu->s2crs[idx].group;
+ }
- if (IS_ERR(group))
+ if (group)
return group;
if (dev_is_pci(dev))
- ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
+ group = pci_device_group(dev);
else
- ret = arm_smmu_init_platform_device(dev, group);
-
- if (ret) {
- iommu_group_put(group);
- group = ERR_PTR(ret);
- }
+ group = generic_device_group(dev);
return group;
}
@@ -1510,6 +1532,19 @@ out_unlock:
return ret;
}
+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ u32 fwid = 0;
+
+ if (args->args_count > 0)
+ fwid |= (u16)args->args[0];
+
+ if (args->args_count > 1)
+ fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+
+ return iommu_fwspec_add_ids(dev, &fwid, 1);
+}
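
arm_smmu_of_xlate() flattens a one- or two-cell "iommus" specifier into a single u32: stream ID in bits [15:0] and the optional SMR mask in bits [31:16], matching the SMR_MASK_SHIFT split used when the IDs are consumed again in arm_smmu_add_device(). A standalone sketch of the packing, with hypothetical specifier values:

#include <stdint.h>
#include <stdio.h>

#define SMR_MASK_SHIFT 16

static uint32_t pack_fwid(const uint32_t *cells, int count)
{
    uint32_t fwid = 0;

    if (count > 0)
        fwid |= (uint16_t)cells[0];                             /* stream ID */
    if (count > 1)
        fwid |= (uint32_t)(uint16_t)cells[1] << SMR_MASK_SHIFT; /* SMR mask */
    return fwid;
}

int main(void)
{
    uint32_t spec[] = { 0x42, 0x3 };    /* e.g. iommus = <&smmu 0x42 0x3>; */

    printf("fwid = 0x%08x\n", pack_fwid(spec, 2)); /* 0x00030042 */
    return 0;
}
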
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1524,6 +1559,7 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
+ .of_xlate = arm_smmu_of_xlate,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -1531,19 +1567,19 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
void __iomem *cb_base;
- int i = 0;
+ int i;
u32 reg, major;
/* clear global FSR */
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
- /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
- reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
- for (i = 0; i < smmu->num_mapping_groups; ++i) {
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
- writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
- }
+ /*
+ * Reset stream mapping groups: Initial values mark all SMRn as
+ * invalid and all S2CRn as bypass unless overridden.
+ */
+ for (i = 0; i < smmu->num_mapping_groups; ++i)
+ arm_smmu_write_sme(smmu, i);
/*
* Before clearing ARM_MMU500_ACTLR_CPRE, need to
@@ -1632,6 +1668,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 id;
bool cttw_dt, cttw_reg;
+ int i;
dev_notice(smmu->dev, "probing hardware configuration...\n");
dev_notice(smmu->dev, "SMMUv%d with:\n",
@@ -1690,39 +1727,55 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
dev_notice(smmu->dev,
"\t(IDR0.CTTW overridden by dma-coherent property)\n");
+ /* Max. number of entries we have for stream matching/indexing */
+ size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+ smmu->streamid_mask = size - 1;
if (id & ID0_SMS) {
- u32 smr, sid, mask;
+ u32 smr;
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
- smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
- ID0_NUMSMRG_MASK;
- if (smmu->num_mapping_groups == 0) {
+ size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
+ if (size == 0) {
dev_err(smmu->dev,
"stream-matching supported, but no SMRs present!\n");
return -ENODEV;
}
- smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
- smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
+ /*
+ * SMR.ID bits may not be preserved if the corresponding MASK
+ * bits are set, so check each one separately. We can reject
+ * masters later if they try to claim IDs outside these masks.
+ */
+ smr = smmu->streamid_mask << SMR_ID_SHIFT;
writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ smmu->streamid_mask = smr >> SMR_ID_SHIFT;
- mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
- sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
- if ((mask & sid) != sid) {
- dev_err(smmu->dev,
- "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
- mask, sid);
- return -ENODEV;
- }
+ smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+
+ /* Zero-initialised to mark as invalid */
+ smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
+ GFP_KERNEL);
+ if (!smmu->smrs)
+ return -ENOMEM;
dev_notice(smmu->dev,
- "\tstream matching with %u register groups, mask 0x%x",
- smmu->num_mapping_groups, mask);
- } else {
- smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
- ID0_NUMSIDB_MASK;
+ "\tstream matching with %lu register groups, mask 0x%x",
+ size, smmu->smr_mask_mask);
}
+ /* s2cr->type == 0 means translation, so initialise explicitly */
+ smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
+ GFP_KERNEL);
+ if (!smmu->s2crs)
+ return -ENOMEM;
+ for (i = 0; i < size; i++)
+ smmu->s2crs[i] = s2cr_init_val;
+
+ smmu->num_mapping_groups = size;
+ mutex_init(&smmu->stream_map_mutex);
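
The probe above discovers which SMR bits the hardware actually wires up by writing all-ones to one field at a time and reading back whatever sticks: first the ID field, then the mask field. A standalone sketch of the technique against a fake register that implements only 10 ID bits and 8 mask bits (all values illustrative):

#include <stdint.h>
#include <stdio.h>

#define SMR_MASK_SHIFT 16

static uint32_t fake_smr;   /* stand-in for the SMR(0) MMIO register */

/* this sketch's "hardware" keeps only 10 ID bits and 8 mask bits */
static void smr_write(uint32_t v)
{
    fake_smr = v & ((0xffu << SMR_MASK_SHIFT) | 0x3ffu);
}
static uint32_t smr_read(void) { return fake_smr; }

int main(void)
{
    uint16_t streamid_mask = 0x7fff;  /* optimistic value from ID0.NUMSIDB */
    uint16_t smr_mask_mask;

    smr_write(streamid_mask);                   /* probe the ID field */
    streamid_mask = smr_read();                 /* -> 0x03ff survives */

    smr_write((uint32_t)streamid_mask << SMR_MASK_SHIFT); /* probe MASK */
    smr_mask_mask = smr_read() >> SMR_MASK_SHIFT;         /* -> 0xff survives */

    printf("id bits 0x%x, mask bits 0x%x\n", streamid_mask, smr_mask_mask);
    return 0;
}
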
if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
@@ -1855,15 +1908,24 @@ MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
const struct arm_smmu_match_data *data;
struct resource *res;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
- struct rb_node *node;
- struct of_phandle_iterator it;
- struct arm_smmu_phandle_args *masterspec;
int num_irqs, i, err;
+ bool legacy_binding;
+
+ legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
+ if (legacy_binding && !using_generic_binding) {
+ if (!using_legacy_binding)
+ pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
+ using_legacy_binding = true;
+ } else if (!legacy_binding && !using_legacy_binding) {
+ using_generic_binding = true;
+ } else {
+ dev_err(dev, "not probing due to mismatched DT properties\n");
+ return -ENODEV;
+ }
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
@@ -1872,8 +1934,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
smmu->dev = dev;
- of_id = of_match_node(arm_smmu_of_match, dev->of_node);
- data = of_id->data;
+ data = of_device_get_match_data(dev);
smmu->version = data->version;
smmu->model = data->model;
@@ -1923,37 +1984,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (err)
return err;
- i = 0;
- smmu->masters = RB_ROOT;
-
- err = -ENOMEM;
- /* No need to zero the memory for masterspec */
- masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
- if (!masterspec)
- goto out_put_masters;
-
- of_for_each_phandle(&it, err, dev->of_node,
- "mmu-masters", "#stream-id-cells", 0) {
- int count = of_phandle_iterator_args(&it, masterspec->args,
- MAX_MASTER_STREAMIDS);
- masterspec->np = of_node_get(it.node);
- masterspec->args_count = count;
-
- err = register_smmu_master(smmu, dev, masterspec);
- if (err) {
- dev_err(dev, "failed to add master %s\n",
- masterspec->np->name);
- kfree(masterspec);
- goto out_put_masters;
- }
-
- i++;
- }
-
- dev_notice(dev, "registered %d master devices\n", i);
-
- kfree(masterspec);
-
parse_driver_options(smmu);
if (smmu->version == ARM_SMMU_V2 &&
@@ -1961,8 +1991,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
dev_err(dev,
"found only %d context interrupt(s) but %d required\n",
smmu->num_context_irqs, smmu->num_context_banks);
- err = -ENODEV;
- goto out_put_masters;
+ return -ENODEV;
}
for (i = 0; i < smmu->num_global_irqs; ++i) {
@@ -1974,59 +2003,39 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
if (err) {
dev_err(dev, "failed to request global IRQ %d (%u)\n",
i, smmu->irqs[i]);
- goto out_put_masters;
+ return err;
}
}
- INIT_LIST_HEAD(&smmu->list);
- spin_lock(&arm_smmu_devices_lock);
- list_add(&smmu->list, &arm_smmu_devices);
- spin_unlock(&arm_smmu_devices_lock);
-
+ of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+ platform_set_drvdata(pdev, smmu);
arm_smmu_device_reset(smmu);
- return 0;
-out_put_masters:
- for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
- struct arm_smmu_master *master
- = container_of(node, struct arm_smmu_master, node);
- of_node_put(master->of_node);
+ /* Oh, for a proper bus abstraction */
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
+ if (!iommu_present(&amba_bustype))
+ bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
+#ifdef CONFIG_PCI
+ if (!iommu_present(&pci_bus_type)) {
+ pci_request_acs();
+ bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
-
- return err;
+#endif
+ return 0;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
- int i;
- struct device *dev = &pdev->dev;
- struct arm_smmu_device *curr, *smmu = NULL;
- struct rb_node *node;
-
- spin_lock(&arm_smmu_devices_lock);
- list_for_each_entry(curr, &arm_smmu_devices, list) {
- if (curr->dev == dev) {
- smmu = curr;
- list_del(&smmu->list);
- break;
- }
- }
- spin_unlock(&arm_smmu_devices_lock);
+ struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
if (!smmu)
return -ENODEV;
- for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
- struct arm_smmu_master *master
- = container_of(node, struct arm_smmu_master, node);
- of_node_put(master->of_node);
- }
-
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
- dev_err(dev, "removing device with active domains!\n");
-
- for (i = 0; i < smmu->num_global_irqs; ++i)
- devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
+ dev_err(&pdev->dev, "removing device with active domains!\n");
/* Turn the thing off */
writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -2044,41 +2053,14 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void)
{
- struct device_node *np;
- int ret;
-
- /*
- * Play nice with systems that don't have an ARM SMMU by checking that
- * an ARM SMMU exists in the system before proceeding with the driver
- * and IOMMU bus operation registration.
- */
- np = of_find_matching_node(NULL, arm_smmu_of_match);
- if (!np)
- return 0;
-
- of_node_put(np);
-
- ret = platform_driver_register(&arm_smmu_driver);
- if (ret)
- return ret;
-
- /* Oh, for a proper bus abstraction */
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-
-#ifdef CONFIG_ARM_AMBA
- if (!iommu_present(&amba_bustype))
- bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-#endif
+ static bool registered;
+ int ret = 0;
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- pci_request_acs();
- bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ if (!registered) {
+ ret = platform_driver_register(&arm_smmu_driver);
+ registered = !ret;
}
-#endif
-
- return 0;
+ return ret;
}
static void __exit arm_smmu_exit(void)
@@ -2089,6 +2071,25 @@ static void __exit arm_smmu_exit(void)
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
+static int __init arm_smmu_of_init(struct device_node *np)
+{
+ int ret = arm_smmu_init();
+
+ if (ret)
+ return ret;
+
+ if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
+ return -ENODEV;
+
+ return 0;
+}
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
+IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
+
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 00c8a08d56e7..c5ab8667e6f2 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -25,10 +25,29 @@
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
+#include <linux/irq.h>
#include <linux/mm.h>
+#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+struct iommu_dma_msi_page {
+ struct list_head list;
+ dma_addr_t iova;
+ phys_addr_t phys;
+};
+
+struct iommu_dma_cookie {
+ struct iova_domain iovad;
+ struct list_head msi_page_list;
+ spinlock_t msi_lock;
+};
+
+static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
+{
+ return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+}
+
int iommu_dma_init(void)
{
return iova_cache_get();
@@ -43,15 +62,19 @@ int iommu_dma_init(void)
*/
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
- struct iova_domain *iovad;
+ struct iommu_dma_cookie *cookie;
if (domain->iova_cookie)
return -EEXIST;
- iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
- domain->iova_cookie = iovad;
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return -ENOMEM;
- return iovad ? 0 : -ENOMEM;
+ spin_lock_init(&cookie->msi_lock);
+ INIT_LIST_HEAD(&cookie->msi_page_list);
+ domain->iova_cookie = cookie;
+ return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
@@ -63,32 +86,58 @@ EXPORT_SYMBOL(iommu_get_dma_cookie);
*/
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
- struct iova_domain *iovad = domain->iova_cookie;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iommu_dma_msi_page *msi, *tmp;
- if (!iovad)
+ if (!cookie)
return;
- if (iovad->granule)
- put_iova_domain(iovad);
- kfree(iovad);
+ if (cookie->iovad.granule)
+ put_iova_domain(&cookie->iovad);
+
+ list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
+ list_del(&msi->list);
+ kfree(msi);
+ }
+ kfree(cookie);
domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
+static void iova_reserve_pci_windows(struct pci_dev *dev,
+ struct iova_domain *iovad)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+ struct resource_entry *window;
+ unsigned long lo, hi;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ if (resource_type(window->res) != IORESOURCE_MEM &&
+ resource_type(window->res) != IORESOURCE_IO)
+ continue;
+
+ lo = iova_pfn(iovad, window->res->start - window->offset);
+ hi = iova_pfn(iovad, window->res->end - window->offset);
+ reserve_iova(iovad, lo, hi);
+ }
+}
+
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
* @base: IOVA at which the mappable address space starts
* @size: Size of IOVA space
+ * @dev: Device the domain is being initialised for
*
* @base and @size should be exact multiples of IOMMU page granularity to
* avoid rounding surprises. If necessary, we reserve the page at address 0
* to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
* any change which could make prior IOVAs invalid will fail.
*/
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+ u64 size, struct device *dev)
{
- struct iova_domain *iovad = domain->iova_cookie;
+ struct iova_domain *iovad = cookie_iovad(domain);
unsigned long order, base_pfn, end_pfn;
if (!iovad)
@@ -124,6 +173,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
iovad->dma_32bit_pfn = end_pfn;
} else {
init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+ if (dev && dev_is_pci(dev))
+ iova_reserve_pci_windows(to_pci_dev(dev), iovad);
}
return 0;
}
@@ -155,7 +206,7 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
dma_addr_t dma_limit)
{
- struct iova_domain *iovad = domain->iova_cookie;
+ struct iova_domain *iovad = cookie_iovad(domain);
unsigned long shift = iova_shift(iovad);
unsigned long length = iova_align(iovad, size) >> shift;
@@ -171,7 +222,7 @@ static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
- struct iova_domain *iovad = domain->iova_cookie;
+ struct iova_domain *iovad = cookie_iovad(domain);
unsigned long shift = iova_shift(iovad);
unsigned long pfn = dma_addr >> shift;
struct iova *iova = find_iova(iovad, pfn);
@@ -294,7 +345,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = domain->iova_cookie;
+ struct iova_domain *iovad = cookie_iovad(domain);
struct iova *iova;
struct page **pages;
struct sg_table sgt;
@@ -386,7 +437,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
{
dma_addr_t dma_addr;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = domain->iova_cookie;
+ struct iova_domain *iovad = cookie_iovad(domain);
phys_addr_t phys = page_to_phys(page) + offset;
size_t iova_off = iova_offset(iovad, phys);
size_t len = iova_align(iovad, size + iova_off);
@@ -495,7 +546,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int prot)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = domain->iova_cookie;
+ struct iova_domain *iovad = cookie_iovad(domain);
struct iova *iova;
struct scatterlist *s, *prev = NULL;
dma_addr_t dma_addr;
@@ -587,3 +638,81 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == DMA_ERROR_CODE;
}
+
+static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
+ phys_addr_t msi_addr, struct iommu_domain *domain)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iommu_dma_msi_page *msi_page;
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iova *iova;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+ list_for_each_entry(msi_page, &cookie->msi_page_list, list)
+ if (msi_page->phys == msi_addr)
+ return msi_page;
+
+ msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
+ if (!msi_page)
+ return NULL;
+
+ iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
+ if (!iova)
+ goto out_free_page;
+
+ msi_page->phys = msi_addr;
+ msi_page->iova = iova_dma_addr(iovad, iova);
+ if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+ goto out_free_iova;
+
+ INIT_LIST_HEAD(&msi_page->list);
+ list_add(&msi_page->list, &cookie->msi_page_list);
+ return msi_page;
+
+out_free_iova:
+ __free_iova(iovad, iova);
+out_free_page:
+ kfree(msi_page);
+ return NULL;
+}
+
+void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_dma_cookie *cookie;
+ struct iommu_dma_msi_page *msi_page;
+ phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
+ unsigned long flags;
+
+ if (!domain || !domain->iova_cookie)
+ return;
+
+ cookie = domain->iova_cookie;
+
+ /*
+ * We disable IRQs to rule out a possible inversion against
+ * irq_desc_lock if, say, someone tries to retarget the affinity
+ * of an MSI from within an IPI handler.
+ */
+ spin_lock_irqsave(&cookie->msi_lock, flags);
+ msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
+ spin_unlock_irqrestore(&cookie->msi_lock, flags);
+
+ if (WARN_ON(!msi_page)) {
+ /*
+ * We're called from a void callback, so the best we can do is
+ * 'fail' by filling the message with obviously bogus values.
+ * Since we got this far due to an IOMMU being present, it's
+ * not like the existing address would have worked anyway...
+ */
+ msg->address_hi = ~0U;
+ msg->address_lo = ~0U;
+ msg->data = ~0U;
+ } else {
+ msg->address_hi = upper_32_bits(msi_page->iova);
+ msg->address_lo &= iova_mask(&cookie->iovad);
+ msg->address_lo += lower_32_bits(msi_page->iova);
+ }
+}
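
The rewrite at the end of iommu_dma_map_msi_msg() keeps the doorbell's offset within one IOMMU page but substitutes the IOVA of the mapped MSI page for the physical page address. A standalone sketch of the arithmetic, with hypothetical addresses and a 4K granule:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t granule = 0x1000;          /* IOMMU page size */
    uint64_t iova_mask = granule - 1;
    uint64_t msi_iova = 0xfff00000;     /* IOVA the MSI page is mapped at */
    uint32_t address_hi = 0x0;          /* raw doorbell address, as the */
    uint32_t address_lo = 0x08020040;   /* irqchip composed it */

    address_hi = (uint32_t)(msi_iova >> 32);
    address_lo &= (uint32_t)iova_mask;  /* keep the in-page offset 0x040 */
    address_lo += (uint32_t)msi_iova;   /* rebase onto the mapped IOVA */

    printf("doorbell now 0x%08x%08x\n", address_hi, address_lo);
    /* prints: doorbell now 0x00000000fff00040 */
    return 0;
}
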
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 33dcc29ec200..30808e91b775 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1345,8 +1345,8 @@ static int __init exynos_iommu_of_setup(struct device_node *np)
exynos_iommu_init();
pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ if (!pdev)
+ return -ENODEV;
/*
* use the first registered sysmmu device for performing
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ebb5bf3ddbd9..3965e73db51c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1711,6 +1711,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
if (!iommu->domains || !iommu->domain_ids)
return;
+again:
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
struct dmar_domain *domain;
@@ -1723,10 +1724,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
domain = info->domain;
- dmar_remove_one_dev_info(domain, info->dev);
+ __dmar_remove_one_dev_info(info);
- if (!domain_type_is_vm_or_si(domain))
+ if (!domain_type_is_vm_or_si(domain)) {
+ /*
+ * The domain_exit() function can't be called under
+ * device_domain_lock, as it takes this lock itself.
+ * So release the lock here and re-run the loop
+ * afterwards.
+ */
+ spin_unlock_irqrestore(&device_domain_lock, flags);
domain_exit(domain);
+ goto again;
+ }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
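
The "goto again" above is the classic drop-lock-and-restart shape: domain_exit() itself takes device_domain_lock, so the walker must release the lock before calling it, and once the lock is released the list cursor can no longer be trusted, hence the rescan from the top. A compilable pthreads sketch of the pattern (all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int needs_teardown[4] = { 1, 1, 0, 1 };

static void teardown(int i)   /* would deadlock if the caller held the lock */
{
    pthread_mutex_lock(&list_lock);
    needs_teardown[i] = 0;
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
again:
    pthread_mutex_lock(&list_lock);
    for (int i = 0; i < 4; i++) {
        if (!needs_teardown[i])
            continue;
        pthread_mutex_unlock(&list_lock);
        teardown(i);          /* safe: lock no longer held */
        goto again;           /* list may have changed; restart the walk */
    }
    pthread_mutex_unlock(&list_lock);
    printf("all torn down\n");
    return 0;
}
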
@@ -2452,20 +2462,15 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
return 0;
}
-/* domain is initialized */
-static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
+static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
struct device_domain_info *info = NULL;
- struct dmar_domain *domain, *tmp;
+ struct dmar_domain *domain = NULL;
struct intel_iommu *iommu;
u16 req_id, dma_alias;
unsigned long flags;
u8 bus, devfn;
- domain = find_domain(dev);
- if (domain)
- return domain;
-
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return NULL;
@@ -2487,9 +2492,9 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
}
spin_unlock_irqrestore(&device_domain_lock, flags);
- /* DMA alias already has a domain, uses it */
+ /* DMA alias already has a domain, use it */
if (info)
- goto found_domain;
+ goto out;
}
/* Allocate and initialize new domain for the device */
@@ -2501,28 +2506,67 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
return NULL;
}
- /* register PCI DMA alias device */
- if (dev_is_pci(dev) && req_id != dma_alias) {
- tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff, NULL, domain);
+out:
- if (!tmp || tmp != domain) {
- domain_exit(domain);
- domain = tmp;
- }
+ return domain;
+}
- if (!domain)
- return NULL;
+static struct dmar_domain *set_domain_for_dev(struct device *dev,
+ struct dmar_domain *domain)
+{
+ struct intel_iommu *iommu;
+ struct dmar_domain *tmp;
+ u16 req_id, dma_alias;
+ u8 bus, devfn;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return NULL;
+
+ req_id = ((u16)bus << 8) | devfn;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
+
+ /* register PCI DMA alias device */
+ if (req_id != dma_alias) {
+ tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+ dma_alias & 0xff, NULL, domain);
+
+ if (!tmp || tmp != domain)
+ return tmp;
+ }
}
-found_domain:
tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
+ if (!tmp || tmp != domain)
+ return tmp;
+
+ return domain;
+}
+
+static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
+{
+ struct dmar_domain *domain, *tmp;
+
+ domain = find_domain(dev);
+ if (domain)
+ goto out;
- if (!tmp || tmp != domain) {
+ domain = find_or_alloc_domain(dev, gaw);
+ if (!domain)
+ goto out;
+
+ tmp = set_domain_for_dev(dev, domain);
+ if (!tmp || domain != tmp) {
domain_exit(domain);
domain = tmp;
}
+out:
+
return domain;
}
@@ -3394,17 +3438,18 @@ static unsigned long intel_alloc_iova(struct device *dev,
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
{
+ struct dmar_domain *domain, *tmp;
struct dmar_rmrr_unit *rmrr;
- struct dmar_domain *domain;
struct device *i_dev;
int i, ret;
- domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain) {
- pr_err("Allocating domain for %s failed\n",
- dev_name(dev));
- return NULL;
- }
+ domain = find_domain(dev);
+ if (domain)
+ goto out;
+
+ domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ if (!domain)
+ goto out;
/* We have a new domain - setup possible RMRRs for the device */
rcu_read_lock();
@@ -3423,6 +3468,18 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
}
rcu_read_unlock();
+ tmp = set_domain_for_dev(dev, domain);
+ if (!tmp || domain != tmp) {
+ domain_exit(domain);
+ domain = tmp;
+ }
+
+out:
+
+ if (!domain)
+ pr_err("Allocating domain for %s failed\n", dev_name(dev));
+
return domain;
}
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index def8ca1c982d..f50e51c1a9c8 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -633,6 +633,10 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
{
struct arm_v7s_io_pgtable *data;
+#ifdef PHYS_OFFSET
+ if (upper_32_bits(PHYS_OFFSET))
+ return NULL;
+#endif
if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
return NULL;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b06d93594436..9a2f1960873b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -31,6 +31,7 @@
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
+#include <linux/property.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
@@ -1613,3 +1614,60 @@ out:
return ret;
}
+
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
+ const struct iommu_ops *ops)
+{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+ if (fwspec)
+ return ops == fwspec->ops ? 0 : -EINVAL;
+
+ fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return -ENOMEM;
+
+ of_node_get(to_of_node(iommu_fwnode));
+ fwspec->iommu_fwnode = iommu_fwnode;
+ fwspec->ops = ops;
+ dev->iommu_fwspec = fwspec;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_init);
+
+void iommu_fwspec_free(struct device *dev)
+{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+ if (fwspec) {
+ fwnode_handle_put(fwspec->iommu_fwnode);
+ kfree(fwspec);
+ dev->iommu_fwspec = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_free);
+
+int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
+{
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ size_t size;
+ int i;
+
+ if (!fwspec)
+ return -EINVAL;
+
+ size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
+ if (size > sizeof(*fwspec)) {
+ fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
+ if (!fwspec)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_ids; i++)
+ fwspec->ids[fwspec->num_ids + i] = ids[i];
+
+ fwspec->num_ids += num_ids;
+ dev->iommu_fwspec = fwspec;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
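
iommu_fwspec_add_ids() grows the fwspec in place: because ids[] is a flexible array member, offsetof() yields the exact byte size needed for N ids, and krealloc() extends the allocation only when that exceeds the current size. A userspace sketch of the same pattern, with realloc() standing in for krealloc() and the error path simplified:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fwspec_sketch {
    int num_ids;
    unsigned int ids[];   /* flexible array member */
};

static struct fwspec_sketch *add_ids(struct fwspec_sketch *fw,
                                     const unsigned int *ids, int n)
{
    /* GCC/Clang accept a runtime index in offsetof(), as the kernel does */
    size_t size = offsetof(struct fwspec_sketch, ids[fw->num_ids + n]);

    fw = realloc(fw, size);
    if (!fw)
        return NULL;
    memcpy(&fw->ids[fw->num_ids], ids, n * sizeof(*ids));
    fw->num_ids += n;
    return fw;
}

int main(void)
{
    struct fwspec_sketch *fw = calloc(1, sizeof(*fw));
    unsigned int sids[] = { 0x42, 0x43 };

    fw = add_ids(fw, sids, 2);
    printf("%d ids, first 0x%x\n", fw->num_ids, fw->ids[0]);
    free(fw);
    return 0;
}
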
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 2fdbac67a77f..ace331da6459 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -636,7 +636,7 @@ static int ipmmu_add_device(struct device *dev)
spin_unlock(&ipmmu_devices_lock);
if (ret < 0)
- return -ENODEV;
+ goto error;
for (i = 0; i < num_utlbs; ++i) {
if (utlbs[i] >= mmu->num_utlbs) {
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 57f23eaaa2f9..5b82862f571f 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -22,6 +22,7 @@
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
+#include <linux/of_pci.h>
#include <linux/slab.h>
static const struct of_device_id __iommu_of_table_sentinel
@@ -134,6 +135,47 @@ const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
return ops;
}
+static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct of_phandle_args *iommu_spec = data;
+
+ iommu_spec->args[0] = alias;
+ return iommu_spec->np == pdev->bus->dev.of_node;
+}
+
+static const struct iommu_ops
+*of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np)
+{
+ const struct iommu_ops *ops;
+ struct of_phandle_args iommu_spec;
+
+ /*
+ * Start by tracing the RID alias down the PCI topology as
+ * far as the host bridge whose OF node we have...
+ * (we're not even attempting to handle multi-alias devices yet)
+ */
+ iommu_spec.args_count = 1;
+ iommu_spec.np = bridge_np;
+ pci_for_each_dma_alias(pdev, __get_pci_rid, &iommu_spec);
+ /*
+ * ...then find out what that becomes once it escapes the PCI
+ * bus into the system beyond, and which IOMMU it ends up at.
+ */
+ iommu_spec.np = NULL;
+ if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
+ "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
+ return NULL;
+
+ ops = of_iommu_get_ops(iommu_spec.np);
+ if (!ops || !ops->of_xlate ||
+ iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
+ ops->of_xlate(&pdev->dev, &iommu_spec))
+ ops = NULL;
+
+ of_node_put(iommu_spec.np);
+ return ops;
+}
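
of_pci_map_rid() resolves the aliased requester ID through the host bridge's "iommu-map" property, whose entries describe linear RID windows. A standalone sketch assuming the generic binding semantics, where each entry is <rid-base iommu-phandle out-base length> and the RID is first ANDed with "iommu-map-mask" (entry values here are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct map_entry { uint32_t rid_base, out_base, length; };

static int map_rid(const struct map_entry *map, int n, uint32_t mask,
                   uint32_t rid, uint32_t *out)
{
    rid &= mask;
    for (int i = 0; i < n; i++) {
        if (rid >= map[i].rid_base &&
            rid < map[i].rid_base + map[i].length) {
            *out = map[i].out_base + (rid - map[i].rid_base);
            return 0;
        }
    }
    return -1;   /* no window matched: no IOMMU translation for this RID */
}

int main(void)
{
    /* iommu-map = <0x0 &smmu 0x100 0x10000>; iommu-map-mask = <0xffff>; */
    struct map_entry map[] = { { 0x0, 0x100, 0x10000 } };
    uint32_t sid;

    if (!map_rid(map, 1, 0xffff, 0x0008, &sid))
        printf("RID 0x0008 -> stream ID 0x%x\n", sid); /* 0x108 */
    return 0;
}
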
+
const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
@@ -142,12 +184,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
const struct iommu_ops *ops = NULL;
int idx = 0;
- /*
- * We can't do much for PCI devices without knowing how
- * device IDs are wired up from the PCI bus to the IOMMU.
- */
if (dev_is_pci(dev))
- return NULL;
+ return of_pci_iommu_configure(to_pci_dev(dev), master_np);
/*
* We don't currently walk up the tree looking for a parent IOMMU.
@@ -160,7 +198,9 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
np = iommu_spec.np;
ops = of_iommu_get_ops(np);
- if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ if (!ops || !ops->of_xlate ||
+ iommu_fwspec_init(dev, &np->fwnode, ops) ||
+ ops->of_xlate(dev, &iommu_spec))
goto err_put_node;
of_node_put(np);
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index c0e7b624ce54..12102448fddd 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
idev->id_vendor, idev->id_device);
}
-ipack_device_attr(id_format, "0x%hhu\n");
+ipack_device_attr(id_format, "0x%hhx\n");
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(id_device);
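
The one-character ipack fix matters because %hhu prints the byte in decimal, so the literal "0x" prefix misrepresented the base; %hhx actually prints hex. A two-line demonstration:

#include <stdio.h>

int main(void)
{
    unsigned char id_format = 0x1a;

    printf("0x%hhu\n", id_format); /* prints 0x26: decimal behind a hex prefix */
    printf("0x%hhx\n", id_format); /* prints 0x1a: what the sysfs file intends */
    return 0;
}
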
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 82b0b5daf3f5..bc0af3307bbf 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -158,8 +158,8 @@ config PIC32_EVIC
select IRQ_DOMAIN
config JCORE_AIC
- bool "J-Core integrated AIC"
- depends on OF && (SUPERH || COMPILE_TEST)
+ bool "J-Core integrated AIC" if COMPILE_TEST
+ depends on OF
select IRQ_DOMAIN
help
Support for the J-Core integrated AIC.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b372e792adc2..e4dbfc85abdb 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
-obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o
+obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index b844c89a9506..daa4ae89e466 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -52,7 +52,6 @@
#include <linux/bitops.h>
#include <linux/cpumask.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 0fea985ef1dc..353c54986211 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -12,7 +12,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 0ec92631e23c..64c2692070ef 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 1d4a5b46d9ae..bddf169c4b37 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/kconfig.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/of.h>
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
index efbf0e4304b7..2a7a38830a8d 100644
--- a/drivers/irqchip/irq-eznps.c
+++ b/drivers/irqchip/irq-eznps.c
@@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd)
nps_ack_gic();
}
-static void nps400_irq_eoi(struct irq_data *irqd)
+static void nps400_irq_ack(struct irq_data *irqd)
{
unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
@@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = {
.name = "NPS400 IC",
.irq_mask = nps400_irq_mask,
.irq_unmask = nps400_irq_unmask,
- .irq_eoi = nps400_irq_eoi,
+ .irq_ack = nps400_irq_ack,
};
static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
@@ -135,7 +135,7 @@ static const struct irq_domain_ops nps400_irq_ops = {
static int __init nps400_of_init(struct device_node *node,
struct device_node *parent)
{
- static struct irq_domain *nps400_root_domain;
+ struct irq_domain *nps400_root_domain;
if (parent) {
pr_err("DeviceTree incore ic not a root irq controller\n");
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 35eb7ac5d21f..863e073c6f7f 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) "GICv2m: " fmt
#include <linux/acpi.h>
+#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
@@ -108,6 +109,8 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
msg->data -= v2m->spi_offset;
+
+ iommu_dma_map_msi_msg(data->irq, msg);
}
static struct irq_chip gicv2m_irq_chip = {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 35c851c14e49..c5dee300e8a3 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -19,6 +19,7 @@
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
+#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/acpi_iort.h>
@@ -659,6 +660,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
msg->address_lo = addr & ((1UL << 32) - 1);
msg->address_hi = addr >> 32;
msg->data = its_get_event_id(d);
+
+ iommu_dma_map_msi_msg(d->irq, msg);
}
static struct irq_chip its_irq_chip = {
@@ -1020,7 +1023,7 @@ static void its_free_tables(struct its_node *its)
static int its_alloc_tables(struct its_node *its)
{
- u64 typer = readq_relaxed(its->base + GITS_TYPER);
+ u64 typer = gic_read_typer(its->base + GITS_TYPER);
u32 ids = GITS_TYPER_DEVBITS(typer);
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_WaWb;
@@ -1195,7 +1198,7 @@ static void its_cpu_init_collection(void)
* We now have to bind each collection to its target
* redistributor.
*/
- if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+ if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
/*
* This ITS wants the physical address of the
* redistributor.
@@ -1205,7 +1208,7 @@ static void its_cpu_init_collection(void)
/*
* This ITS wants a linear CPU number.
*/
- target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
+ target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
@@ -1688,7 +1691,7 @@ static int __init its_probe_one(struct resource *res,
INIT_LIST_HEAD(&its->its_device_list);
its->base = its_base;
its->phys_base = res->start;
- its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
its->numa_node = numa_node;
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
@@ -1760,7 +1763,7 @@ out_unmap:
static bool gic_rdists_supports_plpis(void)
{
- return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
+ return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
int its_cpu_init(void)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9b81bd8b929c..19d642eae096 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable)
return; /* No PM support in this redistributor */
}
- while (count--) {
+ while (--count) {
val = readl_relaxed(rbase + GICR_WAKER);
if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
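
The pre-decrement form matters for the timeout test that follows loops like this in the surrounding code: with post-decrement, the counter underflows past zero once the loop exhausts, so a later "if (!count)" check can never fire. A standalone illustration (not from the patch):

        #include <stdio.h>

        int main(void)
        {
                unsigned int count = 3;

                while (count--)
                        ;                       /* body runs 3 times */
                printf("post: %u\n", count);    /* UINT_MAX, not 0 */

                count = 3;
                while (--count)
                        ;                       /* body runs 2 times */
                printf("pre:  %u\n", count);    /* 0: timeout is detectable */
                return 0;
        }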
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 58e5b4e87056..d6c404b3584d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1279,7 +1279,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
*/
*base += 0xf000;
cpuif_res.start += 0xf000;
- pr_warn("GIC: Adjusting CPU interface base to %pa",
+ pr_warn("GIC: Adjusting CPU interface base to %pa\n",
&cpuif_res.start);
}
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 6b304eb39bd2..1aec12c6d9ac 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -38,6 +38,7 @@ static void disable_8259A_irq(struct irq_data *d);
static void enable_8259A_irq(struct irq_data *d);
static void mask_and_ack_8259A(struct irq_data *d);
static void init_8259A(int auto_eoi);
+static int (*i8259_poll)(void) = i8259_irq;
static struct irq_chip i8259A_chip = {
.name = "XT-PIC",
@@ -51,6 +52,11 @@ static struct irq_chip i8259A_chip = {
* 8259A PIC functions to handle ISA devices:
*/
+void i8259_set_poll(int (*poll)(void))
+{
+ i8259_poll = poll;
+}
+
/*
* This contains the irq mask for both 8259A irq controllers,
*/
@@ -89,24 +95,6 @@ static void enable_8259A_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
-int i8259A_irq_pending(unsigned int irq)
-{
- unsigned int mask;
- unsigned long flags;
- int ret;
-
- irq -= I8259A_IRQ_BASE;
- mask = 1 << irq;
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- if (irq < 8)
- ret = inb(PIC_MASTER_CMD) & mask;
- else
- ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
- return ret;
-}
-
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
@@ -355,7 +343,7 @@ void __init init_i8259_irqs(void)
static void i8259_irq_dispatch(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
- int hwirq = i8259_irq();
+ int hwirq = i8259_poll();
unsigned int irq;
if (hwirq < 0)
@@ -370,13 +358,15 @@ int __init i8259_of_init(struct device_node *node, struct device_node *parent)
struct irq_domain *domain;
unsigned int parent_irq;
+ domain = __init_i8259_irqs(node);
+
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
pr_err("Failed to map i8259 parent IRQ\n");
+ irq_domain_remove(domain);
return -ENODEV;
}
- domain = __init_i8259_irqs(node);
irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
domain);
return 0;
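
The new i8259_set_poll() hook lets a platform substitute its own pending-IRQ poll routine for the default i8259_irq(). A hypothetical caller might look like this (names invented for illustration):

        static int my_board_i8259_poll(void)
        {
                /* consult a board-specific pending register instead of
                 * issuing poll commands to the PIC */
                return my_board_read_pending_irq();
        }

        static void __init my_board_init_irq(void)
        {
                init_i8259_irqs();
                i8259_set_poll(my_board_i8259_poll);
        }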
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 84b01dec277d..033bccb41455 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -25,12 +25,30 @@
static struct irq_chip jcore_aic;
+/*
+ * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do
+ * not distinguish or use distinct irq number ranges for per-cpu event
+ * interrupts (timer, IPI). Since information to determine whether a
+ * particular irq number should be treated as per-cpu is not available
+ * at mapping time, we use a wrapper handler function which chooses
+ * the right handler at runtime based on whether IRQF_PERCPU was used
+ * when requesting the irq.
+ */
+
+static void handle_jcore_irq(struct irq_desc *desc)
+{
+ if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+ handle_percpu_irq(desc);
+ else
+ handle_simple_irq(desc);
+}
+
static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct irq_chip *aic = d->host_data;
- irq_set_chip_and_handler(irq, aic, handle_simple_irq);
+ irq_set_chip_and_handler(irq, aic, handle_jcore_irq);
return 0;
}
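
With the wrapper in place, whether an interrupt is treated as per-cpu is decided by the flags the consumer passes at request time. A hypothetical per-cpu consumer (names invented for illustration) would look like:

        static irqreturn_t jcore_timer_isr(int irq, void *dev_id)
        {
                /* per-cpu tick handling would go here */
                return IRQ_HANDLED;
        }

        static int __init jcore_timer_setup(unsigned int irq)
        {
                /* IRQF_PERCPU marks the descriptor per-cpu, so
                 * handle_jcore_irq() routes it to handle_percpu_irq() */
                return request_irq(irq, jcore_timer_isr,
                                   IRQF_TIMER | IRQF_PERCPU,
                                   "jcore_timer", NULL);
        }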
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 8c38b3d92e1c..0cdd923d1535 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -436,7 +436,6 @@ static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
/**
* meta_intc_irq_demux() - external irq de-multiplexer
- * @irq: the virtual interrupt number
* @desc: the interrupt description structure for this irq
*
* The cpu receives an interrupt on TR2 when a SoC interrupt has occurred. It is
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index f811a7de5857..74192f62dd67 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -498,7 +498,6 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
* vic_init_cascaded() - initialise a cascaded vectored interrupt controller
* @base: iomem base address
* @parent_irq: the parent IRQ we're cascaded off
- * @irq_start: starting interrupt number, must be muliple of 32
* @vic_sources: bitmask of interrupt sources to allow
* @resume_sources: bitmask of interrupt sources to allow for resume
*
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 61c68a1f054a..2f5d5f4a4c75 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -4,7 +4,7 @@
menuconfig NVM
bool "Open-Channel SSD target support"
- depends on BLOCK
+ depends on BLOCK && HAS_DMA
help
Say Y here to enable Open-channel SSDs.
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index a7a0a22cf1a5..1f6b6521016a 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@
# Makefile for Open-Channel SSDs.
#
-obj-$(CONFIG_NVM) := core.o sysblk.o
+obj-$(CONFIG_NVM) := core.o sysblk.o sysfs.o
obj-$(CONFIG_NVM_GENNVM) += gennvm.o
obj-$(CONFIG_NVM_RRPC) += rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index c784ddcd4405..1cac0f8bc0dc 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -27,6 +27,8 @@
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
+#include "lightnvm.h"
+
static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
@@ -581,6 +583,8 @@ static int nvm_core_init(struct nvm_dev *dev)
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
+ blk_queue_logical_block_size(dev->q, dev->sec_size);
+
return 0;
err_fmtype:
kfree(dev->lun_map);
@@ -596,15 +600,19 @@ static void nvm_free_mgr(struct nvm_dev *dev)
dev->mt = NULL;
}
-static void nvm_free(struct nvm_dev *dev)
+void nvm_free(struct nvm_dev *dev)
{
if (!dev)
return;
nvm_free_mgr(dev);
+ if (dev->dma_pool)
+ dev->ops->destroy_dma_pool(dev->dma_pool);
+
kfree(dev->lptbl);
kfree(dev->lun_map);
+ kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
@@ -651,30 +659,19 @@ err:
static void nvm_exit(struct nvm_dev *dev)
{
- if (dev->dma_pool)
- dev->ops->destroy_dma_pool(dev->dma_pool);
- nvm_free(dev);
+ nvm_sysfs_unregister_dev(dev);
+}
- pr_info("nvm: successfully unloaded\n");
+struct nvm_dev *nvm_alloc_dev(int node)
+{
+ return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
+EXPORT_SYMBOL(nvm_alloc_dev);
-int nvm_register(struct request_queue *q, char *disk_name,
- struct nvm_dev_ops *ops)
+int nvm_register(struct nvm_dev *dev)
{
- struct nvm_dev *dev;
int ret;
- if (!ops->identity)
- return -EINVAL;
-
- dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->q = q;
- dev->ops = ops;
- strncpy(dev->name, disk_name, DISK_NAME_LEN);
-
ret = nvm_init(dev);
if (ret)
goto err_init;
@@ -694,6 +691,10 @@ int nvm_register(struct request_queue *q, char *disk_name,
}
}
+ ret = nvm_sysfs_register_dev(dev);
+ if (ret)
+ goto err_ppalist;
+
if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
ret = nvm_get_sysblock(dev, &dev->sb);
if (!ret)
@@ -710,31 +711,21 @@ int nvm_register(struct request_queue *q, char *disk_name,
up_write(&nvm_lock);
return 0;
+err_ppalist:
+ dev->ops->destroy_dma_pool(dev->dma_pool);
err_init:
kfree(dev->lun_map);
- kfree(dev);
return ret;
}
EXPORT_SYMBOL(nvm_register);
-void nvm_unregister(char *disk_name)
+void nvm_unregister(struct nvm_dev *dev)
{
- struct nvm_dev *dev;
-
down_write(&nvm_lock);
- dev = nvm_find_nvm_dev(disk_name);
- if (!dev) {
- pr_err("nvm: could not find device %s to unregister\n",
- disk_name);
- up_write(&nvm_lock);
- return;
- }
-
list_del(&dev->devices);
up_write(&nvm_lock);
nvm_exit(dev);
- kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
diff --git a/drivers/lightnvm/lightnvm.h b/drivers/lightnvm/lightnvm.h
new file mode 100644
index 000000000000..305c181509a6
--- /dev/null
+++ b/drivers/lightnvm/lightnvm.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 CNEX Labs. All rights reserved.
+ * Initial release: Matias Bjorling <matias@cnexlabs.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#ifndef LIGHTNVM_H
+#define LIGHTNVM_H
+
+#include <linux/lightnvm.h>
+
+/* core -> sysfs.c */
+int __must_check nvm_sysfs_register_dev(struct nvm_dev *);
+void nvm_sysfs_unregister_dev(struct nvm_dev *);
+int nvm_sysfs_register(void);
+void nvm_sysfs_unregister(void);
+
+/* sysfs -> core */
+void nvm_free(struct nvm_dev *);
+
+#endif
diff --git a/drivers/lightnvm/sysfs.c b/drivers/lightnvm/sysfs.c
new file mode 100644
index 000000000000..0338c27ab95a
--- /dev/null
+++ b/drivers/lightnvm/sysfs.c
@@ -0,0 +1,198 @@
+#include <linux/kernel.h>
+#include <linux/lightnvm.h>
+#include <linux/miscdevice.h>
+#include <linux/kobject.h>
+#include <linux/blk-mq.h>
+
+#include "lightnvm.h"
+
+static ssize_t nvm_dev_attr_show(struct device *dev,
+ struct device_attribute *dattr, char *page)
+{
+ struct nvm_dev *ndev = container_of(dev, struct nvm_dev, dev);
+ struct nvm_id *id = &ndev->identity;
+ struct nvm_id_group *grp = &id->groups[0];
+ struct attribute *attr = &dattr->attr;
+
+ if (strcmp(attr->name, "version") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
+ } else if (strcmp(attr->name, "vendor_opcode") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
+ } else if (strcmp(attr->name, "capabilities") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
+ } else if (strcmp(attr->name, "device_mode") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
+ } else if (strcmp(attr->name, "media_manager") == 0) {
+ if (!ndev->mt)
+ return scnprintf(page, PAGE_SIZE, "%s\n", "none");
+ return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
+ } else if (strcmp(attr->name, "ppa_format") == 0) {
+ return scnprintf(page, PAGE_SIZE,
+ "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ id->ppaf.ch_offset, id->ppaf.ch_len,
+ id->ppaf.lun_offset, id->ppaf.lun_len,
+ id->ppaf.pln_offset, id->ppaf.pln_len,
+ id->ppaf.blk_offset, id->ppaf.blk_len,
+ id->ppaf.pg_offset, id->ppaf.pg_len,
+ id->ppaf.sect_offset, id->ppaf.sect_len);
+ } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
+ } else if (strcmp(attr->name, "flash_media_type") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
+ } else if (strcmp(attr->name, "num_channels") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
+ } else if (strcmp(attr->name, "num_luns") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
+ } else if (strcmp(attr->name, "num_planes") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
+ } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+ } else if (strcmp(attr->name, "num_pages") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
+ } else if (strcmp(attr->name, "page_size") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
+ } else if (strcmp(attr->name, "hw_sector_size") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
+ } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
+ } else if (strcmp(attr->name, "read_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
+ } else if (strcmp(attr->name, "read_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
+ } else if (strcmp(attr->name, "prog_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
+ } else if (strcmp(attr->name, "prog_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
+ } else if (strcmp(attr->name, "erase_typ") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
+ } else if (strcmp(attr->name, "erase_max") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
+ } else if (strcmp(attr->name, "multiplane_modes") == 0) {
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
+ } else if (strcmp(attr->name, "media_capabilities") == 0) {
+ return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
+ } else if (strcmp(attr->name, "max_phys_secs") == 0) {
+ return scnprintf(page, PAGE_SIZE, "%u\n",
+ ndev->ops->max_phys_sect);
+ } else {
+ return scnprintf(page,
+ PAGE_SIZE,
+ "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
+ attr->name);
+ }
+}
+
+#define NVM_DEV_ATTR_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
+
+static NVM_DEV_ATTR_RO(version);
+static NVM_DEV_ATTR_RO(vendor_opcode);
+static NVM_DEV_ATTR_RO(capabilities);
+static NVM_DEV_ATTR_RO(device_mode);
+static NVM_DEV_ATTR_RO(ppa_format);
+static NVM_DEV_ATTR_RO(media_manager);
+
+static NVM_DEV_ATTR_RO(media_type);
+static NVM_DEV_ATTR_RO(flash_media_type);
+static NVM_DEV_ATTR_RO(num_channels);
+static NVM_DEV_ATTR_RO(num_luns);
+static NVM_DEV_ATTR_RO(num_planes);
+static NVM_DEV_ATTR_RO(num_blocks);
+static NVM_DEV_ATTR_RO(num_pages);
+static NVM_DEV_ATTR_RO(page_size);
+static NVM_DEV_ATTR_RO(hw_sector_size);
+static NVM_DEV_ATTR_RO(oob_sector_size);
+static NVM_DEV_ATTR_RO(read_typ);
+static NVM_DEV_ATTR_RO(read_max);
+static NVM_DEV_ATTR_RO(prog_typ);
+static NVM_DEV_ATTR_RO(prog_max);
+static NVM_DEV_ATTR_RO(erase_typ);
+static NVM_DEV_ATTR_RO(erase_max);
+static NVM_DEV_ATTR_RO(multiplane_modes);
+static NVM_DEV_ATTR_RO(media_capabilities);
+static NVM_DEV_ATTR_RO(max_phys_secs);
+
+#define NVM_DEV_ATTR(_name) (dev_attr_##_name)
+
+static struct attribute *nvm_dev_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_vendor_opcode.attr,
+ &dev_attr_capabilities.attr,
+ &dev_attr_device_mode.attr,
+ &dev_attr_media_manager.attr,
+
+ &dev_attr_ppa_format.attr,
+ &dev_attr_media_type.attr,
+ &dev_attr_flash_media_type.attr,
+ &dev_attr_num_channels.attr,
+ &dev_attr_num_luns.attr,
+ &dev_attr_num_planes.attr,
+ &dev_attr_num_blocks.attr,
+ &dev_attr_num_pages.attr,
+ &dev_attr_page_size.attr,
+ &dev_attr_hw_sector_size.attr,
+ &dev_attr_oob_sector_size.attr,
+ &dev_attr_read_typ.attr,
+ &dev_attr_read_max.attr,
+ &dev_attr_prog_typ.attr,
+ &dev_attr_prog_max.attr,
+ &dev_attr_erase_typ.attr,
+ &dev_attr_erase_max.attr,
+ &dev_attr_multiplane_modes.attr,
+ &dev_attr_media_capabilities.attr,
+ &dev_attr_max_phys_secs.attr,
+ NULL,
+};
+
+static struct attribute_group nvm_dev_attr_group = {
+ .name = "lightnvm",
+ .attrs = nvm_dev_attrs,
+};
+
+static const struct attribute_group *nvm_dev_attr_groups[] = {
+ &nvm_dev_attr_group,
+ NULL,
+};
+
+static void nvm_dev_release(struct device *device)
+{
+ struct nvm_dev *dev = container_of(device, struct nvm_dev, dev);
+ struct request_queue *q = dev->q;
+
+ pr_debug("nvm/sysfs: `nvm_dev_release`\n");
+
+ blk_mq_unregister_dev(device, q);
+
+ nvm_free(dev);
+}
+
+static struct device_type nvm_type = {
+ .name = "lightnvm",
+ .groups = nvm_dev_attr_groups,
+ .release = nvm_dev_release,
+};
+
+int nvm_sysfs_register_dev(struct nvm_dev *dev)
+{
+ int ret;
+
+ if (!dev->parent_dev)
+ return 0;
+
+ dev->dev.parent = dev->parent_dev;
+ dev_set_name(&dev->dev, "%s", dev->name);
+ dev->dev.type = &nvm_type;
+ device_initialize(&dev->dev);
+ ret = device_add(&dev->dev);
+
+ if (!ret)
+ blk_mq_register_dev(&dev->dev, dev->q);
+
+ return ret;
+}
+
+void nvm_sysfs_unregister_dev(struct nvm_dev *dev)
+{
+ if (dev && dev->parent_dev)
+ kobject_put(&dev->dev.kobj);
+}
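
Since the attribute group is named "lightnvm" and the device is parented to the underlying disk, the attributes surface under that disk's sysfs directory. A hypothetical userspace reader (the path assumes an NVMe-backed device named nvme0n1; adjust to your system):

        #include <stdio.h>

        int main(void)
        {
                char buf[64];
                FILE *f = fopen("/sys/block/nvme0n1/lightnvm/num_channels", "r");

                if (!f)
                        return 1;
                if (fgets(buf, sizeof(buf), f))
                        printf("num_channels: %s", buf);
                fclose(f);
                return 0;
        }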
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index b6819f0fc608..3f041b187033 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -236,7 +236,7 @@ static void macio_create_fixup_irq(struct macio_dev *dev, int index,
unsigned int irq;
irq = irq_create_mapping(NULL, line);
- if (irq != NO_IRQ) {
+ if (irq) {
dev->interrupt[index].start = irq;
dev->interrupt[index].flags = IORESOURCE_IRQ;
dev->interrupt[index].name = dev_name(&dev->ofdev.dev);
@@ -299,7 +299,7 @@ static void macio_setup_interrupts(struct macio_dev *dev)
break;
res = &dev->interrupt[j];
irq = irq_of_parse_and_map(np, i++);
- if (irq == NO_IRQ)
+ if (!irq)
break;
res->start = irq;
res->flags = IORESOURCE_IRQ;
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 465c52219639..775527135b93 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -427,7 +427,7 @@ static int rackmeter_probe(struct macio_dev* mdev,
rm->irq = macio_irq(mdev, 1);
#else
rm->irq = irq_of_parse_and_map(i2s, 1);
- if (rm->irq == NO_IRQ ||
+ if (!rm->irq ||
of_address_to_resource(i2s, 0, &ri2s) ||
of_address_to_resource(i2s, 1, &rdma)) {
printk(KERN_ERR
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index d6f72c826c1c..08edb2c25b60 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -279,7 +279,7 @@ int smu_queue_cmd(struct smu_cmd *cmd)
spin_unlock_irqrestore(&smu->lock, flags);
/* Workaround for early calls when irq isn't available */
- if (!smu_irq_inited || smu->db_irq == NO_IRQ)
+ if (!smu_irq_inited || !smu->db_irq)
smu_spinwait_cmd(cmd);
return 0;
@@ -498,8 +498,8 @@ int __init smu_init (void)
INIT_LIST_HEAD(&smu->cmd_list);
INIT_LIST_HEAD(&smu->cmd_i2c_list);
smu->of_node = np;
- smu->db_irq = NO_IRQ;
- smu->msg_irq = NO_IRQ;
+ smu->db_irq = 0;
+ smu->msg_irq = 0;
/* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
* 32 bits value safely
@@ -587,13 +587,13 @@ static int smu_late_init(void)
if (smu->db_node) {
smu->db_irq = irq_of_parse_and_map(smu->db_node, 0);
- if (smu->db_irq == NO_IRQ)
+ if (!smu->db_irq)
printk(KERN_ERR "smu: failed to map irq for node %s\n",
smu->db_node->full_name);
}
if (smu->msg_node) {
smu->msg_irq = irq_of_parse_and_map(smu->msg_node, 0);
- if (smu->msg_irq == NO_IRQ)
+ if (!smu->msg_irq)
printk(KERN_ERR "smu: failed to map irq for node %s\n",
smu->msg_node->full_name);
}
@@ -602,23 +602,23 @@ static int smu_late_init(void)
* Try to request the interrupts
*/
- if (smu->db_irq != NO_IRQ) {
+ if (smu->db_irq) {
if (request_irq(smu->db_irq, smu_db_intr,
IRQF_SHARED, "SMU doorbell", smu) < 0) {
printk(KERN_WARNING "SMU: can't "
"request interrupt %d\n",
smu->db_irq);
- smu->db_irq = NO_IRQ;
+ smu->db_irq = 0;
}
}
- if (smu->msg_irq != NO_IRQ) {
+ if (smu->msg_irq) {
if (request_irq(smu->msg_irq, smu_msg_intr,
IRQF_SHARED, "SMU message", smu) < 0) {
printk(KERN_WARNING "SMU: can't "
"request interrupt %d\n",
smu->msg_irq);
- smu->msg_irq = NO_IRQ;
+ smu->msg_irq = 0;
}
}
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index bad18130f125..2088e23a8002 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -209,7 +209,7 @@ static int __init via_cuda_start(void)
cuda_irq = IRQ_MAC_ADB;
#else
cuda_irq = irq_of_parse_and_map(vias, 0);
- if (cuda_irq == NO_IRQ) {
+ if (!cuda_irq) {
printk(KERN_ERR "via-cuda: can't map interrupts for %s\n",
vias->full_name);
return -ENODEV;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index f8b6d1403c16..91081dcdc272 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -145,7 +145,7 @@ static int pmu_fully_inited;
static int pmu_has_adb;
static struct device_node *gpio_node;
static unsigned char __iomem *gpio_reg;
-static int gpio_irq = NO_IRQ;
+static int gpio_irq = 0;
static int gpio_irq_enabled = -1;
static volatile int pmu_suspended;
static spinlock_t pmu_lock;
@@ -402,7 +402,7 @@ static int __init via_pmu_start(void)
batt_req.complete = 1;
irq = irq_of_parse_and_map(vias, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
printk(KERN_ERR "via-pmu: can't map interrupt\n");
return -ENODEV;
}
@@ -424,7 +424,7 @@ static int __init via_pmu_start(void)
if (gpio_node)
gpio_irq = irq_of_parse_and_map(gpio_node, 0);
- if (gpio_irq != NO_IRQ) {
+ if (gpio_irq) {
if (request_irq(gpio_irq, gpio1_interrupt,
IRQF_NO_SUSPEND, "GPIO1 ADB",
(void *)0))
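
All of the macintosh hunks above converge on the same idiom: virq 0 means "no interrupt" in the generic IRQ layer, so mapping results can be tested directly instead of being compared against the legacy NO_IRQ constant. A minimal sketch of the idiom (illustrative only):

        static int example_map_irq(struct device_node *np)
        {
                unsigned int irq = irq_of_parse_and_map(np, 0);

                if (!irq)               /* 0 plays the role of the old NO_IRQ */
                        return -ENODEV;
                return irq;
        }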
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 08c87fadca8c..1f32688c312d 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -65,6 +65,7 @@
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <acpi/pcc.h>
#include "mailbox.h"
@@ -267,6 +268,8 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
chan->txdone_method |= TXDONE_BY_ACK;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
if (pcc_doorbell_irq[subspace_id] > 0) {
int rc;
@@ -275,12 +278,11 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
if (unlikely(rc)) {
dev_err(dev, "failed to register PCC interrupt %d\n",
pcc_doorbell_irq[subspace_id]);
+ pcc_mbox_free_channel(chan);
chan = ERR_PTR(rc);
}
}
- spin_unlock_irqrestore(&chan->lock, flags);
-
return chan;
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
@@ -304,20 +306,19 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
return;
}
+ if (pcc_doorbell_irq[id] > 0)
+ devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
+
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
chan->txdone_method = TXDONE_BY_POLL;
- if (pcc_doorbell_irq[id] > 0)
- devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
-
spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
-
/**
* pcc_send_data - Called from Mailbox Controller code. Used
* here only to ring the channel doorbell. The PCC client
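
The reordering in the two pcc hunks above follows the usual rule that request_irq() and devm_free_irq() may sleep and therefore must not run under spin_lock_irqsave(). The resulting shape, sketched generically (illustrative, not the pcc code verbatim):

        static void teardown_channel(struct mbox_chan *chan, int irq)
        {
                unsigned long flags;

                if (irq > 0)            /* may sleep: do it unlocked */
                        devm_free_irq(chan->mbox->dev, irq, chan);

                spin_lock_irqsave(&chan->lock, flags);
                chan->cl = NULL;
                chan->active_req = NULL;
                spin_unlock_irqrestore(&chan->lock, flags);
        }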
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 76f7534d1dd1..81d3db40cd7b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -361,12 +361,8 @@ static void __btree_node_write_done(struct closure *cl)
static void btree_node_write_done(struct closure *cl)
{
struct btree *b = container_of(cl, struct btree, io);
- struct bio_vec *bv;
- int n;
-
- bio_for_each_segment_all(bv, b->bio, n)
- __free_page(bv->bv_page);
+ bio_free_pages(b->bio);
__btree_node_write_done(cl);
}
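
This and the following bcache/dm hunks all replace the same open-coded loop with the block layer's bio_free_pages() helper. For reference, the helper is essentially that loop hoisted into one place (a sketch; the in-tree version may differ cosmetically):

        void bio_free_pages(struct bio *bio)
        {
                struct bio_vec *bvec;
                int i;

                bio_for_each_segment_all(bvec, bio, i)
                        __free_page(bvec->bv_page);
        }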
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index c28df164701e..333a1e5f6ae6 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -107,9 +107,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
struct bio *check;
- struct bio_vec bv, *bv2;
+ struct bio_vec bv;
struct bvec_iter iter;
- int i;
check = bio_clone(bio, GFP_NOIO);
if (!check)
@@ -136,8 +135,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
kunmap_atomic(p1);
}
- bio_for_each_segment_all(bv2, check, i)
- __free_page(bv2->bv_page);
+ bio_free_pages(check);
out_put:
bio_put(check);
}
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 1881319f2298..5c4bddecfaf0 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -44,11 +44,8 @@ static void write_moving_finish(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
- struct bio_vec *bv;
- int i;
- bio_for_each_segment_all(bv, bio, i)
- __free_page(bv->bv_page);
+ bio_free_pages(bio);
if (io->op.replace_collision)
trace_bcache_gc_copy_collision(&io->w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 4b177fe11ebb..40ffe5e424b3 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -694,13 +694,8 @@ static void cached_dev_cache_miss_done(struct closure *cl)
if (s->iop.replace_collision)
bch_mark_cache_miss_collision(s->iop.c, s->d);
- if (s->iop.bio) {
- int i;
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, s->iop.bio, i)
- __free_page(bv->bv_page);
- }
+ if (s->iop.bio)
+ bio_free_pages(s->iop.bio);
cached_dev_bio_complete(cl);
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index d9fd2a62e5f6..e51644e503a5 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -128,11 +128,8 @@ static void write_dirty_finish(struct closure *cl)
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
- struct bio_vec *bv;
- int i;
- bio_for_each_segment_all(bv, &io->bio, i)
- __free_page(bv->bv_page);
+ bio_free_pages(&io->bio);
/* This is kind of a dumb way of signalling errors. */
if (KEY_DIRTY(&w->key)) {
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 8625040bae92..125aedc3875f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -191,19 +191,6 @@ static void dm_bufio_unlock(struct dm_bufio_client *c)
mutex_unlock(&c->lock);
}
-/*
- * FIXME Move to sched.h?
- */
-#ifdef CONFIG_PREEMPT_VOLUNTARY
-# define dm_bufio_cond_resched() \
-do { \
- if (unlikely(need_resched())) \
- _cond_resched(); \
-} while (0)
-#else
-# define dm_bufio_cond_resched() do { } while (0)
-#endif
-
/*----------------------------------------------------------------*/
/*
@@ -741,7 +728,7 @@ static void __flush_write_list(struct list_head *write_list)
list_entry(write_list->next, struct dm_buffer, write_list);
list_del(&b->write_list);
submit_io(b, WRITE, b->block, write_endio);
- dm_bufio_cond_resched();
+ cond_resched();
}
blk_finish_plug(&plug);
}
@@ -780,7 +767,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
__unlink_buffer(b);
return b;
}
- dm_bufio_cond_resched();
+ cond_resched();
}
list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
@@ -791,7 +778,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
__unlink_buffer(b);
return b;
}
- dm_bufio_cond_resched();
+ cond_resched();
}
return NULL;
@@ -923,7 +910,7 @@ static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
return;
__write_dirty_buffer(b, write_list);
- dm_bufio_cond_resched();
+ cond_resched();
}
}
@@ -973,7 +960,7 @@ static void __check_watermark(struct dm_bufio_client *c,
return;
__free_buffer_wake(b);
- dm_bufio_cond_resched();
+ cond_resched();
}
if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
@@ -1170,7 +1157,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
submit_io(b, READ, b->block, read_endio);
dm_bufio_release(b);
- dm_bufio_cond_resched();
+ cond_resched();
if (!n_blocks)
goto flush_plug;
@@ -1291,7 +1278,7 @@ again:
!test_bit(B_WRITING, &b->state))
__relink_lru(b, LIST_CLEAN);
- dm_bufio_cond_resched();
+ cond_resched();
/*
* If we dropped the lock, the list is no longer consistent,
@@ -1574,7 +1561,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
freed++;
if (!--nr_to_scan || ((count - freed) <= retain_target))
return freed;
- dm_bufio_cond_resched();
+ cond_resched();
}
}
return freed;
@@ -1808,7 +1795,7 @@ static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
if (__try_evict_buffer(b, 0))
count--;
- dm_bufio_cond_resched();
+ cond_resched();
}
dm_bufio_unlock(c);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 3970cda10080..695577812cf6 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -140,6 +140,13 @@ struct dm_cache_metadata {
* the device.
*/
bool fail_io:1;
+
+ /*
+ * These structures are used when loading metadata. They're too
+ * big to put on the stack.
+ */
+ struct dm_array_cursor mapping_cursor;
+ struct dm_array_cursor hint_cursor;
};
/*-------------------------------------------------------------------
@@ -1171,31 +1178,37 @@ static bool hints_array_available(struct dm_cache_metadata *cmd,
hints_array_initialized(cmd);
}
-static int __load_mapping(void *context, uint64_t cblock, void *leaf)
+static int __load_mapping(struct dm_cache_metadata *cmd,
+ uint64_t cb, bool hints_valid,
+ struct dm_array_cursor *mapping_cursor,
+ struct dm_array_cursor *hint_cursor,
+ load_mapping_fn fn, void *context)
{
int r = 0;
- bool dirty;
- __le64 value;
- __le32 hint_value = 0;
+
+ __le64 mapping;
+ __le32 hint = 0;
+
+ __le64 *mapping_value_le;
+ __le32 *hint_value_le;
+
dm_oblock_t oblock;
unsigned flags;
- struct thunk *thunk = context;
- struct dm_cache_metadata *cmd = thunk->cmd;
- memcpy(&value, leaf, sizeof(value));
- unpack_value(value, &oblock, &flags);
+ dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+ memcpy(&mapping, mapping_value_le, sizeof(mapping));
+ unpack_value(mapping, &oblock, &flags);
if (flags & M_VALID) {
- if (thunk->hints_valid) {
- r = dm_array_get_value(&cmd->hint_info, cmd->hint_root,
- cblock, &hint_value);
- if (r && r != -ENODATA)
- return r;
+ if (hints_valid) {
+ dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
+ memcpy(&hint, hint_value_le, sizeof(hint));
}
- dirty = thunk->respect_dirty_flags ? (flags & M_DIRTY) : true;
- r = thunk->fn(thunk->context, oblock, to_cblock(cblock),
- dirty, le32_to_cpu(hint_value), thunk->hints_valid);
+ r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
+ le32_to_cpu(hint), hints_valid);
+ if (r)
+ DMERR("policy couldn't load cblock");
}
return r;
@@ -1205,16 +1218,60 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy,
load_mapping_fn fn, void *context)
{
- struct thunk thunk;
+ int r;
+ uint64_t cb;
+
+ bool hints_valid = hints_array_available(cmd, policy);
+
+ if (from_cblock(cmd->cache_blocks) == 0)
+ /* Nothing to do */
+ return 0;
+
+ r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
+ if (r)
+ return r;
- thunk.fn = fn;
- thunk.context = context;
+ if (hints_valid) {
+ r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
+ if (r) {
+ dm_array_cursor_end(&cmd->mapping_cursor);
+ return r;
+ }
+ }
+
+ for (cb = 0; ; cb++) {
+ r = __load_mapping(cmd, cb, hints_valid,
+ &cmd->mapping_cursor, &cmd->hint_cursor,
+ fn, context);
+ if (r)
+ goto out;
+
+ /*
+ * We need to break out before we move the cursors.
+ */
+ if (cb >= (from_cblock(cmd->cache_blocks) - 1))
+ break;
- thunk.cmd = cmd;
- thunk.respect_dirty_flags = cmd->clean_when_opened;
- thunk.hints_valid = hints_array_available(cmd, policy);
+ r = dm_array_cursor_next(&cmd->mapping_cursor);
+ if (r) {
+ DMERR("dm_array_cursor_next for mapping failed");
+ goto out;
+ }
- return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
+ if (hints_valid) {
+ r = dm_array_cursor_next(&cmd->hint_cursor);
+ if (r) {
+ DMERR("dm_array_cursor_next for hint failed");
+ goto out;
+ }
+ }
+ }
+out:
+ dm_array_cursor_end(&cmd->mapping_cursor);
+ if (hints_valid)
+ dm_array_cursor_end(&cmd->hint_cursor);
+
+ return r;
}
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
@@ -1368,10 +1425,24 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
/*----------------------------------------------------------------*/
-static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+static int get_hint(uint32_t index, void *value_le, void *context)
+{
+ uint32_t value;
+ struct dm_cache_policy *policy = context;
+
+ value = policy_get_hint(policy, to_cblock(index));
+ *((__le32 *) value_le) = cpu_to_le32(value);
+
+ return 0;
+}
+
+/*
+ * It's quicker to always delete the hint array and recreate it with
+ * dm_array_new().
+ */
+static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
int r;
- __le32 value;
size_t hint_size;
const char *policy_name = dm_cache_policy_get_name(policy);
const unsigned *policy_version = dm_cache_policy_get_version(policy);
@@ -1380,63 +1451,23 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
(strlen(policy_name) > sizeof(cmd->policy_name) - 1))
return -EINVAL;
- if (!policy_unchanged(cmd, policy)) {
- strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
- memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
-
- hint_size = dm_cache_policy_get_hint_size(policy);
- if (!hint_size)
- return 0; /* short-circuit hints initialization */
- cmd->policy_hint_size = hint_size;
+ strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
+ memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
- if (cmd->hint_root) {
- r = dm_array_del(&cmd->hint_info, cmd->hint_root);
- if (r)
- return r;
- }
+ hint_size = dm_cache_policy_get_hint_size(policy);
+ if (!hint_size)
+ return 0; /* short-circuit hints initialization */
+ cmd->policy_hint_size = hint_size;
- r = dm_array_empty(&cmd->hint_info, &cmd->hint_root);
+ if (cmd->hint_root) {
+ r = dm_array_del(&cmd->hint_info, cmd->hint_root);
if (r)
return r;
-
- value = cpu_to_le32(0);
- __dm_bless_for_disk(&value);
- r = dm_array_resize(&cmd->hint_info, cmd->hint_root, 0,
- from_cblock(cmd->cache_blocks),
- &value, &cmd->hint_root);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint)
-{
- struct dm_cache_metadata *cmd = context;
- __le32 value = cpu_to_le32(hint);
- int r;
-
- __dm_bless_for_disk(&value);
-
- r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
- from_cblock(cblock), &value, &cmd->hint_root);
- cmd->changed = true;
-
- return r;
-}
-
-static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
-{
- int r;
-
- r = begin_hints(cmd, policy);
- if (r) {
- DMERR("begin_hints failed");
- return r;
}
- return policy_walk_mappings(policy, save_hint, cmd);
+ return dm_array_new(&cmd->hint_info, &cmd->hint_root,
+ from_cblock(cmd->cache_blocks),
+ get_hint, policy);
}
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index 14aaaf059f06..2e8a8f1d8358 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -395,7 +395,7 @@ static void init_policy_functions(struct policy *p)
p->policy.set_dirty = wb_set_dirty;
p->policy.clear_dirty = wb_clear_dirty;
p->policy.load_mapping = wb_load_mapping;
- p->policy.walk_mappings = NULL;
+ p->policy.get_hint = NULL;
p->policy.remove_mapping = wb_remove_mapping;
p->policy.writeback_work = wb_writeback_work;
p->policy.force_mapping = wb_force_mapping;
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 2816018faa7f..808ee0e2b2c4 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -48,10 +48,10 @@ static inline int policy_load_mapping(struct dm_cache_policy *p,
return p->load_mapping(p, oblock, cblock, hint, hint_valid);
}
-static inline int policy_walk_mappings(struct dm_cache_policy *p,
- policy_walk_fn fn, void *context)
+static inline uint32_t policy_get_hint(struct dm_cache_policy *p,
+ dm_cblock_t cblock)
{
- return p->walk_mappings ? p->walk_mappings(p, fn, context) : 0;
+ return p->get_hint ? p->get_hint(p, cblock) : 0;
}
static inline int policy_writeback_work(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index cf48a617a3a4..c33f4a6e1d7d 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1359,6 +1359,11 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
spin_unlock_irqrestore(&mq->lock, flags);
}
+static unsigned random_level(dm_cblock_t cblock)
+{
+ return hash_32_generic(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
+}
+
static int smq_load_mapping(struct dm_cache_policy *p,
dm_oblock_t oblock, dm_cblock_t cblock,
uint32_t hint, bool hint_valid)
@@ -1369,47 +1374,21 @@ static int smq_load_mapping(struct dm_cache_policy *p,
e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
e->oblock = oblock;
e->dirty = false; /* this gets corrected in a minute */
- e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : 1;
+ e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
push(mq, e);
return 0;
}
-static int smq_save_hints(struct smq_policy *mq, struct queue *q,
- policy_walk_fn fn, void *context)
-{
- int r;
- unsigned level;
- struct entry *e;
-
- for (level = 0; level < q->nr_levels; level++)
- for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
- if (!e->sentinel) {
- r = fn(context, infer_cblock(mq, e),
- e->oblock, e->level);
- if (r)
- return r;
- }
- }
-
- return 0;
-}
-
-static int smq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
- void *context)
+static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
{
struct smq_policy *mq = to_smq_policy(p);
- int r = 0;
+ struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
- /*
- * We don't need to lock here since this method is only called once
- * the IO has stopped.
- */
- r = smq_save_hints(mq, &mq->clean, fn, context);
- if (!r)
- r = smq_save_hints(mq, &mq->dirty, fn, context);
+ if (!e->allocated)
+ return 0;
- return r;
+ return e->level;
}
static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock)
@@ -1616,7 +1595,7 @@ static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
mq->policy.set_dirty = smq_set_dirty;
mq->policy.clear_dirty = smq_clear_dirty;
mq->policy.load_mapping = smq_load_mapping;
- mq->policy.walk_mappings = smq_walk_mappings;
+ mq->policy.get_hint = smq_get_hint;
mq->policy.remove_mapping = smq_remove_mapping;
mq->policy.remove_cblock = smq_remove_cblock;
mq->policy.writeback_work = smq_writeback_work;
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 05db56eedb6a..aa10b1493f34 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -90,9 +90,6 @@ struct policy_result {
dm_cblock_t cblock; /* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
};
-typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
- dm_oblock_t oblock, uint32_t hint);
-
/*
* The cache policy object. Just a bunch of methods. It is envisaged that
* this structure will be embedded in a bigger, policy specific structure
@@ -158,8 +155,11 @@ struct dm_cache_policy {
int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
dm_cblock_t cblock, uint32_t hint, bool hint_valid);
- int (*walk_mappings)(struct dm_cache_policy *p, policy_walk_fn fn,
- void *context);
+ /*
+ * Gets the hint for a given cblock. Called in a single-threaded
+ * context, so no locking is required.
+ */
+ uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);
/*
* Override functions used on the error paths of the core target.
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 874295757caa..a2768835d394 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -113,8 +113,7 @@ struct iv_tcw_private {
* and encrypts / decrypts at the same time.
*/
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
- DM_CRYPT_EXIT_THREAD};
+ DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
/*
* The fields in here must be read only after initialization.
@@ -1136,7 +1135,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf);
+ bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1207,18 +1206,20 @@ continue_locked:
if (!RB_EMPTY_ROOT(&cc->write_tree))
goto pop_from_list;
- if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
- spin_unlock_irq(&cc->write_thread_wait.lock);
- break;
- }
-
- __set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
__add_wait_queue(&cc->write_thread_wait, &wait);
spin_unlock_irq(&cc->write_thread_wait.lock);
+ if (unlikely(kthread_should_stop())) {
+ set_task_state(current, TASK_RUNNING);
+ remove_wait_queue(&cc->write_thread_wait, &wait);
+ break;
+ }
+
schedule();
+ set_task_state(current, TASK_RUNNING);
spin_lock_irq(&cc->write_thread_wait.lock);
__remove_wait_queue(&cc->write_thread_wait, &wait);
goto continue_locked;
@@ -1533,13 +1534,8 @@ static void crypt_dtr(struct dm_target *ti)
if (!cc)
return;
- if (cc->write_thread) {
- spin_lock_irq(&cc->write_thread_wait.lock);
- set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
- wake_up_locked(&cc->write_thread_wait);
- spin_unlock_irq(&cc->write_thread_wait.lock);
+ if (cc->write_thread)
kthread_stop(cc->write_thread);
- }
if (cc->io_queue)
destroy_workqueue(cc->io_queue);
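
The dm-crypt hunks above trade the private DM_CRYPT_EXIT_THREAD flag for the stock kthread_should_stop() protocol. The ordering is the subtle part: the worker sets its task state and enqueues itself before testing kthread_should_stop(), so the wakeup issued by kthread_stop() cannot slip in between the test and the sleep. A generic sketch of the pattern (illustrative, not the dm-crypt code):

        static DECLARE_WAIT_QUEUE_HEAD(work_waitq);

        static int worker_fn(void *data)
        {
                DECLARE_WAITQUEUE(wait, current);

                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        add_wait_queue(&work_waitq, &wait);

                        if (kthread_should_stop()) {
                                __set_current_state(TASK_RUNNING);
                                remove_wait_queue(&work_waitq, &wait);
                                break;
                        }

                        schedule();
                        __set_current_state(TASK_RUNNING);
                        remove_wait_queue(&work_waitq, &wait);

                        /* drain queued work here */
                }
                return 0;
        }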
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 49e4d8d4558f..4dfe38655a49 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -149,8 +149,6 @@ static void put_io_block(struct log_writes_c *lc)
static void log_end_io(struct bio *bio)
{
struct log_writes_c *lc = bio->bi_private;
- struct bio_vec *bvec;
- int i;
if (bio->bi_error) {
unsigned long flags;
@@ -161,9 +159,7 @@ static void log_end_io(struct bio *bio)
spin_unlock_irqrestore(&lc->blocks_lock, flags);
}
- bio_for_each_segment_all(bvec, bio, i)
- __free_page(bvec->bv_page);
-
+ bio_free_pages(bio);
put_io_block(lc);
bio_put(bio);
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index ac734e5bbe48..e477af8596e2 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -550,9 +550,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (!must_push_back_rq(m))
- r = -EIO; /* Failed */
- return r;
+ if (must_push_back_rq(m))
+ return DM_MAPIO_DELAY_REQUEUE;
+ return -EIO; /* Failed */
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
pg_init_all_paths(m);
@@ -680,9 +680,11 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
return __multipath_map_bio(m, bio, mpio);
}
-static void process_queued_bios_list(struct multipath *m)
+static void process_queued_io_list(struct multipath *m)
{
- if (m->queue_mode == DM_TYPE_BIO_BASED)
+ if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+ dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
+ else if (m->queue_mode == DM_TYPE_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
}
@@ -752,7 +754,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
if (!queue_if_no_path) {
dm_table_run_md_queue_async(m->ti->table);
- process_queued_bios_list(m);
+ process_queued_io_list(m);
}
return 0;
@@ -1193,21 +1195,17 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&m->pg_init_wait, &wait);
+ DEFINE_WAIT(wait);
while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
if (!atomic_read(&m->pg_init_in_progress))
break;
io_schedule();
}
- set_current_state(TASK_RUNNING);
-
- remove_wait_queue(&m->pg_init_wait, &wait);
+ finish_wait(&m->pg_init_wait, &wait);
}
static void flush_multipath_work(struct multipath *m)
@@ -1308,7 +1306,7 @@ out:
spin_unlock_irqrestore(&m->lock, flags);
if (run_queue) {
dm_table_run_md_queue_async(m->ti->table);
- process_queued_bios_list(m);
+ process_queued_io_list(m);
}
return r;
@@ -1506,7 +1504,7 @@ static void pg_init_done(void *data, int errors)
}
clear_bit(MPATHF_QUEUE_IO, &m->flags);
- process_queued_bios_list(m);
+ process_queued_io_list(m);
/*
* Wake up any thread waiting to suspend.
@@ -1521,10 +1519,10 @@ static void activate_path(struct work_struct *work)
{
struct pgpath *pgpath =
container_of(work, struct pgpath, activate_path.work);
+ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
- if (pgpath->is_active)
- scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
- pg_init_done, pgpath);
+ if (pgpath->is_active && !blk_queue_dying(q))
+ scsi_dh_activate(q, pg_init_done, pgpath);
else
pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}
@@ -1532,6 +1530,14 @@ static void activate_path(struct work_struct *work)
static int noretry_error(int error)
{
switch (error) {
+ case -EBADE:
+ /*
+ * EBADE signals a reservation conflict.
+ * We shouldn't fail the path here, as we can still communicate with
+ * the target. We should fail over to the next path, but in doing so
+ * we might cause a ping-pong between paths.
+ * So just return the reservation conflict error.
+ */
case -EOPNOTSUPP:
case -EREMOTEIO:
case -EILSEQ:
@@ -1576,9 +1582,6 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
if (!must_push_back_rq(m))
r = -EIO;
- } else {
- if (error == -EBADE)
- r = error;
}
}
@@ -1627,9 +1630,6 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
if (!must_push_back_bio(m))
return -EIO;
return DM_ENDIO_REQUEUE;
- } else {
- if (error == -EBADE)
- return error;
}
}
@@ -1941,7 +1941,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg_init_all_paths(m);
dm_table_run_md_queue_async(m->ti->table);
- process_queued_bios_list(m);
+ process_queued_io_list(m);
}
/*
@@ -1994,11 +1994,14 @@ static int multipath_busy(struct dm_target *ti)
struct priority_group *pg, *next_pg;
struct pgpath *pgpath;
- /* pg_init in progress or no paths available */
- if (atomic_read(&m->pg_init_in_progress) ||
- (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)))
+ /* pg_init in progress */
+ if (atomic_read(&m->pg_init_in_progress))
return true;
+ /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
+ if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
+
/* Guess which priority_group will be used at next mapping time */
pg = lockless_dereference(m->current_pg);
next_pg = lockless_dereference(m->next_pg);
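
The multipath_wait_for_pg_init_completion() rewrite above is a straight conversion to the prepare_to_wait()/finish_wait() idiom, which bundles the set-state/add-to-queue/remove dance into two helpers. In general form (a sketch, not the dm-mpath code):

        static void wait_for_counter_zero(wait_queue_head_t *wq, atomic_t *v)
        {
                DEFINE_WAIT(wait);

                while (1) {
                        prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                        if (!atomic_read(v))
                                break;
                        io_schedule();
                }
                finish_wait(wq, &wait);
        }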
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8abde6b8cedc..6d53810963f7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -266,7 +266,7 @@ static struct raid_type {
{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
- {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
+ {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
@@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
/*
* No takeover/reshaping, because we don't have the extended v1.9.0 metadata
*/
- if (le32_to_cpu(sb->level) != mddev->level) {
+ if (le32_to_cpu(sb->level) != mddev->new_level) {
DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
return -EINVAL;
}
- if (le32_to_cpu(sb->layout) != mddev->layout) {
+ if (le32_to_cpu(sb->layout) != mddev->new_layout) {
DMERR("Reshaping raid sets not yet supported. (raid layout change)");
DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
DMERR(" Old layout: %s w/ %d copies",
@@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
raid10_md_layout_to_copies(mddev->layout));
return -EINVAL;
}
- if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+ if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
return -EINVAL;
}
@@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
return -EINVAL;
}
+ DMINFO("Discovered old metadata format; upgrading to extended metadata format");
+
/* Table line is checked vs. authoritative superblock */
rs_set_new(rs);
}
@@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
if (!mddev->events && super_init_validation(rs, rdev))
return -EINVAL;
- if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+ if (le32_to_cpu(sb->compat_features) &&
+ le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
return -EINVAL;
}
@@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 9, 0},
+ .version = {1, 9, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index bdf1606f67bc..9a8b71067c6e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,6 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
- /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -1200,8 +1199,6 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
- bio_record->details.bi_bdev = NULL;
-
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1260,22 +1257,12 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
}
if (error == -EOPNOTSUPP)
- goto out;
+ return error;
if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
- goto out;
+ return error;
if (unlikely(error)) {
- if (!bio_record->details.bi_bdev) {
- /*
- * There wasn't enough memory to record necessary
- * information for a retry or there was no other
- * mirror in-sync.
- */
- DMERR_LIMIT("Mirror read failed.");
- return -EIO;
- }
-
m = bio_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1291,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
bd = &bio_record->details;
dm_bio_restore(bd, bio);
- bio_record->details.bi_bdev = NULL;
+ bio->bi_error = 0;
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
@@ -1299,9 +1286,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
DMERR("All replicated volumes dead, failing I/O");
}
-out:
- bio_record->details.bi_bdev = NULL;
-
return error;
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1ca7463e8bb2..1d0d2adc050a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -73,15 +73,24 @@ static void dm_old_start_queue(struct request_queue *q)
spin_unlock_irqrestore(q->queue_lock, flags);
}
+static void dm_mq_start_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_kick_requeue_list(q);
+}
+
void dm_start_queue(struct request_queue *q)
{
if (!q->mq_ops)
dm_old_start_queue(q);
- else {
- queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
- blk_mq_start_stopped_hw_queues(q, true);
- blk_mq_kick_requeue_list(q);
- }
+ else
+ dm_mq_start_queue(q);
}
static void dm_old_stop_queue(struct request_queue *q)
@@ -89,27 +98,35 @@ static void dm_old_stop_queue(struct request_queue *q)
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
+ if (!blk_queue_stopped(q))
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_mq_stop_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
if (blk_queue_stopped(q)) {
spin_unlock_irqrestore(q->queue_lock, flags);
return;
}
- blk_stop_queue(q);
+ queue_flag_set(QUEUE_FLAG_STOPPED, q);
spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/* Prevent requeuing from restarting the queue. */
+ blk_mq_cancel_requeue_work(q);
+ blk_mq_stop_hw_queues(q);
}
void dm_stop_queue(struct request_queue *q)
{
if (!q->mq_ops)
dm_old_stop_queue(q);
- else {
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_STOPPED, q);
- spin_unlock_irq(q->queue_lock);
-
- blk_mq_cancel_requeue_work(q);
- blk_mq_stop_hw_queues(q);
- }
+ else
+ dm_mq_stop_queue(q);
}
static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
@@ -319,21 +336,32 @@ static void dm_old_requeue_request(struct request *rq)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void dm_mq_requeue_request(struct request *rq)
+static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
- struct request_queue *q = rq->q;
unsigned long flags;
- blk_mq_requeue_request(rq);
spin_lock_irqsave(q->queue_lock, flags);
if (!blk_queue_stopped(q))
- blk_mq_kick_requeue_list(q);
+ blk_mq_delay_kick_requeue_list(q, msecs);
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void dm_requeue_original_request(struct mapped_device *md,
- struct request *rq)
+void dm_mq_kick_requeue_list(struct mapped_device *md)
+{
+ __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
+}
+EXPORT_SYMBOL(dm_mq_kick_requeue_list);
+
+static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
+{
+ blk_mq_requeue_request(rq);
+ __dm_mq_kick_requeue_list(rq->q, msecs);
+}
+
+static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
int rw = rq_data_dir(rq);
rq_end_stats(md, rq);
@@ -342,7 +370,7 @@ static void dm_requeue_original_request(struct mapped_device *md,
if (!rq->q->mq_ops)
dm_old_requeue_request(rq);
else
- dm_mq_requeue_request(rq);
+ dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);
rq_completed(md, rw, false);
}
@@ -372,7 +400,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
return;
else if (r == DM_ENDIO_REQUEUE)
/* The target wants to requeue the I/O */
- dm_requeue_original_request(tio->md, tio->orig);
+ dm_requeue_original_request(tio, false);
else {
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -553,7 +581,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
if (!md->init_tio_pdu)
memset(&tio->info, 0, sizeof(tio->info));
if (md->kworker_task)
- init_kthread_work(&tio->work, map_tio_request);
+ kthread_init_work(&tio->work, map_tio_request);
}
static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
@@ -612,20 +640,23 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
/*
* Returns:
- * 0 : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * DM_MAPIO_* : the request has been processed as indicated
+ * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
* < 0 : the request was completed due to failure
*/
-static int map_request(struct dm_rq_target_io *tio, struct request *rq,
- struct mapped_device *md)
+static int map_request(struct dm_rq_target_io *tio)
{
int r;
struct dm_target *ti = tio->ti;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
struct request *clone = NULL;
if (tio->clone) {
clone = tio->clone;
r = ti->type->map_rq(ti, clone, &tio->info);
+ if (r == DM_MAPIO_DELAY_REQUEUE)
+ return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
} else {
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
if (r < 0) {
@@ -633,9 +664,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
dm_kill_unmapped_request(rq, r);
return r;
}
- if (r != DM_MAPIO_REMAPPED)
- return r;
- if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+ if (r == DM_MAPIO_REMAPPED &&
+ setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
ti->type->release_clone_rq(clone);
return DM_MAPIO_REQUEUE;
@@ -654,7 +684,10 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
- dm_requeue_original_request(md, tio->orig);
+ break;
+ case DM_MAPIO_DELAY_REQUEUE:
+ /* The target wants to requeue the I/O after a delay */
+ dm_requeue_original_request(tio, true);
break;
default:
if (r > 0) {
@@ -664,10 +697,9 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
/* The target wants to complete the I/O */
dm_kill_unmapped_request(rq, r);
- return r;
}
- return 0;
+ return r;
}
static void dm_start_request(struct mapped_device *md, struct request *orig)
@@ -706,11 +738,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
static void map_tio_request(struct kthread_work *work)
{
struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
- struct request *rq = tio->orig;
- struct mapped_device *md = tio->md;
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
- dm_requeue_original_request(md, rq);
+ if (map_request(tio) == DM_MAPIO_REQUEUE)
+ dm_requeue_original_request(tio, false);
}
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
@@ -801,7 +831,7 @@ static void dm_old_request_fn(struct request_queue *q)
tio = tio_from_request(rq);
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
- queue_kthread_work(&md->kworker, &tio->work);
+ kthread_queue_work(&md->kworker, &tio->work);
BUG_ON(!irqs_disabled());
}
}
@@ -823,11 +853,14 @@ int dm_old_init_request_queue(struct mapped_device *md)
blk_queue_prep_rq(md->queue, dm_old_prep_fn);
/* Initialize the request-based DM worker thread */
- init_kthread_worker(&md->kworker);
+ kthread_init_worker(&md->kworker);
md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
"kdmwork-%s", dm_device_name(md));
- if (IS_ERR(md->kworker_task))
- return PTR_ERR(md->kworker_task);
+ if (IS_ERR(md->kworker_task)) {
+ int error = PTR_ERR(md->kworker_task);
+ md->kworker_task = NULL;
+ return error;
+ }
elv_register_queue(md->queue);
@@ -896,7 +929,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
tio->ti = ti;
/* Direct call is fine since .queue_rq allows allocations */
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+ if (map_request(tio) == DM_MAPIO_REQUEUE) {
/* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
rq_completed(md, rq_data_dir(rq), false);
@@ -908,7 +941,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
static struct blk_mq_ops dm_mq_ops = {
.queue_rq = dm_mq_queue_rq,
- .map_queue = blk_mq_map_queue,
.complete = dm_softirq_done,
.init_request = dm_mq_init_request,
};
@@ -955,7 +987,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
dm_init_md_queue(md);
/* backfill 'mq' sysfs registration normally done in blk_register_queue */
- blk_mq_register_disk(md->disk);
+ blk_mq_register_dev(disk_to_dev(md->disk), q);
return 0;
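
The new DM_MAPIO_DELAY_REQUEUE return value lets a request-based target ask
for a delayed requeue (5000 ms in dm_requeue_original_request() above)
instead of an immediate one; only the blk-mq path honors the delay, since
.request_fn requeues are always immediate. A minimal sketch of a
hypothetical target returning it from .clone_and_map_rq; the example_*
names and the readiness test are illustrative, not part of this patch:

static int example_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				    union map_info *map_context,
				    struct request **clone)
{
	struct example_ctx *ctx = ti->private;		/* assumed state */

	if (!example_path_available(ctx))		/* assumed helper */
		return DM_MAPIO_DELAY_REQUEUE;		/* retry later */

	*clone = example_get_clone(ctx, rq);		/* assumed helper */
	return DM_MAPIO_REMAPPED;			/* dispatch clone */
}
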
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 9e6f0a3773d4..4da06cae7bad 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -55,6 +55,8 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md);
void dm_start_queue(struct request_queue *q);
void dm_stop_queue(struct request_queue *q);
+void dm_mq_kick_requeue_list(struct mapped_device *md);
+
unsigned dm_get_reserved_rq_based_ios(void);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3e407a9cde1f..c4b53b332607 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
- type);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
return -EINVAL;
}
if (dm_target_needs_singleton(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: target type %s must appear alone in table",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "singleton target type must appear alone in table";
+ goto bad;
}
t->singleton = true;
}
if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
- DMERR("%s: target type %s may not be included in read-only tables",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "target type may not be included in a read-only table";
+ goto bad;
}
if (t->immutable_target_type) {
if (t->immutable_target_type != tgt->type) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), t->immutable_target_type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
} else if (dm_target_is_immutable(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), tgt->type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
t->immutable_target_type = tgt->type;
}
@@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
*/
if (!adjoin(t, tgt)) {
tgt->error = "Gap in table";
- r = -EINVAL;
goto bad;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fa9b1cb4438a..ef7bf1dd6900 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->bs)
bioset_free(md->bs);
- cleanup_srcu_struct(&md->io_barrier);
-
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->queue)
blk_cleanup_queue(md->queue);
+ cleanup_srcu_struct(&md->io_barrier);
+
if (md->bdev) {
bdput(md->bdev);
md->bdev = NULL;
@@ -1648,6 +1648,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct request_queue *q = md->queue;
sector_t size;
+ lockdep_assert_held(&md->suspend_lock);
+
size = dm_table_get_size(t);
/*
@@ -1873,6 +1875,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
+ struct request_queue *q = dm_get_md_queue(md);
struct dm_table *map;
int srcu_idx;
@@ -1883,8 +1886,12 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
+ spin_unlock_irq(q->queue_lock);
+
if (dm_request_based(md) && md->kworker_task)
- flush_kthread_worker(&md->kworker);
+ kthread_flush_worker(&md->kworker);
/*
* Take suspend_lock so that presuspend and postsuspend methods
@@ -1934,30 +1941,25 @@ void dm_put(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_put);
-static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
+static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
int r = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&md->wait, &wait);
+ DEFINE_WAIT(wait);
while (1) {
- set_current_state(interruptible);
+ prepare_to_wait(&md->wait, &wait, task_state);
if (!md_in_flight(md))
break;
- if (interruptible == TASK_INTERRUPTIBLE &&
- signal_pending(current)) {
+ if (signal_pending_state(task_state, current)) {
r = -EINTR;
break;
}
io_schedule();
}
- set_current_state(TASK_RUNNING);
-
- remove_wait_queue(&md->wait, &wait);
+ finish_wait(&md->wait, &wait);
return r;
}
@@ -2075,6 +2077,10 @@ static void unlock_fs(struct mapped_device *md)
}
/*
+ * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
+ * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
+ * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
+ *
* If __dm_suspend returns 0, the device is completely quiescent
* now. There is no request-processing activity. All new requests
* are being added to md->deferred list.
@@ -2082,13 +2088,15 @@ static void unlock_fs(struct mapped_device *md)
* Caller must hold md->suspend_lock
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
- unsigned suspend_flags, int interruptible,
+ unsigned suspend_flags, long task_state,
int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
int r;
+ lockdep_assert_held(&md->suspend_lock);
+
/*
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
* This flag is cleared before dm_suspend returns.
@@ -2139,7 +2147,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (dm_request_based(md)) {
dm_stop_queue(md->queue);
if (md->kworker_task)
- flush_kthread_worker(&md->kworker);
+ kthread_flush_worker(&md->kworker);
}
flush_workqueue(md->wq);
@@ -2149,7 +2157,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
- r = dm_wait_for_completion(md, interruptible);
+ r = dm_wait_for_completion(md, task_state);
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
@@ -2249,10 +2257,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
int dm_resume(struct mapped_device *md)
{
- int r = -EINVAL;
+ int r;
struct dm_table *map = NULL;
retry:
+ r = -EINVAL;
mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
if (!dm_suspended_md(md))
@@ -2276,8 +2285,6 @@ retry:
goto out;
clear_bit(DMF_SUSPENDED, &md->flags);
-
- r = 0;
out:
mutex_unlock(&md->suspend_lock);
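
dm_wait_for_completion() now follows the standard prepare_to_wait() /
finish_wait() idiom, and signal_pending_state() folds the old "is the state
interruptible and is a signal pending" test into one call. The same pattern
reduced to a generic sketch, with condition_met() as a placeholder
predicate rather than a kernel API:

static int wait_for_condition(wait_queue_head_t *wq, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(wq, &wait, task_state);

		if (condition_met())	/* placeholder predicate */
			break;

		/* True only when task_state admits signal delivery. */
		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(wq, &wait);

	return r;
}
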
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eac84d8ff724..2089d46b0eb8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3887,10 +3887,10 @@ array_state_show(struct mddev *mddev, char *page)
st = read_auto;
break;
case 0:
- if (mddev->in_sync)
- st = clean;
- else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
st = write_pending;
+ else if (mddev->in_sync)
+ st = clean;
else if (mddev->safemode)
st = active_idle;
else
@@ -8144,14 +8144,14 @@ void md_do_sync(struct md_thread *thread)
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 431a03067d64..e83047cbb2da 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -277,6 +277,48 @@ static int insert_ablock(struct dm_array_info *info, uint64_t index,
return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
}
+/*----------------------------------------------------------------*/
+
+static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
+ struct dm_block **block, struct array_block **ab)
+{
+ int inc;
+ int r = dm_tm_shadow_block(info->btree_info.tm, b,
+ &array_validator, block, &inc);
+ if (r)
+ return r;
+
+ *ab = dm_block_data(*block);
+ if (inc)
+ inc_ablock_entries(info, *ab);
+
+ return 0;
+}
+
+/*
+ * The shadow op will often be a noop. Only insert if it really
+ * copied data.
+ */
+static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
+ struct dm_block *block, dm_block_t b,
+ dm_block_t *root)
+{
+ int r = 0;
+
+ if (dm_block_location(block) != b) {
+ /*
+ * dm_tm_shadow_block will have already decremented the old
+ * block, but it is still referenced by the btree. We
+ * increment to stop the insert decrementing it below zero
+ * when overwriting the old value.
+ */
+ dm_tm_inc(info->btree_info.tm, b);
+ r = insert_ablock(info, index, block, root);
+ }
+
+ return r;
+}
+
/*
* Looks up an array block in the btree. Then shadows it, and updates the
* btree to point to this new shadow. 'root' is an input/output parameter
@@ -286,49 +328,21 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
unsigned index, struct dm_block **block,
struct array_block **ab)
{
- int r, inc;
+ int r;
uint64_t key = index;
dm_block_t b;
__le64 block_le;
- /*
- * lookup
- */
r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
if (r)
return r;
b = le64_to_cpu(block_le);
- /*
- * shadow
- */
- r = dm_tm_shadow_block(info->btree_info.tm, b,
- &array_validator, block, &inc);
+ r = __shadow_ablock(info, b, block, ab);
if (r)
return r;
- *ab = dm_block_data(*block);
- if (inc)
- inc_ablock_entries(info, *ab);
-
- /*
- * Reinsert.
- *
- * The shadow op will often be a noop. Only insert if it really
- * copied data.
- */
- if (dm_block_location(*block) != b) {
- /*
- * dm_tm_shadow_block will have already decremented the old
- * block, but it is still referenced by the btree. We
- * increment to stop the insert decrementing it below zero
- * when overwriting the old value.
- */
- dm_tm_inc(info->btree_info.tm, b);
- r = insert_ablock(info, index, *block, root);
- }
-
- return r;
+ return __reinsert_ablock(info, index, *block, b, root);
}
/*
@@ -681,6 +695,72 @@ int dm_array_resize(struct dm_array_info *info, dm_block_t root,
}
EXPORT_SYMBOL_GPL(dm_array_resize);
+static int populate_ablock_with_values(struct dm_array_info *info, struct array_block *ab,
+ value_fn fn, void *context, unsigned base, unsigned new_nr)
+{
+ int r;
+ unsigned i;
+ uint32_t nr_entries;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ BUG_ON(le32_to_cpu(ab->nr_entries));
+ BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = 0; i < new_nr; i++) {
+ r = fn(base + i, element_at(info, ab, i), context);
+ if (r)
+ return r;
+
+ if (vt->inc)
+ vt->inc(vt->context, element_at(info, ab, i));
+ }
+
+ ab->nr_entries = cpu_to_le32(new_nr);
+ return 0;
+}
+
+int dm_array_new(struct dm_array_info *info, dm_block_t *root,
+ uint32_t size, value_fn fn, void *context)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+ unsigned block_index, end_block, size_of_block, max_entries;
+
+ r = dm_array_empty(info, root);
+ if (r)
+ return r;
+
+ size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ max_entries = calc_max_entries(info->value_type.size, size_of_block);
+ end_block = dm_div_up(size, max_entries);
+
+ for (block_index = 0; block_index != end_block; block_index++) {
+ r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
+ if (r)
+ break;
+
+ r = populate_ablock_with_values(info, ab, fn, context,
+ block_index * max_entries,
+ min(max_entries, size));
+ if (r) {
+ unlock_ablock(info, block);
+ break;
+ }
+
+ r = insert_ablock(info, block_index, block, root);
+ unlock_ablock(info, block);
+ if (r)
+ break;
+
+ size -= max_entries;
+ }
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_new);
+
int dm_array_del(struct dm_array_info *info, dm_block_t root)
{
return dm_btree_del(&info->btree_info, root);
@@ -819,3 +899,89 @@ int dm_array_walk(struct dm_array_info *info, dm_block_t root,
EXPORT_SYMBOL_GPL(dm_array_walk);
/*----------------------------------------------------------------*/
+
+static int load_ablock(struct dm_array_cursor *c)
+{
+ int r;
+ __le64 value_le;
+ uint64_t key;
+
+ if (c->block)
+ unlock_ablock(c->info, c->block);
+
+ c->block = NULL;
+ c->ab = NULL;
+ c->index = 0;
+
+ r = dm_btree_cursor_get_value(&c->cursor, &key, &value_le);
+ if (r) {
+ DMERR("dm_btree_cursor_get_value failed");
+ dm_btree_cursor_end(&c->cursor);
+
+ } else {
+ r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab);
+ if (r) {
+ DMERR("get_ablock failed");
+ dm_btree_cursor_end(&c->cursor);
+ }
+ }
+
+ return r;
+}
+
+int dm_array_cursor_begin(struct dm_array_info *info, dm_block_t root,
+ struct dm_array_cursor *c)
+{
+ int r;
+
+ memset(c, 0, sizeof(*c));
+ c->info = info;
+ r = dm_btree_cursor_begin(&info->btree_info, root, true, &c->cursor);
+ if (r) {
+ DMERR("couldn't create btree cursor");
+ return r;
+ }
+
+ return load_ablock(c);
+}
+EXPORT_SYMBOL_GPL(dm_array_cursor_begin);
+
+void dm_array_cursor_end(struct dm_array_cursor *c)
+{
+ if (c->block) {
+ unlock_ablock(c->info, c->block);
+ dm_btree_cursor_end(&c->cursor);
+ }
+}
+EXPORT_SYMBOL_GPL(dm_array_cursor_end);
+
+int dm_array_cursor_next(struct dm_array_cursor *c)
+{
+ int r;
+
+ if (!c->block)
+ return -ENODATA;
+
+ c->index++;
+
+ if (c->index >= le32_to_cpu(c->ab->nr_entries)) {
+ r = dm_btree_cursor_next(&c->cursor);
+ if (r)
+ return r;
+
+ r = load_ablock(c);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_array_cursor_next);
+
+void dm_array_cursor_get_value(struct dm_array_cursor *c, void **value_le)
+{
+ *value_le = element_at(c->info, c->ab, c->index);
+}
+EXPORT_SYMBOL_GPL(dm_array_cursor_get_value);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
index ea177d6fa58f..27ee49a55473 100644
--- a/drivers/md/persistent-data/dm-array.h
+++ b/drivers/md/persistent-data/dm-array.h
@@ -112,6 +112,25 @@ int dm_array_resize(struct dm_array_info *info, dm_block_t root,
__dm_written_to_disk(value);
/*
+ * Creates a new array populated with values provided by a callback
+ * function. This is more efficient than creating an empty array,
+ * resizing, and then setting values since that process incurs a lot of
+ * copying.
+ *
+ * Assumes 32bit values for now since it's only used by the cache hint
+ * array.
+ *
+ * info - describes the array
+ * root - the root block of the array on disk
+ * size - the number of entries in the array
+ * fn - the callback
+ * context - passed to the callback
+ */
+typedef int (*value_fn)(uint32_t index, void *value_le, void *context);
+int dm_array_new(struct dm_array_info *info, dm_block_t *root,
+ uint32_t size, value_fn fn, void *context);
+
+/*
* Frees a whole array. The value_type's decrement operation will be called
* for all values in the array
*/
@@ -163,4 +182,37 @@ int dm_array_walk(struct dm_array_info *info, dm_block_t root,
/*----------------------------------------------------------------*/
+/*
+ * Cursor API.
+ *
+ * This lets you iterate through all the entries in an array efficiently
+ * (it will preload metadata).
+ *
+ * I'm using a cursor, rather than a walk function with a callback because
+ * the cache target needs to iterate both the mapping and hint arrays in
+ * unison.
+ */
+struct dm_array_cursor {
+ struct dm_array_info *info;
+ struct dm_btree_cursor cursor;
+
+ struct dm_block *block;
+ struct array_block *ab;
+ unsigned index;
+};
+
+int dm_array_cursor_begin(struct dm_array_info *info,
+ dm_block_t root, struct dm_array_cursor *c);
+void dm_array_cursor_end(struct dm_array_cursor *c);
+
+uint32_t dm_array_cursor_index(struct dm_array_cursor *c);
+int dm_array_cursor_next(struct dm_array_cursor *c);
+
+/*
+ * value_le is only valid while the cursor points at the current value.
+ */
+void dm_array_cursor_get_value(struct dm_array_cursor *c, void **value_le);
+
+/*----------------------------------------------------------------*/
+
#endif /* _LINUX_DM_ARRAY_H */
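
The two additions pair naturally: dm_array_new() builds a fully populated
array in one pass, and the cursor walks it back without a btree lookup per
entry. A hedged sketch combining both, assuming 32-bit little-endian values
as the dm_array_new() comment requires; everything except the dm_array_*()
calls is illustrative:

/* Fill callback: value for entry 'index' (example payload only). */
static int fill_le32(uint32_t index, void *value_le, void *context)
{
	__le32 *v = value_le;

	*v = cpu_to_le32(index);
	return 0;
}

static int build_and_walk(struct dm_array_info *info, dm_block_t *root,
			  uint32_t size)
{
	struct dm_array_cursor c;
	void *value_le;
	int r;

	r = dm_array_new(info, root, size, fill_le32, NULL);
	if (r)
		return r;

	r = dm_array_cursor_begin(info, *root, &c);
	if (r)
		return r;

	do {
		dm_array_cursor_get_value(&c, &value_le);
		/* value_le is valid only until the cursor moves */
	} while (!(r = dm_array_cursor_next(&c)));

	dm_array_cursor_end(&c);
	return r == -ENODATA ? 0 : r;	/* -ENODATA is the normal end */
}
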
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 2cc1877804c2..20a40329d84a 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -994,3 +994,165 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
return walk_node(info, root, fn, context);
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
+
+/*----------------------------------------------------------------*/
+
+static void prefetch_values(struct dm_btree_cursor *c)
+{
+ unsigned i, nr;
+ __le64 value_le;
+ struct cursor_node *n = c->nodes + c->depth - 1;
+ struct btree_node *bn = dm_block_data(n->b);
+ struct dm_block_manager *bm = dm_tm_get_bm(c->info->tm);
+
+ BUG_ON(c->info->value_type.size != sizeof(value_le));
+
+ nr = le32_to_cpu(bn->header.nr_entries);
+ for (i = 0; i < nr; i++) {
+ memcpy(&value_le, value_ptr(bn, i), sizeof(value_le));
+ dm_bm_prefetch(bm, le64_to_cpu(value_le));
+ }
+}
+
+static bool leaf_node(struct dm_btree_cursor *c)
+{
+ struct cursor_node *n = c->nodes + c->depth - 1;
+ struct btree_node *bn = dm_block_data(n->b);
+
+ return le32_to_cpu(bn->header.flags) & LEAF_NODE;
+}
+
+static int push_node(struct dm_btree_cursor *c, dm_block_t b)
+{
+ int r;
+ struct cursor_node *n = c->nodes + c->depth;
+
+ if (c->depth >= DM_BTREE_CURSOR_MAX_DEPTH - 1) {
+ DMERR("couldn't push cursor node, stack depth too high");
+ return -EINVAL;
+ }
+
+ r = bn_read_lock(c->info, b, &n->b);
+ if (r)
+ return r;
+
+ n->index = 0;
+ c->depth++;
+
+ if (c->prefetch_leaves || !leaf_node(c))
+ prefetch_values(c);
+
+ return 0;
+}
+
+static void pop_node(struct dm_btree_cursor *c)
+{
+ c->depth--;
+ unlock_block(c->info, c->nodes[c->depth].b);
+}
+
+static int inc_or_backtrack(struct dm_btree_cursor *c)
+{
+ struct cursor_node *n;
+ struct btree_node *bn;
+
+ for (;;) {
+ if (!c->depth)
+ return -ENODATA;
+
+ n = c->nodes + c->depth - 1;
+ bn = dm_block_data(n->b);
+
+ n->index++;
+ if (n->index < le32_to_cpu(bn->header.nr_entries))
+ break;
+
+ pop_node(c);
+ }
+
+ return 0;
+}
+
+static int find_leaf(struct dm_btree_cursor *c)
+{
+ int r = 0;
+ struct cursor_node *n;
+ struct btree_node *bn;
+ __le64 value_le;
+
+ for (;;) {
+ n = c->nodes + c->depth - 1;
+ bn = dm_block_data(n->b);
+
+ if (le32_to_cpu(bn->header.flags) & LEAF_NODE)
+ break;
+
+ memcpy(&value_le, value_ptr(bn, n->index), sizeof(value_le));
+ r = push_node(c, le64_to_cpu(value_le));
+ if (r) {
+ DMERR("push_node failed");
+ break;
+ }
+ }
+
+ if (!r && (le32_to_cpu(bn->header.nr_entries) == 0))
+ return -ENODATA;
+
+ return r;
+}
+
+int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
+ bool prefetch_leaves, struct dm_btree_cursor *c)
+{
+ int r;
+
+ c->info = info;
+ c->root = root;
+ c->depth = 0;
+ c->prefetch_leaves = prefetch_leaves;
+
+ r = push_node(c, root);
+ if (r)
+ return r;
+
+ return find_leaf(c);
+}
+EXPORT_SYMBOL_GPL(dm_btree_cursor_begin);
+
+void dm_btree_cursor_end(struct dm_btree_cursor *c)
+{
+ while (c->depth)
+ pop_node(c);
+}
+EXPORT_SYMBOL_GPL(dm_btree_cursor_end);
+
+int dm_btree_cursor_next(struct dm_btree_cursor *c)
+{
+ int r = inc_or_backtrack(c);
+ if (!r) {
+ r = find_leaf(c);
+ if (r)
+ DMERR("find_leaf failed");
+ }
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_cursor_next);
+
+int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le)
+{
+ if (c->depth) {
+ struct cursor_node *n = c->nodes + c->depth - 1;
+ struct btree_node *bn = dm_block_data(n->b);
+
+ if (le32_to_cpu(bn->header.flags) & INTERNAL_NODE)
+ return -EINVAL;
+
+ *key = le64_to_cpu(*key_ptr(bn, n->index));
+ memcpy(value_le, value_ptr(bn, n->index), c->info->value_type.size);
+ return 0;
+
+ } else
+ return -ENODATA;
+}
+EXPORT_SYMBOL_GPL(dm_btree_cursor_get_value);
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index c74301fa5a37..db9bd26adf31 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -176,4 +176,39 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
int (*fn)(void *context, uint64_t *keys, void *leaf),
void *context);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Cursor API. This does not follow the rolling lock convention. Since we
+ * know the order that values are required we can issue prefetches to speed
+ * up iteration. Use on a single level btree only.
+ */
+#define DM_BTREE_CURSOR_MAX_DEPTH 16
+
+struct cursor_node {
+ struct dm_block *b;
+ unsigned index;
+};
+
+struct dm_btree_cursor {
+ struct dm_btree_info *info;
+ dm_block_t root;
+
+ bool prefetch_leaves;
+ unsigned depth;
+ struct cursor_node nodes[DM_BTREE_CURSOR_MAX_DEPTH];
+};
+
+/*
+ * Creates a fresh cursor. If prefetch_leaves is set then it is assumed
+ * the btree contains block indexes that will be prefetched. The cursor is
+ * quite large, so you probably don't want to put it on the stack.
+ */
+int dm_btree_cursor_begin(struct dm_btree_info *info, dm_block_t root,
+ bool prefetch_leaves, struct dm_btree_cursor *c);
+void dm_btree_cursor_end(struct dm_btree_cursor *c);
+int dm_btree_cursor_next(struct dm_btree_cursor *c);
+int dm_btree_cursor_get_value(struct dm_btree_cursor *c, uint64_t *key, void *value_le);
+
#endif /* _LINUX_DM_BTREE_H */
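
For a btree whose leaves hold block indexes, passing prefetch_leaves=true
lets the cursor issue prefetches as each node is pushed, so iteration
overlaps I/O with processing. A hedged walk sketch using only the calls
declared above, assuming the 8-byte (__le64) values that prefetch_values()
in dm-btree.c insists on:

static int walk_blocks(struct dm_btree_info *info, dm_block_t root)
{
	struct dm_btree_cursor *c;	/* large, so heap-allocate it */
	uint64_t key;
	__le64 value_le;
	int r;

	c = kmalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	/* true: leaves hold block indexes worth prefetching */
	r = dm_btree_cursor_begin(info, root, true, c);
	while (!r) {
		r = dm_btree_cursor_get_value(c, &key, &value_le);
		if (r)
			break;
		/* consume key / le64_to_cpu(value_le) here */
		r = dm_btree_cursor_next(c);
	}

	dm_btree_cursor_end(c);
	kfree(c);
	return r == -ENODATA ? 0 : r;	/* -ENODATA marks exhaustion */
}
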
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 21dc00eb1989..29e2df5cd77b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -145,12 +145,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
return r1_bio;
out_free_pages:
- while (--j >= 0) {
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, r1_bio->bios[j], i)
- __free_page(bv->bv_page);
- }
+ while (--j >= 0)
+ bio_free_pages(r1_bio->bios[j]);
out_free_bio:
while (++j < pi->raid_disks)
@@ -407,11 +403,14 @@ static void raid1_end_write_request(struct bio *bio)
struct bio *to_put = NULL;
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
/*
* 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
@@ -448,7 +447,7 @@ static void raid1_end_write_request(struct bio *bio)
/* Maybe we can clear some bad blocks. */
if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
}
@@ -2298,17 +2297,23 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
* This is all done synchronously while the array is
* frozen
*/
+
+ bio = r1_bio->bios[r1_bio->read_disk];
+ bdevname(bio->bi_bdev, b);
+ bio_put(bio);
+ r1_bio->bios[r1_bio->read_disk] = NULL;
+
if (mddev->ro == 0) {
freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
unfreeze_array(conf);
- } else
- md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+ } else {
+ r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
+ }
+
rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
- bio = r1_bio->bios[r1_bio->read_disk];
- bdevname(bio->bi_bdev, b);
read_more:
disk = read_balance(conf, r1_bio, &max_sectors);
if (disk == -1) {
@@ -2319,11 +2324,6 @@ read_more:
} else {
const unsigned long do_sync
= r1_bio->master_bio->bi_opf & REQ_SYNC;
- if (bio) {
- r1_bio->bios[r1_bio->read_disk] =
- mddev->ro ? IO_BLOCKED : NULL;
- bio_put(bio);
- }
r1_bio->read_disk = disk;
bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index be1a9fca3b2d..39fddda2fef2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio)
struct r10conf *conf = r10_bio->mddev->private;
int slot, repl;
struct md_rdev *rdev = NULL;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
@@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio)
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
if (repl)
/* Never record new bad blocks to replacement,
* just fail it.
@@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio)
if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
bio_put(bio);
if (repl)
r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 1b1ab4a1d132..a227a9f3ee65 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1087,7 +1087,7 @@ static int r5l_recovery_log(struct r5l_log *log)
* 1's seq + 10 and let superblock points to meta2. The same recovery will
* not think meta 3 is a valid meta, because its seq doesn't match
*/
- if (ctx.seq > log->last_cp_seq + 1) {
+ if (ctx.seq > log->last_cp_seq) {
int ret;
ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
@@ -1096,6 +1096,8 @@ static int r5l_recovery_log(struct r5l_log *log)
log->seq = ctx.seq + 11;
log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
r5l_write_super(log, ctx.pos);
+ log->last_checkpoint = ctx.pos;
+ log->next_checkpoint = ctx.pos;
} else {
log->log_start = ctx.pos;
log->seq = ctx.seq;
@@ -1154,6 +1156,7 @@ create:
if (create_super) {
log->last_cp_seq = prandom_u32();
cp = 0;
+ r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
/*
* Make sure super points to correct address. Log might have
* data very soon. If super hasn't correct log tail address,
@@ -1168,6 +1171,7 @@ create:
if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
log->max_free_space = RECLAIM_MAX_FREE_SPACE;
log->last_checkpoint = cp;
+ log->next_checkpoint = cp;
__free_page(page);
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 962f2a9a6614..7b8540291217 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -180,14 +180,14 @@ source "drivers/media/firewire/Kconfig"
# Common driver options
source "drivers/media/common/Kconfig"
-comment "Media ancillary drivers (tuners, sensors, i2c, frontends)"
+comment "Media ancillary drivers (tuners, sensors, i2c, spi, frontends)"
#
-# Ancillary drivers (tuners, i2c, frontends)
+# Ancillary drivers (tuners, i2c, spi, frontends)
#
config MEDIA_SUBDRV_AUTOSELECT
- bool "Autoselect ancillary drivers (tuners, sensors, i2c, frontends)"
+ bool "Autoselect ancillary drivers (tuners, sensors, i2c, spi, frontends)"
depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_CAMERA_SUPPORT || MEDIA_SDR_SUPPORT
depends on HAS_IOMEM
select I2C
@@ -216,6 +216,7 @@ config MEDIA_ATTACH
default MODULES
source "drivers/media/i2c/Kconfig"
+source "drivers/media/spi/Kconfig"
source "drivers/media/tuners/Kconfig"
source "drivers/media/dvb-frontends/Kconfig"
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 081a7866fd44..0deaa93efdee 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -32,6 +32,6 @@ obj-y += rc/
# Finally, merge the drivers that require the core
#
-obj-y += common/ platform/ pci/ usb/ mmc/ firewire/
+obj-y += common/ platform/ pci/ usb/ mmc/ firewire/ spi/
obj-$(CONFIG_VIDEO_DEV) += radio/
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 3ec3cebe62b9..1684810cab83 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -504,14 +504,14 @@ static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
#define COEFF(v, r) ((int)(0.5 + (v) * (r) * 256.0))
static const int bt601[3][3] = {
- { COEFF(0.299, 219), COEFF(0.587, 219), COEFF(0.114, 219) },
- { COEFF(-0.169, 224), COEFF(-0.331, 224), COEFF(0.5, 224) },
- { COEFF(0.5, 224), COEFF(-0.419, 224), COEFF(-0.081, 224) },
+ { COEFF(0.299, 219), COEFF(0.587, 219), COEFF(0.114, 219) },
+ { COEFF(-0.1687, 224), COEFF(-0.3313, 224), COEFF(0.5, 224) },
+ { COEFF(0.5, 224), COEFF(-0.4187, 224), COEFF(-0.0813, 224) },
};
static const int bt601_full[3][3] = {
- { COEFF(0.299, 255), COEFF(0.587, 255), COEFF(0.114, 255) },
- { COEFF(-0.169, 255), COEFF(-0.331, 255), COEFF(0.5, 255) },
- { COEFF(0.5, 255), COEFF(-0.419, 255), COEFF(-0.081, 255) },
+ { COEFF(0.299, 255), COEFF(0.587, 255), COEFF(0.114, 255) },
+ { COEFF(-0.1687, 255), COEFF(-0.3313, 255), COEFF(0.5, 255) },
+ { COEFF(0.5, 255), COEFF(-0.4187, 255), COEFF(-0.0813, 255) },
};
static const int rec709[3][3] = {
{ COEFF(0.2126, 219), COEFF(0.7152, 219), COEFF(0.0722, 219) },
@@ -558,7 +558,6 @@ static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
switch (tpg->real_ycbcr_enc) {
case V4L2_YCBCR_ENC_601:
- case V4L2_YCBCR_ENC_SYCC:
rgb2ycbcr(full ? bt601_full : bt601, r, g, b, y_offset, y, cb, cr);
break;
case V4L2_YCBCR_ENC_XV601:
@@ -674,7 +673,6 @@ static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
switch (tpg->real_ycbcr_enc) {
case V4L2_YCBCR_ENC_601:
- case V4L2_YCBCR_ENC_SYCC:
ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, y_offset, r, g, b);
break;
case V4L2_YCBCR_ENC_XV601:
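
The Cb/Cr rows now carry the four-digit BT.601 constants (-0.1687, -0.3313,
-0.4187, -0.0813) in both the limited- and full-range tables. COEFF(v, r)
stores round(v * r * 256), i.e. an 8.8 fixed-point coefficient pre-scaled
by the quantization range. A standalone check of that scheme for one
full-range pixel (plain 8-bit inputs here; the tpg code itself works on
inputs scaled by 16):

#include <stdio.h>

#define COEFF(v, r) ((int)(0.5 + (v) * (r) * 256.0))

int main(void)
{
	/* Full-range BT.601 matrix, as in bt601_full[][] above. */
	static const int m[3][3] = {
		{ COEFF(0.299, 255),   COEFF(0.587, 255),   COEFF(0.114, 255) },
		{ COEFF(-0.1687, 255), COEFF(-0.3313, 255), COEFF(0.5, 255) },
		{ COEFF(0.5, 255),     COEFF(-0.4187, 255), COEFF(-0.0813, 255) },
	};
	int r = 255, g = 128, b = 64;	/* example RGB pixel */

	int y  = (m[0][0] * r + m[0][1] * g + m[0][2] * b) / (255 * 256);
	int cb = (m[1][0] * r + m[1][1] * g + m[1][2] * b) / (255 * 256) + 128;
	int cr = (m[2][0] * r + m[2][1] * g + m[2][2] * b) / (255 * 256) + 128;

	printf("Y'=%d Cb=%d Cr=%d\n", y, cb, cr);
	return 0;
}
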
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index 4b4c1da20f4b..aeda2b64931c 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -202,7 +202,7 @@ struct dmx_section_feed {
*
* This function callback prototype, provided by the client of the demux API,
* is called from the demux code. The function is only called when filtering
- * on ae TS feed has been enabled using the start_filtering() function at
+ * on a TS feed has been enabled using the start_filtering\(\) function at
* the &dmx_demux.
* Any TS packets that match the filter settings are copied to a circular
* buffer. The filtered TS packets are delivered to the client using this
@@ -243,8 +243,10 @@ struct dmx_section_feed {
* will also be sent to the hardware MPEG decoder.
*
* Return:
- * 0, on success;
- * -EOVERFLOW, on buffer overflow.
+ *
+ * - 0, on success;
+ *
+ * - -EOVERFLOW, on buffer overflow.
*/
typedef int (*dmx_ts_cb)(const u8 *buffer1,
size_t buffer1_length,
@@ -293,9 +295,9 @@ typedef int (*dmx_section_cb)(const u8 *buffer1,
size_t buffer2_len,
struct dmx_section_filter *source);
-/*--------------------------------------------------------------------------*/
-/* DVB Front-End */
-/*--------------------------------------------------------------------------*/
+/*
+ * DVB Front-End
+ */
/**
* enum dmx_frontend_source - Used to identify the type of frontend
@@ -349,15 +351,15 @@ enum dmx_demux_caps {
/*
* Demux resource type identifier.
-*/
-
-/*
- * DMX_FE_ENTRY(): Casts elements in the list of registered
- * front-ends from the generic type struct list_head
- * to the type * struct dmx_frontend
- *.
-*/
+ */
+/**
+ * DMX_FE_ENTRY - Casts elements in the list of registered
+ * front-ends from the generic type struct list_head
+ * to the type * struct dmx_frontend
+ *
+ * @list: list of struct dmx_frontend
+ */
#define DMX_FE_ENTRY(list) \
list_entry(list, struct dmx_frontend, connectivity_list)
@@ -551,7 +553,6 @@ enum dmx_demux_caps {
* 0 on success;
* -EINVAL on bad parameter.
*/
-
struct dmx_demux {
enum dmx_demux_caps capabilities;
struct dmx_frontend *frontend;
@@ -581,15 +582,12 @@ struct dmx_demux {
int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids);
- /* private: Not used upstream and never documented */
-#if 0
- int (*get_caps)(struct dmx_demux *demux, struct dmx_caps *caps);
- int (*set_source)(struct dmx_demux *demux, const dmx_source_t *src);
-#endif
+ /* private: */
+
/*
- * private: Only used at av7110, to read some data from firmware.
- * As this was never documented, we have no clue about what's
- * there, and its usage on other drivers aren't encouraged.
+	 * Only used by av7110, to read some data from firmware.
+	 * As this was never documented, we have no clue about what's
+	 * there, and its use in other drivers isn't encouraged.
*/
int (*get_stc)(struct dmx_demux *demux, unsigned int num,
u64 *stc, unsigned int *base);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index be99c8dbc5f8..01511e5a5566 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -1969,17 +1969,9 @@ static int dvb_frontend_ioctl_properties(struct file *file,
if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
return -EINVAL;
- tvp = kmalloc(tvps->num * sizeof(struct dtv_property), GFP_KERNEL);
- if (!tvp) {
- err = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(tvp, (void __user *)tvps->props,
- tvps->num * sizeof(struct dtv_property))) {
- err = -EFAULT;
- goto out;
- }
+ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
for (i = 0; i < tvps->num; i++) {
err = dtv_property_process_set(fe, tvp + i, file);
@@ -2002,17 +1994,9 @@ static int dvb_frontend_ioctl_properties(struct file *file,
if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
return -EINVAL;
- tvp = kmalloc(tvps->num * sizeof(struct dtv_property), GFP_KERNEL);
- if (!tvp) {
- err = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(tvp, (void __user *)tvps->props,
- tvps->num * sizeof(struct dtv_property))) {
- err = -EFAULT;
- goto out;
- }
+ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
/*
* Let's use our own copy of property cache, in order to
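
Both ioctl branches collapse the kmalloc() + copy_from_user() pair into
memdup_user(), which reports failure as an ERR_PTR, so the copy-in path no
longer needs a cleanup label. The general shape of the conversion, as an
illustrative sketch:

static int example_copyin(void __user *user_ptr, size_t len)
{
	void *buf;

	/* One call replaces allocate-then-copy; -ENOMEM and -EFAULT
	 * come back encoded in the returned pointer. */
	buf = memdup_user(user_ptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... use buf ... */

	kfree(buf);		/* the success path still frees it */
	return 0;
}
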
diff --git a/drivers/media/dvb-core/dvb_math.h b/drivers/media/dvb-core/dvb_math.h
index 2f0326674ca6..4d11d3529c14 100644
--- a/drivers/media/dvb-core/dvb_math.h
+++ b/drivers/media/dvb-core/dvb_math.h
@@ -25,7 +25,7 @@
#include <linux/types.h>
/**
- * cintlog2 - computes log2 of a value; the result is shifted left by 24 bits
+ * intlog2 - computes log2 of a value; the result is shifted left by 24 bits
*
* @value: The value (must be != 0)
*
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index 8af642399f1e..bbe94873d44d 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -18,10 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _DVB_RINGBUFFER_H_
@@ -30,6 +26,18 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
+/**
+ * struct dvb_ringbuffer - Describes a ring buffer used at DVB framework
+ *
+ * @data: Area where the ringbuffer data is written
+ * @size: size of the ringbuffer
+ * @pread: next position to read
+ * @pwrite: next position to write
+ * @error: used by ringbuffer clients to indicate that an error happened.
+ * @queue: Wait queue used by ringbuffer clients to indicate when the
+ *	buffer was filled
+ * @lock: Spinlock used to protect the ringbuffer
+ */
struct dvb_ringbuffer {
u8 *data;
ssize_t size;
@@ -43,99 +51,161 @@ struct dvb_ringbuffer {
#define DVB_RINGBUFFER_PKTHDRSIZE 3
+/**
+ * dvb_ringbuffer_init - initialize ring buffer, lock and queue
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @data: pointer to the buffer where the data will be stored
+ * @len: size of the data buffer in bytes
+ */
+extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data,
+ size_t len);
-/*
- * Notes:
- * ------
- * (1) For performance reasons read and write routines don't check buffer sizes
- * and/or number of bytes free/available. This has to be done before these
- * routines are called. For example:
- *
- * *** write @buflen: bytes ***
- * free = dvb_ringbuffer_free(rbuf);
- * if (free >= buflen)
- * count = dvb_ringbuffer_write(rbuf, buffer, buflen);
- * else
- * ...
- *
- * *** read min. 1000, max. @bufsize: bytes ***
- * avail = dvb_ringbuffer_avail(rbuf);
- * if (avail >= 1000)
- * count = dvb_ringbuffer_read(rbuf, buffer, min(avail, bufsize));
- * else
- * ...
- *
- * (2) If there is exactly one reader and one writer, there is no need
- * to lock read or write operations.
- * Two or more readers must be locked against each other.
- * Flushing the buffer counts as a read operation.
- * Resetting the buffer counts as a read and write operation.
- * Two or more writers must be locked against each other.
- */
-
-/* initialize ring buffer, lock and queue */
-extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len);
-
-/* test whether buffer is empty */
+/**
+ * dvb_ringbuffer_empty - test whether buffer is empty
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ */
extern int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf);
-/* return the number of free bytes in the buffer */
+/**
+ * dvb_ringbuffer_free - returns the number of free bytes in the buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ *
+ * Return: number of free bytes in the buffer
+ */
extern ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf);
-/* return the number of bytes waiting in the buffer */
+/**
+ * dvb_ringbuffer_avail - returns the number of bytes waiting in the buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ *
+ * Return: number of bytes waiting in the buffer
+ */
extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf);
-
-/*
- * Reset the read and write pointers to zero and flush the buffer
+/**
+ * dvb_ringbuffer_reset - resets the ringbuffer to initial state
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ *
+ * Resets the read and write pointers to zero and flushes the buffer.
+ *
* This counts as a read and write operation
*/
extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf);
+/*
+ * read routines & macros
+ */
-/* read routines & macros */
-/* ---------------------- */
-/* flush buffer */
+/**
+ * dvb_ringbuffer_flush - flush buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ */
extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf);
-/* flush buffer protected by spinlock and wake-up waiting task(s) */
+/**
+ * dvb_ringbuffer_flush_spinlock_wakeup - flush buffer protected by spinlock
+ * and wake-up waiting task(s)
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ */
extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);
-/* peek at byte @offs: in the buffer */
-#define DVB_RINGBUFFER_PEEK(rbuf,offs) \
- (rbuf)->data[((rbuf)->pread+(offs))%(rbuf)->size]
+/**
+ * DVB_RINGBUFFER_PEEK - peek at byte @offs in the buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @offs: offset inside the ringbuffer
+ */
+#define DVB_RINGBUFFER_PEEK(rbuf, offs) \
+ ((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size])
-/* advance read ptr by @num: bytes */
-#define DVB_RINGBUFFER_SKIP(rbuf,num) \
- (rbuf)->pread=((rbuf)->pread+(num))%(rbuf)->size
+/**
+ * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @num: number of bytes to advance
+ */
+#define DVB_RINGBUFFER_SKIP(rbuf, num) {\
+ (rbuf)->pread = ((rbuf)->pread + (num)) % (rbuf)->size;\
+}
-/*
- * read @len: bytes from ring buffer into @buf:
- * @usermem: specifies whether @buf: resides in user space
- * returns number of bytes transferred or -EFAULT
+/**
+ * dvb_ringbuffer_read_user - Reads a buffer into an user pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be stored
+ * @len: bytes from ring buffer into @buf
+ *
+ * This variant assumes that the buffer is a memory area in userspace. So,
+ * it will internally call copy_to_user().
+ *
+ * Return: number of bytes transferred or -EFAULT
*/
extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf,
u8 __user *buf, size_t len);
+
+/**
+ * dvb_ringbuffer_read - Reads a buffer into a pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer where the data will be stored
+ * @len: bytes from ring buffer into @buf
+ *
+ * This variant assumes that the buffer is a memory area in kernel space
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf,
u8 *buf, size_t len);
-
-/* write routines & macros */
-/* ----------------------- */
-/* write single byte to ring buffer */
-#define DVB_RINGBUFFER_WRITE_BYTE(rbuf,byte) \
- { (rbuf)->data[(rbuf)->pwrite]=(byte); \
- (rbuf)->pwrite=((rbuf)->pwrite+1)%(rbuf)->size; }
/*
- * write @len: bytes to ring buffer
- * @usermem: specifies whether @buf: resides in user space
- * returns number of bytes transferred or -EFAULT
-*/
+ * write routines & macros
+ */
+
+/**
+ * DVB_RINGBUFFER_WRITE_BYTE - write single byte to ring buffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @byte: byte to write
+ */
+#define DVB_RINGBUFFER_WRITE_BYTE(rbuf, byte) \
+ { (rbuf)->data[(rbuf)->pwrite] = (byte); \
+ (rbuf)->pwrite = ((rbuf)->pwrite + 1) % (rbuf)->size; }
+
+/**
+ * dvb_ringbuffer_write - Writes a buffer into the ringbuffer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer holding the data to be written
+ * @len: bytes to write from @buf into the ring buffer
+ *
+ * This variant assumes that the buffer is a memory area in kernel space
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
size_t len);
-extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
- const u8 __user *buf, size_t len);
+/**
+ * dvb_ringbuffer_write_user - Writes a buffer received via an user pointer
+ *
+ * @rbuf: pointer to struct dvb_ringbuffer
+ * @buf: pointer to the buffer holding the data to be written
+ * @len: bytes to write from @buf into the ring buffer
+ *
+ * This variant assumes that the buffer is a memory area in userspace. So,
+ * it will internally call copy_from_user().
+ *
+ * Return: number of bytes transferred or -EFAULT
+ */
+extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
+ const u8 __user *buf, size_t len);
/**
* dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer.
@@ -143,9 +213,10 @@ extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
* @rbuf: Ringbuffer to write to.
* @buf: Buffer to write.
* @len: Length of buffer (currently limited to 65535 bytes max).
- * returns Number of bytes written, or -EFAULT, -ENOMEM, -EVINAL.
+ *
+ * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL.
*/
-extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
+extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf,
size_t len);
/**
@@ -157,7 +228,7 @@ extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
* @buf: Destination buffer for data.
* @len: Size of destination buffer.
*
- * returns Number of bytes read, or -EFAULT.
+ * Return: Number of bytes read, or -EFAULT.
*
* .. note::
*
@@ -167,7 +238,7 @@ extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
*/
extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf,
size_t idx,
- int offset, u8 __user *buf,
+ int offset, u8 __user *buf,
size_t len);
/**
@@ -181,7 +252,7 @@ extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf,
* @buf: Destination buffer for data.
* @len: Size of destination buffer.
*
- * returns Number of bytes read, or -EFAULT.
+ * Return: Number of bytes read, or -EFAULT.
*/
extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
int offset, u8 *buf, size_t len);
@@ -199,10 +270,11 @@ extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx);
*
* @rbuf: Ringbuffer concerned.
* @idx: Previous packet index, or -1 to return the first packet index.
- * @pktlen: On success, will be updated to contain the length of the packet in bytes.
+ * @pktlen: On success, will be updated to contain the length of the packet
+ * in bytes.
* returns Packet index (if >=0), or -1 if no packets available.
*/
-extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen);
-
+extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf,
+ size_t idx, size_t *pktlen);
#endif /* _DVB_RINGBUFFER_H_ */
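
The usage notes removed above still state the contract: these routines do no
bounds checking, so callers must compare dvb_ringbuffer_free() or
dvb_ringbuffer_avail() against their transfer size first. Both reduce to the
modular distance between pwrite and pread (one byte is kept unused so that
pread == pwrite always means "empty"). A standalone sanity check of that
arithmetic, with illustrative sizes:

#include <assert.h>
#include <stddef.h>

static size_t rb_avail(size_t pread, size_t pwrite, size_t size)
{
	return (pwrite - pread + size) % size;		/* bytes waiting */
}

static size_t rb_free(size_t pread, size_t pwrite, size_t size)
{
	return (pread - pwrite + size - 1) % size;	/* writable bytes */
}

int main(void)
{
	size_t size = 188;	/* e.g. one TS packet per buffer */

	assert(rb_avail(0, 0, size) == 0);		/* empty buffer */
	assert(rb_free(0, 0, size) == size - 1);	/* all but one byte */
	assert(rb_avail(180, 4, size) == 12);		/* wrapped writer */
	assert(rb_free(180, 4, size) == size - 13);
	return 0;
}
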
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index c645aa81f423..b71b747ee0ba 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -67,6 +67,7 @@ config DVB_TDA18271C2DD
config DVB_SI2165
tristate "Silicon Labs si2165 based"
depends on DVB_CORE && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
A DVB-C/T demodulator.
@@ -463,6 +464,7 @@ config DVB_STV0367
config DVB_CXD2820R
tristate "Sony CXD2820R"
depends on DVB_CORE && I2C
+ select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
@@ -511,6 +513,11 @@ config DVB_AS102_FE
depends on DVB_CORE
default DVB_AS102
+config DVB_GP8PSK_FE
+ tristate
+ depends on DVB_CORE
+ default DVB_USB_GP8PSK
+
comment "DVB-C (cable) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index e90165ad361b..93921a4eaa27 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -121,6 +121,7 @@ obj-$(CONFIG_DVB_RTL2832_SDR) += rtl2832_sdr.o
obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
obj-$(CONFIG_DVB_AF9033) += af9033.o
obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
+obj-$(CONFIG_DVB_GP8PSK_FE) += gp8psk-fe.o
obj-$(CONFIG_DVB_TC90522) += tc90522.o
obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
diff --git a/drivers/media/dvb-frontends/af9013.h b/drivers/media/dvb-frontends/af9013.h
index 1dcc936e1661..dcdd163ace85 100644
--- a/drivers/media/dvb-frontends/af9013.h
+++ b/drivers/media/dvb-frontends/af9013.h
@@ -25,7 +25,6 @@
#ifndef AF9013_H
#define AF9013_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
/* AF9013/5 GPIOs (mostly guessed)
diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
index 6ad22b69a636..5b83e4f96297 100644
--- a/drivers/media/dvb-frontends/af9033.h
+++ b/drivers/media/dvb-frontends/af9033.h
@@ -22,8 +22,6 @@
#ifndef AF9033_H
#define AF9033_H
-#include <linux/kconfig.h>
-
/*
* I2C address (TODO: are these in 8-bit format?)
* 0x38, 0x3a, 0x3c, 0x3e
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index 8cc8c4597b6a..ad304eed656d 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -464,7 +464,7 @@ static int ascot2e_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static struct dvb_tuner_ops ascot2e_tuner_ops = {
+static const struct dvb_tuner_ops ascot2e_tuner_ops = {
.info = {
.name = "Sony ASCOT2E",
.frequency_min = 1000000,
diff --git a/drivers/media/dvb-frontends/ascot2e.h b/drivers/media/dvb-frontends/ascot2e.h
index 6da4ae6d6cc3..dc61bf7d1b09 100644
--- a/drivers/media/dvb-frontends/ascot2e.h
+++ b/drivers/media/dvb-frontends/ascot2e.h
@@ -22,7 +22,6 @@
#ifndef __DVB_ASCOT2E_H__
#define __DVB_ASCOT2E_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
diff --git a/drivers/media/dvb-frontends/atbm8830.h b/drivers/media/dvb-frontends/atbm8830.h
index 5446d13fdfe8..bb862387080f 100644
--- a/drivers/media/dvb-frontends/atbm8830.h
+++ b/drivers/media/dvb-frontends/atbm8830.h
@@ -22,7 +22,6 @@
#ifndef __ATBM8830_H__
#define __ATBM8830_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
diff --git a/drivers/media/dvb-frontends/au8522.h b/drivers/media/dvb-frontends/au8522.h
index 78bf3f73e58d..21c51a4c519a 100644
--- a/drivers/media/dvb-frontends/au8522.h
+++ b/drivers/media/dvb-frontends/au8522.h
@@ -22,7 +22,6 @@
#ifndef __AU8522_H__
#define __AU8522_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
enum au8522_if_freq {
diff --git a/drivers/media/dvb-frontends/cx22702.h b/drivers/media/dvb-frontends/cx22702.h
index 68b69a7660d2..a1956a9ba406 100644
--- a/drivers/media/dvb-frontends/cx22702.h
+++ b/drivers/media/dvb-frontends/cx22702.h
@@ -28,7 +28,6 @@
#ifndef CX22702_H
#define CX22702_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct cx22702_config {
diff --git a/drivers/media/dvb-frontends/cx24113.h b/drivers/media/dvb-frontends/cx24113.h
index 962919b9b6e6..194c703611b4 100644
--- a/drivers/media/dvb-frontends/cx24113.h
+++ b/drivers/media/dvb-frontends/cx24113.h
@@ -22,8 +22,6 @@
#ifndef CX24113_H
#define CX24113_H
-#include <linux/kconfig.h>
-
struct dvb_frontend;
struct cx24113_config {
diff --git a/drivers/media/dvb-frontends/cx24116.h b/drivers/media/dvb-frontends/cx24116.h
index f6dbabc1d62b..9ff8df8d44b8 100644
--- a/drivers/media/dvb-frontends/cx24116.h
+++ b/drivers/media/dvb-frontends/cx24116.h
@@ -21,7 +21,6 @@
#ifndef CX24116_H
#define CX24116_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct cx24116_config {
diff --git a/drivers/media/dvb-frontends/cx24117.h b/drivers/media/dvb-frontends/cx24117.h
index 1648ab432168..445f13faf63a 100644
--- a/drivers/media/dvb-frontends/cx24117.h
+++ b/drivers/media/dvb-frontends/cx24117.h
@@ -22,7 +22,6 @@
#ifndef CX24117_H
#define CX24117_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct cx24117_config {
diff --git a/drivers/media/dvb-frontends/cx24120.h b/drivers/media/dvb-frontends/cx24120.h
index f0970423e16f..de4ca9aa0923 100644
--- a/drivers/media/dvb-frontends/cx24120.h
+++ b/drivers/media/dvb-frontends/cx24120.h
@@ -20,7 +20,6 @@
#ifndef CX24120_H
#define CX24120_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/firmware.h>
diff --git a/drivers/media/dvb-frontends/cx24123.h b/drivers/media/dvb-frontends/cx24123.h
index 975f3c926fe8..aac23444aa9a 100644
--- a/drivers/media/dvb-frontends/cx24123.h
+++ b/drivers/media/dvb-frontends/cx24123.h
@@ -21,7 +21,6 @@
#ifndef CX24123_H
#define CX24123_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct cx24123_config {
diff --git a/drivers/media/dvb-frontends/cxd2820r.h b/drivers/media/dvb-frontends/cxd2820r.h
index 56d42760263d..f3ff8f6eb3bb 100644
--- a/drivers/media/dvb-frontends/cxd2820r.h
+++ b/drivers/media/dvb-frontends/cxd2820r.h
@@ -22,7 +22,6 @@
#ifndef CXD2820R_H
#define CXD2820R_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#define CXD2820R_GPIO_D (0 << 0) /* disable */
@@ -37,6 +36,32 @@
#define CXD2820R_TS_PARALLEL 0x30
#define CXD2820R_TS_PARALLEL_MSB 0x70
+/*
+ * I2C address: 0x6c, 0x6d
+ */
+
+/**
+ * struct cxd2820r_platform_data - Platform data for the cxd2820r driver
+ * @ts_mode: TS mode.
+ * @ts_clk_inv: TS clock inverted.
+ * @if_agc_polarity: IF AGC polarity.
+ * @spec_inv: Input spectrum inverted.
+ * @gpio_chip_base: GPIO chip base pointer, used to return the allocated base.
+ * @get_dvb_frontend: Get DVB frontend.
+ */
+struct cxd2820r_platform_data {
+ u8 ts_mode;
+ bool ts_clk_inv;
+ bool if_agc_polarity;
+ bool spec_inv;
+ int **gpio_chip_base;
+
+ struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
+/* private: For legacy media attach wrapper. Do not set value. */
+ bool attach_in_use;
+};
+
struct cxd2820r_config {
/* Demodulator I2C address.
* Driver determines DVB-C slave I2C address automatically from master
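
A sketch of how a bridge driver could use the new platform data with plain
I2C client binding (adapter, address and TS mode are illustrative; the GPIO
target is left NULL, which makes probe skip GPIO chip registration; pdata
must stay valid because probe writes the get_dvb_frontend callback into it):

	static int *gpio_base; /* NULL: no GPIO chip wanted */

	static struct cxd2820r_platform_data cxd2820r_pdata = {
		.ts_mode = CXD2820R_TS_PARALLEL,
		.gpio_chip_base = &gpio_base,
	};

	static const struct i2c_board_info cxd2820r_info = {
		I2C_BOARD_INFO("cxd2820r", 0x6c),
		.platform_data = &cxd2820r_pdata,
	};

	client = i2c_new_device(adapter, &cxd2820r_info);
	/* after probe, cxd2820r_pdata.get_dvb_frontend(client) returns the fe */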
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index a674a6312c38..d75b0776d5b5 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -24,12 +24,12 @@
int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i;
+ int ret;
+ unsigned int utmp;
u8 buf[2];
- u32 if_freq;
- u16 if_ctl;
- u64 num;
+ u32 if_frequency;
struct reg_val_mask tab[] = {
{ 0x00080, 0x01, 0xff },
{ 0x00081, 0x05, 0xff },
@@ -43,25 +43,24 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
{ 0x10059, 0x50, 0xff },
{ 0x10087, 0x0c, 0x3c },
{ 0x1008b, 0x07, 0xff },
- { 0x1001f, priv->cfg.if_agc_polarity << 7, 0x80 },
- { 0x10070, priv->cfg.ts_mode, 0xff },
- { 0x10071, !priv->cfg.ts_clock_inv << 4, 0x10 },
+ { 0x1001f, priv->if_agc_polarity << 7, 0x80 },
+ { 0x10070, priv->ts_mode, 0xff },
+ { 0x10071, !priv->ts_clk_inv << 4, 0x10 },
};
- dev_dbg(&priv->i2c->dev, "%s: frequency=%d symbol_rate=%d\n", __func__,
- c->frequency, c->symbol_rate);
+ dev_dbg(&client->dev,
+ "delivery_system=%d modulation=%d frequency=%u symbol_rate=%u inversion=%d\n",
+ c->delivery_system, c->modulation, c->frequency,
+ c->symbol_rate, c->inversion);
/* program tuner */
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
if (priv->delivery_system != SYS_DVBC_ANNEX_A) {
- for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = cxd2820r_wr_reg_mask(priv, tab[i].reg,
- tab[i].val, tab[i].mask);
- if (ret)
- goto error;
- }
+ ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+ if (ret)
+ goto error;
}
priv->delivery_system = SYS_DVBC_ANNEX_A;
@@ -69,35 +68,33 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
/* program IF frequency */
if (fe->ops.tuner_ops.get_if_frequency) {
- ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
if (ret)
goto error;
- } else
- if_freq = 0;
-
- dev_dbg(&priv->i2c->dev, "%s: if_freq=%d\n", __func__, if_freq);
-
- num = if_freq / 1000; /* Hz => kHz */
- num *= 0x4000;
- if_ctl = 0x4000 - DIV_ROUND_CLOSEST_ULL(num, 41000);
- buf[0] = (if_ctl >> 8) & 0x3f;
- buf[1] = (if_ctl >> 0) & 0xff;
+ dev_dbg(&client->dev, "if_frequency=%u\n", if_frequency);
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
- ret = cxd2820r_wr_regs(priv, 0x10042, buf, 2);
+ utmp = 0x4000 - DIV_ROUND_CLOSEST_ULL((u64)if_frequency * 0x4000, CXD2820R_CLK);
+ buf[0] = (utmp >> 8) & 0xff;
+ buf[1] = (utmp >> 0) & 0xff;
+ ret = regmap_bulk_write(priv->regmap[1], 0x0042, buf, 2);
if (ret)
goto error;
- ret = cxd2820r_wr_reg(priv, 0x000ff, 0x08);
+ ret = regmap_write(priv->regmap[0], 0x00ff, 0x08);
if (ret)
goto error;
- ret = cxd2820r_wr_reg(priv, 0x000fe, 0x01);
+ ret = regmap_write(priv->regmap[0], 0x00fe, 0x01);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
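
Worked numbers for the IF control word above, assuming a 5 MHz tuner IF
against the 41 MHz demod clock (CXD2820R_CLK):

	/*
	 * utmp = 0x4000 - DIV_ROUND_CLOSEST(5000000 * 0x4000, 41000000)
	 *      = 16384 - 1998 = 14386 = 0x3832
	 * buf[] = { 0x38, 0x32 }, written to bank-1 registers 0x42/0x43
	 */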
@@ -105,20 +102,24 @@ int cxd2820r_get_frontend_c(struct dvb_frontend *fe,
struct dtv_frontend_properties *c)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
int ret;
+ unsigned int utmp;
u8 buf[2];
- ret = cxd2820r_rd_regs(priv, 0x1001a, buf, 2);
+ dev_dbg(&client->dev, "\n");
+
+ ret = regmap_bulk_read(priv->regmap[1], 0x001a, buf, 2);
if (ret)
goto error;
c->symbol_rate = 2500 * ((buf[0] & 0x0f) << 8 | buf[1]);
- ret = cxd2820r_rd_reg(priv, 0x10019, &buf[0]);
+ ret = regmap_read(priv->regmap[1], 0x0019, &utmp);
if (ret)
goto error;
- switch ((buf[0] >> 0) & 0x07) {
+ switch ((utmp >> 0) & 0x07) {
case 0:
c->modulation = QAM_16;
break;
@@ -136,7 +137,7 @@ int cxd2820r_get_frontend_c(struct dvb_frontend *fe,
break;
}
- switch ((buf[0] >> 7) & 0x01) {
+ switch ((utmp >> 7) & 0x01) {
case 0:
c->inversion = INVERSION_OFF;
break;
@@ -147,167 +148,169 @@ int cxd2820r_get_frontend_c(struct dvb_frontend *fe,
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-int cxd2820r_read_ber_c(struct dvb_frontend *fe, u32 *ber)
+int cxd2820r_read_status_c(struct dvb_frontend *fe, enum fe_status *status)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- u8 buf[3], start_ber = 0;
- *ber = 0;
+ unsigned int utmp, utmp1, utmp2;
+ u8 buf[3];
- if (priv->ber_running) {
- ret = cxd2820r_rd_regs(priv, 0x10076, buf, sizeof(buf));
- if (ret)
- goto error;
+ /* Lock detection */
+ ret = regmap_bulk_read(priv->regmap[1], 0x0088, &buf[0], 1);
+ if (ret)
+ goto error;
+ ret = regmap_bulk_read(priv->regmap[1], 0x0073, &buf[1], 1);
+ if (ret)
+ goto error;
- if ((buf[2] >> 7) & 0x01 || (buf[2] >> 4) & 0x01) {
- *ber = (buf[2] & 0x0f) << 16 | buf[1] << 8 | buf[0];
- start_ber = 1;
- }
+ utmp1 = (buf[0] >> 0) & 0x01;
+ utmp2 = (buf[1] >> 3) & 0x01;
+
+ if (utmp1 == 1 && utmp2 == 1) {
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ } else if (utmp1 == 1 || utmp2 == 1) {
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC;
} else {
- priv->ber_running = true;
- start_ber = 1;
+ *status = 0;
}
- if (start_ber) {
- /* (re)start BER */
- ret = cxd2820r_wr_reg(priv, 0x10079, 0x01);
- if (ret)
- goto error;
- }
+ dev_dbg(&client->dev, "status=%02x raw=%*ph sync=%u ts=%u\n",
+ *status, 2, buf, utmp1, utmp2);
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ /* Signal strength */
+ if (*status & FE_HAS_SIGNAL) {
+ unsigned int strength;
-int cxd2820r_read_signal_strength_c(struct dvb_frontend *fe,
- u16 *strength)
-{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- u8 buf[2];
- u16 tmp;
+ ret = regmap_bulk_read(priv->regmap[1], 0x0049, buf, 2);
+ if (ret)
+ goto error;
- ret = cxd2820r_rd_regs(priv, 0x10049, buf, sizeof(buf));
- if (ret)
- goto error;
+ utmp = buf[0] << 8 | buf[1] << 0;
+ utmp = 511 - sign_extend32(utmp, 9);
+ /* Scale value to 0x0000-0xffff */
+ strength = utmp << 6 | utmp >> 4;
- tmp = (buf[0] & 0x03) << 8 | buf[1];
- tmp = (~tmp & 0x03ff);
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = strength;
+ } else {
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
- if (tmp == 512)
- /* ~no signal */
- tmp = 0;
- else if (tmp > 350)
- tmp = 350;
+ /* CNR */
+ if (*status & FE_HAS_VITERBI) {
+ unsigned int cnr, const_a, const_b;
- /* scale value to 0x0000-0xffff */
- *strength = tmp * 0xffff / (350-0);
+ ret = regmap_read(priv->regmap[1], 0x0019, &utmp);
+ if (ret)
+ goto error;
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ if (((utmp >> 0) & 0x03) % 2) {
+ const_a = 8750;
+ const_b = 650;
+ } else {
+ const_a = 9500;
+ const_b = 760;
+ }
-int cxd2820r_read_snr_c(struct dvb_frontend *fe, u16 *snr)
-{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- u8 tmp;
- unsigned int A, B;
- /* report SNR in dB * 10 */
+ ret = regmap_read(priv->regmap[1], 0x004d, &utmp);
+ if (ret)
+ goto error;
- ret = cxd2820r_rd_reg(priv, 0x10019, &tmp);
- if (ret)
- goto error;
+ #define CXD2820R_LOG2_E_24 24204406 /* log2(e) << 24 */
+ if (utmp)
+ cnr = div_u64((u64)(intlog2(const_b) - intlog2(utmp))
+ * const_a, CXD2820R_LOG2_E_24);
+ else
+ cnr = 0;
- if (((tmp >> 0) & 0x03) % 2) {
- A = 875;
- B = 650;
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = cnr;
} else {
- A = 950;
- B = 760;
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
- ret = cxd2820r_rd_reg(priv, 0x1004d, &tmp);
- if (ret)
- goto error;
-
- #define CXD2820R_LOG2_E_24 24204406 /* log2(e) << 24 */
- if (tmp)
- *snr = A * (intlog2(B / tmp) >> 5) / (CXD2820R_LOG2_E_24 >> 5)
- / 10;
- else
- *snr = 0;
-
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ /* BER */
+ if (*status & FE_HAS_SYNC) {
+ unsigned int post_bit_error;
+ bool start_ber;
-int cxd2820r_read_ucblocks_c(struct dvb_frontend *fe, u32 *ucblocks)
-{
- *ucblocks = 0;
- /* no way to read ? */
- return 0;
-}
+ if (priv->ber_running) {
+ ret = regmap_bulk_read(priv->regmap[1], 0x0076, buf, 3);
+ if (ret)
+ goto error;
-int cxd2820r_read_status_c(struct dvb_frontend *fe, enum fe_status *status)
-{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- u8 buf[2];
- *status = 0;
+ if ((buf[2] >> 7) & 0x01) {
+ post_bit_error = buf[2] << 16 | buf[1] << 8 |
+ buf[0] << 0;
+ post_bit_error &= 0x0fffff;
+ start_ber = true;
+ } else {
+ post_bit_error = 0;
+ start_ber = false;
+ }
+ } else {
+ post_bit_error = 0;
+ start_ber = true;
+ }
- ret = cxd2820r_rd_regs(priv, 0x10088, buf, sizeof(buf));
- if (ret)
- goto error;
+ if (start_ber) {
+ ret = regmap_write(priv->regmap[1], 0x0079, 0x01);
+ if (ret)
+ goto error;
+ priv->ber_running = true;
+ }
- if (((buf[0] >> 0) & 0x01) == 1) {
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
- FE_HAS_VITERBI | FE_HAS_SYNC;
+ priv->post_bit_error += post_bit_error;
- if (((buf[1] >> 3) & 0x01) == 1) {
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
- FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
- }
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = priv->post_bit_error;
+ } else {
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
- dev_dbg(&priv->i2c->dev, "%s: lock=%02x %02x\n", __func__, buf[0],
- buf[1]);
-
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
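
Worked numbers for the CNR estimate above: intlog2() returns log2(x) << 24,
so the expression reduces to const_a * ln(const_b / utmp). With the
const_a = 8750 / const_b = 650 pair and a hypothetical reading utmp = 100:

	/*
	 * cnr = 8750 * (log2(650) - log2(100)) / log2(e)
	 *     = 8750 * ln(6.5) ~= 8750 * 1.8718 ~= 16378
	 * i.e. about 16.4 dB, as FE_SCALE_DECIBEL svalue is in 0.001 dB steps
	 */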
int cxd2820r_init_c(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
int ret;
- ret = cxd2820r_wr_reg(priv, 0x00085, 0x07);
+ dev_dbg(&client->dev, "\n");
+
+ ret = regmap_write(priv->regmap[0], 0x0085, 0x07);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
int cxd2820r_sleep_c(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret, i;
+ struct i2c_client *client = priv->client[0];
+ int ret;
struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
@@ -316,20 +319,17 @@ int cxd2820r_sleep_c(struct dvb_frontend *fe)
{ 0x00080, 0x00, 0xff },
};
- dev_dbg(&priv->i2c->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "\n");
priv->delivery_system = SYS_UNDEFINED;
- for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = cxd2820r_wr_reg_mask(priv, tab[i].reg, tab[i].val,
- tab[i].mask);
- if (ret)
- goto error;
- }
+ ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+ if (ret)
+ goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index 314d3b8c1080..95267c6edb3a 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -21,178 +21,50 @@
#include "cxd2820r_priv.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
-/* write multiple registers */
-static int cxd2820r_wr_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
- u8 *val, int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[1] = {
- {
- .addr = i2c,
- .flags = 0,
- .len = len + 1,
- .buf = buf,
- }
- };
-
- if (1 + len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- buf[0] = reg;
- memcpy(&buf[1], val, len);
-
- ret = i2c_transfer(priv->i2c, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
- return ret;
-}
-
-/* read multiple registers */
-static int cxd2820r_rd_regs_i2c(struct cxd2820r_priv *priv, u8 i2c, u8 reg,
- u8 *val, int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[2] = {
- {
- .addr = i2c,
- .flags = 0,
- .len = 1,
- .buf = &reg,
- }, {
- .addr = i2c,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
-
- if (len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- ret = i2c_transfer(priv->i2c, msg, 2);
- if (ret == 2) {
- memcpy(val, buf, len);
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
-
- return ret;
-}
-
-/* write multiple registers */
-int cxd2820r_wr_regs(struct cxd2820r_priv *priv, u32 reginfo, u8 *val,
- int len)
+/* Write register table */
+int cxd2820r_wr_reg_val_mask_tab(struct cxd2820r_priv *priv,
+ const struct reg_val_mask *tab, int tab_len)
{
+ struct i2c_client *client = priv->client[0];
int ret;
- u8 i2c_addr;
- u8 reg = (reginfo >> 0) & 0xff;
- u8 bank = (reginfo >> 8) & 0xff;
- u8 i2c = (reginfo >> 16) & 0x01;
-
- /* select I2C */
- if (i2c)
- i2c_addr = priv->cfg.i2c_address | (1 << 1); /* DVB-C */
- else
- i2c_addr = priv->cfg.i2c_address; /* DVB-T/T2 */
+ unsigned int i, reg, mask, val;
+ struct regmap *regmap;
- /* switch bank if needed */
- if (bank != priv->bank[i2c]) {
- ret = cxd2820r_wr_regs_i2c(priv, i2c_addr, 0x00, &bank, 1);
- if (ret)
- return ret;
- priv->bank[i2c] = bank;
- }
- return cxd2820r_wr_regs_i2c(priv, i2c_addr, reg, val, len);
-}
-
-/* read multiple registers */
-int cxd2820r_rd_regs(struct cxd2820r_priv *priv, u32 reginfo, u8 *val,
- int len)
-{
- int ret;
- u8 i2c_addr;
- u8 reg = (reginfo >> 0) & 0xff;
- u8 bank = (reginfo >> 8) & 0xff;
- u8 i2c = (reginfo >> 16) & 0x01;
-
- /* select I2C */
- if (i2c)
- i2c_addr = priv->cfg.i2c_address | (1 << 1); /* DVB-C */
- else
- i2c_addr = priv->cfg.i2c_address; /* DVB-T/T2 */
+ dev_dbg(&client->dev, "tab_len=%d\n", tab_len);
- /* switch bank if needed */
- if (bank != priv->bank[i2c]) {
- ret = cxd2820r_wr_regs_i2c(priv, i2c_addr, 0x00, &bank, 1);
- if (ret)
- return ret;
- priv->bank[i2c] = bank;
- }
- return cxd2820r_rd_regs_i2c(priv, i2c_addr, reg, val, len);
-}
-
-/* write single register */
-int cxd2820r_wr_reg(struct cxd2820r_priv *priv, u32 reg, u8 val)
-{
- return cxd2820r_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register */
-int cxd2820r_rd_reg(struct cxd2820r_priv *priv, u32 reg, u8 *val)
-{
- return cxd2820r_rd_regs(priv, reg, val, 1);
-}
+ for (i = 0; i < tab_len; i++) {
+ if ((tab[i].reg >> 16) & 0x1)
+ regmap = priv->regmap[1];
+ else
+ regmap = priv->regmap[0];
-/* write single register with mask */
-int cxd2820r_wr_reg_mask(struct cxd2820r_priv *priv, u32 reg, u8 val,
- u8 mask)
-{
- int ret;
- u8 tmp;
+ reg = (tab[i].reg >> 0) & 0xffff;
+ val = tab[i].val;
+ mask = tab[i].mask;
- /* no need for read if whole reg is written */
- if (mask != 0xff) {
- ret = cxd2820r_rd_reg(priv, reg, &tmp);
+ if (mask == 0xff)
+ ret = regmap_write(regmap, reg, val);
+ else
+ ret = regmap_write_bits(regmap, reg, mask, val);
if (ret)
- return ret;
-
- val &= mask;
- tmp &= ~mask;
- val |= tmp;
+ goto error;
}
- return cxd2820r_wr_reg(priv, reg, val);
+ return 0;
+error:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
}
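
Note that the legacy reginfo encoding survives unchanged in the register
tables; the helper just splits it, as sketched here with values taken from
the tables above:

	/*
	 * { 0x10059, 0x50, 0xff }: bit 16 set   -> regmap[1], reg 0x0059
	 * { 0x00080, 0x01, 0xff }: bit 16 clear -> regmap[0], reg 0x0080
	 * mask 0xff takes the plain regmap_write() path, anything else
	 * goes through regmap_write_bits()
	 */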
int cxd2820r_gpio(struct dvb_frontend *fe, u8 *gpio)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 tmp0, tmp1;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
/* update GPIOs only when needed */
if (!memcmp(gpio, priv->gpio, sizeof(priv->gpio)))
@@ -219,20 +91,18 @@ int cxd2820r_gpio(struct dvb_frontend *fe, u8 *gpio)
else
tmp1 |= (0 << (0 + i));
- dev_dbg(&priv->i2c->dev, "%s: gpio i=%d %02x %02x\n", __func__,
- i, tmp0, tmp1);
+ dev_dbg(&client->dev, "gpio i=%d %02x %02x\n", i, tmp0, tmp1);
}
- dev_dbg(&priv->i2c->dev, "%s: wr gpio=%02x %02x\n", __func__, tmp0,
- tmp1);
+ dev_dbg(&client->dev, "wr gpio=%02x %02x\n", tmp0, tmp1);
/* write bits [7:2] */
- ret = cxd2820r_wr_reg_mask(priv, 0x00089, tmp0, 0xfc);
+ ret = regmap_update_bits(priv->regmap[0], 0x0089, 0xfc, tmp0);
if (ret)
goto error;
/* write bits [5:0] */
- ret = cxd2820r_wr_reg_mask(priv, 0x0008e, tmp1, 0x3f);
+ ret = regmap_update_bits(priv->regmap[0], 0x008e, 0x3f, tmp1);
if (ret)
goto error;
@@ -240,18 +110,18 @@ int cxd2820r_gpio(struct dvb_frontend *fe, u8 *gpio)
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
static int cxd2820r_set_frontend(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
switch (c->delivery_system) {
case SYS_DVBT:
@@ -279,8 +149,7 @@ static int cxd2820r_set_frontend(struct dvb_frontend *fe)
goto err;
break;
default:
- dev_dbg(&priv->i2c->dev, "%s: error state=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "invalid delivery_system\n");
ret = -EINVAL;
break;
}
@@ -291,12 +160,13 @@ err:
static int cxd2820r_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
- switch (fe->dtv_property_cache.delivery_system) {
+ switch (c->delivery_system) {
case SYS_DVBT:
ret = cxd2820r_read_status_t(fe, status);
break;
@@ -317,15 +187,16 @@ static int cxd2820r_get_frontend(struct dvb_frontend *fe,
struct dtv_frontend_properties *p)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
if (priv->delivery_system == SYS_UNDEFINED)
return 0;
- switch (fe->dtv_property_cache.delivery_system) {
+ switch (c->delivery_system) {
case SYS_DVBT:
ret = cxd2820r_get_frontend_t(fe, p);
break;
@@ -345,101 +216,60 @@ static int cxd2820r_get_frontend(struct dvb_frontend *fe,
static int cxd2820r_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_read_ber_t(fe, ber);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_read_ber_t2(fe, ber);
- break;
- case SYS_DVBC_ANNEX_A:
- ret = cxd2820r_read_ber_c(fe, ber);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
+ *ber = (priv->post_bit_error - priv->post_bit_error_prev_dvbv3);
+ priv->post_bit_error_prev_dvbv3 = priv->post_bit_error;
+
+ return 0;
}
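
The DVBv3 BER is now emulated from the DVBv5 counter: post_bit_error only
ever grows (read_status accumulates it), so each read_ber call returns the
delta since the previous call. With illustrative counter values:

	/*
	 * read_status: post_bit_error += 1000  (counter 1000)
	 * read_ber:    *ber = 1000 - 0 = 1000  (prev <- 1000)
	 * read_status: post_bit_error += 250   (counter 1250)
	 * read_ber:    *ber = 1250 - 1000 = 250
	 */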
static int cxd2820r_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_read_signal_strength_t(fe, strength);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_read_signal_strength_t2(fe, strength);
- break;
- case SYS_DVBC_ANNEX_A:
- ret = cxd2820r_read_signal_strength_c(fe, strength);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
+ if (c->strength.stat[0].scale == FE_SCALE_RELATIVE)
+ *strength = c->strength.stat[0].uvalue;
+ else
+ *strength = 0;
+
+ return 0;
}
static int cxd2820r_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_read_snr_t(fe, snr);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_read_snr_t2(fe, snr);
- break;
- case SYS_DVBC_ANNEX_A:
- ret = cxd2820r_read_snr_c(fe, snr);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
+ if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
+ *snr = div_s64(c->cnr.stat[0].svalue, 100);
+ else
+ *snr = 0;
+
+ return 0;
}
static int cxd2820r_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
- switch (fe->dtv_property_cache.delivery_system) {
- case SYS_DVBT:
- ret = cxd2820r_read_ucblocks_t(fe, ucblocks);
- break;
- case SYS_DVBT2:
- ret = cxd2820r_read_ucblocks_t2(fe, ucblocks);
- break;
- case SYS_DVBC_ANNEX_A:
- ret = cxd2820r_read_ucblocks_c(fe, ucblocks);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
+ *ucblocks = 0;
+
+ return 0;
}
static int cxd2820r_init(struct dvb_frontend *fe)
@@ -450,12 +280,13 @@ static int cxd2820r_init(struct dvb_frontend *fe)
static int cxd2820r_sleep(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
- switch (fe->dtv_property_cache.delivery_system) {
+ switch (c->delivery_system) {
case SYS_DVBT:
ret = cxd2820r_sleep_t(fe);
break;
@@ -476,12 +307,13 @@ static int cxd2820r_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *s)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
- switch (fe->dtv_property_cache.delivery_system) {
+ switch (c->delivery_system) {
case SYS_DVBT:
ret = cxd2820r_get_tune_settings_t(fe, s);
break;
@@ -501,12 +333,12 @@ static int cxd2820r_get_tune_settings(struct dvb_frontend *fe,
static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
enum fe_status status = 0;
- dev_dbg(&priv->i2c->dev, "%s: delsys=%d\n", __func__,
- fe->dtv_property_cache.delivery_system);
+ dev_dbg(&client->dev, "delivery_system=%d\n", c->delivery_system);
/* switch between DVB-T and DVB-T2 when tune fails */
if (priv->last_tune_failed) {
@@ -530,7 +362,6 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
if (ret)
goto error;
-
/* frontend lock wait loop count */
switch (priv->delivery_system) {
case SYS_DVBT:
@@ -548,7 +379,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
/* wait frontend lock */
for (; i > 0; i--) {
- dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
+ dev_dbg(&client->dev, "loop=%d\n", i);
msleep(50);
ret = cxd2820r_read_status(fe, &status);
if (ret)
@@ -568,7 +399,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
}
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return DVBFE_ALGO_SEARCH_ERROR;
}
@@ -580,27 +411,23 @@ static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
static void cxd2820r_release(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
- dev_dbg(&priv->i2c->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "\n");
-#ifdef CONFIG_GPIOLIB
- /* remove GPIOs */
- if (priv->gpio_chip.label)
- gpiochip_remove(&priv->gpio_chip);
+ i2c_unregister_device(client);
-#endif
- kfree(priv);
return;
}
static int cxd2820r_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
- dev_dbg(&priv->i2c->dev, "%s: %d\n", __func__, enable);
+ dev_dbg_ratelimited(&client->dev, "enable=%d\n", enable);
- /* Bit 0 of reg 0xdb in bank 0x00 controls I2C repeater */
- return cxd2820r_wr_reg_mask(priv, 0xdb, enable ? 1 : 0, 0x1);
+ return regmap_update_bits(priv->regmap[0], 0x00db, 0x01, enable ? 1 : 0);
}
#ifdef CONFIG_GPIOLIB
@@ -608,9 +435,10 @@ static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr,
int val)
{
struct cxd2820r_priv *priv = gpiochip_get_data(chip);
+ struct i2c_client *client = priv->client[0];
u8 gpio[GPIO_COUNT];
- dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
+ dev_dbg(&client->dev, "nr=%u val=%d\n", nr, val);
memcpy(gpio, priv->gpio, sizeof(gpio));
gpio[nr] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | (val << 2);
@@ -621,9 +449,10 @@ static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr,
static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
{
struct cxd2820r_priv *priv = gpiochip_get_data(chip);
+ struct i2c_client *client = priv->client[0];
u8 gpio[GPIO_COUNT];
- dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
+ dev_dbg(&client->dev, "nr=%u val=%d\n", nr, val);
memcpy(gpio, priv->gpio, sizeof(gpio));
gpio[nr] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | (val << 2);
@@ -636,8 +465,9 @@ static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
static int cxd2820r_gpio_get(struct gpio_chip *chip, unsigned nr)
{
struct cxd2820r_priv *priv = gpiochip_get_data(chip);
+ struct i2c_client *client = priv->client[0];
- dev_dbg(&priv->i2c->dev, "%s: nr=%d\n", __func__, nr);
+ dev_dbg(&client->dev, "nr=%u\n", nr);
return (priv->gpio[nr] >> 2) & 0x01;
}
@@ -689,52 +519,163 @@ static const struct dvb_frontend_ops cxd2820r_ops = {
.read_signal_strength = cxd2820r_read_signal_strength,
};
-struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
- struct i2c_adapter *i2c, int *gpio_chip_base
-)
+/*
+ * XXX: This is a wrapper to cxd2820r_probe() via the driver core, used to
+ * provide a proper I2C client for the legacy media attach binding.
+ * New users must use I2C client binding directly!
+ */
+struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *config,
+ struct i2c_adapter *adapter,
+ int *gpio_chip_base)
+{
+ struct i2c_client *client;
+ struct i2c_board_info board_info;
+ struct cxd2820r_platform_data pdata;
+
+ pdata.ts_mode = config->ts_mode;
+ pdata.ts_clk_inv = config->ts_clock_inv;
+ pdata.if_agc_polarity = config->if_agc_polarity;
+ pdata.spec_inv = config->spec_inv;
+ pdata.gpio_chip_base = &gpio_chip_base;
+ pdata.attach_in_use = true;
+
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "cxd2820r", I2C_NAME_SIZE);
+ board_info.addr = config->i2c_address;
+ board_info.platform_data = &pdata;
+ client = i2c_new_device(adapter, &board_info);
+ if (!client || !client->dev.driver)
+ return NULL;
+
+ return pdata.get_dvb_frontend(client);
+}
+EXPORT_SYMBOL(cxd2820r_attach);
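
Call sites keep the old pattern; a minimal legacy-binding sketch (adapter
and config values illustrative, NULL disables the GPIO chip):

	struct cxd2820r_config config = {
		.i2c_address = 0x6c,
		.ts_mode = CXD2820R_TS_PARALLEL,
	};
	struct dvb_frontend *fe;

	fe = dvb_attach(cxd2820r_attach, &config, adapter, NULL);
	if (!fe)
		return -ENODEV; /* demod not found */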
+
+static struct dvb_frontend *cxd2820r_get_dvb_frontend(struct i2c_client *client)
+{
+ struct cxd2820r_priv *priv = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ return &priv->fe;
+}
+
+static int cxd2820r_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
+ struct cxd2820r_platform_data *pdata = client->dev.platform_data;
struct cxd2820r_priv *priv;
- int ret;
- u8 tmp;
+ int ret, *gpio_chip_base;
+ unsigned int utmp;
+ static const struct regmap_range_cfg regmap_range_cfg0[] = {
+ {
+ .range_min = 0x0000,
+ .range_max = 0x3fff,
+ .selector_reg = 0x00,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0x00,
+ .window_len = 0x100,
+ },
+ };
+ static const struct regmap_range_cfg regmap_range_cfg1[] = {
+ {
+ .range_min = 0x0000,
+ .range_max = 0x01ff,
+ .selector_reg = 0x00,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0x00,
+ .window_len = 0x100,
+ },
+ };
+ static const struct regmap_config regmap_config0 = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x3fff,
+ .ranges = regmap_range_cfg0,
+ .num_ranges = ARRAY_SIZE(regmap_range_cfg0),
+ .cache_type = REGCACHE_NONE,
+ };
+ static const struct regmap_config regmap_config1 = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x01ff,
+ .ranges = regmap_range_cfg1,
+ .num_ranges = ARRAY_SIZE(regmap_range_cfg1),
+ .cache_type = REGCACHE_NONE,
+ };
+
+ dev_dbg(&client->dev, "\n");
- priv = kzalloc(sizeof(struct cxd2820r_priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
- dev_err(&i2c->dev, "%s: kzalloc() failed\n",
- KBUILD_MODNAME);
- goto error;
+ goto err;
}
- priv->i2c = i2c;
- memcpy(&priv->cfg, cfg, sizeof(struct cxd2820r_config));
- memcpy(&priv->fe.ops, &cxd2820r_ops, sizeof(struct dvb_frontend_ops));
- priv->fe.demodulator_priv = priv;
+ priv->client[0] = client;
+ priv->i2c = client->adapter;
+ priv->ts_mode = pdata->ts_mode;
+ priv->ts_clk_inv = pdata->ts_clk_inv;
+ priv->if_agc_polarity = pdata->if_agc_polarity;
+ priv->spec_inv = pdata->spec_inv;
+ gpio_chip_base = *pdata->gpio_chip_base;
+ priv->regmap[0] = regmap_init_i2c(priv->client[0], &regmap_config0);
+ if (IS_ERR(priv->regmap[0])) {
+ ret = PTR_ERR(priv->regmap[0]);
+ goto err_kfree;
+ }
- priv->bank[0] = priv->bank[1] = 0xff;
- ret = cxd2820r_rd_reg(priv, 0x000fd, &tmp);
- dev_dbg(&priv->i2c->dev, "%s: chip id=%02x\n", __func__, tmp);
- if (ret || tmp != 0xe1)
- goto error;
+	/* Check that the demod answers with the correct chip id */
+ ret = regmap_read(priv->regmap[0], 0x00fd, &utmp);
+ if (ret)
+ goto err_regmap_0_regmap_exit;
+
+ dev_dbg(&client->dev, "chip_id=%02x\n", utmp);
+
+ if (utmp != 0xe1) {
+ ret = -ENODEV;
+ goto err_regmap_0_regmap_exit;
+ }
+
+ /*
+ * Chip has two I2C addresses for different register banks. We register
+	 * one dummy I2C client in order to get its own I2C client for each
+ * register bank.
+ */
+ priv->client[1] = i2c_new_dummy(client->adapter, client->addr | (1 << 1));
+ if (!priv->client[1]) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "I2C registration failed\n");
+		goto err_regmap_0_regmap_exit;
+ }
+
+ priv->regmap[1] = regmap_init_i2c(priv->client[1], &regmap_config1);
+ if (IS_ERR(priv->regmap[1])) {
+ ret = PTR_ERR(priv->regmap[1]);
+ goto err_client_1_i2c_unregister_device;
+ }
if (gpio_chip_base) {
#ifdef CONFIG_GPIOLIB
- /* add GPIOs */
+ /* Add GPIOs */
priv->gpio_chip.label = KBUILD_MODNAME;
- priv->gpio_chip.parent = &priv->i2c->dev;
+ priv->gpio_chip.parent = &client->dev;
priv->gpio_chip.owner = THIS_MODULE;
- priv->gpio_chip.direction_output =
- cxd2820r_gpio_direction_output;
+ priv->gpio_chip.direction_output = cxd2820r_gpio_direction_output;
priv->gpio_chip.set = cxd2820r_gpio_set;
priv->gpio_chip.get = cxd2820r_gpio_get;
- priv->gpio_chip.base = -1; /* dynamic allocation */
+ priv->gpio_chip.base = -1; /* Dynamic allocation */
priv->gpio_chip.ngpio = GPIO_COUNT;
priv->gpio_chip.can_sleep = 1;
ret = gpiochip_add_data(&priv->gpio_chip, priv);
if (ret)
- goto error;
+ goto err_regmap_1_regmap_exit;
- dev_dbg(&priv->i2c->dev, "%s: gpio_chip.base=%d\n", __func__,
- priv->gpio_chip.base);
+ dev_dbg(&client->dev, "gpio_chip.base=%d\n",
+ priv->gpio_chip.base);
*gpio_chip_base = priv->gpio_chip.base;
#else
@@ -748,17 +689,73 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
gpio[2] = 0;
ret = cxd2820r_gpio(&priv->fe, gpio);
if (ret)
- goto error;
+ goto err_regmap_1_regmap_exit;
#endif
}
- return &priv->fe;
-error:
- dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
+ /* Create dvb frontend */
+ memcpy(&priv->fe.ops, &cxd2820r_ops, sizeof(priv->fe.ops));
+ if (!pdata->attach_in_use)
+ priv->fe.ops.release = NULL;
+ priv->fe.demodulator_priv = priv;
+ i2c_set_clientdata(client, priv);
+
+ /* Setup callbacks */
+ pdata->get_dvb_frontend = cxd2820r_get_dvb_frontend;
+
+ dev_info(&client->dev, "Sony CXD2820R successfully identified\n");
+
+ return 0;
+err_regmap_1_regmap_exit:
+ regmap_exit(priv->regmap[1]);
+err_client_1_i2c_unregister_device:
+ i2c_unregister_device(priv->client[1]);
+err_regmap_0_regmap_exit:
+ regmap_exit(priv->regmap[0]);
+err_kfree:
kfree(priv);
- return NULL;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
}
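
The regmap ranges above reproduce the removed hand-rolled bank switching:
for a virtual register R, regmap writes R / 0x100 into selector register
0x00 and then accesses offset R % 0x100 within the window. For example:

	/*
	 * regmap_read(priv->regmap[0], 0x2010, &utmp):
	 *   selector 0x00 <- 0x20, then read register 0x10,
	 * equivalent to the old cxd2820r_rd_reg(priv, 0x02010, ...)
	 */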
-EXPORT_SYMBOL(cxd2820r_attach);
+
+static int cxd2820r_remove(struct i2c_client *client)
+{
+ struct cxd2820r_priv *priv = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+#ifdef CONFIG_GPIOLIB
+ if (priv->gpio_chip.label)
+ gpiochip_remove(&priv->gpio_chip);
+#endif
+ regmap_exit(priv->regmap[1]);
+ i2c_unregister_device(priv->client[1]);
+
+ regmap_exit(priv->regmap[0]);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id cxd2820r_id_table[] = {
+ {"cxd2820r", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cxd2820r_id_table);
+
+static struct i2c_driver cxd2820r_driver = {
+ .driver = {
+ .name = "cxd2820r",
+ .suppress_bind_attrs = true,
+ },
+ .probe = cxd2820r_probe,
+ .remove = cxd2820r_remove,
+ .id_table = cxd2820r_id_table,
+};
+
+module_i2c_driver(cxd2820r_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Sony CXD2820R demodulator driver");
diff --git a/drivers/media/dvb-frontends/cxd2820r_priv.h b/drivers/media/dvb-frontends/cxd2820r_priv.h
index e31c48e53097..0d096206ac66 100644
--- a/drivers/media/dvb-frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb-frontends/cxd2820r_priv.h
@@ -27,6 +27,8 @@
#include "dvb_math.h"
#include "cxd2820r.h"
#include <linux/gpio.h>
+#include <linux/math64.h>
+#include <linux/regmap.h>
struct reg_val_mask {
u32 reg;
@@ -34,14 +36,23 @@ struct reg_val_mask {
u8 mask;
};
+#define CXD2820R_CLK 41000000
+
struct cxd2820r_priv {
+ struct i2c_client *client[2];
+ struct regmap *regmap[2];
struct i2c_adapter *i2c;
struct dvb_frontend fe;
- struct cxd2820r_config cfg;
+ u8 ts_mode;
+ bool ts_clk_inv;
+ bool if_agc_polarity;
+ bool spec_inv;
+
+ u64 post_bit_error_prev_dvbv3;
+ u64 post_bit_error;
bool ber_running;
- u8 bank[2];
#define GPIO_COUNT 3
u8 gpio[GPIO_COUNT];
#ifdef CONFIG_GPIOLIB
@@ -58,6 +69,9 @@ extern int cxd2820r_debug;
int cxd2820r_gpio(struct dvb_frontend *fe, u8 *gpio);
+int cxd2820r_wr_reg_val_mask_tab(struct cxd2820r_priv *priv,
+ const struct reg_val_mask *tab, int tab_len);
+
int cxd2820r_wr_reg_mask(struct cxd2820r_priv *priv, u32 reg, u8 val,
u8 mask);
@@ -83,14 +97,6 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe);
int cxd2820r_read_status_c(struct dvb_frontend *fe, enum fe_status *status);
-int cxd2820r_read_ber_c(struct dvb_frontend *fe, u32 *ber);
-
-int cxd2820r_read_signal_strength_c(struct dvb_frontend *fe, u16 *strength);
-
-int cxd2820r_read_snr_c(struct dvb_frontend *fe, u16 *snr);
-
-int cxd2820r_read_ucblocks_c(struct dvb_frontend *fe, u32 *ucblocks);
-
int cxd2820r_init_c(struct dvb_frontend *fe);
int cxd2820r_sleep_c(struct dvb_frontend *fe);
@@ -107,14 +113,6 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe);
int cxd2820r_read_status_t(struct dvb_frontend *fe, enum fe_status *status);
-int cxd2820r_read_ber_t(struct dvb_frontend *fe, u32 *ber);
-
-int cxd2820r_read_signal_strength_t(struct dvb_frontend *fe, u16 *strength);
-
-int cxd2820r_read_snr_t(struct dvb_frontend *fe, u16 *snr);
-
-int cxd2820r_read_ucblocks_t(struct dvb_frontend *fe, u32 *ucblocks);
-
int cxd2820r_init_t(struct dvb_frontend *fe);
int cxd2820r_sleep_t(struct dvb_frontend *fe);
@@ -131,14 +129,6 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe);
int cxd2820r_read_status_t2(struct dvb_frontend *fe, enum fe_status *status);
-int cxd2820r_read_ber_t2(struct dvb_frontend *fe, u32 *ber);
-
-int cxd2820r_read_signal_strength_t2(struct dvb_frontend *fe, u16 *strength);
-
-int cxd2820r_read_snr_t2(struct dvb_frontend *fe, u16 *snr);
-
-int cxd2820r_read_ucblocks_t2(struct dvb_frontend *fe, u32 *ucblocks);
-
int cxd2820r_init_t2(struct dvb_frontend *fe);
int cxd2820r_sleep_t2(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index 75ce7d8ded00..c2e7caf9b010 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -24,10 +24,11 @@
int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, i, bw_i;
- u32 if_freq, if_ctl;
- u64 num;
+ int ret, bw_i;
+ unsigned int utmp;
+ u32 if_frequency;
u8 buf[3], bw_param;
u8 bw_params1[][5] = {
{ 0x17, 0xea, 0xaa, 0xaa, 0xaa }, /* 6 MHz */
@@ -45,9 +46,9 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
{ 0x00085, 0x07, 0xff },
{ 0x00088, 0x01, 0xff },
- { 0x00070, priv->cfg.ts_mode, 0xff },
- { 0x00071, !priv->cfg.ts_clock_inv << 4, 0x10 },
- { 0x000cb, priv->cfg.if_agc_polarity << 6, 0x40 },
+ { 0x00070, priv->ts_mode, 0xff },
+ { 0x00071, !priv->ts_clk_inv << 4, 0x10 },
+ { 0x000cb, priv->if_agc_polarity << 6, 0x40 },
{ 0x000a5, 0x00, 0x01 },
{ 0x00082, 0x20, 0x60 },
{ 0x000c2, 0xc3, 0xff },
@@ -55,8 +56,10 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
{ 0x00427, 0x41, 0xff },
};
- dev_dbg(&priv->i2c->dev, "%s: frequency=%d bandwidth_hz=%d\n", __func__,
- c->frequency, c->bandwidth_hz);
+ dev_dbg(&client->dev,
+ "delivery_system=%d modulation=%d frequency=%u bandwidth_hz=%u inversion=%d\n",
+ c->delivery_system, c->modulation, c->frequency,
+ c->bandwidth_hz, c->inversion);
switch (c->bandwidth_hz) {
case 6000000:
@@ -80,12 +83,9 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
fe->ops.tuner_ops.set_params(fe);
if (priv->delivery_system != SYS_DVBT) {
- for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = cxd2820r_wr_reg_mask(priv, tab[i].reg,
- tab[i].val, tab[i].mask);
- if (ret)
- goto error;
- }
+ ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+ if (ret)
+ goto error;
}
priv->delivery_system = SYS_DVBT;
@@ -93,48 +93,46 @@ int cxd2820r_set_frontend_t(struct dvb_frontend *fe)
/* program IF frequency */
if (fe->ops.tuner_ops.get_if_frequency) {
- ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
if (ret)
goto error;
- } else
- if_freq = 0;
-
- dev_dbg(&priv->i2c->dev, "%s: if_freq=%d\n", __func__, if_freq);
-
- num = if_freq / 1000; /* Hz => kHz */
- num *= 0x1000000;
- if_ctl = DIV_ROUND_CLOSEST_ULL(num, 41000);
- buf[0] = ((if_ctl >> 16) & 0xff);
- buf[1] = ((if_ctl >> 8) & 0xff);
- buf[2] = ((if_ctl >> 0) & 0xff);
+ dev_dbg(&client->dev, "if_frequency=%u\n", if_frequency);
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
- ret = cxd2820r_wr_regs(priv, 0x000b6, buf, 3);
+ utmp = DIV_ROUND_CLOSEST_ULL((u64)if_frequency * 0x1000000, CXD2820R_CLK);
+ buf[0] = (utmp >> 16) & 0xff;
+ buf[1] = (utmp >> 8) & 0xff;
+ buf[2] = (utmp >> 0) & 0xff;
+ ret = regmap_bulk_write(priv->regmap[0], 0x00b6, buf, 3);
if (ret)
goto error;
- ret = cxd2820r_wr_regs(priv, 0x0009f, bw_params1[bw_i], 5);
+ ret = regmap_bulk_write(priv->regmap[0], 0x009f, bw_params1[bw_i], 5);
if (ret)
goto error;
- ret = cxd2820r_wr_reg_mask(priv, 0x000d7, bw_param << 6, 0xc0);
+ ret = regmap_update_bits(priv->regmap[0], 0x00d7, 0xc0, bw_param << 6);
if (ret)
goto error;
- ret = cxd2820r_wr_regs(priv, 0x000d9, bw_params2[bw_i], 2);
+ ret = regmap_bulk_write(priv->regmap[0], 0x00d9, bw_params2[bw_i], 2);
if (ret)
goto error;
- ret = cxd2820r_wr_reg(priv, 0x000ff, 0x08);
+ ret = regmap_write(priv->regmap[0], 0x00ff, 0x08);
if (ret)
goto error;
- ret = cxd2820r_wr_reg(priv, 0x000fe, 0x01);
+ ret = regmap_write(priv->regmap[0], 0x00fe, 0x01);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
@@ -142,10 +140,14 @@ int cxd2820r_get_frontend_t(struct dvb_frontend *fe,
struct dtv_frontend_properties *c)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
int ret;
+ unsigned int utmp;
u8 buf[2];
- ret = cxd2820r_rd_regs(priv, 0x0002f, buf, sizeof(buf));
+ dev_dbg(&client->dev, "\n");
+
+ ret = regmap_bulk_read(priv->regmap[0], 0x002f, buf, sizeof(buf));
if (ret)
goto error;
@@ -236,11 +238,11 @@ int cxd2820r_get_frontend_t(struct dvb_frontend *fe,
break;
}
- ret = cxd2820r_rd_reg(priv, 0x007c6, &buf[0]);
+ ret = regmap_read(priv->regmap[0], 0x07c6, &utmp);
if (ret)
goto error;
- switch ((buf[0] >> 0) & 0x01) {
+ switch ((utmp >> 0) & 0x01) {
case 0:
c->inversion = INVERSION_OFF;
break;
@@ -251,169 +253,158 @@ int cxd2820r_get_frontend_t(struct dvb_frontend *fe,
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-int cxd2820r_read_ber_t(struct dvb_frontend *fe, u32 *ber)
-{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- u8 buf[3], start_ber = 0;
- *ber = 0;
-
- if (priv->ber_running) {
- ret = cxd2820r_rd_regs(priv, 0x00076, buf, sizeof(buf));
- if (ret)
- goto error;
-
- if ((buf[2] >> 7) & 0x01 || (buf[2] >> 4) & 0x01) {
- *ber = (buf[2] & 0x0f) << 16 | buf[1] << 8 | buf[0];
- start_ber = 1;
- }
- } else {
- priv->ber_running = true;
- start_ber = 1;
- }
-
- if (start_ber) {
- /* (re)start BER */
- ret = cxd2820r_wr_reg(priv, 0x00079, 0x01);
- if (ret)
- goto error;
- }
-
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-int cxd2820r_read_signal_strength_t(struct dvb_frontend *fe,
- u16 *strength)
+int cxd2820r_read_status_t(struct dvb_frontend *fe, enum fe_status *status)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- u8 buf[2];
- u16 tmp;
+ unsigned int utmp, utmp1, utmp2;
+ u8 buf[3];
- ret = cxd2820r_rd_regs(priv, 0x00026, buf, sizeof(buf));
+ /* Lock detection */
+ ret = regmap_bulk_read(priv->regmap[0], 0x0010, &buf[0], 1);
+ if (ret)
+ goto error;
+ ret = regmap_bulk_read(priv->regmap[0], 0x0073, &buf[1], 1);
if (ret)
goto error;
- tmp = (buf[0] & 0x0f) << 8 | buf[1];
- tmp = ~tmp & 0x0fff;
+ utmp1 = (buf[0] >> 0) & 0x07;
+ utmp2 = (buf[1] >> 3) & 0x01;
- /* scale value to 0x0000-0xffff from 0x0000-0x0fff */
- *strength = tmp * 0xffff / 0x0fff;
+ if (utmp1 == 6 && utmp2 == 1) {
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ } else if (utmp1 == 6 || utmp2 == 1) {
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC;
+ } else {
+ *status = 0;
+ }
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ dev_dbg(&client->dev, "status=%02x raw=%*ph sync=%u ts=%u\n",
+ *status, 2, buf, utmp1, utmp2);
-int cxd2820r_read_snr_t(struct dvb_frontend *fe, u16 *snr)
-{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- u8 buf[2];
- u16 tmp;
- /* report SNR in dB * 10 */
+ /* Signal strength */
+ if (*status & FE_HAS_SIGNAL) {
+ unsigned int strength;
- ret = cxd2820r_rd_regs(priv, 0x00028, buf, sizeof(buf));
- if (ret)
- goto error;
+ ret = regmap_bulk_read(priv->regmap[0], 0x0026, buf, 2);
+ if (ret)
+ goto error;
- tmp = (buf[0] & 0x1f) << 8 | buf[1];
- #define CXD2820R_LOG10_8_24 15151336 /* log10(8) << 24 */
- if (tmp)
- *snr = (intlog10(tmp) - CXD2820R_LOG10_8_24) / ((1 << 24)
- / 100);
- else
- *snr = 0;
+ utmp = buf[0] << 8 | buf[1] << 0;
+ utmp = ~utmp & 0x0fff;
+ /* Scale value to 0x0000-0xffff */
+ strength = utmp << 4 | utmp >> 8;
- dev_dbg(&priv->i2c->dev, "%s: dBx10=%d val=%04x\n", __func__, *snr,
- tmp);
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = strength;
+ } else {
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ /* CNR */
+ if (*status & FE_HAS_VITERBI) {
+ unsigned int cnr;
-int cxd2820r_read_ucblocks_t(struct dvb_frontend *fe, u32 *ucblocks)
-{
- *ucblocks = 0;
- /* no way to read ? */
- return 0;
-}
+ ret = regmap_bulk_read(priv->regmap[0], 0x002c, buf, 2);
+ if (ret)
+ goto error;
-int cxd2820r_read_status_t(struct dvb_frontend *fe, enum fe_status *status)
-{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- u8 buf[4];
- *status = 0;
+ utmp = buf[0] << 8 | buf[1] << 0;
+ if (utmp)
+ cnr = div_u64((u64)(intlog10(utmp)
+ - intlog10(32000 - utmp) + 55532585)
+ * 10000, (1 << 24));
+ else
+ cnr = 0;
+
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = cnr;
+ } else {
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
- ret = cxd2820r_rd_reg(priv, 0x00010, &buf[0]);
- if (ret)
- goto error;
+ /* BER */
+ if (*status & FE_HAS_SYNC) {
+ unsigned int post_bit_error;
+ bool start_ber;
- if ((buf[0] & 0x07) == 6) {
- ret = cxd2820r_rd_reg(priv, 0x00073, &buf[1]);
- if (ret)
- goto error;
+ if (priv->ber_running) {
+ ret = regmap_bulk_read(priv->regmap[0], 0x0076, buf, 3);
+ if (ret)
+ goto error;
- if (((buf[1] >> 3) & 0x01) == 1) {
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
- FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ if ((buf[2] >> 7) & 0x01) {
+ post_bit_error = buf[2] << 16 | buf[1] << 8 |
+ buf[0] << 0;
+ post_bit_error &= 0x0fffff;
+ start_ber = true;
+ } else {
+ post_bit_error = 0;
+ start_ber = false;
+ }
} else {
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
- FE_HAS_VITERBI | FE_HAS_SYNC;
+ post_bit_error = 0;
+ start_ber = true;
}
- } else {
- ret = cxd2820r_rd_reg(priv, 0x00014, &buf[2]);
- if (ret)
- goto error;
- if ((buf[2] & 0x0f) >= 4) {
- ret = cxd2820r_rd_reg(priv, 0x00a14, &buf[3]);
+ if (start_ber) {
+ ret = regmap_write(priv->regmap[0], 0x0079, 0x01);
if (ret)
goto error;
-
- if (((buf[3] >> 4) & 0x01) == 1)
- *status |= FE_HAS_SIGNAL;
+ priv->ber_running = true;
}
- }
- dev_dbg(&priv->i2c->dev, "%s: lock=%*ph\n", __func__, 4, buf);
+ priv->post_bit_error += post_bit_error;
+
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = priv->post_bit_error;
+ } else {
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
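
The shift-or scaling above replicates the top bits of the 12-bit reading
into the low bits so the relative strength spans the full 16-bit range:

	/*
	 * utmp = 0x0fff -> strength = 0xfff0 | 0x0f = 0xffff (max)
	 * utmp = 0x0800 -> strength = 0x8000 | 0x08 = 0x8008
	 */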
int cxd2820r_init_t(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
int ret;
- ret = cxd2820r_wr_reg(priv, 0x00085, 0x07);
+ dev_dbg(&client->dev, "\n");
+
+ ret = regmap_write(priv->regmap[0], 0x0085, 0x07);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
int cxd2820r_sleep_t(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret, i;
+ struct i2c_client *client = priv->client[0];
+ int ret;
struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
@@ -422,20 +413,17 @@ int cxd2820r_sleep_t(struct dvb_frontend *fe)
{ 0x00080, 0x00, 0xff },
};
- dev_dbg(&priv->i2c->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "\n");
priv->delivery_system = SYS_UNDEFINED;
- for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = cxd2820r_wr_reg_mask(priv, tab[i].reg, tab[i].val,
- tab[i].mask);
- if (ret)
- goto error;
- }
+ ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+ if (ret)
+ goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index 704475676234..e641fde75379 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -23,11 +23,12 @@
int cxd2820r_set_frontend_t2(struct dvb_frontend *fe)
{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret, i, bw_i;
- u32 if_freq, if_ctl;
- u64 num;
+ struct i2c_client *client = priv->client[0];
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, bw_i;
+ unsigned int utmp;
+ u32 if_frequency;
u8 buf[3], bw_param;
u8 bw_params1[][5] = {
{ 0x1c, 0xb3, 0x33, 0x33, 0x33 }, /* 5 MHz */
@@ -45,10 +46,10 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe)
{ 0x0207f, 0x2a, 0xff },
{ 0x02082, 0x0a, 0xff },
{ 0x02083, 0x0a, 0xff },
- { 0x020cb, priv->cfg.if_agc_polarity << 6, 0x40 },
- { 0x02070, priv->cfg.ts_mode, 0xff },
- { 0x02071, !priv->cfg.ts_clock_inv << 6, 0x40 },
- { 0x020b5, priv->cfg.spec_inv << 4, 0x10 },
+ { 0x020cb, priv->if_agc_polarity << 6, 0x40 },
+ { 0x02070, priv->ts_mode, 0xff },
+ { 0x02071, !priv->ts_clk_inv << 6, 0x40 },
+ { 0x020b5, priv->spec_inv << 4, 0x10 },
{ 0x02567, 0x07, 0x0f },
{ 0x02569, 0x03, 0x03 },
{ 0x02595, 0x1a, 0xff },
@@ -69,8 +70,10 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe)
{ 0x027ef, 0x10, 0x18 },
};
- dev_dbg(&priv->i2c->dev, "%s: frequency=%d bandwidth_hz=%d\n", __func__,
- c->frequency, c->bandwidth_hz);
+ dev_dbg(&client->dev,
+ "delivery_system=%d modulation=%d frequency=%u bandwidth_hz=%u inversion=%d stream_id=%u\n",
+ c->delivery_system, c->modulation, c->frequency,
+ c->bandwidth_hz, c->inversion, c->stream_id);
switch (c->bandwidth_hz) {
case 5000000:
@@ -98,73 +101,67 @@ int cxd2820r_set_frontend_t2(struct dvb_frontend *fe)
fe->ops.tuner_ops.set_params(fe);
if (priv->delivery_system != SYS_DVBT2) {
- for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = cxd2820r_wr_reg_mask(priv, tab[i].reg,
- tab[i].val, tab[i].mask);
- if (ret)
- goto error;
- }
+ ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+ if (ret)
+ goto error;
}
priv->delivery_system = SYS_DVBT2;
/* program IF frequency */
if (fe->ops.tuner_ops.get_if_frequency) {
- ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_freq);
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
if (ret)
goto error;
- } else
- if_freq = 0;
-
- dev_dbg(&priv->i2c->dev, "%s: if_freq=%d\n", __func__, if_freq);
+ dev_dbg(&client->dev, "if_frequency=%u\n", if_frequency);
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
- num = if_freq / 1000; /* Hz => kHz */
- num *= 0x1000000;
- if_ctl = DIV_ROUND_CLOSEST_ULL(num, 41000);
- buf[0] = ((if_ctl >> 16) & 0xff);
- buf[1] = ((if_ctl >> 8) & 0xff);
- buf[2] = ((if_ctl >> 0) & 0xff);
+ utmp = DIV_ROUND_CLOSEST_ULL((u64)if_frequency * 0x1000000, CXD2820R_CLK);
+ buf[0] = (utmp >> 16) & 0xff;
+ buf[1] = (utmp >> 8) & 0xff;
+ buf[2] = (utmp >> 0) & 0xff;
+ ret = regmap_bulk_write(priv->regmap[0], 0x20b6, buf, 3);
+ if (ret)
+ goto error;
/* PLP filtering */
if (c->stream_id > 255) {
- dev_dbg(&priv->i2c->dev, "%s: Disable PLP filtering\n", __func__);
- ret = cxd2820r_wr_reg(priv, 0x023ad , 0);
+ dev_dbg(&client->dev, "disable PLP filtering\n");
+ ret = regmap_write(priv->regmap[0], 0x23ad, 0x00);
if (ret)
goto error;
} else {
- dev_dbg(&priv->i2c->dev, "%s: Enable PLP filtering = %d\n", __func__,
- c->stream_id);
- ret = cxd2820r_wr_reg(priv, 0x023af , c->stream_id & 0xFF);
+ dev_dbg(&client->dev, "enable PLP filtering\n");
+ ret = regmap_write(priv->regmap[0], 0x23af, c->stream_id & 0xff);
if (ret)
goto error;
- ret = cxd2820r_wr_reg(priv, 0x023ad , 1);
+ ret = regmap_write(priv->regmap[0], 0x23ad, 0x01);
if (ret)
goto error;
}
- ret = cxd2820r_wr_regs(priv, 0x020b6, buf, 3);
+ ret = regmap_bulk_write(priv->regmap[0], 0x209f, bw_params1[bw_i], 5);
if (ret)
goto error;
- ret = cxd2820r_wr_regs(priv, 0x0209f, bw_params1[bw_i], 5);
+ ret = regmap_update_bits(priv->regmap[0], 0x20d7, 0xc0, bw_param << 6);
if (ret)
goto error;
- ret = cxd2820r_wr_reg_mask(priv, 0x020d7, bw_param << 6, 0xc0);
+ ret = regmap_write(priv->regmap[0], 0x00ff, 0x08);
if (ret)
goto error;
- ret = cxd2820r_wr_reg(priv, 0x000ff, 0x08);
- if (ret)
- goto error;
-
- ret = cxd2820r_wr_reg(priv, 0x000fe, 0x01);
+ ret = regmap_write(priv->regmap[0], 0x00fe, 0x01);
if (ret)
goto error;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
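Editor's note: the replaced IF arithmetic dropped the frequency to kHz before scaling (num = if_freq / 1000, then divide by 41000), while the new single DIV_ROUND_CLOSEST_ULL keeps Hz precision. A standalone sketch of the math, assuming CXD2820R_CLK is the 41 MHz clock implied by the old divide-by-41000-kHz:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t clk = 41000000;    /* assumed CXD2820R_CLK, 41 MHz */
	uint32_t if_frequency = 4800000;  /* example 4.8 MHz IF from tuner */
	/* utmp = DIV_ROUND_CLOSEST_ULL(if_frequency * 2^24, clk) */
	uint32_t utmp = (uint32_t)(((uint64_t)if_frequency * 0x1000000 +
				    clk / 2) / clk);
	uint8_t buf[3] = { utmp >> 16, utmp >> 8, utmp >> 0 };

	/* 4.8 MHz -> 0x1df882, written MSB first starting at 0x20b6 */
	printf("if_ctl=0x%06x buf=%02x %02x %02x\n",
	       (unsigned int)utmp, buf[0], buf[1], buf[2]);
	return 0;
}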
@@ -173,10 +170,14 @@ int cxd2820r_get_frontend_t2(struct dvb_frontend *fe,
struct dtv_frontend_properties *c)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct i2c_client *client = priv->client[0];
int ret;
+ unsigned int utmp;
u8 buf[2];
- ret = cxd2820r_rd_regs(priv, 0x0205c, buf, 2);
+ dev_dbg(&client->dev, "\n");
+
+ ret = regmap_bulk_read(priv->regmap[0], 0x205c, buf, 2);
if (ret)
goto error;
@@ -225,7 +226,7 @@ int cxd2820r_get_frontend_t2(struct dvb_frontend *fe,
break;
}
- ret = cxd2820r_rd_regs(priv, 0x0225b, buf, 2);
+ ret = regmap_bulk_read(priv->regmap[0], 0x225b, buf, 2);
if (ret)
goto error;
@@ -265,11 +266,11 @@ int cxd2820r_get_frontend_t2(struct dvb_frontend *fe,
break;
}
- ret = cxd2820r_rd_reg(priv, 0x020b5, &buf[0]);
+ ret = regmap_read(priv->regmap[0], 0x20b5, &utmp);
if (ret)
goto error;
- switch ((buf[0] >> 4) & 0x01) {
+ switch ((utmp >> 4) & 0x01) {
case 0:
c->inversion = INVERSION_OFF;
break;
@@ -280,130 +281,124 @@ int cxd2820r_get_frontend_t2(struct dvb_frontend *fe,
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
int cxd2820r_read_status_t2(struct dvb_frontend *fe, enum fe_status *status)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct i2c_client *client = priv->client[0];
int ret;
- u8 buf[1];
- *status = 0;
+ unsigned int utmp, utmp1, utmp2;
+ u8 buf[4];
- ret = cxd2820r_rd_reg(priv, 0x02010 , &buf[0]);
+ /* Lock detection */
+ ret = regmap_bulk_read(priv->regmap[0], 0x2010, &buf[0], 1);
if (ret)
goto error;
- if ((buf[0] & 0x07) == 6) {
- if (((buf[0] >> 5) & 0x01) == 1) {
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
- FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
- } else {
- *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
- FE_HAS_VITERBI | FE_HAS_SYNC;
- }
- }
+ utmp1 = (buf[0] >> 0) & 0x07;
+ utmp2 = (buf[0] >> 5) & 0x01;
- dev_dbg(&priv->i2c->dev, "%s: lock=%02x\n", __func__, buf[0]);
+ if (utmp1 == 6 && utmp2 == 1) {
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ } else if (utmp1 == 6 || utmp2 == 1) {
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC;
+ } else {
+ *status = 0;
+ }
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ dev_dbg(&client->dev, "status=%02x raw=%*ph sync=%u ts=%u\n",
+ *status, 1, buf, utmp1, utmp2);
-int cxd2820r_read_ber_t2(struct dvb_frontend *fe, u32 *ber)
-{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- u8 buf[4];
- unsigned int errbits;
- *ber = 0;
- /* FIXME: correct calculation */
+ /* Signal strength */
+ if (*status & FE_HAS_SIGNAL) {
+ unsigned int strength;
- ret = cxd2820r_rd_regs(priv, 0x02039, buf, sizeof(buf));
- if (ret)
- goto error;
+ ret = regmap_bulk_read(priv->regmap[0], 0x2026, buf, 2);
+ if (ret)
+ goto error;
- if ((buf[0] >> 4) & 0x01) {
- errbits = (buf[0] & 0x0f) << 24 | buf[1] << 16 |
- buf[2] << 8 | buf[3];
+ utmp = buf[0] << 8 | buf[1] << 0;
+ utmp = ~utmp & 0x0fff;
+ /* Scale value to 0x0000-0xffff */
+ strength = utmp << 4 | utmp >> 8;
- if (errbits)
- *ber = errbits * 64 / 16588800;
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = strength;
+ } else {
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ /* CNR */
+ if (*status & FE_HAS_VITERBI) {
+ unsigned int cnr;
-int cxd2820r_read_signal_strength_t2(struct dvb_frontend *fe,
- u16 *strength)
-{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- u8 buf[2];
- u16 tmp;
-
- ret = cxd2820r_rd_regs(priv, 0x02026, buf, sizeof(buf));
- if (ret)
- goto error;
-
- tmp = (buf[0] & 0x0f) << 8 | buf[1];
- tmp = ~tmp & 0x0fff;
+ ret = regmap_bulk_read(priv->regmap[0], 0x2028, buf, 2);
+ if (ret)
+ goto error;
- /* scale value to 0x0000-0xffff from 0x0000-0x0fff */
- *strength = tmp * 0xffff / 0x0fff;
+ utmp = buf[0] << 8 | buf[1] << 0;
+ utmp = utmp & 0x0fff;
+ #define CXD2820R_LOG10_8_24 15151336 /* log10(8) << 24 */
+ if (utmp)
+ cnr = div_u64((u64)(intlog10(utmp)
+ - CXD2820R_LOG10_8_24) * 10000,
+ (1 << 24));
+ else
+ cnr = 0;
+
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = cnr;
+ } else {
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
- return ret;
-error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
+ /* BER */
+ if (*status & FE_HAS_SYNC) {
+ unsigned int post_bit_error;
-int cxd2820r_read_snr_t2(struct dvb_frontend *fe, u16 *snr)
-{
- struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret;
- u8 buf[2];
- u16 tmp;
- /* report SNR in dB * 10 */
+ ret = regmap_bulk_read(priv->regmap[0], 0x2039, buf, 4);
+ if (ret)
+ goto error;
- ret = cxd2820r_rd_regs(priv, 0x02028, buf, sizeof(buf));
- if (ret)
- goto error;
+ if ((buf[0] >> 4) & 0x01) {
+ post_bit_error = buf[0] << 24 | buf[1] << 16 |
+ buf[2] << 8 | buf[3] << 0;
+ post_bit_error &= 0x0fffffff;
+ } else {
+ post_bit_error = 0;
+ }
- tmp = (buf[0] & 0x0f) << 8 | buf[1];
- #define CXD2820R_LOG10_8_24 15151336 /* log10(8) << 24 */
- if (tmp)
- *snr = (intlog10(tmp) - CXD2820R_LOG10_8_24) / ((1 << 24)
- / 100);
- else
- *snr = 0;
+ priv->post_bit_error += post_bit_error;
- dev_dbg(&priv->i2c->dev, "%s: dBx10=%d val=%04x\n", __func__, *snr,
- tmp);
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = priv->post_bit_error;
+ } else {
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
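Editor's note: two conversions in the new statistics block are worth unpacking. The strength word is a 12-bit, inverted-sense AGC value; utmp << 4 | utmp >> 8 stretches it onto 0x0000..0xffff by replicating the top bits (the old code computed tmp * 0xffff / 0x0fff, which this approximates without a divide). For CNR, 15151336 is log10(8) * 2^24, so the reported svalue is 10 * log10(utmp / 8) dB in the 0.001 dB units FE_SCALE_DECIBEL expects. A small sketch of both, with illustrative values:

#include <stdio.h>
#include <math.h>

/* 12-bit inverted register word -> 0x0000..0xffff relative strength */
static unsigned int scale_strength(unsigned int raw)
{
	unsigned int utmp = ~raw & 0x0fff;

	/* replicate the top 4 bits into the low bits: 0xfff -> 0xffff */
	return utmp << 4 | utmp >> 8;
}

int main(void)
{
	unsigned int cnr_reg = 800;	/* example CNR register word */

	printf("raw 0x000 -> 0x%04x\n", scale_strength(0x000)); /* 0xffff */
	printf("raw 0x800 -> 0x%04x\n", scale_strength(0x800)); /* 0x7ff7 */
	printf("raw 0xfff -> 0x%04x\n", scale_strength(0xfff)); /* 0x0000 */

	/* intended CNR: 10 * log10(utmp / 8) dB, held in millidB */
	printf("cnr ~ %.0f mdB\n", 10000.0 * log10(cnr_reg / 8.0));
	return 0;
}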
-int cxd2820r_read_ucblocks_t2(struct dvb_frontend *fe, u32 *ucblocks)
-{
- *ucblocks = 0;
- /* no way to read ? */
- return 0;
-}
-
int cxd2820r_sleep_t2(struct dvb_frontend *fe)
{
struct cxd2820r_priv *priv = fe->demodulator_priv;
- int ret, i;
+ struct i2c_client *client = priv->client[0];
+ int ret;
struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
@@ -413,20 +408,17 @@ int cxd2820r_sleep_t2(struct dvb_frontend *fe)
{ 0x00080, 0x00, 0xff },
};
- dev_dbg(&priv->i2c->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "\n");
- for (i = 0; i < ARRAY_SIZE(tab); i++) {
- ret = cxd2820r_wr_reg_mask(priv, tab[i].reg, tab[i].val,
- tab[i].mask);
- if (ret)
- goto error;
- }
+ ret = cxd2820r_wr_reg_val_mask_tab(priv, tab, ARRAY_SIZE(tab));
+ if (ret)
+ goto error;
priv->delivery_system = SYS_UNDEFINED;
return ret;
error:
- dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index ffe88bc6b813..5afb9c508f65 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -206,6 +206,9 @@ static const struct cxd2841er_cnr_data s2_cn_data[] = {
(u32)(((iffreq)/48.0)*16777216.0 + 0.5) : \
(u32)(((iffreq)/41.0)*16777216.0 + 0.5))
+static int cxd2841er_freeze_regs(struct cxd2841er_priv *priv);
+static int cxd2841er_unfreeze_regs(struct cxd2841er_priv *priv);
+
static void cxd2841er_i2c_debug(struct cxd2841er_priv *priv,
u8 addr, u8 reg, u8 write,
const u8 *data, u32 len)
@@ -1401,6 +1404,41 @@ static int cxd2841er_read_ber_c(struct cxd2841er_priv *priv,
return 0;
}
+static int cxd2841er_read_ber_i(struct cxd2841er_priv *priv,
+ u32 *bit_error, u32 *bit_count)
+{
+ u8 data[3];
+ u8 pktnum[2];
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+
+ cxd2841er_freeze_regs(priv);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x5B, pktnum, sizeof(pktnum));
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x16, data, sizeof(data));
+
+ if (!pktnum[0] && !pktnum[1]) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): no valid BER data\n", __func__);
+ cxd2841er_unfreeze_regs(priv);
+ return -EINVAL;
+ }
+
+ *bit_error = ((u32)(data[0] & 0x7F) << 16) |
+ ((u32)data[1] << 8) | data[2];
+ *bit_count = ((((u32)pktnum[0] << 8) | pktnum[1]) * 204 * 8);
+ dev_dbg(&priv->i2c->dev, "%s(): bit_error=%u bit_count=%u\n",
+ __func__, *bit_error, *bit_count);
+
+ cxd2841er_unfreeze_regs(priv);
+ return 0;
+}
+
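Editor's note: the window arithmetic above assumes 204-byte Reed-Solomon-coded TS packets (188 payload bytes plus 16 parity bytes), hence bit_count = pktnum * 204 * 8. A trivial standalone check of how the error/count pair turns into a rate:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pktnum = 1000;			/* packets in the window */
	uint32_t bit_error = 42;		/* demod error counter */
	uint32_t bit_count = pktnum * 204 * 8;	/* 204-byte RS packets */

	printf("BER = %u/%u = %.2e\n", bit_error, bit_count,
	       (double)bit_error / bit_count);
	return 0;
}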
static int cxd2841er_mon_read_ber_s(struct cxd2841er_priv *priv,
u32 *bit_error, u32 *bit_count)
{
@@ -1570,6 +1608,25 @@ static int cxd2841er_read_ber_t(struct cxd2841er_priv *priv,
return 0;
}
+static int cxd2841er_freeze_regs(struct cxd2841er_priv *priv)
+{
+ /*
+ * Freeze registers: ensure multiple separate register reads
+ * are from the same snapshot
+ */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x01);
+ return 0;
+}
+
+static int cxd2841er_unfreeze_regs(struct cxd2841er_priv *priv)
+{
+ /*
+ * Unfreeze registers: release the snapshot taken by
+ * cxd2841er_freeze_regs()
+ */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x00);
+ return 0;
+}
+
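Editor's note: the hunks that follow convert every SNR/BER monitor to bracket its reads with these helpers, and add an unfreeze on each early-exit path so the demod is never left with its monitor registers latched. A hedged sketch of the intended pattern (bank and register values illustrative):

static int read_agc_snapshot(struct cxd2841er_priv *priv, u8 *data)
{
	int ret;

	cxd2841er_freeze_regs(priv);
	/* select an SLV-T bank, then read both bytes from one snapshot */
	cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
	ret = cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, 2);
	cxd2841er_unfreeze_regs(priv);	/* always release the freeze */
	return ret;
}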
static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv,
u8 delsys, u32 *snr)
{
@@ -1578,6 +1635,7 @@ static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv,
int min_index, max_index, index;
static const struct cxd2841er_cnr_data *cn_data;
+ cxd2841er_freeze_regs(priv);
/* Set SLV-T Bank : 0xA1 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa1);
/*
@@ -1629,9 +1687,11 @@ static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv,
} else {
dev_dbg(&priv->i2c->dev,
"%s(): no data available\n", __func__);
+ cxd2841er_unfreeze_regs(priv);
return -EINVAL;
}
done:
+ cxd2841er_unfreeze_regs(priv);
*snr = res;
return 0;
}
@@ -1655,12 +1715,7 @@ static int cxd2841er_read_snr_c(struct cxd2841er_priv *priv, u32 *snr)
return -EINVAL;
}
- /*
- * Freeze registers: ensure multiple separate register reads
- * are from the same snapshot
- */
- cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x01);
-
+ cxd2841er_freeze_regs(priv);
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
cxd2841er_read_regs(priv, I2C_SLVT, 0x19, data, 1);
qam = (enum sony_dvbc_constellation_t) (data[0] & 0x07);
@@ -1670,6 +1725,7 @@ static int cxd2841er_read_snr_c(struct cxd2841er_priv *priv, u32 *snr)
if (reg == 0) {
dev_dbg(&priv->i2c->dev,
"%s(): reg value out of range\n", __func__);
+ cxd2841er_unfreeze_regs(priv);
return 0;
}
@@ -1690,9 +1746,11 @@ static int cxd2841er_read_snr_c(struct cxd2841er_priv *priv, u32 *snr)
*snr = -88 * (int32_t)sony_log(reg) + 86999;
break;
default:
+ cxd2841er_unfreeze_regs(priv);
return -EINVAL;
}
+ cxd2841er_unfreeze_regs(priv);
return 0;
}
@@ -1707,17 +1765,21 @@ static int cxd2841er_read_snr_t(struct cxd2841er_priv *priv, u32 *snr)
"%s(): invalid state %d\n", __func__, priv->state);
return -EINVAL;
}
+
+ cxd2841er_freeze_regs(priv);
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
reg = ((u32)data[0] << 8) | (u32)data[1];
if (reg == 0) {
dev_dbg(&priv->i2c->dev,
"%s(): reg value out of range\n", __func__);
+ cxd2841er_unfreeze_regs(priv);
return 0;
}
if (reg > 4996)
reg = 4996;
*snr = 10000 * ((intlog10(reg) - intlog10(5350 - reg)) >> 24) + 28500;
+ cxd2841er_unfreeze_regs(priv);
return 0;
}
@@ -1732,18 +1794,22 @@ static int cxd2841er_read_snr_t2(struct cxd2841er_priv *priv, u32 *snr)
"%s(): invalid state %d\n", __func__, priv->state);
return -EINVAL;
}
+
+ cxd2841er_freeze_regs(priv);
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
reg = ((u32)data[0] << 8) | (u32)data[1];
if (reg == 0) {
dev_dbg(&priv->i2c->dev,
"%s(): reg value out of range\n", __func__);
+ cxd2841er_unfreeze_regs(priv);
return 0;
}
if (reg > 10876)
reg = 10876;
*snr = 10000 * ((intlog10(reg) -
intlog10(12600 - reg)) >> 24) + 32000;
+ cxd2841er_unfreeze_regs(priv);
return 0;
}
@@ -1760,21 +1826,18 @@ static int cxd2841er_read_snr_i(struct cxd2841er_priv *priv, u32 *snr)
return -EINVAL;
}
- /* Freeze all registers */
- cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x01);
-
-
+ cxd2841er_freeze_regs(priv);
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
reg = ((u32)data[0] << 8) | (u32)data[1];
if (reg == 0) {
dev_dbg(&priv->i2c->dev,
"%s(): reg value out of range\n", __func__);
+ cxd2841er_unfreeze_regs(priv);
return 0;
}
- if (reg > 4996)
- reg = 4996;
- *snr = 100 * intlog10(reg) - 9031;
+ *snr = 10000 * (intlog10(reg) >> 24) - 9031;
+ cxd2841er_unfreeze_regs(priv);
return 0;
}
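Editor's note on the fixed point used throughout these SNR formulas: intlog10() from dvb_math.h returns log10(x) scaled by 2^24, and the magic offsets use the same scale (9031 here is 10000 * log10(8) to within a millidB), so the intended result is 10 * log10(...) dB reported in 0.001 dB units; the >> 24 keeps only the integer part of the log before the 10000 multiply. A standalone sketch of the intended ISDB-T value, using floating point for clarity:

#include <stdio.h>
#include <math.h>

int main(void)
{
	unsigned int reg = 3000;	/* example CNR register reading */

	/* intended: 10*log10(reg) dB - 9.031 dB, in millidB */
	double snr_mdb = 10000.0 * log10((double)reg) - 9031.0;

	printf("reg=%u -> ~%.0f mdB (~%.2f dB)\n",
	       reg, snr_mdb, snr_mdb / 1000.0);
	return 0;
}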
@@ -1852,6 +1915,9 @@ static void cxd2841er_read_ber(struct dvb_frontend *fe)
case SYS_DVBC_ANNEX_C:
ret = cxd2841er_read_ber_c(priv, &bit_error, &bit_count);
break;
+ case SYS_ISDBT:
+ ret = cxd2841er_read_ber_i(priv, &bit_error, &bit_count);
+ break;
case SYS_DVBS:
ret = cxd2841er_mon_read_ber_s(priv, &bit_error, &bit_count);
break;
@@ -1965,6 +2031,9 @@ static void cxd2841er_read_snr(struct dvb_frontend *fe)
return;
}
+ dev_dbg(&priv->i2c->dev, "%s(): snr=%d\n",
+ __func__, (int32_t)tmp);
+
if (!ret) {
p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
p->cnr.stat[0].svalue = tmp;
@@ -1977,7 +2046,7 @@ static void cxd2841er_read_ucblocks(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cxd2841er_priv *priv = fe->demodulator_priv;
- u32 ucblocks;
+ u32 ucblocks = 0;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
switch (p->delivery_system) {
@@ -1999,7 +2068,7 @@ static void cxd2841er_read_ucblocks(struct dvb_frontend *fe)
p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
return;
}
- dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ dev_dbg(&priv->i2c->dev, "%s() ucblocks=%u\n", __func__, ucblocks);
p->block_error.stat[0].scale = FE_SCALE_COUNTER;
p->block_error.stat[0].uvalue = ucblocks;
@@ -2694,6 +2763,14 @@ static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
u8 b10_b6[3];
u32 iffreq;
+ if (bandwidth != 6000000 &&
+ bandwidth != 7000000 &&
+ bandwidth != 8000000) {
+ dev_info(&priv->i2c->dev, "%s(): unsupported bandwidth %d. Forcing 8Mhz!\n",
+ __func__, bandwidth);
+ bandwidth = 8000000;
+ }
+
dev_dbg(&priv->i2c->dev, "%s() bw=%d\n", __func__, bandwidth);
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
switch (bandwidth) {
@@ -3076,6 +3153,7 @@ static int cxd2841er_sleep_tc_to_active_c(struct cxd2841er_priv *priv,
/* Enable demod clock */
cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
/* Disable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x59, 0x00);
cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
/* Enable ADC clock */
cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
diff --git a/drivers/media/dvb-frontends/cxd2841er.h b/drivers/media/dvb-frontends/cxd2841er.h
index 62ad5f07390b..7f1acfb8f4f5 100644
--- a/drivers/media/dvb-frontends/cxd2841er.h
+++ b/drivers/media/dvb-frontends/cxd2841er.h
@@ -22,7 +22,6 @@
#ifndef CXD2841ER_H
#define CXD2841ER_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
enum cxd2841er_xtal {
diff --git a/drivers/media/dvb-frontends/dib3000mc.h b/drivers/media/dvb-frontends/dib3000mc.h
index b37e69e6a58c..67a6d50865fb 100644
--- a/drivers/media/dvb-frontends/dib3000mc.h
+++ b/drivers/media/dvb-frontends/dib3000mc.h
@@ -13,8 +13,6 @@
#ifndef DIB3000MC_H
#define DIB3000MC_H
-#include <linux/kconfig.h>
-
#include "dibx000_common.h"
struct dib3000mc_config {
diff --git a/drivers/media/dvb-frontends/dib7000m.h b/drivers/media/dvb-frontends/dib7000m.h
index 6468c278cc4d..8f84dfa9bb58 100644
--- a/drivers/media/dvb-frontends/dib7000m.h
+++ b/drivers/media/dvb-frontends/dib7000m.h
@@ -1,8 +1,6 @@
#ifndef DIB7000M_H
#define DIB7000M_H
-#include <linux/kconfig.h>
-
#include "dibx000_common.h"
struct dib7000m_config {
diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
index baa278928cf3..205fbbff632b 100644
--- a/drivers/media/dvb-frontends/dib7000p.h
+++ b/drivers/media/dvb-frontends/dib7000p.h
@@ -1,8 +1,6 @@
#ifndef DIB7000P_H
#define DIB7000P_H
-#include <linux/kconfig.h>
-
#include "dibx000_common.h"
struct dib7000p_config {
diff --git a/drivers/media/dvb-frontends/drxd.h b/drivers/media/dvb-frontends/drxd.h
index a47c22d6667e..f0507cdbb503 100644
--- a/drivers/media/dvb-frontends/drxd.h
+++ b/drivers/media/dvb-frontends/drxd.h
@@ -24,7 +24,6 @@
#ifndef _DRXD_H_
#define _DRXD_H_
-#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/i2c.h>
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index 8f0b9eec528f..a629897eb905 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -1,7 +1,6 @@
#ifndef _DRXK_H_
#define _DRXK_H_
-#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/i2c.h>
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index b975da099929..c595adc61c6f 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6448,7 +6448,7 @@ static int get_strength(struct drxk_state *state, u64 *strength)
return status;
/* SCU c.o.c. */
- read16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, &scu_coc);
+ status = read16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, &scu_coc);
if (status < 0)
return status;
diff --git a/drivers/media/dvb-frontends/ds3000.h b/drivers/media/dvb-frontends/ds3000.h
index 153169da9017..82e8c2531f26 100644
--- a/drivers/media/dvb-frontends/ds3000.h
+++ b/drivers/media/dvb-frontends/ds3000.h
@@ -22,7 +22,6 @@
#ifndef DS3000_H
#define DS3000_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct ds3000_config {
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 53089e142715..735a96662022 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -739,7 +739,7 @@ static int dvb_pll_init(struct dvb_frontend *fe)
return -EINVAL;
}
-static struct dvb_tuner_ops dvb_pll_tuner_ops = {
+static const struct dvb_tuner_ops dvb_pll_tuner_ops = {
.release = dvb_pll_release,
.sleep = dvb_pll_sleep,
.init = dvb_pll_init,
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.h b/drivers/media/dvb-frontends/dvb_dummy_fe.h
index 15e4ceab869a..50f1af512b62 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.h
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.h
@@ -22,7 +22,6 @@
#ifndef DVB_DUMMY_FE_H
#define DVB_DUMMY_FE_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/ec100.h b/drivers/media/dvb-frontends/ec100.h
index 9544bab5cd1d..e894bdcf35a3 100644
--- a/drivers/media/dvb-frontends/ec100.h
+++ b/drivers/media/dvb-frontends/ec100.h
@@ -22,7 +22,6 @@
#ifndef EC100_H
#define EC100_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct ec100_config {
diff --git a/drivers/media/usb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index db6eb79cde07..93f59bfea092 100644
--- a/drivers/media/usb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -1,5 +1,5 @@
-/* DVB USB compliant Linux driver for the
- * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module
+/*
+ * Frontend driver for the GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module
*
* Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com)
* Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com)
@@ -8,17 +8,31 @@
*
* This module is based off the vp7045 and vp702x modules
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
- * see Documentation/dvb/README.dvb-usb for more information
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
*/
-#include "gp8psk.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "gp8psk-fe.h"
+#include "dvb_frontend.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off).");
+
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
struct gp8psk_fe_state {
struct dvb_frontend fe;
- struct dvb_usb_device *d;
+ void *priv;
+ const struct gp8psk_fe_ops *ops;
+ bool is_rev1;
u8 lock;
u16 snr;
unsigned long next_status_check;
@@ -29,22 +43,24 @@ static int gp8psk_tuned_to_DCII(struct dvb_frontend *fe)
{
struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 status;
- gp8psk_usb_in_op(st->d, GET_8PSK_CONFIG, 0, 0, &status, 1);
+
+ st->ops->in(st->priv, GET_8PSK_CONFIG, 0, 0, &status, 1);
return status & bmDCtuned;
}
static int gp8psk_set_tuner_mode(struct dvb_frontend *fe, int mode)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
- return gp8psk_usb_out_op(state->d, SET_8PSK_CONFIG, mode, 0, NULL, 0);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ return st->ops->out(st->priv, SET_8PSK_CONFIG, mode, 0, NULL, 0);
}
static int gp8psk_fe_update_status(struct gp8psk_fe_state *st)
{
u8 buf[6];
if (time_after(jiffies,st->next_status_check)) {
- gp8psk_usb_in_op(st->d, GET_SIGNAL_LOCK, 0,0,&st->lock,1);
- gp8psk_usb_in_op(st->d, GET_SIGNAL_STRENGTH, 0,0,buf,6);
+ st->ops->in(st->priv, GET_SIGNAL_LOCK, 0, 0, &st->lock, 1);
+ st->ops->in(st->priv, GET_SIGNAL_STRENGTH, 0, 0, buf, 6);
st->snr = (buf[1]) << 8 | buf[0];
st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000;
}
@@ -116,13 +132,12 @@ static int gp8psk_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_front
static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 cmd[10];
u32 freq = c->frequency * 1000;
- int gp_product_id = le16_to_cpu(state->d->udev->descriptor.idProduct);
- deb_fe("%s()\n", __func__);
+ dprintk("%s()\n", __func__);
cmd[4] = freq & 0xff;
cmd[5] = (freq >> 8) & 0xff;
@@ -136,21 +151,21 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_DVBS:
if (c->modulation != QPSK) {
- deb_fe("%s: unsupported modulation selected (%d)\n",
+ dprintk("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
}
c->fec_inner = FEC_AUTO;
break;
case SYS_DVBS2: /* kept for backwards compatibility */
- deb_fe("%s: DVB-S2 delivery system selected\n", __func__);
+ dprintk("%s: DVB-S2 delivery system selected\n", __func__);
break;
case SYS_TURBO:
- deb_fe("%s: Turbo-FEC delivery system selected\n", __func__);
+ dprintk("%s: Turbo-FEC delivery system selected\n", __func__);
break;
default:
- deb_fe("%s: unsupported delivery system selected (%d)\n",
+ dprintk("%s: unsupported delivery system selected (%d)\n",
__func__, c->delivery_system);
return -EOPNOTSUPP;
}
@@ -161,9 +176,9 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
cmd[3] = (c->symbol_rate >> 24) & 0xff;
switch (c->modulation) {
case QPSK:
- if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
+ if (st->is_rev1)
if (gp8psk_tuned_to_DCII(fe))
- gp8psk_bcm4500_reload(state->d);
+ st->ops->reload(st->priv);
switch (c->fec_inner) {
case FEC_1_2:
cmd[9] = 0; break;
@@ -207,18 +222,18 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend *fe)
cmd[9] = 0;
break;
default: /* Unknown modulation */
- deb_fe("%s: unsupported modulation selected (%d)\n",
+ dprintk("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
}
- if (gp_product_id == USB_PID_GENPIX_8PSK_REV_1_WARM)
+ if (st->is_rev1)
gp8psk_set_tuner_mode(fe, 0);
- gp8psk_usb_out_op(state->d, TUNE_8PSK, 0, 0, cmd, 10);
+ st->ops->out(st->priv, TUNE_8PSK, 0, 0, cmd, 10);
- state->lock = 0;
- state->next_status_check = jiffies;
- state->status_check_interval = 200;
+ st->lock = 0;
+ st->next_status_check = jiffies;
+ st->status_check_interval = 200;
return 0;
}
@@ -228,9 +243,9 @@ static int gp8psk_fe_send_diseqc_msg (struct dvb_frontend* fe,
{
struct gp8psk_fe_state *st = fe->demodulator_priv;
- deb_fe("%s\n",__func__);
+ dprintk("%s\n", __func__);
- if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, m->msg[0], 0,
+ if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, m->msg[0], 0,
m->msg, m->msg_len)) {
return -EINVAL;
}
@@ -243,12 +258,12 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 cmd;
- deb_fe("%s\n",__func__);
+ dprintk("%s\n", __func__);
/* These commands are certainly wrong */
cmd = (burst == SEC_MINI_A) ? 0x00 : 0x01;
- if (gp8psk_usb_out_op(st->d,SEND_DISEQC_COMMAND, cmd, 0,
+ if (st->ops->out(st->priv, SEND_DISEQC_COMMAND, cmd, 0,
&cmd, 0)) {
return -EINVAL;
}
@@ -258,10 +273,10 @@ static int gp8psk_fe_send_diseqc_burst(struct dvb_frontend *fe,
static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
enum fe_sec_tone_mode tone)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
- if (gp8psk_usb_out_op(state->d,SET_22KHZ_TONE,
- (tone == SEC_TONE_ON), 0, NULL, 0)) {
+ if (st->ops->out(st->priv, SET_22KHZ_TONE,
+ (tone == SEC_TONE_ON), 0, NULL, 0)) {
return -EINVAL;
}
return 0;
@@ -270,9 +285,9 @@ static int gp8psk_fe_set_tone(struct dvb_frontend *fe,
static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
- if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE,
+ if (st->ops->out(st->priv, SET_LNB_VOLTAGE,
voltage == SEC_VOLTAGE_18, 0, NULL, 0)) {
return -EINVAL;
}
@@ -281,52 +296,60 @@ static int gp8psk_fe_set_voltage(struct dvb_frontend *fe,
static int gp8psk_fe_enable_high_lnb_voltage(struct dvb_frontend* fe, long onoff)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
- return gp8psk_usb_out_op(state->d, USE_EXTRA_VOLT, onoff, 0,NULL,0);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ return st->ops->out(st->priv, USE_EXTRA_VOLT, onoff, 0, NULL, 0);
}
static int gp8psk_fe_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long sw_cmd)
{
- struct gp8psk_fe_state* state = fe->demodulator_priv;
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
u8 cmd = sw_cmd & 0x7f;
- if (gp8psk_usb_out_op(state->d,SET_DN_SWITCH, cmd, 0,
- NULL, 0)) {
+ if (st->ops->out(st->priv, SET_DN_SWITCH, cmd, 0, NULL, 0))
return -EINVAL;
- }
- if (gp8psk_usb_out_op(state->d,SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
- 0, NULL, 0)) {
+
+ if (st->ops->out(st->priv, SET_LNB_VOLTAGE, !!(sw_cmd & 0x80),
+ 0, NULL, 0))
return -EINVAL;
- }
return 0;
}
static void gp8psk_fe_release(struct dvb_frontend* fe)
{
- struct gp8psk_fe_state *state = fe->demodulator_priv;
- kfree(state);
+ struct gp8psk_fe_state *st = fe->demodulator_priv;
+
+ kfree(st);
}
static struct dvb_frontend_ops gp8psk_fe_ops;
-struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d)
+struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
+ void *priv, bool is_rev1)
{
- struct gp8psk_fe_state *s = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
- if (s == NULL)
- goto error;
-
- s->d = d;
- memcpy(&s->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
- s->fe.demodulator_priv = s;
-
- goto success;
-error:
- return NULL;
-success:
- return &s->fe;
-}
+ struct gp8psk_fe_state *st;
+ if (!ops || !ops->in || !ops->out || !ops->reload) {
+ pr_err("Error! gp8psk-fe ops not defined.\n");
+ return NULL;
+ }
+
+ st = kzalloc(sizeof(struct gp8psk_fe_state), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ memcpy(&st->fe.ops, &gp8psk_fe_ops, sizeof(struct dvb_frontend_ops));
+ st->fe.demodulator_priv = st;
+ st->ops = ops;
+ st->priv = priv;
+ st->is_rev1 = is_rev1;
+
+ pr_info("Frontend %sattached\n", is_rev1 ? "revision 1 " : "");
+
+ return &st->fe;
+}
+EXPORT_SYMBOL_GPL(gp8psk_fe_attach);
static struct dvb_frontend_ops gp8psk_fe_ops = {
.delsys = { SYS_DVBS },
@@ -370,3 +393,8 @@ static struct dvb_frontend_ops gp8psk_fe_ops = {
.dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd,
.enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage
};
+
+MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
+MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S");
+MODULE_VERSION("1.1");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.h b/drivers/media/dvb-frontends/gp8psk-fe.h
new file mode 100644
index 000000000000..6c7944b1ecd6
--- /dev/null
+++ b/drivers/media/dvb-frontends/gp8psk-fe.h
@@ -0,0 +1,82 @@
+/*
+ * gp8psk_fe driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef GP8PSK_FE_H
+#define GP8PSK_FE_H
+
+#include <linux/types.h>
+
+/* gp8psk commands */
+
+#define GET_8PSK_CONFIG 0x80 /* in */
+#define SET_8PSK_CONFIG 0x81
+#define I2C_WRITE 0x83
+#define I2C_READ 0x84
+#define ARM_TRANSFER 0x85
+#define TUNE_8PSK 0x86
+#define GET_SIGNAL_STRENGTH 0x87 /* in */
+#define LOAD_BCM4500 0x88
+#define BOOT_8PSK 0x89 /* in */
+#define START_INTERSIL 0x8A /* in */
+#define SET_LNB_VOLTAGE 0x8B
+#define SET_22KHZ_TONE 0x8C
+#define SEND_DISEQC_COMMAND 0x8D
+#define SET_DVB_MODE 0x8E
+#define SET_DN_SWITCH 0x8F
+#define GET_SIGNAL_LOCK 0x90 /* in */
+#define GET_FW_VERS 0x92
+#define GET_SERIAL_NUMBER 0x93 /* in */
+#define USE_EXTRA_VOLT 0x94
+#define GET_FPGA_VERS 0x95
+#define CW3K_INIT 0x9d
+
+/* PSK_configuration bits */
+#define bm8pskStarted 0x01
+#define bm8pskFW_Loaded 0x02
+#define bmIntersilOn 0x04
+#define bmDVBmode 0x08
+#define bm22kHz 0x10
+#define bmSEL18V 0x20
+#define bmDCtuned 0x40
+#define bmArmed 0x80
+
+/* Satellite modulation modes */
+#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
+#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
+#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
+#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
+
+#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
+#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
+#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
+#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
+#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
+#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
+
+/* firmware revision id's */
+#define GP8PSK_FW_REV1 0x020604
+#define GP8PSK_FW_REV2 0x020704
+#define GP8PSK_FW_VERS(_fw_vers) \
+ ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
+
+struct gp8psk_fe_ops {
+ int (*in)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
+ int (*out)(void *priv, u8 req, u16 value, u16 index, u8 *b, int blen);
+ int (*reload)(void *priv);
+};
+
+struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
+ void *priv, bool is_rev1);
+
+#endif
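Editor's note: a hedged sketch of how a transport backend is expected to consume this header — it supplies its own control-transfer helpers through gp8psk_fe_ops and an opaque priv cookie. The my_* names below are illustrative placeholders, not the gp8psk USB driver's real symbols:

/* hypothetical backend glue */
static const struct gp8psk_fe_ops my_fe_ops = {
	.in     = my_usb_in_op,		/* control-IN request */
	.out    = my_usb_out_op,	/* control-OUT request */
	.reload = my_bcm4500_reload,	/* re-download demod firmware */
};

static int my_frontend_attach(struct my_adapter *adap)
{
	adap->fe = gp8psk_fe_attach(&my_fe_ops, adap->dev_priv,
				    adap->is_rev1);
	return adap->fe ? 0 : -ENODEV;
}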
diff --git a/drivers/media/dvb-frontends/hd29l2.h b/drivers/media/dvb-frontends/hd29l2.h
index 48e9ab74c883..a14d6f36dbf6 100644
--- a/drivers/media/dvb-frontends/hd29l2.h
+++ b/drivers/media/dvb-frontends/hd29l2.h
@@ -23,7 +23,6 @@
#ifndef HD29L2_H
#define HD29L2_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct hd29l2_config {
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index 97a8982740a6..dc43c5f6d0ea 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -842,7 +842,7 @@ static int helene_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static struct dvb_tuner_ops helene_tuner_ops = {
+static const struct dvb_tuner_ops helene_tuner_ops = {
.info = {
.name = "Sony HELENE Ter tuner",
.frequency_min = 1000000,
@@ -856,7 +856,7 @@ static struct dvb_tuner_ops helene_tuner_ops = {
.get_frequency = helene_get_frequency,
};
-static struct dvb_tuner_ops helene_tuner_ops_s = {
+static const struct dvb_tuner_ops helene_tuner_ops_s = {
.info = {
.name = "Sony HELENE Sat tuner",
.frequency_min = 500000,
@@ -987,8 +987,10 @@ struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- if (helene_x_pon(priv) != 0)
+ if (helene_x_pon(priv) != 0) {
+ kfree(priv);
return NULL;
+ }
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
@@ -1021,8 +1023,10 @@ struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
- if (helene_x_pon(priv) != 0)
+ if (helene_x_pon(priv) != 0) {
+ kfree(priv);
return NULL;
+ }
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
diff --git a/drivers/media/dvb-frontends/helene.h b/drivers/media/dvb-frontends/helene.h
index e1b9224cfc55..333615491d9e 100644
--- a/drivers/media/dvb-frontends/helene.h
+++ b/drivers/media/dvb-frontends/helene.h
@@ -21,7 +21,6 @@
#ifndef __DVB_HELENE_H__
#define __DVB_HELENE_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index a98bca5270d9..0c089b5986a1 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -326,7 +326,7 @@ static int horus3a_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static struct dvb_tuner_ops horus3a_tuner_ops = {
+static const struct dvb_tuner_ops horus3a_tuner_ops = {
.info = {
.name = "Sony Horus3a",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h
index c1e2d1834b78..672a556df71a 100644
--- a/drivers/media/dvb-frontends/horus3a.h
+++ b/drivers/media/dvb-frontends/horus3a.h
@@ -22,7 +22,6 @@
#ifndef __DVB_HORUS3A_H__
#define __DVB_HORUS3A_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 0e3387e00952..2826bbb36b73 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -258,7 +258,7 @@ static int ix2505v_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static struct dvb_tuner_ops ix2505v_tuner_ops = {
+static const struct dvb_tuner_ops ix2505v_tuner_ops = {
.info = {
.name = "Sharp IX2505V (B0017)",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/ix2505v.h b/drivers/media/dvb-frontends/ix2505v.h
index af107a2dd357..5eab39744b23 100644
--- a/drivers/media/dvb-frontends/ix2505v.h
+++ b/drivers/media/dvb-frontends/ix2505v.h
@@ -20,7 +20,6 @@
#ifndef DVB_IX2505V_H
#define DVB_IX2505V_H
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/lg2160.h b/drivers/media/dvb-frontends/lg2160.h
index d20bd909de39..8c74ddc6b88a 100644
--- a/drivers/media/dvb-frontends/lg2160.h
+++ b/drivers/media/dvb-frontends/lg2160.h
@@ -22,7 +22,6 @@
#ifndef _LG2160_H_
#define _LG2160_H_
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/lgdt3305.h b/drivers/media/dvb-frontends/lgdt3305.h
index f91a1b49ce2f..e7dceb60e572 100644
--- a/drivers/media/dvb-frontends/lgdt3305.h
+++ b/drivers/media/dvb-frontends/lgdt3305.h
@@ -22,7 +22,6 @@
#ifndef _LGDT3305_H_
#define _LGDT3305_H_
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 179c26e5eb4e..0ca4e810e9d8 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -731,7 +731,7 @@ static int lgdt3306a_set_if(struct lgdt3306a_state *state,
switch (if_freq_khz) {
default:
- pr_warn("IF=%d KHz is not supportted, 3250 assumed\n",
+ pr_warn("IF=%d KHz is not supported, 3250 assumed\n",
if_freq_khz);
/* fallthrough */
case 3250: /* 3.25Mhz */
@@ -1737,24 +1737,16 @@ static int lgdt3306a_get_tune_settings(struct dvb_frontend *fe,
static int lgdt3306a_search(struct dvb_frontend *fe)
{
enum fe_status status = 0;
- int i, ret;
+ int ret;
/* set frontend */
ret = lgdt3306a_set_parameters(fe);
if (ret)
goto error;
- /* wait frontend lock */
- for (i = 20; i > 0; i--) {
- dbg_info(": loop=%d\n", i);
- msleep(50);
- ret = lgdt3306a_read_status(fe, &status);
- if (ret)
- goto error;
-
- if (status & FE_HAS_LOCK)
- break;
- }
+ ret = lgdt3306a_read_status(fe, &status);
+ if (ret)
+ goto error;
/* check if we have a valid signal */
if (status & FE_HAS_LOCK)
diff --git a/drivers/media/dvb-frontends/lgs8gl5.h b/drivers/media/dvb-frontends/lgs8gl5.h
index a5b3faf121f0..f36a7fd0b102 100644
--- a/drivers/media/dvb-frontends/lgs8gl5.h
+++ b/drivers/media/dvb-frontends/lgs8gl5.h
@@ -23,7 +23,6 @@
#ifndef LGS8GL5_H
#define LGS8GL5_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct lgs8gl5_config {
diff --git a/drivers/media/dvb-frontends/lgs8gxx.h b/drivers/media/dvb-frontends/lgs8gxx.h
index 368c9928ef7f..7519c0210399 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.h
+++ b/drivers/media/dvb-frontends/lgs8gxx.h
@@ -26,7 +26,6 @@
#ifndef __LGS8GXX_H__
#define __LGS8GXX_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/i2c.h>
diff --git a/drivers/media/dvb-frontends/lnbh24.h b/drivers/media/dvb-frontends/lnbh24.h
index a088b8ec1e53..24431dfdce1f 100644
--- a/drivers/media/dvb-frontends/lnbh24.h
+++ b/drivers/media/dvb-frontends/lnbh24.h
@@ -23,8 +23,6 @@
#ifndef _LNBH24_H
#define _LNBH24_H
-#include <linux/kconfig.h>
-
/* system register bits */
#define LNBH24_OLF 0x01
#define LNBH24_OTF 0x02
diff --git a/drivers/media/dvb-frontends/lnbh25.h b/drivers/media/dvb-frontends/lnbh25.h
index 1f329ef05acc..f13fd0308b3e 100644
--- a/drivers/media/dvb-frontends/lnbh25.h
+++ b/drivers/media/dvb-frontends/lnbh25.h
@@ -22,7 +22,6 @@
#define LNBH25_H
#include <linux/i2c.h>
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
/* 22 kHz tone enabled. Tone output controlled by DSQIN pin */
diff --git a/drivers/media/dvb-frontends/lnbp21.h b/drivers/media/dvb-frontends/lnbp21.h
index cd9101f6e579..4bb6439068ec 100644
--- a/drivers/media/dvb-frontends/lnbp21.h
+++ b/drivers/media/dvb-frontends/lnbp21.h
@@ -27,8 +27,6 @@
#ifndef _LNBP21_H
#define _LNBP21_H
-#include <linux/kconfig.h>
-
/* system register bits */
/* [RO] 0=OK; 1=over current limit flag */
#define LNBP21_OLF 0x01
diff --git a/drivers/media/dvb-frontends/lnbp22.h b/drivers/media/dvb-frontends/lnbp22.h
index 5d01d92814c2..0cb72126c498 100644
--- a/drivers/media/dvb-frontends/lnbp22.h
+++ b/drivers/media/dvb-frontends/lnbp22.h
@@ -28,8 +28,6 @@
#ifndef _LNBP22_H
#define _LNBP22_H
-#include <linux/kconfig.h>
-
/* Enable */
#define LNBP22_EN 0x10
/* Voltage selection */
diff --git a/drivers/media/dvb-frontends/m88rs2000.h b/drivers/media/dvb-frontends/m88rs2000.h
index de7430178e9e..1a313b0f5875 100644
--- a/drivers/media/dvb-frontends/m88rs2000.h
+++ b/drivers/media/dvb-frontends/m88rs2000.h
@@ -20,7 +20,6 @@
#ifndef M88RS2000_H
#define M88RS2000_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 41325328a22e..fe79358b035e 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -71,25 +71,27 @@ static struct regdata mb86a20s_init1[] = {
};
static struct regdata mb86a20s_init2[] = {
- { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 },
+ { 0x50, 0xd1 }, { 0x51, 0x22 },
+ { 0x39, 0x01 },
+ { 0x71, 0x00 },
{ 0x3b, 0x21 },
- { 0x3c, 0x38 },
+ { 0x3c, 0x3a },
{ 0x01, 0x0d },
- { 0x04, 0x08 }, { 0x05, 0x03 },
+ { 0x04, 0x08 }, { 0x05, 0x05 },
{ 0x04, 0x0e }, { 0x05, 0x00 },
- { 0x04, 0x0f }, { 0x05, 0x37 },
- { 0x04, 0x0b }, { 0x05, 0x78 },
+ { 0x04, 0x0f }, { 0x05, 0x14 },
+ { 0x04, 0x0b }, { 0x05, 0x8c },
{ 0x04, 0x00 }, { 0x05, 0x00 },
- { 0x04, 0x01 }, { 0x05, 0x1e },
- { 0x04, 0x02 }, { 0x05, 0x07 },
- { 0x04, 0x03 }, { 0x05, 0xd0 },
+ { 0x04, 0x01 }, { 0x05, 0x07 },
+ { 0x04, 0x02 }, { 0x05, 0x0f },
+ { 0x04, 0x03 }, { 0x05, 0xa0 },
{ 0x04, 0x09 }, { 0x05, 0x00 },
{ 0x04, 0x0a }, { 0x05, 0xff },
- { 0x04, 0x27 }, { 0x05, 0x00 },
+ { 0x04, 0x27 }, { 0x05, 0x64 },
{ 0x04, 0x28 }, { 0x05, 0x00 },
- { 0x04, 0x1e }, { 0x05, 0x00 },
- { 0x04, 0x29 }, { 0x05, 0x64 },
- { 0x04, 0x32 }, { 0x05, 0x02 },
+ { 0x04, 0x1e }, { 0x05, 0xff },
+ { 0x04, 0x29 }, { 0x05, 0x0a },
+ { 0x04, 0x32 }, { 0x05, 0x0a },
{ 0x04, 0x14 }, { 0x05, 0x02 },
{ 0x04, 0x04 }, { 0x05, 0x00 },
{ 0x04, 0x05 }, { 0x05, 0x22 },
@@ -97,8 +99,6 @@ static struct regdata mb86a20s_init2[] = {
{ 0x04, 0x07 }, { 0x05, 0xd8 },
{ 0x04, 0x12 }, { 0x05, 0x00 },
{ 0x04, 0x13 }, { 0x05, 0xff },
- { 0x04, 0x15 }, { 0x05, 0x4e },
- { 0x04, 0x16 }, { 0x05, 0x20 },
/*
* On this demod, when the bit count reaches the count below,
@@ -152,42 +152,36 @@ static struct regdata mb86a20s_init2[] = {
{ 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */
{ 0x45, 0x04 }, /* CN symbol 4 */
{ 0x48, 0x04 }, /* CN manual mode */
-
+ { 0x50, 0xd5 }, { 0x51, 0x01 },
{ 0x50, 0xd6 }, { 0x51, 0x1f },
{ 0x50, 0xd2 }, { 0x51, 0x03 },
- { 0x50, 0xd7 }, { 0x51, 0xbf },
- { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff },
- { 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c },
-
- { 0x04, 0x40 }, { 0x05, 0x00 },
- { 0x28, 0x00 }, { 0x2b, 0x08 },
- { 0x28, 0x05 }, { 0x2b, 0x00 },
+ { 0x50, 0xd7 }, { 0x51, 0x3f },
{ 0x1c, 0x01 },
- { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f },
- { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 },
- { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 },
- { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 },
- { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 },
- { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
- { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 },
- { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 },
- { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b },
- { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 },
- { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d },
- { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 },
- { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b },
- { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
- { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 },
- { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 },
- { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 },
- { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
- { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
- { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef },
- { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 },
- { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 },
- { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d },
- { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 },
- { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba },
+ { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 },
+ { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d },
+ { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
+ { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 },
+ { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 },
+ { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 },
+ { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
+ { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 },
+ { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e },
+ { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e },
+ { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 },
+ { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
+ { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 },
+ { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 },
+ { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe },
+ { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 },
+ { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee },
+ { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 },
+ { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f },
+ { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 },
+ { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 },
+ { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a },
+ { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc },
+ { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba },
+ { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 },
{ 0x50, 0x1e }, { 0x51, 0x5d },
{ 0x50, 0x22 }, { 0x51, 0x00 },
{ 0x50, 0x23 }, { 0x51, 0xc8 },
@@ -196,9 +190,7 @@ static struct regdata mb86a20s_init2[] = {
{ 0x50, 0x26 }, { 0x51, 0x00 },
{ 0x50, 0x27 }, { 0x51, 0xc3 },
{ 0x50, 0x39 }, { 0x51, 0x02 },
- { 0xec, 0x0f },
- { 0xeb, 0x1f },
- { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
+ { 0x50, 0xd5 }, { 0x51, 0x01 },
{ 0xd0, 0x00 },
};
@@ -318,7 +310,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, enum fe_status *status)
if (val >= 7)
*status |= FE_HAS_SYNC;
- if (val >= 8) /* Maybe 9? */
+ /*
+ * Actually, on state S8, it starts receiving TS, but the TS
+ * output only reaches its normal state after the transition to S9.
+ */
+ if (val >= 9)
*status |= FE_HAS_LOCK;
dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n",
@@ -2058,6 +2054,11 @@ static void mb86a20s_release(struct dvb_frontend *fe)
kfree(state);
}
+static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
static struct dvb_frontend_ops mb86a20s_ops;
struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
@@ -2130,6 +2131,7 @@ static struct dvb_frontend_ops mb86a20s_ops = {
.read_status = mb86a20s_read_status_and_stats,
.read_signal_strength = mb86a20s_read_signal_strength_from_cache,
.tune = mb86a20s_tune,
+ .get_frontend_algo = mb86a20s_get_frontend_algo,
};
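Editor's note: get_frontend_algo matters because dvb-core's frontend thread selects its retune strategy from it; returning DVBFE_ALGO_HW keeps the core's software zigzag away from a demod that scans in hardware. A simplified sketch of the core-side dispatch (condensed from dvb_frontend.c, not verbatim):

static void thread_dispatch_sketch(struct dvb_frontend *fe, bool re_tune)
{
	unsigned int delay = 0;
	enum fe_status s = 0;

	switch (fe->ops.get_frontend_algo ?
		fe->ops.get_frontend_algo(fe) : DVBFE_ALGO_SW) {
	case DVBFE_ALGO_HW:
		/* hardware-driven: the core just calls the driver's tune() */
		fe->ops.tune(fe, re_tune, 0, &delay, &s);
		break;
	default:
		/* DVBFE_ALGO_SW: the core runs its own zigzag retune loop */
		break;
	}
}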
MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
diff --git a/drivers/media/dvb-frontends/mb86a20s.h b/drivers/media/dvb-frontends/mb86a20s.h
index a113282d6956..dfb02db2126c 100644
--- a/drivers/media/dvb-frontends/mb86a20s.h
+++ b/drivers/media/dvb-frontends/mb86a20s.h
@@ -16,7 +16,6 @@
#ifndef MB86A20S_H
#define MB86A20S_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
/**
diff --git a/drivers/media/dvb-frontends/s5h1409.h b/drivers/media/dvb-frontends/s5h1409.h
index f58b9ca5557a..b38557c451b9 100644
--- a/drivers/media/dvb-frontends/s5h1409.h
+++ b/drivers/media/dvb-frontends/s5h1409.h
@@ -22,7 +22,6 @@
#ifndef __S5H1409_H__
#define __S5H1409_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct s5h1409_config {
diff --git a/drivers/media/dvb-frontends/s5h1411.h b/drivers/media/dvb-frontends/s5h1411.h
index f3a87f7ec360..791bab0e16e9 100644
--- a/drivers/media/dvb-frontends/s5h1411.h
+++ b/drivers/media/dvb-frontends/s5h1411.h
@@ -22,7 +22,6 @@
#ifndef __S5H1411_H__
#define __S5H1411_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#define S5H1411_I2C_TOP_ADDR (0x32 >> 1)
diff --git a/drivers/media/dvb-frontends/s5h1432.h b/drivers/media/dvb-frontends/s5h1432.h
index f490c5ee5801..b81c9bd4e422 100644
--- a/drivers/media/dvb-frontends/s5h1432.h
+++ b/drivers/media/dvb-frontends/s5h1432.h
@@ -22,7 +22,6 @@
#ifndef __S5H1432_H__
#define __S5H1432_H__
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#define S5H1432_I2C_TOP_ADDR (0x02 >> 1)
diff --git a/drivers/media/dvb-frontends/s921.h b/drivers/media/dvb-frontends/s921.h
index f5b722d8081b..a47ed894d4ae 100644
--- a/drivers/media/dvb-frontends/s921.h
+++ b/drivers/media/dvb-frontends/s921.h
@@ -17,7 +17,6 @@
#ifndef S921_H
#define S921_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct s921_config {
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 8bf716a8ea58..78669ea68c61 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -25,6 +25,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/firmware.h>
+#include <linux/regmap.h>
#include "dvb_frontend.h"
#include "dvb_math.h"
@@ -40,7 +41,9 @@
*/
struct si2165_state {
- struct i2c_adapter *i2c;
+ struct i2c_client *client;
+
+ struct regmap *regmap;
struct dvb_frontend fe;
@@ -108,61 +111,27 @@ static int si2165_write(struct si2165_state *state, const u16 reg,
const u8 *src, const int count)
{
int ret;
- struct i2c_msg msg;
- u8 buf[2 + 4]; /* write a maximum of 4 bytes of data */
-
- if (count + 2 > sizeof(buf)) {
- dev_warn(&state->i2c->dev,
- "%s: i2c wr reg=%04x: count=%d is too big!\n",
- KBUILD_MODNAME, reg, count);
- return -EINVAL;
- }
- buf[0] = reg >> 8;
- buf[1] = reg & 0xff;
- memcpy(buf + 2, src, count);
-
- msg.addr = state->config.i2c_addr;
- msg.flags = 0;
- msg.buf = buf;
- msg.len = count + 2;
if (debug & DEBUG_I2C_WRITE)
deb_i2c_write("reg: 0x%04x, data: %*ph\n", reg, count, src);
- ret = i2c_transfer(state->i2c, &msg, 1);
+ ret = regmap_bulk_write(state->regmap, reg, src, count);
- if (ret != 1) {
- dev_err(&state->i2c->dev, "%s: ret == %d\n", __func__, ret);
- if (ret < 0)
- return ret;
- else
- return -EREMOTEIO;
- }
+ if (ret)
+ dev_err(&state->client->dev, "%s: ret == %d\n", __func__, ret);
- return 0;
+ return ret;
}
static int si2165_read(struct si2165_state *state,
const u16 reg, u8 *val, const int count)
{
- int ret;
- u8 reg_buf[] = { reg >> 8, reg & 0xff };
- struct i2c_msg msg[] = {
- { .addr = state->config.i2c_addr,
- .flags = 0, .buf = reg_buf, .len = 2 },
- { .addr = state->config.i2c_addr,
- .flags = I2C_M_RD, .buf = val, .len = count },
- };
-
- ret = i2c_transfer(state->i2c, msg, 2);
+ int ret = regmap_bulk_read(state->regmap, reg, val, count);
- if (ret != 2) {
- dev_err(&state->i2c->dev, "%s: error (addr %02x reg %04x error (ret == %i)\n",
+ if (ret) {
+ dev_err(&state->client->dev, "%s: error (addr %02x reg %04x error (ret == %i)\n",
__func__, state->config.i2c_addr, reg, ret);
- if (ret < 0)
- return ret;
- else
- return -EREMOTEIO;
+ return ret;
}
if (debug & DEBUG_I2C_READ)
@@ -174,9 +143,9 @@ static int si2165_read(struct si2165_state *state,
static int si2165_readreg8(struct si2165_state *state,
const u16 reg, u8 *val)
{
- int ret;
-
- ret = si2165_read(state, reg, val, 1);
+ unsigned int val_tmp;
+ int ret = regmap_read(state->regmap, reg, &val_tmp);
+ *val = (u8)val_tmp;
deb_readreg("R(0x%04x)=0x%02x\n", reg, *val);
return ret;
}
@@ -194,7 +163,7 @@ static int si2165_readreg16(struct si2165_state *state,
static int si2165_writereg8(struct si2165_state *state, const u16 reg, u8 val)
{
- return si2165_write(state, reg, &val, 1);
+ return regmap_write(state->regmap, reg, val);
}
static int si2165_writereg16(struct si2165_state *state, const u16 reg, u16 val)
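Editor's note: the conversion pattern across these si2165 hunks is a 16-bit-address / 8-bit-value regmap replacing the hand-rolled i2c_transfer() pairs, so bounds checking, address serialization, and error mapping move into regmap. A hedged sketch of the setup the probe hunk below performs (max_register matches the value used there):

static int si2165_regmap_sketch(struct i2c_client *client,
				struct si2165_state *state)
{
	static const struct regmap_config cfg = {
		.reg_bits = 16,		/* 16-bit register addresses */
		.val_bits = 8,		/* 8-bit register values */
		.max_register = 0x08ff,
	};

	state->regmap = devm_regmap_init_i2c(client, &cfg);
	if (IS_ERR(state->regmap))
		return PTR_ERR(state->regmap);

	/* reads and writes then collapse to single calls, e.g.
	 * regmap_bulk_read(state->regmap, reg, val, count) and
	 * regmap_write(state->regmap, reg, val), as in the hunks above.
	 */
	return 0;
}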
@@ -345,7 +314,7 @@ static int si2165_wait_init_done(struct si2165_state *state)
return 0;
usleep_range(1000, 50000);
}
- dev_err(&state->i2c->dev, "%s: init_done was not set\n",
+ dev_err(&state->client->dev, "%s: init_done was not set\n",
KBUILD_MODNAME);
return ret;
}
@@ -374,14 +343,14 @@ static int si2165_upload_firmware_block(struct si2165_state *state,
wordcount = data[offset];
if (wordcount < 1 || data[offset+1] ||
data[offset+2] || data[offset+3]) {
- dev_warn(&state->i2c->dev,
+ dev_warn(&state->client->dev,
"%s: bad fw data[0..3] = %*ph\n",
KBUILD_MODNAME, 4, data);
return -EINVAL;
}
if (offset + 8 + wordcount * 4 > len) {
- dev_warn(&state->i2c->dev,
+ dev_warn(&state->client->dev,
"%s: len is too small for block len=%d, wordcount=%d\n",
KBUILD_MODNAME, len, wordcount);
return -EINVAL;
@@ -444,15 +413,15 @@ static int si2165_upload_firmware(struct si2165_state *state)
fw_file = SI2165_FIRMWARE_REV_D;
break;
default:
- dev_info(&state->i2c->dev, "%s: no firmware file for revision=%d\n",
+ dev_info(&state->client->dev, "%s: no firmware file for revision=%d\n",
KBUILD_MODNAME, state->chip_revcode);
return 0;
}
/* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, state->i2c->dev.parent);
+ ret = request_firmware(&fw, fw_file, &state->client->dev);
if (ret) {
- dev_warn(&state->i2c->dev, "%s: firmware file '%s' not found\n",
+ dev_warn(&state->client->dev, "%s: firmware file '%s' not found\n",
KBUILD_MODNAME, fw_file);
goto error;
}
@@ -460,11 +429,11 @@ static int si2165_upload_firmware(struct si2165_state *state)
data = fw->data;
len = fw->size;
- dev_info(&state->i2c->dev, "%s: downloading firmware from file '%s' size=%d\n",
+ dev_info(&state->client->dev, "%s: downloading firmware from file '%s' size=%d\n",
KBUILD_MODNAME, fw_file, len);
if (len % 4 != 0) {
- dev_warn(&state->i2c->dev, "%s: firmware size is not multiple of 4\n",
+ dev_warn(&state->client->dev, "%s: firmware size is not multiple of 4\n",
KBUILD_MODNAME);
ret = -EINVAL;
goto error;
@@ -472,14 +441,14 @@ static int si2165_upload_firmware(struct si2165_state *state)
/* check header (8 bytes) */
if (len < 8) {
- dev_warn(&state->i2c->dev, "%s: firmware header is missing\n",
+ dev_warn(&state->client->dev, "%s: firmware header is missing\n",
KBUILD_MODNAME);
ret = -EINVAL;
goto error;
}
if (data[0] != 1 || data[1] != 0) {
- dev_warn(&state->i2c->dev, "%s: firmware file version is wrong\n",
+ dev_warn(&state->client->dev, "%s: firmware file version is wrong\n",
KBUILD_MODNAME);
ret = -EINVAL;
goto error;
@@ -517,7 +486,7 @@ static int si2165_upload_firmware(struct si2165_state *state)
/* start right after the header */
offset = 8;
- dev_info(&state->i2c->dev, "%s: si2165_upload_firmware extracted patch_version=0x%02x, block_count=0x%02x, crc_expected=0x%04x\n",
+ dev_info(&state->client->dev, "%s: si2165_upload_firmware extracted patch_version=0x%02x, block_count=0x%02x, crc_expected=0x%04x\n",
KBUILD_MODNAME, patch_version, block_count, crc_expected);
ret = si2165_upload_firmware_block(state, data, len, &offset, 1);
@@ -536,7 +505,7 @@ static int si2165_upload_firmware(struct si2165_state *state)
ret = si2165_upload_firmware_block(state, data, len,
&offset, block_count);
if (ret < 0) {
- dev_err(&state->i2c->dev,
+ dev_err(&state->client->dev,
"%s: firmware could not be uploaded\n",
KBUILD_MODNAME);
goto error;
@@ -548,7 +517,7 @@ static int si2165_upload_firmware(struct si2165_state *state)
goto error;
if (val16 != crc_expected) {
- dev_err(&state->i2c->dev,
+ dev_err(&state->client->dev,
"%s: firmware crc mismatch %04x != %04x\n",
KBUILD_MODNAME, val16, crc_expected);
ret = -EINVAL;
@@ -560,7 +529,7 @@ static int si2165_upload_firmware(struct si2165_state *state)
goto error;
if (len != offset) {
- dev_err(&state->i2c->dev,
+ dev_err(&state->client->dev,
"%s: firmware len mismatch %04x != %04x\n",
KBUILD_MODNAME, len, offset);
ret = -EINVAL;
@@ -577,7 +546,7 @@ static int si2165_upload_firmware(struct si2165_state *state)
if (ret < 0)
goto error;
- dev_info(&state->i2c->dev, "%s: fw load finished\n", KBUILD_MODNAME);
+ dev_info(&state->client->dev, "%s: fw load finished\n", KBUILD_MODNAME);
ret = 0;
state->firmware_loaded = true;
@@ -611,7 +580,7 @@ static int si2165_init(struct dvb_frontend *fe)
if (ret < 0)
goto error;
if (val != state->config.chip_mode) {
- dev_err(&state->i2c->dev, "%s: could not set chip_mode\n",
+ dev_err(&state->client->dev, "%s: could not set chip_mode\n",
KBUILD_MODNAME);
return -EINVAL;
}
@@ -751,6 +720,9 @@ static int si2165_set_oversamp(struct si2165_state *state, u32 dvb_rate)
u64 oversamp;
u32 reg_value;
+ if (!dvb_rate)
+ return -EINVAL;
+
oversamp = si2165_get_fe_clk(state);
oversamp <<= 23;
do_div(oversamp, dvb_rate);
@@ -769,12 +741,15 @@ static int si2165_set_if_freq_shift(struct si2165_state *state)
u32 IF = 0;
if (!fe->ops.tuner_ops.get_if_frequency) {
- dev_err(&state->i2c->dev,
+ dev_err(&state->client->dev,
"%s: Error: get_if_frequency() not defined at tuner. Can't work without it!\n",
KBUILD_MODNAME);
return -EINVAL;
}
+ if (!fe_clk)
+ return -EINVAL;
+
fe->ops.tuner_ops.get_if_frequency(fe, &IF);
if_freq_shift = IF;
if_freq_shift <<= 29;
@@ -1003,14 +978,6 @@ static int si2165_set_frontend(struct dvb_frontend *fe)
return 0;
}
-static void si2165_release(struct dvb_frontend *fe)
-{
- struct si2165_state *state = fe->demodulator_priv;
-
- dprintk("%s: called\n", __func__);
- kfree(state);
-}
-
static struct dvb_frontend_ops si2165_ops = {
.info = {
.name = "Silicon Labs ",
@@ -1046,67 +1013,83 @@ static struct dvb_frontend_ops si2165_ops = {
.set_frontend = si2165_set_frontend,
.read_status = si2165_read_status,
-
- .release = si2165_release,
};
-struct dvb_frontend *si2165_attach(const struct si2165_config *config,
- struct i2c_adapter *i2c)
+static int si2165_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct si2165_state *state = NULL;
+ struct si2165_platform_data *pdata = client->dev.platform_data;
int n;
- int io_ret;
+ int ret = 0;
u8 val;
char rev_char;
const char *chip_name;
-
- if (config == NULL || i2c == NULL)
- goto error;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = 0x08ff,
+ };
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct si2165_state), GFP_KERNEL);
- if (state == NULL)
+ if (state == NULL) {
+ ret = -ENOMEM;
goto error;
+ }
+
+ /* create regmap */
+ state->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(state->regmap)) {
+ ret = PTR_ERR(state->regmap);
+ goto error;
+ }
/* setup the state */
- state->i2c = i2c;
- state->config = *config;
+ state->client = client;
+ state->config.i2c_addr = client->addr;
+ state->config.chip_mode = pdata->chip_mode;
+ state->config.ref_freq_Hz = pdata->ref_freq_Hz;
+ state->config.inversion = pdata->inversion;
if (state->config.ref_freq_Hz < 4000000
|| state->config.ref_freq_Hz > 27000000) {
- dev_err(&state->i2c->dev, "%s: ref_freq of %d Hz not supported by this driver\n",
+ dev_err(&state->client->dev, "%s: ref_freq of %d Hz not supported by this driver\n",
KBUILD_MODNAME, state->config.ref_freq_Hz);
+ ret = -EINVAL;
goto error;
}
/* create dvb_frontend */
memcpy(&state->fe.ops, &si2165_ops,
sizeof(struct dvb_frontend_ops));
+ state->fe.ops.release = NULL;
state->fe.demodulator_priv = state;
+ i2c_set_clientdata(client, state);
/* powerup */
- io_ret = si2165_writereg8(state, 0x0000, state->config.chip_mode);
- if (io_ret < 0)
- goto error;
+ ret = si2165_writereg8(state, 0x0000, state->config.chip_mode);
+ if (ret < 0)
+ goto nodev_error;
- io_ret = si2165_readreg8(state, 0x0000, &val);
- if (io_ret < 0)
- goto error;
+ ret = si2165_readreg8(state, 0x0000, &val);
+ if (ret < 0)
+ goto nodev_error;
if (val != state->config.chip_mode)
- goto error;
+ goto nodev_error;
- io_ret = si2165_readreg8(state, 0x0023, &state->chip_revcode);
- if (io_ret < 0)
- goto error;
+ ret = si2165_readreg8(state, 0x0023, &state->chip_revcode);
+ if (ret < 0)
+ goto nodev_error;
- io_ret = si2165_readreg8(state, 0x0118, &state->chip_type);
- if (io_ret < 0)
- goto error;
+ ret = si2165_readreg8(state, 0x0118, &state->chip_type);
+ if (ret < 0)
+ goto nodev_error;
/* powerdown */
- io_ret = si2165_writereg8(state, 0x0000, SI2165_MODE_OFF);
- if (io_ret < 0)
- goto error;
+ ret = si2165_writereg8(state, 0x0000, SI2165_MODE_OFF);
+ if (ret < 0)
+ goto nodev_error;
if (state->chip_revcode < 26)
rev_char = 'A' + state->chip_revcode;
@@ -1124,12 +1107,12 @@ struct dvb_frontend *si2165_attach(const struct si2165_config *config,
state->has_dvbc = true;
break;
default:
- dev_err(&state->i2c->dev, "%s: Unsupported Silicon Labs chip (type %d, rev %d)\n",
+ dev_err(&state->client->dev, "%s: Unsupported Silicon Labs chip (type %d, rev %d)\n",
KBUILD_MODNAME, state->chip_type, state->chip_revcode);
- goto error;
+ goto nodev_error;
}
- dev_info(&state->i2c->dev,
+ dev_info(&state->client->dev,
"%s: Detected Silicon Labs %s-%c (type %d, rev %d)\n",
KBUILD_MODNAME, chip_name, rev_char, state->chip_type,
state->chip_revcode);
@@ -1149,13 +1132,46 @@ struct dvb_frontend *si2165_attach(const struct si2165_config *config,
sizeof(state->fe.ops.info.name));
}
- return &state->fe;
+ /* return fe pointer */
+ *pdata->fe = &state->fe;
+
+ return 0;
+nodev_error:
+ ret = -ENODEV;
error:
kfree(state);
- return NULL;
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
}
-EXPORT_SYMBOL(si2165_attach);
+
+static int si2165_remove(struct i2c_client *client)
+{
+ struct si2165_state *state = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ kfree(state);
+ return 0;
+}
+
+static const struct i2c_device_id si2165_id_table[] = {
+ {"si2165", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, si2165_id_table);
+
+static struct i2c_driver si2165_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "si2165",
+ },
+ .probe = si2165_probe,
+ .remove = si2165_remove,
+ .id_table = si2165_id_table,
+};
+
+module_i2c_driver(si2165_driver);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
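With this conversion, bridge drivers no longer call si2165_attach(); the demod binds as a regular I2C client and hands its frontend pointer back through platform data. A minimal bridge-side sketch follows; the adapter, the fe variable and the chosen address are illustrative assumptions, not part of this patch:

	/* Hypothetical bridge glue; "adapter" is the host's i2c_adapter. */
	struct dvb_frontend *fe = NULL;
	struct si2165_platform_data si2165_pdata = {
		.fe = &fe,			/* filled in by si2165_probe() */
		.chip_mode = SI2165_MODE_PLL_XTAL,
		.ref_freq_Hz = 16000000,
		.inversion = false,
	};
	struct i2c_board_info board_info = {
		.type = "si2165",		/* matches si2165_id_table */
		.addr = 0x64,			/* one of 0x64..0x67 */
		.platform_data = &si2165_pdata,
	};
	struct i2c_client *client = i2c_new_device(adapter, &board_info);

	if (!client || !fe)
		return -ENODEV;			/* demod did not bind */

On success the bridge keeps client for a later i2c_unregister_device() and registers fe as its frontend, the same scheme other Silicon Labs demods such as si2168 already use.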
diff --git a/drivers/media/dvb-frontends/si2165.h b/drivers/media/dvb-frontends/si2165.h
index 8a15d6a9c552..76c2ca7d7edb 100644
--- a/drivers/media/dvb-frontends/si2165.h
+++ b/drivers/media/dvb-frontends/si2165.h
@@ -28,10 +28,15 @@ enum {
SI2165_MODE_PLL_XTAL = 0x21
};
-struct si2165_config {
- /* i2c addr
- * possible values: 0x64,0x65,0x66,0x67 */
- u8 i2c_addr;
+/* I2C addresses
+ * possible values: 0x64,0x65,0x66,0x67
+ */
+struct si2165_platform_data {
+ /*
+ * frontend pointer, filled in by the driver during probe
+ */
+ struct dvb_frontend **fe;
/* external clock or XTAL */
u8 chip_mode;
@@ -45,18 +50,4 @@ struct si2165_config {
bool inversion;
};
-#if IS_REACHABLE(CONFIG_DVB_SI2165)
-struct dvb_frontend *si2165_attach(
- const struct si2165_config *config,
- struct i2c_adapter *i2c);
-#else
-static inline struct dvb_frontend *si2165_attach(
- const struct si2165_config *config,
- struct i2c_adapter *i2c)
-{
- pr_warn("%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
-#endif /* CONFIG_DVB_SI2165 */
-
#endif /* _DVB_SI2165_H */
diff --git a/drivers/media/dvb-frontends/si2165_priv.h b/drivers/media/dvb-frontends/si2165_priv.h
index 2b70cf12cd79..e5932118834b 100644
--- a/drivers/media/dvb-frontends/si2165_priv.h
+++ b/drivers/media/dvb-frontends/si2165_priv.h
@@ -20,4 +20,21 @@
#define SI2165_FIRMWARE_REV_D "dvb-demod-si2165.fw"
+struct si2165_config {
+ /* i2c addr
+ * possible values: 0x64,0x65,0x66,0x67 */
+ u8 i2c_addr;
+
+ /* external clock or XTAL */
+ u8 chip_mode;
+
+ /* frequency of external clock or xtal in Hz
+ * possible values: 4000000, 16000000, 20000000, 24000000, 27000000
+ */
+ u32 ref_freq_Hz;
+
+ /* invert the spectrum */
+ bool inversion;
+};
+
#endif /* _DVB_SI2165_PRIV */
diff --git a/drivers/media/dvb-frontends/si21xx.h b/drivers/media/dvb-frontends/si21xx.h
index ef5f351ca68e..b1be62f1983a 100644
--- a/drivers/media/dvb-frontends/si21xx.h
+++ b/drivers/media/dvb-frontends/si21xx.h
@@ -1,7 +1,6 @@
#ifndef SI21XX_H
#define SI21XX_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/sp2.h b/drivers/media/dvb-frontends/sp2.h
index 6cceea022d49..3901cd74b3f7 100644
--- a/drivers/media/dvb-frontends/sp2.h
+++ b/drivers/media/dvb-frontends/sp2.h
@@ -17,7 +17,6 @@
#ifndef SP2_H
#define SP2_H
-#include <linux/kconfig.h>
#include "dvb_ca_en50221.h"
/*
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index a0c3c526b132..73347d51f340 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -186,7 +186,7 @@ static int stb6000_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static struct dvb_tuner_ops stb6000_tuner_ops = {
+static const struct dvb_tuner_ops stb6000_tuner_ops = {
.info = {
.name = "ST STB6000",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/stb6000.h b/drivers/media/dvb-frontends/stb6000.h
index da581b652cb9..78e75dfc317f 100644
--- a/drivers/media/dvb-frontends/stb6000.h
+++ b/drivers/media/dvb-frontends/stb6000.h
@@ -23,7 +23,6 @@
#ifndef __DVB_STB6000_H__
#define __DVB_STB6000_H__
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index b9c2511bf019..5add1182c3ca 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -522,7 +522,7 @@ static int stb6100_set_params(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_tuner_ops stb6100_ops = {
+static const struct dvb_tuner_ops stb6100_ops = {
.info = {
.name = "STB6100 Silicon Tuner",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/stv0288.h b/drivers/media/dvb-frontends/stv0288.h
index b58603c00c80..803acb917282 100644
--- a/drivers/media/dvb-frontends/stv0288.h
+++ b/drivers/media/dvb-frontends/stv0288.h
@@ -27,7 +27,6 @@
#ifndef STV0288_H
#define STV0288_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/stv0367.h b/drivers/media/dvb-frontends/stv0367.h
index 92b3e85fb818..b88166a9716f 100644
--- a/drivers/media/dvb-frontends/stv0367.h
+++ b/drivers/media/dvb-frontends/stv0367.h
@@ -26,7 +26,6 @@
#ifndef STV0367_H
#define STV0367_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/stv0900.h b/drivers/media/dvb-frontends/stv0900.h
index c90bf00ea9ce..9ca2da90c7d7 100644
--- a/drivers/media/dvb-frontends/stv0900.h
+++ b/drivers/media/dvb-frontends/stv0900.h
@@ -26,7 +26,6 @@
#ifndef STV0900_H
#define STV0900_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 91c6dcf65d2a..66a5a7f2295c 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -382,7 +382,7 @@ static int stv6110_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
return 0;
}
-static struct dvb_tuner_ops stv6110_tuner_ops = {
+static const struct dvb_tuner_ops stv6110_tuner_ops = {
.info = {
.name = "ST STV6110",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/stv6110.h b/drivers/media/dvb-frontends/stv6110.h
index f3c8a5c6b77d..4604f793d954 100644
--- a/drivers/media/dvb-frontends/stv6110.h
+++ b/drivers/media/dvb-frontends/stv6110.h
@@ -25,7 +25,6 @@
#ifndef __DVB_STV6110_H__
#define __DVB_STV6110_H__
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index a62c01e454f5..c611ad210b5c 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -345,7 +345,7 @@ static int stv6110x_release(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_tuner_ops stv6110x_ops = {
+static const struct dvb_tuner_ops stv6110x_ops = {
.info = {
.name = "STV6110(A) Silicon Tuner",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/tda10048.h b/drivers/media/dvb-frontends/tda10048.h
index bc77a7311de1..a2cebb0cceba 100644
--- a/drivers/media/dvb-frontends/tda10048.h
+++ b/drivers/media/dvb-frontends/tda10048.h
@@ -22,7 +22,6 @@
#ifndef TDA10048_H
#define TDA10048_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
#include <linux/firmware.h>
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index de0a1c110972..bc247f9b553a 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -1217,7 +1217,7 @@ static int get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
}
-static struct dvb_tuner_ops tuner_ops = {
+static const struct dvb_tuner_ops tuner_ops = {
.info = {
.name = "NXP TDA18271C2D",
.frequency_min = 47125000,
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.h b/drivers/media/dvb-frontends/tda18271c2dd.h
index 7ebd8eaff4eb..e6ccf240f54c 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.h
+++ b/drivers/media/dvb-frontends/tda18271c2dd.h
@@ -1,8 +1,6 @@
#ifndef _TDA18271C2DD_H_
#define _TDA18271C2DD_H_
-#include <linux/kconfig.h>
-
#if IS_REACHABLE(CONFIG_DVB_TDA18271C2DD)
struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, u8 adr);
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 82f8cc534f33..7ca965987f40 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -206,7 +206,7 @@ static int tda665x_release(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_tuner_ops tda665x_ops = {
+static const struct dvb_tuner_ops tda665x_ops = {
.get_status = tda665x_get_status,
.set_params = tda665x_set_params,
.get_frequency = tda665x_get_frequency,
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index 3285b1bc4642..e0df93191b9e 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -161,7 +161,7 @@ static int tda8261_release(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_tuner_ops tda8261_ops = {
+static const struct dvb_tuner_ops tda8261_ops = {
.info = {
.name = "TDA8261",
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index 04bbcc24de0a..2ec671df1441 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -129,7 +129,7 @@ static int tda826x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static struct dvb_tuner_ops tda826x_tuner_ops = {
+static const struct dvb_tuner_ops tda826x_tuner_ops = {
.info = {
.name = "Philips TDA826X",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 14b410ffe612..a9f6bbea6df3 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -496,7 +496,7 @@ static int ts2020_read_signal_strength(struct dvb_frontend *fe,
return 0;
}
-static struct dvb_tuner_ops ts2020_tuner_ops = {
+static const struct dvb_tuner_ops ts2020_tuner_ops = {
.info = {
.name = "TS2020",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/ts2020.h b/drivers/media/dvb-frontends/ts2020.h
index 9220e5cf0d21..facc54f0a6af 100644
--- a/drivers/media/dvb-frontends/ts2020.h
+++ b/drivers/media/dvb-frontends/ts2020.h
@@ -22,7 +22,6 @@
#ifndef TS2020_H
#define TS2020_H
-#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
struct ts2020_config {
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 029384d1fddd..6da12b9e55eb 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -157,7 +157,7 @@ static int tua6100_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static struct dvb_tuner_ops tua6100_tuner_ops = {
+static const struct dvb_tuner_ops tua6100_tuner_ops = {
.info = {
.name = "Infineon TUA6100",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 0903d461b8fa..7ed81315965f 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -446,7 +446,7 @@ static int zl10036_init(struct dvb_frontend *fe)
return ret;
}
-static struct dvb_tuner_ops zl10036_tuner_ops = {
+static const struct dvb_tuner_ops zl10036_tuner_ops = {
.info = {
.name = "Zarlink ZL10036",
.frequency_min = 950000,
diff --git a/drivers/media/dvb-frontends/zl10036.h b/drivers/media/dvb-frontends/zl10036.h
index 670e76a654ee..c568d8d59de3 100644
--- a/drivers/media/dvb-frontends/zl10036.h
+++ b/drivers/media/dvb-frontends/zl10036.h
@@ -21,7 +21,6 @@
#ifndef DVB_ZL10036_H
#define DVB_ZL10036_H
-#include <linux/kconfig.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index ee09ec26c553..f8c271be196c 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -255,7 +255,7 @@ static int zl10039_release(struct dvb_frontend *fe)
return 0;
}
-static struct dvb_tuner_ops zl10039_ops = {
+static const struct dvb_tuner_ops zl10039_ops = {
.release = zl10039_release,
.init = zl10039_init,
.sleep = zl10039_sleep,
diff --git a/drivers/media/dvb-frontends/zl10039.h b/drivers/media/dvb-frontends/zl10039.h
index 070929444e71..66e708569375 100644
--- a/drivers/media/dvb-frontends/zl10039.h
+++ b/drivers/media/dvb-frontends/zl10039.h
@@ -22,8 +22,6 @@
#ifndef ZL10039_H
#define ZL10039_H
-#include <linux/kconfig.h>
-
#if IS_REACHABLE(CONFIG_DVB_ZL10039)
struct dvb_frontend *zl10039_attach(struct dvb_frontend *fe,
u8 i2c_addr,
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index ce9006e10a30..2669b4bad910 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -21,7 +21,7 @@ config VIDEO_IR_I2C
# Encoder / Decoder module configuration
#
-menu "Encoders, decoders, sensors and other helper chips"
+menu "I2C Encoders, decoders, sensors and other helper chips"
visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
comment "Audio decoders, processors and mixers"
@@ -187,7 +187,7 @@ comment "Video decoders"
config VIDEO_ADV7180
tristate "Analog Devices ADV7180 decoder"
- depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
---help---
Support for the Analog Devices ADV7180 video decoder.
@@ -295,6 +295,13 @@ config VIDEO_ML86V7667
To compile this driver as a module, choose M here: the
module will be called ml86v7667.
+config VIDEO_AD5820
+ tristate "AD5820 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ ---help---
+ This is a driver for the AD5820 camera lens voice coil.
+ It is used, for example, in the Nokia N900 (RX-51).
+
config VIDEO_SAA7110
tristate "Philips SAA7110 video decoder"
depends on VIDEO_V4L2 && I2C
@@ -571,6 +578,13 @@ config VIDEO_MT9M032
This driver supports MT9M032 camera sensors from Aptina, monochrome
models only.
+config VIDEO_MT9M111
+ tristate "mt9m111, mt9m112 and mt9m131 support"
+ depends on I2C && VIDEO_V4L2
+ help
+ This driver supports MT9M111, MT9M112 and MT9M131 cameras from
+ Micron/Aptina.
+
config VIDEO_MT9P031
tristate "Aptina MT9P031 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 94f2c99e890d..92773b2e6225 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIDEO_SAA717X) += saa717x.o
obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
+obj-$(CONFIG_VIDEO_AD5820) += ad5820.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
+obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
new file mode 100644
index 000000000000..beab2f381b81
--- /dev/null
+++ b/drivers/media/i2c/ad5820.c
@@ -0,0 +1,372 @@
+/*
+ * drivers/media/i2c/ad5820.c
+ *
+ * AD5820 DAC driver for camera voice coil focus.
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2007 Texas Instruments
+ * Copyright (C) 2016 Pavel Machek <pavel@ucw.cz>
+ *
+ * Contact: Tuukka Toivonen <tuukkat76@gmail.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * Based on af_d88.c by Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#define AD5820_NAME "ad5820"
+
+/* Register definitions */
+#define AD5820_POWER_DOWN (1 << 15)
+#define AD5820_DAC_SHIFT 4
+#define AD5820_RAMP_MODE_LINEAR (0 << 3)
+#define AD5820_RAMP_MODE_64_16 (1 << 3)
+
+#define CODE_TO_RAMP_US(s) ((s) == 0 ? 0 : (1 << ((s) - 1)) * 50)
+#define RAMP_US_TO_CODE(c) fls(((c) + ((c)>>1)) / 50)
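+
+/*
+ * Worked example (illustrative note, not in the original source):
+ * CODE_TO_RAMP_US(3) = (1 << 2) * 50 = 200 us, and the inverse
+ * RAMP_US_TO_CODE(200) = fls((200 + 100) / 50) = fls(6) = 3; the
+ * (c)>>1 term makes the conversion round to the nearest code.
+ */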
+
+#define to_ad5820_device(sd) container_of(sd, struct ad5820_device, subdev)
+
+struct ad5820_device {
+ struct v4l2_subdev subdev;
+ struct ad5820_platform_data *platform_data;
+ struct regulator *vana;
+
+ struct v4l2_ctrl_handler ctrls;
+ u32 focus_absolute;
+ u32 focus_ramp_time;
+ u32 focus_ramp_mode;
+
+ struct mutex power_lock;
+ int power_count;
+
+ bool standby;
+};
+
+static int ad5820_write(struct ad5820_device *coil, u16 data)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&coil->subdev);
+ struct i2c_msg msg;
+ int r;
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data = cpu_to_be16(data);
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = (u8 *)&data;
+
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r < 0) {
+ dev_err(&client->dev, "write failed, error %d\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * Calculate status word and write it to the device based on current
+ * values of V4L2 controls. It is assumed that the stored V4L2 control
+ * values are properly limited and rounded.
+ */
+static int ad5820_update_hw(struct ad5820_device *coil)
+{
+ u16 status;
+
+ status = RAMP_US_TO_CODE(coil->focus_ramp_time);
+ status |= coil->focus_ramp_mode
+ ? AD5820_RAMP_MODE_64_16 : AD5820_RAMP_MODE_LINEAR;
+ status |= coil->focus_absolute << AD5820_DAC_SHIFT;
+
+ if (coil->standby)
+ status |= AD5820_POWER_DOWN;
+
+ return ad5820_write(coil, status);
+}
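+
+/*
+ * Example (illustrative): focus_absolute = 512 with a linear ramp and
+ * ramp-time code 0 yields status = 512 << AD5820_DAC_SHIFT = 0x2000;
+ * standby additionally sets AD5820_POWER_DOWN (bit 15), giving 0xa000.
+ */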
+
+/*
+ * Power handling
+ */
+static int ad5820_power_off(struct ad5820_device *coil, bool standby)
+{
+ int ret = 0, ret2;
+
+ /*
+ * Go to standby first as real power off may be denied by the hardware
+ * (single power line control for both coil and sensor).
+ */
+ if (standby) {
+ coil->standby = true;
+ ret = ad5820_update_hw(coil);
+ }
+
+ ret2 = regulator_disable(coil->vana);
+ if (ret)
+ return ret;
+ return ret2;
+}
+
+static int ad5820_power_on(struct ad5820_device *coil, bool restore)
+{
+ int ret;
+
+ ret = regulator_enable(coil->vana);
+ if (ret < 0)
+ return ret;
+
+ if (restore) {
+ /* Restore the hardware settings. */
+ coil->standby = false;
+ ret = ad5820_update_hw(coil);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+
+fail:
+ coil->standby = true;
+ regulator_disable(coil->vana);
+
+ return ret;
+}
+
+/*
+ * V4L2 controls
+ */
+static int ad5820_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ad5820_device *coil =
+ container_of(ctrl->handler, struct ad5820_device, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ coil->focus_absolute = ctrl->val;
+ return ad5820_update_hw(coil);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ad5820_ctrl_ops = {
+ .s_ctrl = ad5820_set_ctrl,
+};
+
+
+static int ad5820_init_controls(struct ad5820_device *coil)
+{
+ v4l2_ctrl_handler_init(&coil->ctrls, 1);
+
+ /*
+ * V4L2_CID_FOCUS_ABSOLUTE
+ *
+ * Minimum current is 0 mA, maximum is 100 mA. Thus, one code step is
+ * equivalent to 100/1023 = 0.0978 mA. Nevertheless, we do not use [mA]
+ * for the focus position, because it is meaningless to the user. It would
+ * be meaningful to use the focus distance or even its inverse, but since
+ * the driver doesn't have sufficient knowledge to do the conversion, we
+ * just use abstract codes here. In any case, a smaller value means a
+ * focus position farther from the camera. The default value of zero means
+ * focus at infinity, and also the least current consumption.
+ */
+ v4l2_ctrl_new_std(&coil->ctrls, &ad5820_ctrl_ops,
+ V4L2_CID_FOCUS_ABSOLUTE, 0, 1023, 1, 0);
+
+ if (coil->ctrls.error)
+ return coil->ctrls.error;
+
+ coil->focus_absolute = 0;
+ coil->focus_ramp_time = 0;
+ coil->focus_ramp_mode = 0;
+
+ coil->subdev.ctrl_handler = &coil->ctrls;
+
+ return 0;
+}
+
+/*
+ * V4L2 subdev operations
+ */
+static int ad5820_registered(struct v4l2_subdev *subdev)
+{
+ struct ad5820_device *coil = to_ad5820_device(subdev);
+
+ return ad5820_init_controls(coil);
+}
+
+static int
+ad5820_set_power(struct v4l2_subdev *subdev, int on)
+{
+ struct ad5820_device *coil = to_ad5820_device(subdev);
+ int ret = 0;
+
+ mutex_lock(&coil->power_lock);
+
+ /*
+ * If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (coil->power_count == !on) {
+ ret = on ? ad5820_power_on(coil, true) :
+ ad5820_power_off(coil, true);
+ if (ret < 0)
+ goto done;
+ }
+
+ /* Update the power count. */
+ coil->power_count += on ? 1 : -1;
+ WARN_ON(coil->power_count < 0);
+
+done:
+ mutex_unlock(&coil->power_lock);
+ return ret;
+}
+
+static int ad5820_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return ad5820_set_power(sd, 1);
+}
+
+static int ad5820_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return ad5820_set_power(sd, 0);
+}
+
+static const struct v4l2_subdev_core_ops ad5820_core_ops = {
+ .s_power = ad5820_set_power,
+};
+
+static const struct v4l2_subdev_ops ad5820_ops = {
+ .core = &ad5820_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ad5820_internal_ops = {
+ .registered = ad5820_registered,
+ .open = ad5820_open,
+ .close = ad5820_close,
+};
+
+/*
+ * I2C driver
+ */
+static int __maybe_unused ad5820_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ad5820_device *coil = to_ad5820_device(subdev);
+
+ if (!coil->power_count)
+ return 0;
+
+ return ad5820_power_off(coil, false);
+}
+
+static int __maybe_unused ad5820_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ad5820_device *coil = to_ad5820_device(subdev);
+
+ if (!coil->power_count)
+ return 0;
+
+ return ad5820_power_on(coil, true);
+}
+
+static int ad5820_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ad5820_device *coil;
+ int ret;
+
+ coil = devm_kzalloc(&client->dev, sizeof(*coil), GFP_KERNEL);
+ if (!coil)
+ return -ENOMEM;
+
+ coil->vana = devm_regulator_get(&client->dev, "VANA");
+ if (IS_ERR(coil->vana)) {
+ ret = PTR_ERR(coil->vana);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev, "could not get regulator for vana\n");
+ return ret;
+ }
+
+ mutex_init(&coil->power_lock);
+
+ v4l2_i2c_subdev_init(&coil->subdev, client, &ad5820_ops);
+ coil->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ coil->subdev.internal_ops = &ad5820_internal_ops;
+ strcpy(coil->subdev.name, "ad5820 focus");
+
+ ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
+ if (ret < 0)
+ goto cleanup2;
+
+ ret = v4l2_async_register_subdev(&coil->subdev);
+ if (ret < 0)
+ goto cleanup;
+
+ return ret;
+
+cleanup2:
+ mutex_destroy(&coil->power_lock);
+cleanup:
+ media_entity_cleanup(&coil->subdev.entity);
+ return ret;
+}
+
+static int __exit ad5820_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct ad5820_device *coil = to_ad5820_device(subdev);
+
+ v4l2_device_unregister_subdev(&coil->subdev);
+ v4l2_ctrl_handler_free(&coil->ctrls);
+ media_entity_cleanup(&coil->subdev.entity);
+ mutex_destroy(&coil->power_lock);
+ return 0;
+}
+
+static const struct i2c_device_id ad5820_id_table[] = {
+ { AD5820_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+
+static SIMPLE_DEV_PM_OPS(ad5820_pm, ad5820_suspend, ad5820_resume);
+
+static struct i2c_driver ad5820_i2c_driver = {
+ .driver = {
+ .name = AD5820_NAME,
+ .pm = &ad5820_pm,
+ },
+ .probe = ad5820_probe,
+ .remove = __exit_p(ad5820_remove),
+ .id_table = ad5820_id_table,
+};
+
+module_i2c_driver(ad5820_i2c_driver);
+
+MODULE_AUTHOR("Tuukka Toivonen");
+MODULE_DESCRIPTION("AD5820 camera lens driver");
+MODULE_LICENSE("GPL");
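Since this driver probes purely from an I2C device ID plus a "VANA" regulator, one legacy (non-DT) way to wire it up is sketched below; the bus number and address are illustrative assumptions, not taken from this patch:

	/* Hypothetical board glue for the coil at 0x0c on I2C bus 2. */
	static struct regulator_consumer_supply ad5820_vana =
		REGULATOR_SUPPLY("VANA", "2-000c");

	static struct i2c_board_info camera_devices[] __initdata = {
		{ I2C_BOARD_INFO("ad5820", 0x0c) },
	};

	static int __init board_camera_init(void)
	{
		return i2c_register_board_info(2, camera_devices,
					       ARRAY_SIZE(camera_devices));
	}

The "VANA" supply name must match what ad5820_probe() passes to devm_regulator_get(), and the supply's dev_name ("2-000c") must match the client that the board info creates.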
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 0462f461e679..50f354144ee7 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -98,7 +98,6 @@ struct ad9389b_state {
struct ad9389b_state_edid edid;
/* Running counter of the number of detected EDIDs (for debugging) */
unsigned edid_detect_counter;
- struct workqueue_struct *work_queue;
struct delayed_work edid_handler; /* work entry */
};
@@ -843,8 +842,7 @@ static void ad9389b_edid_handler(struct work_struct *work)
v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
ad9389b_s_power(sd, false);
ad9389b_s_power(sd, true);
- queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
+ schedule_delayed_work(&state->edid_handler, EDID_DELAY);
return;
}
}
@@ -933,8 +931,7 @@ static void ad9389b_update_monitor_present_status(struct v4l2_subdev *sd)
ad9389b_setup(sd);
ad9389b_notify_monitor_detect(sd);
state->edid.read_retries = EDID_MAX_RETRIES;
- queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
+ schedule_delayed_work(&state->edid_handler, EDID_DELAY);
} else if (!(status & MASK_AD9389B_HPD_DETECT)) {
v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
state->have_monitor = false;
@@ -1065,8 +1062,7 @@ static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
ad9389b_wr(sd, 0xc9, 0xf);
ad9389b_wr(sd, 0xc4, state->edid.segments);
state->edid.read_retries = EDID_MAX_RETRIES;
- queue_delayed_work(state->work_queue,
- &state->edid_handler, EDID_DELAY);
+ schedule_delayed_work(&state->edid_handler, EDID_DELAY);
return false;
}
@@ -1170,13 +1166,6 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
goto err_entity;
}
- state->work_queue = create_singlethread_workqueue(sd->name);
- if (state->work_queue == NULL) {
- v4l2_err(sd, "could not create workqueue\n");
- err = -ENOMEM;
- goto err_unreg;
- }
-
INIT_DELAYED_WORK(&state->edid_handler, ad9389b_edid_handler);
state->dv_timings = dv1080p60;
@@ -1187,8 +1176,6 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
client->addr << 1, client->adapter->name);
return 0;
-err_unreg:
- i2c_unregister_device(state->edid_i2c_client);
err_entity:
media_entity_cleanup(&sd->entity);
err_hdl:
@@ -1211,9 +1198,8 @@ static int ad9389b_remove(struct i2c_client *client)
ad9389b_s_stream(sd, false);
ad9389b_s_audio_stream(sd, false);
ad9389b_init_setup(sd);
- cancel_delayed_work(&state->edid_handler);
+ cancel_delayed_work_sync(&state->edid_handler);
i2c_unregister_device(state->edid_i2c_client);
- destroy_workqueue(state->work_queue);
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
@@ -1231,7 +1217,6 @@ MODULE_DEVICE_TABLE(i2c, ad9389b_id);
static struct i2c_driver ad9389b_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ad9389b",
},
.probe = ad9389b_probe,
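Dropping the private workqueue above is safe because the EDID poll work needs neither strict ordering against other work items nor a dedicated thread; the system workqueue plus a synchronous cancel on removal gives the same lifetime guarantees. The pattern, reduced to a minimal sketch around a hypothetical state struct:

	struct my_state {
		struct delayed_work edid_handler;
	};

	static void my_edid_work(struct work_struct *work)
	{
		struct my_state *state =
			container_of(work, struct my_state, edid_handler.work);

		/* poll the hardware, then re-arm while retries remain */
		schedule_delayed_work(&state->edid_handler, HZ / 4);
	}

	/* probe():  INIT_DELAYED_WORK(&state->edid_handler, my_edid_work); */
	/* remove(): cancel_delayed_work_sync(&state->edid_handler);        */

The switch from cancel_delayed_work() to cancel_delayed_work_sync() matters here: the sync variant also waits for a handler that is already running, so the work can no longer touch freed state after remove().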
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 95cbc857f36e..cbed2bc29325 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/gpio/consumer.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
@@ -56,10 +57,11 @@
#define ADV7182_REG_INPUT_VIDSEL 0x0002
+#define ADV7180_REG_OUTPUT_CONTROL 0x0003
#define ADV7180_REG_EXTENDED_OUTPUT_CONTROL 0x0004
#define ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS 0xC5
-#define ADV7180_REG_AUTODETECT_ENABLE 0x07
+#define ADV7180_REG_AUTODETECT_ENABLE 0x0007
#define ADV7180_AUTODETECT_DEFAULT 0x7f
/* Contrast */
#define ADV7180_REG_CON 0x0008 /*Unsigned */
@@ -100,6 +102,20 @@
#define ADV7180_REG_IDENT 0x0011
#define ADV7180_ID_7180 0x18
+#define ADV7180_REG_STATUS3 0x0013
+#define ADV7180_REG_ANALOG_CLAMP_CTL 0x0014
+#define ADV7180_REG_SHAP_FILTER_CTL_1 0x0017
+#define ADV7180_REG_CTRL_2 0x001d
+#define ADV7180_REG_VSYNC_FIELD_CTL_1 0x0031
+#define ADV7180_REG_MANUAL_WIN_CTL_1 0x003d
+#define ADV7180_REG_MANUAL_WIN_CTL_2 0x003e
+#define ADV7180_REG_MANUAL_WIN_CTL_3 0x003f
+#define ADV7180_REG_LOCK_CNT 0x0051
+#define ADV7180_REG_CVBS_TRIM 0x0052
+#define ADV7180_REG_CLAMP_ADJ 0x005a
+#define ADV7180_REG_RES_CIR 0x005f
+#define ADV7180_REG_DIFF_MODE 0x0060
+
#define ADV7180_REG_ICONF1 0x2040
#define ADV7180_ICONF1_ACTIVE_LOW 0x01
#define ADV7180_ICONF1_PSYNC_ONLY 0x10
@@ -129,9 +145,15 @@
#define ADV7180_REG_VPP_SLAVE_ADDR 0xFD
#define ADV7180_REG_CSI_SLAVE_ADDR 0xFE
-#define ADV7180_REG_FLCONTROL 0x40e0
+#define ADV7180_REG_ACE_CTRL1 0x4080
+#define ADV7180_REG_ACE_CTRL5 0x4084
+#define ADV7180_REG_FLCONTROL 0x40e0
#define ADV7180_FLCONTROL_FL_ENABLE 0x1
+#define ADV7180_REG_RST_CLAMP 0x809c
+#define ADV7180_REG_AGC_ADJ1 0x80b6
+#define ADV7180_REG_AGC_ADJ2 0x80c0
+
#define ADV7180_CSI_REG_PWRDN 0x00
#define ADV7180_CSI_PWRDN 0x80
@@ -192,6 +214,7 @@ struct adv7180_state {
struct media_pad pad;
struct mutex mutex; /* mutual excl. when accessing chip */
int irq;
+ struct gpio_desc *pwdn_gpio;
v4l2_std_id curr_norm;
bool powered;
bool streaming;
@@ -442,6 +465,19 @@ static int adv7180_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
return 0;
}
+static void adv7180_set_power_pin(struct adv7180_state *state, bool on)
+{
+ if (!state->pwdn_gpio)
+ return;
+
+ if (on) {
+ gpiod_set_value_cansleep(state->pwdn_gpio, 0);
+ usleep_range(5000, 10000);
+ } else {
+ gpiod_set_value_cansleep(state->pwdn_gpio, 1);
+ }
+}
+
static int adv7180_set_power(struct adv7180_state *state, bool on)
{
u8 val;
@@ -597,7 +633,7 @@ static int adv7180_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index != 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_YUYV8_2X8;
+ code->code = MEDIA_BUS_FMT_UYVY8_2X8;
return 0;
}
@@ -607,7 +643,7 @@ static int adv7180_mbus_fmt(struct v4l2_subdev *sd,
{
struct adv7180_state *state = to_state(sd);
- fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
fmt->width = 720;
fmt->height = state->curr_norm & V4L2_STD_525_60 ? 480 : 576;
@@ -675,6 +711,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
{
struct adv7180_state *state = to_state(sd);
struct v4l2_mbus_framefmt *framefmt;
+ int ret;
switch (format->format.field) {
case V4L2_FIELD_NONE:
@@ -686,8 +723,9 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
break;
}
+ ret = adv7180_mbus_fmt(sd, &format->format);
+
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- framefmt = &format->format;
if (state->field != format->format.field) {
state->field = format->format.field;
adv7180_set_power(state, false);
@@ -699,7 +737,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
*framefmt = format->format;
}
- return adv7180_mbus_fmt(sd, framefmt);
+ return ret;
}
static int adv7180_g_mbus_config(struct v4l2_subdev *sd,
@@ -725,16 +763,16 @@ static int adv7180_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
-static int adv7180_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *cropcap)
+static int adv7180_g_pixelaspect(struct v4l2_subdev *sd, struct v4l2_fract *aspect)
{
struct adv7180_state *state = to_state(sd);
if (state->curr_norm & V4L2_STD_525_60) {
- cropcap->pixelaspect.numerator = 11;
- cropcap->pixelaspect.denominator = 10;
+ aspect->numerator = 11;
+ aspect->denominator = 10;
} else {
- cropcap->pixelaspect.numerator = 54;
- cropcap->pixelaspect.denominator = 59;
+ aspect->numerator = 54;
+ aspect->denominator = 59;
}
return 0;
@@ -787,7 +825,7 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.g_input_status = adv7180_g_input_status,
.s_routing = adv7180_s_routing,
.g_mbus_config = adv7180_g_mbus_config,
- .cropcap = adv7180_cropcap,
+ .g_pixelaspect = adv7180_g_pixelaspect,
.g_tvnorms = adv7180_g_tvnorms,
.s_stream = adv7180_s_stream,
};
@@ -886,16 +924,20 @@ static int adv7182_init(struct adv7180_state *state)
/* ADI required writes */
if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
- adv7180_write(state, 0x0003, 0x4e);
- adv7180_write(state, 0x0004, 0x57);
- adv7180_write(state, 0x001d, 0xc0);
+ adv7180_write(state, ADV7180_REG_OUTPUT_CONTROL, 0x4e);
+ adv7180_write(state, ADV7180_REG_EXTENDED_OUTPUT_CONTROL, 0x57);
+ adv7180_write(state, ADV7180_REG_CTRL_2, 0xc0);
} else {
if (state->chip_info->flags & ADV7180_FLAG_V2)
- adv7180_write(state, 0x0004, 0x17);
+ adv7180_write(state,
+ ADV7180_REG_EXTENDED_OUTPUT_CONTROL,
+ 0x17);
else
- adv7180_write(state, 0x0004, 0x07);
- adv7180_write(state, 0x0003, 0x0c);
- adv7180_write(state, 0x001d, 0x40);
+ adv7180_write(state,
+ ADV7180_REG_EXTENDED_OUTPUT_CONTROL,
+ 0x07);
+ adv7180_write(state, ADV7180_REG_OUTPUT_CONTROL, 0x0c);
+ adv7180_write(state, ADV7180_REG_CTRL_2, 0x40);
}
adv7180_write(state, 0x0013, 0x00);
@@ -972,8 +1014,8 @@ static int adv7182_select_input(struct adv7180_state *state, unsigned int input)
return ret;
/* Reset clamp circuitry - ADI recommended writes */
- adv7180_write(state, 0x809c, 0x00);
- adv7180_write(state, 0x809c, 0xff);
+ adv7180_write(state, ADV7180_REG_RST_CLAMP, 0x00);
+ adv7180_write(state, ADV7180_REG_RST_CLAMP, 0xff);
input_type = adv7182_get_input_type(input);
@@ -981,10 +1023,10 @@ static int adv7182_select_input(struct adv7180_state *state, unsigned int input)
case ADV7182_INPUT_TYPE_CVBS:
case ADV7182_INPUT_TYPE_DIFF_CVBS:
/* ADI recommends to use the SH1 filter */
- adv7180_write(state, 0x0017, 0x41);
+ adv7180_write(state, ADV7180_REG_SHAP_FILTER_CTL_1, 0x41);
break;
default:
- adv7180_write(state, 0x0017, 0x01);
+ adv7180_write(state, ADV7180_REG_SHAP_FILTER_CTL_1, 0x01);
break;
}
@@ -994,21 +1036,21 @@ static int adv7182_select_input(struct adv7180_state *state, unsigned int input)
lbias = adv7182_lbias_settings[input_type];
for (i = 0; i < ARRAY_SIZE(adv7182_lbias_settings[0]); i++)
- adv7180_write(state, 0x0052 + i, lbias[i]);
+ adv7180_write(state, ADV7180_REG_CVBS_TRIM + i, lbias[i]);
if (input_type == ADV7182_INPUT_TYPE_DIFF_CVBS) {
/* ADI required writes to make differential CVBS work */
- adv7180_write(state, 0x005f, 0xa8);
- adv7180_write(state, 0x005a, 0x90);
- adv7180_write(state, 0x0060, 0xb0);
- adv7180_write(state, 0x80b6, 0x08);
- adv7180_write(state, 0x80c0, 0xa0);
+ adv7180_write(state, ADV7180_REG_RES_CIR, 0xa8);
+ adv7180_write(state, ADV7180_REG_CLAMP_ADJ, 0x90);
+ adv7180_write(state, ADV7180_REG_DIFF_MODE, 0xb0);
+ adv7180_write(state, ADV7180_REG_AGC_ADJ1, 0x08);
+ adv7180_write(state, ADV7180_REG_AGC_ADJ2, 0xa0);
} else {
- adv7180_write(state, 0x005f, 0xf0);
- adv7180_write(state, 0x005a, 0xd0);
- adv7180_write(state, 0x0060, 0x10);
- adv7180_write(state, 0x80b6, 0x9c);
- adv7180_write(state, 0x80c0, 0x00);
+ adv7180_write(state, ADV7180_REG_RES_CIR, 0xf0);
+ adv7180_write(state, ADV7180_REG_CLAMP_ADJ, 0xd0);
+ adv7180_write(state, ADV7180_REG_DIFF_MODE, 0x10);
+ adv7180_write(state, ADV7180_REG_AGC_ADJ1, 0x9c);
+ adv7180_write(state, ADV7180_REG_AGC_ADJ2, 0x00);
}
return 0;
@@ -1185,6 +1227,8 @@ static int init_device(struct adv7180_state *state)
mutex_lock(&state->mutex);
+ adv7180_set_power_pin(state, true);
+
adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES);
usleep_range(5000, 10000);
@@ -1254,6 +1298,14 @@ static int adv7180_probe(struct i2c_client *client,
state->field = V4L2_FIELD_INTERLACED;
state->chip_info = (struct adv7180_chip_info *)id->driver_data;
+ state->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(state->pwdn_gpio)) {
+ ret = PTR_ERR(state->pwdn_gpio);
+ v4l_err(client, "request for power pin failed: %d\n", ret);
+ return ret;
+ }
+
if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
state->csi_client = i2c_new_dummy(client->adapter,
ADV7180_DEFAULT_CSI_I2C_ADDR);
@@ -1345,6 +1397,8 @@ static int adv7180_remove(struct i2c_client *client)
if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2)
i2c_unregister_device(state->csi_client);
+ adv7180_set_power_pin(state, false);
+
mutex_destroy(&state->mutex);
return 0;
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index 2bec737881e9..04eecda74d66 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -644,7 +644,6 @@ MODULE_DEVICE_TABLE(i2c, adv7183_id);
static struct i2c_driver adv7183_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "adv7183",
},
.probe = adv7183_probe,
diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c
index 76d987476e35..f19ad4ecd11e 100644
--- a/drivers/media/i2c/adv7393.c
+++ b/drivers/media/i2c/adv7393.c
@@ -456,7 +456,6 @@ MODULE_DEVICE_TABLE(i2c, adv7393_id);
static struct i2c_driver adv7393_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "adv7393",
},
.probe = adv7393_probe,
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 53030d631653..5ba0f21bcfe4 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1898,6 +1898,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
state->i2c_cec_addr >> 1);
if (state->i2c_cec == NULL) {
v4l2_err(sd, "failed to register cec i2c client\n");
+ err = -ENOMEM;
goto err_unreg_edid;
}
adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index d9f2b6b76d59..3a795dcb7d8e 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -124,21 +124,27 @@ static int ak881x_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int ak881x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+static int ak881x_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ak881x *ak881x = to_ak881x(client);
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = 720;
- a->bounds.height = ak881x->lines;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = 720;
+ sel->r.height = ak881x->lines;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int ak881x_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
@@ -207,13 +213,13 @@ static struct v4l2_subdev_core_ops ak881x_subdev_core_ops = {
};
static struct v4l2_subdev_video_ops ak881x_subdev_video_ops = {
- .cropcap = ak881x_cropcap,
.s_std_output = ak881x_s_std_output,
.s_stream = ak881x_s_stream,
};
static const struct v4l2_subdev_pad_ops ak881x_subdev_pad_ops = {
.enum_mbus_code = ak881x_enum_mbus_code,
+ .get_selection = ak881x_get_selection,
.set_fmt = ak881x_fill_fmt,
.get_fmt = ak881x_fill_fmt,
};
diff --git a/drivers/media/i2c/cs3308.c b/drivers/media/i2c/cs3308.c
index d28b4f37fe5f..7da5f69cace6 100644
--- a/drivers/media/i2c/cs3308.c
+++ b/drivers/media/i2c/cs3308.c
@@ -127,7 +127,6 @@ MODULE_DEVICE_TABLE(i2c, cs3308_id);
static struct i2c_driver cs3308_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "cs3308",
},
.probe = cs3308_probe,
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index bf82726fd3f4..cede3975d04b 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -35,6 +35,7 @@
*
*/
+#include <asm/unaligned.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -63,51 +64,80 @@ module_param(debug, int, 0644); /* debug level (0,1,2) */
/* ----------------------------------------------------------------------- */
static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
- u32 *scancode, u8 *ptoggle, int size, int offset)
+ u32 *scancode, u8 *ptoggle, int size)
{
unsigned char buf[6];
- int start, range, toggle, dev, code, ircode;
+ int start, range, toggle, dev, code, ircode, vendor;
/* poll IR chip */
if (size != i2c_master_recv(ir->c, buf, size))
return -EIO;
- /* split rc5 data block ... */
- start = (buf[offset] >> 7) & 1;
- range = (buf[offset] >> 6) & 1;
- toggle = (buf[offset] >> 5) & 1;
- dev = buf[offset] & 0x1f;
- code = (buf[offset+1] >> 2) & 0x3f;
+ if (buf[0] & 0x80) {
+ int offset = (size == 6) ? 3 : 0;
- /* rc5 has two start bits
- * the first bit must be one
- * the second bit defines the command range (1 = 0-63, 0 = 64 - 127)
- */
- if (!start)
- /* no key pressed */
- return 0;
+ /* split rc5 data block ... */
+ start = (buf[offset] >> 7) & 1;
+ range = (buf[offset] >> 6) & 1;
+ toggle = (buf[offset] >> 5) & 1;
+ dev = buf[offset] & 0x1f;
+ code = (buf[offset+1] >> 2) & 0x3f;
- /* filter out invalid key presses */
- ircode = (start << 12) | (toggle << 11) | (dev << 6) | code;
- if ((ircode & 0x1fff) == 0x1fff)
- return 0;
+ /* rc5 has two start bits
+ * the first bit must be one
+ * the second bit defines the command range:
+ * 1 = 0-63, 0 = 64 - 127
+ */
+ if (!start)
+ /* no key pressed */
+ return 0;
- if (!range)
- code += 64;
+ /* filter out invalid key presses */
+ ircode = (start << 12) | (toggle << 11) | (dev << 6) | code;
+ if ((ircode & 0x1fff) == 0x1fff)
+ return 0;
- dprintk(1,"ir hauppauge (rc5): s%d r%d t%d dev=%d code=%d\n",
- start, range, toggle, dev, code);
+ if (!range)
+ code += 64;
- *protocol = RC_TYPE_RC5;
- *scancode = RC_SCANCODE_RC5(dev, code);
- *ptoggle = toggle;
- return 1;
+ dprintk(1, "ir hauppauge (rc5): s%d r%d t%d dev=%d code=%d\n",
+ start, range, toggle, dev, code);
+
+ *protocol = RC_TYPE_RC5;
+ *scancode = RC_SCANCODE_RC5(dev, code);
+ *ptoggle = toggle;
+
+ return 1;
+ } else if (size == 6 && (buf[0] & 0x40)) {
+ code = buf[4];
+ dev = buf[3];
+ vendor = get_unaligned_be16(buf + 1);
+
+ if (vendor == 0x800f) {
+ *ptoggle = (dev & 0x80) != 0;
+ *protocol = RC_TYPE_RC6_MCE;
+ dev &= 0x7f;
+ dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n",
+ *ptoggle, vendor, dev, code);
+ } else {
+ *ptoggle = 0;
+ *protocol = RC_TYPE_RC6_6A_32;
+ dprintk(1, "ir hauppauge (rc6-6a-32): vendor=%d dev=%d code=%d\n",
+ vendor, dev, code);
+ }
+
+ *scancode = RC_SCANCODE_RC6_6A(vendor, dev, code);
+
+ return 1;
+ }
+
+ return 0;
}
static int get_key_haup(struct IR_i2c *ir, enum rc_type *protocol,
u32 *scancode, u8 *toggle)
{
- return get_key_haup_common (ir, protocol, scancode, toggle, 3, 0);
+ return get_key_haup_common(ir, protocol, scancode, toggle, 3);
}
static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_type *protocol,
@@ -126,7 +156,7 @@ static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_type *protocol,
if (ret != 1)
return (ret < 0) ? ret : -EINVAL;
- return get_key_haup_common(ir, protocol, scancode, toggle, 6, 3);
+ return get_key_haup_common(ir, protocol, scancode, toggle, 6);
}
static int get_key_pixelview(struct IR_i2c *ir, enum rc_type *protocol,
@@ -347,7 +377,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case 0x71:
name = "Hauppauge/Zilog Z8";
ir->get_key = get_key_haup_xvr;
- rc_type = RC_BIT_RC5;
+ rc_type = RC_BIT_RC5 | RC_BIT_RC6_MCE | RC_BIT_RC6_6A_32;
ir_codes = RC_MAP_HAUPPAUGE;
break;
}
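A worked example of the new RC6-6A branch, assuming RC_SCANCODE_RC6_6A() packs its fields as vendor << 16 | device << 8 | command (the key values below are hypothetical, not from this patch):

	/* MCE remote: vendor 0x800f, device 0x04, command 0x22 */
	u32 scancode = RC_SCANCODE_RC6_6A(0x800f, 0x04, 0x22);
	/* = (0x800f << 16) | (0x04 << 8) | 0x22 = 0x800f0422 */

which is the 0x800fxxxx form that RC6 MCE keymap entries take, so the widened rc_type mask lets one keymap serve both protocols.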
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 6dfaead6aaa8..72e71b762827 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -16,10 +16,11 @@
#include <linux/v4l2-mediabus.h>
#include <linux/module.h>
-#include <media/soc_camera.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
/*
* MT9M111, MT9M112 and MT9M131:
@@ -187,10 +188,10 @@ struct mt9m111_datafmt {
};
static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
- {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG},
- {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG},
- {MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
- {MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG},
+ {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB},
+ {MEDIA_BUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_SRGB},
{MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
{MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
{MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
@@ -383,30 +384,36 @@ static int mt9m111_reset(struct mt9m111 *mt9m111)
return ret;
}
-static int mt9m111_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9m111_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- struct v4l2_rect rect = a->c;
- struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct v4l2_rect rect = sel->r;
int width, height;
- int ret;
+ int ret, align = 0;
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
if (mt9m111->fmt->code == MEDIA_BUS_FMT_SBGGR8_1X8 ||
mt9m111->fmt->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) {
/* Bayer format - even size lengths */
- rect.width = ALIGN(rect.width, 2);
- rect.height = ALIGN(rect.height, 2);
+ align = 1;
/* Let the user play with the starting pixel */
}
/* FIXME: the datasheet doesn't specify minimum sizes */
- soc_camera_limit_side(&rect.left, &rect.width,
- MT9M111_MIN_DARK_COLS, 2, MT9M111_MAX_WIDTH);
-
- soc_camera_limit_side(&rect.top, &rect.height,
- MT9M111_MIN_DARK_ROWS, 2, MT9M111_MAX_HEIGHT);
+ v4l_bound_align_image(&rect.width, 2, MT9M111_MAX_WIDTH, align,
+ &rect.height, 2, MT9M111_MAX_HEIGHT, align, 0);
+ rect.left = clamp(rect.left, MT9M111_MIN_DARK_COLS,
+ MT9M111_MIN_DARK_COLS + MT9M111_MAX_WIDTH -
+ (__s32)rect.width);
+ rect.top = clamp(rect.top, MT9M111_MIN_DARK_ROWS,
+ MT9M111_MIN_DARK_ROWS + MT9M111_MAX_HEIGHT -
+ (__s32)rect.height);
width = min(mt9m111->width, rect.width);
height = min(mt9m111->height, rect.height);
@@ -421,30 +428,30 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
return ret;
}
-static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9m111_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
-
- a->c = mt9m111->rect;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
-static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
- a->bounds.left = MT9M111_MIN_DARK_COLS;
- a->bounds.top = MT9M111_MIN_DARK_ROWS;
- a->bounds.width = MT9M111_MAX_WIDTH;
- a->bounds.height = MT9M111_MAX_HEIGHT;
- a->defrect = a->bounds;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
-
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = MT9M111_MIN_DARK_COLS;
+ sel->r.top = MT9M111_MIN_DARK_ROWS;
+ sel->r.width = MT9M111_MAX_WIDTH;
+ sel->r.height = MT9M111_MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = mt9m111->rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int mt9m111_get_fmt(struct v4l2_subdev *sd,
@@ -775,17 +782,16 @@ static int mt9m111_init(struct mt9m111 *mt9m111)
static int mt9m111_power_on(struct mt9m111 *mt9m111)
{
struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- ret = soc_camera_power_on(&client->dev, ssdd, mt9m111->clk);
+ ret = v4l2_clk_enable(mt9m111->clk);
if (ret < 0)
return ret;
ret = mt9m111_resume(mt9m111);
if (ret < 0) {
dev_err(&client->dev, "Failed to resume the sensor: %d\n", ret);
- soc_camera_power_off(&client->dev, ssdd, mt9m111->clk);
+ v4l2_clk_disable(mt9m111->clk);
}
return ret;
@@ -793,11 +799,8 @@ static int mt9m111_power_on(struct mt9m111 *mt9m111)
static void mt9m111_power_off(struct mt9m111 *mt9m111)
{
- struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
mt9m111_suspend(mt9m111);
- soc_camera_power_off(&client->dev, ssdd, mt9m111->clk);
+ v4l2_clk_disable(mt9m111->clk);
}
static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
@@ -854,27 +857,22 @@ static int mt9m111_enum_mbus_code(struct v4l2_subdev *sd,
static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
- .s_crop = mt9m111_s_crop,
- .g_crop = mt9m111_g_crop,
- .cropcap = mt9m111_cropcap,
.g_mbus_config = mt9m111_g_mbus_config,
};
static const struct v4l2_subdev_pad_ops mt9m111_subdev_pad_ops = {
.enum_mbus_code = mt9m111_enum_mbus_code,
+ .get_selection = mt9m111_get_selection,
+ .set_selection = mt9m111_set_selection,
.get_fmt = mt9m111_get_fmt,
.set_fmt = mt9m111_set_fmt,
};
@@ -933,20 +931,8 @@ static int mt9m111_probe(struct i2c_client *client,
{
struct mt9m111 *mt9m111;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (client->dev.of_node) {
- ssdd = devm_kzalloc(&client->dev, sizeof(*ssdd), GFP_KERNEL);
- if (!ssdd)
- return -ENOMEM;
- client->dev.platform_data = ssdd;
- }
- if (!ssdd) {
- dev_err(&client->dev, "mt9m111: driver needs platform data\n");
- return -EINVAL;
- }
-
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
dev_warn(&adapter->dev,
"I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
@@ -992,10 +978,6 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->lastpage = -1;
mutex_init(&mt9m111->power_lock);
- ret = soc_camera_power_init(&client->dev, ssdd);
- if (ret < 0)
- goto out_hdlfree;
-
ret = mt9m111_video_probe(client);
if (ret < 0)
goto out_hdlfree;
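With cropcap/g_crop/s_crop gone, consumers reach the crop rectangle through the subdev selection API instead. A hypothetical caller-side sketch:

	struct v4l2_subdev_selection sel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = V4L2_SEL_TGT_CROP_BOUNDS,
	};
	int ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sel);
	/* on success, sel.r spans the full array incl. the dark columns/rows */

The same call with V4L2_SEL_TGT_CROP returns the currently programmed rectangle, replacing the old g_crop semantics, and set_selection with that target replaces s_crop.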
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index be5a7fd4f076..502c72238a4a 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -23,6 +23,7 @@
#include <linux/videodev2.h>
#include <media/media-entity.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -1520,6 +1521,10 @@ static int ov965x_probe(struct i2c_client *client,
/* Update exposure time min/max to match frame format */
ov965x_update_exposure_ctrl(ov965x);
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto err_ctrls;
+
return 0;
err_ctrls:
v4l2_ctrl_handler_free(sd->ctrl_handler);
@@ -1532,7 +1537,7 @@ static int ov965x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- v4l2_device_unregister_subdev(sd);
+ v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
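The ov9650 change registers the subdevice with the v4l2-async framework in probe and mirrors that in remove with v4l2_async_unregister_subdev(). A sketch of the assumed bridge-driver counterpart (not in this patch) that would pick the sensor up once it binds:

	static int host_sd_bound(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_subdev *asd)
	{
		/* create pad links to sd->entity, propagate formats, ... */
		return 0;
	}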
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 08af58fb8e7d..3844853ab0a0 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1706,7 +1706,7 @@ static int s5c73m3_probe(struct i2c_client *client,
state->oif_pads[OIF_ISP_PAD].flags = MEDIA_PAD_FL_SINK;
state->oif_pads[OIF_JPEG_PAD].flags = MEDIA_PAD_FL_SINK;
state->oif_pads[OIF_SOURCE_PAD].flags = MEDIA_PAD_FL_SOURCE;
- oif_sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ oif_sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
ret = media_entity_pads_init(&oif_sd->entity, OIF_NUM_PADS,
state->oif_pads);
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index 8a0f22da590f..6ebcf254989a 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -1019,7 +1019,6 @@ MODULE_DEVICE_TABLE(i2c, s5k4ecgx_id);
static struct i2c_driver v4l2_i2c_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = S5K4ECGX_DRIVER_NAME,
},
.probe = s5k4ecgx_probe,
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index cbe4711e9b31..769964057881 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -331,6 +331,7 @@ static int s5k6a3_probe(struct i2c_client *client,
sensor->format.width = S5K6A3_DEFAULT_WIDTH;
sensor->format.height = S5K6A3_DEFAULT_HEIGHT;
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&sd->entity, 1, &sensor->pad);
if (ret < 0)
@@ -376,7 +377,6 @@ static struct i2c_driver s5k6a3_driver = {
.driver = {
.of_match_table = of_match_ptr(s5k6a3_of_match),
.name = S5K6A3_DRV_NAME,
- .owner = THIS_MODULE,
},
.probe = s5k6a3_probe,
.remove = s5k6a3_remove,
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index d08ab6c8357c..44f8c7e10a35 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -24,8 +24,8 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/smiapp.h>
@@ -328,6 +328,14 @@ static void __smiapp_update_exposure_limits(struct smiapp_sensor *sensor)
* orders must be defined.
*/
static const struct smiapp_csi_data_format smiapp_csi_data_formats[] = {
+ { MEDIA_BUS_FMT_SGRBG16_1X16, 16, 16, SMIAPP_PIXEL_ORDER_GRBG, },
+ { MEDIA_BUS_FMT_SRGGB16_1X16, 16, 16, SMIAPP_PIXEL_ORDER_RGGB, },
+ { MEDIA_BUS_FMT_SBGGR16_1X16, 16, 16, SMIAPP_PIXEL_ORDER_BGGR, },
+ { MEDIA_BUS_FMT_SGBRG16_1X16, 16, 16, SMIAPP_PIXEL_ORDER_GBRG, },
+ { MEDIA_BUS_FMT_SGRBG14_1X14, 14, 14, SMIAPP_PIXEL_ORDER_GRBG, },
+ { MEDIA_BUS_FMT_SRGGB14_1X14, 14, 14, SMIAPP_PIXEL_ORDER_RGGB, },
+ { MEDIA_BUS_FMT_SBGGR14_1X14, 14, 14, SMIAPP_PIXEL_ORDER_BGGR, },
+ { MEDIA_BUS_FMT_SGBRG14_1X14, 14, 14, SMIAPP_PIXEL_ORDER_GBRG, },
{ MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GRBG, },
{ MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_RGGB, },
{ MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_BGGR, },
@@ -625,12 +633,12 @@ static int smiapp_init_late_controls(struct smiapp_sensor *sensor)
0, max_value, 1, max_value);
}
- for (max = 0; sensor->platform_data->op_sys_clock[max + 1]; max++);
+ for (max = 0; sensor->hwcfg->op_sys_clock[max + 1]; max++);
sensor->link_freq = v4l2_ctrl_new_int_menu(
&sensor->src->ctrl_handler, &smiapp_ctrl_ops,
V4L2_CID_LINK_FREQ, __fls(*valid_link_freqs),
- __ffs(*valid_link_freqs), sensor->platform_data->op_sys_clock);
+ __ffs(*valid_link_freqs), sensor->hwcfg->op_sys_clock);
return sensor->src->ctrl_handler.error;
}
@@ -833,8 +841,8 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
pll->bits_per_pixel = f->compressed;
- for (j = 0; sensor->platform_data->op_sys_clock[j]; j++) {
- pll->link_freq = sensor->platform_data->op_sys_clock[j];
+ for (j = 0; sensor->hwcfg->op_sys_clock[j]; j++) {
+ pll->link_freq = sensor->hwcfg->op_sys_clock[j];
rval = smiapp_pll_try(sensor, pll);
dev_dbg(&client->dev, "link freq %u Hz, bpp %u %s\n",
@@ -1032,22 +1040,22 @@ static int smiapp_change_cci_addr(struct smiapp_sensor *sensor)
int rval;
u32 val;
- client->addr = sensor->platform_data->i2c_addr_dfl;
+ client->addr = sensor->hwcfg->i2c_addr_dfl;
rval = smiapp_write(sensor,
SMIAPP_REG_U8_CCI_ADDRESS_CONTROL,
- sensor->platform_data->i2c_addr_alt << 1);
+ sensor->hwcfg->i2c_addr_alt << 1);
if (rval)
return rval;
- client->addr = sensor->platform_data->i2c_addr_alt;
+ client->addr = sensor->hwcfg->i2c_addr_alt;
/* verify addr change went ok */
rval = smiapp_read(sensor, SMIAPP_REG_U8_CCI_ADDRESS_CONTROL, &val);
if (rval)
return rval;
- if (val != sensor->platform_data->i2c_addr_alt << 1)
+ if (val != sensor->hwcfg->i2c_addr_alt << 1)
return -ENODEV;
return 0;
@@ -1061,13 +1069,13 @@ static int smiapp_change_cci_addr(struct smiapp_sensor *sensor)
static int smiapp_setup_flash_strobe(struct smiapp_sensor *sensor)
{
struct smiapp_flash_strobe_parms *strobe_setup;
- unsigned int ext_freq = sensor->platform_data->ext_clk;
+ unsigned int ext_freq = sensor->hwcfg->ext_clk;
u32 tmp;
u32 strobe_adjustment;
u32 strobe_width_high_rs;
int rval;
- strobe_setup = sensor->platform_data->strobe_setup;
+ strobe_setup = sensor->hwcfg->strobe_setup;
/*
* How to calculate registers related to strobe length. Please
@@ -1179,7 +1187,7 @@ static int smiapp_setup_flash_strobe(struct smiapp_sensor *sensor)
strobe_setup->trigger);
out:
- sensor->platform_data->strobe_setup->trigger = 0;
+ sensor->hwcfg->strobe_setup->trigger = 0;
return rval;
}
@@ -1201,21 +1209,16 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
}
usleep_range(1000, 1000);
- if (sensor->platform_data->set_xclk)
- rval = sensor->platform_data->set_xclk(
- &sensor->src->sd, sensor->platform_data->ext_clk);
- else
- rval = clk_prepare_enable(sensor->ext_clk);
+ rval = clk_prepare_enable(sensor->ext_clk);
if (rval < 0) {
dev_dbg(&client->dev, "failed to enable xclk\n");
goto out_xclk_fail;
}
usleep_range(1000, 1000);
- if (gpio_is_valid(sensor->platform_data->xshutdown))
- gpio_set_value(sensor->platform_data->xshutdown, 1);
+ gpiod_set_value(sensor->xshutdown, 1);
- sleep = SMIAPP_RESET_DELAY(sensor->platform_data->ext_clk);
+ sleep = SMIAPP_RESET_DELAY(sensor->hwcfg->ext_clk);
usleep_range(sleep, sleep);
/*
@@ -1229,7 +1232,7 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
* is found.
*/
- if (sensor->platform_data->i2c_addr_alt) {
+ if (sensor->hwcfg->i2c_addr_alt) {
rval = smiapp_change_cci_addr(sensor);
if (rval) {
dev_err(&client->dev, "cci address change error\n");
@@ -1244,7 +1247,7 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
goto out_cci_addr_fail;
}
- if (sensor->platform_data->i2c_addr_alt) {
+ if (sensor->hwcfg->i2c_addr_alt) {
rval = smiapp_change_cci_addr(sensor);
if (rval) {
dev_err(&client->dev, "cci address change error\n");
@@ -1261,14 +1264,14 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
rval = smiapp_write(
sensor, SMIAPP_REG_U16_EXTCLK_FREQUENCY_MHZ,
- sensor->platform_data->ext_clk / (1000000 / (1 << 8)));
+ sensor->hwcfg->ext_clk / (1000000 / (1 << 8)));
if (rval) {
dev_err(&client->dev, "extclk frequency set failed\n");
goto out_cci_addr_fail;
}
rval = smiapp_write(sensor, SMIAPP_REG_U8_CSI_LANE_MODE,
- sensor->platform_data->lanes - 1);
+ sensor->hwcfg->lanes - 1);
if (rval) {
dev_err(&client->dev, "csi lane mode set failed\n");
goto out_cci_addr_fail;
@@ -1282,7 +1285,7 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
}
rval = smiapp_write(sensor, SMIAPP_REG_U8_CSI_SIGNALLING_MODE,
- sensor->platform_data->csi_signalling_mode);
+ sensor->hwcfg->csi_signalling_mode);
if (rval) {
dev_err(&client->dev, "csi signalling mode set failed\n");
goto out_cci_addr_fail;
@@ -1322,12 +1325,8 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
return 0;
out_cci_addr_fail:
- if (gpio_is_valid(sensor->platform_data->xshutdown))
- gpio_set_value(sensor->platform_data->xshutdown, 0);
- if (sensor->platform_data->set_xclk)
- sensor->platform_data->set_xclk(&sensor->src->sd, 0);
- else
- clk_disable_unprepare(sensor->ext_clk);
+ gpiod_set_value(sensor->xshutdown, 0);
+ clk_disable_unprepare(sensor->ext_clk);
out_xclk_fail:
regulator_disable(sensor->vana);
@@ -1343,17 +1342,13 @@ static void smiapp_power_off(struct smiapp_sensor *sensor)
* really see a power off and next time the cci address change
* will fail. So do a soft reset explicitly here.
*/
- if (sensor->platform_data->i2c_addr_alt)
+ if (sensor->hwcfg->i2c_addr_alt)
smiapp_write(sensor,
SMIAPP_REG_U8_SOFTWARE_RESET,
SMIAPP_SOFTWARE_RESET);
- if (gpio_is_valid(sensor->platform_data->xshutdown))
- gpio_set_value(sensor->platform_data->xshutdown, 0);
- if (sensor->platform_data->set_xclk)
- sensor->platform_data->set_xclk(&sensor->src->sd, 0);
- else
- clk_disable_unprepare(sensor->ext_clk);
+ gpiod_set_value(sensor->xshutdown, 0);
+ clk_disable_unprepare(sensor->ext_clk);
usleep_range(5000, 5000);
regulator_disable(sensor->vana);
sensor->streaming = false;
@@ -1491,8 +1486,8 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor)
if ((sensor->limits[SMIAPP_LIMIT_FLASH_MODE_CAPABILITY] &
(SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE |
SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE)) &&
- sensor->platform_data->strobe_setup != NULL &&
- sensor->platform_data->strobe_setup->trigger != 0) {
+ sensor->hwcfg->strobe_setup != NULL &&
+ sensor->hwcfg->strobe_setup->trigger != 0) {
rval = smiapp_setup_flash_strobe(sensor);
if (rval)
goto out;
@@ -2309,7 +2304,7 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
if (!sensor->nvm_size) {
/* NVM not read yet - read it now */
- sensor->nvm_size = sensor->platform_data->nvm_size;
+ sensor->nvm_size = sensor->hwcfg->nvm_size;
if (smiapp_set_power(subdev, 1) < 0)
return -ENODEV;
if (smiapp_read_nvm(sensor, sensor->nvm)) {
@@ -2554,35 +2549,27 @@ static int smiapp_init(struct smiapp_sensor *sensor)
return PTR_ERR(sensor->vana);
}
- if (!sensor->platform_data->set_xclk) {
- sensor->ext_clk = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(sensor->ext_clk)) {
- dev_err(&client->dev, "could not get clock\n");
- return PTR_ERR(sensor->ext_clk);
- }
-
- rval = clk_set_rate(sensor->ext_clk,
- sensor->platform_data->ext_clk);
- if (rval < 0) {
- dev_err(&client->dev,
- "unable to set clock freq to %u\n",
- sensor->platform_data->ext_clk);
- return rval;
- }
+ sensor->ext_clk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(sensor->ext_clk)) {
+ dev_err(&client->dev, "could not get clock (%ld)\n",
+ PTR_ERR(sensor->ext_clk));
+ return -EPROBE_DEFER;
}
- if (gpio_is_valid(sensor->platform_data->xshutdown)) {
- rval = devm_gpio_request_one(
- &client->dev, sensor->platform_data->xshutdown, 0,
- "SMIA++ xshutdown");
- if (rval < 0) {
- dev_err(&client->dev,
- "unable to acquire reset gpio %d\n",
- sensor->platform_data->xshutdown);
- return rval;
- }
+ rval = clk_set_rate(sensor->ext_clk,
+ sensor->hwcfg->ext_clk);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "unable to set clock freq to %u\n",
+ sensor->hwcfg->ext_clk);
+ return rval;
}
+ sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sensor->xshutdown))
+ return PTR_ERR(sensor->xshutdown);
+
rval = smiapp_power_on(sensor);
if (rval)
return -ENODEV;
@@ -2612,7 +2599,7 @@ static int smiapp_init(struct smiapp_sensor *sensor)
*
* Rotation also changes the bayer pattern.
*/
- if (sensor->platform_data->module_board_orient ==
+ if (sensor->hwcfg->module_board_orient ==
SMIAPP_MODULE_BOARD_ORIENT_180)
sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
SMIAPP_IMAGE_ORIENTATION_VFLIP;
@@ -2661,9 +2648,9 @@ static int smiapp_init(struct smiapp_sensor *sensor)
/* SMIA++ NVM initialization - it will be read from the sensor
* when it is first requested by userspace.
*/
- if (sensor->minfo.smiapp_version && sensor->platform_data->nvm_size) {
+ if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
sensor->nvm = devm_kzalloc(&client->dev,
- sensor->platform_data->nvm_size, GFP_KERNEL);
+ sensor->hwcfg->nvm_size, GFP_KERNEL);
if (sensor->nvm == NULL) {
dev_err(&client->dev, "nvm buf allocation failed\n");
rval = -ENOMEM;
@@ -2706,8 +2693,8 @@ static int smiapp_init(struct smiapp_sensor *sensor)
/* prepare PLL configuration input values */
pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
- pll->csi2.lanes = sensor->platform_data->lanes;
- pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
+ pll->csi2.lanes = sensor->hwcfg->lanes;
+ pll->ext_clk_freq_hz = sensor->hwcfg->ext_clk;
pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
/* Profile 0 sensors have no separate OP clock branch. */
if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
@@ -2984,9 +2971,9 @@ static int smiapp_resume(struct device *dev)
#endif /* CONFIG_PM */
-static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
+static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
{
- struct smiapp_platform_data *pdata;
+ struct smiapp_hwconfig *hwcfg;
struct v4l2_of_endpoint *bus_cfg;
struct device_node *ep;
int i;
@@ -3003,58 +2990,55 @@ static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
if (IS_ERR(bus_cfg))
goto out_err;
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
+ hwcfg = devm_kzalloc(dev, sizeof(*hwcfg), GFP_KERNEL);
+ if (!hwcfg)
goto out_err;
switch (bus_cfg->bus_type) {
case V4L2_MBUS_CSI2:
- pdata->csi_signalling_mode = SMIAPP_CSI_SIGNALLING_MODE_CSI2;
+ hwcfg->csi_signalling_mode = SMIAPP_CSI_SIGNALLING_MODE_CSI2;
break;
/* FIXME: add CCP2 support. */
default:
goto out_err;
}
- pdata->lanes = bus_cfg->bus.mipi_csi2.num_data_lanes;
- dev_dbg(dev, "lanes %u\n", pdata->lanes);
-
- /* xshutdown GPIO is optional */
- pdata->xshutdown = of_get_named_gpio(dev->of_node, "reset-gpios", 0);
+ hwcfg->lanes = bus_cfg->bus.mipi_csi2.num_data_lanes;
+ dev_dbg(dev, "lanes %u\n", hwcfg->lanes);
/* NVM size is not mandatory */
of_property_read_u32(dev->of_node, "nokia,nvm-size",
- &pdata->nvm_size);
+ &hwcfg->nvm_size);
rval = of_property_read_u32(dev->of_node, "clock-frequency",
- &pdata->ext_clk);
+ &hwcfg->ext_clk);
if (rval) {
dev_warn(dev, "can't get clock-frequency\n");
goto out_err;
}
- dev_dbg(dev, "reset %d, nvm %d, clk %d, csi %d\n", pdata->xshutdown,
- pdata->nvm_size, pdata->ext_clk, pdata->csi_signalling_mode);
+ dev_dbg(dev, "nvm %d, clk %d, csi %d\n", hwcfg->nvm_size,
+ hwcfg->ext_clk, hwcfg->csi_signalling_mode);
if (!bus_cfg->nr_of_link_frequencies) {
dev_warn(dev, "no link frequencies defined\n");
goto out_err;
}
- pdata->op_sys_clock = devm_kcalloc(
+ hwcfg->op_sys_clock = devm_kcalloc(
dev, bus_cfg->nr_of_link_frequencies + 1 /* guardian */,
- sizeof(*pdata->op_sys_clock), GFP_KERNEL);
- if (!pdata->op_sys_clock)
+ sizeof(*hwcfg->op_sys_clock), GFP_KERNEL);
+ if (!hwcfg->op_sys_clock)
goto out_err;
for (i = 0; i < bus_cfg->nr_of_link_frequencies; i++) {
- pdata->op_sys_clock[i] = bus_cfg->link_frequencies[i];
- dev_dbg(dev, "freq %d: %lld\n", i, pdata->op_sys_clock[i]);
+ hwcfg->op_sys_clock[i] = bus_cfg->link_frequencies[i];
+ dev_dbg(dev, "freq %d: %lld\n", i, hwcfg->op_sys_clock[i]);
}
v4l2_of_free_endpoint(bus_cfg);
of_node_put(ep);
- return pdata;
+ return hwcfg;
out_err:
v4l2_of_free_endpoint(bus_cfg);
@@ -3066,17 +3050,17 @@ static int smiapp_probe(struct i2c_client *client,
const struct i2c_device_id *devid)
{
struct smiapp_sensor *sensor;
- struct smiapp_platform_data *pdata = smiapp_get_pdata(&client->dev);
+ struct smiapp_hwconfig *hwcfg = smiapp_get_hwconfig(&client->dev);
int rval;
- if (pdata == NULL)
+ if (hwcfg == NULL)
return -ENODEV;
sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
if (sensor == NULL)
return -ENOMEM;
- sensor->platform_data = pdata;
+ sensor->hwcfg = hwcfg;
mutex_init(&sensor->mutex);
mutex_init(&sensor->power_mutex);
sensor->src = &sensor->ssds[sensor->ssds_used];
@@ -3119,12 +3103,8 @@ static int smiapp_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
if (sensor->power_count) {
- if (gpio_is_valid(sensor->platform_data->xshutdown))
- gpio_set_value(sensor->platform_data->xshutdown, 0);
- if (sensor->platform_data->set_xclk)
- sensor->platform_data->set_xclk(&sensor->src->sd, 0);
- else
- clk_disable_unprepare(sensor->ext_clk);
+ gpiod_set_value(sensor->xshutdown, 0);
+ clk_disable_unprepare(sensor->ext_clk);
sensor->power_count = 0;
}
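Two things make the smiapp-core simplification above work. First, hwcfg is the renamed platform data, now always built from DT by smiapp_get_hwconfig(). Second, the gpiod API tolerates a missing GPIO: devm_gpiod_get_optional() returns NULL when no "xshutdown" GPIO is described, and a NULL descriptor is a no-op for the setters, which is why the gpio_is_valid() checks could all be deleted. A minimal sketch of that contract:

	struct gpio_desc *xshutdown;

	xshutdown = devm_gpiod_get_optional(dev, "xshutdown", GPIOD_OUT_LOW);
	if (IS_ERR(xshutdown))
		return PTR_ERR(xshutdown);	/* a real error, e.g. -EPROBE_DEFER */

	gpiod_set_value(xshutdown, 1);		/* silently ignored when NULL */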
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.c b/drivers/media/i2c/smiapp/smiapp-quirk.c
index abf9ea7a0fb7..cb128eae9c54 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.c
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.c
@@ -26,7 +26,7 @@ static int smiapp_write_8(struct smiapp_sensor *sensor, u16 reg, u8 val)
}
static int smiapp_write_8s(struct smiapp_sensor *sensor,
- struct smiapp_reg_8 *regs, int len)
+ const struct smiapp_reg_8 *regs, int len)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
int rval;
@@ -71,7 +71,7 @@ static int jt8ew9_limits(struct smiapp_sensor *sensor)
static int jt8ew9_post_poweron(struct smiapp_sensor *sensor)
{
- struct smiapp_reg_8 regs[] = {
+ const struct smiapp_reg_8 regs[] = {
{ 0x30a3, 0xd8 }, /* Output port control : LVDS ports only */
{ 0x30ae, 0x00 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */
{ 0x30af, 0xd0 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */
@@ -115,7 +115,7 @@ const struct smiapp_quirk smiapp_jt8ew9_quirk = {
static int imx125es_post_poweron(struct smiapp_sensor *sensor)
{
/* Taken from v02. No idea what the other two are. */
- struct smiapp_reg_8 regs[] = {
+ const struct smiapp_reg_8 regs[] = {
/*
* 0x3302: clk during frame blanking:
* 0x00 - HS mode, 0x01 - LP11
@@ -145,8 +145,7 @@ static int jt8ev1_post_poweron(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
int rval;
-
- struct smiapp_reg_8 regs[] = {
+ const struct smiapp_reg_8 regs[] = {
{ 0x3031, 0xcd }, /* For digital binning (EQ_MONI) */
{ 0x30a3, 0xd0 }, /* FLASH STROBE enable */
{ 0x3237, 0x00 }, /* For control of pulse timing for ADC */
@@ -167,8 +166,7 @@ static int jt8ev1_post_poweron(struct smiapp_sensor *sensor)
{ 0x33cf, 0xec }, /* For Black sun */
{ 0x3328, 0x80 }, /* Ugh. No idea what's this. */
};
-
- struct smiapp_reg_8 regs_96[] = {
+ const struct smiapp_reg_8 regs_96[] = {
{ 0x30ae, 0x00 }, /* For control of ADC clock */
{ 0x30af, 0xd0 },
{ 0x30b0, 0x01 },
@@ -178,13 +176,13 @@ static int jt8ev1_post_poweron(struct smiapp_sensor *sensor)
if (rval < 0)
return rval;
- switch (sensor->platform_data->ext_clk) {
+ switch (sensor->hwcfg->ext_clk) {
case 9600000:
return smiapp_write_8s(sensor, regs_96,
ARRAY_SIZE(regs_96));
default:
dev_warn(&client->dev, "no MSRs for %d Hz ext_clk\n",
- sensor->platform_data->ext_clk);
+ sensor->hwcfg->ext_clk);
return 0;
}
}
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 6b6c20b61397..1e501c06d18c 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -188,7 +188,8 @@ int smiapp_read_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 *val)
SMIAPP_QUIRK_FLAG_8BIT_READ_ONLY));
}
-int smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val)
+static int smiapp_read_quirk(struct smiapp_sensor *sensor, u32 reg, u32 *val,
+ bool force8)
{
int rval;
@@ -199,21 +200,20 @@ int smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val)
if (rval < 0)
return rval;
+ if (force8)
+ return __smiapp_read(sensor, reg, val, true);
+
return smiapp_read_no_quirk(sensor, reg, val);
}
-int smiapp_read_8only(struct smiapp_sensor *sensor, u32 reg, u32 *val)
+int smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val)
{
- int rval;
-
- *val = 0;
- rval = smiapp_call_quirk(sensor, reg_access, false, &reg, val);
- if (rval == -ENOIOCTLCMD)
- return 0;
- if (rval < 0)
- return rval;
+ return smiapp_read_quirk(sensor, reg, val, false);
+}
- return __smiapp_read(sensor, reg, val, true);
+int smiapp_read_8only(struct smiapp_sensor *sensor, u32 reg, u32 *val)
+{
+ return smiapp_read_quirk(sensor, reg, val, true);
}
int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
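The smiapp-regs refactor folds the duplicated quirk handling into one helper; the resulting call graph is simply:

	/*
	 *   smiapp_read()       -> smiapp_read_quirk(..., force8 = false)
	 *   smiapp_read_8only() -> smiapp_read_quirk(..., force8 = true)
	 *
	 * with the reg_access quirk hook invoked exactly once on either path.
	 */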
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index 2174f89a00db..aae72bc87bf7 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -151,7 +151,7 @@ struct smiapp_csi_data_format {
#define SMIAPP_PADS 2
#define SMIAPP_COMPRESSED_BASE 8
-#define SMIAPP_COMPRESSED_MAX 12
+#define SMIAPP_COMPRESSED_MAX 16
#define SMIAPP_NR_OF_COMPRESSED (SMIAPP_COMPRESSED_MAX - \
SMIAPP_COMPRESSED_BASE + 1)
@@ -197,9 +197,10 @@ struct smiapp_sensor {
struct smiapp_subdev *binner;
struct smiapp_subdev *scaler;
struct smiapp_subdev *pixel_array;
- struct smiapp_platform_data *platform_data;
+ struct smiapp_hwconfig *hwcfg;
struct regulator *vana;
struct clk *ext_clk;
+ struct gpio_desc *xshutdown;
u32 limits[SMIAPP_LIMIT_LAST];
u8 nbinning_subtypes;
struct smiapp_binning_subtype binning_subtypes[SMIAPP_BINNING_SUBTYPES];
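Raising SMIAPP_COMPRESSED_MAX matches the 14- and 16-bit raw formats added to smiapp_csi_data_formats[] in the smiapp-core hunks above; the compressed bits-per-pixel range now spans 8..16:

	#define SMIAPP_COMPRESSED_BASE	8
	#define SMIAPP_COMPRESSED_MAX	16
	#define SMIAPP_NR_OF_COMPRESSED	(SMIAPP_COMPRESSED_MAX - \
					 SMIAPP_COMPRESSED_BASE + 1)	/* == 9 */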
diff --git a/drivers/media/i2c/soc_camera/Kconfig b/drivers/media/i2c/soc_camera/Kconfig
index 23d352f0adf0..7704bcf5cc25 100644
--- a/drivers/media/i2c/soc_camera/Kconfig
+++ b/drivers/media/i2c/soc_camera/Kconfig
@@ -14,11 +14,14 @@ config SOC_CAMERA_MT9M001
and colour models.
config SOC_CAMERA_MT9M111
- tristate "mt9m111, mt9m112 and mt9m131 support"
+ tristate "legacy soc_camera mt9m111, mt9m112 and mt9m131 support"
depends on SOC_CAMERA && I2C
+ select VIDEO_MT9M111
help
This driver supports MT9M111, MT9M112 and MT9M131 cameras from
- Micron/Aptina
+ Micron/Aptina.
+ This is the legacy configuration and should not be used anymore;
+ use VIDEO_MT9M111 instead.
config SOC_CAMERA_MT9T031
tristate "mt9t031 support"
diff --git a/drivers/media/i2c/soc_camera/Makefile b/drivers/media/i2c/soc_camera/Makefile
index d0421feaa796..6f994f9353a0 100644
--- a/drivers/media/i2c/soc_camera/Makefile
+++ b/drivers/media/i2c/soc_camera/Makefile
@@ -1,6 +1,5 @@
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
-obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
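With the Makefile entry gone, soc_camera no longer builds its own mt9m111.o; the legacy SOC_CAMERA_MT9M111 option above merely selects VIDEO_MT9M111, so both symbols end up using the common drivers/media/i2c/mt9m111.c (the move of the source file itself is presumably handled by hunks outside the lines shown here).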
diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c
index f68c2352c63c..05b55cfe8147 100644
--- a/drivers/media/i2c/soc_camera/imx074.c
+++ b/drivers/media/i2c/soc_camera/imx074.c
@@ -209,31 +209,26 @@ static int imx074_get_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int imx074_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int imx074_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- struct v4l2_rect *rect = &a->c;
-
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- rect->top = 0;
- rect->left = 0;
- rect->width = IMX074_WIDTH;
- rect->height = IMX074_HEIGHT;
-
- return 0;
-}
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
-static int imx074_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = IMX074_WIDTH;
- a->bounds.height = IMX074_HEIGHT;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = IMX074_WIDTH;
+ sel->r.height = IMX074_HEIGHT;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int imx074_enum_mbus_code(struct v4l2_subdev *sd,
@@ -278,8 +273,6 @@ static int imx074_g_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
.s_stream = imx074_s_stream,
- .g_crop = imx074_g_crop,
- .cropcap = imx074_cropcap,
.g_mbus_config = imx074_g_mbus_config,
};
@@ -289,6 +282,7 @@ static struct v4l2_subdev_core_ops imx074_subdev_core_ops = {
static const struct v4l2_subdev_pad_ops imx074_subdev_pad_ops = {
.enum_mbus_code = imx074_enum_mbus_code,
+ .get_selection = imx074_get_selection,
.get_fmt = imx074_get_fmt,
.set_fmt = imx074_set_fmt,
};
diff --git a/drivers/media/i2c/soc_camera/mt9m001.c b/drivers/media/i2c/soc_camera/mt9m001.c
index 69becc358659..3d6378d4491c 100644
--- a/drivers/media/i2c/soc_camera/mt9m001.c
+++ b/drivers/media/i2c/soc_camera/mt9m001.c
@@ -171,13 +171,19 @@ static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int mt9m001_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9m001_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct v4l2_rect rect = a->c;
- int ret;
+ struct v4l2_rect rect = sel->r;
const u16 hblank = 9, vblank = 25;
+ int ret;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
if (mt9m001->fmts == mt9m001_colour_fmts)
/*
@@ -225,29 +231,30 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
return ret;
}
-static int mt9m001_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9m001_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
- a->c = mt9m001->rect;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- a->bounds.left = MT9M001_COLUMN_SKIP;
- a->bounds.top = MT9M001_ROW_SKIP;
- a->bounds.width = MT9M001_MAX_WIDTH;
- a->bounds.height = MT9M001_MAX_HEIGHT;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = MT9M001_COLUMN_SKIP;
+ sel->r.top = MT9M001_ROW_SKIP;
+ sel->r.width = MT9M001_MAX_WIDTH;
+ sel->r.height = MT9M001_MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = mt9m001->rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int mt9m001_get_fmt(struct v4l2_subdev *sd,
@@ -275,18 +282,18 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct v4l2_crop a = {
- .c = {
- .left = mt9m001->rect.left,
- .top = mt9m001->rect.top,
- .width = mf->width,
- .height = mf->height,
- },
+ struct v4l2_subdev_selection sel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP,
+ .r.left = mt9m001->rect.left,
+ .r.top = mt9m001->rect.top,
+ .r.width = mf->width,
+ .r.height = mf->height,
};
int ret;
/* No support for scaling so far, just crop. TODO: use skipping */
- ret = mt9m001_s_crop(sd, &a);
+ ret = mt9m001_set_selection(sd, NULL, &sel);
if (!ret) {
mf->width = mt9m001->rect.width;
mf->height = mt9m001->rect.height;
@@ -625,9 +632,6 @@ static int mt9m001_s_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
.s_stream = mt9m001_s_stream,
- .s_crop = mt9m001_s_crop,
- .g_crop = mt9m001_g_crop,
- .cropcap = mt9m001_cropcap,
.g_mbus_config = mt9m001_g_mbus_config,
.s_mbus_config = mt9m001_s_mbus_config,
};
@@ -638,6 +642,8 @@ static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
static const struct v4l2_subdev_pad_ops mt9m001_subdev_pad_ops = {
.enum_mbus_code = mt9m001_enum_mbus_code,
+ .get_selection = mt9m001_get_selection,
+ .set_selection = mt9m001_set_selection,
.get_fmt = mt9m001_get_fmt,
.set_fmt = mt9m001_set_fmt,
};
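Note the internal call pattern introduced here: mt9m001_s_fmt() builds a v4l2_subdev_selection with .which = V4L2_SUBDEV_FORMAT_ACTIVE and passes a NULL pad config. That is safe in this driver because the ACTIVE path never dereferences cfg (TRY selections are rejected up front):

	/* internal caller; cfg is unused for ACTIVE selections in this driver */
	ret = mt9m001_set_selection(sd, NULL, &sel);

mt9v022 and ov6650 below use the same construction.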
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index 5c8e3ffe3b27..3aa5569065ad 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -264,7 +264,7 @@ static int mt9t031_set_params(struct i2c_client *client,
/*
* The caller provides a supported format, as guaranteed by
- * .set_fmt(FORMAT_TRY), soc_camera_s_crop() and soc_camera_cropcap()
+ * .set_fmt(FORMAT_TRY), soc_camera_s_selection() and soc_camera_cropcap()
*/
if (ret >= 0)
ret = reg_write(client, MT9T031_COLUMN_START, rect->left);
@@ -294,11 +294,17 @@ static int mt9t031_set_params(struct i2c_client *client,
return ret < 0 ? ret : 0;
}
-static int mt9t031_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9t031_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- struct v4l2_rect rect = a->c;
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
+ struct v4l2_rect rect = sel->r;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
rect.width = ALIGN(rect.width, 2);
rect.height = ALIGN(rect.height, 2);
@@ -312,29 +318,30 @@ static int mt9t031_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
return mt9t031_set_params(client, &rect, mt9t031->xskip, mt9t031->yskip);
}
-static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9t031_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
- a->c = mt9t031->rect;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- a->bounds.left = MT9T031_COLUMN_SKIP;
- a->bounds.top = MT9T031_ROW_SKIP;
- a->bounds.width = MT9T031_MAX_WIDTH;
- a->bounds.height = MT9T031_MAX_HEIGHT;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = MT9T031_COLUMN_SKIP;
+ sel->r.top = MT9T031_ROW_SKIP;
+ sel->r.width = MT9T031_MAX_WIDTH;
+ sel->r.height = MT9T031_MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = mt9t031->rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int mt9t031_get_fmt(struct v4l2_subdev *sd,
@@ -721,9 +728,6 @@ static int mt9t031_s_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
.s_stream = mt9t031_s_stream,
- .s_crop = mt9t031_s_crop,
- .g_crop = mt9t031_g_crop,
- .cropcap = mt9t031_cropcap,
.g_mbus_config = mt9t031_g_mbus_config,
.s_mbus_config = mt9t031_s_mbus_config,
};
@@ -734,6 +738,8 @@ static const struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
static const struct v4l2_subdev_pad_ops mt9t031_subdev_pad_ops = {
.enum_mbus_code = mt9t031_enum_mbus_code,
+ .get_selection = mt9t031_get_selection,
+ .set_selection = mt9t031_set_selection,
.get_fmt = mt9t031_get_fmt,
.set_fmt = mt9t031_set_fmt,
};
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index 6a1b2a9f9a09..2ef22241ec14 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -867,39 +867,48 @@ static int mt9t112_set_params(struct mt9t112_priv *priv,
return 0;
}
-static int mt9t112_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = MAX_WIDTH;
- a->bounds.height = MAX_HEIGHT;
- a->defrect.left = 0;
- a->defrect.top = 0;
- a->defrect.width = VGA_WIDTH;
- a->defrect.height = VGA_HEIGHT;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
-
- return 0;
-}
-
-static int mt9t112_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9t112_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
- a->c = priv->frame;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = MAX_WIDTH;
+ sel->r.height = MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = VGA_WIDTH;
+ sel->r.height = VGA_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = priv->frame;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
-static int mt9t112_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9t112_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t112_priv *priv = to_mt9t112(client);
- const struct v4l2_rect *rect = &a->c;
+ const struct v4l2_rect *rect = &sel->r;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
return mt9t112_set_params(priv, rect, priv->format->code);
}
@@ -1024,15 +1033,14 @@ static int mt9t112_s_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops mt9t112_subdev_video_ops = {
.s_stream = mt9t112_s_stream,
- .cropcap = mt9t112_cropcap,
- .g_crop = mt9t112_g_crop,
- .s_crop = mt9t112_s_crop,
.g_mbus_config = mt9t112_g_mbus_config,
.s_mbus_config = mt9t112_s_mbus_config,
};
static const struct v4l2_subdev_pad_ops mt9t112_subdev_pad_ops = {
.enum_mbus_code = mt9t112_enum_mbus_code,
+ .get_selection = mt9t112_get_selection,
+ .set_selection = mt9t112_set_selection,
.get_fmt = mt9t112_get_fmt,
.set_fmt = mt9t112_set_fmt,
};
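mt9t112 keeps the old cropcap semantics across the conversion; reading the switch above as a table:

	/*
	 * V4L2_SEL_TGT_CROP_BOUNDS  -> MAX_WIDTH x MAX_HEIGHT  (old bounds)
	 * V4L2_SEL_TGT_CROP_DEFAULT -> VGA_WIDTH x VGA_HEIGHT  (old defrect)
	 * V4L2_SEL_TGT_CROP         -> priv->frame             (current crop)
	 */

Unlike most of the other sensors converted here, the default rectangle (VGA) is deliberately smaller than the bounds.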
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index 2721e583bfa0..6a14ab5e4f2d 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -276,14 +276,20 @@ static int mt9v022_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static int mt9v022_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int mt9v022_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct v4l2_rect rect = a->c;
+ struct v4l2_rect rect = sel->r;
int min_row, min_blank;
int ret;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
/* Bayer format - even size lengths */
if (mt9v022->fmts == mt9v022_colour_fmts) {
rect.width = ALIGN(rect.width, 2);
@@ -350,29 +356,30 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
return 0;
}
-static int mt9v022_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int mt9v022_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
- a->c = mt9v022->rect;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int mt9v022_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- a->bounds.left = MT9V022_COLUMN_SKIP;
- a->bounds.top = MT9V022_ROW_SKIP;
- a->bounds.width = MT9V022_MAX_WIDTH;
- a->bounds.height = MT9V022_MAX_HEIGHT;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = MT9V022_COLUMN_SKIP;
+ sel->r.top = MT9V022_ROW_SKIP;
+ sel->r.width = MT9V022_MAX_WIDTH;
+ sel->r.height = MT9V022_MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = mt9v022->rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int mt9v022_get_fmt(struct v4l2_subdev *sd,
@@ -400,13 +407,13 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct v4l2_crop a = {
- .c = {
- .left = mt9v022->rect.left,
- .top = mt9v022->rect.top,
- .width = mf->width,
- .height = mf->height,
- },
+ struct v4l2_subdev_selection sel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP,
+ .r.left = mt9v022->rect.left,
+ .r.top = mt9v022->rect.top,
+ .r.width = mf->width,
+ .r.height = mf->height,
};
int ret;
@@ -430,7 +437,7 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
}
/* No support for scaling on this camera, just crop. */
- ret = mt9v022_s_crop(sd, &a);
+ ret = mt9v022_set_selection(sd, NULL, &sel);
if (!ret) {
mf->width = mt9v022->rect.width;
mf->height = mt9v022->rect.height;
@@ -853,9 +860,6 @@ static int mt9v022_s_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
.s_stream = mt9v022_s_stream,
- .s_crop = mt9v022_s_crop,
- .g_crop = mt9v022_g_crop,
- .cropcap = mt9v022_cropcap,
.g_mbus_config = mt9v022_g_mbus_config,
.s_mbus_config = mt9v022_s_mbus_config,
};
@@ -866,6 +870,8 @@ static const struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
static const struct v4l2_subdev_pad_ops mt9v022_subdev_pad_ops = {
.enum_mbus_code = mt9v022_enum_mbus_code,
+ .get_selection = mt9v022_get_selection,
+ .set_selection = mt9v022_set_selection,
.get_fmt = mt9v022_get_fmt,
.set_fmt = mt9v022_set_fmt,
};
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
index 9b4f5deec748..56de18263359 100644
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ b/drivers/media/i2c/soc_camera/ov2640.c
@@ -928,29 +928,25 @@ static int ov2640_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int ov2640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
-{
- a->c.left = 0;
- a->c.top = 0;
- a->c.width = UXGA_WIDTH;
- a->c.height = UXGA_HEIGHT;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int ov2640_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+static int ov2640_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = UXGA_WIDTH;
- a->bounds.height = UXGA_HEIGHT;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = UXGA_WIDTH;
+ sel->r.height = UXGA_HEIGHT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int ov2640_video_probe(struct i2c_client *client)
@@ -1024,13 +1020,12 @@ static int ov2640_g_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops ov2640_subdev_video_ops = {
.s_stream = ov2640_s_stream,
- .cropcap = ov2640_cropcap,
- .g_crop = ov2640_g_crop,
.g_mbus_config = ov2640_g_mbus_config,
};
static const struct v4l2_subdev_pad_ops ov2640_subdev_pad_ops = {
.enum_mbus_code = ov2640_enum_mbus_code,
+ .get_selection = ov2640_get_selection,
.get_fmt = ov2640_get_fmt,
.set_fmt = ov2640_set_fmt,
};
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index bab9ac0c1764..3d185bd622a3 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -850,13 +850,19 @@ static int ov5642_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int ov5642_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int ov5642_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov5642 *priv = to_ov5642(client);
- struct v4l2_rect rect = a->c;
+ struct v4l2_rect rect = sel->r;
int ret;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
v4l_bound_align_image(&rect.width, 48, OV5642_MAX_WIDTH, 1,
&rect.height, 32, OV5642_MAX_HEIGHT, 1, 0);
@@ -878,32 +884,30 @@ static int ov5642_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
return ret;
}
-static int ov5642_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int ov5642_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov5642 *priv = to_ov5642(client);
- struct v4l2_rect *rect = &a->c;
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
- *rect = priv->crop_rect;
-
- return 0;
-}
-
-static int ov5642_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = OV5642_MAX_WIDTH;
- a->bounds.height = OV5642_MAX_HEIGHT;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
-
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = OV5642_MAX_WIDTH;
+ sel->r.height = OV5642_MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = priv->crop_rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int ov5642_g_mbus_config(struct v4l2_subdev *sd,
@@ -940,14 +944,13 @@ static int ov5642_s_power(struct v4l2_subdev *sd, int on)
}
static struct v4l2_subdev_video_ops ov5642_subdev_video_ops = {
- .s_crop = ov5642_s_crop,
- .g_crop = ov5642_g_crop,
- .cropcap = ov5642_cropcap,
.g_mbus_config = ov5642_g_mbus_config,
};
static const struct v4l2_subdev_pad_ops ov5642_subdev_pad_ops = {
.enum_mbus_code = ov5642_enum_mbus_code,
+ .get_selection = ov5642_get_selection,
+ .set_selection = ov5642_set_selection,
.get_fmt = ov5642_get_fmt,
.set_fmt = ov5642_set_fmt,
};
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index 1f8af1ee8352..4bf2995e1cb8 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -432,25 +432,43 @@ static int ov6650_s_power(struct v4l2_subdev *sd, int on)
return soc_camera_set_power(&client->dev, ssdd, priv->clk, on);
}
-static int ov6650_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int ov6650_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->c = priv->rect;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = DEF_HSTRT << 1;
+ sel->r.top = DEF_VSTRT << 1;
+ sel->r.width = W_CIF;
+ sel->r.height = H_CIF;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = priv->rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
-static int ov6650_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int ov6650_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect rect = a->c;
+ struct v4l2_rect rect = sel->r;
int ret;
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
rect.left = ALIGN(rect.left, 2);
@@ -483,22 +501,6 @@ static int ov6650_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
return ret;
}
-static int ov6650_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- a->bounds.left = DEF_HSTRT << 1;
- a->bounds.top = DEF_VSTRT << 1;
- a->bounds.width = W_CIF;
- a->bounds.height = H_CIF;
- a->defrect = a->bounds;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
-
- return 0;
-}
-
static int ov6650_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
@@ -549,16 +551,15 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
struct soc_camera_sense *sense = icd->sense;
struct ov6650 *priv = to_ov6650(client);
bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
- struct v4l2_crop a = {
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .c = {
- .left = priv->rect.left + (priv->rect.width >> 1) -
- (mf->width >> (1 - half_scale)),
- .top = priv->rect.top + (priv->rect.height >> 1) -
- (mf->height >> (1 - half_scale)),
- .width = mf->width << half_scale,
- .height = mf->height << half_scale,
- },
+ struct v4l2_subdev_selection sel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP,
+ .r.left = priv->rect.left + (priv->rect.width >> 1) -
+ (mf->width >> (1 - half_scale)),
+ .r.top = priv->rect.top + (priv->rect.height >> 1) -
+ (mf->height >> (1 - half_scale)),
+ .r.width = mf->width << half_scale,
+ .r.height = mf->height << half_scale,
};
u32 code = mf->code;
unsigned long mclk, pclk;
@@ -672,7 +673,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_dbg(&client->dev, "pixel clock divider: %ld.%ld\n",
mclk / pclk, 10 * mclk % pclk / pclk);
- ret = ov6650_s_crop(sd, &a);
+ ret = ov6650_set_selection(sd, NULL, &sel);
if (!ret)
ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
if (!ret)
@@ -943,9 +944,6 @@ static int ov6650_s_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops ov6650_video_ops = {
.s_stream = ov6650_s_stream,
- .cropcap = ov6650_cropcap,
- .g_crop = ov6650_g_crop,
- .s_crop = ov6650_s_crop,
.g_parm = ov6650_g_parm,
.s_parm = ov6650_s_parm,
.g_mbus_config = ov6650_g_mbus_config,
@@ -954,6 +952,8 @@ static struct v4l2_subdev_video_ops ov6650_video_ops = {
static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
.enum_mbus_code = ov6650_enum_mbus_code,
+ .get_selection = ov6650_get_selection,
+ .set_selection = ov6650_set_selection,
.get_fmt = ov6650_get_fmt,
.set_fmt = ov6650_set_fmt,
};
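The centred-crop arithmetic in ov6650_s_fmt() deserves a worked example (assuming a current crop of left 0, width 352 and a requested width of 176 with half_scale = 1):

	/*
	 * .r.left  = 0 + (352 >> 1) - (176 >> 0) = 0
	 * .r.width = 176 << 1                    = 352
	 *
	 * i.e. the full current rectangle is kept and scaled down by two;
	 * with half_scale = 0 the same formula yields a centred 176-wide crop
	 * (left = 176 - 88 = 88, width = 176).
	 */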
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index a43410c1e254..7e68762b3a4b 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -851,29 +851,28 @@ ov772x_set_fmt_error:
return ret;
}
-static int ov772x_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
-{
- a->c.left = 0;
- a->c.top = 0;
- a->c.width = VGA_WIDTH;
- a->c.height = VGA_HEIGHT;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+static int ov772x_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = OV772X_MAX_WIDTH;
- a->bounds.height = OV772X_MAX_HEIGHT;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.width = OV772X_MAX_WIDTH;
+ sel->r.height = OV772X_MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r.width = VGA_WIDTH;
+ sel->r.height = VGA_HEIGHT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int ov772x_get_fmt(struct v4l2_subdev *sd,
@@ -1030,13 +1029,12 @@ static int ov772x_g_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
.s_stream = ov772x_s_stream,
- .cropcap = ov772x_cropcap,
- .g_crop = ov772x_g_crop,
.g_mbus_config = ov772x_g_mbus_config,
};
static const struct v4l2_subdev_pad_ops ov772x_subdev_pad_ops = {
.enum_mbus_code = ov772x_enum_mbus_code,
+ .get_selection = ov772x_get_selection,
.get_fmt = ov772x_get_fmt,
.set_fmt = ov772x_set_fmt,
};
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index 8caae1c07541..8c93c57af71c 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -561,29 +561,25 @@ static int ov9640_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int ov9640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
-{
- a->c.left = 0;
- a->c.top = 0;
- a->c.width = W_SXGA;
- a->c.height = H_SXGA;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int ov9640_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+static int ov9640_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = W_SXGA;
- a->bounds.height = H_SXGA;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ sel->r.width = W_SXGA;
+ sel->r.height = H_SXGA;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int ov9640_video_probe(struct i2c_client *client)
@@ -667,13 +663,12 @@ static int ov9640_g_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops ov9640_video_ops = {
.s_stream = ov9640_s_stream,
- .cropcap = ov9640_cropcap,
- .g_crop = ov9640_g_crop,
.g_mbus_config = ov9640_g_mbus_config,
};
static const struct v4l2_subdev_pad_ops ov9640_pad_ops = {
.enum_mbus_code = ov9640_enum_mbus_code,
+ .get_selection = ov9640_get_selection,
.set_fmt = ov9640_set_fmt,
};
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index 03a7fc7316ae..0da632d7d33a 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -737,29 +737,25 @@ static int ov9740_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int ov9740_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = OV9740_MAX_WIDTH;
- a->bounds.height = OV9740_MAX_HEIGHT;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
-
- return 0;
-}
-
-static int ov9740_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int ov9740_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- a->c.left = 0;
- a->c.top = 0;
- a->c.width = OV9740_MAX_WIDTH;
- a->c.height = OV9740_MAX_HEIGHT;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = OV9740_MAX_WIDTH;
+ sel->r.height = OV9740_MAX_HEIGHT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
/* Set status of additional camera capabilities */
@@ -914,8 +910,6 @@ static int ov9740_g_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops ov9740_video_ops = {
.s_stream = ov9740_s_stream,
- .cropcap = ov9740_cropcap,
- .g_crop = ov9740_g_crop,
.g_mbus_config = ov9740_g_mbus_config,
};
@@ -929,6 +923,7 @@ static struct v4l2_subdev_core_ops ov9740_core_ops = {
static const struct v4l2_subdev_pad_ops ov9740_pad_ops = {
.enum_mbus_code = ov9740_enum_mbus_code,
+ .get_selection = ov9740_get_selection,
.set_fmt = ov9740_set_fmt,
};
diff --git a/drivers/media/i2c/soc_camera/rj54n1cb0c.c b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
index aa7bfbb4ad71..bc8ec59a3fbd 100644
--- a/drivers/media/i2c/soc_camera/rj54n1cb0c.c
+++ b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
@@ -538,15 +538,21 @@ static int rj54n1_commit(struct i2c_client *client)
static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
s32 *out_w, s32 *out_h);
-static int rj54n1_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int rj54n1_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
- const struct v4l2_rect *rect = &a->c;
+ const struct v4l2_rect *rect = &sel->r;
int dummy = 0, output_w, output_h,
input_w = rect->width, input_h = rect->height;
int ret;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
/* arbitrary minimum width and height, edges unimportant */
soc_camera_limit_side(&dummy, &input_w,
RJ54N1_COLUMN_SKIP, 8, RJ54N1_MAX_WIDTH);
@@ -573,29 +579,30 @@ static int rj54n1_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
return 0;
}
-static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int rj54n1_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct rj54n1 *rj54n1 = to_rj54n1(client);
- a->c = rj54n1->rect;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- a->bounds.left = RJ54N1_COLUMN_SKIP;
- a->bounds.top = RJ54N1_ROW_SKIP;
- a->bounds.width = RJ54N1_MAX_WIDTH;
- a->bounds.height = RJ54N1_MAX_HEIGHT;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = RJ54N1_COLUMN_SKIP;
+ sel->r.top = RJ54N1_ROW_SKIP;
+ sel->r.width = RJ54N1_MAX_WIDTH;
+ sel->r.height = RJ54N1_MAX_HEIGHT;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = rj54n1->rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int rj54n1_get_fmt(struct v4l2_subdev *sd,
@@ -1246,15 +1253,14 @@ static int rj54n1_s_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
.s_stream = rj54n1_s_stream,
- .g_crop = rj54n1_g_crop,
- .s_crop = rj54n1_s_crop,
- .cropcap = rj54n1_cropcap,
.g_mbus_config = rj54n1_g_mbus_config,
.s_mbus_config = rj54n1_s_mbus_config,
};
static const struct v4l2_subdev_pad_ops rj54n1_subdev_pad_ops = {
.enum_mbus_code = rj54n1_enum_mbus_code,
+ .get_selection = rj54n1_get_selection,
+ .set_selection = rj54n1_set_selection,
.get_fmt = rj54n1_get_fmt,
.set_fmt = rj54n1_set_fmt,
};
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 06aff81787a7..4002c07f3857 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -676,44 +676,28 @@ tw9910_set_fmt_error:
return ret;
}
-static int tw9910_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+static int tw9910_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct tw9910_priv *priv = to_tw9910(client);
- a->c.left = 0;
- a->c.top = 0;
- if (priv->norm & V4L2_STD_NTSC) {
- a->c.width = 640;
- a->c.height = 480;
- } else {
- a->c.width = 768;
- a->c.height = 576;
- }
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct tw9910_priv *priv = to_tw9910(client);
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+ /* Only CROP, CROP_DEFAULT and CROP_BOUNDS are supported */
+ if (sel->target > V4L2_SEL_TGT_CROP_BOUNDS)
+ return -EINVAL;
- a->bounds.left = 0;
- a->bounds.top = 0;
+ sel->r.left = 0;
+ sel->r.top = 0;
if (priv->norm & V4L2_STD_NTSC) {
- a->bounds.width = 640;
- a->bounds.height = 480;
+ sel->r.width = 640;
+ sel->r.height = 480;
} else {
- a->bounds.width = 768;
- a->bounds.height = 576;
+ sel->r.width = 768;
+ sel->r.height = 576;
}
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
-
return 0;
}
@@ -921,8 +905,6 @@ static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
.s_std = tw9910_s_std,
.g_std = tw9910_g_std,
.s_stream = tw9910_s_stream,
- .cropcap = tw9910_cropcap,
- .g_crop = tw9910_g_crop,
.g_mbus_config = tw9910_g_mbus_config,
.s_mbus_config = tw9910_s_mbus_config,
.g_tvnorms = tw9910_g_tvnorms,
@@ -930,6 +912,7 @@ static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
static const struct v4l2_subdev_pad_ops tw9910_subdev_pad_ops = {
.enum_mbus_code = tw9910_enum_mbus_code,
+ .get_selection = tw9910_get_selection,
.get_fmt = tw9910_get_fmt,
.set_fmt = tw9910_set_fmt,
};
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 73fc42bc2de6..42340e364cea 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -499,7 +499,6 @@ MODULE_DEVICE_TABLE(of, ths8200_of_match);
static struct i2c_driver ths8200_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "ths8200",
.of_match_table = of_match_ptr(ths8200_of_match),
},
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index 0370dd89f1fc..2e06c06cac9b 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -210,7 +210,6 @@ MODULE_DEVICE_TABLE(i2c, tlv320aic23b_id);
static struct i2c_driver tlv320aic23b_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "tlv320aic23b",
},
.probe = tlv320aic23b_probe,
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 7cdd94842938..d5c9347f4c6d 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -977,7 +977,7 @@ static const struct v4l2_subdev_ops tvp514x_ops = {
.pad = &tvp514x_pad_ops,
};
-static struct tvp514x_decoder tvp514x_dev = {
+static const struct tvp514x_decoder tvp514x_dev = {
.streaming = 0,
.fmt_list = tvp514x_fmt_list,
.num_fmts = ARRAY_SIZE(tvp514x_fmt_list),
@@ -1233,7 +1233,6 @@ MODULE_DEVICE_TABLE(of, tvp514x_of_match);
static struct i2c_driver tvp514x_driver = {
.driver = {
.of_match_table = of_match_ptr(tvp514x_of_match),
- .owner = THIS_MODULE,
.name = TVP514X_MODULE_NAME,
},
.probe = tvp514x_probe,
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 0b6d46c453bf..4740da39d698 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -871,19 +871,22 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+static int tvp5150_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- struct v4l2_rect rect = a->c;
struct tvp5150 *decoder = to_tvp5150(sd);
+ struct v4l2_rect rect = sel->r;
v4l2_std_id std;
- unsigned int hmax;
+ int hmax;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
v4l2_dbg(1, debug, sd, "%s left=%d, top=%d, width=%d, height=%d\n",
__func__, rect.left, rect.top, rect.width, rect.height);
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
/* tvp5150 has some special limits */
rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
rect.width = clamp_t(unsigned int, rect.width,
@@ -924,44 +927,39 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
return 0;
}
-static int tvp5150_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
-{
- struct tvp5150 *decoder = to_tvp5150(sd);
-
- a->c = decoder->rect;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int tvp5150_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+static int tvp5150_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
- struct tvp5150 *decoder = to_tvp5150(sd);
+ struct tvp5150 *decoder = container_of(sd, struct tvp5150, sd);
v4l2_std_id std;
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
return -EINVAL;
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = TVP5150_H_MAX;
-
- /* Calculate height based on current standard */
- if (decoder->norm == V4L2_STD_ALL)
- std = tvp5150_read_std(sd);
- else
- std = decoder->norm;
-
- if (std & V4L2_STD_525_60)
- a->bounds.height = TVP5150_V_MAX_525_60;
- else
- a->bounds.height = TVP5150_V_MAX_OTHERS;
-
- a->defrect = a->bounds;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
-
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = TVP5150_H_MAX;
+
+ /* Calculate height based on current standard */
+ if (decoder->norm == V4L2_STD_ALL)
+ std = tvp5150_read_std(sd);
+ else
+ std = decoder->norm;
+ if (std & V4L2_STD_525_60)
+ sel->r.height = TVP5150_V_MAX_525_60;
+ else
+ sel->r.height = TVP5150_V_MAX_OTHERS;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = decoder->rect;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int tvp5150_g_mbus_config(struct v4l2_subdev *sd,
@@ -1173,7 +1171,7 @@ static int tvp5150_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
return 0;
}
-static int tvp5150_registered_async(struct v4l2_subdev *sd)
+static int tvp5150_registered(struct v4l2_subdev *sd)
{
#ifdef CONFIG_MEDIA_CONTROLLER
struct tvp5150 *decoder = to_tvp5150(sd);
@@ -1222,7 +1220,6 @@ static const struct v4l2_subdev_core_ops tvp5150_core_ops = {
.g_register = tvp5150_g_register,
.s_register = tvp5150_s_register,
#endif
- .registered_async = tvp5150_registered_async,
};
static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = {
@@ -1233,9 +1230,6 @@ static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
.s_std = tvp5150_s_std,
.s_stream = tvp5150_s_stream,
.s_routing = tvp5150_s_routing,
- .s_crop = tvp5150_s_crop,
- .g_crop = tvp5150_g_crop,
- .cropcap = tvp5150_cropcap,
.g_mbus_config = tvp5150_g_mbus_config,
};
@@ -1251,6 +1245,8 @@ static const struct v4l2_subdev_pad_ops tvp5150_pad_ops = {
.enum_frame_size = tvp5150_enum_frame_size,
.set_fmt = tvp5150_fill_fmt,
.get_fmt = tvp5150_fill_fmt,
+ .get_selection = tvp5150_get_selection,
+ .set_selection = tvp5150_set_selection,
};
static const struct v4l2_subdev_ops tvp5150_ops = {
@@ -1261,6 +1257,10 @@ static const struct v4l2_subdev_ops tvp5150_ops = {
.pad = &tvp5150_pad_ops,
};
+static const struct v4l2_subdev_internal_ops tvp5150_internal_ops = {
+ .registered = tvp5150_registered,
+};
+
/****************************************************************************
I2C Client & Driver
@@ -1474,6 +1474,7 @@ static int tvp5150_probe(struct i2c_client *c,
}
v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
+ sd->internal_ops = &tvp5150_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
#if defined(CONFIG_MEDIA_CONTROLLER)
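
Note the ordering in tvp5150_probe(): sd->internal_ops is set immediately after v4l2_i2c_subdev_init() and before the subdev is registered, because the v4l2 core invokes the .registered hook — which here performs the media-controller pad setup that the removed .registered_async core op used to do — from v4l2_device_register_subdev().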
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 4df640c3aa40..3dc3341c4896 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1086,7 +1086,6 @@ MODULE_DEVICE_TABLE(of, tvp7002_of_match);
static struct i2c_driver tvp7002_driver = {
.driver = {
.of_match_table = of_match_ptr(tvp7002_of_match),
- .owner = THIS_MODULE,
.name = TVP7002_MODULE_NAME,
},
.probe = tvp7002_probe,
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 4c72a18c0b8c..be4cb7a8bdeb 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -863,7 +863,6 @@ MODULE_DEVICE_TABLE(i2c, vs6624_id);
static struct i2c_driver vs6624_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = "vs6624",
},
.probe = vs6624_probe,
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 1795abeda658..2783531f9fc0 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -59,27 +59,24 @@ static int media_device_close(struct file *filp)
}
static int media_device_get_info(struct media_device *dev,
- struct media_device_info __user *__info)
+ struct media_device_info *info)
{
- struct media_device_info info;
-
- memset(&info, 0, sizeof(info));
+ memset(info, 0, sizeof(*info));
if (dev->driver_name[0])
- strlcpy(info.driver, dev->driver_name, sizeof(info.driver));
+ strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
else
- strlcpy(info.driver, dev->dev->driver->name, sizeof(info.driver));
+ strlcpy(info->driver, dev->dev->driver->name,
+ sizeof(info->driver));
- strlcpy(info.model, dev->model, sizeof(info.model));
- strlcpy(info.serial, dev->serial, sizeof(info.serial));
- strlcpy(info.bus_info, dev->bus_info, sizeof(info.bus_info));
+ strlcpy(info->model, dev->model, sizeof(info->model));
+ strlcpy(info->serial, dev->serial, sizeof(info->serial));
+ strlcpy(info->bus_info, dev->bus_info, sizeof(info->bus_info));
- info.media_version = MEDIA_API_VERSION;
- info.hw_revision = dev->hw_revision;
- info.driver_version = dev->driver_version;
+ info->media_version = MEDIA_API_VERSION;
+ info->hw_revision = dev->hw_revision;
+ info->driver_version = dev->driver_version;
- if (copy_to_user(__info, &info, sizeof(*__info)))
- return -EFAULT;
return 0;
}
@@ -101,29 +98,25 @@ static struct media_entity *find_entity(struct media_device *mdev, u32 id)
}
static long media_device_enum_entities(struct media_device *mdev,
- struct media_entity_desc __user *uent)
+ struct media_entity_desc *entd)
{
struct media_entity *ent;
- struct media_entity_desc u_ent;
-
- memset(&u_ent, 0, sizeof(u_ent));
- if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
- return -EFAULT;
-
- ent = find_entity(mdev, u_ent.id);
+ ent = find_entity(mdev, entd->id);
if (ent == NULL)
return -EINVAL;
- u_ent.id = media_entity_id(ent);
+ memset(entd, 0, sizeof(*entd));
+
+ entd->id = media_entity_id(ent);
if (ent->name)
- strlcpy(u_ent.name, ent->name, sizeof(u_ent.name));
- u_ent.type = ent->function;
- u_ent.revision = 0; /* Unused */
- u_ent.flags = ent->flags;
- u_ent.group_id = 0; /* Unused */
- u_ent.pads = ent->num_pads;
- u_ent.links = ent->num_links - ent->num_backlinks;
+ strlcpy(entd->name, ent->name, sizeof(entd->name));
+ entd->type = ent->function;
+ entd->revision = 0; /* Unused */
+ entd->flags = ent->flags;
+ entd->group_id = 0; /* Unused */
+ entd->pads = ent->num_pads;
+ entd->links = ent->num_links - ent->num_backlinks;
/*
* Workaround for a bug at media-ctl <= v1.10 that makes it to
@@ -139,14 +132,13 @@ static long media_device_enum_entities(struct media_device *mdev,
if (ent->function < MEDIA_ENT_F_OLD_BASE ||
ent->function > MEDIA_ENT_T_DEVNODE_UNKNOWN) {
if (is_media_entity_v4l2_subdev(ent))
- u_ent.type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ entd->type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
else if (ent->function != MEDIA_ENT_F_IO_V4L)
- u_ent.type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
+ entd->type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
}
- memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
- if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
- return -EFAULT;
+ memcpy(&entd->raw, &ent->info, sizeof(ent->info));
+
return 0;
}
@@ -158,8 +150,8 @@ static void media_device_kpad_to_upad(const struct media_pad *kpad,
upad->flags = kpad->flags;
}
-static long __media_device_enum_links(struct media_device *mdev,
- struct media_links_enum *links)
+static long media_device_enum_links(struct media_device *mdev,
+ struct media_links_enum *links)
{
struct media_entity *entity;
@@ -206,64 +198,35 @@ static long __media_device_enum_links(struct media_device *mdev,
return 0;
}
-static long media_device_enum_links(struct media_device *mdev,
- struct media_links_enum __user *ulinks)
-{
- struct media_links_enum links;
- int rval;
-
- if (copy_from_user(&links, ulinks, sizeof(links)))
- return -EFAULT;
-
- rval = __media_device_enum_links(mdev, &links);
- if (rval < 0)
- return rval;
-
- if (copy_to_user(ulinks, &links, sizeof(*ulinks)))
- return -EFAULT;
-
- return 0;
-}
-
static long media_device_setup_link(struct media_device *mdev,
- struct media_link_desc __user *_ulink)
+ struct media_link_desc *linkd)
{
struct media_link *link = NULL;
- struct media_link_desc ulink;
struct media_entity *source;
struct media_entity *sink;
- int ret;
-
- if (copy_from_user(&ulink, _ulink, sizeof(ulink)))
- return -EFAULT;
/* Find the source and sink entities and link.
*/
- source = find_entity(mdev, ulink.source.entity);
- sink = find_entity(mdev, ulink.sink.entity);
+ source = find_entity(mdev, linkd->source.entity);
+ sink = find_entity(mdev, linkd->sink.entity);
if (source == NULL || sink == NULL)
return -EINVAL;
- if (ulink.source.index >= source->num_pads ||
- ulink.sink.index >= sink->num_pads)
+ if (linkd->source.index >= source->num_pads ||
+ linkd->sink.index >= sink->num_pads)
return -EINVAL;
- link = media_entity_find_link(&source->pads[ulink.source.index],
- &sink->pads[ulink.sink.index]);
+ link = media_entity_find_link(&source->pads[linkd->source.index],
+ &sink->pads[linkd->sink.index]);
if (link == NULL)
return -EINVAL;
/* Setup the link on both entities. */
- ret = __media_entity_setup_link(link, ulink.flags);
-
- if (copy_to_user(_ulink, &ulink, sizeof(ulink)))
- return -EFAULT;
-
- return ret;
+ return __media_entity_setup_link(link, linkd->flags);
}
-static long __media_device_get_topology(struct media_device *mdev,
+static long media_device_get_topology(struct media_device *mdev,
struct media_v2_topology *topo)
{
struct media_entity *entity;
@@ -400,63 +363,98 @@ static long __media_device_get_topology(struct media_device *mdev,
return ret;
}
-static long media_device_get_topology(struct media_device *mdev,
- struct media_v2_topology __user *utopo)
+static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
{
- struct media_v2_topology ktopo;
- int ret;
-
- if (copy_from_user(&ktopo, utopo, sizeof(ktopo)))
+ /* All media IOCTLs are _IOWR() */
+ if (copy_from_user(karg, uarg, _IOC_SIZE(cmd)))
return -EFAULT;
- ret = __media_device_get_topology(mdev, &ktopo);
- if (ret < 0)
- return ret;
+ return 0;
+}
- if (copy_to_user(utopo, &ktopo, sizeof(*utopo)))
+static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd)
+{
+ /* All media IOCTLs are _IOWR() */
+ if (copy_to_user(uarg, karg, _IOC_SIZE(cmd)))
return -EFAULT;
return 0;
}
+/* Do acquire the graph mutex */
+#define MEDIA_IOC_FL_GRAPH_MUTEX BIT(0)
+
+#define MEDIA_IOC_ARG(__cmd, func, fl, from_user, to_user) \
+ [_IOC_NR(MEDIA_IOC_##__cmd)] = { \
+ .cmd = MEDIA_IOC_##__cmd, \
+ .fn = (long (*)(struct media_device *, void *))func, \
+ .flags = fl, \
+ .arg_from_user = from_user, \
+ .arg_to_user = to_user, \
+ }
+
+#define MEDIA_IOC(__cmd, func, fl) \
+ MEDIA_IOC_ARG(__cmd, func, fl, copy_arg_from_user, copy_arg_to_user)
+
+/* the table is indexed by _IOC_NR(cmd) */
+struct media_ioctl_info {
+ unsigned int cmd;
+ unsigned short flags;
+ long (*fn)(struct media_device *dev, void *arg);
+ long (*arg_from_user)(void *karg, void __user *uarg, unsigned int cmd);
+ long (*arg_to_user)(void __user *uarg, void *karg, unsigned int cmd);
+};
+
+static const struct media_ioctl_info ioctl_info[] = {
+ MEDIA_IOC(DEVICE_INFO, media_device_get_info, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(ENUM_ENTITIES, media_device_enum_entities, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(SETUP_LINK, media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX),
+};
+
static long media_device_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+ unsigned long __arg)
{
struct media_devnode *devnode = media_devnode_data(filp);
struct media_device *dev = devnode->media_dev;
+ const struct media_ioctl_info *info;
+ void __user *arg = (void __user *)__arg;
+ char __karg[256], *karg = __karg;
long ret;
- mutex_lock(&dev->graph_mutex);
- switch (cmd) {
- case MEDIA_IOC_DEVICE_INFO:
- ret = media_device_get_info(dev,
- (struct media_device_info __user *)arg);
- break;
+ if (_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_info)
+ || ioctl_info[_IOC_NR(cmd)].cmd != cmd)
+ return -ENOIOCTLCMD;
- case MEDIA_IOC_ENUM_ENTITIES:
- ret = media_device_enum_entities(dev,
- (struct media_entity_desc __user *)arg);
- break;
+ info = &ioctl_info[_IOC_NR(cmd)];
- case MEDIA_IOC_ENUM_LINKS:
- ret = media_device_enum_links(dev,
- (struct media_links_enum __user *)arg);
- break;
+ if (_IOC_SIZE(info->cmd) > sizeof(__karg)) {
+ karg = kmalloc(_IOC_SIZE(info->cmd), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+ }
- case MEDIA_IOC_SETUP_LINK:
- ret = media_device_setup_link(dev,
- (struct media_link_desc __user *)arg);
- break;
+ if (info->arg_from_user) {
+ ret = info->arg_from_user(karg, arg, cmd);
+ if (ret)
+ goto out_free;
+ }
- case MEDIA_IOC_G_TOPOLOGY:
- ret = media_device_get_topology(dev,
- (struct media_v2_topology __user *)arg);
- break;
+ if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX)
+ mutex_lock(&dev->graph_mutex);
- default:
- ret = -ENOIOCTLCMD;
- }
- mutex_unlock(&dev->graph_mutex);
+ ret = info->fn(dev, karg);
+
+ if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX)
+ mutex_unlock(&dev->graph_mutex);
+
+ if (!ret && info->arg_to_user)
+ ret = info->arg_to_user(arg, karg, cmd);
+
+out_free:
+ if (karg != __karg)
+ kfree(karg);
return ret;
}
@@ -486,7 +484,7 @@ static long media_device_enum_links32(struct media_device *mdev,
links.pads = compat_ptr(pads_ptr);
links.links = compat_ptr(links_ptr);
- return __media_device_enum_links(mdev, &links);
+ return media_device_enum_links(mdev, &links);
}
#define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32)
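
The rewritten dispatcher leans on two properties of the media ioctl numbers: _IOC_NR(cmd) is a small dense index suitable for an array lookup, and _IOC_SIZE(cmd) encodes the argument size, so one generic copy_from_user/copy_to_user pair serves every command. The same pattern in miniature, as a runnable userspace model — the DEMO_IOC_INFO command is made up for illustration and is not part of the media API:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* made-up command with the same _IOWR shape as MEDIA_IOC_DEVICE_INFO */
struct demo_info { char name[32]; };
#define DEMO_IOC_INFO _IOWR('d', 0, struct demo_info)

struct demo_ioctl_info {
	unsigned int cmd;
	long (*fn)(void *arg);
};

static long demo_get_info(void *arg)
{
	struct demo_info *info = arg;

	memset(info, 0, sizeof(*info));
	strcpy(info->name, "demo");
	return 0;
}

/* indexed by _IOC_NR(cmd), exactly like ioctl_info[] above */
static const struct demo_ioctl_info demo_table[] = {
	[_IOC_NR(DEMO_IOC_INFO)] = { DEMO_IOC_INFO, demo_get_info },
};

static long demo_dispatch(unsigned int cmd, void *arg)
{
	if (_IOC_NR(cmd) >= sizeof(demo_table) / sizeof(demo_table[0]) ||
	    demo_table[_IOC_NR(cmd)].cmd != cmd)
		return -1;	/* the kernel returns -ENOIOCTLCMD here */
	return demo_table[_IOC_NR(cmd)].fn(arg);
}

int main(void)
{
	struct demo_info info;

	if (demo_dispatch(DEMO_IOC_INFO, &info) == 0)
		printf("%s: arg is %u bytes\n", info.name,
		       (unsigned int)_IOC_SIZE(DEMO_IOC_INFO));
	return 0;
}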
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index d8a2299f0c2a..c68239e60487 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -65,6 +65,8 @@ static inline const char *intf_type(struct media_interface *intf)
return "v4l-subdev";
case MEDIA_INTF_T_V4L_SWRADIO:
return "v4l-swradio";
+ case MEDIA_INTF_T_V4L_TOUCH:
+ return "v4l-touch";
case MEDIA_INTF_T_ALSA_PCM_CAPTURE:
return "alsa-pcm-capture";
case MEDIA_INTF_T_ALSA_PCM_PLAYBACK:
@@ -806,17 +808,18 @@ int __media_entity_setup_link(struct media_link *link, u32 flags)
mdev = source->graph_obj.mdev;
- if (mdev->link_notify) {
- ret = mdev->link_notify(link, flags,
- MEDIA_DEV_NOTIFY_PRE_LINK_CH);
+ if (mdev->ops && mdev->ops->link_notify) {
+ ret = mdev->ops->link_notify(link, flags,
+ MEDIA_DEV_NOTIFY_PRE_LINK_CH);
if (ret < 0)
return ret;
}
ret = __media_entity_setup_link_notify(link, flags);
- if (mdev->link_notify)
- mdev->link_notify(link, flags, MEDIA_DEV_NOTIFY_POST_LINK_CH);
+ if (mdev->ops && mdev->ops->link_notify)
+ mdev->ops->link_notify(link, flags,
+ MEDIA_DEV_NOTIFY_POST_LINK_CH);
return ret;
}
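
The media-entity hunk tracks the move of link_notify from a bare function pointer on struct media_device into a const struct media_device_ops table. A bridge driver now wires it up roughly like this (the mydrv names are hypothetical; the ops structure is the framework's):

#include <media/media-device.h>

static int mydrv_link_notify(struct media_link *link, u32 flags,
			     unsigned int notification)
{
	/* called before (PRE) and after (POST) a link flags change */
	return 0;
}

static const struct media_device_ops mydrv_media_ops = {
	.link_notify = mydrv_link_notify,
};

/* during bridge init, instead of mdev->link_notify = ...: */
/*	mdev->ops = &mydrv_media_ops; */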
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 4f6467fbaeb4..da28e68c87d8 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -13,6 +13,7 @@ if MEDIA_CAMERA_SUPPORT
source "drivers/media/pci/meye/Kconfig"
source "drivers/media/pci/solo6x10/Kconfig"
source "drivers/media/pci/sta2x11/Kconfig"
+source "drivers/media/pci/tw5864/Kconfig"
source "drivers/media/pci/tw68/Kconfig"
source "drivers/media/pci/tw686x/Kconfig"
source "drivers/media/pci/zoran/Kconfig"
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index 2e54c36441f7..a7e8af0f64a7 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -31,3 +31,4 @@ obj-$(CONFIG_VIDEO_MEYE) += meye/
obj-$(CONFIG_STA2X11_VIP) += sta2x11/
obj-$(CONFIG_VIDEO_SOLO6X10) += solo6x10/
obj-$(CONFIG_VIDEO_COBALT) += cobalt/
+obj-$(CONFIG_VIDEO_TW5864) += tw5864/
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index df54e17ef864..97b91a9f9fa9 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2804,30 +2804,44 @@ static int bttv_cropcap(struct file *file, void *priv,
cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- *cap = bttv_tvnorms[btv->tvnorm].cropcap;
+ /* defrect and bounds are set via g_selection */
+ cap->pixelaspect = bttv_tvnorms[btv->tvnorm].cropcap.pixelaspect;
return 0;
}
-static int bttv_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
+static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
{
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ sel->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- /* No fh->do_crop = 1; because btv->crop[1] may be
- inconsistent with fh->width or fh->height and apps
- do not expect a change here. */
-
- crop->c = btv->crop[!!fh->do_crop].rect;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * No fh->do_crop = 1; because btv->crop[1] may be
+ * inconsistent with fh->width or fh->height and apps
+ * do not expect a change here.
+ */
+ sel->r = btv->crop[!!fh->do_crop].rect;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r = bttv_tvnorms[btv->tvnorm].cropcap.defrect;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r = bttv_tvnorms[btv->tvnorm].cropcap.bounds;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
-static int bttv_s_crop(struct file *file, void *f, const struct v4l2_crop *crop)
+static int bttv_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
{
struct bttv_fh *fh = f;
struct bttv *btv = fh->btv;
@@ -2839,8 +2853,11 @@ static int bttv_s_crop(struct file *file, void *f, const struct v4l2_crop *crop)
__s32 b_right;
__s32 b_bottom;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ sel->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
/* Make sure tvnorm, vbi_end and the current cropping
@@ -2864,22 +2881,24 @@ static int bttv_s_crop(struct file *file, void *f, const struct v4l2_crop *crop)
}
/* Min. scaled size 48 x 32. */
- c.rect.left = clamp_t(s32, crop->c.left, b_left, b_right - 48);
+ c.rect.left = clamp_t(s32, sel->r.left, b_left, b_right - 48);
c.rect.left = min(c.rect.left, (__s32) MAX_HDELAY);
- c.rect.width = clamp_t(s32, crop->c.width,
+ c.rect.width = clamp_t(s32, sel->r.width,
48, b_right - c.rect.left);
- c.rect.top = clamp_t(s32, crop->c.top, b_top, b_bottom - 32);
+ c.rect.top = clamp_t(s32, sel->r.top, b_top, b_bottom - 32);
/* Top and height must be a multiple of two. */
c.rect.top = (c.rect.top + 1) & ~1;
- c.rect.height = clamp_t(s32, crop->c.height,
+ c.rect.height = clamp_t(s32, sel->r.height,
32, b_bottom - c.rect.top);
c.rect.height = (c.rect.height + 1) & ~1;
bttv_crop_calc_limits(&c);
+ sel->r = c.rect;
+
btv->crop[1] = c;
fh->do_crop = 1;
@@ -3047,10 +3066,10 @@ static int bttv_open(struct file *file)
which only change on request. These are stored in btv->crop[1].
However for compatibility with V4L apps and cropping unaware
V4L2 apps we now reset the cropping parameters as seen through
- this fh, which is to say VIDIOC_G_CROP and scaling limit checks
+ this fh, which is to say VIDIOC_G_SELECTION and scaling limit checks
will use btv->crop[0], the default cropping parameters for the
current video standard, and VIDIOC_S_FMT will not implicitly
- change the cropping parameters until VIDIOC_S_CROP has been
+ change the cropping parameters until VIDIOC_S_SELECTION has been
called. */
fh->do_crop = !reset_crop; /* module parameter */
@@ -3159,8 +3178,8 @@ static const struct v4l2_ioctl_ops bttv_ioctl_ops = {
.vidioc_streamoff = bttv_streamoff,
.vidioc_g_tuner = bttv_g_tuner,
.vidioc_s_tuner = bttv_s_tuner,
- .vidioc_g_crop = bttv_g_crop,
- .vidioc_s_crop = bttv_s_crop,
+ .vidioc_g_selection = bttv_g_selection,
+ .vidioc_s_selection = bttv_s_selection,
.vidioc_g_fbuf = bttv_g_fbuf,
.vidioc_s_fbuf = bttv_s_fbuf,
.vidioc_overlay = bttv_overlay,
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index b1e0023f923c..9efc4559fa8e 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -232,7 +232,7 @@ struct bttv_fh {
const struct bttv_format *ovfmt;
struct bttv_overlay ov;
- /* Application called VIDIOC_S_CROP. */
+ /* Application called VIDIOC_S_SELECTION. */
int do_crop;
/* vbi capture */
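
For userspace, the effect of the bttv conversion is that VIDIOC_CROPCAP now reports only the pixel aspect, while the bounds, default and active rectangles are fetched per target through VIDIOC_G_SELECTION. A small standalone query program — the device path is just an example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_selection sel = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.target = V4L2_SEL_TGT_CROP_BOUNDS,	/* or _CROP, _CROP_DEFAULT */
	};
	int fd = open("/dev/video0", O_RDWR);	/* example device path */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_G_SELECTION, &sel) == 0)
		printf("crop bounds: %ux%u@(%d,%d)\n",
		       sel.r.width, sel.r.height, sel.r.left, sel.r.top);
	return 0;
}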
diff --git a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
index f0bdf10cfd57..49013c6b8646 100644
--- a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
+++ b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
@@ -510,7 +510,7 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
return vmalloc_to_page(pageptr);
}
-static struct snd_pcm_ops snd_cobalt_pcm_capture_ops = {
+static const struct snd_pcm_ops snd_cobalt_pcm_capture_ops = {
.open = snd_cobalt_pcm_capture_open,
.close = snd_cobalt_pcm_capture_close,
.ioctl = snd_cobalt_pcm_ioctl,
@@ -522,7 +522,7 @@ static struct snd_pcm_ops snd_cobalt_pcm_capture_ops = {
.page = snd_pcm_get_vmalloc_page,
};
-static struct snd_pcm_ops snd_cobalt_pcm_playback_ops = {
+static const struct snd_pcm_ops snd_cobalt_pcm_playback_ops = {
.open = snd_cobalt_pcm_playback_open,
.close = snd_cobalt_pcm_playback_close,
.ioctl = snd_cobalt_pcm_ioctl,
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 476f7f0dcf81..979634000597 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -60,30 +60,31 @@ MODULE_DESCRIPTION("cobalt driver");
MODULE_LICENSE("GPL");
static u8 edid[256] = {
- 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
- 0x50, 0x21, 0x9C, 0x27, 0x00, 0x00, 0x00, 0x00,
- 0x19, 0x12, 0x01, 0x03, 0x80, 0x00, 0x00, 0x78,
- 0x0E, 0x00, 0xB2, 0xA0, 0x57, 0x49, 0x9B, 0x26,
- 0x10, 0x48, 0x4F, 0x2F, 0xCF, 0x00, 0x31, 0x59,
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x50, 0x21, 0x32, 0x27, 0x00, 0x00, 0x00, 0x00,
+ 0x22, 0x1a, 0x01, 0x03, 0x80, 0x30, 0x1b, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x2f, 0xcf, 0x00, 0x31, 0x59,
0x45, 0x59, 0x61, 0x59, 0x81, 0x99, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A,
- 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C,
- 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E,
- 0x00, 0x00, 0x00, 0xFD, 0x00, 0x31, 0x55, 0x18,
- 0x5E, 0x11, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x43,
- 0x20, 0x39, 0x30, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A,
- 0x0A, 0x0A, 0x0A, 0x0A, 0x00, 0x00, 0x00, 0x10,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
+ 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x46, 0x00, 0xe0, 0x0e, 0x11, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x63,
+ 0x6f, 0x62, 0x61, 0x6c, 0x74, 0x0a, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x9c,
+
+ 0x02, 0x03, 0x1f, 0xf0, 0x4a, 0x90, 0x1f, 0x04,
+ 0x13, 0x22, 0x21, 0x20, 0x02, 0x11, 0x01, 0x23,
+ 0x09, 0x07, 0x07, 0x68, 0x03, 0x0c, 0x00, 0x10,
+ 0x00, 0x00, 0x22, 0x0f, 0xe2, 0x00, 0xea, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x68,
- 0x02, 0x03, 0x1a, 0xc0, 0x48, 0xa2, 0x10, 0x04,
- 0x02, 0x01, 0x21, 0x14, 0x13, 0x23, 0x09, 0x07,
- 0x07, 0x65, 0x03, 0x0c, 0x00, 0x10, 0x00, 0xe2,
- 0x00, 0x2a, 0x01, 0x1d, 0x00, 0x80, 0x51, 0xd0,
- 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x1e, 0x8c, 0x0a, 0xd0, 0x8a,
- 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -91,7 +92,7 @@ static u8 edid[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7,
};
static void cobalt_set_interrupt(struct cobalt *cobalt, bool enable)
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index d05672fe9ff9..5c76637900d0 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -161,8 +161,11 @@ static void cobalt_enable_output(struct cobalt_stream *s)
struct v4l2_subdev_format sd_fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ u64 clk = bt->pixelclock;
- if (!cobalt_cpld_set_freq(cobalt, bt->pixelclock)) {
+ if (bt->flags & V4L2_DV_FL_REDUCED_FPS)
+ clk = div_u64(clk * 1000ULL, 1001);
+ if (!cobalt_cpld_set_freq(cobalt, clk)) {
cobalt_err("pixelclock out of range\n");
return;
}
@@ -644,7 +647,7 @@ static int cobalt_s_dv_timings(struct file *file, void *priv_fh,
return 0;
}
- if (v4l2_match_dv_timings(timings, &s->timings, 0, false))
+ if (v4l2_match_dv_timings(timings, &s->timings, 0, true))
return 0;
if (vb2_is_busy(&s->q))
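
With V4L2_DV_FL_REDUCED_FPS set, cobalt now scales the nominal pixel clock by 1000/1001 before programming the CPLD, e.g. turning 720p60's 74.25 MHz into 720p59.94's 74.1758 MHz. The multiplication happens first, in 64 bits, so nothing is truncated; the same arithmetic checked standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk = 74250000;	/* 720p60 pixel clock, Hz */

	/* same computation as div_u64(clk * 1000ULL, 1001) */
	clk = clk * 1000ULL / 1001;
	printf("%llu Hz\n", (unsigned long long)clk);	/* prints 74175824 Hz */
	return 0;
}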
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index ffb6acdc575f..5344510fbea3 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -311,7 +311,7 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
return vmalloc_to_page(pageptr);
}
-static struct snd_pcm_ops snd_cx18_pcm_capture_ops = {
+static const struct snd_pcm_ops snd_cx18_pcm_capture_ops = {
.open = snd_cx18_pcm_capture_open,
.close = snd_cx18_pcm_capture_close,
.ioctl = snd_cx18_pcm_ioctl,
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index 4af8cd6df95d..c9329371a3f8 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -98,7 +98,8 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
case CX18_HW_Z8F0811_IR_RX_HAUP:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_BIT_RC5;
+ init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
+ RC_BIT_RC6_6A_32;
init_data->name = cx->card_name;
info.platform_data = init_data;
break;
diff --git a/drivers/media/pci/cx23885/altera-ci.h b/drivers/media/pci/cx23885/altera-ci.h
index 6c511723fd1b..57a40c84b46e 100644
--- a/drivers/media/pci/cx23885/altera-ci.h
+++ b/drivers/media/pci/cx23885/altera-ci.h
@@ -20,8 +20,6 @@
#ifndef __ALTERA_CI_H
#define __ALTERA_CI_H
-#include <linux/kconfig.h>
-
#define ALT_DATA 0x000000ff
#define ALT_TDI 0x00008000
#define ALT_TDO 0x00004000
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 4d080da7afaf..da892f3e3c29 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1223,7 +1223,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
cx23885_cancel_buffers(&dev->ts1);
}
-static struct vb2_ops cx23885_qops = {
+static const struct vb2_ops cx23885_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index ae7c2e89ad1c..6115d4e148ba 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -506,7 +506,7 @@ static struct page *snd_cx23885_page(struct snd_pcm_substream *substream,
/*
* operators
*/
-static struct snd_pcm_ops snd_cx23885_pcm_ops = {
+static const struct snd_pcm_ops snd_cx23885_pcm_ops = {
.open = snd_cx23885_pcm_open,
.close = snd_cx23885_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 4abf50f2694f..99ba8d6328f0 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -770,6 +770,11 @@ struct cx23885_board cx23885_boards[] = {
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC] = {
+ .name = "Hauppauge WinTV-QuadHD-ATSC",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -1073,6 +1078,14 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x0070,
.subdevice = 0x6b28,
.card = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB, /* Tuner Pair 2 */
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x6a18,
+ .card = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC, /* Tuner Pair 1 */
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x6b18,
+ .card = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC, /* Tuner Pair 2 */
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1278,6 +1291,18 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
/* WinTV-QuadHD (DVB) Tuner Pair 2 (PCIe, IR, half height,
DVB-T/T2/C, DVB-T/T2/C */
break;
+ case 165100:
+ /*
+ * WinTV-QuadHD (ATSC) Tuner Pair 1 (PCIe, IR, half height,
+ * ATSC, ATSC
+ */
+ break;
+ case 165101:
+ /*
+ * WinTV-QuadHD (ATSC) Tuner Pair 2 (PCIe, IR, half height,
+ * ATSC, ATSC
+ */
+ break;
default:
printk(KERN_WARNING "%s: warning: "
"unknown hauppauge model #%d\n",
@@ -1751,6 +1776,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
break;
case CX23885_BOARD_HAUPPAUGE_HVR5525:
case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
/*
* HVR5525 GPIO Details:
* GPIO-00 IR_WIDE
@@ -1826,6 +1852,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
/* FIXME: Implement me */
break;
case CX23885_BOARD_HAUPPAUGE_HVR1270:
@@ -2025,6 +2052,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
case CX23885_BOARD_HAUPPAUGE_HVR5525:
case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
break;
@@ -2171,6 +2199,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index e5748a93c479..818f3c2fc98d 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -74,6 +74,7 @@
#include "sp2.h"
#include "m88ds3103.h"
#include "m88rs6000t.h"
+#include "lgdt3306a.h"
static unsigned int debug;
@@ -172,7 +173,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
cx23885_cancel_buffers(port);
}
-static struct vb2_ops dvb_qops = {
+static const struct vb2_ops dvb_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
@@ -574,6 +575,30 @@ static struct stb6100_config prof_8000_stb6100_config = {
.refclock = 27000000,
};
+static struct lgdt3306a_config hauppauge_quadHD_ATSC_a_config = {
+ .i2c_addr = 0x59,
+ .qam_if_khz = 4000,
+ .vsb_if_khz = 3250,
+ .deny_i2c_rptr = 1, /* Disabled */
+ .spectral_inversion = 0, /* Disabled */
+ .mpeg_mode = LGDT3306A_MPEG_SERIAL,
+ .tpclk_edge = LGDT3306A_TPCLK_RISING_EDGE,
+ .tpvalid_polarity = LGDT3306A_TP_VALID_HIGH,
+ .xtalMHz = 25, /* 24 or 25 */
+};
+
+static struct lgdt3306a_config hauppauge_quadHD_ATSC_b_config = {
+ .i2c_addr = 0x0e,
+ .qam_if_khz = 4000,
+ .vsb_if_khz = 3250,
+ .deny_i2c_rptr = 1, /* Disabled */
+ .spectral_inversion = 0, /* Disabled */
+ .mpeg_mode = LGDT3306A_MPEG_SERIAL,
+ .tpclk_edge = LGDT3306A_TPCLK_RISING_EDGE,
+ .tpvalid_polarity = LGDT3306A_TP_VALID_HIGH,
+ .xtalMHz = 25, /* 24 or 25 */
+};
+
static int p8000_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
@@ -867,12 +892,6 @@ static const struct tda10071_platform_data hauppauge_tda10071_pdata = {
.tuner_i2c_addr = 0x54,
};
-static const struct si2165_config hauppauge_hvr4400_si2165_config = {
- .i2c_addr = 0x64,
- .chip_mode = SI2165_MODE_PLL_XTAL,
- .ref_freq_Hz = 16000000,
-};
-
static const struct m88ds3103_config dvbsky_t9580_m88ds3103_config = {
.i2c_addr = 0x68,
.clock = 27000000,
@@ -1182,6 +1201,7 @@ static int dvb_register(struct cx23885_tsport *port)
struct cx23885_i2c *i2c_bus = NULL, *i2c_bus2 = NULL;
struct vb2_dvb_frontend *fe0, *fe1 = NULL;
struct si2168_config si2168_config;
+ struct si2165_platform_data si2165_pdata;
struct si2157_config si2157_config;
struct ts2020_config ts2020_config;
struct i2c_board_info info;
@@ -1700,6 +1720,9 @@ static int dvb_register(struct cx23885_tsport *port)
}
break;
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
+ if (port->nr > 2)
+ return 0;
+
i2c_bus = &dev->i2c_bus[0];
mfe_shared = 1;/* MFE */
port->frontends.gate = 0; /* not clear to me yet */
@@ -1839,9 +1862,26 @@ static int dvb_register(struct cx23885_tsport *port)
break;
/* port c */
case 2:
- fe0->dvb.frontend = dvb_attach(si2165_attach,
- &hauppauge_hvr4400_si2165_config,
- &i2c_bus->i2c_adap);
+ /* attach frontend */
+ memset(&si2165_pdata, 0, sizeof(si2165_pdata));
+ si2165_pdata.fe = &fe0->dvb.frontend;
+ si2165_pdata.chip_mode = SI2165_MODE_PLL_XTAL;
+ si2165_pdata.ref_freq_Hz = 16000000;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2165", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2165_pdata;
+ request_module(info.type);
+ client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
+ if (client_demod == NULL ||
+ client_demod->dev.driver == NULL)
+ goto frontend_detach;
+ if (!try_module_get(client_demod->dev.driver->owner)) {
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_demod = client_demod;
+
if (fe0->dvb.frontend == NULL)
break;
fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
@@ -2365,6 +2405,81 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC:
+ switch (port->nr) {
+ /* port b - Terrestrial/cable */
+ case 1:
+ /* attach frontend */
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(lgdt3306a_attach,
+ &hauppauge_quadHD_ATSC_a_config, &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend == NULL)
+ break;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = fe0->dvb.frontend;
+ si2157_config.if_port = 1;
+ si2157_config.inversion = 1;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module("%s", info.type);
+ client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!client_tuner || !client_tuner->dev.driver) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ port->i2c_client_tuner = client_tuner;
+ break;
+
+ /* port c - terrestrial/cable */
+ case 2:
+ /* attach frontend */
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(lgdt3306a_attach,
+ &hauppauge_quadHD_ATSC_b_config, &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend == NULL)
+ break;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = fe0->dvb.frontend;
+ si2157_config.if_port = 1;
+ si2157_config.inversion = 1;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x62;
+ info.platform_data = &si2157_config;
+ request_module("%s", info.type);
+ client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!client_tuner || !client_tuner->dev.driver) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ port->i2c_client_tuner = client_tuner;
+ break;
+ }
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
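
Both QuadHD-ATSC ports use the I2C-client binding idiom that replaced dvb_attach() for si2168/si2157-class parts: fill an i2c_board_info with platform data, request the module, create the device, then pin the driver module so it cannot unload under a live frontend. Condensed from the port b case above (same names, error labels as in the driver):

	struct i2c_client *client;
	struct i2c_board_info info;

	memset(&si2157_config, 0, sizeof(si2157_config));
	si2157_config.fe = fe0->dvb.frontend;
	si2157_config.if_port = 1;
	si2157_config.inversion = 1;

	memset(&info, 0, sizeof(info));
	strlcpy(info.type, "si2157", I2C_NAME_SIZE);
	info.addr = 0x60;
	info.platform_data = &si2157_config;

	request_module("%s", info.type);	/* make sure the driver is loaded */
	client = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
	if (!client || !client->dev.driver)	/* device created, no driver bound */
		goto frontend_detach;
	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);	/* driver module already unloading */
		goto frontend_detach;
	}
	port->i2c_client_tuner = client;	/* put back at unregister time */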
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index ae061b358591..61591225be9a 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -258,7 +258,7 @@ static u32 cx23885_functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
}
-static struct i2c_algorithm cx23885_i2c_algo_template = {
+static const struct i2c_algorithm cx23885_i2c_algo_template = {
.master_xfer = i2c_xfer,
.functionality = cx23885_functionality,
};
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 64328d08ac2f..410c3141c163 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -293,7 +293,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
/* Integrated CX23885 IR controller */
driver_type = RC_DRIVER_IR_RAW;
- allowed_protos = RC_BIT_NEC;
+ allowed_protos = RC_BIT_ALL;
/* The grey Terratec remote with orange buttons */
rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS;
break;
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 6d735222a958..33d168ef278d 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -517,7 +517,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static struct vb2_ops cx23885_video_qops = {
+static const struct vb2_ops cx23885_video_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 24a0a6c5b501..a6735afe2269 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -103,7 +103,8 @@
#define CX23885_BOARD_HAUPPAUGE_STARBURST 53
#define CX23885_BOARD_VIEWCAST_260E 54
#define CX23885_BOARD_VIEWCAST_460E 55
-#define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB 56
+#define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB 56
+#define CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC 57
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
@@ -256,7 +257,7 @@ struct cx23885_dmaqueue {
struct cx23885_tsport {
struct cx23885_dev *dev;
- int nr;
+ unsigned nr;
int sram_chno;
struct vb2_dvb_frontends frontends;
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index df189b16af12..4711583de8fe 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -649,7 +649,7 @@ static struct page *snd_cx25821_page(struct snd_pcm_substream *substream,
/*
* operators
*/
-static struct snd_pcm_ops snd_cx25821_pcm_ops = {
+static const struct snd_pcm_ops snd_cx25821_pcm_ops = {
.open = snd_cx25821_pcm_open,
.close = snd_cx25821_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/cx25821/cx25821-audio-upstream.c b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
index 68dbc2dbc982..7c8edb6181ec 100644
--- a/drivers/media/pci/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/pci/cx25821/cx25821-audio-upstream.c
@@ -242,8 +242,7 @@ void cx25821_stop_upstream_audio(struct cx25821_dev *dev)
dev->_audioframe_count = 0;
dev->_audiofile_status = END_OF_FILE;
- kfree(dev->_irq_audio_queues);
- dev->_irq_audio_queues = NULL;
+ flush_work(&dev->_audio_work_entry);
kfree(dev->_audiofilename);
}
@@ -446,8 +445,7 @@ static int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
dev->_audioframe_index = dev->_last_index_irq;
- queue_work(dev->_irq_audio_queues,
- &dev->_audio_work_entry);
+ schedule_work(&dev->_audio_work_entry);
}
if (dev->_is_first_audio_frame) {
@@ -639,14 +637,6 @@ int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select)
/* Work queue */
INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler);
- dev->_irq_audio_queues =
- create_singlethread_workqueue("cx25821_audioworkqueue");
-
- if (!dev->_irq_audio_queues) {
- printk(KERN_DEBUG
- pr_fmt("ERROR: create_singlethread_workqueue() for Audio FAILED!\n"));
- return -ENOMEM;
- }
dev->_last_index_irq = 0;
dev->_audio_is_running = 0;
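
The cx25821 change retires the driver's private single-threaded workqueue in favour of the system workqueue: INIT_WORK() once, schedule_work() from the IRQ path, flush_work() at teardown. The generic shape of that conversion, with hypothetical names:

#include <linux/workqueue.h>

static struct work_struct audio_work;

static void audio_handler(struct work_struct *work)
{
	/* deferred processing formerly run on the private queue */
}

static void audio_init(void)
{
	/* replaces create_singlethread_workqueue() + its error handling */
	INIT_WORK(&audio_work, audio_handler);
}

static void audio_irq_path(void)
{
	/* replaces queue_work(private_wq, &audio_work) */
	schedule_work(&audio_work);
}

static void audio_teardown(void)
{
	/* replaces flushing and destroying the private queue */
	flush_work(&audio_work);
}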
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index dca37c7dba73..63ba25b82692 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -281,7 +281,7 @@ static u32 cx25821_functionality(struct i2c_adapter *adap)
I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA;
}
-static struct i2c_algorithm cx25821_i2c_algo_template = {
+static const struct i2c_algorithm cx25821_i2c_algo_template = {
.master_xfer = i2c_xfer,
.functionality = cx25821_functionality,
#ifdef NEED_ALGO_CONTROL
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index adcd09be347d..7ce352a0f2d3 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -307,7 +307,7 @@ static void cx25821_stop_streaming(struct vb2_queue *q)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static struct vb2_ops cx25821_video_qops = {
+static const struct vb2_ops cx25821_video_qops = {
.queue_setup = cx25821_queue_setup,
.buf_prepare = cx25821_buffer_prepare,
.buf_finish = cx25821_buffer_finish,
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index 35c7375e4617..ef61dea982e8 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -293,7 +293,6 @@ struct cx25821_dev {
u32 audio_upstream_riscbuf_size;
u32 audio_upstream_databuf_size;
int _audioframe_index;
- struct workqueue_struct *_irq_audio_queues;
struct work_struct _audio_work_entry;
char *input_audiofilename;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index f3f13eb0c16e..723f06462104 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -599,7 +599,7 @@ static struct page *snd_cx88_page(struct snd_pcm_substream *substream,
/*
* operators
*/
-static struct snd_pcm_ops snd_cx88_pcm_ops = {
+static const struct snd_pcm_ops snd_cx88_pcm_ops = {
.open = snd_cx88_pcm_open,
.close = snd_cx88_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 04fe9af2a802..b532e49e8f33 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -756,7 +756,7 @@ static void stop_streaming(struct vb2_queue *q)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static struct vb2_ops blackbird_qops = {
+static const struct vb2_ops blackbird_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 5bb63e7a5691..ac2392d8887a 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -156,7 +156,7 @@ static void stop_streaming(struct vb2_queue *q)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static struct vb2_ops dvb_qops = {
+static const struct vb2_ops dvb_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index 3f1342c98b46..cd7687183381 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -144,7 +144,8 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
scancode = RC_SCANCODE_NECX(addr, cmd);
if (0 == (gpio & ir->mask_keyup))
- rc_keydown_notimeout(ir->dev, RC_TYPE_NEC, scancode, 0);
+ rc_keydown_notimeout(ir->dev, RC_TYPE_NECX, scancode,
+ 0);
else
rc_keyup(ir->dev);
@@ -345,7 +346,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
* 002-T mini RC, provided with newer PV hardware
*/
ir_codes = RC_MAP_PIXELVIEW_MK12;
- rc_type = RC_BIT_NEC;
+ rc_type = RC_BIT_NECX;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keyup = 0x80;
ir->polling = 10; /* ms */
@@ -631,7 +632,8 @@ void cx88_i2c_init_ir(struct cx88_core *core)
/* Hauppauge XVR */
core->init_data.name = "cx88 Hauppauge XVR remote";
core->init_data.ir_codes = RC_MAP_HAUPPAUGE;
- core->init_data.type = RC_BIT_RC5;
+ core->init_data.type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
+ RC_BIT_RC6_6A_32;
core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
info.platform_data = &core->init_data;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 5dc1e3f08d50..d83eb3b10f54 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -567,7 +567,7 @@ static void stop_streaming(struct vb2_queue *q)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static struct vb2_ops cx8800_video_qops = {
+static const struct vb2_ops cx8800_video_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 47def73b3502..18e3a4deee64 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1643,53 +1643,53 @@ fail:
/******************************************************************************/
/******************************************************************************/
-static struct ddb_info ddb_none = {
+static const struct ddb_info ddb_none = {
.type = DDB_NONE,
.name = "Digital Devices PCIe bridge",
};
-static struct ddb_info ddb_octopus = {
+static const struct ddb_info ddb_octopus = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Octopus DVB adapter",
.port_num = 4,
};
-static struct ddb_info ddb_octopus_le = {
+static const struct ddb_info ddb_octopus_le = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Octopus LE DVB adapter",
.port_num = 2,
};
-static struct ddb_info ddb_octopus_mini = {
+static const struct ddb_info ddb_octopus_mini = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Octopus Mini",
.port_num = 4,
};
-static struct ddb_info ddb_v6 = {
+static const struct ddb_info ddb_v6 = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Cine S2 V6 DVB adapter",
.port_num = 3,
};
-static struct ddb_info ddb_v6_5 = {
+static const struct ddb_info ddb_v6_5 = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Cine S2 V6.5 DVB adapter",
.port_num = 4,
};
-static struct ddb_info ddb_dvbct = {
+static const struct ddb_info ddb_dvbct = {
.type = DDB_OCTOPUS,
.name = "Digital Devices DVBCT V6.1 DVB adapter",
.port_num = 3,
};
-static struct ddb_info ddb_satixS2v3 = {
+static const struct ddb_info ddb_satixS2v3 = {
.type = DDB_OCTOPUS,
.name = "Mystique SaTiX-S2 V3 DVB adapter",
.port_num = 3,
};
-static struct ddb_info ddb_octopusv3 = {
+static const struct ddb_info ddb_octopusv3 = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Octopus V3 DVB adapter",
.port_num = 4,
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index f198b9826ed8..a26f9800eca3 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -318,7 +318,7 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
return vmalloc_to_page(pageptr);
}
-static struct snd_pcm_ops snd_ivtv_pcm_capture_ops = {
+static const struct snd_pcm_ops snd_ivtv_pcm_capture_ops = {
.open = snd_ivtv_pcm_capture_open,
.close = snd_ivtv_pcm_capture_close,
.ioctl = snd_ivtv_pcm_ioctl,
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 374033a5bdaf..ee48c3e09de4 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -750,7 +750,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
spin_lock_init(&itv->lock);
spin_lock_init(&itv->dma_reg_lock);
- init_kthread_worker(&itv->irq_worker);
+ kthread_init_worker(&itv->irq_worker);
itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
"%s", itv->v4l2_dev.name);
if (IS_ERR(itv->irq_worker_task)) {
@@ -760,7 +760,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
/* must use the FIFO scheduler as it is realtime sensitive */
sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
- init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
+ kthread_init_work(&itv->irq_work, ivtv_irq_work_handler);
/* Initial settings */
itv->cxhdl.port = CX2341X_PORT_MEMORY;
@@ -1441,7 +1441,7 @@ static void ivtv_remove(struct pci_dev *pdev)
del_timer_sync(&itv->dma_timer);
/* Kill irq worker */
- flush_kthread_worker(&itv->irq_worker);
+ kthread_flush_worker(&itv->irq_worker);
kthread_stop(itv->irq_worker_task);
ivtv_streams_cleanup(itv);
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index bccbf2d18e30..dea80efd5836 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -215,7 +215,8 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
/* Default to grey remote */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_BIT_RC5;
+ init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
+ RC_BIT_RC6_6A_32;
init_data->name = itv->card_name;
break;
case IVTV_HW_I2C_IR_RX_ADAPTEC:
@@ -625,7 +626,7 @@ static u32 ivtv_functionality(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm ivtv_algo = {
+static const struct i2c_algorithm ivtv_algo = {
.master_xfer = ivtv_xfer,
.functionality = ivtv_functionality,
};
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 36ca2d67c812..6efe1f71262c 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -1062,7 +1062,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
}
if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
- queue_kthread_work(&itv->irq_worker, &itv->irq_work);
+ kthread_queue_work(&itv->irq_worker, &itv->irq_work);
}
spin_unlock(&itv->dma_reg_lock);
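
The ivtv hunks are mechanical fallout from the kthread-worker API rename to a consistent kthread_ prefix; arguments and semantics are unchanged:

	init_kthread_worker(w)    ->  kthread_init_worker(w)
	init_kthread_work(x, fn)  ->  kthread_init_work(x, fn)
	queue_kthread_work(w, x)  ->  kthread_queue_work(w, x)
	flush_kthread_worker(w)   ->  kthread_flush_worker(w)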
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 4769469fe842..2c9232ef7baa 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Get user pages for DMA Xfer */
- err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
- 1, dma->map);
+ err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
+ dma->map, FOLL_FORCE);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index b094054cda6e..f7299d3d8244 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
/* Get user pages for DMA Xfer */
y_pages = get_user_pages_unlocked(y_dma.uaddr,
- y_dma.page_count, 0, 1, &dma->map[0]);
+ y_dma.page_count, &dma->map[0], FOLL_FORCE);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
- uv_dma.page_count, 0, 1, &dma->map[y_pages]);
+ uv_dma.page_count, &dma->map[y_pages],
+ FOLL_FORCE);
}
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index ac547cb84de8..b078ac2a682c 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -353,7 +353,7 @@ static void netup_unidvb_stop_streaming(struct vb2_queue *q)
netup_unidvb_queue_cleanup(dma);
}
-static struct vb2_ops dvb_qops = {
+static const struct vb2_ops dvb_qops = {
.queue_setup = netup_unidvb_queue_setup,
.buf_prepare = netup_unidvb_buf_prepare,
.buf_queue = netup_unidvb_buf_queue,
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
index c09c52bc6eab..b49e4f9788e8 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -327,11 +327,8 @@ static int netup_i2c_init(struct netup_unidvb_dev *ndev, int bus_num)
i2c->adap.dev.parent = &ndev->pci_dev->dev;
i2c_set_adapdata(&i2c->adap, i2c);
ret = i2c_add_adapter(&i2c->adap);
- if (ret) {
- dev_err(&ndev->pci_dev->dev,
- "%s(): failed to add I2C adapter\n", __func__);
+ if (ret)
return ret;
- }
dev_info(&ndev->pci_dev->dev,
"%s(): registered I2C bus %d at 0x%x\n",
__func__,
diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
index 4e783a68bf4a..423e8c889310 100644
--- a/drivers/media/pci/ngene/ngene-cards.c
+++ b/drivers/media/pci/ngene/ngene-cards.c
@@ -613,7 +613,7 @@ static struct stv6110x_config tuner_cineS2_1 = {
.clk_div = 1,
};
-static struct ngene_info ngene_info_cineS2 = {
+static const struct ngene_info ngene_info_cineS2 = {
.type = NGENE_SIDEWINDER,
.name = "Linux4Media cineS2 DVB-S2 Twin Tuner",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
@@ -627,7 +627,7 @@ static struct ngene_info ngene_info_cineS2 = {
.msi_supported = true,
};
-static struct ngene_info ngene_info_satixS2 = {
+static const struct ngene_info ngene_info_satixS2 = {
.type = NGENE_SIDEWINDER,
.name = "Mystique SaTiX-S2 Dual",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
@@ -641,7 +641,7 @@ static struct ngene_info ngene_info_satixS2 = {
.msi_supported = true,
};
-static struct ngene_info ngene_info_satixS2v2 = {
+static const struct ngene_info ngene_info_satixS2v2 = {
.type = NGENE_SIDEWINDER,
.name = "Mystique SaTiX-S2 Dual (v2)",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
@@ -656,7 +656,7 @@ static struct ngene_info ngene_info_satixS2v2 = {
.msi_supported = true,
};
-static struct ngene_info ngene_info_cineS2v5 = {
+static const struct ngene_info ngene_info_cineS2v5 = {
.type = NGENE_SIDEWINDER,
.name = "Linux4Media cineS2 DVB-S2 Twin Tuner (v5)",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
@@ -672,7 +672,7 @@ static struct ngene_info ngene_info_cineS2v5 = {
};
-static struct ngene_info ngene_info_duoFlex = {
+static const struct ngene_info ngene_info_duoFlex = {
.type = NGENE_SIDEWINDER,
.name = "Digital Devices DuoFlex PCIe or miniPCIe",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
@@ -687,7 +687,7 @@ static struct ngene_info ngene_info_duoFlex = {
.msi_supported = true,
};
-static struct ngene_info ngene_info_m780 = {
+static const struct ngene_info ngene_info_m780 = {
.type = NGENE_APP,
.name = "Aver M780 ATSC/QAM-B",
@@ -727,7 +727,7 @@ static struct drxd_config fe_terratec_dvbt_1 = {
.osc_deviation = osc_deviation,
};
-static struct ngene_info ngene_info_terratec = {
+static const struct ngene_info ngene_info_terratec = {
.type = NGENE_TERRATEC,
.name = "Terratec Integra/Cinergy2400i Dual DVB-T",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index eff5e9f51ace..7fb649e523f4 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -798,10 +798,8 @@ static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strlcpy(i2c->name, DRV_NAME, sizeof(i2c->name));
i2c_set_adapdata(i2c, pt3);
ret = i2c_add_adapter(i2c);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add i2c adapter\n");
+ if (ret < 0)
goto err_i2cbuf;
- }
for (i = 0; i < PT3_NUM_FE; i++) {
ret = pt3_alloc_adapter(pt3, i);
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 94f816244407..dc0e2fc5f68b 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -877,7 +877,7 @@ static struct page *snd_card_saa7134_page(struct snd_pcm_substream *substream,
* ALSA capture callbacks definition
*/
-static struct snd_pcm_ops snd_card_saa7134_capture_ops = {
+static const struct snd_pcm_ops snd_card_saa7134_capture_ops = {
.open = snd_card_saa7134_capture_open,
.close = snd_card_saa7134_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 791a5161809b..f0fe2524259f 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -85,7 +85,7 @@ static void stop_streaming(struct vb2_queue *vq)
dev->empress_started = 0;
}
-static struct vb2_ops saa7134_empress_qops = {
+static const struct vb2_ops saa7134_empress_qops = {
.queue_setup = saa7134_ts_queue_setup,
.buf_init = saa7134_ts_buffer_init,
.buf_prepare = saa7134_ts_buffer_prepare,
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 8ef6399d794f..2dac48fa1386 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -338,7 +338,7 @@ static u32 functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm saa7134_algo = {
+static const struct i2c_algorithm saa7134_algo = {
.master_xfer = saa7134_i2c_xfer,
.functionality = functionality,
};
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index c8042c3888cd..eff52bbbfd66 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -345,7 +345,7 @@ static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_type *protocol,
if (data[9] != (unsigned char)(~data[8]))
return 0;
- *protocol = RC_TYPE_NEC;
+ *protocol = RC_TYPE_NECX;
*scancode = RC_SCANCODE_NECX(data[11] << 8 | data[10], data[9]);
*toggle = 0;
return 1;
@@ -1035,7 +1035,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "BeholdTV";
dev->init_data.get_key = get_key_beholdm6xx;
dev->init_data.ir_codes = RC_MAP_BEHOLD;
- dev->init_data.type = RC_BIT_NEC;
+ dev->init_data.type = RC_BIT_NECX;
info.addr = 0x2d;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 8a6ebd087889..cbb173d99085 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1054,7 +1054,7 @@ void saa7134_vb2_stop_streaming(struct vb2_queue *vq)
pm_qos_remove_request(&dev->qos_request);
}
-static struct vb2_ops vb2_qops = {
+static const struct vb2_ops vb2_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
@@ -1659,8 +1659,6 @@ static int saa7134_cropcap(struct file *file, void *priv,
if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- cap->bounds = dev->crop_bounds;
- cap->defrect = dev->crop_defrect;
cap->pixelaspect.numerator = 1;
cap->pixelaspect.denominator = 1;
if (dev->tvnorm->id & V4L2_STD_525_60) {
@@ -1674,25 +1672,41 @@ static int saa7134_cropcap(struct file *file, void *priv,
return 0;
}
-static int saa7134_g_crop(struct file *file, void *f, struct v4l2_crop *crop)
+static int saa7134_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
{
struct saa7134_dev *dev = video_drvdata(file);
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ sel->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- crop->c = dev->crop_current;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = dev->crop_current;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r = dev->crop_defrect;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r = dev->crop_bounds;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
-static int saa7134_s_crop(struct file *file, void *f, const struct v4l2_crop *crop)
+static int saa7134_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
{
struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_rect *b = &dev->crop_bounds;
struct v4l2_rect *c = &dev->crop_current;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- crop->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ sel->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
if (dev->overlay_owner)
@@ -1700,7 +1714,7 @@ static int saa7134_s_crop(struct file *file, void *f, const struct v4l2_crop *cr
if (vb2_is_streaming(&dev->video_vbq))
return -EBUSY;
- *c = crop->c;
+ *c = sel->r;
if (c->top < b->top)
c->top = b->top;
if (c->top > b->top + b->height)
@@ -1714,6 +1728,7 @@ static int saa7134_s_crop(struct file *file, void *f, const struct v4l2_crop *cr
c->left = b->left + b->width;
if (c->width > b->left - c->left + b->width)
c->width = b->left - c->left + b->width;
+ sel->r = *c;
return 0;
}
@@ -1989,8 +2004,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = saa7134_g_tuner,
.vidioc_s_tuner = saa7134_s_tuner,
- .vidioc_g_crop = saa7134_g_crop,
- .vidioc_s_crop = saa7134_s_crop,
+ .vidioc_g_selection = saa7134_g_selection,
+ .vidioc_s_selection = saa7134_s_selection,
.vidioc_g_fbuf = saa7134_g_fbuf,
.vidioc_s_fbuf = saa7134_s_fbuf,
.vidioc_overlay = saa7134_overlay,
diff --git a/drivers/media/pci/saa7164/saa7164-i2c.c b/drivers/media/pci/saa7164/saa7164-i2c.c
index 0342d84913b8..024f4e29e840 100644
--- a/drivers/media/pci/saa7164/saa7164-i2c.c
+++ b/drivers/media/pci/saa7164/saa7164-i2c.c
@@ -75,7 +75,7 @@ static u32 saa7164_functionality(struct i2c_adapter *adap)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm saa7164_i2c_algo_template = {
+static const struct i2c_algorithm saa7164_i2c_algo_template = {
.master_xfer = i2c_xfer,
.functionality = saa7164_functionality,
};
diff --git a/drivers/media/pci/smipcie/smipcie-main.c b/drivers/media/pci/smipcie/smipcie-main.c
index 83981d611a79..6dbe3b4d09ce 100644
--- a/drivers/media/pci/smipcie/smipcie-main.c
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -1060,7 +1060,7 @@ static void smi_remove(struct pci_dev *pdev)
}
/* DVBSky cards */
-static struct smi_cfg_info dvbsky_s950_cfg = {
+static const struct smi_cfg_info dvbsky_s950_cfg = {
.type = SMI_DVBSKY_S950,
.name = "DVBSky S950 V3",
.ts_0 = SMI_TS_NULL,
@@ -1070,7 +1070,7 @@ static struct smi_cfg_info dvbsky_s950_cfg = {
.rc_map = RC_MAP_DVBSKY,
};
-static struct smi_cfg_info dvbsky_s952_cfg = {
+static const struct smi_cfg_info dvbsky_s952_cfg = {
.type = SMI_DVBSKY_S952,
.name = "DVBSky S952 V3",
.ts_0 = SMI_TS_DMA_BOTH,
@@ -1080,7 +1080,7 @@ static struct smi_cfg_info dvbsky_s952_cfg = {
.rc_map = RC_MAP_DVBSKY,
};
-static struct smi_cfg_info dvbsky_t9580_cfg = {
+static const struct smi_cfg_info dvbsky_t9580_cfg = {
.type = SMI_DVBSKY_T9580,
.name = "DVBSky T9580 V3",
.ts_0 = SMI_TS_DMA_BOTH,
@@ -1090,7 +1090,7 @@ static struct smi_cfg_info dvbsky_t9580_cfg = {
.rc_map = RC_MAP_DVBSKY,
};
-static struct smi_cfg_info technotrend_s2_4200_cfg = {
+static const struct smi_cfg_info technotrend_s2_4200_cfg = {
.type = SMI_TECHNOTREND_S2_4200,
.name = "TechnoTrend TT-budget S2-4200 Twin",
.ts_0 = SMI_TS_DMA_BOTH,
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 4a37a1c51c48..6a35107aca25 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -252,7 +252,7 @@ static int snd_solo_pcm_copy(struct snd_pcm_substream *ss, int channel,
return 0;
}
-static struct snd_pcm_ops snd_solo_pcm_ops = {
+static const struct snd_pcm_ops snd_solo_pcm_ops = {
.open = snd_solo_pcm_open,
.close = snd_solo_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 399164314c28..25a2137ab799 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -759,7 +759,7 @@ static void solo_enc_buf_finish(struct vb2_buffer *vb)
}
}
-static struct vb2_ops solo_enc_video_qops = {
+static const struct vb2_ops solo_enc_video_qops = {
.queue_setup = solo_enc_queue_setup,
.buf_queue = solo_enc_buf_queue,
.buf_finish = solo_enc_buf_finish,
diff --git a/drivers/media/pci/tw5864/Kconfig b/drivers/media/pci/tw5864/Kconfig
new file mode 100644
index 000000000000..87c8f327e2d4
--- /dev/null
+++ b/drivers/media/pci/tw5864/Kconfig
@@ -0,0 +1,12 @@
+config VIDEO_TW5864
+ tristate "Techwell TW5864 video/audio grabber and encoder"
+ depends on VIDEO_DEV && PCI && VIDEO_V4L2
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+	  Support for boards based on the Techwell TW5864 chip, which provides
+	  multichannel video & audio grabbing and encoding (H.264, MJPEG,
+	  ADPCM G.726).
+
+ To compile this driver as a module, choose M here: the
+ module will be called tw5864.
diff --git a/drivers/media/pci/tw5864/Makefile b/drivers/media/pci/tw5864/Makefile
new file mode 100644
index 000000000000..4fc8b3b1a45a
--- /dev/null
+++ b/drivers/media/pci/tw5864/Makefile
@@ -0,0 +1,3 @@
+tw5864-objs := tw5864-core.o tw5864-video.o tw5864-h264.o tw5864-util.o
+
+obj-$(CONFIG_VIDEO_TW5864) += tw5864.o
diff --git a/drivers/media/pci/tw5864/tw5864-core.c b/drivers/media/pci/tw5864/tw5864-core.c
new file mode 100644
index 000000000000..1d43b96958ea
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-core.c
@@ -0,0 +1,359 @@
+/*
+ * TW5864 driver - core functions
+ *
+ * Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/sound.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+#include <linux/pci_ids.h>
+#include <linux/jiffies.h>
+#include <asm/dma.h>
+#include <media/v4l2-dev.h>
+
+#include "tw5864.h"
+#include "tw5864-reg.h"
+
+MODULE_DESCRIPTION("V4L2 driver module for tw5864-based multimedia capture & encoding devices");
+MODULE_AUTHOR("Bluecherry Maintainers <maintainers@bluecherrydvr.com>");
+MODULE_AUTHOR("Andrey Utkin <andrey.utkin@corp.bluecherry.net>");
+MODULE_LICENSE("GPL");
+
+/*
+ * BEWARE OF KNOWN ISSUES WITH VIDEO QUALITY
+ *
+ * This driver was developed by Bluecherry LLC by deducing behaviour of
+ * original manufacturer's driver, from both source code and execution traces.
+ * It is known that there are some artifacts on output video with this driver:
+ * - on all known hardware samples: random pixels of wrong color (mostly
+ * white, red or blue) appearing and disappearing on sequences of P-frames;
+ * - on some hardware samples (known with H.264 core version e006:2800):
+ * total madness on P-frames: blocks of wrong luminance; blocks of wrong
+ * colors "creeping" across the picture.
+ * There is a workaround for both issues: avoid P-frames by setting GOP size
+ * to 1. To do that, run this command on device files created by this driver:
+ *
+ * v4l2-ctl --device /dev/videoX --set-ctrl=video_gop_size=1
+ *
+ * These issues are not decoding errors; all produced H.264 streams are decoded
+ * properly. Streams without P-frames don't have these artifacts, so this is
+ * neither an analog-to-digital conversion issue nor an internal memory error;
+ * we conclude the problems lie in the internal H.264 encoder.
+ * We cannot even check the original driver's behaviour because it has never
+ * worked properly at all in our development environment, so these issues may
+ * actually be caused by firmware or hardware. However, it may also be that the
+ * driver is simply missing some register settings that the hardware expects.
+ * The manufacturer didn't help much with our inquiries, but feel free to
+ * contact the support of Intersil (owner of former Techwell) again.
+ */
+
+/* take first free /dev/videoX indexes by default */
+static int video_nr[] = {[0 ... (TW5864_INPUTS - 1)] = -1 };
+
+module_param_array(video_nr, int, NULL, 0444);
+MODULE_PARM_DESC(video_nr, "array of video device numbers");
+
+/*
+ * Please add any new PCI IDs to: http://pci-ids.ucw.cz. This keeps
+ * the PCI ID database up to date. Note that the entries must be
+ * added under vendor 0x1797 (Techwell Inc.) as subsystem IDs.
+ */
+static const struct pci_device_id tw5864_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, PCI_DEVICE_ID_TECHWELL_5864)},
+ {0,}
+};
+
+void tw5864_irqmask_apply(struct tw5864_dev *dev)
+{
+ tw_writel(TW5864_INTR_ENABLE_L, dev->irqmask & 0xffff);
+ tw_writel(TW5864_INTR_ENABLE_H, (dev->irqmask >> 16));
+}
+
+static void tw5864_interrupts_disable(struct tw5864_dev *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->slock, flags);
+ dev->irqmask = 0;
+ tw5864_irqmask_apply(dev);
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static void tw5864_timer_isr(struct tw5864_dev *dev);
+static void tw5864_h264_isr(struct tw5864_dev *dev);
+
+static irqreturn_t tw5864_isr(int irq, void *dev_id)
+{
+ struct tw5864_dev *dev = dev_id;
+ u32 status;
+
+ status = tw_readl(TW5864_INTR_STATUS_L) |
+ tw_readl(TW5864_INTR_STATUS_H) << 16;
+ if (!status)
+ return IRQ_NONE;
+
+ tw_writel(TW5864_INTR_CLR_L, 0xffff);
+ tw_writel(TW5864_INTR_CLR_H, 0xffff);
+
+ if (status & TW5864_INTR_VLC_DONE)
+ tw5864_h264_isr(dev);
+
+ if (status & TW5864_INTR_TIMER)
+ tw5864_timer_isr(dev);
+
+ if (!(status & (TW5864_INTR_TIMER | TW5864_INTR_VLC_DONE))) {
+ dev_dbg(&dev->pci->dev, "Unknown interrupt, status 0x%08X\n",
+ status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tw5864_h264_isr(struct tw5864_dev *dev)
+{
+ int channel = tw_readl(TW5864_DSP) & TW5864_DSP_ENC_CHN;
+ struct tw5864_input *input = &dev->inputs[channel];
+ int cur_frame_index, next_frame_index;
+ struct tw5864_h264_frame *cur_frame, *next_frame;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->slock, flags);
+
+ cur_frame_index = dev->h264_buf_w_index;
+ next_frame_index = (cur_frame_index + 1) % H264_BUF_CNT;
+ cur_frame = &dev->h264_buf[cur_frame_index];
+ next_frame = &dev->h264_buf[next_frame_index];
+
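+	/*
+	 * Advance the ring buffer write index only if it would not catch up
+	 * with the read index (i.e. the buffer is not full).
+	 */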
+ if (next_frame_index != dev->h264_buf_r_index) {
+ cur_frame->vlc_len = tw_readl(TW5864_VLC_LENGTH) << 2;
+ cur_frame->checksum = tw_readl(TW5864_VLC_CRC_REG);
+ cur_frame->input = input;
+ cur_frame->timestamp = ktime_get_ns();
+ cur_frame->seqno = input->frame_seqno;
+ cur_frame->gop_seqno = input->frame_gop_seqno;
+
+ dev->h264_buf_w_index = next_frame_index;
+ tasklet_schedule(&dev->tasklet);
+
+ cur_frame = next_frame;
+
+ spin_lock(&input->slock);
+ input->frame_seqno++;
+ input->frame_gop_seqno++;
+ if (input->frame_gop_seqno >= input->gop)
+ input->frame_gop_seqno = 0;
+ spin_unlock(&input->slock);
+ } else {
+ dev_err(&dev->pci->dev,
+ "Skipped frame on input %d because all buffers busy\n",
+ channel);
+ }
+
+ dev->encoder_busy = 0;
+
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ tw_writel(TW5864_VLC_STREAM_BASE_ADDR, cur_frame->vlc.dma_addr);
+ tw_writel(TW5864_MV_STREAM_BASE_ADDR, cur_frame->mv.dma_addr);
+
+ /* Additional ack for this interrupt */
+ tw_writel(TW5864_VLC_DSP_INTR, 0x00000001);
+ tw_writel(TW5864_PCI_INTR_STATUS, TW5864_VLC_DONE_INTR);
+}
+
+static void tw5864_input_deadline_update(struct tw5864_input *input)
+{
+ input->new_frame_deadline = jiffies + msecs_to_jiffies(1000);
+}
+
+static void tw5864_timer_isr(struct tw5864_dev *dev)
+{
+ unsigned long flags;
+ int i;
+ int encoder_busy;
+
+ /* Additional ack for this interrupt */
+ tw_writel(TW5864_PCI_INTR_STATUS, TW5864_TIMER_INTR);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ encoder_busy = dev->encoder_busy;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ if (encoder_busy)
+ return;
+
+ /*
+ * Traversing inputs in round-robin fashion, starting from next to the
+ * last processed one
+ */
+ for (i = 0; i < TW5864_INPUTS; i++) {
+ int next_input = (i + dev->next_input) % TW5864_INPUTS;
+ struct tw5864_input *input = &dev->inputs[next_input];
+ int raw_buf_id; /* id of internal buf with last raw frame */
+
+ spin_lock_irqsave(&input->slock, flags);
+ if (!input->enabled)
+ goto next;
+
+ /* Check if new raw frame is available */
+ raw_buf_id = tw_mask_shift_readl(TW5864_SENIF_ORG_FRM_PTR1, 0x3,
+ 2 * input->nr);
+
+ if (input->buf_id != raw_buf_id) {
+ input->buf_id = raw_buf_id;
+ tw5864_input_deadline_update(input);
+ spin_unlock_irqrestore(&input->slock, flags);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ dev->encoder_busy = 1;
+ dev->next_input = (next_input + 1) % TW5864_INPUTS;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ tw5864_request_encoded_frame(input);
+ break;
+ }
+
+ /* No new raw frame; check if channel is stuck */
+		if (time_is_before_jiffies(input->new_frame_deadline)) {
+ /* If stuck, request new raw frames again */
+ tw_mask_shift_writel(TW5864_ENC_BUF_PTR_REC1, 0x3,
+ 2 * input->nr, input->buf_id + 3);
+ tw5864_input_deadline_update(input);
+ }
+next:
+ spin_unlock_irqrestore(&input->slock, flags);
+ }
+}
+
+static int tw5864_initdev(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ struct tw5864_dev *dev;
+ int err;
+
+ dev = devm_kzalloc(&pci_dev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ snprintf(dev->name, sizeof(dev->name), "tw5864:%s", pci_name(pci_dev));
+
+ err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
+ if (err)
+ return err;
+
+ /* pci init */
+ dev->pci = pci_dev;
+ err = pci_enable_device(pci_dev);
+ if (err) {
+ dev_err(&dev->pci->dev, "pci_enable_device() failed\n");
+ goto unreg_v4l2;
+ }
+
+ pci_set_master(pci_dev);
+
+ err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&dev->pci->dev, "32 bit PCI DMA is not supported\n");
+ goto disable_pci;
+ }
+
+ /* get mmio */
+ err = pci_request_regions(pci_dev, dev->name);
+ if (err) {
+ dev_err(&dev->pci->dev, "Cannot request regions for MMIO\n");
+ goto disable_pci;
+ }
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+ if (!dev->mmio) {
+ err = -EIO;
+ dev_err(&dev->pci->dev, "can't ioremap() MMIO memory\n");
+ goto release_mmio;
+ }
+
+ spin_lock_init(&dev->slock);
+
+ dev_info(&pci_dev->dev, "TW5864 hardware version: %04x\n",
+ tw_readl(TW5864_HW_VERSION));
+ dev_info(&pci_dev->dev, "TW5864 H.264 core version: %04x:%04x\n",
+ tw_readl(TW5864_H264REV),
+ tw_readl(TW5864_UNDECLARED_H264REV_PART2));
+
+ err = tw5864_video_init(dev, video_nr);
+ if (err)
+ goto unmap_mmio;
+
+ /* get irq */
+ err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw5864_isr,
+ IRQF_SHARED, "tw5864", dev);
+ if (err < 0) {
+ dev_err(&dev->pci->dev, "can't get IRQ %d\n", pci_dev->irq);
+ goto fini_video;
+ }
+
+ dev_info(&pci_dev->dev, "Note: there are known video quality issues. For details\n");
+ dev_info(&pci_dev->dev, "see the comment in drivers/media/pci/tw5864/tw5864-core.c.\n");
+
+ return 0;
+
+fini_video:
+ tw5864_video_fini(dev);
+unmap_mmio:
+ iounmap(dev->mmio);
+release_mmio:
+ pci_release_regions(pci_dev);
+disable_pci:
+ pci_disable_device(pci_dev);
+unreg_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return err;
+}
+
+static void tw5864_finidev(struct pci_dev *pci_dev)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+ struct tw5864_dev *dev =
+ container_of(v4l2_dev, struct tw5864_dev, v4l2_dev);
+
+ /* shutdown subsystems */
+ tw5864_interrupts_disable(dev);
+
+ /* unregister */
+ tw5864_video_fini(dev);
+
+ /* release resources */
+ iounmap(dev->mmio);
+ release_mem_region(pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ devm_kfree(&pci_dev->dev, dev);
+}
+
+static struct pci_driver tw5864_pci_driver = {
+ .name = "tw5864",
+ .id_table = tw5864_pci_tbl,
+ .probe = tw5864_initdev,
+ .remove = tw5864_finidev,
+};
+
+module_pci_driver(tw5864_pci_driver);
diff --git a/drivers/media/pci/tw5864/tw5864-h264.c b/drivers/media/pci/tw5864/tw5864-h264.c
new file mode 100644
index 000000000000..330d200f52cd
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-h264.c
@@ -0,0 +1,259 @@
+/*
+ * TW5864 driver - H.264 headers generation functions
+ *
+ * Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/log2.h>
+
+#include "tw5864.h"
+
+static const u8 marker[] = { 0x00, 0x00, 0x00, 0x01 };
+
+/*
+ * Exponential-Golomb coding functions
+ *
+ * These functions are used for generation of H.264 bitstream headers.
+ *
+ * This code is derived from tw5864 reference driver by manufacturers, which
+ * itself apparently was derived from x264 project.
+ */
+
+/* Bitstream writing context */
+struct bs {
+ u8 *buf; /* pointer to buffer beginning */
+ u8 *buf_end; /* pointer to buffer end */
+ u8 *ptr; /* pointer to current byte in buffer */
+ unsigned int bits_left; /* number of available bits in current byte */
+};
+
+static void bs_init(struct bs *s, void *buf, int size)
+{
+ s->buf = buf;
+ s->ptr = buf;
+ s->buf_end = s->ptr + size;
+ s->bits_left = 8;
+}
+
+static int bs_len(struct bs *s)
+{
+ return s->ptr - s->buf;
+}
+
+static void bs_write(struct bs *s, int count, u32 bits)
+{
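+	/* Refuse writes within the last 4 bytes of the buffer to avoid overflow */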
+ if (s->ptr >= s->buf_end - 4)
+ return;
+ while (count > 0) {
+ if (count < 32)
+ bits &= (1 << count) - 1;
+ if (count < s->bits_left) {
+ *s->ptr = (*s->ptr << count) | bits;
+ s->bits_left -= count;
+ break;
+ }
+ *s->ptr = (*s->ptr << s->bits_left) |
+ (bits >> (count - s->bits_left));
+ count -= s->bits_left;
+ s->ptr++;
+ s->bits_left = 8;
+ }
+}
+
+static void bs_write1(struct bs *s, u32 bit)
+{
+ if (s->ptr < s->buf_end) {
+ *s->ptr <<= 1;
+ *s->ptr |= bit;
+ s->bits_left--;
+ if (s->bits_left == 0) {
+ s->ptr++;
+ s->bits_left = 8;
+ }
+ }
+}
+
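+/*
+ * Write an unsigned Exp-Golomb code: fls(val + 1) - 1 leading zero bits,
+ * then val + 1 in binary. For example, 0 -> "1", 1 -> "010", 2 -> "011",
+ * 3 -> "00100".
+ */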
+static void bs_write_ue(struct bs *s, u32 val)
+{
+ if (val == 0) {
+ bs_write1(s, 1);
+ } else {
+ val++;
+ bs_write(s, 2 * fls(val) - 1, val);
+ }
+}
+
+static void bs_write_se(struct bs *s, int val)
+{
+ bs_write_ue(s, val <= 0 ? -val * 2 : val * 2 - 1);
+}
+
+static void bs_rbsp_trailing(struct bs *s)
+{
+ bs_write1(s, 1);
+ if (s->bits_left != 8)
+ bs_write(s, s->bits_left, 0x00);
+}
+
+/* H.264 headers generation functions */
+
+static int tw5864_h264_gen_sps_rbsp(u8 *buf, size_t size, int width, int height)
+{
+ struct bs bs, *s;
+
+ s = &bs;
+ bs_init(s, buf, size);
+ bs_write(s, 8, 0x42); /* profile_idc, baseline */
+ bs_write(s, 1, 1); /* constraint_set0_flag */
+ bs_write(s, 1, 1); /* constraint_set1_flag */
+ bs_write(s, 1, 0); /* constraint_set2_flag */
+ bs_write(s, 5, 0); /* reserved_zero_5bits */
+ bs_write(s, 8, 0x1e); /* level_idc */
+ bs_write_ue(s, 0); /* seq_parameter_set_id */
+ bs_write_ue(s, ilog2(MAX_GOP_SIZE) - 4); /* log2_max_frame_num_minus4 */
+ bs_write_ue(s, 0); /* pic_order_cnt_type */
+ /* log2_max_pic_order_cnt_lsb_minus4 */
+ bs_write_ue(s, ilog2(MAX_GOP_SIZE) - 4);
+ bs_write_ue(s, 1); /* num_ref_frames */
+ bs_write(s, 1, 0); /* gaps_in_frame_num_value_allowed_flag */
+ bs_write_ue(s, width / 16 - 1); /* pic_width_in_mbs_minus1 */
+ bs_write_ue(s, height / 16 - 1); /* pic_height_in_map_units_minus1 */
+ bs_write(s, 1, 1); /* frame_mbs_only_flag */
+ bs_write(s, 1, 0); /* direct_8x8_inference_flag */
+ bs_write(s, 1, 0); /* frame_cropping_flag */
+ bs_write(s, 1, 0); /* vui_parameters_present_flag */
+ bs_rbsp_trailing(s);
+ return bs_len(s);
+}
+
+static int tw5864_h264_gen_pps_rbsp(u8 *buf, size_t size, int qp)
+{
+ struct bs bs, *s;
+
+ s = &bs;
+ bs_init(s, buf, size);
+ bs_write_ue(s, 0); /* pic_parameter_set_id */
+ bs_write_ue(s, 0); /* seq_parameter_set_id */
+ bs_write(s, 1, 0); /* entropy_coding_mode_flag */
+ bs_write(s, 1, 0); /* pic_order_present_flag */
+ bs_write_ue(s, 0); /* num_slice_groups_minus1 */
+ bs_write_ue(s, 0); /* i_num_ref_idx_l0_active_minus1 */
+ bs_write_ue(s, 0); /* i_num_ref_idx_l1_active_minus1 */
+ bs_write(s, 1, 0); /* weighted_pred_flag */
+ bs_write(s, 2, 0); /* weighted_bipred_idc */
+ bs_write_se(s, qp - 26); /* pic_init_qp_minus26 */
+ bs_write_se(s, qp - 26); /* pic_init_qs_minus26 */
+ bs_write_se(s, 0); /* chroma_qp_index_offset */
+ bs_write(s, 1, 0); /* deblocking_filter_control_present_flag */
+ bs_write(s, 1, 0); /* constrained_intra_pred_flag */
+ bs_write(s, 1, 0); /* redundant_pic_cnt_present_flag */
+ bs_rbsp_trailing(s);
+ return bs_len(s);
+}
+
+static int tw5864_h264_gen_slice_head(u8 *buf, size_t size,
+ unsigned int idr_pic_id,
+ unsigned int frame_gop_seqno,
+ int *tail_nb_bits, u8 *tail)
+{
+ struct bs bs, *s;
+ int is_i_frame = frame_gop_seqno == 0;
+
+ s = &bs;
+ bs_init(s, buf, size);
+ bs_write_ue(s, 0); /* first_mb_in_slice */
+ bs_write_ue(s, is_i_frame ? 2 : 5); /* slice_type - I or P */
+ bs_write_ue(s, 0); /* pic_parameter_set_id */
+ bs_write(s, ilog2(MAX_GOP_SIZE), frame_gop_seqno); /* frame_num */
+ if (is_i_frame)
+ bs_write_ue(s, idr_pic_id);
+
+ /* pic_order_cnt_lsb */
+ bs_write(s, ilog2(MAX_GOP_SIZE), frame_gop_seqno);
+
+ if (is_i_frame) {
+ bs_write1(s, 0); /* no_output_of_prior_pics_flag */
+ bs_write1(s, 0); /* long_term_reference_flag */
+ } else {
+ bs_write1(s, 0); /* num_ref_idx_active_override_flag */
+ bs_write1(s, 0); /* ref_pic_list_reordering_flag_l0 */
+ bs_write1(s, 0); /* adaptive_ref_pic_marking_mode_flag */
+ }
+
+ bs_write_se(s, 0); /* slice_qp_delta */
+
+ if (s->bits_left != 8) {
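+		/* Partial byte left: hand its bits back, MSB-aligned, via *tail */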
+ *tail = ((s->ptr[0]) << s->bits_left);
+ *tail_nb_bits = 8 - s->bits_left;
+ } else {
+ *tail = 0;
+ *tail_nb_bits = 0;
+ }
+
+ return bs_len(s);
+}
+
+void tw5864_h264_put_stream_header(u8 **buf, size_t *space_left, int qp,
+ int width, int height)
+{
+ int nal_len;
+
+ /* SPS */
+ memcpy(*buf, marker, sizeof(marker));
+ *buf += 4;
+ *space_left -= 4;
+
+ **buf = 0x67; /* SPS NAL header */
+ *buf += 1;
+ *space_left -= 1;
+
+ nal_len = tw5864_h264_gen_sps_rbsp(*buf, *space_left, width, height);
+ *buf += nal_len;
+ *space_left -= nal_len;
+
+ /* PPS */
+ memcpy(*buf, marker, sizeof(marker));
+ *buf += 4;
+ *space_left -= 4;
+
+ **buf = 0x68; /* PPS NAL header */
+ *buf += 1;
+ *space_left -= 1;
+
+ nal_len = tw5864_h264_gen_pps_rbsp(*buf, *space_left, qp);
+ *buf += nal_len;
+ *space_left -= nal_len;
+}
+
+void tw5864_h264_put_slice_header(u8 **buf, size_t *space_left,
+ unsigned int idr_pic_id,
+ unsigned int frame_gop_seqno,
+ int *tail_nb_bits, u8 *tail)
+{
+ int nal_len;
+
+ memcpy(*buf, marker, sizeof(marker));
+ *buf += 4;
+ *space_left -= 4;
+
+ /* Frame NAL header */
+ **buf = (frame_gop_seqno == 0) ? 0x25 : 0x21;
+ *buf += 1;
+ *space_left -= 1;
+
+ nal_len = tw5864_h264_gen_slice_head(*buf, *space_left, idr_pic_id,
+ frame_gop_seqno, tail_nb_bits,
+ tail);
+ *buf += nal_len;
+ *space_left -= nal_len;
+}
diff --git a/drivers/media/pci/tw5864/tw5864-reg.h b/drivers/media/pci/tw5864/tw5864-reg.h
new file mode 100644
index 000000000000..92a1b077ef8a
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-reg.h
@@ -0,0 +1,2133 @@
+/*
+ * TW5864 driver - registers description
+ *
+ * Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* According to TW5864_datasheet_0.6d.pdf, tw5864b1-ds.pdf */
+
+/* Register Description - Direct Map Space */
+/* 0x0000 ~ 0x1ffc - H264 Register Map */
+/* [15:0] The Version register for H264 core (Read Only) */
+#define TW5864_H264REV 0x0000
+
+#define TW5864_EMU 0x0004
+/* Define controls in register TW5864_EMU */
+/* DDR controller enabled */
+#define TW5864_EMU_EN_DDR BIT(0)
+/* Enable bit for Inter module */
+#define TW5864_EMU_EN_ME BIT(1)
+/* Enable bit for Sensor Interface module */
+#define TW5864_EMU_EN_SEN BIT(2)
+/* Enable bit for Host Burst Access */
+#define TW5864_EMU_EN_BHOST BIT(3)
+/* Enable bit for Loop Filter module */
+#define TW5864_EMU_EN_LPF BIT(4)
+/* Enable bit for PLBK module */
+#define TW5864_EMU_EN_PLBK BIT(5)
+/*
+ * Video Frame mapping in DDR
+ * 00 CIF
+ * 01 D1
+ * 10 Reserved
+ * 11 Reserved
+ */
+#define TW5864_DSP_FRAME_TYPE (3 << 6)
+#define TW5864_DSP_FRAME_TYPE_D1 BIT(6)
+
+#define TW5864_UNDECLARED_H264REV_PART2 0x0008
+
+#define TW5864_SLICE 0x000c
+/* Define controls in register TW5864_SLICE */
+/* VLC Slice end flag */
+#define TW5864_VLC_SLICE_END BIT(0)
+/* Master Slice End Flag */
+#define TW5864_MAS_SLICE_END BIT(4)
+/* Host to start a new slice Address */
+#define TW5864_START_NSLICE BIT(15)
+
+/*
+ * [15:0] Two bits for each channel (channel 0 ~ 7). Each two bits are the buffer
+ * pointer for the last encoded frame of the corresponding channel.
+ */
+#define TW5864_ENC_BUF_PTR_REC1 0x0010
+
+/* [5:0] DSP_MB_QP and [15:10] DSP_LPF_OFFSET */
+#define TW5864_DSP_QP 0x0018
+/* Define controls in register TW5864_DSP_QP */
+/* [5:0] H264 QP Value for codec */
+#define TW5864_DSP_MB_QP 0x003f
+/*
+ * [15:10] H264 LPF_OFFSET Address
+ * (Default 0)
+ */
+#define TW5864_DSP_LPF_OFFSET 0xfc00
+
+#define TW5864_DSP_CODEC 0x001c
+/* Define controls in register TW5864_DSP_CODEC */
+/*
+ * 0: Encode (TW5864 Default)
+ * 1: Decode
+ */
+#define TW5864_DSP_CODEC_MODE BIT(0)
+/*
+ * 0->3 4 VLC data buffer in DDR (1M each)
+ * 0->7 8 VLC data buffer in DDR (512k each)
+ */
+#define TW5864_VLC_BUF_ID (7 << 2)
+/*
+ * 0 4CIF in 1 MB
+ * 1 1CIF in 1 MB
+ */
+#define TW5864_CIF_MAP_MD BIT(6)
+/*
+ * 0 2 half D1 in 1 MB
+ * 1 1 half D1 in 1 MB
+ */
+#define TW5864_HD1_MAP_MD BIT(7)
+/* VLC Stream valid */
+#define TW5864_VLC_VLD BIT(8)
+/* MV Vector Valid */
+#define TW5864_MV_VECT_VLD BIT(9)
+/* MV Flag Valid */
+#define TW5864_MV_FLAG_VLD BIT(10)
+
+#define TW5864_DSP_SEN 0x0020
+/* Define controls in register TW5864_DSP_SEN */
+/* Org Buffer Base for Luma (default 0) */
+#define TW5864_DSP_SEN_PIC_LU 0x000f
+/* Org Buffer Base for Chroma (default 4) */
+#define TW5864_DSP_SEN_PIC_CHM 0x00f0
+/* Maximum Number of Buffers (default 4) */
+#define TW5864_DSP_SEN_PIC_MAX 0x0700
+/*
+ * Original Frame D1 or HD1 switch
+ * (Default 0)
+ */
+#define TW5864_DSP_SEN_HFULL 0x1000
+
+#define TW5864_DSP_REF_PIC 0x0024
+/* Define controls in register TW5864_DSP_REF_PIC */
+/* Ref Buffer Base for Luma (default 0) */
+#define TW5864_DSP_REF_PIC_LU 0x000f
+/* Ref Buffer Base for Chroma (default 4) */
+#define TW5864_DSP_REF_PIC_CHM 0x00f0
+/* Maximum Number of Buffers (default 4) */
+#define TW5864_DSP_REF_PIC_MAX 0x0700
+
+/* [15:0] SEN_EN_CH[n] SENIF original frame capture enable for each channel */
+#define TW5864_SEN_EN_CH 0x0028
+
+#define TW5864_DSP 0x002c
+/* Define controls in register TW5864_DSP */
+/* The ID for channel selected for encoding operation */
+#define TW5864_DSP_ENC_CHN 0x000f
+/* See DSP_MB_DELAY below */
+#define TW5864_DSP_MB_WAIT 0x0010
+/*
+ * DSP Chroma Switch
+ * 0 DDRB
+ * 1 DDRA
+ */
+#define TW5864_DSP_CHROM_SW 0x0020
+/* VLC Flow Control: 1 for enable */
+#define TW5864_DSP_FLW_CNTL 0x0040
+/*
+ * If DSP_MB_WAIT == 0, MB delay is DSP_MB_DELAY * 16
+ * If DSP_MB_WAIT == 1, MB delay is DSP_MB_DELAY * 128
+ */
+#define TW5864_DSP_MB_DELAY 0x0f00
+
+#define TW5864_DDR 0x0030
+/* Define controls in register TW5864_DDR */
+/* DDR Single Access Page Number */
+#define TW5864_DDR_PAGE_CNTL 0x00ff
+/* DDR-DPR Burst Read Enable */
+#define TW5864_DDR_BRST_EN BIT(13)
+/*
+ * DDR A/B Select as HOST access
+ * 0 Select DDRA
+ * 1 Select DDRB
+ */
+#define TW5864_DDR_AB_SEL BIT(14)
+/*
+ * DDR Access Mode Select
+ * 0 Single R/W Access (Host <-> DDR)
+ * 1 Burst R/W Access (Host <-> DPR)
+ */
+#define TW5864_DDR_MODE BIT(15)
+
+/* The original frame capture pointer. Two bits for each channel */
+/* SENIF_ORG_FRM_PTR [15:0] */
+#define TW5864_SENIF_ORG_FRM_PTR1 0x0038
+/* SENIF_ORG_FRM_PTR [31:16] */
+#define TW5864_SENIF_ORG_FRM_PTR2 0x003c
+
+#define TW5864_DSP_SEN_MODE 0x0040
+/* Define controls in register TW5864_DSP_SEN_MODE */
+#define TW5864_DSP_SEN_MODE_CH0 0x000f
+#define TW5864_DSP_SEN_MODE_CH1 0x00f0
+
+/*
+ * [15:0]: ENC_BUF_PTR_REC[31:16] Two bits for each channel (channel 8 ~ 15).
+ * Each two bits are the buffer pointer for the last encoded frame of a channel
+ */
+#define TW5864_ENC_BUF_PTR_REC2 0x004c
+
+/* Current MV Flag Status Pointer for Channel n. (Read only) */
+/*
+ * [1:0] CH0_MV_PTR, ..., [15:14] CH7_MV_PTR
+ */
+#define TW5864_CH_MV_PTR1 0x0060
+/*
+ * [1:0] CH8_MV_PTR, ..., [15:14] CH15_MV_PTR
+ */
+#define TW5864_CH_MV_PTR2 0x0064
+
+/*
+ * [15:0] Reset Current MV Flag Status Pointer for Channel n (one bit each)
+ */
+#define TW5864_RST_MV_PTR 0x0068
+#define TW5864_INTERLACING 0x0200
+/* Define controls in register TW5864_INTERLACING */
+/*
+ * Inter_Mode Start. Presumably the 2nd bit (it is missing from the
+ * datasheet). Without this bit set, the output video is interlaced (stripy).
+ */
+#define TW5864_DSP_INTER_ST BIT(1)
+/* Deinterlacer Enable */
+#define TW5864_DI_EN BIT(2)
+/*
+ * De-interlacer Mode
+ * 1 Shuffled frame
+ * 0 Normal Un-Shuffled Frame
+ */
+#define TW5864_DI_MD BIT(3)
+/*
+ * Down scale original frame in X direction
+ * 11: Un-used
+ * 10: down-sample to 1/4
+ * 01: down-sample to 1/2
+ * 00: down-sample disabled
+ */
+#define TW5864_DSP_DWN_X (3 << 4)
+/*
+ * Down scale original frame in Y direction
+ * 11: Un-used
+ * 10: down-sample to 1/4
+ * 01: down-sample to 1/2
+ * 00: down-sample disabled
+ */
+#define TW5864_DSP_DWN_Y (3 << 6)
+/*
+ * 1 Dual Stream
+ * 0 Single Stream
+ */
+#define TW5864_DUAL_STR BIT(8)
+
+#define TW5864_DSP_REF 0x0204
+/* Define controls in register TW5864_DSP_REF */
+/* Number of reference frames (Default 1 for TW5864B) */
+#define TW5864_DSP_REF_FRM 0x000f
+/* Window size */
+#define TW5864_DSP_WIN_SIZE 0x02f0
+
+#define TW5864_DSP_SKIP 0x0208
+/* Define controls in register TW5864_DSP_SKIP */
+/*
+ * Skip Offset Enable bit
+ * 0 DSP_SKIP_OFFSET value is not used (default 8)
+ * 1 DSP_SKIP_OFFSET value is used in HW
+ */
+#define TW5864_DSP_SKIP_OFEN 0x0080
+/* Skip mode cost offset (default 8) */
+#define TW5864_DSP_SKIP_OFFSET 0x007f
+
+#define TW5864_MOTION_SEARCH_ETC 0x020c
+/* Define controls in register TW5864_MOTION_SEARCH_ETC */
+/* Enable quarter pel search mode */
+#define TW5864_QPEL_EN BIT(0)
+/* Enable half pel search mode */
+#define TW5864_HPEL_EN BIT(1)
+/* Enable motion search mode */
+#define TW5864_ME_EN BIT(2)
+/* Enable Intra mode */
+#define TW5864_INTRA_EN BIT(3)
+/* Enable Skip Mode */
+#define TW5864_SKIP_EN BIT(4)
+/* Search Option (Default 2'b01) */
+#define TW5864_SRCH_OPT (3 << 5)
+
+#define TW5864_DSP_ENC_REC 0x0210
+/* Define controls in register TW5864_DSP_ENC_REC */
+/* Reference Buffer Pointer for encoding */
+#define TW5864_DSP_ENC_REF_PTR 0x0007
+/* Reconstruct Buffer pointer */
+#define TW5864_DSP_REC_BUF_PTR 0x7000
+
+/* [15:0] Lambda Value for H264 */
+#define TW5864_DSP_REF_MVP_LAMBDA 0x0214
+
+#define TW5864_DSP_PIC_MAX_MB 0x0218
+/* Define controls in register TW5864_DSP_PIC_MAX_MB */
+/* The MB number in Y direction for a frame */
+#define TW5864_DSP_PIC_MAX_MB_Y 0x007f
+/* The MB number in X direction for a frame */
+#define TW5864_DSP_PIC_MAX_MB_X 0x7f00
+
+/* The original frame pointer for encoding */
+#define TW5864_DSP_ENC_ORG_PTR_REG 0x021c
+/* Mask to use with TW5864_DSP_ENC_ORG_PTR */
+#define TW5864_DSP_ENC_ORG_PTR_MASK 0x7000
+/* Number of bits to shift with TW5864_DSP_ENC_ORG_PTR */
+#define TW5864_DSP_ENC_ORG_PTR_SHIFT 12
+
+/* DDR base address of OSD rectangle attribute data */
+#define TW5864_DSP_OSD_ATTRI_BASE 0x0220
+/* OSD enable bit for each channel */
+#define TW5864_DSP_OSD_ENABLE 0x0228
+
+/* 0x0280 ~ 0x029c - Motion Vector for 1st 4x4 Block, e.g., 80 (X), 84 (Y) */
+#define TW5864_ME_MV_VEC1 0x0280
+/* 0x02a0 ~ 0x02bc - Motion Vector for 2nd 4x4 Block, e.g., A0 (X), A4 (Y) */
+#define TW5864_ME_MV_VEC2 0x02a0
+/* 0x02c0 ~ 0x02dc - Motion Vector for 3rd 4x4 Block, e.g., C0 (X), C4 (Y) */
+#define TW5864_ME_MV_VEC3 0x02c0
+/* 0x02e0 ~ 0x02fc - Motion Vector for 4th 4x4 Block, e.g., E0 (X), E4 (Y) */
+#define TW5864_ME_MV_VEC4 0x02e0
+
+/*
+ * [5:0]
+ * if (intra16x16_cost < (intra4x4_cost+dsp_i4x4_offset))
+ * Intra_mode = intra16x16_mode
+ * Else
+ * Intra_mode = intra4x4_mode
+ */
+#define TW5864_DSP_I4x4_OFFSET 0x040c
+
+/*
+ * [6:4]
+ * 0x5 Only 4x4
+ * 0x6 Only 16x16
+ * 0x7 16x16 & 4x4
+ */
+#define TW5864_DSP_INTRA_MODE 0x0410
+#define TW5864_DSP_INTRA_MODE_SHIFT 4
+#define TW5864_DSP_INTRA_MODE_MASK (7 << 4)
+#define TW5864_DSP_INTRA_MODE_4x4 0x5
+#define TW5864_DSP_INTRA_MODE_16x16 0x6
+#define TW5864_DSP_INTRA_MODE_4x4_AND_16x16 0x7
+/*
+ * [5:0] WEIGHT Factor for I4x4 cost calculation (QP dependent)
+ */
+#define TW5864_DSP_I4x4_WEIGHT 0x0414
+
+/*
+ * [7:0] Offset used to affect Intra/ME model decision
+ * If (me_cost < intra_cost + dsp_resid_mode_offset)
+ * Pred_Mode = me_mode
+ * Else
+ * Pred_mode = intra_mode
+ */
+#define TW5864_DSP_RESID_MODE_OFFSET 0x0604
+
+/* 0x0800 ~ 0x09ff - Quantization TABLE Values */
+#define TW5864_QUAN_TAB 0x0800
+
+/* Valid channel value [0; f], frame value [0; 3] */
+#define TW5864_RT_CNTR_CH_FRM(channel, frame) \
+	(0x0c00 | ((channel) << 4) | ((frame) << 2))
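+/* e.g. TW5864_RT_CNTR_CH_FRM(2, 1) = 0x0c00 | 0x20 | 0x04 = 0x0c24 */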
+
+#define TW5864_FRAME_BUS1 0x0d00
+/*
+ * 1 Progressive in part A in bus n
+ * 0 Interlaced in part A in bus n
+ */
+#define TW5864_PROG_A BIT(0)
+/*
+ * 1 Progressive in part B in bus n
+ * 0 Interlaced in part B in bus n
+ */
+#define TW5864_PROG_B BIT(1)
+/*
+ * 1 Frame Mode in bus n
+ * 0 Field Mode in bus n
+ */
+#define TW5864_FRAME BIT(2)
+/*
+ * 0 4CIF in bus n
+ * 1 1 D1 + 3 CIF in bus n
+ * 2 2D1 in bus n
+ */
+#define TW5864_BUS_D1 (3 << 3)
+/* Bus 1 goes in TW5864_FRAME_BUS1 in [4:0] */
+/* Bus 2 goes in TW5864_FRAME_BUS1 in [12:8] */
+#define TW5864_FRAME_BUS2 0x0d04
+/* Bus 3 goes in TW5864_FRAME_BUS2 in [4:0] */
+/* Bus 4 goes in TW5864_FRAME_BUS2 in [12:8] */
+
+/* [15:0] Horizontal Mirror for channel n */
+#define TW5864_SENIF_HOR_MIR 0x0d08
+/* [15:0] Vertical Mirror for channel n */
+#define TW5864_SENIF_VER_MIR 0x0d0c
+
+/*
+ * FRAME_WIDTH_BUSn_A
+ * 0x15f: 4 CIF
+ * 0x2cf: 1 D1 + 3 CIF
+ * 0x2cf: 2 D1
+ * FRAME_WIDTH_BUSn_B
+ * 0x15f: 4 CIF
+ * 0x2cf: 1 D1 + 3 CIF
+ * 0x2cf: 2 D1
+ * FRAME_HEIGHT_BUSn_A
+ * 0x11f: 4CIF (PAL)
+ * 0x23f: 1D1 + 3CIF (PAL)
+ * 0x23f: 2 D1 (PAL)
+ * 0x0ef: 4CIF (NTSC)
+ * 0x1df: 1D1 + 3CIF (NTSC)
+ * 0x1df: 2 D1 (NTSC)
+ * FRAME_HEIGHT_BUSn_B
+ * 0x11f: 4CIF (PAL)
+ * 0x23f: 1D1 + 3CIF (PAL)
+ * 0x23f: 2 D1 (PAL)
+ * 0x0ef: 4CIF (NTSC)
+ * 0x1df: 1D1 + 3CIF (NTSC)
+ * 0x1df: 2 D1 (NTSC)
+ */
+#define TW5864_FRAME_WIDTH_BUS_A(bus) (0x0d10 + 0x0010 * (bus))
+#define TW5864_FRAME_WIDTH_BUS_B(bus) (0x0d14 + 0x0010 * (bus))
+#define TW5864_FRAME_HEIGHT_BUS_A(bus) (0x0d18 + 0x0010 * (bus))
+#define TW5864_FRAME_HEIGHT_BUS_B(bus) (0x0d1c + 0x0010 * (bus))
+
+/*
+ * 1: the bus mapped Channel n Full D1
+ * 0: the bus mapped Channel n Half D1
+ */
+#define TW5864_FULL_HALF_FLAG 0x0d50
+
+/*
+ * 0 The bus mapped Channel select partA Mode
+ * 1 The bus mapped Channel select partB Mode
+ */
+#define TW5864_FULL_HALF_MODE_SEL 0x0d54
+
+#define TW5864_VLC 0x1000
+/* Define controls in register TW5864_VLC */
+/* QP Value used by H264 CAVLC */
+#define TW5864_VLC_SLICE_QP 0x003f
+/*
+ * Swap byte order of VLC stream in d-word.
+ * 1 Normal (VLC output= [31:0])
+ * 0 Swap (VLC output={[23:16],[31:24],[7:0], [15:8]})
+ */
+#define TW5864_VLC_BYTE_SWP BIT(6)
+/* Enable Adding 03 circuit for VLC stream */
+#define TW5864_VLC_ADD03_EN BIT(7)
+/* Number of bits for VLC bit align */
+#define TW5864_VLC_BIT_ALIGN_SHIFT 8
+#define TW5864_VLC_BIT_ALIGN_MASK (0x1f << 8)
+/*
+ * Synchronous Interface select for VLC Stream
+ * 1 CDC_VLCS_MAS read VLC stream
+ * 0 CPU read VLC stream
+ */
+#define TW5864_VLC_INF_SEL BIT(13)
+/* Enable VLC overflow control */
+#define TW5864_VLC_OVFL_CNTL BIT(14)
+/*
+ * 1 PCI Master Mode
+ * 0 Non PCI Master Mode
+ */
+#define TW5864_VLC_PCI_SEL BIT(15)
+/*
+ * 0 Enable Adding 03 to VLC header and stream
+ * 1 Disable Adding 03 to VLC header of "00000001"
+ */
+#define TW5864_VLC_A03_DISAB BIT(16)
+/*
+ * Status of VLC stream in DDR (one bit for each buffer)
+ * 1 VLC is ready in buffer n (HW set)
+ * 0 VLC is not ready in buffer n (SW clear)
+ */
+#define TW5864_VLC_BUF_RDY_SHIFT 24
+#define TW5864_VLC_BUF_RDY_MASK (0xff << 24)
+
+/* Total number of bits in the slice */
+#define TW5864_SLICE_TOTAL_BIT 0x1004
+/* Total number of bits in the residue */
+#define TW5864_RES_TOTAL_BIT 0x1008
+
+#define TW5864_VLC_BUF 0x100c
+/* Define controls in register TW5864_VLC_BUF */
+/* VLC BK0 full status, write '1' to clear */
+#define TW5864_VLC_BK0_FULL BIT(0)
+/* VLC BK1 full status, write '1' to clear */
+#define TW5864_VLC_BK1_FULL BIT(1)
+/* VLC end slice status, write '1' to clear */
+#define TW5864_VLC_END_SLICE BIT(2)
+/* VLC Buffer overflow status, write '1' to clear */
+#define TW5864_DSP_RD_OF BIT(3)
+/* VLC string length in either buffer 0 or 1 at end of frame */
+#define TW5864_VLC_STREAM_LEN_SHIFT 4
+#define TW5864_VLC_STREAM_LEN_MASK (0x1ff << 4)
+
+/* [15:0] Total coefficient number in a frame */
+#define TW5864_TOTAL_COEF_NO 0x1010
+/* [0] VLC Encoder Interrupt. Write '1' to clear */
+#define TW5864_VLC_DSP_INTR 0x1014
+/* [31:0] VLC stream CRC checksum */
+#define TW5864_VLC_STREAM_CRC 0x1018
+
+#define TW5864_VLC_RD 0x101c
+/* Define controls in register TW5864_VLC_RD */
+/*
+ * 1 Read VLC lookup Memory
+ * 0 Read VLC Stream Memory
+ */
+#define TW5864_VLC_RD_MEM BIT(0)
+/*
+ * 1 Read VLC Stream Memory in burst mode
+ * 0 Read VLC Stream Memory in single mode
+ */
+#define TW5864_VLC_RD_BRST BIT(1)
+
+/* 0x2000 ~ 0x2ffc -- H264 Stream Memory Map */
+/*
+ * A word is 4 bytes. I.e.,
+ * VLC_STREAM_MEM[0] address: 0x2000
+ * VLC_STREAM_MEM[1] address: 0x2004
+ * ...
+ * VLC_STREAM_MEM[3FF] address: 0x2ffc
+ */
+#define TW5864_VLC_STREAM_MEM_START 0x2000
+#define TW5864_VLC_STREAM_MEM_MAX_OFFSET 0x3ff
+#define TW5864_VLC_STREAM_MEM(offset) (TW5864_VLC_STREAM_MEM_START + 4 * (offset))
+
+/* 0x4000 ~ 0x4ffc -- Audio Register Map */
+/* [31:0] config 1ms cnt = Realtime clk/1000 */
+#define TW5864_CFG_1MS_CNT 0x4000
+
+#define TW5864_ADPCM 0x4004
+/* Define controls in register TW5864_ADPCM */
+/* ADPCM decoder enable */
+#define TW5864_ADPCM_DEC BIT(0)
+/* ADPCM input data enable */
+#define TW5864_ADPCM_IN_DATA BIT(1)
+/* ADPCM encoder enable */
+#define TW5864_ADPCM_ENC BIT(2)
+
+#define TW5864_AUD 0x4008
+/* Define controls in register TW5864_AUD */
+/* Record path PCM Audio enable bit for each channel */
+#define TW5864_AUD_ORG_CH_EN 0x00ff
+/* Speaker path PCM Audio Enable */
+#define TW5864_SPK_ORG_EN BIT(16)
+/*
+ * 0 16bit
+ * 1 8bit
+ */
+#define TW5864_AD_BIT_MODE BIT(17)
+#define TW5864_AUD_TYPE_SHIFT 18
+/*
+ * 0 PCM
+ * 3 ADPCM
+ */
+#define TW5864_AUD_TYPE (0xf << 18)
+#define TW5864_AUD_SAMPLE_RATE_SHIFT 22
+/*
+ * 0 8K
+ * 1 16K
+ */
+#define TW5864_AUD_SAMPLE_RATE (3 << 22)
+/* Channel ID used to select audio channel (0 to 16) for loopback */
+#define TW5864_TESTLOOP_CHID_SHIFT 24
+#define TW5864_TESTLOOP_CHID (0x1f << 24)
+/* Enable AD Loopback Test */
+#define TW5864_TEST_ADLOOP_EN BIT(30)
+/*
+ * 0 Asynchronous Mode or PCI target mode
+ * 1 PCI Initiator Mode
+ */
+#define TW5864_AUD_MODE BIT(31)
+
+#define TW5864_AUD_ADPCM 0x400c
+/* Define controls in register TW5864_AUD_ADPCM */
+/* Record path ADPCM audio channel enable, one bit for each */
+#define TW5864_AUD_ADPCM_CH_EN 0x00ff
+/* Speaker path ADPCM audio channel enable */
+#define TW5864_SPK_ADPCM_EN BIT(16)
+
+#define TW5864_PC_BLOCK_ADPCM_RD_NO 0x4018
+#define TW5864_PC_BLOCK_ADPCM_RD_NO_MASK 0x1f
+
+/*
+ * For ADPCM_ENC_WR_PTR, ADPCM_ENC_RD_PTR (see below):
+ * Bit[2:0] ch0
+ * Bit[5:3] ch1
+ * Bit[8:6] ch2
+ * Bit[11:9] ch3
+ * Bit[14:12] ch4
+ * Bit[17:15] ch5
+ * Bit[20:18] ch6
+ * Bit[23:21] ch7
+ * Bit[26:24] ch8
+ * Bit[29:27] ch9
+ * Bit[32:30] ch10
+ * Bit[35:33] ch11
+ * Bit[38:36] ch12
+ * Bit[41:39] ch13
+ * Bit[44:42] ch14
+ * Bit[47:45] ch15
+ * Bit[50:48] ch16
+ */
+#define TW5864_ADPCM_ENC_XX_MASK 0x3fff
+#define TW5864_ADPCM_ENC_XX_PTR2_SHIFT 30
+/* ADPCM_ENC_WR_PTR[29:0] */
+#define TW5864_ADPCM_ENC_WR_PTR1 0x401c
+/* ADPCM_ENC_WR_PTR[50:30] */
+#define TW5864_ADPCM_ENC_WR_PTR2 0x4020
+
+/* ADPCM_ENC_RD_PTR[29:0] */
+#define TW5864_ADPCM_ENC_RD_PTR1 0x4024
+/* ADPCM_ENC_RD_PTR[50:30] */
+#define TW5864_ADPCM_ENC_RD_PTR2 0x4028
+
+/* [3:0] rd ch0, [7:4] rd ch1, [11:8] wr ch0, [15:12] wr ch1 */
+#define TW5864_ADPCM_DEC_RD_WR_PTR 0x402c
+
+/*
+ * For TW5864_AD_ORIG_WR_PTR, TW5864_AD_ORIG_RD_PTR:
+ * Bit[3:0] ch0
+ * Bit[7:4] ch1
+ * Bit[11:8] ch2
+ * Bit[15:12] ch3
+ * Bit[19:16] ch4
+ * Bit[23:20] ch5
+ * Bit[27:24] ch6
+ * Bit[31:28] ch7
+ * Bit[35:32] ch8
+ * Bit[39:36] ch9
+ * Bit[43:40] ch10
+ * Bit[47:44] ch11
+ * Bit[51:48] ch12
+ * Bit[55:52] ch13
+ * Bit[59:56] ch14
+ * Bit[63:60] ch15
+ * Bit[67:64] ch16
+ */
+/* AD_ORIG_WR_PTR[31:0] */
+#define TW5864_AD_ORIG_WR_PTR1 0x4030
+/* AD_ORIG_WR_PTR[63:32] */
+#define TW5864_AD_ORIG_WR_PTR2 0x4034
+/* AD_ORIG_WR_PTR[67:64] */
+#define TW5864_AD_ORIG_WR_PTR3 0x4038
+
+/* AD_ORIG_RD_PTR[31:0] */
+#define TW5864_AD_ORIG_RD_PTR1 0x403c
+/* AD_ORIG_RD_PTR[63:32] */
+#define TW5864_AD_ORIG_RD_PTR2 0x4040
+/* AD_ORIG_RD_PTR[67:64] */
+#define TW5864_AD_ORIG_RD_PTR3 0x4044
+
+#define TW5864_PC_BLOCK_ORIG_RD_NO 0x4048
+#define TW5864_PC_BLOCK_ORIG_RD_NO_MASK 0x1f
+
+#define TW5864_PCI_AUD 0x404c
+/* Define controls in register TW5864_PCI_AUD */
+/*
+ * The register is applicable to PCI initiator mode only. Used to select PCM(0)
+ * or ADPCM(1) audio data sent to PC. One bit for each channel
+ */
+#define TW5864_PCI_DATA_SEL 0xffff
+/*
+ * Audio flow control mode selection bit.
+ * 0 Flow control disabled. TW5864 continuously sends audio frames to PC
+ * (initiator mode)
+ * 1 Flow control enabled
+ */
+#define TW5864_PCI_FLOW_EN BIT(16)
+/*
+ * When PCI_FLOW_EN is set, the host must toggle this bit to make the device
+ * send an audio frame to the PC. One toggle sends one frame.
+ */
+#define TW5864_PCI_AUD_FRM_EN BIT(17)
+
+/* [1:0] CS valid to data valid CLK cycles when writing operation */
+#define TW5864_CS2DAT_CNT 0x8000
+/* [2:0] Data valid signal width by system clock cycles */
+#define TW5864_DATA_VLD_WIDTH 0x8004
+
+#define TW5864_SYNC 0x8008
+/* Define controls in register TW5864_SYNC */
+/*
+ * 0 VLC stream to synchronous port
+ * 1 VLC stream to DDR buffers
+ */
+#define TW5864_SYNC_CFG BIT(7)
+/*
+ * 0 SYNC Address sampled on Rising edge
+ * 1 SYNC Address sampled on Falling edge
+ */
+#define TW5864_SYNC_ADR_EDGE BIT(0)
+#define TW5864_VLC_STR_DELAY_SHIFT 1
+/*
+ * 0 No system delay
+ * 1 One system clock delay
+ * 2 Two system clock delay
+ * 3 Three system clock delay
+ */
+#define TW5864_VLC_STR_DELAY (3 << 1)
+/*
+ * 0 Rising edge output
+ * 1 Falling edge output
+ */
+#define TW5864_VLC_OUT_EDGE BIT(3)
+
+/*
+ * [1:0]
+ * 2'b00 phase set to 180 degrees
+ * 2'b01 phase set to 270 degrees
+ * 2'b10 phase set to 0 degrees
+ * 2'b11 phase set to 90 degrees
+ */
+#define TW5864_I2C_PHASE_CFG 0x800c
+
+/*
+ * The system / DDR clock (166 MHz) is generated with an on-chip system clock
+ * PLL (SYSPLL) using input crystal clock of 27 MHz. The system clock PLL
+ * frequency is controlled with the following equation.
+ * CLK_OUT = CLK_IN * (M+1) / ((N+1) * P)
+ * SYSPLL_M M parameter
+ * SYSPLL_N N parameter
+ * SYSPLL_P P parameter
+ */
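+/*
+ * For illustration only (these parameter values are assumptions, not taken
+ * from the datasheet): with CLK_IN = 27 MHz, M = 36, N = 2, P = 2,
+ * CLK_OUT = 27 * 37 / (3 * 2) = 166.5 MHz, i.e. close to the nominal
+ * 166 MHz system clock.
+ */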
+/* SYSPLL_M[7:0] */
+#define TW5864_SYSPLL1 0x8018
+/* Define controls in register TW5864_SYSPLL1 */
+#define TW5864_SYSPLL_M_LOW 0x00ff
+
+/* [2:0]: SYSPLL_M[10:8], [7:3]: SYSPLL_N[4:0] */
+#define TW5864_SYSPLL2 0x8019
+/* Define controls in register TW5864_SYSPLL2 */
+#define TW5864_SYSPLL_M_HI 0x07
+#define TW5864_SYSPLL_N_LOW_SHIFT 3
+#define TW5864_SYSPLL_N_LOW (0x1f << 3)
+
+/*
+ * [1:0]: SYSPLL_N[6:5], [3:2]: SYSPLL_P, [4]: SYSPLL_IREF, [7:5]: SYSPLL_CP_SEL
+ */
+#define TW5864_SYSPLL3 0x8020
+/* Define controls in register TW5864_SYSPLL3 */
+#define TW5864_SYSPLL_N_HI 0x03
+#define TW5864_SYSPLL_P_SHIFT 2
+#define TW5864_SYSPLL_P (0x03 << 2)
+/*
+ * SYSPLL bias current control
+ * 0 Lower current (default)
+ * 1 30% higher current
+ */
+#define TW5864_SYSPLL_IREF BIT(4)
+/*
+ * SYSPLL charge pump current selection
+ * 0 1.5 uA
+ * 1 4 uA
+ * 2 9 uA
+ * 3 19 uA
+ * 4 39 uA
+ * 5 79 uA
+ * 6 159 uA
+ * 7 319 uA
+ */
+#define TW5864_SYSPLL_CP_SEL_SHIFT 5
+#define TW5864_SYSPLL_CP_SEL (0x07 << 5)
+
+/*
+ * [1:0]: SYSPLL_VCO, [3:2]: SYSPLL_LP_X8, [5:4]: SYSPLL_ICP_SEL,
+ * [6]: SYSPLL_LPF_5PF, [7]: SYSPLL_ED_SEL
+ */
+#define TW5864_SYSPLL4 0x8021
+/* Define controls in register TW5864_SYSPLL4 */
+/*
+ * SYSPLL_VCO VCO Range selection
+ * 00 5 ~ 75 MHz
+ * 01 50 ~ 140 MHz
+ * 10 110 ~ 320 MHz
+ * 11 270 ~ 700 MHz
+ */
+#define TW5864_SYSPLL_VCO 0x03
+#define TW5864_SYSPLL_LP_X8_SHIFT 2
+/*
+ * Loop resistor
+ * 0 38.5K ohms
+ * 1 6.6K ohms (default)
+ * 2 2.2K ohms
+ * 3 1.1K ohms
+ */
+#define TW5864_SYSPLL_LP_X8 (0x03 << 2)
+#define TW5864_SYSPLL_ICP_SEL_SHIFT 4
+/*
+ * PLL charge pump fine tune
+ * 00 x1 (default)
+ * 01 x1/2
+ * 10 x1/7
+ * 11 x1/8
+ */
+#define TW5864_SYSPLL_ICP_SEL (0x03 << 4)
+/*
+ * PLL low pass filter phase margin adjustment
+ * 0 no 5pF (default)
+ * 1 5pF added
+ */
+#define TW5864_SYSPLL_LPF_5PF BIT(6)
+/*
+ * PFD select edge for detection
+ * 0 Falling edge (default)
+ * 1 Rising edge
+ */
+#define TW5864_SYSPLL_ED_SEL BIT(7)
+
+/* [0]: SYSPLL_RST, [4]: SYSPLL_PD */
+#define TW5864_SYSPLL5 0x8024
+/* Define controls in register TW5864_SYSPLL5 */
+/* Reset SYSPLL */
+#define TW5864_SYSPLL_RST BIT(0)
+/* Power down SYSPLL */
+#define TW5864_SYSPLL_PD BIT(4)
+
+#define TW5864_PLL_CFG 0x801c
+/* Define controls in register TW5864_PLL_CFG */
+/*
+ * Issue Soft Reset from Async Host Interface / PCI Interface clock domain.
+ * Become valid after sync to the xtal clock domain. This bit is set only if
+ * LOAD register bit is also set to 1.
+ */
+#define TW5864_SRST BIT(0)
+/*
+ * Issue SYSPLL (166 MHz) configuration latch from Async host interface / PCI
+ * Interface clock domain. The configuration setting becomes effective only if
+ * LOAD register bit is also set to 1.
+ */
+#define TW5864_SYSPLL_CFG BIT(2)
+/*
+ * Issue SPLL (108 MHz) configuration load from Async host interface / PCI
+ * Interface clock domain. The configuration setting becomes effective only if
+ * the LOAD register bit is also set to 1.
+ */
+#define TW5864_SPLL_CFG BIT(4)
+/*
+ * Set this bit to latch the SRST, SYSPLL_CFG, SPLL_CFG setting into the xtal
+ * clock domain to restart the PLL. This bit is self cleared.
+ */
+#define TW5864_LOAD BIT(3)
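+/*
+ * Minimal usage sketch (assuming the tw_writel() helper declared in
+ * tw5864.h): to latch a new SYSPLL configuration, combine the configuration
+ * strobe with LOAD in a single write:
+ *
+ *   tw_writel(TW5864_PLL_CFG, TW5864_SYSPLL_CFG | TW5864_LOAD);
+ */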
+
+/* SPLL_IREF, SPLL_LPX4, SPLL_CPX4, SPLL_PD, SPLL_DBG */
+#define TW5864_SPLL 0x8028
+
+/* 0x8800 ~ 0x88fc -- Interrupt Register Map */
+/*
+ * Trigger mode of interrupt source 0 ~ 15
+ * 1 Edge trigger mode
+ * 0 Level trigger mode
+ */
+#define TW5864_TRIGGER_MODE_L 0x8800
+/* Trigger mode of interrupt source 16 ~ 31 */
+#define TW5864_TRIGGER_MODE_H 0x8804
+/* Enable of interrupt source 0 ~ 15 */
+#define TW5864_INTR_ENABLE_L 0x8808
+/* Enable of interrupt source 16 ~ 31 */
+#define TW5864_INTR_ENABLE_H 0x880c
+/* Clear interrupt command of interrupt source 0 ~ 15 */
+#define TW5864_INTR_CLR_L 0x8810
+/* Clear interrupt command of interrupt source 16 ~ 31 */
+#define TW5864_INTR_CLR_H 0x8814
+/*
+ * Assertion of interrupt source 0 ~ 15
+ * 1 High level or pos-edge is assertion
+ * 0 Low level or neg-edge is assertion
+ */
+#define TW5864_INTR_ASSERT_L 0x8818
+/* Assertion of interrupt source 16 ~ 31 */
+#define TW5864_INTR_ASSERT_H 0x881c
+/*
+ * Output level of interrupt
+ * 1 Interrupt output is high assertion
+ * 0 Interrupt output is low assertion
+ */
+#define TW5864_INTR_OUT_LEVEL 0x8820
+/*
+ * Status of interrupt source 0 ~ 15
+ * Bit[0]: VLC 4k RAM interrupt
+ * Bit[1]: BURST DDR RAM interrupt
+ * Bit[2]: MV DSP interrupt
+ * Bit[3]: video lost interrupt
+ * Bit[4]: gpio 0 interrupt
+ * Bit[5]: gpio 1 interrupt
+ * Bit[6]: gpio 2 interrupt
+ * Bit[7]: gpio 3 interrupt
+ * Bit[8]: gpio 4 interrupt
+ * Bit[9]: gpio 5 interrupt
+ * Bit[10]: gpio 6 interrupt
+ * Bit[11]: gpio 7 interrupt
+ * Bit[12]: JPEG interrupt
+ * Bit[13:15]: Reserved
+ */
+#define TW5864_INTR_STATUS_L 0x8838
+/*
+ * Status of interrupt source 16 ~ 31
+ * Bit[0]: Reserved
+ * Bit[1]: VLC done interrupt
+ * Bit[2]: Reserved
+ * Bit[3]: AD Vsync interrupt
+ * Bit[4]: Preview eof interrupt
+ * Bit[5]: Preview overflow interrupt
+ * Bit[6]: Timer interrupt
+ * Bit[7]: Reserved
+ * Bit[8]: Audio eof interrupt
+ * Bit[9]: I2C done interrupt
+ * Bit[10]: AD interrupt
+ * Bit[11:15]: Reserved
+ */
+#define TW5864_INTR_STATUS_H 0x883c
+
+/* Defines of interrupt bits, united for both low and high word registers */
+#define TW5864_INTR_VLC_RAM BIT(0)
+#define TW5864_INTR_BURST BIT(1)
+#define TW5864_INTR_MV_DSP BIT(2)
+#define TW5864_INTR_VIN_LOST BIT(3)
+/* n belongs to [0; 7] */
+#define TW5864_INTR_GPIO(n) (1 << (4 + n))
+#define TW5864_INTR_JPEG BIT(12)
+#define TW5864_INTR_VLC_DONE BIT(17)
+#define TW5864_INTR_AD_VSYNC BIT(19)
+#define TW5864_INTR_PV_EOF BIT(20)
+#define TW5864_INTR_PV_OVERFLOW BIT(21)
+#define TW5864_INTR_TIMER BIT(22)
+#define TW5864_INTR_AUD_EOF BIT(24)
+#define TW5864_INTR_I2C_DONE BIT(25)
+#define TW5864_INTR_AD BIT(26)
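+/*
+ * The united bit numbers map onto the L/H register pairs as bits [15:0] and
+ * [31:16] respectively. A minimal sketch (assuming the tw_setl() helper from
+ * tw5864.h) of unmasking the timer interrupt, bit 22, i.e. bit 6 of the high
+ * word:
+ *
+ *   tw_setl(TW5864_INTR_ENABLE_H, TW5864_INTR_TIMER >> 16);
+ */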
+
+/* 0x9000 ~ 0x920c -- Video Capture (VIF) Register Map */
+/*
+ * H264EN_CH_STATUS[n] Status of Vsync synchronized H264EN_CH_EN (Read Only)
+ * 1 Channel Enabled
+ * 0 Channel Disabled
+ */
+#define TW5864_H264EN_CH_STATUS 0x9000
+/*
+ * [15:0] H264EN_CH_EN[n] H264 Encoding Path Enable for channel
+ * 1 Channel Enabled
+ * 0 Channel Disabled
+ */
+#define TW5864_H264EN_CH_EN 0x9004
+/*
+ * H264EN_CH_DNS[n] H264 Encoding Path Downscale Video Decoder Input for
+ * channel n
+ * 1 Downscale Y to 1/2
+ * 0 Does not downscale
+ */
+#define TW5864_H264EN_CH_DNS 0x9008
+/*
+ * H264EN_CH_PROG[n] H264 Encoding Path channel n is progressive
+ * 1 Progressive (Not valid for TW5864)
+ * 0 Interlaced (TW5864 default)
+ */
+#define TW5864_H264EN_CH_PROG 0x900c
+/*
+ * [3:0] H264EN_BUS_MAX_CH[n]
+ * H264 Encoding Path maximum number of channels on BUS n
+ * 0 Max 4 channels
+ * 1 Max 2 channels
+ */
+#define TW5864_H264EN_BUS_MAX_CH 0x9010
+
+/*
+ * H264EN_RATE_MAX_LINE_n H264 Encoding path Rate Mapping Maximum Line Number
+ * on Bus n
+ */
+#define TW5864_H264EN_RATE_MAX_LINE_EVEN 0x1f
+#define TW5864_H264EN_RATE_MAX_LINE_ODD_SHIFT 5
+#define TW5864_H264EN_RATE_MAX_LINE_ODD (0x1f << 5)
+/*
+ * [4:0] H264EN_RATE_MAX_LINE_0
+ * [9:5] H264EN_RATE_MAX_LINE_1
+ */
+#define TW5864_H264EN_RATE_MAX_LINE_REG1 0x9014
+/*
+ * [4:0] H264EN_RATE_MAX_LINE_2
+ * [9:5] H264EN_RATE_MAX_LINE_3
+ */
+#define TW5864_H264EN_RATE_MAX_LINE_REG2 0x9018
+
+/*
+ * H264EN_CHn_FMT H264 Encoding Path Format configuration of Channel n
+ * 00 D1 (For D1 and hD1 frame)
+ * 01 (Reserved)
+ * 10 (Reserved)
+ * 11 D1 with 1/2 size in X (for CIF frame)
+ * Note: To be used with 0x9008 register to configure the frame size
+ */
+/*
+ * [1:0]: H264EN_CH0_FMT,
+ * ..., [15:14]: H264EN_CH7_FMT
+ */
+#define TW5864_H264EN_CH_FMT_REG1 0x9020
+/*
+ * [1:0]: H264EN_CH8_FMT (?),
+ * ..., [15:14]: H264EN_CH15_FMT (?)
+ */
+#define TW5864_H264EN_CH_FMT_REG2 0x9024
+
+/*
+ * H264EN_RATE_CNTL_BUSm_CHn H264 Encoding Path BUS m Rate Control for Channel n
+ */
+#define TW5864_H264EN_RATE_CNTL_LO_WORD(bus, channel) \
+ (0x9100 + bus * 0x20 + channel * 0x08)
+#define TW5864_H264EN_RATE_CNTL_HI_WORD(bus, channel) \
+ (0x9104 + bus * 0x20 + channel * 0x08)
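+/*
+ * Worked example: TW5864_H264EN_RATE_CNTL_LO_WORD(1, 2) expands to
+ * 0x9100 + 1 * 0x20 + 2 * 0x08 = 0x9130.
+ */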
+
+/*
+ * H264EN_BUSm_MAP_CHn The 16-to-1 MUX configuration register for each encoding
+ * channel (total of 16 channels). Four bits for each channel.
+ */
+#define TW5864_H264EN_BUS0_MAP 0x9200
+#define TW5864_H264EN_BUS1_MAP 0x9204
+#define TW5864_H264EN_BUS2_MAP 0x9208
+#define TW5864_H264EN_BUS3_MAP 0x920c
+
+/* Not defined in the datasheet, but used in the reference driver */
+#define TW5864_UNDECLARED_ERROR_FLAGS_0x9218 0x9218
+
+#define TW5864_GPIO1 0x9800
+#define TW5864_GPIO2 0x9804
+/* Define controls in registers TW5864_GPIO1, TW5864_GPIO2 */
+/* GPIO DATA of Group n */
+#define TW5864_GPIO_DATA 0x00ff
+#define TW5864_GPIO_OEN_SHIFT 8
+/* GPIO Output Enable of Group n */
+#define TW5864_GPIO_OEN (0xff << 8)
+
+/* 0xa000 ~ 0xa8ff -- DDR Controller Register Map */
+/* DDR Controller A */
+/*
+ * [2:0] Data valid counter after a read command to DDR. This delay value
+ * indicates how many cycles after a read command is issued the data comes
+ * back from DDR.
+ */
+#define TW5864_RD_ACK_VLD_MUX 0xa000
+
+#define TW5864_DDR_PERIODS 0xa004
+/* Define controls in register TW5864_DDR_PERIODS */
+/*
+ * Tras value, the minimum cycle of active to precharge command period,
+ * default is 7
+ */
+#define TW5864_TRAS_CNT_MAX 0x000f
+/*
+ * Trfc value, the minimum cycle of refresh to active or refresh command period,
+ * default is 4"hf
+ */
+#define TW5864_RFC_CNT_MAX_SHIFT 8
+#define TW5864_RFC_CNT_MAX (0x0f << 8)
+/*
+ * Trcd value, the minimum cycle of active to internal read/write command
+ * period, default is 4"h2
+ */
+#define TW5864_TCD_CNT_MAX_SHIFT 4
+#define TW5864_TCD_CNT_MAX (0x0f << 4)
+/* Twr value, write recovery time, default is 4'h3 */
+#define TW5864_TWR_CNT_MAX_SHIFT 12
+#define TW5864_TWR_CNT_MAX (0x0f << 12)
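+/*
+ * Worked example using the defaults quoted above (Tras = 7, Trcd = 4'h2,
+ * Trfc = 4'hf, Twr = 4'h3): the packed register value would be
+ * (0x3 << 12) | (0xf << 8) | (0x2 << 4) | 0x7 = 0x3f27.
+ */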
+
+/*
+ * [2:0] CAS latency, the delay cycle between internal read command and the
+ * availability of the first bit of output data, default is 3
+ */
+#define TW5864_CAS_LATENCY 0xa008
+/*
+ * [15:0] Maximum average periodic refresh, the value is based on the current
+ * frequency to match 7.8 us
+ */
+#define TW5864_DDR_REF_CNTR_MAX 0xa00c
+/*
+ * DDR_ON_CHIP_MAP [1:0]
+ * 0 256M DDR on board
+ * 1 512M DDR on board
+ * 2 1G DDR on board
+ * DDR_ON_CHIP_MAP [2]
+ * 0 Only one DDR chip
+ * 1 Two DDR chips
+ */
+#define TW5864_DDR_ON_CHIP_MAP 0xa01c
+#define TW5864_DDR_SELFTEST_MODE 0xa020
+/* Define controls in register TW5864_DDR_SELFTEST_MODE */
+/*
+ * 0 Common read/write mode
+ * 1 DDR self-test mode
+ */
+#define TW5864_MASTER_MODE BIT(0)
+/*
+ * 0 DDR self-test single read/write
+ * 1 DDR self-test burst read/write
+ */
+#define TW5864_SINGLE_PROC BIT(1)
+/*
+ * 0 DDR self-test write command
+ * 1 DDR self-test read command
+ */
+#define TW5864_WRITE_FLAG BIT(2)
+#define TW5864_DATA_MODE_SHIFT 4
+/*
+ * 0 write 32'haaaa5555 to DDR
+ * 1 write 32'hffffffff to DDR
+ * 2 write 32'ha5a55a5a to DDR
+ * 3 write increasing data to DDR
+ */
+#define TW5864_DATA_MODE (0x3 << 4)
+
+/* [7:0] The maximum data of one burst in DDR self-test mode */
+#define TW5864_BURST_CNTR_MAX 0xa024
+/* [15:0] The maximum burst counter (bit 15~0) in DDR self-test mode */
+#define TW5864_DDR_PROC_CNTR_MAX_L 0xa028
+/* The maximum burst counter (bit 31~16) in DDR self-test mode */
+#define TW5864_DDR_PROC_CNTR_MAX_H 0xa02c
+/* [0]: Start one DDR self-test */
+#define TW5864_DDR_SELF_TEST_CMD 0xa030
+/* The maximum error counter (bit 15 ~ 0) in DDR self-test */
+#define TW5864_ERR_CNTR_L 0xa034
+
+#define TW5864_ERR_CNTR_H_AND_FLAG 0xa038
+/* Define controls in register TW5864_ERR_CNTR_H_AND_FLAG */
+/* The maximum error counter (bit 30 ~ 16) in DDR self-test */
+#define TW5864_ERR_CNTR_H_MASK 0x3fff
+/* DDR self-test end flag */
+#define TW5864_END_FLAG 0x8000
+
+/*
+ * DDR Controller B: same as 0xa000 ~ 0xa038, but add TW5864_DDR_B_OFFSET to all
+ * addresses
+ */
+#define TW5864_DDR_B_OFFSET 0x0800
+
+/* 0xb004 ~ 0xb018 -- HW version/ARB12 Register Map */
+/* [15:0] Default is C013 */
+#define TW5864_HW_VERSION 0xb004
+
+#define TW5864_REQS_ENABLE 0xb010
+/* Define controls in register TW5864_REQS_ENABLE */
+/* Audio data in to DDR enable (default 1) */
+#define TW5864_AUD_DATA_IN_ENB BIT(0)
+/* Audio encode request to DDR enable (default 1) */
+#define TW5864_AUD_ENC_REQ_ENB BIT(1)
+/* Audio decode request0 to DDR enable (default 1) */
+#define TW5864_AUD_DEC_REQ0_ENB BIT(2)
+/* Audio decode request1 to DDR enable (default 1) */
+#define TW5864_AUD_DEC_REQ1_ENB BIT(3)
+/* VLC stream request to DDR enable (default 1) */
+#define TW5864_VLC_STRM_REQ_ENB BIT(4)
+/* H264 MV request to DDR enable (default 1) */
+#define TW5864_DVM_MV_REQ_ENB BIT(5)
+/* mux_core MVD request to DDR enable (default 1) */
+#define TW5864_MVD_REQ_ENB BIT(6)
+/* mux_core MVD temp data request to DDR enable (default 1) */
+#define TW5864_MVD_TMP_REQ_ENB BIT(7)
+/* JPEG request to DDR enable (default 1) */
+#define TW5864_JPEG_REQ_ENB BIT(8)
+/* mv_flag request to DDR enable (default 1) */
+#define TW5864_MV_FLAG_REQ_ENB BIT(9)
+
+#define TW5864_ARB12 0xb018
+/* Define controls in register TW5864_ARB12 */
+/* ARB12 Enable (default 1) */
+#define TW5864_ARB12_ENB BIT(15)
+/* ARB12 maximum value of time out counter (default 15'h1FF) */
+#define TW5864_ARB12_TIME_OUT_CNT 0x7fff
+
+/* 0xb800 ~ 0xb80c -- Indirect Access Register Map */
+/*
+ * The spec says that in order to access the indirect register space, the
+ * following procedure is to be followed. However, the reference driver
+ * implementation, and the current driver too, do it differently.
+ *
+ * Write Registers:
+ * (1) Write IND_DATA at 0xb804 ~ 0xb807
+ * (2) Read BUSY flag from 0xb803. Wait until BUSY signal is 0.
+ * (3) Write IND_ADDR at 0xb800 ~ 0xb801. Set R/W to "1", ENABLE to "1"
+ * Read Registers:
+ * (1) Read BUSY flag from 0xb803. Wait until BUSY signal is 0.
+ * (2) Write IND_ADDR at 0xb800 ~ 0xb801. Set R/W to "0", ENABLE to "1"
+ * (3) Read BUSY flag from 0xb803. Wait until BUSY signal is 0.
+ * (4) Read IND_DATA from 0xb804 ~ 0xb807
+ */
+#define TW5864_IND_CTL 0xb800
+/* Define controls in register TW5864_IND_CTL */
+/* Address used to access indirect register space */
+#define TW5864_IND_ADDR 0x0000ffff
+/* Wait until this bit is "0" before using indirect access */
+#define TW5864_BUSY BIT(31)
+/* Activate the indirect access. This bit is self cleared */
+#define TW5864_ENABLE BIT(25)
+/* Read/Write command */
+#define TW5864_RW BIT(24)
+
+/* [31:0] Data used to read/write indirect register space */
+#define TW5864_IND_DATA 0xb804
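+/*
+ * The tw5864_indir_readb()/tw5864_indir_writeb() helpers in tw5864-util.c
+ * implement the access sequence described above.
+ */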
+
+/* 0xc000 ~ 0xc7fc -- Preview Register Map */
+/* Mostly skipped this section. */
+/*
+ * [15:0] Status of Vsync Synchronized PCI_PV_CH_EN (Read Only)
+ * 1 Channel Enabled
+ * 0 Channel Disabled
+ */
+#define TW5864_PCI_PV_CH_STATUS 0xc000
+/*
+ * [15:0] PCI Preview Path Enable for channel n
+ * 1 Channel Enable
+ * 0 Channel Disable
+ */
+#define TW5864_PCI_PV_CH_EN 0xc004
+
+/* 0xc800 ~ 0xc804 -- JPEG Capture Register Map */
+/* Skipped. */
+/* 0xd000 ~ 0xd0fc -- JPEG Control Register Map */
+/* Skipped. */
+
+/* 0xe000 ~ 0xfc04 -- Motion Vector Register Map */
+
+/* ME Motion Vector data (Four Byte Each) 0xe000 ~ 0xe7fc */
+#define TW5864_ME_MV_VEC_START 0xe000
+#define TW5864_ME_MV_VEC_MAX_OFFSET 0x1ff
+#define TW5864_ME_MV_VEC(offset) (TW5864_ME_MV_VEC_START + 4 * offset)
+
+#define TW5864_MV 0xfc00
+/* Define controls in register TW5864_MV */
+/* mv bank0 full status, write "1" to clear */
+#define TW5864_MV_BK0_FULL BIT(0)
+/* mv bank1 full status, write "1" to clear */
+#define TW5864_MV_BK1_FULL BIT(1)
+/* slice end status, write "1" to clear */
+#define TW5864_MV_EOF BIT(2)
+/* mv encode interrupt status, write "1" to clear */
+#define TW5864_MV_DSP_INTR BIT(3)
+/* mv write memory overflow, write "1" to clear */
+#define TW5864_DSP_WR_OF BIT(4)
+#define TW5864_MV_LEN_SHIFT 5
+/* mv stream length */
+#define TW5864_MV_LEN (0xff << 5)
+/* Reflects the configuration bit written into bit 15 of register 0xfc04 */
+#define TW5864_MPI_DDR_SEL BIT(13)
+
+#define TW5864_MPI_DDR_SEL_REG 0xfc04
+/* Define controls in register TW5864_MPI_DDR_SEL_REG */
+/*
+ * SW configure register
+ * 0 MV is saved in internal DPR
+ * 1 MV is saved in DDR
+ */
+#define TW5864_MPI_DDR_SEL2 BIT(15)
+
+/* 0x18000 ~ 0x181fc -- PCI Master/Slave Control Map */
+#define TW5864_PCI_INTR_STATUS 0x18000
+/* Define controls in register TW5864_PCI_INTR_STATUS */
+/* vlc done */
+#define TW5864_VLC_DONE_INTR BIT(1)
+/* ad vsync */
+#define TW5864_AD_VSYNC_INTR BIT(3)
+/* preview eof */
+#define TW5864_PREV_EOF_INTR BIT(4)
+/* preview overflow interrupt */
+#define TW5864_PREV_OVERFLOW_INTR BIT(5)
+/* timer interrupt */
+#define TW5864_TIMER_INTR BIT(6)
+/* audio eof */
+#define TW5864_AUDIO_EOF_INTR BIT(8)
+/* IIC done */
+#define TW5864_IIC_DONE_INTR BIT(24)
+/* ad interrupt (e.g.: video lost, video format changed) */
+#define TW5864_AD_INTR_REG BIT(25)
+
+#define TW5864_PCI_INTR_CTL 0x18004
+/* Define controls in register TW5864_PCI_INTR_CTL */
+/* master enable */
+#define TW5864_PCI_MAST_ENB BIT(0)
+/* mvd&vlc master enable */
+#define TW5864_MVD_VLC_MAST_ENB 0x06
+/* (Need to set 0 in TW5864A) */
+#define TW5864_AD_MAST_ENB BIT(3)
+/* preview master enable */
+#define TW5864_PREV_MAST_ENB BIT(4)
+/* preview overflow enable */
+#define TW5864_PREV_OVERFLOW_ENB BIT(5)
+/* timer interrupt enable */
+#define TW5864_TIMER_INTR_ENB BIT(6)
+/* JPEG master (push mode) enable */
+#define TW5864_JPEG_MAST_ENB BIT(7)
+#define TW5864_AU_MAST_ENB_CHN_SHIFT 8
+/* audio master channel enable */
+#define TW5864_AU_MAST_ENB_CHN (0xffff << 8)
+/* IIC interrupt enable */
+#define TW5864_IIC_INTR_ENB BIT(24)
+/* ad interrupt enable */
+#define TW5864_AD_INTR_ENB BIT(25)
+/* target burst enable */
+#define TW5864_PCI_TAR_BURST_ENB BIT(26)
+/* vlc stream burst enable */
+#define TW5864_PCI_VLC_BURST_ENB BIT(27)
+/* ddr burst enable (1 enable, and must set DDR_BRST_EN) */
+#define TW5864_PCI_DDR_BURST_ENB BIT(28)
+
+/*
+ * Preview and audio each have 16 channels, so this register indicates the
+ * interrupt status of every channel. It is a secondary interrupt status
+ * register. The OR of the PREV_INTR_REG bits is PREV_EOF_INTR; the OR of the
+ * AU_INTR_REG bits is AUDIO_EOF_INTR.
+ */
+#define TW5864_PREV_AND_AU_INTR 0x18008
+/* Define controls in register TW5864_PREV_AND_AU_INTR */
+/* preview eof interrupt flag */
+#define TW5864_PREV_INTR_REG 0x0000ffff
+#define TW5864_AU_INTR_REG_SHIFT 16
+/* audio eof interrupt flag */
+#define TW5864_AU_INTR_REG (0xffff << 16)
+
+#define TW5864_MASTER_ENB_REG 0x1800c
+/* Define controls in register TW5864_MASTER_ENB_REG */
+/* vlc interrupt enable */
+#define TW5864_PCI_VLC_INTR_ENB BIT(1)
+/* preview interrupt enable */
+#define TW5864_PCI_PREV_INTR_ENB BIT(4)
+/* preview overflow interrupt enable */
+#define TW5864_PCI_PREV_OF_INTR_ENB BIT(5)
+/* jpeg interrupt enable */
+#define TW5864_PCI_JPEG_INTR_ENB BIT(7)
+/* audio interrupt enable */
+#define TW5864_PCI_AUD_INTR_ENB BIT(8)
+
+/*
+ * Every preview and audio channel has ping-pong buffers in system memory;
+ * this register is the buffer flag that notifies software which buffer is
+ * currently being operated on.
+ */
+#define TW5864_PREV_AND_AU_BUF_FLAG 0x18010
+/* Define controls in register TW5864_PREV_AND_AU_BUF_FLAG */
+/* preview buffer A/B flag */
+#define TW5864_PREV_BUF_FLAG 0xffff
+#define TW5864_AUDIO_BUF_FLAG_SHIFT 16
+/* audio buffer A/B flag */
+#define TW5864_AUDIO_BUF_FLAG (0xffff << 16)
+
+#define TW5864_IIC 0x18014
+/* Define controls in register TW5864_IIC */
+/* register data */
+#define TW5864_IIC_DATA 0x00ff
+#define TW5864_IIC_REG_ADDR_SHIFT 8
+/* register addr */
+#define TW5864_IIC_REG_ADDR (0xff << 8)
+/* rd/wr flag rd=1,wr=0 */
+#define TW5864_IIC_RW BIT(16)
+#define TW5864_IIC_DEV_ADDR_SHIFT 17
+/* device addr */
+#define TW5864_IIC_DEV_ADDR (0x7f << 17)
+/*
+ * iic done: software kicks off a one-shot iic transaction by setting this
+ * bit to 1, then polls this bit; value 1 indicates that the iic transaction
+ * has completed and, for a read, that valid data has been stored in iic_data
+ */
+#define TW5864_IIC_DONE BIT(24)
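+/*
+ * Minimal usage sketch (an illustration, not taken from the reference
+ * driver; dev_addr, reg_addr and data are placeholders): kicking off a
+ * single register write transaction and waiting for completion. For a
+ * write, TW5864_IIC_RW is left clear; set it for a read.
+ *
+ *   tw_writel(TW5864_IIC, TW5864_IIC_DONE
+ *             | (dev_addr << TW5864_IIC_DEV_ADDR_SHIFT)
+ *             | (reg_addr << TW5864_IIC_REG_ADDR_SHIFT)
+ *             | data);
+ *   while (!(tw_readl(TW5864_IIC) & TW5864_IIC_DONE))
+ *       ;
+ */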
+
+#define TW5864_RST_AND_IF_INFO 0x18018
+/* Define controls in register TW5864_RST_AND_IF_INFO */
+/* application software soft reset */
+#define TW5864_APP_SOFT_RST BIT(0)
+#define TW5864_PCI_INF_VERSION_SHIFT 16
+/* PCI interface version, read only */
+#define TW5864_PCI_INF_VERSION (0xffff << 16)
+
+/* vlc stream crc value, it is calculated in pci module */
+#define TW5864_VLC_CRC_REG 0x1801c
+/*
+ * vlc max length; it is defined by software based on the memory space that
+ * software assigns for vlc
+ */
+#define TW5864_VLC_MAX_LENGTH 0x18020
+/* vlc length of one frame */
+#define TW5864_VLC_LENGTH 0x18024
+/* vlc original crc value */
+#define TW5864_VLC_INTRA_CRC_I_REG 0x18028
+/* vlc original crc value */
+#define TW5864_VLC_INTRA_CRC_O_REG 0x1802c
+/* mv stream crc value, it is calculated in pci module */
+#define TW5864_VLC_PAR_CRC_REG 0x18030
+/* mv length */
+#define TW5864_VLC_PAR_LENGTH_REG 0x18034
+/* mv original crc value */
+#define TW5864_VLC_PAR_I_REG 0x18038
+/* mv original crc value */
+#define TW5864_VLC_PAR_O_REG 0x1803c
+
+/*
+ * Configuration register for 9 (or 10) CIFs or 1D1+15QCIF Preview mode.
+ * PREV_PCI_ENB_CHN[0] Enable 9th preview channel (9CIF prev) or 1D1 channel in
+ * (1D1+15QCIF prev)
+ * PREV_PCI_ENB_CHN[1] Enable 10th preview channel
+ */
+#define TW5864_PREV_PCI_ENB_CHN 0x18040
+/* Description skipped. */
+#define TW5864_PREV_FRAME_FORMAT_IN 0x18044
+/* IIC enable */
+#define TW5864_IIC_ENB 0x18048
+/*
+ * Timer interrupt interval
+ * 0 1ms
+ * 1 2ms
+ * 2 4ms
+ * 3 8ms
+ */
+#define TW5864_PCI_INTTM_SCALE 0x1804c
+
+/*
+ * The registers below are PCI base address registers. Application software
+ * initializes them to tell the chip where the corresponding stream is to be
+ * dumped. Application software selects an appropriate base address interval
+ * based on the stream length.
+ */
+/* VLC stream base address */
+#define TW5864_VLC_STREAM_BASE_ADDR 0x18080
+/* MV stream base address */
+#define TW5864_MV_STREAM_BASE_ADDR 0x18084
+/* 0x180a0 ~ 0x180bc: audio burst base address. Skipped. */
+/* 0x180c0 ~ 0x180dc – JPEG Push Mode Buffer Base Address. Skipped. */
+/* 0x18100 ~ 0x1817c: preview burst base address. Skipped. */
+
+/* 0x80000 ~ 0x87fff -- DDR Burst RW Register Map */
+#define TW5864_DDR_CTL 0x80000
+/* Define controls in register TW5864_DDR_CTL */
+#define TW5864_BRST_LENGTH_SHIFT 2
+/* Length of 32-bit data burst */
+#define TW5864_BRST_LENGTH (0x3fff << 2)
+/*
+ * Burst Read/Write
+ * 0 Read Burst from DDR
+ * 1 Write Burst to DDR
+ */
+#define TW5864_BRST_RW BIT(16)
+/* Begin a new DDR Burst. This bit is self cleared */
+#define TW5864_NEW_BRST_CMD BIT(17)
+/* DDR Burst End Flag */
+#define TW5864_BRST_END BIT(24)
+/* Enable Error Interrupt for Single DDR Access */
+#define TW5864_SING_ERR_INTR BIT(25)
+/* Enable Error Interrupt for Burst DDR Access */
+#define TW5864_BRST_ERR_INTR BIT(26)
+/* Enable Interrupt for End of DDR Burst Access */
+#define TW5864_BRST_END_INTR BIT(27)
+/* DDR Single Access Error Flag */
+#define TW5864_SINGLE_ERR BIT(28)
+/* DDR Single Access Busy Flag */
+#define TW5864_SINGLE_BUSY BIT(29)
+/* DDR Burst Access Error Flag */
+#define TW5864_BRST_ERR BIT(30)
+/* DDR Burst Access Busy Flag */
+#define TW5864_BRST_BUSY BIT(31)
+
+/* [27:0] DDR Access Address. Bits [1:0] have to be 0 */
+#define TW5864_DDR_ADDR 0x80004
+/* DDR Access Internal Buffer Address. Bits [1:0] have to be 0 */
+#define TW5864_DPR_BUF_ADDR 0x80008
+/* SRAM Buffer MPI Access Space. 16 KB in total */
+#define TW5864_DPR_BUF_START 0x84000
+/* 0x84000 - 0x87ffc */
+#define TW5864_DPR_BUF_SIZE 0x4000
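+/*
+ * Minimal usage sketch (an illustration only; ddr_addr, buf_addr and len are
+ * placeholders, and bits [1:0] of both addresses must be 0): starting a DDR
+ * burst read of len 32-bit words into the on-chip SRAM buffer and waiting
+ * for it to finish:
+ *
+ *   tw_writel(TW5864_DDR_ADDR, ddr_addr);
+ *   tw_writel(TW5864_DPR_BUF_ADDR, buf_addr);
+ *   tw_writel(TW5864_DDR_CTL,
+ *             (len << TW5864_BRST_LENGTH_SHIFT) | TW5864_NEW_BRST_CMD);
+ *   while (tw_readl(TW5864_DDR_CTL) & TW5864_BRST_BUSY)
+ *       ;
+ */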
+
+/* Indirect Map Space */
+/*
+ * The indirect space is accessed through 0xb800 ~ 0xb807 registers in direct
+ * access space
+ */
+/* Analog Video / Audio Decoder / Encoder */
+/* Allowed channel values: [0; 3] */
+/* Read-only register */
+#define TW5864_INDIR_VIN_0(channel) (0x000 + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_0 */
+/*
+ * 1 Video not present (sync is not detected for the number of consecutive
+ * line periods specified by the MISSCNT register).
+ * 0 Video detected.
+ */
+#define TW5864_INDIR_VIN_0_VDLOSS BIT(7)
+/*
+ * 1 Horizontal sync PLL is locked to the incoming video source.
+ * 0 Horizontal sync PLL is not locked.
+ */
+#define TW5864_INDIR_VIN_0_HLOCK BIT(6)
+/*
+ * 1 Sub-carrier PLL is locked to the incoming video source.
+ * 0 Sub-carrier PLL is not locked.
+ */
+#define TW5864_INDIR_VIN_0_SLOCK BIT(5)
+/*
+ * 1 Even field is being decoded.
+ * 0 Odd field is being decoded.
+ */
+#define TW5864_INDIR_VIN_0_FLD BIT(4)
+/*
+ * 1 Vertical logic is locked to the incoming video source.
+ * 0 Vertical logic is not locked.
+ */
+#define TW5864_INDIR_VIN_0_VLOCK BIT(3)
+/*
+ * 1 No color burst signal detected.
+ * 0 Color burst signal detected.
+ */
+#define TW5864_INDIR_VIN_0_MONO BIT(1)
+/*
+ * 0 60Hz source detected
+ * 1 50Hz source detected
+ * The actual vertical scanning frequency depends on the current standard
+ * invoked.
+ */
+#define TW5864_INDIR_VIN_0_DET50 BIT(0)
+
+#define TW5864_INDIR_VIN_1(channel) (0x001 + channel * 0x010)
+/* VCR signal indicator. Read-only. */
+#define TW5864_INDIR_VIN_1_VCR BIT(7)
+/* Weak signal indicator 2. Read-only. */
+#define TW5864_INDIR_VIN_1_WKAIR BIT(6)
+/* Weak signal indicator controlled by WKTH. Read-only. */
+#define TW5864_INDIR_VIN_1_WKAIR1 BIT(5)
+/*
+ * 1 = Standard signal
+ * 0 = Non-standard signal
+ * Read-only
+ */
+#define TW5864_INDIR_VIN_1_VSTD BIT(4)
+/*
+ * 1 = Non-interlaced signal
+ * 0 = interlaced signal
+ * Read-only
+ */
+#define TW5864_INDIR_VIN_1_NINTL BIT(3)
+/*
+ * Vertical Sharpness Control. Writable.
+ * 0 = None (default)
+ * 7 = Highest
+ * Note: VSHP must be set to '0' if COMB = 0
+ */
+#define TW5864_INDIR_VIN_1_VSHP 0x07
+
+/* HDELAY_XY[7:0] */
+#define TW5864_INDIR_VIN_2_HDELAY_XY_LO(channel) (0x002 + channel * 0x010)
+/* HACTIVE_XY[7:0] */
+#define TW5864_INDIR_VIN_3_HACTIVE_XY_LO(channel) (0x003 + channel * 0x010)
+/* VDELAY_XY[7:0] */
+#define TW5864_INDIR_VIN_4_VDELAY_XY_LO(channel) (0x004 + channel * 0x010)
+/* VACTIVE_XY[7:0] */
+#define TW5864_INDIR_VIN_5_VACTIVE_XY_LO(channel) (0x005 + channel * 0x010)
+
+#define TW5864_INDIR_VIN_6(channel) (0x006 + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_6 */
+#define TW5864_INDIR_VIN_6_HDELAY_XY_HI 0x03
+#define TW5864_INDIR_VIN_6_HACTIVE_XY_HI_SHIFT 2
+#define TW5864_INDIR_VIN_6_HACTIVE_XY_HI (0x03 << 2)
+#define TW5864_INDIR_VIN_6_VDELAY_XY_HI BIT(4)
+#define TW5864_INDIR_VIN_6_VACTIVE_XY_HI BIT(5)
+
+/*
+ * HDELAY_XY This 10bit register defines the starting location of horizontal
+ * active pixel for display / record path. A unit is 1 pixel. The default value
+ * is 0x00f for NTSC and 0x00a for PAL.
+ *
+ * HACTIVE_XY This 10bit register defines the number of horizontal active pixel
+ * for display / record path. A unit is 1 pixel. The default value is decimal
+ * 720.
+ *
+ * VDELAY_XY This 9bit register defines the starting location of vertical
+ * active for display / record path. A unit is 1 line. The default value is
+ * decimal 6.
+ *
+ * VACTIVE_XY This 9bit register defines the number of vertical active lines
+ * for display / record path. A unit is 1 line. The default value is decimal
+ * 240.
+ */
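+/*
+ * Minimal sketch (assuming the indirect access helpers from tw5864-util.c;
+ * dev, ch, hi and hdelay are placeholders): the 10-bit HDELAY_XY of channel
+ * ch is composed from the LO byte and the HI bits held in VIN_6:
+ *
+ *   hi = tw5864_indir_readb(dev, TW5864_INDIR_VIN_6(ch));
+ *   hdelay = tw5864_indir_readb(dev, TW5864_INDIR_VIN_2_HDELAY_XY_LO(ch))
+ *            | ((hi & TW5864_INDIR_VIN_6_HDELAY_XY_HI) << 8);
+ */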
+
+/*
+ * HUE These bits control the color hue as a 2's complement number. They have
+ * a value from +36 degrees (7Fh) to -36 degrees (80h) with an increment of
+ * 2.8 degrees. The 2 LSBs have no effect. A positive value gives a greenish
+ * tone and a negative value gives a purplish tone. This is effective only on
+ * an NTSC system. The default is 0 degrees (00h).
+ */
+#define TW5864_INDIR_VIN_7_HUE(channel) (0x007 + channel * 0x010)
+
+#define TW5864_INDIR_VIN_8(channel) (0x008 + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_8 */
+/*
+ * This bit controls the center frequency of the peaking filter.
+ * The corresponding gain adjustment is HFLT.
+ * 0 Low
+ * 1 Center
+ */
+#define TW5864_INDIR_VIN_8_SCURVE BIT(7)
+/*
+ * CTI level selection. The default is 1.
+ * 0 None
+ * 3 Highest
+ */
+#define TW5864_INDIR_VIN_8_CTI_SHIFT 4
+#define TW5864_INDIR_VIN_8_CTI (0x03 << 4)
+
+/*
+ * These bits control the amount of sharpness enhancement on the luminance
+ * signals. There are 16 levels of control with "0" having no effect on the
+ * output image. 1 through 15 provides sharpness enhancement with "F" being the
+ * strongest. The default is 1.
+ */
+#define TW5864_INDIR_VIN_8_SHARPNESS 0x0f
+
+/*
+ * These bits control the luminance contrast gain. A value of 100 (64h) has a
+ * gain of 1. The range adjustment is from 0% to 255% at 1% per step. The
+ * default is 64h.
+ */
+#define TW5864_INDIR_VIN_9_CNTRST(channel) (0x009 + channel * 0x010)
+
+/*
+ * These bits control the brightness. They have a value of -128 to 127 in 2's
+ * complement form. Positive value increases brightness. A value 0 has no
+ * effect on the data. The default is 00h.
+ */
+#define TW5864_INDIR_VIN_A_BRIGHT(channel) (0x00a + channel * 0x010)
+
+/*
+ * These bits control the digital gain adjustment to the U (or Cb) component of
+ * the digital video signal. The color saturation can be adjusted by adjusting
+ * the U and V color gain components by the same amount in the normal
+ * situation. The U and V can also be adjusted independently to provide greater
+ * flexibility. The range of adjustment is 0 to 200%. A value of 128 (80h) has
+ * gain of 100%. The default is 80h.
+ */
+#define TW5864_INDIR_VIN_B_SAT_U(channel) (0x00b + channel * 0x010)
+
+/*
+ * These bits control the digital gain adjustment to the V (or Cr) component of
+ * the digital video signal. The color saturation can be adjusted by adjusting
+ * the U and V color gain components by the same amount in the normal
+ * situation. The U and V can also be adjusted independently to provide greater
+ * flexibility. The range of adjustment is 0 to 200%. A value of 128 (80h) has
+ * gain of 100%. The default is 80h.
+ */
+#define TW5864_INDIR_VIN_C_SAT_V(channel) (0x00c + channel * 0x010)
+
+/* Read-only */
+#define TW5864_INDIR_VIN_D(channel) (0x00d + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_D */
+/* Macrovision color stripe detection may be unreliable */
+#define TW5864_INDIR_VIN_D_CSBAD BIT(3)
+/* Macrovision AGC pulse detected */
+#define TW5864_INDIR_VIN_D_MCVSN BIT(2)
+/* Macrovision color stripe protection burst detected */
+#define TW5864_INDIR_VIN_D_CSTRIPE BIT(1)
+/*
+ * This bit is valid only when color stripe protection is detected, i.e. if
+ * CSTRIPE=1,
+ * 1 Type 2 color stripe protection
+ * 0 Type 3 color stripe protection
+ */
+#define TW5864_INDIR_VIN_D_CTYPE2 BIT(0)
+
+/* Read-only */
+#define TW5864_INDIR_VIN_E(channel) (0x00e + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_E */
+/*
+ * Read-only.
+ * 0 Idle
+ * 1 Detection in progress
+ */
+#define TW5864_INDIR_VIN_E_DETSTUS BIT(7)
+/*
+ * STDNOW Current standard invoked
+ * 0 NTSC (M)
+ * 1 PAL (B, D, G, H, I)
+ * 2 SECAM
+ * 3 NTSC4.43
+ * 4 PAL (M)
+ * 5 PAL (CN)
+ * 6 PAL 60
+ * 7 Not valid
+ */
+#define TW5864_INDIR_VIN_E_STDNOW_SHIFT 4
+#define TW5864_INDIR_VIN_E_STDNOW (0x07 << 4)
+
+/*
+ * 1 Disable the shadow registers
+ * 0 Enable the VACTIVE and HDELAY shadow register values depending on
+ * STANDARD. (Default)
+ */
+#define TW5864_INDIR_VIN_E_ATREG BIT(3)
+/*
+ * STANDARD Standard selection
+ * 0 NTSC (M)
+ * 1 PAL (B, D, G, H, I)
+ * 2 SECAM
+ * 3 NTSC4.43
+ * 4 PAL (M)
+ * 5 PAL (CN)
+ * 6 PAL 60
+ * 7 Auto detection (Default)
+ */
+#define TW5864_INDIR_VIN_E_STANDARD 0x07
+
+#define TW5864_INDIR_VIN_F(channel) (0x00f + channel * 0x010)
+/* Define controls in register TW5864_INDIR_VIN_F */
+/*
+ * 1 Writing 1 to this bit will manually initiate the auto format detection
+ * process. This bit is self-clearing.
+ * 0 Manual initiation of auto format detection is done. (Default)
+ */
+#define TW5864_INDIR_VIN_F_ATSTART BIT(7)
+/* Enable recognition of PAL60 (Default) */
+#define TW5864_INDIR_VIN_F_PAL60EN BIT(6)
+/* Enable recognition of PAL (CN). (Default) */
+#define TW5864_INDIR_VIN_F_PALCNEN BIT(5)
+/* Enable recognition of PAL (M). (Default) */
+#define TW5864_INDIR_VIN_F_PALMEN BIT(4)
+/* Enable recognition of NTSC 4.43. (Default) */
+#define TW5864_INDIR_VIN_F_NTSC44EN BIT(3)
+/* Enable recognition of SECAM. (Default) */
+#define TW5864_INDIR_VIN_F_SECAMEN BIT(2)
+/* Enable recognition of PAL (B, D, G, H, I). (Default) */
+#define TW5864_INDIR_VIN_F_PALBEN BIT(1)
+/* Enable recognition of NTSC (M). (Default) */
+#define TW5864_INDIR_VIN_F_NTSCEN BIT(0)
+
+/* Some registers skipped. */
+
+/* Use falling edge to sample VD1-VD4 from 54 MHz to 108 MHz */
+#define TW5864_INDIR_VD_108_POL 0x041
+#define TW5864_INDIR_VD_108_POL_VD12 BIT(0)
+#define TW5864_INDIR_VD_108_POL_VD34 BIT(1)
+#define TW5864_INDIR_VD_108_POL_BOTH \
+ (TW5864_INDIR_VD_108_POL_VD12 | TW5864_INDIR_VD_108_POL_VD34)
+
+/* Some registers skipped. */
+
+/*
+ * Audio Input ADC gain control
+ * 0 0.25
+ * 1 0.31
+ * 2 0.38
+ * 3 0.44
+ * 4 0.50
+ * 5 0.63
+ * 6 0.75
+ * 7 0.88
+ * 8 1.00 (default)
+ * 9 1.25
+ * 10 1.50
+ * 11 1.75
+ * 12 2.00
+ * 13 2.25
+ * 14 2.50
+ * 15 2.75
+ */
+/* [3:0] channel 0, [7:4] channel 1 */
+#define TW5864_INDIR_AIGAIN1 0x060
+/* [3:0] channel 2, [7:4] channel 3 */
+#define TW5864_INDIR_AIGAIN2 0x061
+
+/* Some registers skipped */
+
+#define TW5864_INDIR_AIN_0x06D 0x06d
+/* Define controls in register TW5864_INDIR_AIN_0x06D */
+/*
+ * LAWMD Select u-Law/A-Law/PCM/SB data output format on ADATR and ADATM pin.
+ * 0 PCM output (default)
+ * 1 SB (Signed MSB bit in PCM data is inverted) output
+ * 2 u-Law output
+ * 3 A-Law output
+ */
+#define TW5864_INDIR_AIN_LAWMD_SHIFT 6
+#define TW5864_INDIR_AIN_LAWMD (0x03 << 6)
+/*
+ * Disable the mixing ratio value for all audio.
+ * 0 Apply an individual mixing ratio value for each audio channel (default)
+ * 1 Apply the nominal value to all audio channels in common
+ */
+#define TW5864_INDIR_AIN_MIX_DERATIO BIT(5)
+/*
+ * Enable the mute function for audio channel AINn when n is 0 to 3. It takes
+ * effect only for mixing. When n = 4, it enables the mute function of the
+ * playback audio input. It takes effect only for a single chip or the last
+ * stage chip
+ * 0 Normal
+ * 1 Muted (default)
+ */
+#define TW5864_INDIR_AIN_MIX_MUTE 0x1f
+
+/* Some registers skipped */
+
+#define TW5864_INDIR_AIN_0x0E3 0x0e3
+/* Define controls in register TW5864_INDIR_AIN_0x0E3 */
+/*
+ * The ADATP signal comes from an external ADPCM decoder instead of the
+ * on-chip ADPCM decoder
+ */
+#define TW5864_INDIR_AIN_0x0E3_EXT_ADATP BIT(7)
+/* ACLKP output signal polarity inverse */
+#define TW5864_INDIR_AIN_0x0E3_ACLKPPOLO BIT(6)
+/*
+ * ACLKR input signal polarity inverse.
+ * 0 Not inversed (Default)
+ * 1 Inversed
+ */
+#define TW5864_INDIR_AIN_0x0E3_ACLKRPOL BIT(5)
+/*
+ * ACLKP input signal polarity inverse.
+ * 0 Not inversed (Default)
+ * 1 Inversed
+ */
+#define TW5864_INDIR_AIN_0x0E3_ACLKPPOLI BIT(4)
+/*
+ * ACKI [21:0] control automatic setup with the AFMD registers.
+ * This mode is only effective when ACLKRMASTER=1
+ * 0 ACKI [21:0] registers set up ACKI control
+ * 1 ACKI control is automatically set up by AFMD register values
+ */
+#define TW5864_INDIR_AIN_0x0E3_AFAUTO BIT(3)
+/*
+ * AFAUTO control mode
+ * 0 8kHz setting (Default)
+ * 1 16kHz setting
+ * 2 32kHz setting
+ * 3 44.1kHz setting
+ * 4 48kHz setting
+ */
+#define TW5864_INDIR_AIN_0x0E3_AFMD 0x07
+
+#define TW5864_INDIR_AIN_0x0E4 0x0e4
+/* Define controls in register TW5864_INDIR_AIN_0x0E4 */
+/*
+ * 8bit I2S Record output mode.
+ * 0 L/R half length separated output (Default).
+ * 1 One continuous packed output equal to DSP output format.
+ */
+#define TW5864_INDIR_AIN_0x0E4_I2S8MODE BIT(7)
+/*
+ * Audio Clock Master ACLKR output wave format.
+ * 0 The high period is one 27MHz clock period (default).
+ * 1 Approximately 50-50% duty cycle clock output on the ACLKR pin. If this
+ * mode is selected, a twice larger value needs to be set up in the ACKI
+ * register. If AFAUTO=1, ACKI control is automatically set up even if
+ * MASCKMD=1.
+ */
+#define TW5864_INDIR_AIN_0x0E4_MASCKMD BIT(6)
+/* Playback ACLKP/ASYNP/ADATP input data MSB-LSB swapping */
+#define TW5864_INDIR_AIN_0x0E4_PBINSWAP BIT(5)
+/*
+ * ASYNR input signal delay.
+ * 0 No delay
+ * 1 Add one 27MHz period delay in ASYNR signal input
+ */
+#define TW5864_INDIR_AIN_0x0E4_ASYNRDLY BIT(4)
+/*
+ * ASYNP input signal delay.
+ * 0 no delay
+ * 1 add one 27MHz period delay in ASYNP signal input
+ */
+#define TW5864_INDIR_AIN_0x0E4_ASYNPDLY BIT(3)
+/*
+ * ADATP input data delay by one ACLKP clock.
+ * 0 No delay (Default). This is for I2S type 1T delay input interface.
+ * 1 Add 1 ACLKP clock delay in ADATP input data. This is for left-justified
+ * type 0T delay input interface.
+ */
+#define TW5864_INDIR_AIN_0x0E4_ADATPDLY BIT(2)
+/*
+ * Select u-Law/A-Law/PCM/SB data input format on ADATP pin.
+ * 0 PCM input (Default)
+ * 1 SB (Signed MSB bit in PCM data is inverted) input
+ * 2 u-Law input
+ * 3 A-Law input
+ */
+#define TW5864_INDIR_AIN_0x0E4_INLAWMD 0x03
+
+/*
+ * Enable state register updating and interrupt request of audio AIN5 detection
+ * for each input
+ */
+#define TW5864_INDIR_AIN_A5DETENA 0x0e5
+
+/* Some registers skipped */
+
+/*
+ * [7:3]: DEV_ID The TW5864 product ID code is 01000
+ * [2:0]: REV_ID The revision number is 0h
+ */
+#define TW5864_INDIR_ID 0x0fe
+
+#define TW5864_INDIR_IN_PIC_WIDTH(channel) (0x200 + 4 * channel)
+#define TW5864_INDIR_IN_PIC_HEIGHT(channel) (0x201 + 4 * channel)
+#define TW5864_INDIR_OUT_PIC_WIDTH(channel) (0x202 + 4 * channel)
+#define TW5864_INDIR_OUT_PIC_HEIGHT(channel) (0x203 + 4 * channel)
+/*
+ * Interrupt status register from the front-end. Write "1" to each bit to clear
+ * the interrupt
+ * 15:0 Motion detection interrupt for channel 0 ~ 15
+ * 31:16 Night detection interrupt for channel 0 ~ 15
+ * 47:32 Blind detection interrupt for channel 0 ~ 15
+ * 63:48 No video interrupt for channel 0 ~ 15
+ * 79:64 Line mode underflow interrupt for channel 0 ~ 15
+ * 95:80 Line mode overflow interrupt for channel 0 ~ 15
+ */
+/* 0x2d0~0x2d7: [63:0] bits */
+#define TW5864_INDIR_INTERRUPT1 0x2d0
+/* 0x2e0~0x2e3: [95:64] bits */
+#define TW5864_INDIR_INTERRUPT2 0x2e0
+
+/*
+ * Interrupt mask register for interrupts in 0x2d0 ~ 0x2d7
+ * 15:0 Motion detection interrupt for channel 0 ~ 15
+ * 31:16 Night detection interrupt for channel 0 ~ 15
+ * 47:32 Blind detection interrupt for channel 0 ~ 15
+ * 63:48 No video interrupt for channel 0 ~ 15
+ * 79:64 Line mode underflow interrupt for channel 0 ~ 15
+ * 95:80 Line mode overflow interrupt for channel 0 ~ 15
+ */
+/* 0x2d8~0x2df: [63:0] bits */
+#define TW5864_INDIR_INTERRUPT_MASK1 0x2d8
+/* 0x2e8~0x2eb: [95:64] bits */
+#define TW5864_INDIR_INTERRUPT_MASK2 0x2e8
+
+/*
+ * [11:0]: Interrupt summary register for the interrupts and interrupt masks
+ * in 0x2d0 ~ 0x2d7 and 0x2d8 ~ 0x2df
+ * bit 0: interrupt occurs in 0x2d0 & 0x2d8
+ * bit 1: interrupt occurs in 0x2d1 & 0x2d9
+ * bit 2: interrupt occurs in 0x2d2 & 0x2da
+ * bit 3: interrupt occurs in 0x2d3 & 0x2db
+ * bit 4: interrupt occurs in 0x2d4 & 0x2dc
+ * bit 5: interrupt occurs in 0x2d5 & 0x2dd
+ * bit 6: interrupt occurs in 0x2d6 & 0x2de
+ * bit 7: interrupt occurs in 0x2d7 & 0x2df
+ * bit 8: interrupt occurs in 0x2e0 & 0x2e8
+ * bit 9: interrupt occurs in 0x2e1 & 0x2e9
+ * bit 10: interrupt occurs in 0x2e2 & 0x2ea
+ * bit 11: interrupt occurs in 0x2e3 & 0x2eb
+ */
+#define TW5864_INDIR_INTERRUPT_SUMMARY 0x2f0
+
+/* Motion / Blind / Night Detection */
+/* valid value for channel is [0:15] */
+#define TW5864_INDIR_DETECTION_CTL0(channel) (0x300 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL0 */
+/*
+ * Disable the motion and blind detection.
+ * 0 Enable motion and blind detection (default)
+ * 1 Disable motion and blind detection
+ */
+#define TW5864_INDIR_DETECTION_CTL0_MD_DIS BIT(5)
+/*
+ * Request to start motion detection in manual trigger mode
+ * 0 No operation (default)
+ * 1 Request to start motion detection
+ */
+#define TW5864_INDIR_DETECTION_CTL0_MD_STRB BIT(3)
+/*
+ * Select the trigger mode of motion detection
+ * 0 Automatic trigger mode of motion detection (default)
+ * 1 Manual trigger mode for motion detection
+ */
+#define TW5864_INDIR_DETECTION_CTL0_MD_STRB_EN BIT(2)
+/*
+ * Define the threshold of cell for blind detection.
+ * 0 Low threshold (More sensitive) (default)
+ * : :
+ * 3 High threshold (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL0_BD_CELSENS 0x03
+
+#define TW5864_INDIR_DETECTION_CTL1(channel) (0x301 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL1 */
+/*
+ * Control the temporal sensitivity of the motion detector.
+ * 0 More Sensitive (default)
+ * : :
+ * 15 Less Sensitive
+ */
+#define TW5864_INDIR_DETECTION_CTL1_MD_TMPSENS_SHIFT 4
+#define TW5864_INDIR_DETECTION_CTL1_MD_TMPSENS (0x0f << 4)
+/*
+ * Adjust the horizontal starting position for motion detection
+ * 0 0 pixel (default)
+ * : :
+ * 15 15 pixels
+ */
+#define TW5864_INDIR_DETECTION_CTL1_MD_PIXEL_OS 0x0f
+
+#define TW5864_INDIR_DETECTION_CTL2(channel) (0x302 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL2 */
+/*
+ * Control the updating time of reference field for motion detection.
+ * 0 Update reference field every field (default)
+ * 1 Update reference field according to MD_SPEED
+ */
+#define TW5864_INDIR_DETECTION_CTL2_MD_REFFLD BIT(7)
+/*
+ * Select the field for motion detection.
+ * 0 Detecting motion for only odd field (default)
+ * 1 Detecting motion for only even field
+ * 2 Detecting motion for any field
+ * 3 Detecting motion for both odd and even field
+ */
+#define TW5864_INDIR_DETECTION_CTL2_MD_FIELD_SHIFT 5
+#define TW5864_INDIR_DETECTION_CTL2_MD_FIELD (0x03 << 5)
+/*
+ * Control the level sensitivity of the motion detector.
+ * 0 More sensitive (default)
+ * : :
+ * 15 Less sensitive
+ */
+#define TW5864_INDIR_DETECTION_CTL2_MD_LVSENS 0x1f
+
+#define TW5864_INDIR_DETECTION_CTL3(channel) (0x303 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL3 */
+/*
+ * Define the threshold of sub-cell number for motion detection.
+ * 0 Motion is detected if 1 sub-cell has motion (More sensitive) (default)
+ * 1 Motion is detected if 2 sub-cells have motion
+ * 2 Motion is detected if 3 sub-cells have motion
+ * 3 Motion is detected if 4 sub-cells have motion (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL3_MD_CELSENS_SHIFT 6
+#define TW5864_INDIR_DETECTION_CTL3_MD_CELSENS (0x03 << 6)
+/*
+ * Control the velocity of the motion detector.
+ * A large value is suitable for slow motion detection.
+ * If MD_DUAL_EN = 1, MD_SPEED should be limited to 0 ~ 31.
+ * 0 1 field intervals (default)
+ * 1 2 field intervals
+ * : :
+ * 61 62 field intervals
+ * 62 63 field intervals
+ * 63 Not supported
+ */
+#define TW5864_INDIR_DETECTION_CTL3_MD_SPEED 0x3f
+
+#define TW5864_INDIR_DETECTION_CTL4(channel) (0x304 + channel * 0x08)
+/* Define controls in register TW5864_INDIR_DETECTION_CTL4 */
+/*
+ * Control the spatial sensitivity of the motion detector.
+ * 0 More Sensitive (default)
+ * : :
+ * 15 Less Sensitive
+ */
+#define TW5864_INDIR_DETECTION_CTL4_MD_SPSENS_SHIFT 4
+#define TW5864_INDIR_DETECTION_CTL4_MD_SPSENS (0x0f << 4)
+/*
+ * Define the threshold of level for blind detection.
+ * 0 Low threshold (More sensitive) (default)
+ * : :
+ * 15 High threshold (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL4_BD_LVSENS 0x0f
+
+#define TW5864_INDIR_DETECTION_CTL5(channel) (0x305 + channel * 0x08)
+/*
+ * Define the threshold of temporal sensitivity for night detection.
+ * 0 Low threshold (More sensitive) (default)
+ * : :
+ * 15 High threshold (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL5_ND_TMPSENS_SHIFT 4
+#define TW5864_INDIR_DETECTION_CTL5_ND_TMPSENS (0x0f << 4)
+/*
+ * Define the threshold of level for night detection.
+ * 0 Low threshold (More sensitive) (default)
+ * : :
+ * 3 High threshold (Less sensitive)
+ */
+#define TW5864_INDIR_DETECTION_CTL5_ND_LVSENS 0x0f
+
+/*
+ * [11:0] The base address of the motion detection buffer. This address is in
+ * units of 64K bytes. The generated DDR address will be {MD_BASE_ADDR,
+ * 16'h0000}. The default value should be 12'h000
+ */
+#define TW5864_INDIR_MD_BASE_ADDR 0x380
+
+/*
+ * This controls the channel of the motion detection result shown in register
+ * 0x3a0 ~ 0x3b7. Before reading back the motion result, always set this first.
+ */
+#define TW5864_INDIR_RGR_MOTION_SEL 0x382
+
+/* [15:0] MD strobe has been performed at channel n (read only) */
+#define TW5864_INDIR_MD_STRB 0x386
+/* NO_VIDEO Detected from channel n (read only) */
+#define TW5864_INDIR_NOVID_DET 0x388
+/* Motion Detected from channel n (read only) */
+#define TW5864_INDIR_MD_DET 0x38a
+/* Blind Detected from channel n (read only) */
+#define TW5864_INDIR_BD_DET 0x38c
+/* Night Detected from channel n (read only) */
+#define TW5864_INDIR_ND_DET 0x38e
+
+/* 192 bit motion flag of the channel specified by RGR_MOTION_SEL in 0x382 */
+#define TW5864_INDIR_MOTION_FLAG 0x3a0
+#define TW5864_INDIR_MOTION_FLAG_BYTE_COUNT 24
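+/*
+ * Minimal sketch (assuming the indirect access helpers from tw5864-util.c;
+ * dev, ch, i and flags[] are placeholders): reading the 192-bit motion
+ * bitmap of channel ch:
+ *
+ *   tw5864_indir_writeb(dev, TW5864_INDIR_RGR_MOTION_SEL, ch);
+ *   for (i = 0; i < TW5864_INDIR_MOTION_FLAG_BYTE_COUNT; i++)
+ *       flags[i] = tw5864_indir_readb(dev, TW5864_INDIR_MOTION_FLAG + i);
+ */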
+
+/*
+ * [9:0] The motion cell count of a specific channel selected by 0x382. This is
+ * for DI purposes
+ */
+#define TW5864_INDIR_MD_DI_CNT 0x3b8
+/* The motion detection cell sensitivity for DI purposes */
+#define TW5864_INDIR_MD_DI_CELLSENS 0x3ba
+/* The motion detection threshold level for DI purposes */
+#define TW5864_INDIR_MD_DI_LVSENS 0x3bb
+
+/* 192 bit motion mask of the channel specified by MASK_CH_SEL in 0x3fe */
+#define TW5864_INDIR_MOTION_MASK 0x3e0
+#define TW5864_INDIR_MOTION_MASK_BYTE_COUNT 24
+
+/* [4:0] The channel selection to access masks in 0x3e0 ~ 0x3f7 */
+#define TW5864_INDIR_MASK_CH_SEL 0x3fe
+
+/* Clock PLL / Analog IP Control */
+/* Some registers skipped */
+
+#define TW5864_INDIR_DDRA_DLL_DQS_SEL0 0xee6
+#define TW5864_INDIR_DDRA_DLL_DQS_SEL1 0xee7
+#define TW5864_INDIR_DDRA_DLL_CLK90_SEL 0xee8
+#define TW5864_INDIR_DDRA_DLL_TEST_SEL_AND_TAP_S 0xee9
+
+#define TW5864_INDIR_DDRB_DLL_DQS_SEL0 0xeeb
+#define TW5864_INDIR_DDRB_DLL_DQS_SEL1 0xeec
+#define TW5864_INDIR_DDRB_DLL_CLK90_SEL 0xeed
+#define TW5864_INDIR_DDRB_DLL_TEST_SEL_AND_TAP_S 0xeee
+
+#define TW5864_INDIR_RESET 0xef0
+#define TW5864_INDIR_RESET_VD BIT(7)
+#define TW5864_INDIR_RESET_DLL BIT(6)
+#define TW5864_INDIR_RESET_MUX_CORE BIT(5)
+
+#define TW5864_INDIR_PV_VD_CK_POL 0xefd
+#define TW5864_INDIR_PV_VD_CK_POL_PV(channel) BIT(channel)
+#define TW5864_INDIR_PV_VD_CK_POL_VD(channel) BIT(channel + 4)
+
+#define TW5864_INDIR_CLK0_SEL 0xefe
+#define TW5864_INDIR_CLK0_SEL_VD_SHIFT 0
+#define TW5864_INDIR_CLK0_SEL_VD_MASK 0x3
+#define TW5864_INDIR_CLK0_SEL_PV_SHIFT 2
+#define TW5864_INDIR_CLK0_SEL_PV_MASK (0x3 << 2)
+#define TW5864_INDIR_CLK0_SEL_PV2_SHIFT 4
+#define TW5864_INDIR_CLK0_SEL_PV2_MASK (0x3 << 4)
diff --git a/drivers/media/pci/tw5864/tw5864-util.c b/drivers/media/pci/tw5864/tw5864-util.c
new file mode 100644
index 000000000000..771eef235755
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-util.c
@@ -0,0 +1,37 @@
+#include "tw5864.h"
+
+void tw5864_indir_writeb(struct tw5864_dev *dev, u16 addr, u8 data)
+{
+ int retries = 30000;
+
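+ /* Wait for any previous indirect access to complete (BUSY flag) */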
+ while ((tw_readl(TW5864_IND_CTL) & TW5864_BUSY) && --retries)
+ ;
+ if (!retries)
+ dev_err(&dev->pci->dev,
+ "tw_indir_writel() retries exhausted before writing\n");
+
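+ /* Latch the data first, then the address with R/W = 1 and ENABLE = 1 */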
+ tw_writel(TW5864_IND_DATA, data);
+ tw_writel(TW5864_IND_CTL, addr << 2 | TW5864_RW | TW5864_ENABLE);
+}
+
+u8 tw5864_indir_readb(struct tw5864_dev *dev, u16 addr)
+{
+ int retries = 30000;
+
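+ /* Wait for any previous indirect access to complete (BUSY flag) */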
+ while ((tw_readl(TW5864_IND_CTL) & TW5864_BUSY) && --retries)
+ ;
+ if (!retries)
+ dev_err(&dev->pci->dev,
+ "tw_indir_readl() retries exhausted before reading\n");
+
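+ /* Issue the read command: the address with R/W = 0 and ENABLE = 1 */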
+ tw_writel(TW5864_IND_CTL, addr << 2 | TW5864_ENABLE);
+
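+ /* Wait for the read to complete before fetching the data */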
+ retries = 30000;
+ while ((tw_readl(TW5864_IND_CTL) & TW5864_BUSY) && --retries)
+ ;
+ if (!retries)
+ dev_err(&dev->pci->dev,
+ "tw_indir_readl() retries exhausted at reading\n");
+
+ return tw_readl(TW5864_IND_DATA);
+}
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
new file mode 100644
index 000000000000..652a059b2e0a
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -0,0 +1,1510 @@
+/*
+ * TW5864 driver - video encoding functions
+ *
+ * Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "tw5864.h"
+#include "tw5864-reg.h"
+
+#define QUANTIZATION_TABLE_LEN 96
+#define VLC_LOOKUP_TABLE_LEN 1024
+
+static const u16 forward_quantization_table[QUANTIZATION_TABLE_LEN] = {
+ 0x3333, 0x1f82, 0x3333, 0x1f82, 0x1f82, 0x147b, 0x1f82, 0x147b,
+ 0x3333, 0x1f82, 0x3333, 0x1f82, 0x1f82, 0x147b, 0x1f82, 0x147b,
+ 0x2e8c, 0x1d42, 0x2e8c, 0x1d42, 0x1d42, 0x1234, 0x1d42, 0x1234,
+ 0x2e8c, 0x1d42, 0x2e8c, 0x1d42, 0x1d42, 0x1234, 0x1d42, 0x1234,
+ 0x2762, 0x199a, 0x2762, 0x199a, 0x199a, 0x1062, 0x199a, 0x1062,
+ 0x2762, 0x199a, 0x2762, 0x199a, 0x199a, 0x1062, 0x199a, 0x1062,
+ 0x2492, 0x16c1, 0x2492, 0x16c1, 0x16c1, 0x0e3f, 0x16c1, 0x0e3f,
+ 0x2492, 0x16c1, 0x2492, 0x16c1, 0x16c1, 0x0e3f, 0x16c1, 0x0e3f,
+ 0x2000, 0x147b, 0x2000, 0x147b, 0x147b, 0x0d1b, 0x147b, 0x0d1b,
+ 0x2000, 0x147b, 0x2000, 0x147b, 0x147b, 0x0d1b, 0x147b, 0x0d1b,
+ 0x1c72, 0x11cf, 0x1c72, 0x11cf, 0x11cf, 0x0b4d, 0x11cf, 0x0b4d,
+ 0x1c72, 0x11cf, 0x1c72, 0x11cf, 0x11cf, 0x0b4d, 0x11cf, 0x0b4d
+};
+
+static const u16 inverse_quantization_table[QUANTIZATION_TABLE_LEN] = {
+ 0x800a, 0x800d, 0x800a, 0x800d, 0x800d, 0x8010, 0x800d, 0x8010,
+ 0x800a, 0x800d, 0x800a, 0x800d, 0x800d, 0x8010, 0x800d, 0x8010,
+ 0x800b, 0x800e, 0x800b, 0x800e, 0x800e, 0x8012, 0x800e, 0x8012,
+ 0x800b, 0x800e, 0x800b, 0x800e, 0x800e, 0x8012, 0x800e, 0x8012,
+ 0x800d, 0x8010, 0x800d, 0x8010, 0x8010, 0x8014, 0x8010, 0x8014,
+ 0x800d, 0x8010, 0x800d, 0x8010, 0x8010, 0x8014, 0x8010, 0x8014,
+ 0x800e, 0x8012, 0x800e, 0x8012, 0x8012, 0x8017, 0x8012, 0x8017,
+ 0x800e, 0x8012, 0x800e, 0x8012, 0x8012, 0x8017, 0x8012, 0x8017,
+ 0x8010, 0x8014, 0x8010, 0x8014, 0x8014, 0x8019, 0x8014, 0x8019,
+ 0x8010, 0x8014, 0x8010, 0x8014, 0x8014, 0x8019, 0x8014, 0x8019,
+ 0x8012, 0x8017, 0x8012, 0x8017, 0x8017, 0x801d, 0x8017, 0x801d,
+ 0x8012, 0x8017, 0x8012, 0x8017, 0x8017, 0x801d, 0x8017, 0x801d
+};
+
+static const u16 encoder_vlc_lookup_table[VLC_LOOKUP_TABLE_LEN] = {
+ 0x011, 0x000, 0x000, 0x000, 0x065, 0x021, 0x000, 0x000, 0x087, 0x064,
+ 0x031, 0x000, 0x097, 0x086, 0x075, 0x053, 0x0a7, 0x096, 0x085, 0x063,
+ 0x0b7, 0x0a6, 0x095, 0x074, 0x0df, 0x0b6, 0x0a5, 0x084, 0x0db, 0x0de,
+ 0x0b5, 0x094, 0x0d8, 0x0da, 0x0dd, 0x0a4, 0x0ef, 0x0ee, 0x0d9, 0x0b4,
+ 0x0eb, 0x0ea, 0x0ed, 0x0dc, 0x0ff, 0x0fe, 0x0e9, 0x0ec, 0x0fb, 0x0fa,
+ 0x0fd, 0x0e8, 0x10f, 0x0f1, 0x0f9, 0x0fc, 0x10b, 0x10e, 0x10d, 0x0f8,
+ 0x107, 0x10a, 0x109, 0x10c, 0x104, 0x106, 0x105, 0x108, 0x023, 0x000,
+ 0x000, 0x000, 0x06b, 0x022, 0x000, 0x000, 0x067, 0x057, 0x033, 0x000,
+ 0x077, 0x06a, 0x069, 0x045, 0x087, 0x066, 0x065, 0x044, 0x084, 0x076,
+ 0x075, 0x056, 0x097, 0x086, 0x085, 0x068, 0x0bf, 0x096, 0x095, 0x064,
+ 0x0bb, 0x0be, 0x0bd, 0x074, 0x0cf, 0x0ba, 0x0b9, 0x094, 0x0cb, 0x0ce,
+ 0x0cd, 0x0bc, 0x0c8, 0x0ca, 0x0c9, 0x0b8, 0x0df, 0x0de, 0x0dd, 0x0cc,
+ 0x0db, 0x0da, 0x0d9, 0x0dc, 0x0d7, 0x0eb, 0x0d6, 0x0d8, 0x0e9, 0x0e8,
+ 0x0ea, 0x0d1, 0x0e7, 0x0e6, 0x0e5, 0x0e4, 0x04f, 0x000, 0x000, 0x000,
+ 0x06f, 0x04e, 0x000, 0x000, 0x06b, 0x05f, 0x04d, 0x000, 0x068, 0x05c,
+ 0x05e, 0x04c, 0x07f, 0x05a, 0x05b, 0x04b, 0x07b, 0x058, 0x059, 0x04a,
+ 0x079, 0x06e, 0x06d, 0x049, 0x078, 0x06a, 0x069, 0x048, 0x08f, 0x07e,
+ 0x07d, 0x05d, 0x08b, 0x08e, 0x07a, 0x06c, 0x09f, 0x08a, 0x08d, 0x07c,
+ 0x09b, 0x09e, 0x089, 0x08c, 0x098, 0x09a, 0x09d, 0x088, 0x0ad, 0x097,
+ 0x099, 0x09c, 0x0a9, 0x0ac, 0x0ab, 0x0aa, 0x0a5, 0x0a8, 0x0a7, 0x0a6,
+ 0x0a1, 0x0a4, 0x0a3, 0x0a2, 0x021, 0x000, 0x000, 0x000, 0x067, 0x011,
+ 0x000, 0x000, 0x064, 0x066, 0x031, 0x000, 0x063, 0x073, 0x072, 0x065,
+ 0x062, 0x083, 0x082, 0x070, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x011, 0x010,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x011, 0x021, 0x020, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x023, 0x022, 0x021, 0x020, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x023, 0x022, 0x021, 0x031,
+ 0x030, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x023, 0x022, 0x033, 0x032, 0x031, 0x030, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x023, 0x030,
+ 0x031, 0x033, 0x032, 0x035, 0x034, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x037, 0x036, 0x035, 0x034, 0x033, 0x032,
+ 0x031, 0x041, 0x051, 0x061, 0x071, 0x081, 0x091, 0x0a1, 0x0b1, 0x000,
+ 0x002, 0x000, 0x0e4, 0x011, 0x0f4, 0x002, 0x024, 0x003, 0x005, 0x012,
+ 0x034, 0x013, 0x065, 0x024, 0x013, 0x063, 0x015, 0x022, 0x075, 0x034,
+ 0x044, 0x023, 0x023, 0x073, 0x054, 0x033, 0x033, 0x004, 0x043, 0x014,
+ 0x011, 0x043, 0x014, 0x001, 0x025, 0x015, 0x035, 0x025, 0x064, 0x055,
+ 0x045, 0x035, 0x074, 0x065, 0x085, 0x0d5, 0x012, 0x095, 0x055, 0x045,
+ 0x095, 0x0e5, 0x084, 0x075, 0x022, 0x0a5, 0x094, 0x085, 0x032, 0x0b5,
+ 0x003, 0x0c5, 0x001, 0x044, 0x0a5, 0x032, 0x0b5, 0x094, 0x0c5, 0x0a4,
+ 0x0a4, 0x054, 0x0d5, 0x0b4, 0x0b4, 0x064, 0x0f5, 0x0f5, 0x053, 0x0d4,
+ 0x0e5, 0x0c4, 0x105, 0x105, 0x0c4, 0x074, 0x063, 0x0e4, 0x0d4, 0x084,
+ 0x073, 0x0f4, 0x004, 0x005, 0x000, 0x053, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x011, 0x021, 0x031, 0x030, 0x011, 0x021, 0x020, 0x000,
+ 0x011, 0x010, 0x000, 0x000, 0x011, 0x033, 0x032, 0x043, 0x042, 0x053,
+ 0x052, 0x063, 0x062, 0x073, 0x072, 0x083, 0x082, 0x093, 0x092, 0x091,
+ 0x037, 0x036, 0x035, 0x034, 0x033, 0x045, 0x044, 0x043, 0x042, 0x053,
+ 0x052, 0x063, 0x062, 0x061, 0x060, 0x000, 0x045, 0x037, 0x036, 0x035,
+ 0x044, 0x043, 0x034, 0x033, 0x042, 0x053, 0x052, 0x061, 0x051, 0x060,
+ 0x000, 0x000, 0x053, 0x037, 0x045, 0x044, 0x036, 0x035, 0x034, 0x043,
+ 0x033, 0x042, 0x052, 0x051, 0x050, 0x000, 0x000, 0x000, 0x045, 0x044,
+ 0x043, 0x037, 0x036, 0x035, 0x034, 0x033, 0x042, 0x051, 0x041, 0x050,
+ 0x000, 0x000, 0x000, 0x000, 0x061, 0x051, 0x037, 0x036, 0x035, 0x034,
+ 0x033, 0x032, 0x041, 0x031, 0x060, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x061, 0x051, 0x035, 0x034, 0x033, 0x023, 0x032, 0x041, 0x031, 0x060,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x061, 0x041, 0x051, 0x033,
+ 0x023, 0x022, 0x032, 0x031, 0x060, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x061, 0x060, 0x041, 0x023, 0x022, 0x031, 0x021, 0x051,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x051, 0x050,
+ 0x031, 0x023, 0x022, 0x021, 0x041, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x040, 0x041, 0x031, 0x032, 0x011, 0x033,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x040, 0x041, 0x021, 0x011, 0x031, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x030, 0x031, 0x011, 0x021,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x020, 0x021, 0x011, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x010, 0x011,
+ 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000, 0x000,
+ 0x000, 0x000, 0x000, 0x000
+};
+
+static const unsigned int lambda_lookup_table[] = {
+ 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020,
+ 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020, 0x0020,
+ 0x0040, 0x0040, 0x0040, 0x0040, 0x0060, 0x0060, 0x0060, 0x0080,
+ 0x0080, 0x0080, 0x00a0, 0x00c0, 0x00c0, 0x00e0, 0x0100, 0x0120,
+ 0x0140, 0x0160, 0x01a0, 0x01c0, 0x0200, 0x0240, 0x0280, 0x02e0,
+ 0x0320, 0x03a0, 0x0400, 0x0480, 0x0500, 0x05a0, 0x0660, 0x0720,
+ 0x0800, 0x0900, 0x0a20, 0x0b60
+};
+
+static const unsigned int intra4x4_lambda3[] = {
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 3, 3, 3, 4,
+ 4, 4, 5, 6, 6, 7, 8, 9,
+ 10, 11, 13, 14, 16, 18, 20, 23,
+ 25, 29, 32, 36, 40, 45, 51, 57,
+ 64, 72, 81, 91
+};
+
+static v4l2_std_id tw5864_get_v4l2_std(enum tw5864_vid_std std);
+static enum tw5864_vid_std tw5864_from_v4l2_std(v4l2_std_id v4l2_std);
+
+static void tw5864_handle_frame_task(unsigned long data);
+static void tw5864_handle_frame(struct tw5864_h264_frame *frame);
+static void tw5864_frame_interval_set(struct tw5864_input *input);
+
+static int tw5864_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_ctxs[])
+{
+ if (*num_planes)
+ return sizes[0] < H264_VLC_BUF_SIZE ? -EINVAL : 0;
+
+ sizes[0] = H264_VLC_BUF_SIZE;
+ *num_planes = 1;
+
+ return 0;
+}
+
+static void tw5864_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct tw5864_input *dev = vb2_get_drv_priv(vq);
+ struct tw5864_buf *buf = container_of(vbuf, struct tw5864_buf, vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->slock, flags);
+ list_add_tail(&buf->list, &dev->active);
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+static int tw5864_input_std_get(struct tw5864_input *input,
+ enum tw5864_vid_std *std)
+{
+ struct tw5864_dev *dev = input->root;
+ u8 std_reg = tw_indir_readb(TW5864_INDIR_VIN_E(input->nr));
+
+ *std = (std_reg & 0x70) >> 4;
+
+ if (std_reg & 0x80) {
+ dev_dbg(&dev->pci->dev,
+ "Video format detection is in progress, please wait\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int tw5864_enable_input(struct tw5864_input *input)
+{
+ struct tw5864_dev *dev = input->root;
+ int nr = input->nr;
+ unsigned long flags;
+ int d1_width = 720;
+ int d1_height;
+ int frame_width_bus_value = 0;
+ int frame_height_bus_value = 0;
+ int reg_frame_bus = 0x1c;
+ int fmt_reg_value = 0;
+ int downscale_enabled = 0;
+
+ dev_dbg(&dev->pci->dev, "Enabling channel %d\n", nr);
+
+ input->frame_seqno = 0;
+ input->frame_gop_seqno = 0;
+ input->h264_idr_pic_id = 0;
+
+ input->reg_dsp_qp = input->qp;
+ input->reg_dsp_ref_mvp_lambda = lambda_lookup_table[input->qp];
+ input->reg_dsp_i4x4_weight = intra4x4_lambda3[input->qp];
+ input->reg_emu = TW5864_EMU_EN_LPF | TW5864_EMU_EN_BHOST
+ | TW5864_EMU_EN_SEN | TW5864_EMU_EN_ME | TW5864_EMU_EN_DDR;
+ input->reg_dsp = nr /* channel id */
+ | TW5864_DSP_CHROM_SW
+ | ((0xa << 8) & TW5864_DSP_MB_DELAY);
+
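+ /* Resolution is fixed at D1 for now; the switch below handles all modes */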
+ input->resolution = D1;
+
+ d1_height = (input->std == STD_NTSC) ? 480 : 576;
+
+ input->width = d1_width;
+ input->height = d1_height;
+
+ input->reg_interlacing = 0x4;
+
+ switch (input->resolution) {
+ case D1:
+ frame_width_bus_value = 0x2cf;
+ frame_height_bus_value = input->height - 1;
+ reg_frame_bus = 0x1c;
+ fmt_reg_value = 0;
+ downscale_enabled = 0;
+ input->reg_dsp_codec |= TW5864_CIF_MAP_MD | TW5864_HD1_MAP_MD;
+ input->reg_emu |= TW5864_DSP_FRAME_TYPE_D1;
+ input->reg_interlacing = TW5864_DI_EN | TW5864_DSP_INTER_ST;
+
+ tw_setl(TW5864_FULL_HALF_FLAG, 1 << nr);
+ break;
+ case HD1:
+ input->height /= 2;
+ input->width /= 2;
+ frame_width_bus_value = 0x2cf;
+ frame_height_bus_value = input->height * 2 - 1;
+ reg_frame_bus = 0x1c;
+ fmt_reg_value = 0;
+ downscale_enabled = 0;
+ input->reg_dsp_codec |= TW5864_HD1_MAP_MD;
+ input->reg_emu |= TW5864_DSP_FRAME_TYPE_D1;
+
+ tw_clearl(TW5864_FULL_HALF_FLAG, 1 << nr);
+
+ break;
+ case CIF:
+ input->height /= 4;
+ input->width /= 2;
+ frame_width_bus_value = 0x15f;
+ frame_height_bus_value = input->height * 2 - 1;
+ reg_frame_bus = 0x07;
+ fmt_reg_value = 1;
+ downscale_enabled = 1;
+ input->reg_dsp_codec |= TW5864_CIF_MAP_MD;
+
+ tw_clearl(TW5864_FULL_HALF_FLAG, 1 << nr);
+ break;
+ case QCIF:
+ input->height /= 4;
+ input->width /= 4;
+ frame_width_bus_value = 0x15f;
+ frame_height_bus_value = input->height * 2 - 1;
+ reg_frame_bus = 0x07;
+ fmt_reg_value = 1;
+ downscale_enabled = 1;
+ input->reg_dsp_codec |= TW5864_CIF_MAP_MD;
+
+ tw_clearl(TW5864_FULL_HALF_FLAG, 1 << nr);
+ break;
+ }
+
+ /* analog input width / 4 and height / 4 */
+ tw_indir_writeb(TW5864_INDIR_IN_PIC_WIDTH(nr), d1_width / 4);
+ tw_indir_writeb(TW5864_INDIR_IN_PIC_HEIGHT(nr), d1_height / 4);
+
+ /* output width / 4 and height / 4 */
+ tw_indir_writeb(TW5864_INDIR_OUT_PIC_WIDTH(nr), input->width / 4);
+ tw_indir_writeb(TW5864_INDIR_OUT_PIC_HEIGHT(nr), input->height / 4);
+
+ tw_writel(TW5864_DSP_PIC_MAX_MB,
+ ((input->width / 16) << 8) | (input->height / 16));
+
+ tw_writel(TW5864_FRAME_WIDTH_BUS_A(nr),
+ frame_width_bus_value);
+ tw_writel(TW5864_FRAME_WIDTH_BUS_B(nr),
+ frame_width_bus_value);
+ tw_writel(TW5864_FRAME_HEIGHT_BUS_A(nr),
+ frame_height_bus_value);
+ tw_writel(TW5864_FRAME_HEIGHT_BUS_B(nr),
+ (frame_height_bus_value + 1) / 2 - 1);
+
+ tw5864_frame_interval_set(input);
+
+ if (downscale_enabled)
+ tw_setl(TW5864_H264EN_CH_DNS, 1 << nr);
+
+ tw_mask_shift_writel(TW5864_H264EN_CH_FMT_REG1, 0x3, 2 * nr,
+ fmt_reg_value);
+
+ tw_mask_shift_writel((nr < 2
+ ? TW5864_H264EN_RATE_MAX_LINE_REG1
+ : TW5864_H264EN_RATE_MAX_LINE_REG2),
+ 0x1f, 5 * (nr % 2),
+ input->std == STD_NTSC ? 29 : 24);
+
+ tw_mask_shift_writel((nr < 2) ? TW5864_FRAME_BUS1 :
+ TW5864_FRAME_BUS2, 0xff, (nr % 2) * 8,
+ reg_frame_bus);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ input->enabled = 1;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ return 0;
+}
+
+void tw5864_request_encoded_frame(struct tw5864_input *input)
+{
+ struct tw5864_dev *dev = input->root;
+ u32 enc_buf_id_new;
+
+ tw_setl(TW5864_DSP_CODEC, TW5864_CIF_MAP_MD | TW5864_HD1_MAP_MD);
+ tw_writel(TW5864_EMU, input->reg_emu);
+ tw_writel(TW5864_INTERLACING, input->reg_interlacing);
+ tw_writel(TW5864_DSP, input->reg_dsp);
+
+ tw_writel(TW5864_DSP_QP, input->reg_dsp_qp);
+ tw_writel(TW5864_DSP_REF_MVP_LAMBDA, input->reg_dsp_ref_mvp_lambda);
+ tw_writel(TW5864_DSP_I4x4_WEIGHT, input->reg_dsp_i4x4_weight);
+ tw_mask_shift_writel(TW5864_DSP_INTRA_MODE, TW5864_DSP_INTRA_MODE_MASK,
+ TW5864_DSP_INTRA_MODE_SHIFT,
+ TW5864_DSP_INTRA_MODE_16x16);
+
+ if (input->frame_gop_seqno == 0) {
+ /* Produce I-frame */
+ tw_writel(TW5864_MOTION_SEARCH_ETC, TW5864_INTRA_EN);
+ input->h264_idr_pic_id++;
+ input->h264_idr_pic_id &= TW5864_DSP_REF_FRM;
+ } else {
+ /* Produce P-frame */
+ tw_writel(TW5864_MOTION_SEARCH_ETC, TW5864_INTRA_EN |
+ TW5864_ME_EN | BIT(5) /* SRCH_OPT default */);
+ }
+ tw5864_prepare_frame_headers(input);
+ tw_writel(TW5864_VLC,
+ TW5864_VLC_PCI_SEL |
+ ((input->tail_nb_bits + 24) << TW5864_VLC_BIT_ALIGN_SHIFT) |
+ input->reg_dsp_qp);
+
+ enc_buf_id_new = tw_mask_shift_readl(TW5864_ENC_BUF_PTR_REC1, 0x3,
+ 2 * input->nr);
+ tw_writel(TW5864_DSP_ENC_ORG_PTR_REG,
+ enc_buf_id_new << TW5864_DSP_ENC_ORG_PTR_SHIFT);
+ tw_writel(TW5864_DSP_ENC_REC,
+ enc_buf_id_new << 12 | ((enc_buf_id_new + 3) & 3));
+
+ tw_writel(TW5864_SLICE, TW5864_START_NSLICE);
+ tw_writel(TW5864_SLICE, 0);
+}
+
+static int tw5864_disable_input(struct tw5864_input *input)
+{
+ struct tw5864_dev *dev = input->root;
+ unsigned long flags;
+
+ dev_dbg(&dev->pci->dev, "Disabling channel %d\n", input->nr);
+
+ spin_lock_irqsave(&dev->slock, flags);
+ input->enabled = 0;
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return 0;
+}
+
+static int tw5864_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct tw5864_input *input = vb2_get_drv_priv(q);
+ int ret;
+
+ ret = tw5864_enable_input(input);
+ if (!ret)
+ return 0;
+
+ while (!list_empty(&input->active)) {
+ struct tw5864_buf *buf = list_entry(input->active.next,
+ struct tw5864_buf, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ return ret;
+}
+
+static void tw5864_stop_streaming(struct vb2_queue *q)
+{
+ unsigned long flags;
+ struct tw5864_input *input = vb2_get_drv_priv(q);
+
+ tw5864_disable_input(input);
+
+ spin_lock_irqsave(&input->slock, flags);
+ if (input->vb) {
+ vb2_buffer_done(&input->vb->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ input->vb = NULL;
+ }
+ while (!list_empty(&input->active)) {
+ struct tw5864_buf *buf = list_entry(input->active.next,
+ struct tw5864_buf, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&input->slock, flags);
+}
+
+static const struct vb2_ops tw5864_video_qops = {
+ .queue_setup = tw5864_queue_setup,
+ .buf_queue = tw5864_buf_queue,
+ .start_streaming = tw5864_start_streaming,
+ .stop_streaming = tw5864_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int tw5864_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct tw5864_input *input =
+ container_of(ctrl->handler, struct tw5864_input, hdl);
+ struct tw5864_dev *dev = input->root;
+ unsigned long flags;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ tw_indir_writeb(TW5864_INDIR_VIN_A_BRIGHT(input->nr),
+ (u8)ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ tw_indir_writeb(TW5864_INDIR_VIN_7_HUE(input->nr),
+ (u8)ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ tw_indir_writeb(TW5864_INDIR_VIN_9_CNTRST(input->nr),
+ (u8)ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ tw_indir_writeb(TW5864_INDIR_VIN_B_SAT_U(input->nr),
+ (u8)ctrl->val);
+ tw_indir_writeb(TW5864_INDIR_VIN_C_SAT_V(input->nr),
+ (u8)ctrl->val);
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ input->gop = ctrl->val;
+ return 0;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ spin_lock_irqsave(&input->slock, flags);
+ input->qp = ctrl->val;
+ input->reg_dsp_qp = input->qp;
+ input->reg_dsp_ref_mvp_lambda = lambda_lookup_table[input->qp];
+ input->reg_dsp_i4x4_weight = intra4x4_lambda3[input->qp];
+ spin_unlock_irqrestore(&input->slock, flags);
+ return 0;
+ case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD:
+ memset(input->md_threshold_grid_values, ctrl->val,
+ sizeof(input->md_threshold_grid_values));
+ return 0;
+ case V4L2_CID_DETECT_MD_MODE:
+ return 0;
+ case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
+ /* input->md_threshold_grid_ctrl->p_new.p_u16 contains data */
+ memcpy(input->md_threshold_grid_values,
+ input->md_threshold_grid_ctrl->p_new.p_u16,
+ sizeof(input->md_threshold_grid_values));
+ return 0;
+ }
+ return 0;
+}
+
+static int tw5864_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw5864_input *input = video_drvdata(file);
+
+ f->fmt.pix.width = 720;
+ switch (input->std) {
+ default:
+ WARN_ON_ONCE(1);
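+ /* fall through */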
+ case STD_NTSC:
+ f->fmt.pix.height = 480;
+ break;
+ case STD_PAL:
+ case STD_SECAM:
+ f->fmt.pix.height = 576;
+ break;
+ }
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
+ f->fmt.pix.sizeimage = H264_VLC_BUF_SIZE;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+}
+
+static int tw5864_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct tw5864_input *input = video_drvdata(file);
+ struct tw5864_dev *dev = input->root;
+
+ u8 v1 = tw_indir_readb(TW5864_INDIR_VIN_0(input->nr));
+ u8 v2 = tw_indir_readb(TW5864_INDIR_VIN_D(input->nr));
+
+ if (i->index)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ snprintf(i->name, sizeof(i->name), "Encoder %d", input->nr);
+ i->std = TW5864_NORMS;
+ if (v1 & (1 << 7))
+ i->status |= V4L2_IN_ST_NO_SYNC;
+ if (!(v1 & (1 << 6)))
+ i->status |= V4L2_IN_ST_NO_H_LOCK;
+ if (v1 & (1 << 2))
+ i->status |= V4L2_IN_ST_NO_SIGNAL;
+ if (v1 & (1 << 1))
+ i->status |= V4L2_IN_ST_NO_COLOR;
+ if (v2 & (1 << 2))
+ i->status |= V4L2_IN_ST_MACROVISION;
+
+ return 0;
+}
+
+static int tw5864_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int tw5864_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i)
+ return -EINVAL;
+ return 0;
+}
+
+static int tw5864_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct tw5864_input *input = video_drvdata(file);
+
+ strcpy(cap->driver, "tw5864");
+ snprintf(cap->card, sizeof(cap->card), "TW5864 Encoder %d",
+ input->nr);
+ sprintf(cap->bus_info, "PCI:%s", pci_name(input->root->pci));
+ return 0;
+}
+
+static int tw5864_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct tw5864_input *input = video_drvdata(file);
+ enum tw5864_vid_std tw_std;
+ int ret;
+
+ ret = tw5864_input_std_get(input, &tw_std);
+ if (ret)
+ return ret;
+ *std = tw5864_get_v4l2_std(tw_std);
+
+ return 0;
+}
+
+static int tw5864_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct tw5864_input *input = video_drvdata(file);
+
+ *std = input->v4l2_std;
+ return 0;
+}
+
+static int tw5864_s_std(struct file *file, void *priv, v4l2_std_id std)
+{
+ struct tw5864_input *input = video_drvdata(file);
+ struct tw5864_dev *dev = input->root;
+
+ input->v4l2_std = std;
+ input->std = tw5864_from_v4l2_std(std);
+ tw_indir_writeb(TW5864_INDIR_VIN_E(input->nr), input->std);
+ return 0;
+}
+
+static int tw5864_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_PIX_FMT_H264;
+
+ return 0;
+}
+
+static int tw5864_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ case V4L2_EVENT_MOTION_DET:
+ /*
+ * Allow for up to 30 events (1 second for NTSC) to be stored.
+ */
+ return v4l2_event_subscribe(fh, sub, 30, NULL);
+ }
+ return -EINVAL;
+}
+
+static void tw5864_frame_interval_set(struct tw5864_input *input)
+{
+ /*
+ * This register value seems to work as follows: within each one-second
+ * interval, when processing the Nth frame, the hardware checks the Nth
+ * bit of the register value; if the bit is 1, the frame is processed,
+ * otherwise it is discarded.
+ * So any unary representation would work, but the gaps between frames
+ * should be kept roughly equal.
+ *
+ * For 1 FPS - 0x00000001
+ * 00000000 00000000 00000000 00000001
+ *
+ * For max FPS - set all 25/30 lower bits:
+ * 00111111 11111111 11111111 11111111 (NTSC)
+ * 00000001 11111111 11111111 11111111 (PAL)
+ *
+ * For half of max FPS - use such pattern:
+ * 00010101 01010101 01010101 01010101 (NTSC)
+ * 00000001 01010101 01010101 01010101 (PAL)
+ *
+ * Et cetera.
+ *
+ * The value supplied to hardware is capped by mask of 25/30 lower bits.
+ */
+ struct tw5864_dev *dev = input->root;
+ u32 unary_framerate = 0;
+ int shift = 0;
+ int std_max_fps = input->std == STD_NTSC ? 30 : 25;
+
+ for (shift = 0; shift < std_max_fps; shift += input->frame_interval)
+ unary_framerate |= 0x00000001 << shift;
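+ /* e.g. NTSC with frame_interval == 2 gives 0x15555555, as shown above */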
+
+ tw_writel(TW5864_H264EN_RATE_CNTL_LO_WORD(input->nr, 0),
+ unary_framerate >> 16);
+ tw_writel(TW5864_H264EN_RATE_CNTL_HI_WORD(input->nr, 0),
+ unary_framerate & 0xffff);
+}
+
+static int tw5864_frameinterval_get(struct tw5864_input *input,
+ struct v4l2_fract *frameinterval)
+{
+ switch (input->std) {
+ case STD_NTSC:
+ frameinterval->numerator = 1001;
+ frameinterval->denominator = 30000;
+ break;
+ case STD_PAL:
+ case STD_SECAM:
+ frameinterval->numerator = 1;
+ frameinterval->denominator = 25;
+ break;
+ default:
+ WARN(1, "tw5864_frameinterval_get requested for unknown std %d\n",
+ input->std);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tw5864_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct tw5864_input *input = video_drvdata(file);
+
+ if (fsize->index > 0)
+ return -EINVAL;
+ if (fsize->pixel_format != V4L2_PIX_FMT_H264)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = 720;
+ fsize->discrete.height = input->std == STD_NTSC ? 480 : 576;
+
+ return 0;
+}
+
+static int tw5864_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fintv)
+{
+ struct tw5864_input *input = video_drvdata(file);
+ struct v4l2_fract frameinterval;
+ int std_max_fps = input->std == STD_NTSC ? 30 : 25;
+ struct v4l2_frmsizeenum fsize = { .index = fintv->index,
+ .pixel_format = fintv->pixel_format };
+ int ret;
+
+ ret = tw5864_enum_framesizes(file, priv, &fsize);
+ if (ret)
+ return ret;
+
+ if (fintv->width != fsize.discrete.width ||
+ fintv->height != fsize.discrete.height)
+ return -EINVAL;
+
+ fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+
+ ret = tw5864_frameinterval_get(input, &frameinterval);
+ if (ret)
+ return ret;
+
+ fintv->stepwise.step = frameinterval;
+ fintv->stepwise.min = frameinterval;
+ fintv->stepwise.max = frameinterval;
+ fintv->stepwise.max.numerator *= std_max_fps;
+
+ return 0;
+}
+
+static int tw5864_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct tw5864_input *input = video_drvdata(file);
+ struct v4l2_captureparm *cp = &sp->parm.capture;
+ int ret;
+
+ cp->capability = V4L2_CAP_TIMEPERFRAME;
+
+ ret = tw5864_frameinterval_get(input, &cp->timeperframe);
+ if (ret)
+ return ret;
+
+ cp->timeperframe.numerator *= input->frame_interval;
+ cp->capturemode = 0;
+ cp->readbuffers = 2;
+
+ return 0;
+}
+
+static int tw5864_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct tw5864_input *input = video_drvdata(file);
+ struct v4l2_fract *t = &sp->parm.capture.timeperframe;
+ struct v4l2_fract time_base;
+ int ret;
+
+ ret = tw5864_frameinterval_get(input, &time_base);
+ if (ret)
+ return ret;
+
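+ /*
+ * Normalize the requested frame interval to the standard's time base,
+ * so that frame_interval below ends up in 1/max-FPS units.
+ */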
+ if (!t->numerator || !t->denominator) {
+ t->numerator = time_base.numerator * input->frame_interval;
+ t->denominator = time_base.denominator;
+ } else if (t->denominator != time_base.denominator) {
+ t->numerator = t->numerator * time_base.denominator /
+ t->denominator;
+ t->denominator = time_base.denominator;
+ }
+
+ input->frame_interval = t->numerator / time_base.numerator;
+ if (input->frame_interval < 1)
+ input->frame_interval = 1;
+ tw5864_frame_interval_set(input);
+ return tw5864_g_parm(file, priv, sp);
+}
+
+static const struct v4l2_ctrl_ops tw5864_ctrl_ops = {
+ .s_ctrl = tw5864_s_ctrl,
+};
+
+static const struct v4l2_file_operations video_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+
+#define INDIR_SPACE_MAP_SHIFT 0x100000
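+/* Debug register addresses at or above this offset map to the indirect register space */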
+
+static int tw5864_g_reg(struct file *file, void *fh,
+ struct v4l2_dbg_register *reg)
+{
+ struct tw5864_input *input = video_drvdata(file);
+ struct tw5864_dev *dev = input->root;
+
+ if (reg->reg < INDIR_SPACE_MAP_SHIFT) {
+ if (reg->reg > 0x87fff)
+ return -EINVAL;
+ reg->size = 4;
+ reg->val = tw_readl(reg->reg);
+ } else {
+ __u64 indir_addr = reg->reg - INDIR_SPACE_MAP_SHIFT;
+
+ if (indir_addr > 0xefe)
+ return -EINVAL;
+ reg->size = 1;
+ reg->val = tw_indir_readb(indir_addr);
+ }
+ return 0;
+}
+
+static int tw5864_s_reg(struct file *file, void *fh,
+ const struct v4l2_dbg_register *reg)
+{
+ struct tw5864_input *input = video_drvdata(file);
+ struct tw5864_dev *dev = input->root;
+
+ if (reg->reg < INDIR_SPACE_MAP_SHIFT) {
+ if (reg->reg > 0x87fff)
+ return -EINVAL;
+ tw_writel(reg->reg, reg->val);
+ } else {
+ __u64 indir_addr = reg->reg - INDIR_SPACE_MAP_SHIFT;
+
+ if (indir_addr > 0xefe)
+ return -EINVAL;
+ tw_indir_writeb(indir_addr, reg->val);
+ }
+ return 0;
+}
+#endif
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = tw5864_querycap,
+ .vidioc_enum_fmt_vid_cap = tw5864_enum_fmt_vid_cap,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_querystd = tw5864_querystd,
+ .vidioc_s_std = tw5864_s_std,
+ .vidioc_g_std = tw5864_g_std,
+ .vidioc_enum_input = tw5864_enum_input,
+ .vidioc_g_input = tw5864_g_input,
+ .vidioc_s_input = tw5864_s_input,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_try_fmt_vid_cap = tw5864_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = tw5864_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = tw5864_fmt_vid_cap,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = tw5864_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_enum_framesizes = tw5864_enum_framesizes,
+ .vidioc_enum_frameintervals = tw5864_enum_frameintervals,
+ .vidioc_s_parm = tw5864_s_parm,
+ .vidioc_g_parm = tw5864_g_parm,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = tw5864_g_reg,
+ .vidioc_s_register = tw5864_s_reg,
+#endif
+};
+
+static const struct video_device tw5864_video_template = {
+ .name = "tw5864_video",
+ .fops = &video_fops,
+ .ioctl_ops = &video_ioctl_ops,
+ .release = video_device_release_empty,
+ .tvnorms = TW5864_NORMS,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
+};
+
+/* Motion Detection Threshold matrix */
+static const struct v4l2_ctrl_config tw5864_md_thresholds = {
+ .ops = &tw5864_ctrl_ops,
+ .id = V4L2_CID_DETECT_MD_THRESHOLD_GRID,
+ .dims = {MD_CELLS_HOR, MD_CELLS_VERT},
+ .def = 14,
+ /* See tw5864_md_metric_from_mvd() */
+ .max = 2 * 0x0f,
+ .step = 1,
+};
+
+static int tw5864_video_input_init(struct tw5864_input *dev, int video_nr);
+static void tw5864_video_input_fini(struct tw5864_input *dev);
+static void tw5864_encoder_tables_upload(struct tw5864_dev *dev);
+
+int tw5864_video_init(struct tw5864_dev *dev, int *video_nr)
+{
+ int i;
+ int ret;
+ unsigned long flags;
+ int last_dma_allocated = -1;
+ int last_input_nr_registered = -1;
+
+ for (i = 0; i < H264_BUF_CNT; i++) {
+ struct tw5864_h264_frame *frame = &dev->h264_buf[i];
+
+ frame->vlc.addr = dma_alloc_coherent(&dev->pci->dev,
+ H264_VLC_BUF_SIZE,
+ &frame->vlc.dma_addr,
+ GFP_KERNEL | GFP_DMA32);
+ if (!frame->vlc.addr) {
+ dev_err(&dev->pci->dev, "dma alloc fail\n");
+ ret = -ENOMEM;
+ goto free_dma;
+ }
+ frame->mv.addr = dma_alloc_coherent(&dev->pci->dev,
+ H264_MV_BUF_SIZE,
+ &frame->mv.dma_addr,
+ GFP_KERNEL | GFP_DMA32);
+ if (!frame->mv.addr) {
+ dev_err(&dev->pci->dev, "dma alloc fail\n");
+ ret = -ENOMEM;
+ dma_free_coherent(&dev->pci->dev, H264_VLC_BUF_SIZE,
+ frame->vlc.addr, frame->vlc.dma_addr);
+ goto free_dma;
+ }
+ last_dma_allocated = i;
+ }
+
+ tw5864_encoder_tables_upload(dev);
+
+ /* Picture is distorted without this block */
+ /* use falling edge to sample 54M to 108M */
+ tw_indir_writeb(TW5864_INDIR_VD_108_POL, TW5864_INDIR_VD_108_POL_BOTH);
+ tw_indir_writeb(TW5864_INDIR_CLK0_SEL, 0x00);
+
+ tw_indir_writeb(TW5864_INDIR_DDRA_DLL_DQS_SEL0, 0x02);
+ tw_indir_writeb(TW5864_INDIR_DDRA_DLL_DQS_SEL1, 0x02);
+ tw_indir_writeb(TW5864_INDIR_DDRA_DLL_CLK90_SEL, 0x02);
+ tw_indir_writeb(TW5864_INDIR_DDRB_DLL_DQS_SEL0, 0x02);
+ tw_indir_writeb(TW5864_INDIR_DDRB_DLL_DQS_SEL1, 0x02);
+ tw_indir_writeb(TW5864_INDIR_DDRB_DLL_CLK90_SEL, 0x02);
+
+ /* video input reset */
+ tw_indir_writeb(TW5864_INDIR_RESET, 0);
+ tw_indir_writeb(TW5864_INDIR_RESET, TW5864_INDIR_RESET_VD |
+ TW5864_INDIR_RESET_DLL | TW5864_INDIR_RESET_MUX_CORE);
+ msleep(20);
+
+ /*
+ * Select Part A mode for all channels.
+ * tw_setl instead of tw_clearl for Part B mode.
+ *
+ * "Part B" appears to carry a downscaled version of the same channel
+ * that goes into Part A of the same bus.
+ */
+ tw_writel(TW5864_FULL_HALF_MODE_SEL, 0);
+
+ tw_indir_writeb(TW5864_INDIR_PV_VD_CK_POL,
+ TW5864_INDIR_PV_VD_CK_POL_VD(0) |
+ TW5864_INDIR_PV_VD_CK_POL_VD(1) |
+ TW5864_INDIR_PV_VD_CK_POL_VD(2) |
+ TW5864_INDIR_PV_VD_CK_POL_VD(3));
+
+ spin_lock_irqsave(&dev->slock, flags);
+ dev->encoder_busy = 0;
+ dev->h264_buf_r_index = 0;
+ dev->h264_buf_w_index = 0;
+ tw_writel(TW5864_VLC_STREAM_BASE_ADDR,
+ dev->h264_buf[dev->h264_buf_w_index].vlc.dma_addr);
+ tw_writel(TW5864_MV_STREAM_BASE_ADDR,
+ dev->h264_buf[dev->h264_buf_w_index].mv.dma_addr);
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ tw_writel(TW5864_SEN_EN_CH, 0x000f);
+ tw_writel(TW5864_H264EN_CH_EN, 0x000f);
+
+ tw_writel(TW5864_H264EN_BUS0_MAP, 0x00000000);
+ tw_writel(TW5864_H264EN_BUS1_MAP, 0x00001111);
+ tw_writel(TW5864_H264EN_BUS2_MAP, 0x00002222);
+ tw_writel(TW5864_H264EN_BUS3_MAP, 0x00003333);
+
+ /*
+ * Quote from Intersil (manufacturer):
+ * 0x0038 is managed by HW, and by default it won't pass the pointer set
+ * at 0x0010. So if you don't do encoding, 0x0038 should stay at '3'
+ * (with 4 frames in buffer). If you encode one frame and then move
+ * 0x0010 to '1' for example, HW will take one more frame and set it to
+ * buffer #0, and then you should see 0x0038 is set to '0'. There is
+ * only one HW encoder engine, so 4 channels cannot get encoded
+ * simultaneously. But each channel does have its own buffer (for
+ * original frames and reconstructed frames). So there is no problem to
+ * manage encoding for 4 channels at same time and no need to force
+ * I-frames in switching channels.
+ * End of quote.
+ *
+ * If we set 0x0010 (TW5864_ENC_BUF_PTR_REC1) to 0 (for any channel), we
+ * have no "rolling" (until we change this value).
+ * If we set 0x0010 (TW5864_ENC_BUF_PTR_REC1) to 0x3, it starts to roll
+ * continuously together with 0x0038.
+ */
+ tw_writel(TW5864_ENC_BUF_PTR_REC1, 0x00ff);
+ tw_writel(TW5864_PCI_INTTM_SCALE, 0);
+
+ tw_writel(TW5864_INTERLACING, TW5864_DI_EN);
+ tw_writel(TW5864_MASTER_ENB_REG, TW5864_PCI_VLC_INTR_ENB);
+ tw_writel(TW5864_PCI_INTR_CTL,
+ TW5864_TIMER_INTR_ENB | TW5864_PCI_MAST_ENB |
+ TW5864_MVD_VLC_MAST_ENB);
+
+ dev->irqmask |= TW5864_INTR_VLC_DONE | TW5864_INTR_TIMER;
+ tw5864_irqmask_apply(dev);
+
+ tasklet_init(&dev->tasklet, tw5864_handle_frame_task,
+ (unsigned long)dev);
+
+ for (i = 0; i < TW5864_INPUTS; i++) {
+ dev->inputs[i].root = dev;
+ dev->inputs[i].nr = i;
+ ret = tw5864_video_input_init(&dev->inputs[i], video_nr[i]);
+ if (ret)
+ goto fini_video_inputs;
+ last_input_nr_registered = i;
+ }
+
+ return 0;
+
+fini_video_inputs:
+ for (i = last_input_nr_registered; i >= 0; i--)
+ tw5864_video_input_fini(&dev->inputs[i]);
+
+ tasklet_kill(&dev->tasklet);
+
+free_dma:
+ for (i = last_dma_allocated; i >= 0; i--) {
+ dma_free_coherent(&dev->pci->dev, H264_VLC_BUF_SIZE,
+ dev->h264_buf[i].vlc.addr,
+ dev->h264_buf[i].vlc.dma_addr);
+ dma_free_coherent(&dev->pci->dev, H264_MV_BUF_SIZE,
+ dev->h264_buf[i].mv.addr,
+ dev->h264_buf[i].mv.dma_addr);
+ }
+
+ return ret;
+}
+
+static int tw5864_video_input_init(struct tw5864_input *input, int video_nr)
+{
+ struct tw5864_dev *dev = input->root;
+ int ret;
+ struct v4l2_ctrl_handler *hdl = &input->hdl;
+
+ mutex_init(&input->lock);
+ spin_lock_init(&input->slock);
+
+ /* setup video buffers queue */
+ INIT_LIST_HEAD(&input->active);
+ input->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ input->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ input->vidq.io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ input->vidq.ops = &tw5864_video_qops;
+ input->vidq.mem_ops = &vb2_dma_contig_memops;
+ input->vidq.drv_priv = input;
+ input->vidq.gfp_flags = 0;
+ input->vidq.buf_struct_size = sizeof(struct tw5864_buf);
+ input->vidq.lock = &input->lock;
+ input->vidq.min_buffers_needed = 2;
+ input->vidq.dev = &input->root->pci->dev;
+ ret = vb2_queue_init(&input->vidq);
+ if (ret)
+ goto free_mutex;
+
+ input->vdev = tw5864_video_template;
+ input->vdev.v4l2_dev = &input->root->v4l2_dev;
+ input->vdev.lock = &input->lock;
+ input->vdev.queue = &input->vidq;
+ video_set_drvdata(&input->vdev, input);
+
+ /* Initialize the device control structures */
+ v4l2_ctrl_handler_init(hdl, 6);
+ v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 100);
+ v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 1, MAX_GOP_SIZE, 1, GOP_SIZE);
+ v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 28, 51, 1, QP_VALUE);
+ v4l2_ctrl_new_std_menu(hdl, &tw5864_ctrl_ops,
+ V4L2_CID_DETECT_MD_MODE,
+ V4L2_DETECT_MD_MODE_THRESHOLD_GRID, 0,
+ V4L2_DETECT_MD_MODE_DISABLED);
+ v4l2_ctrl_new_std(hdl, &tw5864_ctrl_ops,
+ V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD,
+ tw5864_md_thresholds.min, tw5864_md_thresholds.max,
+ tw5864_md_thresholds.step, tw5864_md_thresholds.def);
+ input->md_threshold_grid_ctrl =
+ v4l2_ctrl_new_custom(hdl, &tw5864_md_thresholds, NULL);
+ if (hdl->error) {
+ ret = hdl->error;
+ goto free_v4l2_hdl;
+ }
+ input->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
+ input->qp = QP_VALUE;
+ input->gop = GOP_SIZE;
+ input->frame_interval = 1;
+
+ ret = video_register_device(&input->vdev, VFL_TYPE_GRABBER, video_nr);
+ if (ret)
+ goto free_v4l2_hdl;
+
+ dev_info(&input->root->pci->dev, "Registered video device %s\n",
+ video_device_node_name(&input->vdev));
+
+ /*
+ * Set default video standard. Doesn't matter which, the detected value
+ * will be found out by VIDIOC_QUERYSTD handler.
+ */
+ input->v4l2_std = V4L2_STD_NTSC_M;
+ input->std = STD_NTSC;
+
+ tw_indir_writeb(TW5864_INDIR_VIN_E(video_nr), 0x07);
+ /* to initiate auto format recognition */
+ tw_indir_writeb(TW5864_INDIR_VIN_F(video_nr), 0xff);
+
+ return 0;
+
+free_v4l2_hdl:
+ v4l2_ctrl_handler_free(hdl);
+ vb2_queue_release(&input->vidq);
+free_mutex:
+ mutex_destroy(&input->lock);
+
+ return ret;
+}
+
+static void tw5864_video_input_fini(struct tw5864_input *dev)
+{
+ video_unregister_device(&dev->vdev);
+ v4l2_ctrl_handler_free(&dev->hdl);
+ vb2_queue_release(&dev->vidq);
+}
+
+void tw5864_video_fini(struct tw5864_dev *dev)
+{
+ int i;
+
+ tasklet_kill(&dev->tasklet);
+
+ for (i = 0; i < TW5864_INPUTS; i++)
+ tw5864_video_input_fini(&dev->inputs[i]);
+
+ for (i = 0; i < H264_BUF_CNT; i++) {
+ dma_free_coherent(&dev->pci->dev, H264_VLC_BUF_SIZE,
+ dev->h264_buf[i].vlc.addr,
+ dev->h264_buf[i].vlc.dma_addr);
+ dma_free_coherent(&dev->pci->dev, H264_MV_BUF_SIZE,
+ dev->h264_buf[i].mv.addr,
+ dev->h264_buf[i].mv.dma_addr);
+ }
+}
+
+void tw5864_prepare_frame_headers(struct tw5864_input *input)
+{
+ struct tw5864_buf *vb = input->vb;
+ u8 *dst;
+ size_t dst_space;
+ unsigned long flags;
+
+ if (!vb) {
+ spin_lock_irqsave(&input->slock, flags);
+ if (list_empty(&input->active)) {
+ spin_unlock_irqrestore(&input->slock, flags);
+ input->vb = NULL;
+ return;
+ }
+ vb = list_first_entry(&input->active, struct tw5864_buf, list);
+ list_del(&vb->list);
+ spin_unlock_irqrestore(&input->slock, flags);
+ }
+
+ dst = vb2_plane_vaddr(&vb->vb.vb2_buf, 0);
+ dst_space = vb2_plane_size(&vb->vb.vb2_buf, 0);
+
+ /*
+ * The low-level bitstream writing functions have no clean way to report
+ * that the supplied buffer is too small, so we just check here, warn,
+ * and skip the check at the lower level.
+ * Currently all headers take below 32 bytes.
+ * The buffer is supposed to have plenty of free space at this point,
+ * anyway.
+ */
+ if (WARN_ON_ONCE(dst_space < 128))
+ return;
+
+ /*
+ * Generate H264 headers:
+ * If this is first frame, put SPS and PPS
+ */
+ if (input->frame_gop_seqno == 0)
+ tw5864_h264_put_stream_header(&dst, &dst_space, input->qp,
+ input->width, input->height);
+
+ /* Put slice header */
+ tw5864_h264_put_slice_header(&dst, &dst_space, input->h264_idr_pic_id,
+ input->frame_gop_seqno,
+ &input->tail_nb_bits, &input->tail);
+ input->vb = vb;
+ input->buf_cur_ptr = dst;
+ input->buf_cur_space_left = dst_space;
+}
+
+/*
+ * Returns heuristic motion detection metric value from known components of
+ * hardware-provided Motion Vector Data.
+ */
+static unsigned int tw5864_md_metric_from_mvd(u32 mvd)
+{
+ /*
+ * Format of motion vector data exposed by tw5864, according to
+ * manufacturer:
+ * mv_x 10 bits
+ * mv_y 10 bits
+ * non_zero_members 8 bits
+ * mb_type 3 bits
+ * reserved 1 bit
+ *
+ * non_zero_members: number of non-zero residuals in each macro block
+ * after quantization
+ *
+ * unsigned int reserved = mvd >> 31;
+ * unsigned int mb_type = (mvd >> 28) & 0x7;
+ * unsigned int non_zero_members = (mvd >> 20) & 0xff;
+ */
+ unsigned int mv_y = (mvd >> 10) & 0x3ff;
+ unsigned int mv_x = mvd & 0x3ff;
+
+ /* heuristic: */
+ mv_x &= 0x0f;
+ mv_y &= 0x0f;
+
+ return mv_y + mv_x;
+}
+
+static int tw5864_is_motion_triggered(struct tw5864_h264_frame *frame)
+{
+ struct tw5864_input *input = frame->input;
+ u32 *mv = (u32 *)frame->mv.addr;
+ int i;
+ int detected = 0;
+
+ for (i = 0; i < MD_CELLS; i++) {
+ const u16 thresh = input->md_threshold_grid_values[i];
+ const unsigned int metric = tw5864_md_metric_from_mvd(mv[i]);
+
+ if (metric > thresh) {
+ detected = 1;
+ break;
+ }
+ }
+ return detected;
+}
+
+static void tw5864_handle_frame_task(unsigned long data)
+{
+ struct tw5864_dev *dev = (struct tw5864_dev *)data;
+ unsigned long flags;
+ int batch_size = H264_BUF_CNT;
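+ /* Bound the work done per tasklet run to one full ring of H.264 buffers */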
+
+ spin_lock_irqsave(&dev->slock, flags);
+ while (dev->h264_buf_r_index != dev->h264_buf_w_index && batch_size--) {
+ struct tw5864_h264_frame *frame =
+ &dev->h264_buf[dev->h264_buf_r_index];
+
+ spin_unlock_irqrestore(&dev->slock, flags);
+ dma_sync_single_for_cpu(&dev->pci->dev, frame->vlc.dma_addr,
+ H264_VLC_BUF_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&dev->pci->dev, frame->mv.dma_addr,
+ H264_MV_BUF_SIZE, DMA_FROM_DEVICE);
+ tw5864_handle_frame(frame);
+ dma_sync_single_for_device(&dev->pci->dev, frame->vlc.dma_addr,
+ H264_VLC_BUF_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(&dev->pci->dev, frame->mv.dma_addr,
+ H264_MV_BUF_SIZE, DMA_FROM_DEVICE);
+ spin_lock_irqsave(&dev->slock, flags);
+
+ dev->h264_buf_r_index++;
+ dev->h264_buf_r_index %= H264_BUF_CNT;
+ }
+ spin_unlock_irqrestore(&dev->slock, flags);
+}
+
+#ifdef DEBUG
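+/*
+ * XOR all 32-bit words of the VLC buffer together with its word count;
+ * the result is compared against the checksum delivered with each
+ * encoded frame (see tw5864_handle_frame()).
+ */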
+static u32 tw5864_vlc_checksum(u32 *data, int len)
+{
+ u32 val, count_len = len;
+
+ val = *data++;
+ while (((count_len >> 2) - 1) > 0) {
+ val ^= *data++;
+ count_len -= 4;
+ }
+ val ^= htonl((len >> 2));
+ return val;
+}
+#endif
+
+static void tw5864_handle_frame(struct tw5864_h264_frame *frame)
+{
+#define SKIP_VLCBUF_BYTES 3
+ struct tw5864_input *input = frame->input;
+ struct tw5864_dev *dev = input->root;
+ struct tw5864_buf *vb;
+ struct vb2_v4l2_buffer *v4l2_buf;
+ int frame_len = frame->vlc_len - SKIP_VLCBUF_BYTES;
+ u8 *dst = input->buf_cur_ptr;
+ u8 tail_mask, vlc_mask = 0;
+ int i;
+ u8 vlc_first_byte = ((u8 *)(frame->vlc.addr + SKIP_VLCBUF_BYTES))[0];
+ unsigned long flags;
+ int zero_run;
+ u8 *src;
+ u8 *src_end;
+
+#ifdef DEBUG
+ if (frame->checksum !=
+ tw5864_vlc_checksum((u32 *)frame->vlc.addr, frame_len))
+ dev_err(&dev->pci->dev,
+ "Checksum of encoded frame doesn't match!\n");
+#endif
+
+ spin_lock_irqsave(&input->slock, flags);
+ vb = input->vb;
+ input->vb = NULL;
+ spin_unlock_irqrestore(&input->slock, flags);
+
+ if (!vb) { /* Gone because of disabling */
+ dev_dbg(&dev->pci->dev, "vb is empty, dropping frame\n");
+ return;
+ }
+
+ v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
+
+ /*
+ * Check for space.
+ * Mind the overhead of startcode emulation prevention.
+ */
+ if (input->buf_cur_space_left < frame_len * 5 / 4) {
+ dev_err_once(&dev->pci->dev,
+ "Space left in vb2 buffer (%d bytes) is below the safe margin for a frame of length %d. Dropping this frame.\n",
+ input->buf_cur_space_left, frame_len);
+ return;
+ }
+
+ for (i = 0; i < 8 - input->tail_nb_bits; i++)
+ vlc_mask |= 1 << i;
+ tail_mask = (~vlc_mask) & 0xff;
+
+ dst[0] = (input->tail & tail_mask) | (vlc_first_byte & vlc_mask);
+ frame_len--;
+ dst++;
+
+ /* H.264 startcode emulation prevention */
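+ /*
+ * An H.264 byte stream must not contain the sequences 0x000000,
+ * 0x000001, 0x000002 or 0x000003 inside NAL payload, so an escape
+ * byte 0x03 is inserted after any two zero bytes that are followed
+ * by a byte <= 0x03.
+ */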
+ src = frame->vlc.addr + SKIP_VLCBUF_BYTES + 1;
+ src_end = src + frame_len;
+ zero_run = 0;
+ for (; src < src_end; src++) {
+ if (zero_run < 2) {
+ if (*src == 0)
+ ++zero_run;
+ else
+ zero_run = 0;
+ } else {
+ if ((*src & ~0x03) == 0)
+ *dst++ = 0x03;
+ zero_run = *src == 0;
+ }
+ *dst++ = *src;
+ }
+
+ vb2_set_plane_payload(&vb->vb.vb2_buf, 0,
+ dst - (u8 *)vb2_plane_vaddr(&vb->vb.vb2_buf, 0));
+
+ vb->vb.vb2_buf.timestamp = frame->timestamp;
+ v4l2_buf->field = V4L2_FIELD_INTERLACED;
+ v4l2_buf->sequence = frame->seqno;
+
+ /* Check for motion flags */
+ if (frame->gop_seqno /* P-frame */ &&
+ tw5864_is_motion_triggered(frame)) {
+ struct v4l2_event ev = {
+ .type = V4L2_EVENT_MOTION_DET,
+ .u.motion_det = {
+ .flags = V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ,
+ .frame_sequence = v4l2_buf->sequence,
+ },
+ };
+
+ v4l2_event_queue(&input->vdev, &ev);
+ }
+
+ vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+static v4l2_std_id tw5864_get_v4l2_std(enum tw5864_vid_std std)
+{
+ switch (std) {
+ case STD_NTSC: return V4L2_STD_NTSC_M;
+ case STD_PAL: return V4L2_STD_PAL_B;
+ case STD_SECAM: return V4L2_STD_SECAM_B;
+ case STD_NTSC443: return V4L2_STD_NTSC_443;
+ case STD_PAL_M: return V4L2_STD_PAL_M;
+ case STD_PAL_CN: return V4L2_STD_PAL_Nc;
+ case STD_PAL_60: return V4L2_STD_PAL_60;
+ case STD_INVALID: return V4L2_STD_UNKNOWN;
+ }
+ return 0;
+}
+
+static enum tw5864_vid_std tw5864_from_v4l2_std(v4l2_std_id v4l2_std)
+{
+ if (v4l2_std & V4L2_STD_NTSC_M)
+ return STD_NTSC;
+ if (v4l2_std & V4L2_STD_PAL_B)
+ return STD_PAL;
+ if (v4l2_std & V4L2_STD_SECAM_B)
+ return STD_SECAM;
+ if (v4l2_std & V4L2_STD_NTSC_443)
+ return STD_NTSC443;
+ if (v4l2_std & V4L2_STD_PAL_M)
+ return STD_PAL_M;
+ if (v4l2_std & V4L2_STD_PAL_Nc)
+ return STD_PAL_CN;
+ if (v4l2_std & V4L2_STD_PAL_60)
+ return STD_PAL_60;
+
+ return STD_INVALID;
+}
+
+static void tw5864_encoder_tables_upload(struct tw5864_dev *dev)
+{
+ int i;
+
+ tw_writel(TW5864_VLC_RD, 0x1);
+ for (i = 0; i < VLC_LOOKUP_TABLE_LEN; i++) {
+ tw_writel((TW5864_VLC_STREAM_MEM_START + i * 4),
+ encoder_vlc_lookup_table[i]);
+ }
+ tw_writel(TW5864_VLC_RD, 0x0);
+
+ for (i = 0; i < QUANTIZATION_TABLE_LEN; i++) {
+ tw_writel((TW5864_QUAN_TAB + i * 4),
+ forward_quantization_table[i]);
+ }
+
+ for (i = 0; i < QUANTIZATION_TABLE_LEN; i++) {
+ tw_writel((TW5864_QUAN_TAB + i * 4),
+ inverse_quantization_table[i]);
+ }
+}
diff --git a/drivers/media/pci/tw5864/tw5864.h b/drivers/media/pci/tw5864/tw5864.h
new file mode 100644
index 000000000000..f5de9f6ef119
--- /dev/null
+++ b/drivers/media/pci/tw5864/tw5864.h
@@ -0,0 +1,205 @@
+/*
+ * TW5864 driver - common header file
+ *
+ * Copyright (C) 2016 Bluecherry, LLC <maintainers@bluecherrydvr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/videodev2.h>
+#include <linux/notifier.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "tw5864-reg.h"
+
+#define PCI_DEVICE_ID_TECHWELL_5864 0x5864
+
+#define TW5864_NORMS V4L2_STD_ALL
+
+/* ----------------------------------------------------------- */
+/* card configuration */
+
+#define TW5864_INPUTS 4
+
+/* The TW5864 uses 192 (16x12) detection cells in full screen for motion
+ * detection. Each detection cell is composed of 44 pixels and 20 lines for
+ * NTSC and 24 lines for PAL.
+ */
+#define MD_CELLS_HOR 16
+#define MD_CELLS_VERT 12
+#define MD_CELLS (MD_CELLS_HOR * MD_CELLS_VERT)
+
+#define H264_VLC_BUF_SIZE 0x80000
+#define H264_MV_BUF_SIZE 0x2000 /* device writes 5396 bytes */
+#define QP_VALUE 28
+#define MAX_GOP_SIZE 255
+#define GOP_SIZE MAX_GOP_SIZE
+
+enum resolution {
+ D1 = 1,
+ HD1 = 2, /* half d1 - 360x(240|288) */
+ CIF = 3,
+ QCIF = 4,
+};
+
+/* ----------------------------------------------------------- */
+/* device / file handle status */
+
+struct tw5864_dev; /* forward declaration */
+
+/* buffer for one video/vbi/ts frame */
+struct tw5864_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+
+ unsigned int size;
+};
+
+struct tw5864_dma_buf {
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
+enum tw5864_vid_std {
+ STD_NTSC = 0, /* NTSC (M) */
+ STD_PAL = 1, /* PAL (B, D, G, H, I) */
+ STD_SECAM = 2, /* SECAM */
+ STD_NTSC443 = 3, /* NTSC4.43 */
+ STD_PAL_M = 4, /* PAL (M) */
+ STD_PAL_CN = 5, /* PAL (CN) */
+ STD_PAL_60 = 6, /* PAL 60 */
+ STD_INVALID = 7,
+ STD_AUTO = 7,
+};
+
+struct tw5864_input {
+ int nr; /* input number */
+ struct tw5864_dev *root;
+ struct mutex lock; /* used for vidq and vdev */
+ spinlock_t slock; /* used for sync between ISR, tasklet & V4L2 API */
+ struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
+ struct vb2_queue vidq;
+ struct list_head active;
+ enum resolution resolution;
+ unsigned int width, height;
+ unsigned int frame_seqno;
+ unsigned int frame_gop_seqno;
+ unsigned int h264_idr_pic_id;
+ int enabled;
+ enum tw5864_vid_std std;
+ v4l2_std_id v4l2_std;
+ int tail_nb_bits;
+ u8 tail;
+ u8 *buf_cur_ptr;
+ int buf_cur_space_left;
+
+ u32 reg_interlacing;
+ u32 reg_vlc;
+ u32 reg_dsp_codec;
+ u32 reg_dsp;
+ u32 reg_emu;
+ u32 reg_dsp_qp;
+ u32 reg_dsp_ref_mvp_lambda;
+ u32 reg_dsp_i4x4_weight;
+ u32 buf_id;
+
+ struct tw5864_buf *vb;
+
+ struct v4l2_ctrl *md_threshold_grid_ctrl;
+ u16 md_threshold_grid_values[12 * 16];
+ int qp;
+ int gop;
+
+ /*
+ * In (1/MAX_FPS) units.
+ * For max FPS (default), set to 1.
+ * For 1 FPS, set to e.g. 32.
+ */
+ int frame_interval;
+ unsigned long new_frame_deadline;
+};
+
+struct tw5864_h264_frame {
+ struct tw5864_dma_buf vlc;
+ struct tw5864_dma_buf mv;
+ int vlc_len;
+ u32 checksum;
+ struct tw5864_input *input;
+ u64 timestamp;
+ unsigned int seqno;
+ unsigned int gop_seqno;
+};
+
+/* global device status */
+struct tw5864_dev {
+ spinlock_t slock; /* used for sync between ISR, tasklet & V4L2 API */
+ struct v4l2_device v4l2_dev;
+ struct tw5864_input inputs[TW5864_INPUTS];
+#define H264_BUF_CNT 4
+ struct tw5864_h264_frame h264_buf[H264_BUF_CNT];
+ int h264_buf_r_index;
+ int h264_buf_w_index;
+
+ struct tasklet_struct tasklet;
+
+ int encoder_busy;
+ /* Input number to check next for ready raw picture (in RR fashion) */
+ int next_input;
+
+ /* pci i/o */
+ char name[64];
+ struct pci_dev *pci;
+ void __iomem *mmio;
+ u32 irqmask;
+};
+
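+/*
+ * Register access helpers. These macros expect a local variable
+ * "struct tw5864_dev *dev" to be in scope at the call site.
+ */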
+#define tw_readl(reg) readl(dev->mmio + reg)
+#define tw_mask_readl(reg, mask) \
+ (tw_readl(reg) & (mask))
+#define tw_mask_shift_readl(reg, mask, shift) \
+ (tw_mask_readl((reg), ((mask) << (shift))) >> (shift))
+
+#define tw_writel(reg, value) writel((value), dev->mmio + reg)
+#define tw_mask_writel(reg, mask, value) \
+ tw_writel(reg, (tw_readl(reg) & ~(mask)) | ((value) & (mask)))
+#define tw_mask_shift_writel(reg, mask, shift, value) \
+ tw_mask_writel((reg), ((mask) << (shift)), ((value) << (shift)))
+
+#define tw_setl(reg, bit) tw_writel((reg), tw_readl(reg) | (bit))
+#define tw_clearl(reg, bit) tw_writel((reg), tw_readl(reg) & ~(bit))
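+/*
+ * Example: tw_mask_shift_writel(reg, 0x3, 2 * nr, val) updates only the
+ * 2-bit field belonging to channel nr, leaving neighbouring fields intact.
+ */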
+
+u8 tw5864_indir_readb(struct tw5864_dev *dev, u16 addr);
+#define tw_indir_readb(addr) tw5864_indir_readb(dev, addr)
+void tw5864_indir_writeb(struct tw5864_dev *dev, u16 addr, u8 data);
+#define tw_indir_writeb(addr, data) tw5864_indir_writeb(dev, addr, data)
+
+void tw5864_irqmask_apply(struct tw5864_dev *dev);
+int tw5864_video_init(struct tw5864_dev *dev, int *video_nr);
+void tw5864_video_fini(struct tw5864_dev *dev);
+void tw5864_prepare_frame_headers(struct tw5864_input *input);
+void tw5864_h264_put_stream_header(u8 **buf, size_t *space_left, int qp,
+ int width, int height);
+void tw5864_h264_put_slice_header(u8 **buf, size_t *space_left,
+ unsigned int idr_pic_id,
+ unsigned int frame_gop_seqno,
+ int *tail_nb_bits, u8 *tail);
+void tw5864_request_encoded_frame(struct tw5864_input *input);
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 5e8212845c87..a45e02367321 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -535,7 +535,7 @@ static void tw68_stop_streaming(struct vb2_queue *q)
}
}
-static struct vb2_ops tw68_video_qops = {
+static const struct vb2_ops tw68_video_qops = {
.queue_setup = tw68_queue_setup,
.buf_queue = tw68_buf_queue,
.buf_prepare = tw68_buf_prepare,
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 96e444c49173..77190768622a 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -269,7 +269,7 @@ static snd_pcm_uframes_t tw686x_pcm_pointer(struct snd_pcm_substream *ss)
return bytes_to_frames(ss->runtime, ac->ptr);
}
-static struct snd_pcm_ops tw686x_pcm_ops = {
+static const struct snd_pcm_ops tw686x_pcm_ops = {
.open = tw686x_pcm_open,
.close = tw686x_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index cdb16de770fe..c3fafa97b2d0 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -577,7 +577,7 @@ static int tw686x_buf_prepare(struct vb2_buffer *vb)
return 0;
}
-static struct vb2_ops tw686x_video_qops = {
+static const struct vb2_ops tw686x_video_qops = {
.queue_setup = tw686x_queue_setup,
.buf_queue = tw686x_buf_queue,
.buf_prepare = tw686x_buf_prepare,
@@ -672,30 +672,20 @@ static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+static int tw686x_set_format(struct tw686x_video_channel *vc,
+ unsigned int pixelformat, unsigned int width,
+ unsigned int height, bool realloc)
{
- struct tw686x_video_channel *vc = video_drvdata(file);
struct tw686x_dev *dev = vc->dev;
- u32 val, width, line_width, height;
- unsigned long bitsperframe;
+ u32 val, dma_width, dma_height, dma_line_width;
int err, pb;
- if (vb2_is_busy(&vc->vidq))
- return -EBUSY;
-
- bitsperframe = vc->width * vc->height * vc->format->depth;
- err = tw686x_try_fmt_vid_cap(file, priv, f);
- if (err)
- return err;
-
- vc->format = format_by_fourcc(f->fmt.pix.pixelformat);
- vc->width = f->fmt.pix.width;
- vc->height = f->fmt.pix.height;
+ vc->format = format_by_fourcc(pixelformat);
+ vc->width = width;
+ vc->height = height;
/* We need new DMA buffers if the framesize has changed */
- if (dev->dma_ops->alloc &&
- bitsperframe != vc->width * vc->height * vc->format->depth) {
+ if (dev->dma_ops->alloc && realloc) {
for (pb = 0; pb < 2; pb++)
dev->dma_ops->free(vc, pb);
@@ -739,14 +729,36 @@ static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
/* Program the DMA frame size */
- width = (vc->width * 2) & 0x7ff;
- height = vc->height / 2;
- line_width = (vc->width * 2) & 0x7ff;
- val = (height << 22) | (line_width << 11) | width;
+ dma_width = (vc->width * 2) & 0x7ff;
+ dma_height = vc->height / 2;
+ dma_line_width = (vc->width * 2) & 0x7ff;
+ val = (dma_height << 22) | (dma_line_width << 11) | dma_width;
reg_write(vc->dev, VDMA_WHP[vc->ch], val);
return 0;
}
+static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ unsigned long area;
+ bool realloc;
+ int err;
+
+ if (vb2_is_busy(&vc->vidq))
+ return -EBUSY;
+
+ area = vc->width * vc->height;
+ err = tw686x_try_fmt_vid_cap(file, priv, f);
+ if (err)
+ return err;
+
+ realloc = area != (f->fmt.pix.width * f->fmt.pix.height);
+ return tw686x_set_format(vc, f->fmt.pix.pixelformat,
+ f->fmt.pix.width, f->fmt.pix.height,
+ realloc);
+}
+
static int tw686x_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -763,17 +775,9 @@ static int tw686x_querycap(struct file *file, void *priv,
return 0;
}
-static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
+static int tw686x_set_standard(struct tw686x_video_channel *vc, v4l2_std_id id)
{
- struct tw686x_video_channel *vc = video_drvdata(file);
- struct v4l2_format f;
- u32 val, ret;
-
- if (vc->video_standard == id)
- return 0;
-
- if (vb2_is_busy(&vc->vidq))
- return -EBUSY;
+ u32 val;
if (id & V4L2_STD_NTSC)
val = 0;
@@ -802,14 +806,31 @@ static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
val |= (1 << (SYS_MODE_DMA_SHIFT + vc->ch));
reg_write(vc->dev, VIDEO_CONTROL1, val);
+ return 0;
+}
+
+static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ struct v4l2_format f;
+ int ret;
+
+ if (vc->video_standard == id)
+ return 0;
+
+ if (vb2_is_busy(&vc->vidq))
+ return -EBUSY;
+
+ ret = tw686x_set_standard(vc, id);
+ if (ret)
+ return ret;
/*
* Adjust format after V4L2_STD_525_60/V4L2_STD_625_50 change,
* calling g_fmt and s_fmt will sanitize the height
* according to the standard.
*/
- ret = tw686x_g_fmt_vid_cap(file, priv, &f);
- if (!ret)
- tw686x_s_fmt_vid_cap(file, priv, &f);
+ tw686x_g_fmt_vid_cap(file, priv, &f);
+ tw686x_s_fmt_vid_cap(file, priv, &f);
/*
* Frame decimation depends on the chosen standard,
@@ -885,6 +906,42 @@ static int tw686x_g_std(struct file *file, void *priv, v4l2_std_id *id)
return 0;
}
+static int tw686x_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ if (fsize->index)
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.max_width = TW686X_VIDEO_WIDTH;
+ fsize->stepwise.min_width = fsize->stepwise.max_width / 2;
+ fsize->stepwise.step_width = fsize->stepwise.min_width;
+ fsize->stepwise.max_height = TW686X_VIDEO_HEIGHT(vc->video_standard);
+ fsize->stepwise.min_height = fsize->stepwise.max_height / 2;
+ fsize->stepwise.step_height = fsize->stepwise.min_height;
+ return 0;
+}
+
+static int tw686x_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *ival)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ int max_fps = TW686X_MAX_FPS(vc->video_standard);
+ int max_rates = DIV_ROUND_UP(max_fps, 2);
+
+ if (ival->index >= max_rates)
+ return -EINVAL;
+
+ ival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ ival->discrete.numerator = 1;
+ if (ival->index < (max_rates - 1))
+ ival->discrete.denominator = (ival->index + 1) * 2;
+ else
+ ival->discrete.denominator = max_fps;
+ return 0;
+}
+
static int tw686x_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
@@ -928,10 +985,21 @@ static int tw686x_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+static void tw686x_set_input(struct tw686x_video_channel *vc, unsigned int i)
+{
+ u32 val;
+
+ vc->input = i;
+
+ val = reg_read(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch]);
+ val &= ~(0x3 << 30);
+ val |= i << 30;
+ reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
+}
+
static int tw686x_s_input(struct file *file, void *priv, unsigned int i)
{
struct tw686x_video_channel *vc = video_drvdata(file);
- u32 val;
if (i >= TW686X_INPUTS_PER_CH)
return -EINVAL;
@@ -943,12 +1011,7 @@ static int tw686x_s_input(struct file *file, void *priv, unsigned int i)
if (vb2_is_busy(&vc->vidq))
return -EBUSY;
- vc->input = i;
-
- val = reg_read(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch]);
- val &= ~(0x3 << 30);
- val |= i << 30;
- reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
+ tw686x_set_input(vc, i);
return 0;
}
@@ -1007,6 +1070,8 @@ static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
.vidioc_g_parm = tw686x_g_parm,
.vidioc_s_parm = tw686x_s_parm,
+ .vidioc_enum_framesizes = tw686x_enum_framesizes,
+ .vidioc_enum_frameintervals = tw686x_enum_frameintervals,
.vidioc_enum_input = tw686x_enum_input,
.vidioc_g_input = tw686x_g_input,
@@ -1093,8 +1158,7 @@ void tw686x_video_free(struct tw686x_dev *dev)
for (ch = 0; ch < max_channels(dev); ch++) {
struct tw686x_video_channel *vc = &dev->video_channels[ch];
- if (vc->device)
- video_unregister_device(vc->device);
+ video_unregister_device(vc->device);
if (dev->dma_ops->free)
for (pb = 0; pb < 2; pb++)
@@ -1104,7 +1168,7 @@ void tw686x_video_free(struct tw686x_dev *dev)
int tw686x_video_init(struct tw686x_dev *dev)
{
- unsigned int ch, val, pb;
+ unsigned int ch, val;
int err;
if (dev->dma_mode == TW686X_DMA_MODE_MEMCPY)
@@ -1138,27 +1202,23 @@ int tw686x_video_init(struct tw686x_dev *dev)
vc->ch = ch;
/* default settings */
- vc->format = &formats[0];
- vc->video_standard = V4L2_STD_NTSC;
- vc->width = TW686X_VIDEO_WIDTH;
- vc->height = TW686X_VIDEO_HEIGHT(vc->video_standard);
- vc->input = 0;
+ err = tw686x_set_standard(vc, V4L2_STD_NTSC);
+ if (err)
+ goto error;
- reg_write(vc->dev, SDT[ch], 0);
- tw686x_set_framerate(vc, 30);
+ err = tw686x_set_format(vc, formats[0].fourcc,
+ TW686X_VIDEO_WIDTH,
+ TW686X_VIDEO_HEIGHT(vc->video_standard),
+ true);
+ if (err)
+ goto error;
+ tw686x_set_input(vc, 0);
+ tw686x_set_framerate(vc, 30);
reg_write(dev, VDELAY_LO[ch], 0x14);
reg_write(dev, HACTIVE_LO[ch], 0xd0);
reg_write(dev, VIDEO_SIZE[ch], 0);
- if (dev->dma_ops->alloc) {
- for (pb = 0; pb < 2; pb++) {
- err = dev->dma_ops->alloc(vc, pb);
- if (err)
- goto error;
- }
- }
-
vc->vidq.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vc->vidq.drv_priv = vc;
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 80caa70c6360..d6b631add216 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2365,94 +2365,80 @@ static int zoran_s_output(struct file *file, void *__fh, unsigned int output)
}
/* cropping (sub-frame capture) */
-static int zoran_cropcap(struct file *file, void *__fh,
- struct v4l2_cropcap *cropcap)
+static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
- int type = cropcap->type, res = 0;
- memset(cropcap, 0, sizeof(*cropcap));
- cropcap->type = type;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
- if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- fh->map_mode == ZORAN_MAP_MODE_RAW)) {
+ if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
dprintk(1, KERN_ERR
- "%s: VIDIOC_CROPCAP - subcapture only supported for compressed capture\n",
+ "%s: VIDIOC_G_SELECTION - subcapture only supported for compressed capture\n",
ZR_DEVNAME(zr));
- res = -EINVAL;
- return res;
+ return -EINVAL;
}
- cropcap->bounds.top = cropcap->bounds.left = 0;
- cropcap->bounds.width = BUZ_MAX_WIDTH;
- cropcap->bounds.height = BUZ_MAX_HEIGHT;
- cropcap->defrect.top = cropcap->defrect.left = 0;
- cropcap->defrect.width = BUZ_MIN_WIDTH;
- cropcap->defrect.height = BUZ_MIN_HEIGHT;
- return res;
-}
-
-static int zoran_g_crop(struct file *file, void *__fh, struct v4l2_crop *crop)
-{
- struct zoran_fh *fh = __fh;
- struct zoran *zr = fh->zr;
- int type = crop->type, res = 0;
-
- memset(crop, 0, sizeof(*crop));
- crop->type = type;
-
- if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- fh->map_mode == ZORAN_MAP_MODE_RAW)) {
- dprintk(1,
- KERN_ERR
- "%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
- ZR_DEVNAME(zr));
- res = -EINVAL;
- return res;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r.top = fh->jpg_settings.img_y;
+ sel->r.left = fh->jpg_settings.img_x;
+ sel->r.width = fh->jpg_settings.img_width;
+ sel->r.height = fh->jpg_settings.img_height;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = sel->r.left = 0;
+ sel->r.width = BUZ_MIN_WIDTH;
+ sel->r.height = BUZ_MIN_HEIGHT;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = sel->r.left = 0;
+ sel->r.width = BUZ_MAX_WIDTH;
+ sel->r.height = BUZ_MAX_HEIGHT;
+ break;
+ default:
+ return -EINVAL;
}
-
- crop->c.top = fh->jpg_settings.img_y;
- crop->c.left = fh->jpg_settings.img_x;
- crop->c.width = fh->jpg_settings.img_width;
- crop->c.height = fh->jpg_settings.img_height;
- return res;
+ return 0;
}
-static int zoran_s_crop(struct file *file, void *__fh, const struct v4l2_crop *crop)
+static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
{
struct zoran_fh *fh = __fh;
struct zoran *zr = fh->zr;
- int res = 0;
struct zoran_jpg_settings settings;
+ int res;
- settings = fh->jpg_settings;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
- if (fh->buffers.allocated) {
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ if (fh->map_mode == ZORAN_MAP_MODE_RAW) {
dprintk(1, KERN_ERR
- "%s: VIDIOC_S_CROP - cannot change settings while active\n",
+ "%s: VIDIOC_S_SELECTION - subcapture only supported for compressed capture\n",
ZR_DEVNAME(zr));
- res = -EBUSY;
- return res;
+ return -EINVAL;
}
- if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- fh->map_mode == ZORAN_MAP_MODE_RAW)) {
+ settings = fh->jpg_settings;
+
+ if (fh->buffers.allocated) {
dprintk(1, KERN_ERR
- "%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n",
+ "%s: VIDIOC_S_SELECTION - cannot change settings while active\n",
ZR_DEVNAME(zr));
- res = -EINVAL;
- return res;
+ return -EBUSY;
}
/* move into a form that we understand */
- settings.img_x = crop->c.left;
- settings.img_y = crop->c.top;
- settings.img_width = crop->c.width;
- settings.img_height = crop->c.height;
+ settings.img_x = sel->r.left;
+ settings.img_y = sel->r.top;
+ settings.img_width = sel->r.width;
+ settings.img_height = sel->r.height;
/* check validity */
res = zoran_check_jpg_settings(zr, &settings, 0);
@@ -2808,9 +2794,8 @@ zoran_mmap (struct file *file,
static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
.vidioc_querycap = zoran_querycap,
- .vidioc_cropcap = zoran_cropcap,
- .vidioc_s_crop = zoran_s_crop,
- .vidioc_g_crop = zoran_g_crop,
+ .vidioc_s_selection = zoran_s_selection,
+ .vidioc_g_selection = zoran_g_selection,
.vidioc_enum_input = zoran_enum_input,
.vidioc_g_input = zoran_g_input,
.vidioc_s_input = zoran_s_input,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 552b635cfce7..ce4a96fccc43 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -91,6 +91,15 @@ config VIDEO_OMAP3_DEBUG
---help---
Enable debug messages on OMAP 3 camera controller driver.
+config VIDEO_PXA27x
+ tristate "PXA27x Quick Capture Interface driver"
+ depends on VIDEO_DEV && HAS_DMA
+ depends on PXA27x || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select SG_SPLIT
+ ---help---
+ This is a V4L2 driver for the PXA27x Quick Capture Interface.
+
config VIDEO_S3C_CAMIF
tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
@@ -107,10 +116,10 @@ config VIDEO_S3C_CAMIF
source "drivers/media/platform/soc_camera/Kconfig"
source "drivers/media/platform/exynos4-is/Kconfig"
-source "drivers/media/platform/s5p-tv/Kconfig"
source "drivers/media/platform/am437x/Kconfig"
source "drivers/media/platform/xilinx/Kconfig"
source "drivers/media/platform/rcar-vin/Kconfig"
+source "drivers/media/platform/atmel/Kconfig"
config VIDEO_TI_CAL
tristate "TI CAL (Camera Adaptation Layer) driver"
@@ -155,7 +164,7 @@ config VIDEO_CODA
config VIDEO_MEDIATEK_VPU
tristate "Mediatek Video Processor Unit"
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
depends on ARCH_MEDIATEK || COMPILE_TEST
---help---
This driver provides downloading VPU firmware and
@@ -257,6 +266,21 @@ config VIDEO_STI_BDISP
help
This v4l2 mem2mem driver is a 2D blitter for STMicroelectronics SoC.
+config VIDEO_STI_HVA
+ tristate "STMicroelectronics HVA multi-format video encoder V4L2 driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on HAS_DMA
+ depends on ARCH_STI || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This V4L2 driver enables the HVA (Hardware Video Accelerator)
+ multi-format video encoder of STMicroelectronics SoCs, allowing
+ hardware encoding of raw uncompressed formats into various
+ compressed video bitstream formats.
+
+ To compile this driver as a module, choose M here:
+ the module will be called st-hva.
+
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 21771c1a13fb..40b18d12726e 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
obj-$(CONFIG_VIDEO_OMAP3) += omap3isp/
+obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
@@ -30,12 +31,12 @@ obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS) += exynos4-is/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_TV) += s5p-tv/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d/
obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC) += exynos-gsc/
obj-$(CONFIG_VIDEO_STI_BDISP) += sti/bdisp/
+obj-$(CONFIG_VIDEO_STI_HVA) += sti/hva/
obj-$(CONFIG_DVB_C8SECTPFE) += sti/c8sectpfe/
obj-$(CONFIG_BLACKFIN) += blackfin/
@@ -58,6 +59,8 @@ obj-$(CONFIG_VIDEO_XILINX) += xilinx/
obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
+obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel/
+
ccflags-y += -I$(srctree)/drivers/media/i2c
obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
new file mode 100644
index 000000000000..867dca22a473
--- /dev/null
+++ b/drivers/media/platform/atmel/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_ATMEL_ISC
+ tristate "ATMEL Image Sensor Controller (ISC) support"
+ depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API && HAS_DMA
+ depends on ARCH_AT91 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP_MMIO
+ help
+ This module makes the ATMEL Image Sensor Controller available
+ as a v4l2 device.
\ No newline at end of file
diff --git a/drivers/media/platform/atmel/Makefile b/drivers/media/platform/atmel/Makefile
new file mode 100644
index 000000000000..9d7c999d434d
--- /dev/null
+++ b/drivers/media/platform/atmel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
new file mode 100644
index 000000000000..00c449717cde
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -0,0 +1,165 @@
+#ifndef __ATMEL_ISC_REGS_H
+#define __ATMEL_ISC_REGS_H
+
+#include <linux/bitops.h>
+
+/* ISC Control Enable Register 0 */
+#define ISC_CTRLEN 0x00000000
+
+/* ISC Control Disable Register 0 */
+#define ISC_CTRLDIS 0x00000004
+
+/* ISC Control Status Register 0 */
+#define ISC_CTRLSR 0x00000008
+
+#define ISC_CTRL_CAPTURE BIT(0)
+#define ISC_CTRL_UPPRO BIT(1)
+#define ISC_CTRL_HISREQ BIT(2)
+#define ISC_CTRL_HISCLR BIT(3)
+
+/* ISC Parallel Front End Configuration 0 Register */
+#define ISC_PFE_CFG0 0x0000000c
+
+#define ISC_PFE_CFG0_HPOL_LOW BIT(0)
+#define ISC_PFE_CFG0_VPOL_LOW BIT(1)
+#define ISC_PFE_CFG0_PPOL_LOW BIT(2)
+
+#define ISC_PFE_CFG0_MODE_PROGRESSIVE (0x0 << 4)
+#define ISC_PFE_CFG0_MODE_MASK GENMASK(6, 4)
+
+#define ISC_PFE_CFG0_BPS_EIGHT (0x4 << 28)
+#define ISC_PFG_CFG0_BPS_NINE (0x3 << 28)
+#define ISC_PFG_CFG0_BPS_TEN (0x2 << 28)
+#define ISC_PFG_CFG0_BPS_ELEVEN (0x1 << 28)
+#define ISC_PFG_CFG0_BPS_TWELVE (0x0 << 28)
+#define ISC_PFE_CFG0_BPS_MASK GENMASK(30, 28)
+
+/* ISC Clock Enable Register */
+#define ISC_CLKEN 0x00000018
+
+/* ISC Clock Disable Register */
+#define ISC_CLKDIS 0x0000001c
+
+/* ISC Clock Status Register */
+#define ISC_CLKSR 0x00000020
+
+#define ISC_CLK(n) BIT(n)
+
+/* ISC Clock Configuration Register */
+#define ISC_CLKCFG 0x00000024
+#define ISC_CLKCFG_DIV_SHIFT(n) ((n)*16)
+#define ISC_CLKCFG_DIV_MASK(n) GENMASK(((n)*16 + 7), (n)*16)
+#define ISC_CLKCFG_SEL_SHIFT(n) ((n)*16 + 8)
+#define ISC_CLKCFG_SEL_MASK(n) GENMASK(((n)*17 + 8), ((n)*16 + 8))
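+
+/*
+ * Illustration (derived from the two macros above, not a separate
+ * register): for the master clock (id = 1) the divider field expands to
+ * ISC_CLKCFG_DIV_MASK(1) = GENMASK(23, 16) and the selection field to
+ * ISC_CLKCFG_SEL_MASK(1) = GENMASK(25, 24), while the ISP clock (id = 0)
+ * has a single selection bit, GENMASK(8, 8).
+ */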
+
+/* ISC Interrupt Enable Register */
+#define ISC_INTEN 0x00000028
+
+/* ISC Interrupt Disable Register */
+#define ISC_INTDIS 0x0000002c
+
+/* ISC Interrupt Mask Register */
+#define ISC_INTMASK 0x00000030
+
+/* ISC Interrupt Status Register */
+#define ISC_INTSR 0x00000034
+
+#define ISC_INT_DDONE BIT(8)
+
+/* ISC White Balance Control Register */
+#define ISC_WB_CTRL 0x00000058
+
+/* ISC White Balance Configuration Register */
+#define ISC_WB_CFG 0x0000005c
+
+/* ISC Color Filter Array Control Register */
+#define ISC_CFA_CTRL 0x00000070
+
+/* ISC Color Filter Array Configuration Register */
+#define ISC_CFA_CFG 0x00000074
+
+#define ISC_BAY_CFG_GRGR 0x0
+#define ISC_BAY_CFG_RGRG 0x1
+#define ISC_BAY_CFG_GBGB 0x2
+#define ISC_BAY_CFG_BGBG 0x3
+#define ISC_BAY_CFG_MASK GENMASK(1, 0)
+
+/* ISC Color Correction Control Register */
+#define ISC_CC_CTRL 0x00000078
+
+/* ISC Gamma Correction Control Register */
+#define ISC_GAM_CTRL 0x00000094
+
+/* Color Space Conversion Control Register */
+#define ISC_CSC_CTRL 0x00000398
+
+/* Contrast And Brightness Control Register */
+#define ISC_CBC_CTRL 0x000003b4
+
+/* Subsampling 4:4:4 to 4:2:2 Control Register */
+#define ISC_SUB422_CTRL 0x000003c4
+
+/* Subsampling 4:2:2 to 4:2:0 Control Register */
+#define ISC_SUB420_CTRL 0x000003cc
+
+/* Rounding, Limiting and Packing Configuration Register */
+#define ISC_RLP_CFG 0x000003d0
+
+#define ISC_RLP_CFG_MODE_DAT8 0x0
+#define ISC_RLP_CFG_MODE_DAT9 0x1
+#define ISC_RLP_CFG_MODE_DAT10 0x2
+#define ISC_RLP_CFG_MODE_DAT11 0x3
+#define ISC_RLP_CFG_MODE_DAT12 0x4
+#define ISC_RLP_CFG_MODE_DATY8 0x5
+#define ISC_RLP_CFG_MODE_DATY10 0x6
+#define ISC_RLP_CFG_MODE_ARGB444 0x7
+#define ISC_RLP_CFG_MODE_ARGB555 0x8
+#define ISC_RLP_CFG_MODE_RGB565 0x9
+#define ISC_RLP_CFG_MODE_ARGB32 0xa
+#define ISC_RLP_CFG_MODE_YYCC 0xb
+#define ISC_RLP_CFG_MODE_YYCC_LIMITED 0xc
+#define ISC_RLP_CFG_MODE_MASK GENMASK(3, 0)
+
+/* DMA Configuration Register */
+#define ISC_DCFG 0x000003e0
+#define ISC_DCFG_IMODE_PACKED8 0x0
+#define ISC_DCFG_IMODE_PACKED16 0x1
+#define ISC_DCFG_IMODE_PACKED32 0x2
+#define ISC_DCFG_IMODE_YC422SP 0x3
+#define ISC_DCFG_IMODE_YC422P 0x4
+#define ISC_DCFG_IMODE_YC420SP 0x5
+#define ISC_DCFG_IMODE_YC420P 0x6
+#define ISC_DCFG_IMODE_MASK GENMASK(2, 0)
+
+#define ISC_DCFG_YMBSIZE_SINGLE (0x0 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS4 (0x1 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS8 (0x2 << 4)
+#define ISC_DCFG_YMBSIZE_BEATS16 (0x3 << 4)
+#define ISC_DCFG_YMBSIZE_MASK GENMASK(5, 4)
+
+#define ISC_DCFG_CMBSIZE_SINGLE (0x0 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS4 (0x1 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS8 (0x2 << 8)
+#define ISC_DCFG_CMBSIZE_BEATS16 (0x3 << 8)
+#define ISC_DCFG_CMBSIZE_MASK GENMASK(9, 8)
+
+/* DMA Control Register */
+#define ISC_DCTRL 0x000003e4
+
+#define ISC_DCTRL_DVIEW_PACKED (0x0 << 1)
+#define ISC_DCTRL_DVIEW_SEMIPLANAR (0x1 << 1)
+#define ISC_DCTRL_DVIEW_PLANAR (0x2 << 1)
+#define ISC_DCTRL_DVIEW_MASK GENMASK(2, 1)
+
+#define ISC_DCTRL_IE_IS (0x0 << 4)
+
+/* DMA Descriptor Address Register */
+#define ISC_DNDA 0x000003e8
+
+/* DMA Address 0 Register */
+#define ISC_DAD0 0x000003ec
+
+/* DMA Stride 0 Register */
+#define ISC_DST0 0x000003f0
+
+#endif
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
new file mode 100644
index 000000000000..ccfe13b7d3f8
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -0,0 +1,1520 @@
+/*
+ * Atmel Image Sensor Controller (ISC) driver
+ *
+ * Copyright (C) 2016 Atmel
+ *
+ * Author: Songjun Wu <songjun.wu@microchip.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Sensor-->PFE-->WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB-->RLP-->DMA
+ *
+ * ISC video pipeline integrates the following submodules:
+ * PFE: Parallel Front End to sample the camera sensor input stream
+ * WB: Programmable white balance in the Bayer domain
+ * CFA: Color filter array interpolation module
+ * CC: Programmable color correction
+ * GAM: Gamma correction
+ * CSC: Programmable color space conversion
+ * CBC: Contrast and Brightness control
+ * SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
+ * RLP: This module performs rounding, range limiting
+ * and packing of the incoming data
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-of.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "atmel-isc-regs.h"
+
+#define ATMEL_ISC_NAME "atmel_isc"
+
+#define ISC_MAX_SUPPORT_WIDTH 2592
+#define ISC_MAX_SUPPORT_HEIGHT 1944
+
+#define ISC_CLK_MAX_DIV 255
+
+enum isc_clk_id {
+ ISC_ISPCK = 0,
+ ISC_MCK = 1,
+};
+
+struct isc_clk {
+ struct clk_hw hw;
+ struct clk *clk;
+ struct regmap *regmap;
+ u8 id;
+ u8 parent_id;
+ u32 div;
+ struct device *dev;
+};
+
+#define to_isc_clk(hw) container_of(hw, struct isc_clk, hw)
+
+struct isc_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+struct isc_subdev_entity {
+ struct v4l2_subdev *sd;
+ struct v4l2_async_subdev *asd;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev_pad_config *config;
+
+ u32 pfe_cfg0;
+
+ struct list_head list;
+};
+
+/*
+ * struct isc_format - ISC media bus format information
+ * @fourcc: Fourcc code for this format
+ * @mbus_code: V4L2 media bus format code.
+ * @bpp: Bytes per pixel (when stored in memory)
+ * @reg_bps: reg value for bits per sample
+ * (when transferred over a bus)
+ * @support: Indicates format supported by subdev
+ */
+struct isc_format {
+ u32 fourcc;
+ u32 mbus_code;
+ u8 bpp;
+
+ u32 reg_bps;
+ u32 reg_rlp_mode;
+ u32 reg_dcfg_imode;
+ u32 reg_dctrl_dview;
+
+ bool support;
+};
+
+#define ISC_PIPE_LINE_NODE_NUM 11
+
+struct isc_device {
+ struct regmap *regmap;
+ struct clk *hclock;
+ struct clk *ispck;
+ struct isc_clk isc_clks[2];
+
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device video_dev;
+
+ struct vb2_queue vb2_vidq;
+ spinlock_t dma_queue_lock;
+ struct list_head dma_queue;
+ struct isc_buffer *cur_frm;
+ unsigned int sequence;
+ bool stop;
+ struct completion comp;
+
+ struct v4l2_format fmt;
+ struct isc_format **user_formats;
+ unsigned int num_user_formats;
+ const struct isc_format *current_fmt;
+
+ struct mutex lock;
+
+ struct regmap_field *pipeline[ISC_PIPE_LINE_NODE_NUM];
+
+ struct isc_subdev_entity *current_subdev;
+ struct list_head subdev_entities;
+};
+
+static struct isc_format isc_formats[] = {
+ { V4L2_PIX_FMT_SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8,
+ 1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8,
+ 1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8,
+ 1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8,
+ 1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+
+ { V4L2_PIX_FMT_SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10,
+ 2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10,
+ 2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10,
+ 2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10,
+ 2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+
+ { V4L2_PIX_FMT_SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12,
+ 2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12,
+ 2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12,
+ 2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12,
+ 2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
+
+ { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_YUYV8_2X8,
+ 2, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+};
+
+static int isc_clk_enable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 id = isc_clk->id;
+ struct regmap *regmap = isc_clk->regmap;
+
+ dev_dbg(isc_clk->dev, "ISC CLK: %s, div = %d, parent id = %d\n",
+ __func__, isc_clk->div, isc_clk->parent_id);
+
+ regmap_update_bits(regmap, ISC_CLKCFG,
+ ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
+ (isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
+ (isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
+
+ regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
+
+ return 0;
+}
+
+static void isc_clk_disable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 id = isc_clk->id;
+
+ regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
+}
+
+static int isc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 status;
+
+ regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
+
+ return status & ISC_CLK(isc_clk->id) ? 1 : 0;
+}
+
+static unsigned long
+isc_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ return DIV_ROUND_CLOSEST(parent_rate, isc_clk->div + 1);
+}
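+
+/*
+ * Worked example with illustrative numbers: the hardware divides by
+ * div + 1, so a 100 MHz parent with a stored div of 2 recalculates to
+ * DIV_ROUND_CLOSEST(100000000, 3) = 33333333 Hz; conversely, a
+ * clk_set_rate() request for 33 MHz (see isc_clk_set_rate() below)
+ * picks DIV_ROUND_CLOSEST(100000000, 33000000) = 3 and stores div = 2.
+ */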
+
+static int isc_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ long best_rate = -EINVAL;
+ int best_diff = -1;
+ unsigned int i, div;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent;
+ unsigned long parent_rate;
+
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+ if (!parent_rate)
+ continue;
+
+ for (div = 1; div < ISC_CLK_MAX_DIV + 2; div++) {
+ unsigned long rate;
+ int diff;
+
+ rate = DIV_ROUND_CLOSEST(parent_rate, div);
+ diff = abs(req->rate - rate);
+
+ if (best_diff < 0 || best_diff > diff) {
+ best_rate = rate;
+ best_diff = diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+
+ if (!best_diff || rate < req->rate)
+ break;
+ }
+
+ if (!best_diff)
+ break;
+ }
+
+ dev_dbg(isc_clk->dev,
+ "ISC CLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
+ __func__, best_rate,
+ __clk_get_name((req->best_parent_hw)->clk),
+ req->best_parent_rate);
+
+ if (best_rate < 0)
+ return best_rate;
+
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static int isc_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ if (index >= clk_hw_get_num_parents(hw))
+ return -EINVAL;
+
+ isc_clk->parent_id = index;
+
+ return 0;
+}
+
+static u8 isc_clk_get_parent(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ return isc_clk->parent_id;
+}
+
+static int isc_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ u32 div;
+
+ if (!rate)
+ return -EINVAL;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (div > (ISC_CLK_MAX_DIV + 1) || !div)
+ return -EINVAL;
+
+ isc_clk->div = div - 1;
+
+ return 0;
+}
+
+static const struct clk_ops isc_clk_ops = {
+ .enable = isc_clk_enable,
+ .disable = isc_clk_disable,
+ .is_enabled = isc_clk_is_enabled,
+ .recalc_rate = isc_clk_recalc_rate,
+ .determine_rate = isc_clk_determine_rate,
+ .set_parent = isc_clk_set_parent,
+ .get_parent = isc_clk_get_parent,
+ .set_rate = isc_clk_set_rate,
+};
+
+static int isc_clk_register(struct isc_device *isc, unsigned int id)
+{
+ struct regmap *regmap = isc->regmap;
+ struct device_node *np = isc->dev->of_node;
+ struct isc_clk *isc_clk;
+ struct clk_init_data init;
+ const char *clk_name = np->name;
+ const char *parent_names[3];
+ int num_parents;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents < 1 || num_parents > 3)
+ return -EINVAL;
+
+ if (num_parents > 2 && id == ISC_ISPCK)
+ num_parents = 2;
+
+ of_clk_parent_fill(np, parent_names, num_parents);
+
+ if (id == ISC_MCK)
+ of_property_read_string(np, "clock-output-names", &clk_name);
+ else
+ clk_name = "isc-ispck";
+
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ init.name = clk_name;
+ init.ops = &isc_clk_ops;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+
+ isc_clk = &isc->isc_clks[id];
+ isc_clk->hw.init = &init;
+ isc_clk->regmap = regmap;
+ isc_clk->id = id;
+ isc_clk->dev = isc->dev;
+
+ isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
+ if (IS_ERR(isc_clk->clk)) {
+ dev_err(isc->dev, "%s: failed to register clock\n", clk_name);
+ return PTR_ERR(isc_clk->clk);
+ }
+
+ if (id == ISC_MCK)
+ of_clk_add_provider(np, of_clk_src_simple_get, isc_clk->clk);
+
+ return 0;
+}
+
+static int isc_clk_init(struct isc_device *isc)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++)
+ isc->isc_clks[i].clk = ERR_PTR(-EINVAL);
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+ ret = isc_clk_register(isc, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void isc_clk_cleanup(struct isc_device *isc)
+{
+ unsigned int i;
+
+ of_clk_del_provider(isc->dev->of_node);
+
+ for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
+ struct isc_clk *isc_clk = &isc->isc_clks[i];
+
+ if (!IS_ERR(isc_clk->clk))
+ clk_unregister(isc_clk->clk);
+ }
+}
+
+static int isc_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct isc_device *isc = vb2_get_drv_priv(vq);
+ unsigned int size = isc->fmt.fmt.pix.sizeimage;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int isc_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = isc->fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(&isc->v4l2_dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ vbuf->field = isc->fmt.fmt.pix.field;
+
+ return 0;
+}
+
+static inline void isc_start_dma(struct regmap *regmap,
+ struct isc_buffer *frm, u32 dview)
+{
+ dma_addr_t addr;
+
+ addr = vb2_dma_contig_plane_dma_addr(&frm->vb.vb2_buf, 0);
+
+ regmap_write(regmap, ISC_DCTRL, dview | ISC_DCTRL_IE_IS);
+ regmap_write(regmap, ISC_DAD0, addr);
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
+}
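+
+/*
+ * Capture is re-armed one frame at a time: isc_start_dma() runs once
+ * from isc_start_streaming() and then again from the DDONE interrupt
+ * handler for each queued buffer, writing the next destination address
+ * to ISC_DAD0 before re-triggering ISC_CTRL_CAPTURE.
+ */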
+
+static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
+{
+ u32 val;
+ unsigned int i;
+
+ for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
+ val = pipeline & BIT(i) ? 1 : 0;
+ regmap_field_write(isc->pipeline[i], val);
+ }
+}
+
+static int isc_configure(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ const struct isc_format *current_fmt = isc->current_fmt;
+ struct isc_subdev_entity *subdev = isc->current_subdev;
+ u32 val, mask;
+ int counter = 10;
+
+ val = current_fmt->reg_bps | subdev->pfe_cfg0 |
+ ISC_PFE_CFG0_MODE_PROGRESSIVE;
+ mask = ISC_PFE_CFG0_BPS_MASK | ISC_PFE_CFG0_HPOL_LOW |
+ ISC_PFE_CFG0_VPOL_LOW | ISC_PFE_CFG0_PPOL_LOW |
+ ISC_PFE_CFG0_MODE_MASK;
+
+ regmap_update_bits(regmap, ISC_PFE_CFG0, mask, val);
+
+ regmap_update_bits(regmap, ISC_RLP_CFG, ISC_RLP_CFG_MODE_MASK,
+ current_fmt->reg_rlp_mode);
+
+ regmap_update_bits(regmap, ISC_DCFG, ISC_DCFG_IMODE_MASK,
+ current_fmt->reg_dcfg_imode);
+
+ /* Disable the pipeline */
+ isc_set_pipeline(isc, 0x0);
+
+ /* Update profile */
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_UPPRO);
+
+ regmap_read(regmap, ISC_CTRLSR, &val);
+ while ((val & ISC_CTRL_UPPRO) && counter--) {
+ usleep_range(1000, 2000);
+ regmap_read(regmap, ISC_CTRLSR, &val);
+ }
+
+ if (counter < 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct isc_device *isc = vb2_get_drv_priv(vq);
+ struct regmap *regmap = isc->regmap;
+ struct isc_buffer *buf;
+ unsigned long flags;
+ int ret;
+ u32 val;
+
+ /* Enable stream on the sub device */
+ ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ v4l2_err(&isc->v4l2_dev, "stream on failed in subdev\n");
+ goto err_start_stream;
+ }
+
+ pm_runtime_get_sync(isc->dev);
+
+ /* Disable all the interrupts */
+ regmap_write(isc->regmap, ISC_INTDIS, (u32)~0UL);
+
+ /* Clean the interrupt status register */
+ regmap_read(regmap, ISC_INTSR, &val);
+
+ ret = isc_configure(isc);
+ if (unlikely(ret))
+ goto err_configure;
+
+ /* Enable DMA interrupt */
+ regmap_write(regmap, ISC_INTEN, ISC_INT_DDONE);
+
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+
+ isc->sequence = 0;
+ isc->stop = false;
+ reinit_completion(&isc->comp);
+
+ isc->cur_frm = list_first_entry(&isc->dma_queue,
+ struct isc_buffer, list);
+ list_del(&isc->cur_frm->list);
+
+ isc_start_dma(regmap, isc->cur_frm, isc->current_fmt->reg_dctrl_dview);
+
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+
+ return 0;
+
+err_configure:
+ pm_runtime_put_sync(isc->dev);
+
+ v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
+
+err_start_stream:
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+ list_for_each_entry(buf, &isc->dma_queue, list)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ INIT_LIST_HEAD(&isc->dma_queue);
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+
+ return ret;
+}
+
+static void isc_stop_streaming(struct vb2_queue *vq)
+{
+ struct isc_device *isc = vb2_get_drv_priv(vq);
+ unsigned long flags;
+ struct isc_buffer *buf;
+ int ret;
+
+ isc->stop = true;
+
+ /* Wait until the end of the current frame */
+ if (isc->cur_frm && !wait_for_completion_timeout(&isc->comp, 5 * HZ))
+ v4l2_err(&isc->v4l2_dev,
+ "Timeout waiting for end of the capture\n");
+
+ /* Disable DMA interrupt */
+ regmap_write(isc->regmap, ISC_INTDIS, ISC_INT_DDONE);
+
+ pm_runtime_put_sync(isc->dev);
+
+ /* Disable stream on the sub device */
+ ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ v4l2_err(&isc->v4l2_dev, "stream off failed in subdev\n");
+
+ /* Release all active buffers */
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+ if (unlikely(isc->cur_frm)) {
+ vb2_buffer_done(&isc->cur_frm->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ isc->cur_frm = NULL;
+ }
+ list_for_each_entry(buf, &isc->dma_queue, list)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&isc->dma_queue);
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+}
+
+static void isc_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct isc_buffer *buf = container_of(vbuf, struct isc_buffer, vb);
+ struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&isc->dma_queue_lock, flags);
+ list_add_tail(&buf->list, &isc->dma_queue);
+ spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
+}
+
+static struct vb2_ops isc_vb2_ops = {
+ .queue_setup = isc_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = isc_buffer_prepare,
+ .start_streaming = isc_start_streaming,
+ .stop_streaming = isc_stop_streaming,
+ .buf_queue = isc_buffer_queue,
+};
+
+static int isc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ strcpy(cap->driver, ATMEL_ISC_NAME);
+ strcpy(cap->card, "Atmel Image Sensor Controller");
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", isc->v4l2_dev.name);
+
+ return 0;
+}
+
+static int isc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct isc_device *isc = video_drvdata(file);
+ u32 index = f->index;
+
+ if (index >= isc->num_user_formats)
+ return -EINVAL;
+
+ f->pixelformat = isc->user_formats[index]->fourcc;
+
+ return 0;
+}
+
+static int isc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ *fmt = isc->fmt;
+
+ return 0;
+}
+
+static struct isc_format *find_format_by_fourcc(struct isc_device *isc,
+ unsigned int fourcc)
+{
+ unsigned int num_formats = isc->num_user_formats;
+ struct isc_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ fmt = isc->user_formats[i];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
+ struct isc_format **current_fmt)
+{
+ struct isc_format *isc_fmt;
+ struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ int ret;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ isc_fmt = find_format_by_fourcc(isc, pixfmt->pixelformat);
+ if (!isc_fmt) {
+ v4l2_warn(&isc->v4l2_dev, "Format 0x%x not found\n",
+ pixfmt->pixelformat);
+ isc_fmt = isc->user_formats[isc->num_user_formats - 1];
+ pixfmt->pixelformat = isc_fmt->fourcc;
+ }
+
+ /* Limit to Atmel ISC hardware capabilities */
+ if (pixfmt->width > ISC_MAX_SUPPORT_WIDTH)
+ pixfmt->width = ISC_MAX_SUPPORT_WIDTH;
+ if (pixfmt->height > ISC_MAX_SUPPORT_HEIGHT)
+ pixfmt->height = ISC_MAX_SUPPORT_HEIGHT;
+
+ v4l2_fill_mbus_format(&format.format, pixfmt, isc_fmt->mbus_code);
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
+ isc->current_subdev->config, &format);
+ if (ret < 0)
+ return ret;
+
+ v4l2_fill_pix_format(pixfmt, &format.format);
+
+ pixfmt->field = V4L2_FIELD_NONE;
+ pixfmt->bytesperline = pixfmt->width * isc_fmt->bpp;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ if (current_fmt)
+ *current_fmt = isc_fmt;
+
+ return 0;
+}
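+
+/*
+ * For instance (illustrative numbers): negotiating 640x480
+ * V4L2_PIX_FMT_YUYV (bpp = 2 in isc_formats[]) yields
+ * bytesperline = 1280 and sizeimage = 614400, the plane size later
+ * enforced by isc_queue_setup() and isc_buffer_prepare().
+ */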
+
+static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct isc_format *current_fmt;
+ int ret;
+
+ ret = isc_try_fmt(isc, f, &current_fmt);
+ if (ret)
+ return ret;
+
+ v4l2_fill_mbus_format(&format.format, &f->fmt.pix,
+ current_fmt->mbus_code);
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
+ set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ isc->fmt = *f;
+ isc->current_fmt = current_fmt;
+
+ return 0;
+}
+
+static int isc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ if (vb2_is_streaming(&isc->vb2_vidq))
+ return -EBUSY;
+
+ return isc_set_fmt(isc, f);
+}
+
+static int isc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ return isc_try_fmt(isc, f, NULL);
+}
+
+static int isc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index != 0)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = 0;
+ strcpy(inp->name, "Camera");
+
+ return 0;
+}
+
+static int isc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int isc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int isc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return v4l2_subdev_call(isc->current_subdev->sd, video, g_parm, a);
+}
+
+static int isc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct isc_device *isc = video_drvdata(file);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return v4l2_subdev_call(isc->current_subdev->sd, video, s_parm, a);
+}
+
+static int isc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct isc_device *isc = video_drvdata(file);
+ const struct isc_format *isc_fmt;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ isc_fmt = find_format_by_fourcc(isc, fsize->pixel_format);
+ if (!isc_fmt)
+ return -EINVAL;
+
+ fse.code = isc_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int isc_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct isc_device *isc = video_drvdata(file);
+ const struct isc_format *isc_fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ isc_fmt = find_format_by_fourcc(isc, fival->pixel_format);
+ if (!isc_fmt)
+ return -EINVAL;
+
+ fie.code = isc_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
+ enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops isc_ioctl_ops = {
+ .vidioc_querycap = isc_querycap,
+ .vidioc_enum_fmt_vid_cap = isc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = isc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = isc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = isc_try_fmt_vid_cap,
+
+ .vidioc_enum_input = isc_enum_input,
+ .vidioc_g_input = isc_g_input,
+ .vidioc_s_input = isc_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_g_parm = isc_g_parm,
+ .vidioc_s_parm = isc_s_parm,
+ .vidioc_enum_framesizes = isc_enum_framesizes,
+ .vidioc_enum_frameintervals = isc_enum_frameintervals,
+};
+
+static int isc_open(struct file *file)
+{
+ struct isc_device *isc = video_drvdata(file);
+ struct v4l2_subdev *sd = isc->current_subdev->sd;
+ int ret;
+
+ if (mutex_lock_interruptible(&isc->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ v4l2_fh_release(file);
+ goto unlock;
+ }
+
+ ret = isc_set_fmt(isc, &isc->fmt);
+ if (ret) {
+ v4l2_subdev_call(sd, core, s_power, 0);
+ v4l2_fh_release(file);
+ }
+
+unlock:
+ mutex_unlock(&isc->lock);
+ return ret;
+}
+
+static int isc_release(struct file *file)
+{
+ struct isc_device *isc = video_drvdata(file);
+ struct v4l2_subdev *sd = isc->current_subdev->sd;
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&isc->lock);
+
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ ret = _vb2_fop_release(file, NULL);
+
+ if (fh_singular)
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ mutex_unlock(&isc->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations isc_fops = {
+ .owner = THIS_MODULE,
+ .open = isc_open,
+ .release = isc_release,
+ .unlocked_ioctl = video_ioctl2,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+static irqreturn_t isc_interrupt(int irq, void *dev_id)
+{
+ struct isc_device *isc = (struct isc_device *)dev_id;
+ struct regmap *regmap = isc->regmap;
+ u32 isc_intsr, isc_intmask, pending;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&isc->dma_queue_lock);
+
+ regmap_read(regmap, ISC_INTSR, &isc_intsr);
+ regmap_read(regmap, ISC_INTMASK, &isc_intmask);
+
+ pending = isc_intsr & isc_intmask;
+
+ if (likely(pending & ISC_INT_DDONE)) {
+ if (isc->cur_frm) {
+ struct vb2_v4l2_buffer *vbuf = &isc->cur_frm->vb;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vbuf->sequence = isc->sequence++;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ isc->cur_frm = NULL;
+ }
+
+ if (!list_empty(&isc->dma_queue) && !isc->stop) {
+ isc->cur_frm = list_first_entry(&isc->dma_queue,
+ struct isc_buffer, list);
+ list_del(&isc->cur_frm->list);
+
+ isc_start_dma(regmap, isc->cur_frm,
+ isc->current_fmt->reg_dctrl_dview);
+ }
+
+ if (isc->stop)
+ complete(&isc->comp);
+
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock(&isc->dma_queue_lock);
+
+ return ret;
+}
+
+static int isc_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct isc_device *isc = container_of(notifier->v4l2_dev,
+ struct isc_device, v4l2_dev);
+ struct isc_subdev_entity *subdev_entity =
+ container_of(notifier, struct isc_subdev_entity, notifier);
+
+ if (video_is_registered(&isc->video_dev)) {
+ v4l2_err(&isc->v4l2_dev, "only one sub-device is supported\n");
+ return -EBUSY;
+ }
+
+ subdev_entity->sd = subdev;
+
+ return 0;
+}
+
+static void isc_async_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct isc_device *isc = container_of(notifier->v4l2_dev,
+ struct isc_device, v4l2_dev);
+
+ video_unregister_device(&isc->video_dev);
+ if (isc->current_subdev->config)
+ v4l2_subdev_free_pad_config(isc->current_subdev->config);
+}
+
+static struct isc_format *find_format_by_code(unsigned int code, int *index)
+{
+ struct isc_format *fmt = &isc_formats[0];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ if (fmt->mbus_code == code) {
+ *index = i;
+ return fmt;
+ }
+
+ fmt++;
+ }
+
+ return NULL;
+}
+
+static int isc_formats_init(struct isc_device *isc)
+{
+ struct isc_format *fmt;
+ struct v4l2_subdev *subdev = isc->current_subdev->sd;
+ int num_fmts = 0, i, j;
+ struct v4l2_subdev_mbus_code_enum mbus_code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmt = &isc_formats[0];
+ for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ fmt->support = false;
+ fmt++;
+ }
+
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code)) {
+ mbus_code.index++;
+ fmt = find_format_by_code(mbus_code.code, &i);
+ if (!fmt)
+ continue;
+
+ fmt->support = true;
+ num_fmts++;
+ }
+
+ if (!num_fmts)
+ return -ENXIO;
+
+ isc->num_user_formats = num_fmts;
+ isc->user_formats = devm_kcalloc(isc->dev,
+ num_fmts, sizeof(struct isc_format *),
+ GFP_KERNEL);
+ if (!isc->user_formats) {
+ v4l2_err(&isc->v4l2_dev, "could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ fmt = &isc_formats[0];
+ for (i = 0, j = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ if (fmt->support)
+ isc->user_formats[j++] = fmt;
+
+ fmt++;
+ }
+
+ return 0;
+}
+
+static int isc_set_default_fmt(struct isc_device *isc)
+{
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = isc->user_formats[0]->fourcc,
+ },
+ };
+ int ret;
+
+ ret = isc_try_fmt(isc, &f, NULL);
+ if (ret)
+ return ret;
+
+ isc->current_fmt = isc->user_formats[0];
+ isc->fmt = f;
+
+ return 0;
+}
+
+static int isc_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct isc_device *isc = container_of(notifier->v4l2_dev,
+ struct isc_device, v4l2_dev);
+ struct isc_subdev_entity *sd_entity;
+ struct video_device *vdev = &isc->video_dev;
+ struct vb2_queue *q = &isc->vb2_vidq;
+ int ret;
+
+ isc->current_subdev = container_of(notifier,
+ struct isc_subdev_entity, notifier);
+ sd_entity = isc->current_subdev;
+
+ mutex_init(&isc->lock);
+ init_completion(&isc->comp);
+
+ /* Initialize videobuf2 queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = isc;
+ q->buf_struct_size = sizeof(struct isc_buffer);
+ q->ops = &isc_vb2_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &isc->lock;
+ q->min_buffers_needed = 1;
+ q->dev = isc->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev,
+ "vb2_queue_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Init video dma queues */
+ INIT_LIST_HEAD(&isc->dma_queue);
+ spin_lock_init(&isc->dma_queue_lock);
+
+ sd_entity->config = v4l2_subdev_alloc_pad_config(sd_entity->sd);
+ if (sd_entity->config == NULL)
+ return -ENOMEM;
+
+ ret = isc_formats_init(isc);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev,
+ "Init format failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = isc_set_default_fmt(isc);
+ if (ret) {
+ v4l2_err(&isc->v4l2_dev, "Could not set default format\n");
+ return ret;
+ }
+
+ /* Register video device */
+ strlcpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name));
+ vdev->release = video_device_release_empty;
+ vdev->fops = &isc_fops;
+ vdev->ioctl_ops = &isc_ioctl_ops;
+ vdev->v4l2_dev = &isc->v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = q;
+ vdev->lock = &isc->lock;
+ vdev->ctrl_handler = isc->current_subdev->sd->ctrl_handler;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+ video_set_drvdata(vdev, isc);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev,
+ "video_register_device failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void isc_subdev_cleanup(struct isc_device *isc)
+{
+ struct isc_subdev_entity *subdev_entity;
+
+ list_for_each_entry(subdev_entity, &isc->subdev_entities, list)
+ v4l2_async_notifier_unregister(&subdev_entity->notifier);
+
+ INIT_LIST_HEAD(&isc->subdev_entities);
+}
+
+static int isc_pipeline_init(struct isc_device *isc)
+{
+ struct device *dev = isc->dev;
+ struct regmap *regmap = isc->regmap;
+ struct regmap_field *regs;
+ unsigned int i;
+
+ /* WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB422-->SUB420 */
+ const struct reg_field regfields[ISC_PIPE_LINE_NODE_NUM] = {
+ REG_FIELD(ISC_WB_CTRL, 0, 0),
+ REG_FIELD(ISC_CFA_CTRL, 0, 0),
+ REG_FIELD(ISC_CC_CTRL, 0, 0),
+ REG_FIELD(ISC_GAM_CTRL, 0, 0),
+ REG_FIELD(ISC_GAM_CTRL, 1, 1),
+ REG_FIELD(ISC_GAM_CTRL, 2, 2),
+ REG_FIELD(ISC_GAM_CTRL, 3, 3),
+ REG_FIELD(ISC_CSC_CTRL, 0, 0),
+ REG_FIELD(ISC_CBC_CTRL, 0, 0),
+ REG_FIELD(ISC_SUB422_CTRL, 0, 0),
+ REG_FIELD(ISC_SUB420_CTRL, 0, 0),
+ };
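+
+ /*
+ * Note: there are ISC_PIPE_LINE_NODE_NUM (11) fields for the eight
+ * stages listed above because the GAM stage contributes four enable
+ * bits in ISC_GAM_CTRL (assumed here to be a global enable plus one
+ * per colour channel).
+ */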
+
+ for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
+ regs = devm_regmap_field_alloc(dev, regmap, regfields[i]);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ isc->pipeline[i] = regs;
+ }
+
+ return 0;
+}
+
+static int isc_parse_dt(struct device *dev, struct isc_device *isc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *epn = NULL, *rem;
+ struct v4l2_of_endpoint v4l2_epn;
+ struct isc_subdev_entity *subdev_entity;
+ unsigned int flags;
+ int ret;
+
+ INIT_LIST_HEAD(&isc->subdev_entities);
+
+ for (;;) {
+ epn = of_graph_get_next_endpoint(np, epn);
+ if (!epn)
+ break;
+
+ rem = of_graph_get_remote_port_parent(epn);
+ if (!rem) {
+ dev_notice(dev, "Remote device at %s not found\n",
+ of_node_full_name(epn));
+ continue;
+ }
+
+ ret = v4l2_of_parse_endpoint(epn, &v4l2_epn);
+ if (ret) {
+ of_node_put(rem);
+ ret = -EINVAL;
+ dev_err(dev, "Could not parse the endpoint\n");
+ break;
+ }
+
+ subdev_entity = devm_kzalloc(dev,
+ sizeof(*subdev_entity), GFP_KERNEL);
+ if (subdev_entity == NULL) {
+ of_node_put(rem);
+ ret = -ENOMEM;
+ break;
+ }
+
+ subdev_entity->asd = devm_kzalloc(dev,
+ sizeof(*subdev_entity->asd), GFP_KERNEL);
+ if (subdev_entity->asd == NULL) {
+ of_node_put(rem);
+ ret = -ENOMEM;
+ break;
+ }
+
+ flags = v4l2_epn.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
+
+ subdev_entity->asd->match_type = V4L2_ASYNC_MATCH_OF;
+ subdev_entity->asd->match.of.node = rem;
+ list_add_tail(&subdev_entity->list, &isc->subdev_entities);
+ }
+
+ of_node_put(epn);
+ return ret;
+}
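+
+/*
+ * A minimal, hypothetical device-tree fragment accepted by the parser
+ * above (node names, the unit address and the remote sensor endpoint
+ * are illustrative only):
+ *
+ * isc: isc@f0008000 {
+ * compatible = "atmel,sama5d2-isc";
+ * port {
+ * isc_ep: endpoint {
+ * remote-endpoint = <&sensor_ep>;
+ * hsync-active = <0>;
+ * vsync-active = <0>;
+ * pclk-sample = <0>;
+ * };
+ * };
+ * };
+ *
+ * The three endpoint properties map to the V4L2_MBUS_*_ACTIVE_LOW and
+ * V4L2_MBUS_PCLK_SAMPLE_FALLING flags checked above and hence to the
+ * ISC_PFE_CFG0_*POL_LOW bits.
+ */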
+
+/* regmap configuration */
+#define ATMEL_ISC_REG_MAX 0xbfc
+static const struct regmap_config isc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = ATMEL_ISC_REG_MAX,
+};
+
+static int atmel_isc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct isc_device *isc;
+ struct resource *res;
+ void __iomem *io_base;
+ struct isc_subdev_entity *subdev_entity;
+ int irq;
+ int ret;
+
+ isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
+ if (!isc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, isc);
+ isc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ isc->regmap = devm_regmap_init_mmio(dev, io_base, &isc_regmap_config);
+ if (IS_ERR(isc->regmap)) {
+ ret = PTR_ERR(isc->regmap);
+ dev_err(dev, "failed to init register map: %d\n", ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ dev_err(dev, "failed to get irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, irq, isc_interrupt, 0,
+ ATMEL_ISC_NAME, isc);
+ if (ret < 0) {
+ dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
+ irq, ret);
+ return ret;
+ }
+
+ ret = isc_pipeline_init(isc);
+ if (ret)
+ return ret;
+
+ isc->hclock = devm_clk_get(dev, "hclock");
+ if (IS_ERR(isc->hclock)) {
+ ret = PTR_ERR(isc->hclock);
+ dev_err(dev, "failed to get hclock: %d\n", ret);
+ return ret;
+ }
+
+ ret = isc_clk_init(isc);
+ if (ret) {
+ dev_err(dev, "failed to init isc clock: %d\n", ret);
+ goto clean_isc_clk;
+ }
+
+ isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+
+ /* ispck should be greater or equal to hclock */
+ ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
+ if (ret) {
+ dev_err(dev, "failed to set ispck rate: %d\n", ret);
+ goto clean_isc_clk;
+ }
+
+ ret = v4l2_device_register(dev, &isc->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "unable to register v4l2 device.\n");
+ goto clean_isc_clk;
+ }
+
+ ret = isc_parse_dt(dev, isc);
+ if (ret) {
+ dev_err(dev, "failed to parse device tree\n");
+ goto unregister_v4l2_device;
+ }
+
+ if (list_empty(&isc->subdev_entities)) {
+ dev_err(dev, "no subdev found\n");
+ goto unregister_v4l2_device;
+ }
+
+ list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
+ subdev_entity->notifier.subdevs = &subdev_entity->asd;
+ subdev_entity->notifier.num_subdevs = 1;
+ subdev_entity->notifier.bound = isc_async_bound;
+ subdev_entity->notifier.unbind = isc_async_unbind;
+ subdev_entity->notifier.complete = isc_async_complete;
+
+ ret = v4l2_async_notifier_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
+ if (ret) {
+ dev_err(dev, "failed to register async notifier\n");
+ goto cleanup_subdev;
+ }
+
+ if (video_is_registered(&isc->video_dev))
+ break;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+
+cleanup_subdev:
+ isc_subdev_cleanup(isc);
+
+unregister_v4l2_device:
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+clean_isc_clk:
+ isc_clk_cleanup(isc);
+
+ return ret;
+}
+
+static int atmel_isc_remove(struct platform_device *pdev)
+{
+ struct isc_device *isc = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ isc_subdev_cleanup(isc);
+
+ v4l2_device_unregister(&isc->v4l2_dev);
+
+ isc_clk_cleanup(isc);
+
+ return 0;
+}
+
+static int __maybe_unused isc_runtime_suspend(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
+
+ return 0;
+}
+
+static int __maybe_unused isc_runtime_resume(struct device *dev)
+{
+ struct isc_device *isc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret)
+ return ret;
+
+ return clk_prepare_enable(isc->ispck);
+}
+
+static const struct dev_pm_ops atmel_isc_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(isc_runtime_suspend, isc_runtime_resume, NULL)
+};
+
+static const struct of_device_id atmel_isc_of_match[] = {
+ { .compatible = "atmel,sama5d2-isc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, atmel_isc_of_match);
+
+static struct platform_driver atmel_isc_driver = {
+ .probe = atmel_isc_probe,
+ .remove = atmel_isc_remove,
+ .driver = {
+ .name = ATMEL_ISC_NAME,
+ .pm = &atmel_isc_dev_pm_ops,
+ .of_match_table = of_match_ptr(atmel_isc_of_match),
+ },
+};
+
+module_platform_driver(atmel_isc_driver);
+
+MODULE_AUTHOR("Songjun Wu <songjun.wu@microchip.com>");
+MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 0b1709e96673..a9bc0175e4d3 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -440,7 +440,7 @@ vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
/*
* Application initially set the image format. Current display
* size is obtained from the vpbe display controller. expected_xsize
- * and expected_ysize are set through S_CROP ioctl. Based on this,
+ * and expected_ysize are set through S_SELECTION ioctl. Based on this,
* driver will calculate the scale factors for vertical and
* horizontal direction so that the image is displayed scaled
* and expanded. Application uses expansion to display the image
@@ -649,24 +649,23 @@ static int vpbe_display_querycap(struct file *file, void *priv,
return 0;
}
-static int vpbe_display_s_crop(struct file *file, void *priv,
- const struct v4l2_crop *crop)
+static int vpbe_display_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
{
struct vpbe_layer *layer = video_drvdata(file);
struct vpbe_display *disp_dev = layer->disp_dev;
struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
struct osd_layer_config *cfg = &layer->layer_info.config;
struct osd_state *osd_device = disp_dev->osd_device;
- struct v4l2_rect rect = crop->c;
+ struct v4l2_rect rect = sel->r;
int ret;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_S_CROP, layer id = %d\n", layer->device_id);
+ "VIDIOC_S_SELECTION, layer id = %d\n", layer->device_id);
- if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- }
if (rect.top < 0)
rect.top = 0;
@@ -714,32 +713,45 @@ static int vpbe_display_s_crop(struct file *file, void *priv,
else
osd_device->ops.set_interpolation_filter(osd_device, 0);
+ sel->r = rect;
return 0;
}
-static int vpbe_display_g_crop(struct file *file, void *priv,
- struct v4l2_crop *crop)
+static int vpbe_display_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
{
struct vpbe_layer *layer = video_drvdata(file);
struct osd_layer_config *cfg = &layer->layer_info.config;
struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
struct osd_state *osd_device = layer->disp_dev->osd_device;
- struct v4l2_rect *rect = &crop->c;
+ struct v4l2_rect *rect = &sel->r;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_G_CROP, layer id = %d\n",
+ "VIDIOC_G_SELECTION, layer id = %d\n",
layer->device_id);
- if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ rect->top = cfg->ypos;
+ rect->left = cfg->xpos;
+ rect->width = cfg->xsize;
+ rect->height = cfg->ysize;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = vpbe_dev->current_timings.xres;
+ rect->height = vpbe_dev->current_timings.yres;
+ break;
+ default:
return -EINVAL;
}
- osd_device->ops.get_layer_config(osd_device,
- layer->layer_info.id, cfg);
- rect->top = cfg->ypos;
- rect->left = cfg->xpos;
- rect->width = cfg->xsize;
- rect->height = cfg->ysize;
return 0;
}
@@ -752,13 +764,10 @@ static int vpbe_display_cropcap(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
- cropcap->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- cropcap->bounds.left = 0;
- cropcap->bounds.top = 0;
- cropcap->bounds.width = vpbe_dev->current_timings.xres;
- cropcap->bounds.height = vpbe_dev->current_timings.yres;
+ if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
cropcap->pixelaspect = vpbe_dev->current_timings.aspect;
- cropcap->defrect = cropcap->bounds;
return 0;
}
@@ -1251,8 +1260,8 @@ static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_cropcap = vpbe_display_cropcap,
- .vidioc_g_crop = vpbe_display_g_crop,
- .vidioc_s_crop = vpbe_display_s_crop,
+ .vidioc_g_selection = vpbe_display_g_selection,
+ .vidioc_s_selection = vpbe_display_s_selection,
.vidioc_s_std = vpbe_display_s_std,
.vidioc_g_std = vpbe_display_g_std,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 7767e072d623..6efb2f1631c4 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1610,38 +1610,53 @@ static int vpfe_cropcap(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_cropcap\n");
- if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ /* If std_index is invalid, then just return (== 1:1 aspect) */
+ if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
+ return 0;
- memset(crop, 0, sizeof(struct v4l2_cropcap));
- crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- crop->bounds.width = crop->defrect.width =
- vpfe_standards[vpfe_dev->std_index].width;
- crop->bounds.height = crop->defrect.height =
- vpfe_standards[vpfe_dev->std_index].height;
crop->pixelaspect = vpfe_standards[vpfe_dev->std_index].pixelaspect;
return 0;
}
-static int vpfe_g_crop(struct file *file, void *priv,
- struct v4l2_crop *crop)
+static int vpfe_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_crop\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_selection\n");
- crop->c = vpfe_dev->crop;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = vpfe_dev->crop;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.width = vpfe_standards[vpfe_dev->std_index].width;
+ sel->r.height = vpfe_standards[vpfe_dev->std_index].height;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
-static int vpfe_s_crop(struct file *file, void *priv,
- const struct v4l2_crop *crop)
+static int vpfe_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct v4l2_rect rect = crop->c;
+ struct v4l2_rect rect = sel->r;
int ret = 0;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_crop\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_selection\n");
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
if (vpfe_dev->started) {
/* make sure streaming is not started */
@@ -1669,7 +1684,7 @@ static int vpfe_s_crop(struct file *file, void *priv,
vpfe_dev->std_info.active_pixels) ||
(rect.top + rect.height >
vpfe_dev->std_info.active_lines)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_CROP params\n");
+ v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_SELECTION params\n");
ret = -EINVAL;
goto unlock_out;
}
@@ -1682,6 +1697,7 @@ static int vpfe_s_crop(struct file *file, void *priv,
vpfe_dev->fmt.fmt.pix.bytesperline *
vpfe_dev->fmt.fmt.pix.height;
vpfe_dev->crop = rect;
+ sel->r = rect;
unlock_out:
mutex_unlock(&vpfe_dev->lock);
return ret;
@@ -1760,8 +1776,8 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
.vidioc_streamon = vpfe_streamon,
.vidioc_streamoff = vpfe_streamoff,
.vidioc_cropcap = vpfe_cropcap,
- .vidioc_g_crop = vpfe_g_crop,
- .vidioc_s_crop = vpfe_s_crop,
+ .vidioc_g_selection = vpfe_g_selection,
+ .vidioc_s_selection = vpfe_s_selection,
.vidioc_default = vpfe_param_handler,
};
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index ec6494cbdd45..9f03b791b711 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -261,7 +261,7 @@ static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}
-static struct vb2_ops gsc_m2m_qops = {
+static const struct vb2_ops gsc_m2m_qops = {
.queue_setup = gsc_m2m_queue_setup,
.buf_prepare = gsc_m2m_buf_prepare,
.buf_queue = gsc_m2m_buf_queue,
@@ -277,9 +277,10 @@ static int gsc_m2m_querycap(struct file *file, void *fh,
struct gsc_ctx *ctx = fh_to_ctx(fh);
struct gsc_dev *gsc = ctx->gsc_dev;
- strlcpy(cap->driver, gsc->pdev->name, sizeof(cap->driver));
- strlcpy(cap->card, gsc->pdev->name, sizeof(cap->card));
- strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
+ strlcpy(cap->driver, GSC_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, GSC_MODULE_NAME " gscaler", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&gsc->pdev->dev));
cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index fdec499fbbda..964f4a681934 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -452,7 +452,7 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&fimc->slock, flags);
}
-static struct vb2_ops fimc_capture_qops = {
+static const struct vb2_ops fimc_capture_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
@@ -1796,6 +1796,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
vid_cap->wb_fmt.code = fmt->mbus_code;
vid_cap->vd_pad.flags = MEDIA_PAD_FL_SINK;
+ vfd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
ret = media_entity_pads_init(&vfd->entity, 1, &vid_cap->vd_pad);
if (ret)
goto err_free_ctx;
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
index 7521aa59b064..6bba4ca022be 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
@@ -55,26 +55,33 @@ static int fimc_is_i2c_probe(struct platform_device *pdev)
i2c_adap->algo = &fimc_is_i2c_algorithm;
i2c_adap->class = I2C_CLASS_SPD;
- ret = i2c_add_adapter(i2c_adap);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add I2C bus %s\n",
- node->full_name);
- return ret;
- }
-
platform_set_drvdata(pdev, isp_i2c);
-
pm_runtime_enable(&pdev->dev);
- pm_runtime_enable(&i2c_adap->dev);
+ ret = i2c_add_adapter(i2c_adap);
+ if (ret < 0)
+ goto err_pm_dis;
+ /*
+ * Client drivers of this adapter don't do any I2C transfers as that
+ * is handled by the ISP firmware. But we rely on the runtime PM
+ * state propagation from the clients up to the adapter driver so
+ * clear the ignore_children flag here. PM runtime calls are not
+ * used in the probe() handlers of this adapter's clients, so there
+ * are no issues with clearing the flag right after registering the I2C
+ * adapter.
+ */
+ pm_suspend_ignore_children(&i2c_adap->dev, false);
return 0;
+
+err_pm_dis:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
}
static int fimc_is_i2c_remove(struct platform_device *pdev)
{
struct fimc_is_i2c *isp_i2c = platform_get_drvdata(pdev);
- pm_runtime_disable(&isp_i2c->adapter.dev);
pm_runtime_disable(&pdev->dev);
i2c_del_adapter(&isp_i2c->adapter);
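
The reordering above matters because i2c_add_adapter() may immediately probe the adapter's DT-declared clients, which expect runtime PM to be enabled already; the adapter is therefore registered last and torn down first on error. The general probe shape, sketched with a hypothetical foo_setup_adapter() helper:

#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct i2c_adapter *adap;
	int ret;

	adap = foo_setup_adapter(pdev);		/* hypothetical helper */
	platform_set_drvdata(pdev, adap);

	pm_runtime_enable(&pdev->dev);		/* ready before clients probe */

	ret = i2c_add_adapter(adap);		/* may probe clients now */
	if (ret < 0)
		goto err_pm_dis;
	return 0;

err_pm_dis:
	pm_runtime_disable(&pdev->dev);		/* unwind in reverse order */
	return ret;
}
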
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 32ca55f16677..518ad34f80d7 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -52,6 +52,9 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = {
[ISS_CLK_DRC] = "drc",
[ISS_CLK_FD] = "fd",
[ISS_CLK_MCUISP] = "mcuisp",
+ [ISS_CLK_GICISP] = "gicisp",
+ [ISS_CLK_PWM_ISP] = "pwm_isp",
+ [ISS_CLK_MCUCTL_ISP] = "mcuctl_isp",
[ISS_CLK_UART] = "uart",
[ISS_CLK_ISP_DIV0] = "ispdiv0",
[ISS_CLK_ISP_DIV1] = "ispdiv1",
@@ -165,6 +168,7 @@ static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index,
struct device_node *node)
{
struct fimc_is_sensor *sensor = &is->sensor[index];
+ struct device_node *ep, *port;
u32 tmp = 0;
int ret;
@@ -175,22 +179,25 @@ static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index,
return -EINVAL;
}
- node = of_graph_get_next_endpoint(node, NULL);
- if (!node)
+ ep = of_graph_get_next_endpoint(node, NULL);
+ if (!ep)
return -ENXIO;
- node = of_graph_get_remote_port(node);
- if (!node)
+ port = of_graph_get_remote_port(ep);
+ of_node_put(ep);
+ if (!port)
return -ENXIO;
/* Use MIPI-CSIS channel id to determine the ISP I2C bus index. */
- ret = of_property_read_u32(node, "reg", &tmp);
+ ret = of_property_read_u32(port, "reg", &tmp);
if (ret < 0) {
dev_err(&is->pdev->dev, "reg property not found at: %s\n",
- node->full_name);
+ port->full_name);
+ of_node_put(port);
return ret;
}
+ of_node_put(port);
sensor->i2c_bus = tmp - FIMC_INPUT_MIPI_CSI2_0;
return 0;
}
@@ -845,13 +852,18 @@ static int fimc_is_probe(struct platform_device *pdev)
goto err_pm;
vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret < 0)
+ goto err_pm;
+
/*
* Register FIMC-IS V4L2 subdevs to this driver. The video nodes
* will be created within the subdev's registered() callback.
*/
ret = fimc_is_register_subdevs(is);
if (ret < 0)
- goto err_pm;
+ goto err_of_dep;
ret = fimc_is_debugfs_create(is);
if (ret < 0)
@@ -870,6 +882,8 @@ err_dfs:
fimc_is_debugfs_remove(is);
err_sd:
fimc_is_unregister_subdevs(is);
+err_of_dep:
+ of_platform_depopulate(dev);
err_pm:
if (!pm_runtime_enabled(dev))
fimc_is_runtime_suspend(dev);
@@ -929,6 +943,7 @@ static int fimc_is_remove(struct platform_device *pdev)
if (!pm_runtime_status_suspended(dev))
fimc_is_runtime_suspend(dev);
free_irq(is->irq, is);
+ of_platform_depopulate(dev);
fimc_is_unregister_subdevs(is);
vb2_dma_contig_clear_max_seg_size(dev);
fimc_is_put_clocks(is);
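
of_platform_populate() instantiates the child nodes of the device (here, among others, the ISP I2C busses served by the fimc-is-i2c driver above) as platform devices. Every failure path after it, and the remove path, must mirror it with of_platform_depopulate(), exactly as the hunks do. A minimal sketch of the pairing, assuming a hypothetical foo driver with DT children:

#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* create platform devices for the child DT nodes */
	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
				   &pdev->dev);
	if (ret < 0)
		return ret;

	ret = foo_register_subdevs(pdev);	/* hypothetical */
	if (ret < 0)
		of_platform_depopulate(&pdev->dev); /* mirror on error */
	return ret;
}

static int foo_remove(struct platform_device *pdev)
{
	of_platform_depopulate(&pdev->dev);	/* and on remove */
	return 0;
}
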
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h
index 3a82c6a214c7..ee05da034aa1 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.h
+++ b/drivers/media/platform/exynos4-is/fimc-is.h
@@ -77,6 +77,9 @@ enum {
ISS_CLK_DRC,
ISS_CLK_FD,
ISS_CLK_MCUISP,
+ ISS_CLK_GICISP,
+ ISS_CLK_PWM_ISP,
+ ISS_CLK_MCUCTL_ISP,
ISS_CLK_UART,
ISS_GATE_CLKS_MAX,
ISS_CLK_ISP_DIV0 = ISS_GATE_CLKS_MAX,
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index 293b807020c4..8efe9160ab34 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -705,6 +705,7 @@ int fimc_isp_subdev_create(struct fimc_isp *isp)
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
snprintf(sd->name, sizeof(sd->name), "FIMC-IS-ISP");
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
isp->subdev_pads[FIMC_ISP_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
isp->subdev_pads[FIMC_ISP_SD_PAD_SRC_FIFO].flags = MEDIA_PAD_FL_SOURCE;
isp->subdev_pads[FIMC_ISP_SD_PAD_SRC_DMA].flags = MEDIA_PAD_FL_SOURCE;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index a0f149fb88e1..b91abf1c4d43 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -1432,6 +1432,7 @@ static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
sd->ctrl_handler = handler;
sd->internal_ops = &fimc_lite_subdev_internal_ops;
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
sd->entity.ops = &fimc_lite_subdev_media_ops;
sd->owner = THIS_MODULE;
v4l2_set_subdevdata(sd, fimc);
@@ -1454,25 +1455,17 @@ static void fimc_lite_clk_put(struct fimc_lite *fimc)
if (IS_ERR(fimc->clock))
return;
- clk_unprepare(fimc->clock);
clk_put(fimc->clock);
fimc->clock = ERR_PTR(-EINVAL);
}
static int fimc_lite_clk_get(struct fimc_lite *fimc)
{
- int ret;
-
fimc->clock = clk_get(&fimc->pdev->dev, FLITE_CLK_NAME);
if (IS_ERR(fimc->clock))
return PTR_ERR(fimc->clock);
- ret = clk_prepare(fimc->clock);
- if (ret < 0) {
- clk_put(fimc->clock);
- fimc->clock = ERR_PTR(-EINVAL);
- }
- return ret;
+ return 0;
}
static const struct of_device_id flite_of_match[];
@@ -1543,7 +1536,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
if (!pm_runtime_enabled(dev)) {
- ret = clk_enable(fimc->clock);
+ ret = clk_prepare_enable(fimc->clock);
if (ret < 0)
goto err_sd;
}
@@ -1568,7 +1561,7 @@ static int fimc_lite_runtime_resume(struct device *dev)
{
struct fimc_lite *fimc = dev_get_drvdata(dev);
- clk_enable(fimc->clock);
+ clk_prepare_enable(fimc->clock);
return 0;
}
@@ -1576,7 +1569,7 @@ static int fimc_lite_runtime_suspend(struct device *dev)
{
struct fimc_lite *fimc = dev_get_drvdata(dev);
- clk_disable(fimc->clock);
+ clk_disable_unprepare(fimc->clock);
return 0;
}
#endif
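
The fimc-lite hunks fold clk_prepare()/clk_unprepare() into the enable and disable paths. That is safe here because these runtime PM callbacks run in process context, where the possibly sleeping clk_prepare() is allowed; clk_enable()/clk_disable() alone are the atomic-context half of the clk API. A sketch of the resulting pairing (foo_* hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

struct foo_dev {			/* hypothetical */
	struct clk *clock;
};

static int foo_runtime_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	/* process context: prepare (may sleep) and enable in one call */
	return clk_prepare_enable(foo->clock);
}

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	clk_disable_unprepare(foo->clock);	/* exact inverse */
	return 0;
}
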
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index b1309e114edb..6028e4fbaed3 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -219,7 +219,7 @@ static void fimc_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
-static struct vb2_ops fimc_qops = {
+static const struct vb2_ops fimc_qops = {
.queue_setup = fimc_queue_setup,
.buf_prepare = fimc_buf_prepare,
.buf_queue = fimc_buf_queue,
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 891625e77ef5..1a1154a9dfa4 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1190,6 +1190,10 @@ static int fimc_md_link_notify(struct media_link *link, unsigned int flags,
return ret ? -EPIPE : 0;
}
+static const struct media_device_ops fimc_md_ops = {
+ .link_notify = fimc_md_link_notify,
+};
+
static ssize_t fimc_md_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1416,7 +1420,7 @@ static int fimc_md_probe(struct platform_device *pdev)
strlcpy(fmd->media_dev.model, "SAMSUNG S5P FIMC",
sizeof(fmd->media_dev.model));
- fmd->media_dev.link_notify = fimc_md_link_notify;
+ fmd->media_dev.ops = &fimc_md_ops;
fmd->media_dev.dev = dev;
v4l2_dev = &fmd->v4l2_dev;
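
This hunk and the omap3isp one further down migrate from the old media_device.link_notify function pointer to the media_device_ops vtable. The shape of the conversion, sketched with a hypothetical foo_md_init():

#include <media/media-device.h>
#include <media/v4l2-mc.h>

static const struct media_device_ops foo_md_ops = {
	/* v4l2_pipeline_link_notify is the stock helper omap3isp uses */
	.link_notify = v4l2_pipeline_link_notify,
};

static void foo_md_init(struct media_device *mdev)
{
	mdev->ops = &foo_md_ops;	/* replaces mdev->link_notify = ...; */
}
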
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 86e681daa89d..befd9fc0adc4 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -853,6 +853,7 @@ static int s5pcsis_probe(struct platform_device *pdev)
state->format.width = S5PCSIS_DEF_PIX_WIDTH;
state->format.height = S5PCSIS_DEF_PIX_HEIGHT;
+ state->sd.entity.function = MEDIA_ENT_F_IO_V4L;
state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&state->sd.entity,
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 0fcb5c78031d..bedc7cc4c7d6 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -852,7 +852,7 @@ static void deinterlace_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}
-static struct vb2_ops deinterlace_qops = {
+static const struct vb2_ops deinterlace_qops = {
.queue_setup = deinterlace_queue_setup,
.buf_prepare = deinterlace_buf_prepare,
.buf_queue = deinterlace_buf_queue,
@@ -1016,7 +1016,7 @@ static int deinterlace_probe(struct platform_device *pdev)
return -ENODEV;
if (!dma_has_cap(DMA_INTERLEAVE, pcdev->dma_chan->device->cap_mask)) {
- v4l2_err(&pcdev->v4l2_dev, "DMA does not support INTERLEAVE\n");
+ dev_err(&pdev->dev, "DMA does not support INTERLEAVE\n");
goto rel_dma;
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index 3a8e6958adae..c8eaa41c00e6 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -239,7 +239,7 @@ struct mtk_vcodec_ctx {
enum mtk_encode_param param_change;
struct mtk_enc_params enc_params;
- struct venc_common_if *enc_if;
+ const struct venc_common_if *enc_if;
unsigned long drv_handle;
int int_cond;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 2c5719ac23b2..1b1a28abbf1f 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -243,6 +243,8 @@ static int vidioc_venc_s_parm(struct file *file, void *priv,
a->parm.output.timeperframe.numerator;
ctx->param_change |= MTK_ENCODE_PARAM_FRAMERATE;
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+
return 0;
}
@@ -254,6 +256,7 @@ static int vidioc_venc_g_parm(struct file *file, void *priv,
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
a->parm.output.timeperframe.denominator =
ctx->enc_params.framerate_num;
a->parm.output.timeperframe.numerator =
@@ -621,6 +624,69 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
return vidioc_try_fmt(f, fmt);
}
+static int vidioc_venc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_q_data *q_data;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ q_data = mtk_venc_get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r.top = 0;
+ s->r.left = 0;
+ s->r.width = q_data->coded_width;
+ s->r.height = q_data->coded_height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.top = 0;
+ s->r.left = 0;
+ s->r.width = q_data->visible_width;
+ s->r.height = q_data->visible_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_venc_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_q_data *q_data;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ q_data = mtk_venc_get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ /* Only support crop from (0,0) */
+ s->r.top = 0;
+ s->r.left = 0;
+ s->r.width = min(s->r.width, q_data->coded_width);
+ s->r.height = min(s->r.height, q_data->coded_height);
+ q_data->visible_width = s->r.width;
+ q_data->visible_height = s->r.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int vidioc_venc_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
@@ -679,6 +745,9 @@ const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+
+ .vidioc_g_selection = vidioc_venc_g_selection,
+ .vidioc_s_selection = vidioc_venc_s_selection,
};
static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
@@ -854,7 +923,7 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
ctx->state = MTK_STATE_FREE;
}
-static struct vb2_ops mtk_venc_vb2_ops = {
+static const struct vb2_ops mtk_venc_vb2_ops = {
.queue_setup = vb2ops_venc_queue_setup,
.buf_prepare = vb2ops_venc_buf_prepare,
.buf_queue = vb2ops_venc_buf_queue,
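
The g_parm/s_parm hunks above set parm.output.capability so userspace can tell that timeperframe is actually honoured; without V4L2_CAP_TIMEPERFRAME the returned fraction carries no meaning for the caller. A sketch of a compliant g_parm (values illustrative):

#include <linux/videodev2.h>

static int foo_g_parm(struct file *file, void *priv,
		      struct v4l2_streamparm *a)
{
	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	/* advertise that timeperframe below is meaningful */
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe.numerator = 1;
	a->parm.output.timeperframe.denominator = 30;	/* e.g. 30 fps */
	return 0;
}
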
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index 63d4be4ff327..b76c80bdf30b 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -664,16 +664,16 @@ static int h264_enc_deinit(unsigned long handle)
return ret;
}
-static struct venc_common_if venc_h264_if = {
+static const struct venc_common_if venc_h264_if = {
h264_enc_init,
h264_enc_encode,
h264_enc_set_param,
h264_enc_deinit,
};
-struct venc_common_if *get_h264_enc_comm_if(void);
+const struct venc_common_if *get_h264_enc_comm_if(void);
-struct venc_common_if *get_h264_enc_comm_if(void)
+const struct venc_common_if *get_h264_enc_comm_if(void)
{
return &venc_h264_if;
}
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index 6d9758479f9a..544f57186243 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -469,16 +469,16 @@ static int vp8_enc_deinit(unsigned long handle)
return ret;
}
-static struct venc_common_if venc_vp8_if = {
+static const struct venc_common_if venc_vp8_if = {
vp8_enc_init,
vp8_enc_encode,
vp8_enc_set_param,
vp8_enc_deinit,
};
-struct venc_common_if *get_vp8_enc_comm_if(void);
+const struct venc_common_if *get_vp8_enc_comm_if(void);
-struct venc_common_if *get_vp8_enc_comm_if(void)
+const struct venc_common_if *get_vp8_enc_comm_if(void)
{
return &venc_vp8_if;
}
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.c b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
index c4c83e7189c3..d02d5f1df279 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
@@ -26,8 +26,8 @@
#include "mtk_vcodec_enc_pm.h"
#include "mtk_vpu.h"
-struct venc_common_if *get_h264_enc_comm_if(void);
-struct venc_common_if *get_vp8_enc_comm_if(void);
+const struct venc_common_if *get_h264_enc_comm_if(void);
+const struct venc_common_if *get_vp8_enc_comm_if(void);
int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
{
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index c639406fe72e..e68d271b10af 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -743,7 +743,7 @@ static void emmaprp_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}
-static struct vb2_ops emmaprp_qops = {
+static const struct vb2_ops emmaprp_qops = {
.queue_setup = emmaprp_queue_setup,
.buf_prepare = emmaprp_buf_prepare,
.buf_queue = emmaprp_buf_queue,
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 6b01e126fe73..a31b95cb3b09 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
if (!vec)
return -ENOMEM;
- ret = get_vaddr_frames(virtp, 1, true, false, vec);
+ ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
if (ret != 1) {
frame_vector_destroy(vec);
return -EINVAL;
@@ -1247,36 +1247,33 @@ static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
return 0;
}
-static int vidioc_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *cropcap)
+static int vidioc_g_selection(struct file *file, void *fh,
+			      struct v4l2_selection *sel)
{
struct omap_vout_device *vout = fh;
struct v4l2_pix_format *pix = &vout->pix;
- if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
- /* Width and height are always even */
- cropcap->bounds.width = pix->width & ~1;
- cropcap->bounds.height = pix->height & ~1;
-
- omap_vout_default_crop(&vout->pix, &vout->fbuf, &cropcap->defrect);
- cropcap->pixelaspect.numerator = 1;
- cropcap->pixelaspect.denominator = 1;
- return 0;
-}
-
-static int vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
-{
- struct omap_vout_device *vout = fh;
-
- if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = vout->crop;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ omap_vout_default_crop(&vout->pix, &vout->fbuf, &sel->r);
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ /* Width and height are always even */
+ sel->r.width = pix->width & ~1;
+ sel->r.height = pix->height & ~1;
+ break;
+ default:
return -EINVAL;
- crop->c = vout->crop;
+ }
return 0;
}
-static int vidioc_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+static int vidioc_s_selection(struct file *file, void *fh,
+			      struct v4l2_selection *sel)
{
int ret = -EINVAL;
struct omap_vout_device *vout = fh;
@@ -1285,6 +1282,12 @@ static int vidioc_s_crop(struct file *file, void *fh, const struct v4l2_crop *cr
struct omap_video_timings *timing;
struct omap_dss_device *dssdev;
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
if (vout->streaming)
return -EBUSY;
@@ -1309,9 +1312,8 @@ static int vidioc_s_crop(struct file *file, void *fh, const struct v4l2_crop *cr
vout->fbuf.fmt.width = timing->x_res;
}
- if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
- ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
- &vout->fbuf, &crop->c);
+ ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
+ &vout->fbuf, &sel->r);
s_crop_err:
mutex_unlock(&vout->lock);
@@ -1780,9 +1782,8 @@ static const struct v4l2_ioctl_ops vout_ioctl_ops = {
.vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_overlay,
.vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_overlay,
.vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_overlay,
- .vidioc_cropcap = vidioc_cropcap,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_s_crop = vidioc_s_crop,
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 5d54e2c6c16b..0321d84addc7 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -657,6 +657,10 @@ static irqreturn_t isp_isr(int irq, void *_isp)
return IRQ_HANDLED;
}
+static const struct media_device_ops isp_media_ops = {
+ .link_notify = v4l2_pipeline_link_notify,
+};
+
/* -----------------------------------------------------------------------------
* Pipeline stream management
*/
@@ -1680,7 +1684,7 @@ static int isp_register_entities(struct isp_device *isp)
strlcpy(isp->media_dev.model, "TI OMAP3 ISP",
sizeof(isp->media_dev.model));
isp->media_dev.hw_revision = isp->revision;
- isp->media_dev.link_notify = v4l2_pipeline_link_notify;
+ isp->media_dev.ops = &isp_media_ops;
media_device_init(&isp->media_dev);
isp->v4l2_dev.mdev = &isp->media_dev;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 7d9f35976d18..7354469670b7 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -772,40 +772,45 @@ isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
}
static int
-isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
-{
- struct isp_video *video = video_drvdata(file);
- struct v4l2_subdev *subdev;
- int ret;
-
- subdev = isp_video_remote_subdev(video, NULL);
- if (subdev == NULL)
- return -EINVAL;
-
- mutex_lock(&video->mutex);
- ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
- mutex_unlock(&video->mutex);
-
- return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
-}
-
-static int
-isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+isp_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
struct isp_video *video = video_drvdata(file);
struct v4l2_subdev_format format;
struct v4l2_subdev *subdev;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ };
u32 pad;
int ret;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
subdev = isp_video_remote_subdev(video, &pad);
if (subdev == NULL)
return -EINVAL;
- /* Try the get crop operation first and fallback to get format if not
+ /* Try the get selection operation first and fall back to get format if not
* implemented.
*/
- ret = v4l2_subdev_call(subdev, video, g_crop, crop);
+ sdsel.pad = pad;
+ ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
+ if (!ret)
+ sel->r = sdsel.r;
if (ret != -ENOIOCTLCMD)
return ret;
@@ -815,28 +820,50 @@ isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
if (ret < 0)
return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
- crop->c.left = 0;
- crop->c.top = 0;
- crop->c.width = format.format.width;
- crop->c.height = format.format.height;
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format.format.width;
+ sel->r.height = format.format.height;
return 0;
}
static int
-isp_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
struct isp_video *video = video_drvdata(file);
struct v4l2_subdev *subdev;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ .flags = sel->flags,
+ .r = sel->r,
+ };
+ u32 pad;
int ret;
- subdev = isp_video_remote_subdev(video, NULL);
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ subdev = isp_video_remote_subdev(video, &pad);
if (subdev == NULL)
return -EINVAL;
+ sdsel.pad = pad;
mutex_lock(&video->mutex);
- ret = v4l2_subdev_call(subdev, video, s_crop, crop);
+ ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
mutex_unlock(&video->mutex);
+ if (!ret)
+ sel->r = sdsel.r;
return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}
@@ -1252,9 +1279,8 @@ static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
.vidioc_g_fmt_vid_out = isp_video_get_format,
.vidioc_s_fmt_vid_out = isp_video_set_format,
.vidioc_try_fmt_vid_out = isp_video_try_format,
- .vidioc_cropcap = isp_video_cropcap,
- .vidioc_g_crop = isp_video_get_crop,
- .vidioc_s_crop = isp_video_set_crop,
+ .vidioc_g_selection = isp_video_get_selection,
+ .vidioc_s_selection = isp_video_set_selection,
.vidioc_g_parm = isp_video_get_param,
.vidioc_s_parm = isp_video_set_param,
.vidioc_reqbufs = isp_video_reqbufs,
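
isp_video_get_selection()/isp_video_set_selection() above bridge the video-node ioctl to the subdev pad operation, treating -ENOIOCTLCMD as "not implemented" so the caller can fall back to get_fmt. The core of that bridging, as a hedged sketch:

#include <media/v4l2-subdev.h>

static int foo_bridge_get_selection(struct v4l2_subdev *sd, u32 pad,
				    struct v4l2_selection *sel)
{
	struct v4l2_subdev_selection sdsel = {
		.which  = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
		.pad    = pad,
	};
	int ret;

	ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
	if (!ret)
		sel->r = sdsel.r;	/* propagate the subdev rectangle */
	/* -ENOIOCTLCMD: subdev has no selection support, caller falls back */
	return ret;
}
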
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 2aaf4a8f71a0..c12209c701d3 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2006, Sascha Hauer, Pengutronix
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ * Copyright (C) 2016, Robert Jarzmik <robert.jarzmik@free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,6 +15,7 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -22,8 +24,8 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/time.h>
-#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/sched.h>
@@ -32,13 +34,15 @@
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/videobuf-dma-sg.h>
-#include <media/soc_camera.h>
-#include <media/drv-intf/soc_mediabus.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
#include <media/v4l2-of.h>
+#include <media/videobuf2-dma-sg.h>
+
#include <linux/videodev2.h>
#include <linux/platform_data/media/camera-pxa.h>
@@ -46,6 +50,9 @@
#define PXA_CAM_VERSION "0.0.6"
#define PXA_CAM_DRV_NAME "pxa27x-camera"
+#define DEFAULT_WIDTH 640
+#define DEFAULT_HEIGHT 480
+
/* Camera Interface */
#define CICR0 0x0000
#define CICR1 0x0004
@@ -168,6 +175,462 @@
CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \
CICR0_EOFM | CICR0_FOM)
+#define sensor_call(cam, o, f, args...) \
+ v4l2_subdev_call(cam->sensor, o, f, ##args)
+
+/*
+ * Format handling
+ */
+
+/**
+ * enum pxa_mbus_packing - data packing types on the media-bus
+ * @PXA_MBUS_PACKING_NONE: no packing, bit-for-bit transfer to RAM, one
+ * sample represents one pixel
+ * @PXA_MBUS_PACKING_2X8_PADHI: 16 bits transferred as two 8-bit samples; in
+ * the possibly incomplete byte, the high bits are padding
+ * @PXA_MBUS_PACKING_EXTEND16: sample width (e.g., 10 bits) has to be extended
+ * to 16 bits
+ */
+enum pxa_mbus_packing {
+ PXA_MBUS_PACKING_NONE,
+ PXA_MBUS_PACKING_2X8_PADHI,
+ PXA_MBUS_PACKING_EXTEND16,
+};
+
+/**
+ * enum pxa_mbus_order - sample order on the media bus
+ * @PXA_MBUS_ORDER_LE: least significant sample first
+ * @PXA_MBUS_ORDER_BE: most significant sample first
+ */
+enum pxa_mbus_order {
+ PXA_MBUS_ORDER_LE,
+ PXA_MBUS_ORDER_BE,
+};
+
+/**
+ * enum pxa_mbus_layout - planes layout in memory
+ * @PXA_MBUS_LAYOUT_PACKED: color components packed
+ * @PXA_MBUS_LAYOUT_PLANAR_2Y_U_V: YUV components stored in 3 planes (4:2:2)
+ * @PXA_MBUS_LAYOUT_PLANAR_2Y_C: YUV components stored in a luma and a
+ * chroma plane (C plane is half the size
+ * of Y plane)
+ * @PXA_MBUS_LAYOUT_PLANAR_Y_C: YUV components stored in a luma and a
+ * chroma plane (C plane is the same size
+ * as Y plane)
+ */
+enum pxa_mbus_layout {
+ PXA_MBUS_LAYOUT_PACKED = 0,
+ PXA_MBUS_LAYOUT_PLANAR_2Y_U_V,
+ PXA_MBUS_LAYOUT_PLANAR_2Y_C,
+ PXA_MBUS_LAYOUT_PLANAR_Y_C,
+};
+
+/**
+ * struct pxa_mbus_pixelfmt - Data format on the media bus
+ * @name: Name of the format
+ * @fourcc: Fourcc code that will be obtained if the data is
+ * stored in memory with the @packing and @order below
+ * @packing: Type of sample packing that has to be used
+ * @order: Sample order when storing in memory
+ * @bits_per_sample: How many bits the bridge has to sample
+ */
+struct pxa_mbus_pixelfmt {
+ const char *name;
+ u32 fourcc;
+ enum pxa_mbus_packing packing;
+ enum pxa_mbus_order order;
+ enum pxa_mbus_layout layout;
+ u8 bits_per_sample;
+};
+
+/**
+ * struct pxa_mbus_lookup - Lookup FOURCC IDs by mediabus codes for pass-through
+ * @code: mediabus pixel-code
+ * @fmt: pixel format description
+ */
+struct pxa_mbus_lookup {
+ u32 code;
+ struct pxa_mbus_pixelfmt fmt;
+};
+
+static const struct pxa_mbus_lookup mbus_fmt[] = {
+{
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .name = "RGB555",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .name = "RGB555X",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_BE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .name = "RGB565",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .name = "RGB565X",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_BE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .name = "Bayer 8 BGGR",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .name = "Grey",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .name = "Grey 10bit",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_BE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .name = "RGB444",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_BE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY 16bit",
+ .bits_per_sample = 16,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY 16bit",
+ .bits_per_sample = 16,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV 16bit",
+ .bits_per_sample = 16,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU 16bit",
+ .bits_per_sample = 16,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .name = "Bayer 8 GRBG",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG10DPCM8,
+ .name = "Bayer 10 BGGR DPCM 8",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .name = "Bayer 10 GBRG",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .name = "Bayer 10 GRBG",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .name = "Bayer 10 RGGB",
+ .bits_per_sample = 10,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .name = "Bayer 12 BGGR",
+ .bits_per_sample = 12,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .name = "Bayer 12 GBRG",
+ .bits_per_sample = 12,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .name = "Bayer 12 GRBG",
+ .bits_per_sample = 12,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .name = "Bayer 12 RGGB",
+ .bits_per_sample = 12,
+ .packing = PXA_MBUS_PACKING_EXTEND16,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+},
+};
+
+static s32 pxa_mbus_bytes_per_line(u32 width, const struct pxa_mbus_pixelfmt *mf)
+{
+ if (mf->layout != PXA_MBUS_LAYOUT_PACKED)
+ return width * mf->bits_per_sample / 8;
+
+ switch (mf->packing) {
+ case PXA_MBUS_PACKING_NONE:
+ return width * mf->bits_per_sample / 8;
+ case PXA_MBUS_PACKING_2X8_PADHI:
+ case PXA_MBUS_PACKING_EXTEND16:
+ return width * 2;
+ }
+ return -EINVAL;
+}
+
+static s32 pxa_mbus_image_size(const struct pxa_mbus_pixelfmt *mf,
+ u32 bytes_per_line, u32 height)
+{
+ switch (mf->packing) {
+ case PXA_MBUS_PACKING_2X8_PADHI:
+ return bytes_per_line * height * 2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct pxa_mbus_pixelfmt *pxa_mbus_find_fmtdesc(
+ u32 code,
+ const struct pxa_mbus_lookup *lookup,
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ if (lookup[i].code == code)
+ return &lookup[i].fmt;
+
+ return NULL;
+}
+
+static const struct pxa_mbus_pixelfmt *pxa_mbus_get_fmtdesc(
+ u32 code)
+{
+ return pxa_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
+}
+
+static unsigned int pxa_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
+ unsigned int flags)
+{
+ unsigned long common_flags;
+ bool hsync = true, vsync = true, pclk, data, mode;
+ bool mipi_lanes, mipi_clock;
+
+ common_flags = cfg->flags & flags;
+
+ switch (cfg->type) {
+ case V4L2_MBUS_PARALLEL:
+ hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ /* fall through */
+ case V4L2_MBUS_BT656:
+ pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW);
+ mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE);
+ return (!hsync || !vsync || !pclk || !data || !mode) ?
+ 0 : common_flags;
+ case V4L2_MBUS_CSI2:
+ mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES;
+ mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
+ V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
+ return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
+ }
+ return 0;
+}
+
+/**
+ * struct soc_camera_format_xlate - match between host and sensor formats
+ * @code: code of a sensor provided format
+ * @host_fmt: host format after host translation from code
+ *
+ * Host and sensor translation structure. Used in the table of host and
+ * sensor format matches in soc_camera_device. A host can override the
+ * generic list generation by implementing get_formats(), and use the
+ * table for format checks and format setup.
+ */
+struct soc_camera_format_xlate {
+ u32 code;
+ const struct pxa_mbus_pixelfmt *host_fmt;
+};
+
/*
* Structures
*/
@@ -180,19 +643,33 @@ enum pxa_camera_active_dma {
/* buffer for one video frame */
struct pxa_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head queue;
u32 code;
+ int nb_planes;
/* our descriptor lists for Y, U and V channels */
struct dma_async_tx_descriptor *descs[3];
dma_cookie_t cookie[3];
struct scatterlist *sg[3];
int sg_len[3];
+ size_t plane_sizes[3];
int inwork;
enum pxa_camera_active_dma active_dma;
};
struct pxa_camera_dev {
- struct soc_camera_host soc_host;
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct v4l2_async_notifier notifier;
+ struct vb2_queue vb2_vq;
+ struct v4l2_subdev *sensor;
+ struct soc_camera_format_xlate *user_formats;
+ const struct soc_camera_format_xlate *current_fmt;
+ struct v4l2_pix_format current_pix;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_async_subdev *asds[1];
+
/*
* PXA27x is only supposed to handle one camera on its Quick Capture
* interface. If anyone ever builds hardware to enable more than
@@ -212,11 +689,14 @@ struct pxa_camera_dev {
unsigned long ciclk;
unsigned long mclk;
u32 mclk_divisor;
+ struct v4l2_clk *mclk_clk;
u16 width_flags; /* max 10 bits */
struct list_head capture;
spinlock_t lock;
+ struct mutex mlock;
+ unsigned int buf_sequence;
struct pxa_buffer *active;
struct tasklet_struct task_eof;
@@ -230,59 +710,90 @@ struct pxa_cam {
static const char *pxa_cam_driver_description = "PXA_Camera";
-static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
-
/*
- * Videobuf operations
+ * Format translation functions
*/
-static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static const struct soc_camera_format_xlate
+*pxa_mbus_xlate_by_fourcc(struct soc_camera_format_xlate *user_formats,
+ unsigned int fourcc)
{
- struct soc_camera_device *icd = vq->priv_data;
+ unsigned int i;
- dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
-
- *size = icd->sizeimage;
-
- if (0 == *count)
- *count = 32;
- if (*size * *count > vid_limit * 1024 * 1024)
- *count = (vid_limit * 1024 * 1024) / *size;
-
- return 0;
+ for (i = 0; user_formats[i].code; i++)
+ if (user_formats[i].host_fmt->fourcc == fourcc)
+ return user_formats + i;
+ return NULL;
}
-static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
+static struct soc_camera_format_xlate *pxa_mbus_build_fmts_xlate(
+ struct v4l2_device *v4l2_dev, struct v4l2_subdev *subdev,
+ int (*get_formats)(struct v4l2_device *, unsigned int,
+ struct soc_camera_format_xlate *xlate))
{
- struct soc_camera_device *icd = vq->priv_data;
- struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
- int i;
-
- BUG_ON(in_interrupt());
+ unsigned int i, fmts = 0, raw_fmts = 0;
+ int ret;
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct soc_camera_format_xlate *user_formats;
- dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
- &buf->vb, buf->vb.baddr, buf->vb.bsize);
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code)) {
+ raw_fmts++;
+ code.index++;
+ }
/*
- * This waits until this buffer is out of danger, i.e., until it is no
- * longer in STATE_QUEUED or STATE_ACTIVE
+ * First pass - only count formats this host-sensor
+ * configuration can provide
*/
- videobuf_waiton(vq, &buf->vb, 0, 0);
+ for (i = 0; i < raw_fmts; i++) {
+ ret = get_formats(v4l2_dev, i, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ fmts += ret;
+ }
- for (i = 0; i < 3 && buf->descs[i]; i++) {
- dmaengine_desc_free(buf->descs[i]);
- kfree(buf->sg[i]);
- buf->descs[i] = NULL;
- buf->sg[i] = NULL;
- buf->sg_len[i] = 0;
+ if (!fmts)
+ return ERR_PTR(-ENXIO);
+
+ user_formats = kcalloc(fmts + 1, sizeof(*user_formats), GFP_KERNEL);
+ if (!user_formats)
+ return ERR_PTR(-ENOMEM);
+
+ /* Second pass - actually fill data formats */
+ fmts = 0;
+ for (i = 0; i < raw_fmts; i++) {
+ ret = get_formats(v4l2_dev, i, user_formats + fmts);
+ if (ret < 0)
+ goto egfmt;
+ fmts += ret;
}
- videobuf_dma_unmap(vq->dev, dma);
- videobuf_dma_free(dma);
+ user_formats[fmts].code = 0;
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ return user_formats;
+egfmt:
+ kfree(user_formats);
+ return ERR_PTR(ret);
+}
- dev_dbg(icd->parent, "%s end (vb=0x%p) 0x%08lx %d\n", __func__,
- &buf->vb, buf->vb.baddr, buf->vb.bsize);
+/*
+ * Videobuf operations
+ */
+static struct pxa_buffer *vb2_to_pxa_buffer(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ return container_of(vbuf, struct pxa_buffer, vbuf);
+}
+
+static struct device *pcdev_to_dev(struct pxa_camera_dev *pcdev)
+{
+ return pcdev->v4l2_dev.dev;
+}
+
+static struct pxa_camera_dev *v4l2_dev_to_pcdev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct pxa_camera_dev, v4l2_dev);
}
static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
@@ -312,31 +823,26 @@ static void pxa_camera_dma_irq_v(void *data)
/**
* pxa_init_dma_channel - init dma descriptors
* @pcdev: pxa camera device
- * @buf: pxa buffer to find pxa dma channel
+ * @vb: videobuffer2 buffer
* @dma: dma video buffer
* @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
* @cibr: camera Receive Buffer Register
- * @size: bytes to transfer
- * @offset: offset in videobuffer of the first byte to transfer
*
* Prepares the pxa dma descriptors to transfer one camera channel.
*
* Returns 0 if success or -ENOMEM if no memory is available
*/
static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
- struct pxa_buffer *buf,
- struct videobuf_dmabuf *dma, int channel,
- int cibr, int size, int offset)
+ struct pxa_buffer *buf, int channel,
+ struct scatterlist *sg, int sglen)
{
struct dma_chan *dma_chan = pcdev->dma_chans[channel];
- struct scatterlist *sg = buf->sg[channel];
- int sglen = buf->sg_len[channel];
struct dma_async_tx_descriptor *tx;
tx = dmaengine_prep_slave_sg(dma_chan, sg, sglen, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
if (!tx) {
- dev_err(pcdev->soc_host.v4l2_dev.dev,
+ dev_err(pcdev_to_dev(pcdev),
"dmaengine_prep_slave_sg failed\n");
goto fail;
}
@@ -357,11 +863,9 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
buf->descs[channel] = tx;
return 0;
fail:
- kfree(sg);
-
- dev_dbg(pcdev->soc_host.v4l2_dev.dev,
- "%s (vb=0x%p) dma_tx=%p\n",
- __func__, &buf->vb, tx);
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s (vb=%p) dma_tx=%p\n",
+ __func__, buf, tx);
return -ENOMEM;
}
@@ -370,133 +874,10 @@ static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
struct pxa_buffer *buf)
{
buf->active_dma = DMA_Y;
- if (pcdev->channels == 3)
+ if (buf->nb_planes == 3)
buf->active_dma |= DMA_U | DMA_V;
}
-/*
- * Please check the DMA prepared buffer structure in :
- * Documentation/video4linux/pxa_camera.txt
- * Please check also in pxa_camera_check_link_miss() to understand why DMA chain
- * modification while DMA chain is running will work anyway.
- */
-static int pxa_videobuf_prepare(struct videobuf_queue *vq,
- struct videobuf_buffer *vb, enum v4l2_field field)
-{
- struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct pxa_camera_dev *pcdev = ici->priv;
- struct device *dev = pcdev->soc_host.v4l2_dev.dev;
- struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
- int ret;
- int size_y, size_u = 0, size_v = 0;
- size_t sizes[3];
-
- dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
- vb, vb->baddr, vb->bsize);
-
- /* Added list head initialization on alloc */
- WARN_ON(!list_empty(&vb->queue));
-
-#ifdef DEBUG
- /*
- * This can be useful if you want to see if we actually fill
- * the buffer with something
- */
- memset((void *)vb->baddr, 0xaa, vb->bsize);
-#endif
-
- BUG_ON(NULL == icd->current_fmt);
-
- /*
- * I think, in buf_prepare you only have to protect global data,
- * the actual buffer is yours
- */
- buf->inwork = 1;
-
- if (buf->code != icd->current_fmt->code ||
- vb->width != icd->user_width ||
- vb->height != icd->user_height ||
- vb->field != field) {
- buf->code = icd->current_fmt->code;
- vb->width = icd->user_width;
- vb->height = icd->user_height;
- vb->field = field;
- vb->state = VIDEOBUF_NEEDS_INIT;
- }
-
- vb->size = icd->sizeimage;
- if (0 != vb->baddr && vb->bsize < vb->size) {
- ret = -EINVAL;
- goto out;
- }
-
- if (vb->state == VIDEOBUF_NEEDS_INIT) {
- int size = vb->size;
- struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
-
- ret = videobuf_iolock(vq, vb, NULL);
- if (ret)
- goto out;
-
- if (pcdev->channels == 3) {
- size_y = size / 2;
- size_u = size_v = size / 4;
- } else {
- size_y = size;
- }
-
- sizes[0] = size_y;
- sizes[1] = size_u;
- sizes[2] = size_v;
- ret = sg_split(dma->sglist, dma->sglen, 0, pcdev->channels,
- sizes, buf->sg, buf->sg_len, GFP_KERNEL);
- if (ret < 0) {
- dev_err(dev, "sg_split failed: %d\n", ret);
- goto fail;
- }
-
- /* init DMA for Y channel */
- ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0,
- size_y, 0);
- if (ret) {
- dev_err(dev, "DMA initialization for Y/RGB failed\n");
- goto fail;
- }
-
- /* init DMA for U channel */
- if (size_u)
- ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1,
- size_u, size_y);
- if (ret) {
- dev_err(dev, "DMA initialization for U failed\n");
- goto fail;
- }
-
- /* init DMA for V channel */
- if (size_v)
- ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2,
- size_v, size_y + size_u);
- if (ret) {
- dev_err(dev, "DMA initialization for V failed\n");
- goto fail;
- }
-
- vb->state = VIDEOBUF_PREPARED;
- }
-
- buf->inwork = 0;
- pxa_videobuf_set_actdma(pcdev, buf);
-
- return 0;
-
-fail:
- free_buffer(vq, buf);
-out:
- buf->inwork = 0;
- return ret;
-}
-
/**
* pxa_dma_start_channels - start DMA channel for active buffer
* @pcdev: pxa camera device
@@ -507,12 +888,9 @@ out:
static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
{
int i;
- struct pxa_buffer *active;
-
- active = pcdev->active;
for (i = 0; i < pcdev->channels; i++) {
- dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ dev_dbg(pcdev_to_dev(pcdev),
"%s (channel=%d)\n", __func__, i);
dma_async_issue_pending(pcdev->dma_chans[i]);
}
@@ -523,7 +901,7 @@ static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
int i;
for (i = 0; i < pcdev->channels; i++) {
- dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ dev_dbg(pcdev_to_dev(pcdev),
"%s (channel=%d)\n", __func__, i);
dmaengine_terminate_all(pcdev->dma_chans[i]);
}
@@ -536,7 +914,7 @@ static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev,
for (i = 0; i < pcdev->channels; i++) {
buf->cookie[i] = dmaengine_submit(buf->descs[i]);
- dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ dev_dbg(pcdev_to_dev(pcdev),
"%s (channel=%d) : submit vb=%p cookie=%d\n",
__func__, i, buf, buf->descs[i]->cookie);
}
@@ -554,7 +932,7 @@ static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
{
unsigned long cicr0;
- dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
+ dev_dbg(pcdev_to_dev(pcdev), "%s\n", __func__);
__raw_writel(__raw_readl(pcdev->base + CISR), pcdev->base + CISR);
/* Enable End-Of-Frame Interrupt */
cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
@@ -572,72 +950,24 @@ static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev)
__raw_writel(cicr0, pcdev->base + CICR0);
pcdev->active = NULL;
- dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
-}
-
-/* Called under spinlock_irqsave(&pcdev->lock, ...) */
-static void pxa_videobuf_queue(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct pxa_camera_dev *pcdev = ici->priv;
- struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
-
- dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n",
- __func__, vb, vb->baddr, vb->bsize, pcdev->active);
-
- list_add_tail(&vb->queue, &pcdev->capture);
-
- vb->state = VIDEOBUF_ACTIVE;
- pxa_dma_add_tail_buf(pcdev, buf);
-
- if (!pcdev->active)
- pxa_camera_start_capture(pcdev);
-}
-
-static void pxa_videobuf_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
-#ifdef DEBUG
- struct soc_camera_device *icd = vq->priv_data;
- struct device *dev = icd->parent;
-
- dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
- vb, vb->baddr, vb->bsize);
-
- switch (vb->state) {
- case VIDEOBUF_ACTIVE:
- dev_dbg(dev, "%s (active)\n", __func__);
- break;
- case VIDEOBUF_QUEUED:
- dev_dbg(dev, "%s (queued)\n", __func__);
- break;
- case VIDEOBUF_PREPARED:
- dev_dbg(dev, "%s (prepared)\n", __func__);
- break;
- default:
- dev_dbg(dev, "%s (unknown)\n", __func__);
- break;
- }
-#endif
-
- free_buffer(vq, buf);
+ dev_dbg(pcdev_to_dev(pcdev), "%s\n", __func__);
}
static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
- struct videobuf_buffer *vb,
- struct pxa_buffer *buf)
+ struct pxa_buffer *buf,
+ enum vb2_buffer_state state)
{
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
/* _init is used to debug races, see comment in pxa_camera_reqbufs() */
- list_del_init(&vb->queue);
- vb->state = VIDEOBUF_DONE;
- v4l2_get_timestamp(&vb->ts);
- vb->field_count++;
- wake_up(&vb->done);
- dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s dequeud buffer (vb=0x%p)\n",
- __func__, vb);
+ list_del_init(&buf->queue);
+ vb->timestamp = ktime_get_ns();
+ vbuf->sequence = pcdev->buf_sequence++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ dev_dbg(pcdev_to_dev(pcdev), "%s dequeued buffer (buf=0x%p)\n",
+ __func__, buf);
if (list_empty(&pcdev->capture)) {
pxa_camera_stop_capture(pcdev);
@@ -645,7 +975,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
}
pcdev->active = list_entry(pcdev->capture.next,
- struct pxa_buffer, vb.queue);
+ struct pxa_buffer, queue);
}
/**
@@ -670,7 +1000,7 @@ static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev,
{
bool is_dma_stopped = last_submitted != last_issued;
- dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ dev_dbg(pcdev_to_dev(pcdev),
"%s : top queued buffer=%p, is_dma_stopped=%d\n",
__func__, pcdev->active, is_dma_stopped);
@@ -681,19 +1011,17 @@ static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev,
static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
enum pxa_camera_active_dma act_dma)
{
- struct device *dev = pcdev->soc_host.v4l2_dev.dev;
struct pxa_buffer *buf, *last_buf;
unsigned long flags;
u32 camera_status, overrun;
int chan;
- struct videobuf_buffer *vb;
enum dma_status last_status;
dma_cookie_t last_issued;
spin_lock_irqsave(&pcdev->lock, flags);
camera_status = __raw_readl(pcdev->base + CISR);
- dev_dbg(dev, "camera dma irq, cisr=0x%x dma=%d\n",
+ dev_dbg(pcdev_to_dev(pcdev), "camera dma irq, cisr=0x%x dma=%d\n",
camera_status, act_dma);
overrun = CISR_IFO_0;
if (pcdev->channels == 3)
@@ -714,9 +1042,8 @@ static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
if (!pcdev->active)
goto out;
- vb = &pcdev->active->vb;
- buf = container_of(vb, struct pxa_buffer, vb);
- WARN_ON(buf->inwork || list_empty(&vb->queue));
+ buf = pcdev->active;
+ WARN_ON(buf->inwork || list_empty(&buf->queue));
/*
* It's normal if the last frame creates an overrun, as there
@@ -734,23 +1061,23 @@ static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
break;
}
last_buf = list_entry(pcdev->capture.prev,
- struct pxa_buffer, vb.queue);
+ struct pxa_buffer, queue);
last_status = dma_async_is_tx_complete(pcdev->dma_chans[chan],
last_buf->cookie[chan],
NULL, &last_issued);
if (camera_status & overrun &&
last_status != DMA_COMPLETE) {
- dev_dbg(dev, "FIFO overrun! CISR: %x\n",
+ dev_dbg(pcdev_to_dev(pcdev), "FIFO overrun! CISR: %x\n",
camera_status);
pxa_camera_stop_capture(pcdev);
- list_for_each_entry(buf, &pcdev->capture, vb.queue)
+ list_for_each_entry(buf, &pcdev->capture, queue)
pxa_dma_add_tail_buf(pcdev, buf);
pxa_camera_start_capture(pcdev);
goto out;
}
buf->active_dma &= ~act_dma;
if (!buf->active_dma) {
- pxa_camera_wakeup(pcdev, vb, buf);
+ pxa_camera_wakeup(pcdev, buf, VB2_BUF_STATE_DONE);
pxa_camera_check_link_miss(pcdev, last_buf->cookie[chan],
last_issued);
}
@@ -759,33 +1086,10 @@ out:
spin_unlock_irqrestore(&pcdev->lock, flags);
}
-static struct videobuf_queue_ops pxa_videobuf_ops = {
- .buf_setup = pxa_videobuf_setup,
- .buf_prepare = pxa_videobuf_prepare,
- .buf_queue = pxa_videobuf_queue,
- .buf_release = pxa_videobuf_release,
-};
-
-static void pxa_camera_init_videobuf(struct videobuf_queue *q,
- struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct pxa_camera_dev *pcdev = ici->priv;
-
- /*
- * We must pass NULL as dev pointer, then all pci_* dma operations
- * transform to normal dma_* ones.
- */
- videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct pxa_buffer), icd, &ici->host_lock);
-}
-
static u32 mclk_get_divisor(struct platform_device *pdev,
struct pxa_camera_dev *pcdev)
{
unsigned long mclk = pcdev->mclk;
- struct device *dev = &pdev->dev;
u32 div;
unsigned long lcdclk;
@@ -795,7 +1099,8 @@ static u32 mclk_get_divisor(struct platform_device *pdev,
/* mclk <= ciclk / 4 (27.4.2) */
if (mclk > lcdclk / 4) {
mclk = lcdclk / 4;
- dev_warn(dev, "Limiting master clock to %lu\n", mclk);
+ dev_warn(pcdev_to_dev(pcdev),
+ "Limiting master clock to %lu\n", mclk);
}
/* We verify mclk != 0, so if anyone breaks it, here comes their Oops */
@@ -805,7 +1110,7 @@ static u32 mclk_get_divisor(struct platform_device *pdev,
if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
pcdev->mclk = lcdclk / (2 * (div + 1));
- dev_dbg(dev, "LCD clock %luHz, target freq %luHz, divisor %u\n",
+ dev_dbg(pcdev_to_dev(pcdev), "LCD clock %luHz, target freq %luHz, divisor %u\n",
lcdclk, mclk, div);
return div;
@@ -860,9 +1165,8 @@ static void pxa_camera_eof(unsigned long arg)
struct pxa_camera_dev *pcdev = (struct pxa_camera_dev *)arg;
unsigned long cifr;
struct pxa_buffer *buf;
- struct videobuf_buffer *vb;
- dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ dev_dbg(pcdev_to_dev(pcdev),
"Camera interrupt status 0x%x\n",
__raw_readl(pcdev->base + CISR));
@@ -871,9 +1175,8 @@ static void pxa_camera_eof(unsigned long arg)
__raw_writel(cifr, pcdev->base + CIFR);
pcdev->active = list_first_entry(&pcdev->capture,
- struct pxa_buffer, vb.queue);
- vb = &pcdev->active->vb;
- buf = container_of(vb, struct pxa_buffer, vb);
+ struct pxa_buffer, queue);
+ buf = pcdev->active;
pxa_videobuf_set_actdma(pcdev, buf);
pxa_dma_start_channels(pcdev);
@@ -885,7 +1188,7 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
unsigned long status, cicr0;
status = __raw_readl(pcdev->base + CISR);
- dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ dev_dbg(pcdev_to_dev(pcdev),
"Camera interrupt status 0x%lx\n", status);
if (!status)
@@ -902,47 +1205,6 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int pxa_camera_add_device(struct soc_camera_device *icd)
-{
- dev_info(icd->parent, "PXA Camera driver attached to camera %d\n",
- icd->devnum);
-
- return 0;
-}
-
-static void pxa_camera_remove_device(struct soc_camera_device *icd)
-{
- dev_info(icd->parent, "PXA Camera driver detached from camera %d\n",
- icd->devnum);
-}
-
-/*
- * The following two functions absolutely depend on the fact, that
- * there can be only one camera on PXA quick capture interface
- * Called with .host_lock held
- */
-static int pxa_camera_clock_start(struct soc_camera_host *ici)
-{
- struct pxa_camera_dev *pcdev = ici->priv;
-
- pxa_camera_activate(pcdev);
-
- return 0;
-}
-
-/* Called with .host_lock held */
-static void pxa_camera_clock_stop(struct soc_camera_host *ici)
-{
- struct pxa_camera_dev *pcdev = ici->priv;
-
- /* disable capture, disable interrupts */
- __raw_writel(0x3ff, pcdev->base + CICR0);
-
- /* Stop DMA engine */
- pxa_dma_stop_channels(pcdev);
- pxa_camera_deactivate(pcdev);
-}
-
static int test_platform_param(struct pxa_camera_dev *pcdev,
unsigned char buswidth, unsigned long *flags)
{
@@ -968,15 +1230,12 @@ static int test_platform_param(struct pxa_camera_dev *pcdev,
return -EINVAL;
}
-static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
+static void pxa_camera_setup_cicr(struct pxa_camera_dev *pcdev,
unsigned long flags, __u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct pxa_camera_dev *pcdev = ici->priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
unsigned long dw, bpp;
u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top;
- int ret = v4l2_subdev_call(sd, sensor, g_skip_top_lines, &y_skip_top);
+ int ret = sensor_call(pcdev, sensor, g_skip_top_lines, &y_skip_top);
if (ret < 0)
y_skip_top = 0;
@@ -985,7 +1244,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
* Datawidth is now guaranteed to be equal to one of the three values.
* We fix bit-per-pixel equal to data-width...
*/
- switch (icd->current_fmt->host_fmt->bits_per_sample) {
+ switch (pcdev->current_fmt->host_fmt->bits_per_sample) {
case 10:
dw = 4;
bpp = 0x40;
@@ -1019,7 +1278,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
if (cicr0 & CICR0_ENB)
__raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0);
- cicr1 = CICR1_PPL_VAL(icd->user_width - 1) | bpp | dw;
+ cicr1 = CICR1_PPL_VAL(pcdev->current_pix.width - 1) | bpp | dw;
switch (pixfmt) {
case V4L2_PIX_FMT_YUV422P:
@@ -1048,7 +1307,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
}
cicr2 = 0;
- cicr3 = CICR3_LPF_VAL(icd->user_height - 1) |
+ cicr3 = CICR3_LPF_VAL(pcdev->current_pix.height - 1) |
CICR3_BFW_VAL(min((u32)255, y_skip_top));
cicr4 |= pcdev->mclk_divisor;
@@ -1064,28 +1323,271 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
__raw_writel(cicr0, pcdev->base + CICR0);
}
-static int pxa_camera_set_bus_param(struct soc_camera_device *icd)
+/*
+ * Videobuf2 section
+ */
+static void pxa_buffer_cleanup(struct pxa_buffer *buf)
+{
+ int i;
+
+ for (i = 0; i < 3 && buf->descs[i]; i++) {
+ dmaengine_desc_free(buf->descs[i]);
+ kfree(buf->sg[i]);
+ buf->descs[i] = NULL;
+ buf->sg[i] = NULL;
+ buf->sg_len[i] = 0;
+ buf->plane_sizes[i] = 0;
+ }
+ buf->nb_planes = 0;
+}
+
+static int pxa_buffer_init(struct pxa_camera_dev *pcdev,
+ struct pxa_buffer *buf)
+{
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
+ int nb_channels = pcdev->channels;
+ int i, ret = 0;
+ unsigned long size = vb2_plane_size(vb, 0);
+
+ switch (nb_channels) {
+ case 1:
+ buf->plane_sizes[0] = size;
+ break;
+ case 3:
+ buf->plane_sizes[0] = size / 2;
+ buf->plane_sizes[1] = size / 4;
+ buf->plane_sizes[2] = size / 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ buf->nb_planes = nb_channels;
+
+ ret = sg_split(sgt->sgl, sgt->nents, 0, nb_channels,
+ buf->plane_sizes, buf->sg, buf->sg_len, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(pcdev_to_dev(pcdev),
+ "sg_split failed: %d\n", ret);
+ return ret;
+ }
+ for (i = 0; i < nb_channels; i++) {
+ ret = pxa_init_dma_channel(pcdev, buf, i,
+ buf->sg[i], buf->sg_len[i]);
+ if (ret) {
+ pxa_buffer_cleanup(buf);
+ return ret;
+ }
+ }
+ INIT_LIST_HEAD(&buf->queue);
+
+ return ret;
+}
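A worked example may help here (an editor's sketch, not part of the patch): for a 640x480 V4L2_PIX_FMT_YUV422P capture, the single vb2 plane is carved up across the three DMA channels as follows.

	/*
	 * size           = 640 * 480 * 2 = 614400   (whole plane)
	 * plane_sizes[0] = size / 2      = 307200   (Y, 640x480, 1 byte/px)
	 * plane_sizes[1] = size / 4      = 153600   (U, 320x480)
	 * plane_sizes[2] = size / 4      = 153600   (V, 320x480)
	 *
	 * sg_split() then cuts the buffer's scatterlist at exactly these
	 * byte offsets, yielding one sub-scatterlist per DMA channel.
	 */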
+
+static void pxac_vb2_cleanup(struct vb2_buffer *vb)
+{
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s(vb=%p)\n", __func__, vb);
+ pxa_buffer_cleanup(buf);
+}
+
+static void pxac_vb2_queue(struct vb2_buffer *vb)
+{
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s(vb=%p) nb_channels=%d size=%lu active=%p\n",
+ __func__, vb, pcdev->channels, vb2_get_plane_payload(vb, 0),
+ pcdev->active);
+
+ list_add_tail(&buf->queue, &pcdev->capture);
+
+ pxa_dma_add_tail_buf(pcdev, buf);
+}
+
+/*
+ * See the description of the DMA-prepared buffer structure in
+ * Documentation/video4linux/pxa_camera.txt.
+ * See also pxa_camera_check_link_miss() to understand why modifying the
+ * DMA chain while it is running still works.
+ */
+static int pxac_vb2_prepare(struct vb2_buffer *vb)
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+ int ret = 0;
+
+ switch (pcdev->channels) {
+ case 1:
+ case 3:
+ vb2_set_plane_payload(vb, 0, pcdev->current_pix.sizeimage);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s (vb=%p) nb_channels=%d size=%lu\n",
+ __func__, vb, pcdev->channels, vb2_get_plane_payload(vb, 0));
+
+ WARN_ON(!pcdev->current_fmt);
+
+#ifdef DEBUG
+ /*
+ * This can be useful if you want to see if we actually fill
+ * the buffer with something
+ */
+ {
+ unsigned int i;
+
+ for (i = 0; i < vb->num_planes; i++)
+ memset(vb2_plane_vaddr(vb, i), 0xaa,
+ vb2_get_plane_payload(vb, i));
+ }
+#endif
+
+ /*
+ * In buf_prepare only global data has to be protected; the actual
+ * buffer belongs to us here.
+ */
+ buf->inwork = 0;
+ pxa_videobuf_set_actdma(pcdev, buf);
+
+ return ret;
+}
+
+static int pxac_vb2_init(struct vb2_buffer *vb)
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s(nb_channels=%d)\n",
+ __func__, pcdev->channels);
+
+ return pxa_buffer_init(pcdev, buf);
+}
+
+static int pxac_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbufs,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vq);
+ int size = pcdev->current_pix.sizeimage;
+
+ dev_dbg(pcdev_to_dev(pcdev),
+ "%s(vq=%p nbufs=%d num_planes=%d size=%d)\n",
+ __func__, vq, *nbufs, *num_planes, size);
+ /*
+ * Called from VIDIOC_REQBUFS or in compatibility mode. For the YUV422P
+ * format, even though there are 3 planes Y, U and V, we reply that there
+ * is only one plane, containing Y, U and V data, one after the other.
+ */
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *num_planes = 1;
+ switch (pcdev->channels) {
+ case 1:
+ case 3:
+ sizes[0] = size;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!*nbufs)
+ *nbufs = 1;
+
+ return 0;
+}
+
+static int pxac_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vq);
+
+ dev_dbg(pcdev_to_dev(pcdev), "%s(count=%d) active=%p\n",
+ __func__, count, pcdev->active);
+
+ pcdev->buf_sequence = 0;
+ if (!pcdev->active)
+ pxa_camera_start_capture(pcdev);
+
+ return 0;
+}
+
+static void pxac_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vq);
+ struct pxa_buffer *buf, *tmp;
+
+ dev_dbg(pcdev_to_dev(pcdev), "%s active=%p\n",
+ __func__, pcdev->active);
+ pxa_camera_stop_capture(pcdev);
+
+ list_for_each_entry_safe(buf, tmp, &pcdev->capture, queue)
+ pxa_camera_wakeup(pcdev, buf, VB2_BUF_STATE_ERROR);
+}
+
+static struct vb2_ops pxac_vb2_ops = {
+ .queue_setup = pxac_vb2_queue_setup,
+ .buf_init = pxac_vb2_init,
+ .buf_prepare = pxac_vb2_prepare,
+ .buf_queue = pxac_vb2_queue,
+ .buf_cleanup = pxac_vb2_cleanup,
+ .start_streaming = pxac_vb2_start_streaming,
+ .stop_streaming = pxac_vb2_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int pxa_camera_init_videobuf2(struct pxa_camera_dev *pcdev)
+{
+ int ret;
+ struct vb2_queue *vq = &pcdev->vb2_vq;
+
+ memset(vq, 0, sizeof(*vq));
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ vq->drv_priv = pcdev;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->buf_struct_size = sizeof(struct pxa_buffer);
+ vq->dev = pcdev->v4l2_dev.dev;
+
+ vq->ops = &pxac_vb2_ops;
+ vq->mem_ops = &vb2_dma_sg_memops;
+ vq->lock = &pcdev->mlock;
+
+ ret = vb2_queue_init(vq);
+ dev_dbg(pcdev_to_dev(pcdev),
+ "vb2_queue_init(vq=%p): %d\n", vq, ret);
+
+ return ret;
+}
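A note on the queue configuration above (editorial): vb2_dma_sg_memops fits this hardware because the PXA DMA engine walks scatter-gather descriptor chains, so capture buffers need not be physically contiguous; pxa_buffer_init() later splits each buffer's sg_table per channel. The queue only becomes reachable from user space once it is attached to the video device, which this patch does in pxa_camera_sensor_bound():

	pcdev->vdev.queue = &pcdev->vb2_vq;	/* lets the vb2 fop and ioctl helpers find it */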
+
+/*
+ * Video ioctls section
+ */
+static int pxa_camera_set_bus_param(struct pxa_camera_dev *pcdev)
{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct pxa_camera_dev *pcdev = ici->priv;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- u32 pixfmt = icd->current_fmt->host_fmt->fourcc;
+ u32 pixfmt = pcdev->current_fmt->host_fmt->fourcc;
unsigned long bus_flags, common_flags;
int ret;
- struct pxa_cam *cam = icd->host_priv;
- ret = test_platform_param(pcdev, icd->current_fmt->host_fmt->bits_per_sample,
+ ret = test_platform_param(pcdev,
+ pcdev->current_fmt->host_fmt->bits_per_sample,
&bus_flags);
if (ret < 0)
return ret;
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ ret = sensor_call(pcdev, video, g_mbus_config, &cfg);
if (!ret) {
- common_flags = soc_mbus_config_compatible(&cfg,
+ common_flags = pxa_mbus_config_compatible(&cfg,
bus_flags);
if (!common_flags) {
- dev_warn(icd->parent,
+ dev_warn(pcdev_to_dev(pcdev),
"Flags incompatible: camera 0x%x, host 0x%lx\n",
cfg.flags, bus_flags);
return -EINVAL;
@@ -1124,26 +1626,22 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd)
}
cfg.flags = common_flags;
- ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ ret = sensor_call(pcdev, video, s_mbus_config, &cfg);
if (ret < 0 && ret != -ENOIOCTLCMD) {
- dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
+ dev_dbg(pcdev_to_dev(pcdev),
+ "camera s_mbus_config(0x%lx) returned %d\n",
common_flags, ret);
return ret;
}
- cam->flags = common_flags;
-
- pxa_camera_setup_cicr(icd, common_flags, pixfmt);
+ pxa_camera_setup_cicr(pcdev, common_flags, pixfmt);
return 0;
}
-static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
+static int pxa_camera_try_bus_param(struct pxa_camera_dev *pcdev,
unsigned char buswidth)
{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct pxa_camera_dev *pcdev = ici->priv;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
unsigned long bus_flags, common_flags;
int ret = test_platform_param(pcdev, buswidth, &bus_flags);
@@ -1151,12 +1649,12 @@ static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
if (ret < 0)
return ret;
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ ret = sensor_call(pcdev, video, g_mbus_config, &cfg);
if (!ret) {
- common_flags = soc_mbus_config_compatible(&cfg,
+ common_flags = pxa_mbus_config_compatible(&cfg,
bus_flags);
if (!common_flags) {
- dev_warn(icd->parent,
+ dev_warn(pcdev_to_dev(pcdev),
"Flags incompatible: camera 0x%x, host 0x%lx\n",
cfg.flags, bus_flags);
return -EINVAL;
@@ -1168,66 +1666,56 @@ static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
return ret;
}
-static const struct soc_mbus_pixelfmt pxa_camera_formats[] = {
+static const struct pxa_mbus_pixelfmt pxa_camera_formats[] = {
{
.fourcc = V4L2_PIX_FMT_YUV422P,
.name = "Planar YUV422 16 bit",
.bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_2X8_PADHI,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_U_V,
+ .packing = PXA_MBUS_PACKING_2X8_PADHI,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PLANAR_2Y_U_V,
},
};
/* This will be corrected as we get more formats */
-static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+static bool pxa_camera_packing_supported(const struct pxa_mbus_pixelfmt *fmt)
{
- return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ return fmt->packing == PXA_MBUS_PACKING_NONE ||
(fmt->bits_per_sample == 8 &&
- fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ fmt->packing == PXA_MBUS_PACKING_2X8_PADHI) ||
(fmt->bits_per_sample > 8 &&
- fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+ fmt->packing == PXA_MBUS_PACKING_EXTEND16);
}
-static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int idx,
+static int pxa_camera_get_formats(struct v4l2_device *v4l2_dev,
+ unsigned int idx,
struct soc_camera_format_xlate *xlate)
{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->parent;
+ struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(v4l2_dev);
int formats = 0, ret;
- struct pxa_cam *cam;
struct v4l2_subdev_mbus_code_enum code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.index = idx,
};
- const struct soc_mbus_pixelfmt *fmt;
+ const struct pxa_mbus_pixelfmt *fmt;
- ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
+ ret = sensor_call(pcdev, pad, enum_mbus_code, NULL, &code);
if (ret < 0)
/* No more formats */
return 0;
- fmt = soc_mbus_get_fmtdesc(code.code);
+ fmt = pxa_mbus_get_fmtdesc(code.code);
if (!fmt) {
- dev_err(dev, "Invalid format code #%u: %d\n", idx, code.code);
+ dev_err(pcdev_to_dev(pcdev),
+ "Invalid format code #%u: %d\n", idx, code.code);
return 0;
}
/* This also checks support for the requested bits-per-sample */
- ret = pxa_camera_try_bus_param(icd, fmt->bits_per_sample);
+ ret = pxa_camera_try_bus_param(pcdev, fmt->bits_per_sample);
if (ret < 0)
return 0;
- if (!icd->host_priv) {
- cam = kzalloc(sizeof(*cam), GFP_KERNEL);
- if (!cam)
- return -ENOMEM;
-
- icd->host_priv = cam;
- } else {
- cam = icd->host_priv;
- }
-
switch (code.code) {
case MEDIA_BUS_FMT_UYVY8_2X8:
formats++;
@@ -1235,25 +1723,29 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
xlate->host_fmt = &pxa_camera_formats[0];
xlate->code = code.code;
xlate++;
- dev_dbg(dev, "Providing format %s using code %d\n",
+ dev_dbg(pcdev_to_dev(pcdev),
+ "Providing format %s using code %d\n",
pxa_camera_formats[0].name, code.code);
}
+ /* fall through */
case MEDIA_BUS_FMT_VYUY8_2X8:
case MEDIA_BUS_FMT_YUYV8_2X8:
case MEDIA_BUS_FMT_YVYU8_2X8:
case MEDIA_BUS_FMT_RGB565_2X8_LE:
case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
if (xlate)
- dev_dbg(dev, "Providing format %s packed\n",
+ dev_dbg(pcdev_to_dev(pcdev),
+ "Providing format %s packed\n",
fmt->name);
break;
default:
if (!pxa_camera_packing_supported(fmt))
return 0;
if (xlate)
- dev_dbg(dev,
+ dev_dbg(pcdev_to_dev(pcdev),
"Providing format %s in pass-through mode\n",
fmt->name);
+ break;
}
/* Generic pass-through */
@@ -1267,10 +1759,22 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
return formats;
}
-static void pxa_camera_put_formats(struct soc_camera_device *icd)
+static int pxa_camera_build_formats(struct pxa_camera_dev *pcdev)
{
- kfree(icd->host_priv);
- icd->host_priv = NULL;
+ struct soc_camera_format_xlate *xlate;
+
+ xlate = pxa_mbus_build_fmts_xlate(&pcdev->v4l2_dev, pcdev->sensor,
+ pxa_camera_get_formats);
+ if (IS_ERR(xlate))
+ return PTR_ERR(xlate);
+
+ pcdev->user_formats = xlate;
+ return 0;
+}
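For orientation (editor's note): the user_formats table built above is an array of format translations terminated by a zero media bus code, which is why pxac_vidioc_enum_fmt_vid_cap() below can count entries with a bare loop. A sketch of its shape, with illustrative values:

	/*
	 *   { .host_fmt = &pxa_camera_formats[0], .code = MEDIA_BUS_FMT_UYVY8_2X8 },
	 *   { .host_fmt = <pass-through fmt>,     .code = <sensor bus code>       },
	 *   { .code = 0 }   <-- terminator
	 */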
+
+static void pxa_camera_destroy_formats(struct pxa_camera_dev *pcdev)
+{
+ kfree(pcdev->user_formats);
}
static int pxa_camera_check_frame(u32 width, u32 height)
@@ -1280,158 +1784,72 @@ static int pxa_camera_check_frame(u32 width, u32 height)
(width & 0x01);
}
-static int pxa_camera_set_crop(struct soc_camera_device *icd,
- const struct v4l2_crop *a)
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int pxac_vidioc_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
{
- const struct v4l2_rect *rect = &a->c;
- struct device *dev = icd->parent;
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct pxa_camera_dev *pcdev = ici->priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_sense sense = {
- .master_clock = pcdev->mclk,
- .pixel_clock_max = pcdev->ciclk / 4,
- };
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &fmt.format;
- struct pxa_cam *cam = icd->host_priv;
- u32 fourcc = icd->current_fmt->host_fmt->fourcc;
- int ret;
-
- /* If PCLK is used to latch data from the sensor, check sense */
- if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
- icd->sense = &sense;
-
- ret = v4l2_subdev_call(sd, video, s_crop, a);
-
- icd->sense = NULL;
-
- if (ret < 0) {
- dev_warn(dev, "Failed to crop to %ux%u@%u:%u\n",
- rect->width, rect->height, rect->left, rect->top);
- return ret;
- }
+ struct pxa_camera_dev *pcdev = video_drvdata(file);
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
-
- if (pxa_camera_check_frame(mf->width, mf->height)) {
- /*
- * Camera cropping produced a frame beyond our capabilities.
- * FIXME: just extract a subframe, that we can process.
- */
- v4l_bound_align_image(&mf->width, 48, 2048, 1,
- &mf->height, 32, 2048, 0,
- fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
- ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
+ if (reg->reg > CIBR2)
+ return -ERANGE;
- if (pxa_camera_check_frame(mf->width, mf->height)) {
- dev_warn(icd->parent,
- "Inconsistent state. Use S_FMT to repair\n");
- return -EINVAL;
- }
- }
-
- if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
- if (sense.pixel_clock > sense.pixel_clock_max) {
- dev_err(dev,
- "pixel clock %lu set by the camera too high!",
- sense.pixel_clock);
- return -EIO;
- }
- recalculate_fifo_timeout(pcdev, sense.pixel_clock);
- }
-
- icd->user_width = mf->width;
- icd->user_height = mf->height;
-
- pxa_camera_setup_cicr(icd, cam->flags, fourcc);
-
- return ret;
+ reg->val = __raw_readl(pcdev->base + reg->reg);
+ reg->size = sizeof(__u32);
+ return 0;
}
-static int pxa_camera_set_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
+static int pxac_vidioc_s_register(struct file *file, void *priv,
+ const struct v4l2_dbg_register *reg)
{
- struct device *dev = icd->parent;
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct pxa_camera_dev *pcdev = ici->priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- const struct soc_camera_format_xlate *xlate = NULL;
- struct soc_camera_sense sense = {
- .master_clock = pcdev->mclk,
- .pixel_clock_max = pcdev->ciclk / 4,
- };
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_subdev_format format = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &format.format;
- int ret;
+ struct pxa_camera_dev *pcdev = video_drvdata(file);
- xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
- if (!xlate) {
- dev_warn(dev, "Format %x not found\n", pix->pixelformat);
+ if (reg->reg > CIBR2)
+ return -ERANGE;
+ if (reg->size != sizeof(__u32))
return -EINVAL;
- }
-
- /* If PCLK is used to latch data from the sensor, check sense */
- if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
- /* The caller holds a mutex. */
- icd->sense = &sense;
-
- mf->width = pix->width;
- mf->height = pix->height;
- mf->field = pix->field;
- mf->colorspace = pix->colorspace;
- mf->code = xlate->code;
+ __raw_writel(reg->val, pcdev->base + reg->reg);
+ return 0;
+}
+#endif
- ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &format);
+static int pxac_vidioc_enum_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ const struct pxa_mbus_pixelfmt *format;
+ unsigned int idx;
- if (mf->code != xlate->code)
+ for (idx = 0; pcdev->user_formats[idx].code; idx++);
+ if (f->index >= idx)
return -EINVAL;
- icd->sense = NULL;
-
- if (ret < 0) {
- dev_warn(dev, "Failed to configure for format %x\n",
- pix->pixelformat);
- } else if (pxa_camera_check_frame(mf->width, mf->height)) {
- dev_warn(dev,
- "Camera driver produced an unsupported frame %dx%d\n",
- mf->width, mf->height);
- ret = -EINVAL;
- } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
- if (sense.pixel_clock > sense.pixel_clock_max) {
- dev_err(dev,
- "pixel clock %lu set by the camera too high!",
- sense.pixel_clock);
- return -EIO;
- }
- recalculate_fifo_timeout(pcdev, sense.pixel_clock);
- }
-
- if (ret < 0)
- return ret;
+ format = pcdev->user_formats[f->index].host_fmt;
+ f->pixelformat = format->fourcc;
+ return 0;
+}
- pix->width = mf->width;
- pix->height = mf->height;
- pix->field = mf->field;
- pix->colorspace = mf->colorspace;
- icd->current_fmt = xlate;
+static int pxac_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
- return ret;
+ pix->width = pcdev->current_pix.width;
+ pix->height = pcdev->current_pix.height;
+ pix->bytesperline = pcdev->current_pix.bytesperline;
+ pix->sizeimage = pcdev->current_pix.sizeimage;
+ pix->field = pcdev->current_pix.field;
+ pix->pixelformat = pcdev->current_fmt->host_fmt->fourcc;
+ pix->colorspace = pcdev->current_pix.colorspace;
+ dev_dbg(pcdev_to_dev(pcdev), "current_fmt->fourcc: 0x%08x\n",
+ pcdev->current_fmt->host_fmt->fourcc);
+ return 0;
}
-static int pxa_camera_try_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
+static int pxac_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg;
@@ -1442,9 +1860,9 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
__u32 pixfmt = pix->pixelformat;
int ret;
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ xlate = pxa_mbus_xlate_by_fourcc(pcdev->user_formats, pixfmt);
if (!xlate) {
- dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+ dev_warn(pcdev_to_dev(pcdev), "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -1458,90 +1876,311 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
&pix->height, 32, 2048, 0,
pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
- /* limit to sensor capabilities */
- mf->width = pix->width;
- mf->height = pix->height;
- /* Only progressive video supported so far */
- mf->field = V4L2_FIELD_NONE;
- mf->colorspace = pix->colorspace;
- mf->code = xlate->code;
-
- ret = v4l2_subdev_call(sd, pad, set_fmt, &pad_cfg, &format);
+ v4l2_fill_mbus_format(mf, pix, xlate->code);
+ ret = sensor_call(pcdev, pad, set_fmt, &pad_cfg, &format);
if (ret < 0)
return ret;
- pix->width = mf->width;
- pix->height = mf->height;
- pix->colorspace = mf->colorspace;
+ v4l2_fill_pix_format(pix, mf);
+ /* Only progressive video supported so far */
switch (mf->field) {
case V4L2_FIELD_ANY:
case V4L2_FIELD_NONE:
- pix->field = V4L2_FIELD_NONE;
+ pix->field = V4L2_FIELD_NONE;
break;
default:
/* TODO: support interlaced at least in pass-through mode */
- dev_err(icd->parent, "Field type %d unsupported.\n",
+ dev_err(pcdev_to_dev(pcdev), "Field type %d unsupported.\n",
mf->field);
return -EINVAL;
}
- return ret;
+ ret = pxa_mbus_bytes_per_line(pix->width, xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ pix->bytesperline = ret;
+ ret = pxa_mbus_image_size(xlate->host_fmt, pix->bytesperline,
+ pix->height);
+ if (ret < 0)
+ return ret;
+
+ pix->sizeimage = ret;
+ return 0;
}
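To make the two helper calls above concrete (editor's sketch, assuming the pxa_mbus_* helpers keep the former soc_mbus semantics for the planar 2X8_PADHI layout):

	/*
	 *   640x480, V4L2_PIX_FMT_YUV422P, 8 bits per sample:
	 *     bytesperline = 640 * 8 / 8   =    640
	 *     sizeimage    = 640 * 480 * 2 = 614400
	 *
	 * i.e. the 16 bits-per-pixel average expected of YUV 4:2:2.
	 */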
-static int pxa_camera_reqbufs(struct soc_camera_device *icd,
- struct v4l2_requestbuffers *p)
+static int pxac_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
{
- int i;
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ unsigned long flags;
+ int ret, is_busy;
- /*
- * This is for locking debugging only. I removed spinlocks and now I
- * check whether .prepare is ever called on a linked buffer, or whether
- * a dma IRQ can occur for an in-work or unlinked buffer. Until now
- * it hadn't triggered
- */
- for (i = 0; i < p->count; i++) {
- struct pxa_buffer *buf = container_of(icd->vb_vidq.bufs[i],
- struct pxa_buffer, vb);
- buf->inwork = 0;
- INIT_LIST_HEAD(&buf->vb.queue);
+ dev_dbg(pcdev_to_dev(pcdev),
+ "s_fmt_vid_cap(pix=%dx%d:%x)\n",
+ pix->width, pix->height, pix->pixelformat);
+
+ spin_lock_irqsave(&pcdev->lock, flags);
+ is_busy = pcdev->active || vb2_is_busy(&pcdev->vb2_vq);
+ spin_unlock_irqrestore(&pcdev->lock, flags);
+
+ if (is_busy)
+ return -EBUSY;
+
+ ret = pxac_vidioc_try_fmt_vid_cap(filp, priv, f);
+ if (ret)
+ return ret;
+
+ xlate = pxa_mbus_xlate_by_fourcc(pcdev->user_formats,
+ pix->pixelformat);
+ v4l2_fill_mbus_format(&format.format, pix, xlate->code);
+ ret = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+ if (ret < 0) {
+ dev_warn(pcdev_to_dev(pcdev),
+ "Failed to configure for format %x\n",
+ pix->pixelformat);
+ } else if (pxa_camera_check_frame(pix->width, pix->height)) {
+ dev_warn(pcdev_to_dev(pcdev),
+ "Camera driver produced an unsupported frame %dx%d\n",
+ pix->width, pix->height);
+ return -EINVAL;
}
+ pcdev->current_fmt = xlate;
+ pcdev->current_pix = *pix;
+
+ ret = pxa_camera_set_bus_param(pcdev);
+ return ret;
+}
+
+static int pxac_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->bus_info, "platform:pxa-camera", sizeof(cap->bus_info));
+ strlcpy(cap->driver, PXA_CAM_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
-static unsigned int pxa_camera_poll(struct file *file, poll_table *pt)
+static int pxac_vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
{
- struct soc_camera_device *icd = file->private_data;
- struct pxa_buffer *buf;
+ if (i->index > 0)
+ return -EINVAL;
- buf = list_entry(icd->vb_vidq.stream.next, struct pxa_buffer,
- vb.stream);
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, "Camera", sizeof(i->name));
- poll_wait(file, &buf->vb.done, pt);
+ return 0;
+}
- if (buf->vb.state == VIDEOBUF_DONE ||
- buf->vb.state == VIDEOBUF_ERROR)
- return POLLIN|POLLRDNORM;
+static int pxac_vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
return 0;
}
-static int pxa_camera_querycap(struct soc_camera_host *ici,
- struct v4l2_capability *cap)
+static int pxac_vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- /* cap->name is set by the firendly caller:-> */
- strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ if (i > 0)
+ return -EINVAL;
return 0;
}
+static int pxac_fops_camera_open(struct file *filp)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ int ret;
+
+ mutex_lock(&pcdev->mlock);
+ ret = v4l2_fh_open(filp);
+ if (ret < 0)
+ goto out;
+
+ ret = sensor_call(pcdev, core, s_power, 1);
+ if (ret)
+ v4l2_fh_release(filp);
+out:
+ mutex_unlock(&pcdev->mlock);
+ return ret;
+}
+
+static int pxac_fops_camera_release(struct file *filp)
+{
+ struct pxa_camera_dev *pcdev = video_drvdata(filp);
+ int ret;
+
+ ret = vb2_fop_release(filp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&pcdev->mlock);
+ ret = sensor_call(pcdev, core, s_power, 0);
+ mutex_unlock(&pcdev->mlock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations pxa_camera_fops = {
+ .owner = THIS_MODULE,
+ .open = pxac_fops_camera_open,
+ .release = pxac_fops_camera_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops pxa_camera_ioctl_ops = {
+ .vidioc_querycap = pxac_vidioc_querycap,
+
+ .vidioc_enum_input = pxac_vidioc_enum_input,
+ .vidioc_g_input = pxac_vidioc_g_input,
+ .vidioc_s_input = pxac_vidioc_s_input,
+
+ .vidioc_enum_fmt_vid_cap = pxac_vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = pxac_vidioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = pxac_vidioc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = pxac_vidioc_try_fmt_vid_cap,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = pxac_vidioc_g_register,
+ .vidioc_s_register = pxac_vidioc_s_register,
+#endif
+};
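Since the driver now exposes a plain video device node instead of a soc_camera host, the standard V4L2 streaming sequence applies. A minimal user-space sketch (editorial; /dev/video0, the geometry, and the buffer count are assumptions, not mandated by the patch):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int start_capture(void)
	{
		struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
		struct v4l2_requestbuffers req = {
			.count  = 4,
			.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_MMAP,
		};
		enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		int fd = open("/dev/video0", O_RDWR);

		if (fd < 0)
			return -1;

		f.fmt.pix.width = 640;
		f.fmt.pix.height = 480;
		f.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
		f.fmt.pix.field = V4L2_FIELD_NONE;

		if (ioctl(fd, VIDIOC_S_FMT, &f) < 0 ||		/* pxac_vidioc_s_fmt_vid_cap() */
		    ioctl(fd, VIDIOC_REQBUFS, &req) < 0)	/* vb2_ioctl_reqbufs() */
			return -1;

		/* mmap() and VIDIOC_QBUF each of the req.count buffers here */

		return ioctl(fd, VIDIOC_STREAMON, &type);	/* triggers pxac_vb2_start_streaming() */
	}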
+
+static struct v4l2_clk_ops pxa_camera_mclk_ops = {
+};
+
+static const struct video_device pxa_camera_videodev_template = {
+ .name = "pxa-camera",
+ .minor = -1,
+ .fops = &pxa_camera_fops,
+ .ioctl_ops = &pxa_camera_ioctl_ops,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING,
+};
+
+static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ int err;
+ struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
+ struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(v4l2_dev);
+ struct video_device *vdev = &pcdev->vdev;
+ struct v4l2_pix_format *pix = &pcdev->current_pix;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mf = &format.format;
+
+ dev_info(pcdev_to_dev(pcdev), "%s(): trying to bind a device\n",
+ __func__);
+ mutex_lock(&pcdev->mlock);
+ *vdev = pxa_camera_videodev_template;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->lock = &pcdev->mlock;
+ pcdev->sensor = subdev;
+ pcdev->vdev.queue = &pcdev->vb2_vq;
+ pcdev->vdev.v4l2_dev = &pcdev->v4l2_dev;
+ pcdev->vdev.ctrl_handler = subdev->ctrl_handler;
+ video_set_drvdata(&pcdev->vdev, pcdev);
+
+ err = pxa_camera_build_formats(pcdev);
+ if (err) {
+ dev_err(pcdev_to_dev(pcdev), "building formats failed: %d\n",
+ err);
+ goto out;
+ }
+
+ pcdev->current_fmt = pcdev->user_formats;
+ pix->field = V4L2_FIELD_NONE;
+ pix->width = DEFAULT_WIDTH;
+ pix->height = DEFAULT_HEIGHT;
+ pix->bytesperline =
+ pxa_mbus_bytes_per_line(pix->width,
+ pcdev->current_fmt->host_fmt);
+ pix->sizeimage =
+ pxa_mbus_image_size(pcdev->current_fmt->host_fmt,
+ pix->bytesperline, pix->height);
+ pix->pixelformat = pcdev->current_fmt->host_fmt->fourcc;
+ v4l2_fill_mbus_format(mf, pix, pcdev->current_fmt->code);
+ err = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+ if (err)
+ goto out;
+
+ v4l2_fill_pix_format(pix, mf);
+ pr_info("%s(): colorspace=0x%x pixfmt=0x%x\n",
+ __func__, pix->colorspace, pix->pixelformat);
+
+ err = pxa_camera_init_videobuf2(pcdev);
+ if (err)
+ goto out;
+
+ err = video_register_device(&pcdev->vdev, VFL_TYPE_GRABBER, -1);
+ if (err) {
+ v4l2_err(v4l2_dev, "register video device failed: %d\n", err);
+ pcdev->sensor = NULL;
+ } else {
+ dev_info(pcdev_to_dev(pcdev),
+ "PXA Camera driver attached to camera %s\n",
+ subdev->name);
+ }
+out:
+ mutex_unlock(&pcdev->mlock);
+ return err;
+}
+
+static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(notifier->v4l2_dev);
+
+ mutex_lock(&pcdev->mlock);
+ dev_info(pcdev_to_dev(pcdev),
+ "PXA Camera driver detached from camera %s\n",
+ subdev->name);
+
+ /* disable capture, disable interrupts */
+ __raw_writel(0x3ff, pcdev->base + CICR0);
+
+ /* Stop DMA engine */
+ pxa_dma_stop_channels(pcdev);
+
+ pxa_camera_destroy_formats(pcdev);
+ video_unregister_device(&pcdev->vdev);
+ pcdev->sensor = NULL;
+
+ mutex_unlock(&pcdev->mlock);
+}
+
+/*
+ * Driver probe, remove, suspend and resume operations
+ */
static int pxa_camera_suspend(struct device *dev)
{
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct pxa_camera_dev *pcdev = ici->priv;
+ struct pxa_camera_dev *pcdev = dev_get_drvdata(dev);
int i = 0, ret = 0;
pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR0);
@@ -1550,9 +2189,8 @@ static int pxa_camera_suspend(struct device *dev)
pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
- if (pcdev->soc_host.icd) {
- struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->soc_host.icd);
- ret = v4l2_subdev_call(sd, core, s_power, 0);
+ if (pcdev->sensor) {
+ ret = sensor_call(pcdev, core, s_power, 0);
if (ret == -ENOIOCTLCMD)
ret = 0;
}
@@ -1562,8 +2200,7 @@ static int pxa_camera_suspend(struct device *dev)
static int pxa_camera_resume(struct device *dev)
{
- struct soc_camera_host *ici = to_soc_camera_host(dev);
- struct pxa_camera_dev *pcdev = ici->priv;
+ struct pxa_camera_dev *pcdev = dev_get_drvdata(dev);
int i = 0, ret = 0;
__raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0);
@@ -1572,9 +2209,8 @@ static int pxa_camera_resume(struct device *dev)
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
- if (pcdev->soc_host.icd) {
- struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->soc_host.icd);
- ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (pcdev->sensor) {
+ ret = sensor_call(pcdev, core, s_power, 1);
if (ret == -ENOIOCTLCMD)
ret = 0;
}
@@ -1586,29 +2222,12 @@ static int pxa_camera_resume(struct device *dev)
return ret;
}
-static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
- .owner = THIS_MODULE,
- .add = pxa_camera_add_device,
- .remove = pxa_camera_remove_device,
- .clock_start = pxa_camera_clock_start,
- .clock_stop = pxa_camera_clock_stop,
- .set_crop = pxa_camera_set_crop,
- .get_formats = pxa_camera_get_formats,
- .put_formats = pxa_camera_put_formats,
- .set_fmt = pxa_camera_set_fmt,
- .try_fmt = pxa_camera_try_fmt,
- .init_videobuf = pxa_camera_init_videobuf,
- .reqbufs = pxa_camera_reqbufs,
- .poll = pxa_camera_poll,
- .querycap = pxa_camera_querycap,
- .set_bus_param = pxa_camera_set_bus_param,
-};
-
static int pxa_camera_pdata_from_dt(struct device *dev,
- struct pxa_camera_dev *pcdev)
+ struct pxa_camera_dev *pcdev,
+ struct v4l2_async_subdev *asd)
{
u32 mclk_rate;
- struct device_node *np = dev->of_node;
+ struct device_node *remote, *np = dev->of_node;
struct v4l2_of_endpoint ep;
int err = of_property_read_u32(np, "clock-frequency",
&mclk_rate);
@@ -1660,6 +2279,15 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
+ asd->match_type = V4L2_ASYNC_MATCH_OF;
+ remote = of_graph_get_remote_port(np);
+ if (remote) {
+ asd->match.of.node = remote;
+ of_node_put(remote);
+ } else {
+ dev_notice(dev, "no remote for %s\n", of_node_full_name(np));
+ }
+
out:
of_node_put(np);
@@ -1678,6 +2306,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
};
dma_cap_mask_t mask;
struct pxad_param params;
+ char clk_name[V4L2_CLK_NAME_SIZE];
int irq;
int err = 0, i;
@@ -1700,10 +2329,14 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->pdata = pdev->dev.platform_data;
if (&pdev->dev.of_node && !pcdev->pdata) {
- err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev);
+ err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
} else {
pcdev->platform_flags = pcdev->pdata->flags;
pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
+ pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
+ pcdev->asd.match.i2c.adapter_id =
+ pcdev->pdata->sensor_i2c_adapter_id;
+ pcdev->asd.match.i2c.address = pcdev->pdata->sensor_i2c_address;
}
if (err < 0)
return err;
@@ -1735,6 +2368,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&pcdev->capture);
spin_lock_init(&pcdev->lock);
+ mutex_init(&pcdev->mlock);
/*
* Request the regions.
@@ -1767,6 +2401,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
&params, &pdev->dev, "CI_U");
if (!pcdev->dma_chans[1]) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
+ err = -ENODEV;
goto exit_free_dma_y;
}
@@ -1776,6 +2411,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
&params, &pdev->dev, "CI_V");
if (!pcdev->dma_chans[2]) {
dev_err(&pdev->dev, "Can't request DMA for V\n");
+ err = -ENODEV;
goto exit_free_dma_u;
}
@@ -1797,19 +2433,50 @@ static int pxa_camera_probe(struct platform_device *pdev)
goto exit_free_dma;
}
- pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME;
- pcdev->soc_host.ops = &pxa_soc_camera_host_ops;
- pcdev->soc_host.priv = pcdev;
- pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
- pcdev->soc_host.nr = pdev->id;
tasklet_init(&pcdev->task_eof, pxa_camera_eof, (unsigned long)pcdev);
- err = soc_camera_host_register(&pcdev->soc_host);
+ pxa_camera_activate(pcdev);
+
+ dev_set_drvdata(&pdev->dev, pcdev);
+ err = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
if (err)
goto exit_free_dma;
- return 0;
+ pcdev->asds[0] = &pcdev->asd;
+ pcdev->notifier.subdevs = pcdev->asds;
+ pcdev->notifier.num_subdevs = 1;
+ pcdev->notifier.bound = pxa_camera_sensor_bound;
+ pcdev->notifier.unbind = pxa_camera_sensor_unbind;
+ if (!of_have_populated_dt())
+ pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
+
+ err = pxa_camera_init_videobuf2(pcdev);
+ if (err)
+ goto exit_free_v4l2dev;
+
+ if (pcdev->mclk) {
+ v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
+ pcdev->asd.match.i2c.adapter_id,
+ pcdev->asd.match.i2c.address);
+
+ pcdev->mclk_clk = v4l2_clk_register(&pxa_camera_mclk_ops,
+ clk_name, NULL);
+ if (IS_ERR(pcdev->mclk_clk)) {
+ err = PTR_ERR(pcdev->mclk_clk);
+ goto exit_free_v4l2dev;
+ }
+ }
+
+ err = v4l2_async_notifier_register(&pcdev->v4l2_dev, &pcdev->notifier);
+ if (err)
+ goto exit_free_clk;
+
+ return 0;
+exit_free_clk:
+ v4l2_clk_unregister(pcdev->mclk_clk);
+exit_free_v4l2dev:
+ v4l2_device_unregister(&pcdev->v4l2_dev);
exit_free_dma:
dma_release_channel(pcdev->dma_chans[2]);
exit_free_dma_u:
@@ -1821,15 +2488,15 @@ exit_free_dma_y:
static int pxa_camera_remove(struct platform_device *pdev)
{
- struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
- struct pxa_camera_dev *pcdev = container_of(soc_host,
- struct pxa_camera_dev, soc_host);
+ struct pxa_camera_dev *pcdev = dev_get_drvdata(&pdev->dev);
+ pxa_camera_deactivate(pcdev);
dma_release_channel(pcdev->dma_chans[0]);
dma_release_channel(pcdev->dma_chans[1]);
dma_release_channel(pcdev->dma_chans[2]);
- soc_camera_host_unregister(soc_host);
+ v4l2_clk_unregister(pcdev->mclk_clk);
+ v4l2_device_unregister(&pcdev->v4l2_dev);
dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index bc50c69ee0c5..f3a3f31cdfa9 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -99,14 +99,14 @@ EXPORT_SYMBOL_GPL(rcar_fcp_put);
*/
int rcar_fcp_enable(struct rcar_fcp_device *fcp)
{
- int error;
+ int ret;
if (!fcp)
return 0;
- error = pm_runtime_get_sync(fcp->dev);
- if (error < 0)
- return error;
+ ret = pm_runtime_get_sync(fcp->dev);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -165,6 +165,7 @@ static int rcar_fcp_remove(struct platform_device *pdev)
}
static const struct of_device_id rcar_fcp_of_match[] = {
+ { .compatible = "renesas,fcpf" },
{ .compatible = "renesas,fcpv" },
{ },
};
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
index b2ff2d4e8bb1..111d2a151f6a 100644
--- a/drivers/media/platform/rcar-vin/Kconfig
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_RCAR_VIN
tristate "R-Car Video Input (VIN) Driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA && MEDIA_CONTROLLER
depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
---help---
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 4b2007b73463..098a0b1cc10a 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -31,26 +31,22 @@
#define notifier_to_vin(n) container_of(n, struct rvin_dev, notifier)
-static int rvin_mbus_supported(struct rvin_dev *vin)
+static bool rvin_mbus_supported(struct rvin_graph_entity *entity)
{
- struct v4l2_subdev *sd;
+ struct v4l2_subdev *sd = entity->subdev;
struct v4l2_subdev_mbus_code_enum code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- sd = vin_to_source(vin);
-
code.index = 0;
while (!v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code)) {
code.index++;
switch (code.code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
case MEDIA_BUS_FMT_RGB888_1X24:
- vin->source.code = code.code;
- vin_dbg(vin, "Found supported media bus format: %d\n",
- vin->source.code);
+ entity->code = code.code;
return true;
default:
break;
@@ -60,142 +56,168 @@ static int rvin_mbus_supported(struct rvin_dev *vin)
return false;
}
-static int rvin_graph_notify_complete(struct v4l2_async_notifier *notifier)
+static int rvin_digital_notify_complete(struct v4l2_async_notifier *notifier)
{
struct rvin_dev *vin = notifier_to_vin(notifier);
int ret;
+ /* Verify the subdevice's media bus format */
+ if (!rvin_mbus_supported(&vin->digital)) {
+ vin_err(vin, "Unsupported media bus format for %s\n",
+ vin->digital.subdev->name);
+ return -EINVAL;
+ }
+
+ vin_dbg(vin, "Found media bus format for %s: %d\n",
+ vin->digital.subdev->name, vin->digital.code);
+
ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
if (ret < 0) {
vin_err(vin, "Failed to register subdev nodes\n");
return ret;
}
- if (!rvin_mbus_supported(vin)) {
- vin_err(vin, "No supported mediabus format found\n");
- return -EINVAL;
+ return rvin_v4l2_probe(vin);
+}
+
+static void rvin_digital_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = notifier_to_vin(notifier);
+
+ if (vin->digital.subdev == subdev) {
+ vin_dbg(vin, "unbind digital subdev %s\n", subdev->name);
+ rvin_v4l2_remove(vin);
+ vin->digital.subdev = NULL;
+ return;
}
- return rvin_v4l2_probe(vin);
+ vin_err(vin, "no entity for subdev %s to unbind\n", subdev->name);
}
-static void rvin_graph_notify_unbind(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
+static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
{
struct rvin_dev *vin = notifier_to_vin(notifier);
- rvin_v4l2_remove(vin);
+ v4l2_set_subdev_hostdata(subdev, vin);
+
+ if (vin->digital.asd.match.of.node == subdev->dev->of_node) {
+ vin_dbg(vin, "bound digital subdev %s\n", subdev->name);
+ vin->digital.subdev = subdev;
+ return 0;
+ }
+
+ vin_err(vin, "no entity for subdev %s to bind\n", subdev->name);
+ return -EINVAL;
}
-static int rvin_graph_notify_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static int rvin_digital_parse_v4l2(struct rvin_dev *vin,
+ struct device_node *ep,
+ struct v4l2_mbus_config *mbus_cfg)
{
- struct rvin_dev *vin = notifier_to_vin(notifier);
+ struct v4l2_of_endpoint v4l2_ep;
+ int ret;
- vin_dbg(vin, "subdev %s bound\n", subdev->name);
+ ret = v4l2_of_parse_endpoint(ep, &v4l2_ep);
+ if (ret) {
+ vin_err(vin, "Could not parse v4l2 endpoint\n");
+ return -EINVAL;
+ }
- vin->entity.entity = &subdev->entity;
- vin->entity.subdev = subdev;
+ mbus_cfg->type = v4l2_ep.bus_type;
+
+ switch (mbus_cfg->type) {
+ case V4L2_MBUS_PARALLEL:
+ vin_dbg(vin, "Found PARALLEL media bus\n");
+ mbus_cfg->flags = v4l2_ep.bus.parallel.flags;
+ break;
+ case V4L2_MBUS_BT656:
+ vin_dbg(vin, "Found BT656 media bus\n");
+ mbus_cfg->flags = 0;
+ break;
+ default:
+ vin_err(vin, "Unknown media bus type\n");
+ return -EINVAL;
+ }
return 0;
}
-static int rvin_graph_parse(struct rvin_dev *vin,
- struct device_node *node)
+static int rvin_digital_graph_parse(struct rvin_dev *vin)
{
- struct device_node *remote;
- struct device_node *ep = NULL;
- struct device_node *next;
- int ret = 0;
-
- while (1) {
- next = of_graph_get_next_endpoint(node, ep);
- if (!next)
- break;
-
- of_node_put(ep);
- ep = next;
+ struct device_node *ep, *np;
+ int ret;
- remote = of_graph_get_remote_port_parent(ep);
- if (!remote) {
- ret = -EINVAL;
- break;
- }
+ vin->digital.asd.match.of.node = NULL;
+ vin->digital.subdev = NULL;
- /* Skip entities that we have already processed. */
- if (remote == vin->dev->of_node) {
- of_node_put(remote);
- continue;
- }
+ /*
+ * Port 0, id 0 is the local digital input; try to get it.
+ * Not all instances can or will have this, and that is OK.
+ */
+ ep = of_graph_get_endpoint_by_regs(vin->dev->of_node, 0, 0);
+ if (!ep)
+ return 0;
- /* Remote node to connect */
- if (!vin->entity.node) {
- vin->entity.node = remote;
- vin->entity.asd.match_type = V4L2_ASYNC_MATCH_OF;
- vin->entity.asd.match.of.node = remote;
- ret++;
- }
+ np = of_graph_get_remote_port_parent(ep);
+ if (!np) {
+ vin_err(vin, "No remote parent for digital input\n");
+ of_node_put(ep);
+ return -EINVAL;
}
+ of_node_put(np);
+ ret = rvin_digital_parse_v4l2(vin, ep, &vin->digital.mbus_cfg);
of_node_put(ep);
+ if (ret)
+ return ret;
- return ret;
+ vin->digital.asd.match.of.node = np;
+ vin->digital.asd.match_type = V4L2_ASYNC_MATCH_OF;
+
+ return 0;
}
-static int rvin_graph_init(struct rvin_dev *vin)
+static int rvin_digital_graph_init(struct rvin_dev *vin)
{
struct v4l2_async_subdev **subdevs = NULL;
int ret;
- /* Parse the graph to extract a list of subdevice DT nodes. */
- ret = rvin_graph_parse(vin, vin->dev->of_node);
- if (ret < 0) {
- vin_err(vin, "Graph parsing failed\n");
- goto done;
- }
-
- if (!ret) {
- vin_err(vin, "No subdev found in graph\n");
- goto done;
- }
+ ret = rvin_digital_graph_parse(vin);
+ if (ret)
+ return ret;
- if (ret != 1) {
- vin_err(vin, "More then one subdev found in graph\n");
- goto done;
+ if (!vin->digital.asd.match.of.node) {
+ vin_dbg(vin, "No digital subdevice found\n");
+ return -ENODEV;
}
/* Register the subdevices notifier. */
subdevs = devm_kzalloc(vin->dev, sizeof(*subdevs), GFP_KERNEL);
- if (subdevs == NULL) {
- ret = -ENOMEM;
- goto done;
- }
+ if (subdevs == NULL)
+ return -ENOMEM;
- subdevs[0] = &vin->entity.asd;
+ subdevs[0] = &vin->digital.asd;
+
+ vin_dbg(vin, "Found digital subdevice %s\n",
+ of_node_full_name(subdevs[0]->match.of.node));
- vin->notifier.subdevs = subdevs;
vin->notifier.num_subdevs = 1;
- vin->notifier.bound = rvin_graph_notify_bound;
- vin->notifier.unbind = rvin_graph_notify_unbind;
- vin->notifier.complete = rvin_graph_notify_complete;
+ vin->notifier.subdevs = subdevs;
+ vin->notifier.bound = rvin_digital_notify_bound;
+ vin->notifier.unbind = rvin_digital_notify_unbind;
+ vin->notifier.complete = rvin_digital_notify_complete;
ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
- goto done;
- }
-
- ret = 0;
-
-done:
- if (ret < 0) {
- v4l2_async_notifier_unregister(&vin->notifier);
- of_node_put(vin->entity.node);
+ return ret;
}
- return ret;
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -209,56 +231,14 @@ static const struct of_device_id rvin_of_id_table[] = {
{ .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
{ .compatible = "renesas,vin-r8a7778", .data = (void *)RCAR_M1 },
+ { .compatible = "renesas,rcar-gen2-vin", .data = (void *)RCAR_GEN2 },
{ },
};
MODULE_DEVICE_TABLE(of, rvin_of_id_table);
-static int rvin_parse_dt(struct rvin_dev *vin)
-{
- const struct of_device_id *match;
- struct v4l2_of_endpoint ep;
- struct device_node *np;
- int ret;
-
- match = of_match_device(of_match_ptr(rvin_of_id_table), vin->dev);
- if (!match)
- return -ENODEV;
-
- vin->chip = (enum chip_id)match->data;
-
- np = of_graph_get_next_endpoint(vin->dev->of_node, NULL);
- if (!np) {
- vin_err(vin, "Could not find endpoint\n");
- return -EINVAL;
- }
-
- ret = v4l2_of_parse_endpoint(np, &ep);
- if (ret) {
- vin_err(vin, "Could not parse endpoint\n");
- return ret;
- }
-
- of_node_put(np);
-
- vin->mbus_cfg.type = ep.bus_type;
-
- switch (vin->mbus_cfg.type) {
- case V4L2_MBUS_PARALLEL:
- vin->mbus_cfg.flags = ep.bus.parallel.flags;
- break;
- case V4L2_MBUS_BT656:
- vin->mbus_cfg.flags = 0;
- break;
- default:
- vin_err(vin, "Unknown media bus type\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int rcar_vin_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct rvin_dev *vin;
struct resource *mem;
int irq, ret;
@@ -267,11 +247,12 @@ static int rcar_vin_probe(struct platform_device *pdev)
if (!vin)
return -ENOMEM;
- vin->dev = &pdev->dev;
+ match = of_match_device(of_match_ptr(rvin_of_id_table), &pdev->dev);
+ if (!match)
+ return -ENODEV;
- ret = rvin_parse_dt(vin);
- if (ret)
- return ret;
+ vin->dev = &pdev->dev;
+ vin->chip = (enum chip_id)match->data;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (mem == NULL)
@@ -282,14 +263,14 @@ static int rcar_vin_probe(struct platform_device *pdev)
return PTR_ERR(vin->base);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return ret;
+ if (irq < 0)
+ return irq;
ret = rvin_dma_probe(vin, irq);
if (ret)
return ret;
- ret = rvin_graph_init(vin);
+ ret = rvin_digital_graph_init(vin);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 496aa97b6400..9ccd5ff55e19 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -95,6 +95,7 @@
/* Video n Module Status Register bits */
#define VNMS_FBS_MASK (3 << 3)
#define VNMS_FBS_SHIFT 3
+#define VNMS_FS (1 << 2)
#define VNMS_AV (1 << 1)
#define VNMS_CA (1 << 0)
@@ -131,6 +132,7 @@ static u32 rvin_read(struct rvin_dev *vin, u32 offset)
static int rvin_setup(struct rvin_dev *vin)
{
u32 vnmc, dmr, dmr2, interrupts;
+ v4l2_std_id std;
bool progressive = false, output_is_yuv = false, input_is_yuv = false;
switch (vin->format.field) {
@@ -141,12 +143,21 @@ static int rvin_setup(struct rvin_dev *vin)
vnmc = VNMC_IM_EVEN;
break;
case V4L2_FIELD_INTERLACED:
+ /* Default to TB */
+ vnmc = VNMC_IM_FULL;
+ /* Use BT if the video standard can be read and is a 60 Hz format */
+ if (!v4l2_subdev_call(vin_to_source(vin), video, g_std, &std)) {
+ if (std & V4L2_STD_525_60)
+ vnmc = VNMC_IM_FULL | VNMC_FOC;
+ }
+ break;
case V4L2_FIELD_INTERLACED_TB:
vnmc = VNMC_IM_FULL;
break;
case V4L2_FIELD_INTERLACED_BT:
vnmc = VNMC_IM_FULL | VNMC_FOC;
break;
+ case V4L2_FIELD_ALTERNATE:
case V4L2_FIELD_NONE:
if (vin->continuous) {
vnmc = VNMC_IM_ODD_EVEN;
@@ -163,24 +174,24 @@ static int rvin_setup(struct rvin_dev *vin)
/*
* Input interface
*/
- switch (vin->source.code) {
+ switch (vin->digital.code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
/* BT.601/BT.1358 16bit YCbCr422 */
vnmc |= VNMC_INF_YUV16;
input_is_yuv = true;
break;
- case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
- vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
+ vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
input_is_yuv = true;
break;
case MEDIA_BUS_FMT_RGB888_1X24:
vnmc |= VNMC_INF_RGB888;
break;
- case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
- vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
+ vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
input_is_yuv = true;
break;
@@ -192,11 +203,11 @@ static int rvin_setup(struct rvin_dev *vin)
dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
/* Hsync Signal Polarity Select */
- if (!(vin->mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_HPS;
/* Vsync Signal Polarity Select */
- if (!(vin->mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_VPS;
/*
@@ -225,11 +236,9 @@ static int rvin_setup(struct rvin_dev *vin)
dmr = 0;
break;
case V4L2_PIX_FMT_XBGR32:
- if (vin->chip == RCAR_GEN2 || vin->chip == RCAR_H1) {
- dmr = VNDMR_EXRGB;
- break;
- }
- /* fall through */
+ /* Note: not supported on M1 */
+ dmr = VNDMR_EXRGB;
+ break;
default:
vin_err(vin, "Invalid pixelformat (0x%x)\n",
vin->format.pixelformat);
@@ -322,15 +331,26 @@ static bool rvin_capture_active(struct rvin_dev *vin)
return rvin_read(vin, VNMS_REG) & VNMS_CA;
}
-static int rvin_get_active_slot(struct rvin_dev *vin)
+static int rvin_get_active_slot(struct rvin_dev *vin, u32 vnms)
{
if (vin->continuous)
- return (rvin_read(vin, VNMS_REG) & VNMS_FBS_MASK)
- >> VNMS_FBS_SHIFT;
+ return (vnms & VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
return 0;
}
+static enum v4l2_field rvin_get_active_field(struct rvin_dev *vin, u32 vnms)
+{
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ /* If FS is set it is an even field */
+ if (vnms & VNMS_FS)
+ return V4L2_FIELD_BOTTOM;
+ return V4L2_FIELD_TOP;
+ }
+
+ return vin->format.field;
+}
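An editorial illustration of what this helper means for the captured stream:

	/*
	 * In V4L2_FIELD_ALTERNATE mode every hardware field completes as
	 * its own buffer, so a capture sequence alternates:
	 *
	 *   buffer N:     field = V4L2_FIELD_TOP      (VNMS_FS clear)
	 *   buffer N + 1: field = V4L2_FIELD_BOTTOM   (VNMS_FS set)
	 *
	 * leaving it to user space to weave pairs back into full frames.
	 */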
+
static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
{
const struct rvin_video_format *fmt;
@@ -871,7 +891,7 @@ static bool rvin_fill_hw(struct rvin_dev *vin)
static irqreturn_t rvin_irq(int irq, void *data)
{
struct rvin_dev *vin = data;
- u32 int_status;
+ u32 int_status, vnms;
int slot;
unsigned int sequence, handled = 0;
unsigned long flags;
@@ -898,7 +918,8 @@ static irqreturn_t rvin_irq(int irq, void *data)
}
/* Prepare for capture and update state */
- slot = rvin_get_active_slot(vin);
+ vnms = rvin_read(vin, VNMS_REG);
+ slot = rvin_get_active_slot(vin, vnms);
sequence = vin->sequence++;
vin_dbg(vin, "IRQ %02d: %d\tbuf0: %c buf1: %c buf2: %c\tmore: %d\n",
@@ -913,7 +934,7 @@ static irqreturn_t rvin_irq(int irq, void *data)
goto done;
/* Capture frame */
- vin->queue_buf[slot]->field = vin->format.field;
+ vin->queue_buf[slot]->field = rvin_get_active_field(vin, vnms);
vin->queue_buf[slot]->sequence = sequence;
vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf, VB2_BUF_STATE_DONE);
@@ -1116,7 +1137,7 @@ static void rvin_stop_streaming(struct vb2_queue *vq)
rvin_disable_interrupts(vin);
}
-static struct vb2_ops rvin_qops = {
+static const struct vb2_ops rvin_qops = {
.queue_setup = rvin_queue_setup,
.buf_prepare = rvin_buffer_prepare,
.buf_queue = rvin_buffer_queue,
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 10a5c107e8b9..2bbe6d495fa6 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -92,21 +92,84 @@ static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
* V4L2
*/
+static void rvin_reset_crop_compose(struct rvin_dev *vin)
+{
+ vin->crop.top = vin->crop.left = 0;
+ vin->crop.width = vin->source.width;
+ vin->crop.height = vin->source.height;
+
+ vin->compose.top = vin->compose.left = 0;
+ vin->compose.width = vin->format.width;
+ vin->compose.height = vin->format.height;
+}
+
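
rvin_reset_crop_compose() re-arms the crop rectangle to the full source and the compose rectangle to the full output format. Userspace then adjusts the rectangles through the selection API; a minimal sketch, assuming an already opened capture node fd:

    struct v4l2_selection sel = {
        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .target = V4L2_SEL_TGT_CROP,
        .r      = { .left = 0, .top = 0, .width = 640, .height = 480 },
    };
    if (ioctl(fd, VIDIOC_S_SELECTION, &sel) < 0)
        perror("VIDIOC_S_SELECTION");
    /* the driver may adjust sel.r to what the hardware can do */
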
+static int rvin_reset_format(struct rvin_dev *vin)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mf = &fmt.format;
+ int ret;
+
+ fmt.pad = vin->src_pad_idx;
+
+ ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ vin->format.width = mf->width;
+ vin->format.height = mf->height;
+ vin->format.colorspace = mf->colorspace;
+ vin->format.field = mf->field;
+
+ /*
+ * If the subdevice uses ALTERNATE field mode and G_STD is
+ * implemented, use the VIN HW to combine the two fields into
+ * one INTERLACED frame. The ALTERNATE field mode can still be
+ * requested in S_FMT and will be respected; this is just the
+ * default applied at probe time or when S_STD is called.
+ */
+ if (vin->format.field == V4L2_FIELD_ALTERNATE &&
+ v4l2_subdev_has_op(vin_to_source(vin), video, g_std))
+ vin->format.field = V4L2_FIELD_INTERLACED;
+
+ switch (vin->format.field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_ALTERNATE:
+ vin->format.height /= 2;
+ break;
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ break;
+ default:
+ vin->format.field = V4L2_FIELD_NONE;
+ break;
+ }
+
+ rvin_reset_crop_compose(vin);
+
+ return 0;
+}
+
static int __rvin_try_format_source(struct rvin_dev *vin,
- u32 which,
- struct v4l2_pix_format *pix,
- struct rvin_source_fmt *source)
+ u32 which,
+ struct v4l2_pix_format *pix,
+ struct rvin_source_fmt *source)
{
struct v4l2_subdev *sd;
struct v4l2_subdev_pad_config *pad_cfg;
struct v4l2_subdev_format format = {
.which = which,
};
+ enum v4l2_field field;
int ret;
sd = vin_to_source(vin);
- v4l2_fill_mbus_format(&format.format, pix, vin->source.code);
+ v4l2_fill_mbus_format(&format.format, pix, vin->digital.code);
pad_cfg = v4l2_subdev_alloc_pad_config(sd);
if (pad_cfg == NULL)
@@ -114,28 +177,31 @@ static int __rvin_try_format_source(struct rvin_dev *vin,
format.pad = vin->src_pad_idx;
- ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, pad, set_fmt,
- pad_cfg, &format);
- if (ret < 0)
- goto cleanup;
+ field = pix->field;
+
+ ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto done;
v4l2_fill_pix_format(pix, &format.format);
+ pix->field = field;
+
source->width = pix->width;
source->height = pix->height;
vin_dbg(vin, "Source resolution: %ux%u\n", source->width,
source->height);
-cleanup:
+done:
v4l2_subdev_free_pad_config(pad_cfg);
- return 0;
+ return ret;
}
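
The switch from v4l2_device_call_until_err() to v4l2_subdev_call() also changes the error handling: v4l2_subdev_call() returns -ENOIOCTLCMD when the subdevice does not implement the op, and that case is deliberately tolerated so a subdevice without pad ops still works. The shape of the convention:

    ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
    if (ret < 0 && ret != -ENOIOCTLCMD)
        return ret;     /* a real failure */
    /* -ENOIOCTLCMD: op not implemented, keep the requested format */
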
static int __rvin_try_format(struct rvin_dev *vin,
- u32 which,
- struct v4l2_pix_format *pix,
- struct rvin_source_fmt *source)
+ u32 which,
+ struct v4l2_pix_format *pix,
+ struct rvin_source_fmt *source)
{
const struct rvin_video_format *info;
u32 rwidth, rheight, walign;
@@ -144,6 +210,10 @@ static int __rvin_try_format(struct rvin_dev *vin,
rwidth = pix->width;
rheight = pix->height;
+ /* Keep current field if no specific one is asked for */
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = vin->format.field;
+
/*
* Retrieve format information and select the current format if the
* requested format isn't supported.
@@ -164,21 +234,14 @@ static int __rvin_try_format(struct rvin_dev *vin,
/* Limit to source capabilities */
__rvin_try_format_source(vin, which, pix, source);
- /* If source can't match format try if VIN can scale */
- if (source->width != rwidth || source->height != rheight)
- rvin_scale_try(vin, pix, rwidth, rheight);
-
- /* HW limit width to a multiple of 32 (2^5) for NV16 else 2 (2^1) */
- walign = vin->format.pixelformat == V4L2_PIX_FMT_NV16 ? 5 : 1;
-
- /* Limit to VIN capabilities */
- v4l_bound_align_image(&pix->width, 2, RVIN_MAX_WIDTH, walign,
- &pix->height, 4, RVIN_MAX_HEIGHT, 2, 0);
-
switch (pix->field) {
- case V4L2_FIELD_NONE:
case V4L2_FIELD_TOP:
case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_ALTERNATE:
+ pix->height /= 2;
+ source->height /= 2;
+ break;
+ case V4L2_FIELD_NONE:
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
@@ -188,11 +251,27 @@ static int __rvin_try_format(struct rvin_dev *vin,
break;
}
+ /* If source can't match format try if VIN can scale */
+ if (source->width != rwidth || source->height != rheight)
+ rvin_scale_try(vin, pix, rwidth, rheight);
+
+ /* HW limits width to a multiple of 32 (2^5) for NV16, else 2 (2^1) */
+ walign = vin->format.pixelformat == V4L2_PIX_FMT_NV16 ? 5 : 1;
+
+ /* Limit to VIN capabilities */
+ v4l_bound_align_image(&pix->width, 2, RVIN_MAX_WIDTH, walign,
+ &pix->height, 4, RVIN_MAX_HEIGHT, 2, 0);
+
pix->bytesperline = max_t(u32, pix->bytesperline,
rvin_format_bytesperline(pix));
pix->sizeimage = max_t(u32, pix->sizeimage,
rvin_format_sizeimage(pix));
+ if (vin->chip == RCAR_M1 && pix->pixelformat == V4L2_PIX_FMT_XBGR32) {
+ vin_err(vin, "pixel format XBGR32 not supported on M1\n");
+ return -EINVAL;
+ }
+
vin_dbg(vin, "Requested %ux%u Got %ux%u bpl: %d size: %d\n",
rwidth, rheight, pix->width, pix->height,
pix->bytesperline, pix->sizeimage);
@@ -219,7 +298,7 @@ static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
struct rvin_source_fmt source;
return __rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix,
- &source);
+ &source);
}
static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
@@ -233,7 +312,7 @@ static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
return -EBUSY;
ret = __rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
- &source);
+ &source);
if (ret)
return ret;
@@ -242,6 +321,8 @@ static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
vin->format = f->fmt.pix;
+ rvin_reset_crop_compose(vin);
+
return 0;
}
@@ -334,8 +415,8 @@ static int rvin_s_selection(struct file *file, void *fh,
vin->crop = s->r = r;
vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
- r.width, r.height, r.left, r.top,
- vin->source.width, vin->source.height);
+ r.width, r.height, r.left, r.top,
+ vin->source.width, vin->source.height);
break;
case V4L2_SEL_TGT_COMPOSE:
/* Make sure compose rect fits inside output format */
@@ -359,8 +440,8 @@ static int rvin_s_selection(struct file *file, void *fh,
vin->compose = s->r = r;
vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n",
- r.width, r.height, r.left, r.top,
- vin->format.width, vin->format.height);
+ r.width, r.height, r.left, r.top,
+ vin->format.width, vin->format.height);
break;
default:
return -EINVAL;
@@ -381,7 +462,7 @@ static int rvin_cropcap(struct file *file, void *priv,
if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- return v4l2_subdev_call(sd, video, cropcap, crop);
+ return v4l2_subdev_call(sd, video, g_pixelaspect, &crop->pixelaspect);
}
static int rvin_enum_input(struct file *file, void *priv,
@@ -433,35 +514,14 @@ static int rvin_querystd(struct file *file, void *priv, v4l2_std_id *a)
static int rvin_s_std(struct file *file, void *priv, v4l2_std_id a)
{
struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_subdev *sd = vin_to_source(vin);
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &fmt.format;
- int ret = v4l2_subdev_call(sd, video, s_std, a);
+ int ret;
+ ret = v4l2_subdev_call(vin_to_source(vin), video, s_std, a);
if (ret < 0)
return ret;
/* Changing the standard will change the width/height */
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- if (ret) {
- vin_err(vin, "Failed to get initial format\n");
- return ret;
- }
-
- vin->format.width = mf->width;
- vin->format.height = mf->height;
-
- vin->crop.top = vin->crop.left = 0;
- vin->crop.width = mf->width;
- vin->crop.height = mf->height;
-
- vin->compose.top = vin->compose.left = 0;
- vin->compose.width = mf->width;
- vin->compose.height = mf->height;
-
- return 0;
+ return rvin_reset_format(vin);
}
static int rvin_g_std(struct file *file, void *priv, v4l2_std_id *a)
@@ -483,14 +543,14 @@ static int rvin_subscribe_event(struct v4l2_fh *fh,
}
static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
- struct v4l2_enum_dv_timings *timings)
+ struct v4l2_enum_dv_timings *timings)
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
int pad, ret;
pad = timings->pad;
- timings->pad = vin->src_pad_idx;
+ timings->pad = vin->sink_pad_idx;
ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
@@ -500,52 +560,51 @@ static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
}
static int rvin_s_dv_timings(struct file *file, void *priv_fh,
- struct v4l2_dv_timings *timings)
+ struct v4l2_dv_timings *timings)
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- int err;
-
- err = v4l2_subdev_call(sd,
- video, s_dv_timings, timings);
- if (!err) {
- vin->source.width = timings->bt.width;
- vin->source.height = timings->bt.height;
- vin->format.width = timings->bt.width;
- vin->format.height = timings->bt.height;
- }
- return err;
+ int ret;
+
+ ret = v4l2_subdev_call(sd, video, s_dv_timings, timings);
+ if (ret)
+ return ret;
+
+ vin->source.width = timings->bt.width;
+ vin->source.height = timings->bt.height;
+ vin->format.width = timings->bt.width;
+ vin->format.height = timings->bt.height;
+
+ return 0;
}
static int rvin_g_dv_timings(struct file *file, void *priv_fh,
- struct v4l2_dv_timings *timings)
+ struct v4l2_dv_timings *timings)
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- return v4l2_subdev_call(sd,
- video, g_dv_timings, timings);
+ return v4l2_subdev_call(sd, video, g_dv_timings, timings);
}
static int rvin_query_dv_timings(struct file *file, void *priv_fh,
- struct v4l2_dv_timings *timings)
+ struct v4l2_dv_timings *timings)
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- return v4l2_subdev_call(sd,
- video, query_dv_timings, timings);
+ return v4l2_subdev_call(sd, video, query_dv_timings, timings);
}
static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
- struct v4l2_dv_timings_cap *cap)
+ struct v4l2_dv_timings_cap *cap)
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
int pad, ret;
pad = cap->pad;
- cap->pad = vin->src_pad_idx;
+ cap->pad = vin->sink_pad_idx;
ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
@@ -554,6 +613,44 @@ static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
return ret;
}
+static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int input, ret;
+
+ if (edid->pad)
+ return -EINVAL;
+
+ input = edid->pad;
+ edid->pad = vin->sink_pad_idx;
+
+ ret = v4l2_subdev_call(sd, pad, get_edid, edid);
+
+ edid->pad = input;
+
+ return ret;
+}
+
+static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int input, ret;
+
+ if (edid->pad)
+ return -EINVAL;
+
+ input = edid->pad;
+ edid->pad = vin->sink_pad_idx;
+
+ ret = v4l2_subdev_call(sd, pad, set_edid, edid);
+
+ edid->pad = input;
+
+ return ret;
+}
+
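With the new handlers wired up, userspace can read the connected input's EDID through the standard ioctl; a minimal sketch, assuming the subdevice stores at least one 128-byte block:

    uint8_t block[128];
    struct v4l2_edid edid = {
        .pad         = 0,    /* the video node exposes a single input */
        .start_block = 0,
        .blocks      = 1,
        .edid        = block,
    };
    if (ioctl(fd, VIDIOC_G_EDID, &edid) == 0)
        printf("read %u EDID block(s)\n", edid.blocks);
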
static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
.vidioc_querycap = rvin_querycap,
.vidioc_try_fmt_vid_cap = rvin_try_fmt_vid_cap,
@@ -576,6 +673,9 @@ static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
.vidioc_s_dv_timings = rvin_s_dv_timings,
.vidioc_query_dv_timings = rvin_query_dv_timings,
+ .vidioc_g_edid = rvin_g_edid,
+ .vidioc_s_edid = rvin_s_edid,
+
.vidioc_querystd = rvin_querystd,
.vidioc_g_std = rvin_g_std,
.vidioc_s_std = rvin_s_std,
@@ -767,16 +867,9 @@ static void rvin_notify(struct v4l2_subdev *sd,
int rvin_v4l2_probe(struct rvin_dev *vin)
{
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &fmt.format;
struct video_device *vdev = &vin->vdev;
struct v4l2_subdev *sd = vin_to_source(vin);
-#if defined(CONFIG_MEDIA_CONTROLLER)
- int pad_idx;
-#endif
- int ret;
+ int pad_idx, ret;
v4l2_set_subdev_hostdata(sd, vin);
@@ -823,41 +916,23 @@ int rvin_v4l2_probe(struct rvin_dev *vin)
V4L2_CAP_READWRITE;
vin->src_pad_idx = 0;
-#if defined(CONFIG_MEDIA_CONTROLLER)
for (pad_idx = 0; pad_idx < sd->entity.num_pads; pad_idx++)
- if (sd->entity.pads[pad_idx].flags
- == MEDIA_PAD_FL_SOURCE)
+ if (sd->entity.pads[pad_idx].flags == MEDIA_PAD_FL_SOURCE)
break;
if (pad_idx >= sd->entity.num_pads)
return -EINVAL;
vin->src_pad_idx = pad_idx;
-#endif
- fmt.pad = vin->src_pad_idx;
- /* Try to improve our guess of a reasonable window format */
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- if (ret) {
- vin_err(vin, "Failed to get initial format\n");
- return ret;
- }
+ vin->sink_pad_idx = 0;
+ for (pad_idx = 0; pad_idx < sd->entity.num_pads; pad_idx++)
+ if (sd->entity.pads[pad_idx].flags == MEDIA_PAD_FL_SINK) {
+ vin->sink_pad_idx = pad_idx;
+ break;
+ }
- /* Set default format */
- vin->format.width = mf->width;
- vin->format.height = mf->height;
- vin->format.colorspace = mf->colorspace;
- vin->format.field = mf->field;
vin->format.pixelformat = RVIN_DEFAULT_FORMAT;
-
-
- /* Set initial crop and compose */
- vin->crop.top = vin->crop.left = 0;
- vin->crop.width = mf->width;
- vin->crop.height = mf->height;
-
- vin->compose.top = vin->compose.left = 0;
- vin->compose.width = mf->width;
- vin->compose.height = mf->height;
+ rvin_reset_format(vin);
ret = video_register_device(&vin->vdev, VFL_TYPE_GRABBER, -1);
if (ret) {
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index 31ad39a39937..727e215c0871 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -30,9 +30,9 @@
#define HW_BUFFER_MASK 0x7f
enum chip_id {
- RCAR_GEN2,
RCAR_H1,
RCAR_M1,
+ RCAR_GEN2,
};
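
Reordering the enum is harmless because chip_id values only enter the driver through the OF match table; an illustrative mapping (compatible strings are not part of this patch):

    static const struct of_device_id rvin_of_id_table[] = {
        { .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
        { .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
        { /* sentinel */ },
    };
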
/**
@@ -50,12 +50,10 @@ enum rvin_dma_state {
/**
* struct rvin_source_fmt - Source information
- * @code: Media bus format from source
* @width: Width from source
* @height: Height from source
*/
struct rvin_source_fmt {
- u32 code;
u32 width;
u32 height;
};
@@ -70,12 +68,19 @@ struct rvin_video_format {
u8 bpp;
};
+/**
+ * struct rvin_graph_entity - Video endpoint from async framework
+ * @asd: sub-device descriptor for async framework
+ * @subdev: subdevice matched using async framework
+ * @code: Media bus format from source
+ * @mbus_cfg: Media bus format from DT
+ */
struct rvin_graph_entity {
- struct device_node *node;
- struct media_entity *entity;
-
struct v4l2_async_subdev asd;
struct v4l2_subdev *subdev;
+
+ u32 code;
+ struct v4l2_mbus_config mbus_cfg;
};
/**
@@ -83,14 +88,14 @@ struct rvin_graph_entity {
* @dev: (OF) device
* @base: device I/O register space remapped to virtual memory
* @chip: type of VIN chip
- * @mbus_cfg media bus configuration
*
* @vdev: V4L2 video device associated with VIN
* @v4l2_dev: V4L2 device
* @src_pad_idx: source pad index for media controller drivers
+ * @sink_pad_idx: sink pad index for media controller drivers
* @ctrl_handler: V4L2 control handler
* @notifier: V4L2 asynchronous subdevs notifier
- * @entity: entity in the DT for subdevice
+ * @digital: entity in the DT for local digital subdevice
*
* @lock: protects @queue
* @queue: vb2 buffers queue
@@ -113,14 +118,14 @@ struct rvin_dev {
struct device *dev;
void __iomem *base;
enum chip_id chip;
- struct v4l2_mbus_config mbus_cfg;
struct video_device vdev;
struct v4l2_device v4l2_dev;
int src_pad_idx;
+ int sink_pad_idx;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_async_notifier notifier;
- struct rvin_graph_entity entity;
+ struct rvin_graph_entity digital;
struct mutex lock;
struct vb2_queue queue;
@@ -139,7 +144,7 @@ struct rvin_dev {
struct v4l2_rect compose;
};
-#define vin_to_source(vin) vin->entity.subdev
+#define vin_to_source(vin) vin->digital.subdev
/* Debug */
#define vin_dbg(d, fmt, arg...) dev_dbg(d->dev, fmt, ##arg)
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 16782ceb29c3..d1746ecc645d 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1183,7 +1183,7 @@ static void jpu_stop_streaming(struct vb2_queue *vq)
}
}
-static struct vb2_ops jpu_qops = {
+static const struct vb2_ops jpu_qops = {
.queue_setup = jpu_queue_setup,
.buf_prepare = jpu_buf_prepare,
.buf_queue = jpu_buf_queue,
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 391dd7a7b362..62c0dec30b59 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -138,7 +138,7 @@ static void g2d_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
-static struct vb2_ops g2d_qops = {
+static const struct vb2_ops g2d_qops = {
.queue_setup = g2d_queue_setup,
.buf_prepare = g2d_buf_prepare,
.buf_queue = g2d_buf_queue,
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 785e6936c881..52dc7941db65 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -537,6 +537,7 @@ static const u32 fourcc_to_dwngrd_schema_id[] = {
static int s5p_jpeg_get_dwngrd_sch_id_by_fourcc(u32 fourcc)
{
int i;
+
for (i = 0; i < ARRAY_SIZE(fourcc_to_dwngrd_schema_id); ++i) {
if (fourcc_to_dwngrd_schema_id[i] == fourcc)
return i;
@@ -1246,17 +1247,18 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE) {
- strlcpy(cap->driver, S5P_JPEG_M2M_NAME " encoder",
+ strlcpy(cap->driver, S5P_JPEG_M2M_NAME,
sizeof(cap->driver));
strlcpy(cap->card, S5P_JPEG_M2M_NAME " encoder",
sizeof(cap->card));
} else {
- strlcpy(cap->driver, S5P_JPEG_M2M_NAME " decoder",
+ strlcpy(cap->driver, S5P_JPEG_M2M_NAME,
sizeof(cap->driver));
strlcpy(cap->card, S5P_JPEG_M2M_NAME " decoder",
sizeof(cap->card));
}
- cap->bus_info[0] = 0;
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(ctx->jpeg->dev));
cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
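
Filling bus_info with "platform:" plus the device name brings the driver in line with the V4L2 spec, which requires a unique, non-empty bus_info. From userspace the result is visible via VIDIOC_QUERYCAP; a small sketch (the printed address is hypothetical):

    struct v4l2_capability cap;
    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
        printf("driver=%s card=%s bus=%s\n",
               cap.driver, cap.card, cap.bus_info);
    /* e.g. bus=platform:11e00000.jpeg */
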
@@ -1273,7 +1275,8 @@ static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
if (num == f->index)
break;
/* Correct type but haven't reached our index yet,
- * just increment per-type index */
+ * just increment per-type index
+ */
++num;
}
}
@@ -1349,6 +1352,7 @@ static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
pix->bytesperline = 0;
if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
u32 bpl = q_data->w;
+
if (q_data->fmt->colplanes == 1)
bpl = (bpl * q_data->fmt->depth) >> 3;
pix->bytesperline = bpl;
@@ -1374,6 +1378,7 @@ static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
for (k = 0; k < ARRAY_SIZE(sjpeg_formats); k++) {
struct s5p_jpeg_fmt *fmt = &sjpeg_formats[k];
+
if (fmt->fourcc == pixelformat &&
fmt->flags & fmt_flag &&
fmt->flags & ctx->jpeg->variant->fmt_ver_flag) {
@@ -1431,7 +1436,8 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
return -EINVAL;
/* V4L2 specification suggests the driver corrects the format struct
- * if any of the dimensions is unsupported */
+ * if any of the dimensions is unsupported
+ */
if (q_type == FMT_TYPE_OUTPUT)
jpeg_bound_align_image(ctx, &pix->width, S5P_JPEG_MIN_WIDTH,
S5P_JPEG_MAX_WIDTH, 0,
@@ -2489,6 +2495,7 @@ static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
if (ctx->mode == S5P_JPEG_DECODE &&
vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
struct s5p_jpeg_q_data tmp, *q_data;
+
ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp,
(unsigned long)vb2_plane_vaddr(vb, 0),
min((unsigned long)ctx->out_q.size,
@@ -2538,7 +2545,7 @@ static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
pm_runtime_put(ctx->jpeg->dev);
}
-static struct vb2_ops s5p_jpeg_qops = {
+static const struct vb2_ops s5p_jpeg_qops = {
.queue_setup = s5p_jpeg_queue_setup,
.buf_prepare = s5p_jpeg_buf_prepare,
.buf_queue = s5p_jpeg_buf_queue,
@@ -2996,27 +3003,11 @@ static int s5p_jpeg_runtime_resume(struct device *dev)
}
#endif /* CONFIG_PM */
-#ifdef CONFIG_PM_SLEEP
-static int s5p_jpeg_suspend(struct device *dev)
-{
- if (pm_runtime_suspended(dev))
- return 0;
-
- return s5p_jpeg_runtime_suspend(dev);
-}
-
-static int s5p_jpeg_resume(struct device *dev)
-{
- if (pm_runtime_suspended(dev))
- return 0;
-
- return s5p_jpeg_runtime_resume(dev);
-}
-#endif
-
static const struct dev_pm_ops s5p_jpeg_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume)
- SET_RUNTIME_PM_OPS(s5p_jpeg_runtime_suspend, s5p_jpeg_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(s5p_jpeg_runtime_suspend, s5p_jpeg_runtime_resume,
+ NULL)
};
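
pm_runtime_force_suspend() and pm_runtime_force_resume() are the generic helpers for exactly what the deleted wrappers did by hand: at system sleep they invoke the runtime-PM callbacks only when the device is not already runtime-suspended, and they keep the runtime-PM state machine consistent across resume. The resulting boilerplate for a driver (placeholder foo_ names):

    static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };
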
static struct s5p_jpeg_variant s5p_jpeg_drvdata = {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index e3f104fafd0a..0a5b8f5e011e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -153,7 +153,7 @@ static void s5p_mfc_watchdog(unsigned long arg)
* error. Now it is time to kill all instances and
* reset the MFC. */
mfc_err("Time out during waiting for HW\n");
- queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
+ schedule_work(&dev->watchdog_work);
}
dev->watchdog_timer.expires = jiffies +
msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
@@ -494,7 +494,6 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
s5p_mfc_clock_off();
wake_up_dev(dev, reason, err);
- return;
}
/* Header parsing interrupt handling */
@@ -759,7 +758,6 @@ static int s5p_mfc_open(struct file *file)
/* Allocate memory for context */
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
- mfc_err("Not enough memory\n");
ret = -ENOMEM;
goto err_alloc;
}
@@ -776,7 +774,7 @@ static int s5p_mfc_open(struct file *file)
while (dev->ctx[ctx->num]) {
ctx->num++;
if (ctx->num >= MFC_NUM_CONTEXTS) {
- mfc_err("Too many open contexts\n");
+ mfc_debug(2, "Too many open contexts\n");
ret = -EBUSY;
goto err_no_ctx;
}
@@ -924,39 +922,50 @@ static int s5p_mfc_release(struct file *file)
struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
struct s5p_mfc_dev *dev = ctx->dev;
+ /* if dev is NULL, do only the cleanup that doesn't need dev */
mfc_debug_enter();
- mutex_lock(&dev->mfc_mutex);
+ if (dev)
+ mutex_lock(&dev->mfc_mutex);
s5p_mfc_clock_on();
vb2_queue_release(&ctx->vq_src);
vb2_queue_release(&ctx->vq_dst);
- /* Mark context as idle */
- clear_work_bit_irqsave(ctx);
- /* If instance was initialised and not yet freed,
- * return instance and free resources */
- if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
- mfc_debug(2, "Has to free instance\n");
- s5p_mfc_close_mfc_inst(dev, ctx);
- }
- /* hardware locking scheme */
- if (dev->curr_ctx == ctx->num)
- clear_bit(0, &dev->hw_lock);
- dev->num_inst--;
- if (dev->num_inst == 0) {
- mfc_debug(2, "Last instance\n");
- s5p_mfc_deinit_hw(dev);
- del_timer_sync(&dev->watchdog_timer);
- if (s5p_mfc_power_off() < 0)
- mfc_err("Power off failed\n");
+ if (dev) {
+ /* Mark context as idle */
+ clear_work_bit_irqsave(ctx);
+ /*
+ * If instance was initialised and not yet freed,
+ * return instance and free resources
+ */
+ if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
+ mfc_debug(2, "Has to free instance\n");
+ s5p_mfc_close_mfc_inst(dev, ctx);
+ }
+ /* hardware locking scheme */
+ if (dev->curr_ctx == ctx->num)
+ clear_bit(0, &dev->hw_lock);
+ dev->num_inst--;
+ if (dev->num_inst == 0) {
+ mfc_debug(2, "Last instance\n");
+ s5p_mfc_deinit_hw(dev);
+ del_timer_sync(&dev->watchdog_timer);
+ if (s5p_mfc_power_off() < 0)
+ mfc_err("Power off failed\n");
+ }
}
mfc_debug(2, "Shutting down clock\n");
s5p_mfc_clock_off();
- dev->ctx[ctx->num] = NULL;
+ if (dev)
+ dev->ctx[ctx->num] = NULL;
s5p_mfc_dec_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
+ /* vdev is gone if dev is NULL */
+ if (dev)
+ v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mfc_debug_leave();
- mutex_unlock(&dev->mfc_mutex);
+ if (dev)
+ mutex_unlock(&dev->mfc_mutex);
+
return 0;
}
@@ -1158,10 +1167,6 @@ static int s5p_mfc_probe(struct platform_device *pdev)
dev->variant = mfc_get_drv_data(pdev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get io resource\n");
- return -ENOENT;
- }
dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
@@ -1241,7 +1246,6 @@ static int s5p_mfc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
dev->hw_lock = 0;
- dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
atomic_set(&dev->watchdog_cnt, 0);
init_timer(&dev->watchdog_timer);
@@ -1298,12 +1302,28 @@ err_dma:
static int s5p_mfc_remove(struct platform_device *pdev)
{
struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
+ struct s5p_mfc_ctx *ctx;
+ int i;
v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);
+ /*
+ * Clear each context's dev pointer so that s5p_mfc_release()
+ * cannot race with this function and access ctx->dev after
+ * s5p_mfc_remove() has run during unbind.
+ */
+ mutex_lock(&dev->mfc_mutex);
+ for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
+ ctx = dev->ctx[i];
+ if (!ctx)
+ continue;
+ /* clear ctx->dev */
+ ctx->dev = NULL;
+ }
+ mutex_unlock(&dev->mfc_mutex);
+
del_timer_sync(&dev->watchdog_timer);
- flush_workqueue(dev->watchdog_workqueue);
- destroy_workqueue(dev->watchdog_workqueue);
+ flush_work(&dev->watchdog_work);
video_unregister_device(dev->vfd_enc);
video_unregister_device(dev->vfd_dec);
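
Dropping the private single-threaded workqueue works because the watchdog work item needs neither ordering against other work nor a dedicated execution context; the system workqueue plus flush_work() on the single item gives the same guarantee with less setup. The reduced life cycle (placeholder worker name):

    INIT_WORK(&dev->watchdog_work, foo_watchdog_worker); /* probe    */
    schedule_work(&dev->watchdog_work);                  /* watchdog */
    flush_work(&dev->watchdog_work);                     /* remove: wait
                                                            for the worker */
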
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 373e346fce3e..46b99f28cbd7 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -292,7 +292,9 @@ struct s5p_mfc_priv_buf {
* @warn_start: hardware error code from which warnings start
* @mfc_ops: ops structure holding HW operation function pointers
* @mfc_cmds: cmd structure holding HW commands function pointers
+ * @mfc_regs: structure holding MFC registers
* @fw_ver: loaded firmware sub-version
+ * @risc_on: flag indicating whether the RISC processor is on or off
*
*/
struct s5p_mfc_dev {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 47c997d9e8cb..52081ddc9bf2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -776,11 +776,12 @@ static int vidioc_g_crop(struct file *file, void *priv,
u32 left, right, top, bottom;
if (ctx->state != MFCINST_HEAD_PARSED &&
- ctx->state != MFCINST_RUNNING && ctx->state != MFCINST_FINISHING
- && ctx->state != MFCINST_FINISHED) {
- mfc_err("Cannont set crop\n");
- return -EINVAL;
- }
+ ctx->state != MFCINST_RUNNING &&
+ ctx->state != MFCINST_FINISHING &&
+ ctx->state != MFCINST_FINISHED) {
+ mfc_err("Can not get crop information\n");
+ return -EINVAL;
+ }
if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) {
left = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_h, ctx);
right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
deleted file mode 100644
index 697aaed42486..000000000000
--- a/drivers/media/platform/s5p-tv/Kconfig
+++ /dev/null
@@ -1,88 +0,0 @@
-# drivers/media/platform/s5p-tv/Kconfig
-#
-# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-# Tomasz Stanislawski <t.stanislaws@samsung.com>
-#
-# Licensed under GPL
-
-config VIDEO_SAMSUNG_S5P_TV
- bool "Samsung TV driver for S5P platform"
- depends on PM
- depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
- default n
- ---help---
- Say Y here to enable selecting the TV output devices for
- Samsung S5P platform.
-
-if VIDEO_SAMSUNG_S5P_TV
-
-config VIDEO_SAMSUNG_S5P_HDMI
- tristate "Samsung HDMI Driver"
- depends on VIDEO_V4L2
- depends on I2C
- depends on VIDEO_SAMSUNG_S5P_TV
- select VIDEO_SAMSUNG_S5P_HDMIPHY
- help
- Say Y here if you want support for the HDMI output
- interface in S5P Samsung SoC. The driver can be compiled
- as module. It is an auxiliary driver, that exposes a V4L2
- subdev for use by other drivers. This driver requires
- hdmiphy driver to work correctly.
-
-config VIDEO_SAMSUNG_S5P_HDMI_DEBUG
- bool "Enable debug for HDMI Driver"
- depends on VIDEO_SAMSUNG_S5P_HDMI
- default n
- help
- Enables debugging for HDMI driver.
-
-config VIDEO_SAMSUNG_S5P_HDMIPHY
- tristate "Samsung HDMIPHY Driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && I2C
- depends on VIDEO_SAMSUNG_S5P_TV
- help
- Say Y here if you want support for the physical HDMI
- interface in S5P Samsung SoC. The driver can be compiled
- as module. It is an I2C driver, that exposes a V4L2
- subdev for use by other drivers.
-
-config VIDEO_SAMSUNG_S5P_SII9234
- tristate "Samsung SII9234 Driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && I2C
- depends on VIDEO_SAMSUNG_S5P_TV
- help
- Say Y here if you want support for the MHL interface
- in S5P Samsung SoC. The driver can be compiled
- as module. It is an I2C driver, that exposes a V4L2
- subdev for use by other drivers.
-
-config VIDEO_SAMSUNG_S5P_SDO
- tristate "Samsung Analog TV Driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on VIDEO_SAMSUNG_S5P_TV
- help
- Say Y here if you want support for the analog TV output
- interface in S5P Samsung SoC. The driver can be compiled
- as module. It is an auxiliary driver, that exposes a V4L2
- subdev for use by other drivers. This driver requires
- hdmiphy driver to work correctly.
-
-config VIDEO_SAMSUNG_S5P_MIXER
- tristate "Samsung Mixer and Video Processor Driver"
- depends on VIDEO_DEV && VIDEO_V4L2
- depends on VIDEO_SAMSUNG_S5P_TV
- depends on HAS_DMA
- select VIDEOBUF2_DMA_CONTIG
- help
- Say Y here if you want support for the Mixer in Samsung S5P SoCs.
- This device produces image data for one of the output interfaces.
-
-config VIDEO_SAMSUNG_S5P_MIXER_DEBUG
- bool "Enable debug for Mixer Driver"
- depends on VIDEO_SAMSUNG_S5P_MIXER
- default n
- help
- Enables debugging for Mixer driver.
-
-endif # VIDEO_SAMSUNG_S5P_TV
diff --git a/drivers/media/platform/s5p-tv/Makefile b/drivers/media/platform/s5p-tv/Makefile
deleted file mode 100644
index 7cd47902e269..000000000000
--- a/drivers/media/platform/s5p-tv/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# drivers/media/platform/samsung/tvout/Makefile
-#
-# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-# Tomasz Stanislawski <t.stanislaws@samsung.com>
-#
-# Licensed under GPL
-
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMIPHY) += s5p-hdmiphy.o
-s5p-hdmiphy-y += hdmiphy_drv.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_SII9234) += s5p-sii9234.o
-s5p-sii9234-y += sii9234_drv.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMI) += s5p-hdmi.o
-s5p-hdmi-y += hdmi_drv.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_SDO) += s5p-sdo.o
-s5p-sdo-y += sdo_drv.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MIXER) += s5p-mixer.o
-s5p-mixer-y += mixer_drv.o mixer_video.o mixer_reg.o mixer_grp_layer.o mixer_vp_layer.o
-
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
deleted file mode 100644
index e71b13e40f59..000000000000
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ /dev/null
@@ -1,1059 +0,0 @@
-/*
- * Samsung HDMI interface driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundiation. either version 2 of the License,
- * or (at your option) any later version
- */
-
-#define pr_fmt(fmt) "s5p-tv (hdmi_drv): " fmt
-
-#ifdef CONFIG_VIDEO_SAMSUNG_S5P_HDMI_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/i2c.h>
-#include <linux/platform_device.h>
-#include <media/v4l2-subdev.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/bug.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-#include <linux/v4l2-dv-timings.h>
-
-#include <linux/platform_data/media/s5p_hdmi.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-dv-timings.h>
-
-#include "regs-hdmi.h"
-
-MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung HDMI");
-MODULE_LICENSE("GPL");
-
-struct hdmi_pulse {
- u32 beg;
- u32 end;
-};
-
-struct hdmi_timings {
- struct hdmi_pulse hact;
- u32 hsyn_pol; /* 0 - high, 1 - low */
- struct hdmi_pulse hsyn;
- u32 interlaced;
- struct hdmi_pulse vact[2];
- u32 vsyn_pol; /* 0 - high, 1 - low */
- u32 vsyn_off;
- struct hdmi_pulse vsyn[2];
-};
-
-struct hdmi_resources {
- struct clk *hdmi;
- struct clk *sclk_hdmi;
- struct clk *sclk_pixel;
- struct clk *sclk_hdmiphy;
- struct clk *hdmiphy;
- struct regulator_bulk_data *regul_bulk;
- int regul_count;
-};
-
-struct hdmi_device {
- /** base address of HDMI registers */
- void __iomem *regs;
- /** HDMI interrupt */
- unsigned int irq;
- /** pointer to device parent */
- struct device *dev;
- /** subdev generated by HDMI device */
- struct v4l2_subdev sd;
- /** V4L2 device structure */
- struct v4l2_device v4l2_dev;
- /** subdev of HDMIPHY interface */
- struct v4l2_subdev *phy_sd;
- /** subdev of MHL interface */
- struct v4l2_subdev *mhl_sd;
- /** configuration of current graphic mode */
- const struct hdmi_timings *cur_conf;
- /** flag indicating that timings are dirty */
- int cur_conf_dirty;
- /** current timings */
- struct v4l2_dv_timings cur_timings;
- /** other resources */
- struct hdmi_resources res;
-};
-
-static const struct platform_device_id hdmi_driver_types[] = {
- {
- .name = "s5pv210-hdmi",
- }, {
- .name = "exynos4-hdmi",
- }, {
- /* end node */
- }
-};
-
-static const struct v4l2_subdev_ops hdmi_sd_ops;
-
-static struct hdmi_device *sd_to_hdmi_dev(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct hdmi_device, sd);
-}
-
-static inline
-void hdmi_write(struct hdmi_device *hdev, u32 reg_id, u32 value)
-{
- writel(value, hdev->regs + reg_id);
-}
-
-static inline
-void hdmi_write_mask(struct hdmi_device *hdev, u32 reg_id, u32 value, u32 mask)
-{
- u32 old = readl(hdev->regs + reg_id);
- value = (value & mask) | (old & ~mask);
- writel(value, hdev->regs + reg_id);
-}
-
-static inline
-void hdmi_writeb(struct hdmi_device *hdev, u32 reg_id, u8 value)
-{
- writeb(value, hdev->regs + reg_id);
-}
-
-static inline
-void hdmi_writebn(struct hdmi_device *hdev, u32 reg_id, int n, u32 value)
-{
- switch (n) {
- default:
- writeb(value >> 24, hdev->regs + reg_id + 12);
- case 3:
- writeb(value >> 16, hdev->regs + reg_id + 8);
- case 2:
- writeb(value >> 8, hdev->regs + reg_id + 4);
- case 1:
- writeb(value >> 0, hdev->regs + reg_id + 0);
- }
-}
-
-static inline u32 hdmi_read(struct hdmi_device *hdev, u32 reg_id)
-{
- return readl(hdev->regs + reg_id);
-}
-
-static irqreturn_t hdmi_irq_handler(int irq, void *dev_data)
-{
- struct hdmi_device *hdev = dev_data;
- u32 intc_flag;
-
- (void)irq;
- intc_flag = hdmi_read(hdev, HDMI_INTC_FLAG);
- /* clearing flags for HPD plug/unplug */
- if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
- pr_info("unplugged\n");
- hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
- HDMI_INTC_FLAG_HPD_UNPLUG);
- }
- if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
- pr_info("plugged\n");
- hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
- HDMI_INTC_FLAG_HPD_PLUG);
- }
-
- return IRQ_HANDLED;
-}
-
-static void hdmi_reg_init(struct hdmi_device *hdev)
-{
- /* enable HPD interrupts */
- hdmi_write_mask(hdev, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
- HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
- /* choose DVI mode */
- hdmi_write_mask(hdev, HDMI_MODE_SEL,
- HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
- hdmi_write_mask(hdev, HDMI_CON_2, ~0,
- HDMI_DVI_PERAMBLE_EN | HDMI_DVI_BAND_EN);
- /* disable bluescreen */
- hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
- /* choose bluescreen (fecal) color */
- hdmi_writeb(hdev, HDMI_BLUE_SCREEN_0, 0x12);
- hdmi_writeb(hdev, HDMI_BLUE_SCREEN_1, 0x34);
- hdmi_writeb(hdev, HDMI_BLUE_SCREEN_2, 0x56);
-}
-
-static void hdmi_timing_apply(struct hdmi_device *hdev,
- const struct hdmi_timings *t)
-{
- /* setting core registers */
- hdmi_writebn(hdev, HDMI_H_BLANK_0, 2, t->hact.beg);
- hdmi_writebn(hdev, HDMI_H_SYNC_GEN_0, 3,
- (t->hsyn_pol << 20) | (t->hsyn.end << 10) | t->hsyn.beg);
- hdmi_writeb(hdev, HDMI_VSYNC_POL, t->vsyn_pol);
- hdmi_writebn(hdev, HDMI_V_BLANK_0, 3,
- (t->vact[0].beg << 11) | t->vact[0].end);
- hdmi_writebn(hdev, HDMI_V_SYNC_GEN_1_0, 3,
- (t->vsyn[0].beg << 12) | t->vsyn[0].end);
- if (t->interlaced) {
- u32 vsyn_trans = t->hsyn.beg + t->vsyn_off;
-
- hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 1);
- hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
- (t->hact.end << 12) | t->vact[1].end);
- hdmi_writebn(hdev, HDMI_V_BLANK_F_0, 3,
- (t->vact[1].end << 11) | t->vact[1].beg);
- hdmi_writebn(hdev, HDMI_V_SYNC_GEN_2_0, 3,
- (t->vsyn[1].beg << 12) | t->vsyn[1].end);
- hdmi_writebn(hdev, HDMI_V_SYNC_GEN_3_0, 3,
- (vsyn_trans << 12) | vsyn_trans);
- } else {
- hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 0);
- hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
- (t->hact.end << 12) | t->vact[0].end);
- }
-
- /* Timing generator registers */
- hdmi_writebn(hdev, HDMI_TG_H_FSZ_L, 2, t->hact.end);
- hdmi_writebn(hdev, HDMI_TG_HACT_ST_L, 2, t->hact.beg);
- hdmi_writebn(hdev, HDMI_TG_HACT_SZ_L, 2, t->hact.end - t->hact.beg);
- hdmi_writebn(hdev, HDMI_TG_VSYNC_L, 2, t->vsyn[0].beg);
- hdmi_writebn(hdev, HDMI_TG_VACT_ST_L, 2, t->vact[0].beg);
- hdmi_writebn(hdev, HDMI_TG_VACT_SZ_L, 2,
- t->vact[0].end - t->vact[0].beg);
- hdmi_writebn(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, 2, t->vsyn[0].beg);
- hdmi_writebn(hdev, HDMI_TG_FIELD_TOP_HDMI_L, 2, t->vsyn[0].beg);
- if (t->interlaced) {
- hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_FIELD_EN);
- hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[1].end);
- hdmi_writebn(hdev, HDMI_TG_VSYNC2_L, 2, t->vsyn[1].beg);
- hdmi_writebn(hdev, HDMI_TG_FIELD_CHG_L, 2, t->vact[0].end);
- hdmi_writebn(hdev, HDMI_TG_VACT_ST2_L, 2, t->vact[1].beg);
- hdmi_writebn(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, 2, t->vsyn[1].beg);
- hdmi_writebn(hdev, HDMI_TG_FIELD_BOT_HDMI_L, 2, t->vsyn[1].beg);
- } else {
- hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_FIELD_EN);
- hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[0].end);
- }
-}
-
-static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
-{
- struct device *dev = hdmi_dev->dev;
- const struct hdmi_timings *conf = hdmi_dev->cur_conf;
- int ret;
-
- dev_dbg(dev, "%s\n", __func__);
-
- /* skip if conf is already synchronized with HW */
- if (!hdmi_dev->cur_conf_dirty)
- return 0;
-
- /* reset hdmiphy */
- hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
- mdelay(10);
- hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
- mdelay(10);
-
- /* configure timings */
- ret = v4l2_subdev_call(hdmi_dev->phy_sd, video, s_dv_timings,
- &hdmi_dev->cur_timings);
- if (ret) {
- dev_err(dev, "failed to set timings\n");
- return ret;
- }
-
- /* resetting HDMI core */
- hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
- mdelay(10);
- hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
- mdelay(10);
-
- hdmi_reg_init(hdmi_dev);
-
- /* setting core registers */
- hdmi_timing_apply(hdmi_dev, conf);
-
- hdmi_dev->cur_conf_dirty = 0;
-
- return 0;
-}
-
-static void hdmi_dumpregs(struct hdmi_device *hdev, char *prefix)
-{
-#define DUMPREG(reg_id) \
- dev_dbg(hdev->dev, "%s:" #reg_id " = %08x\n", prefix, \
- readl(hdev->regs + reg_id))
-
- dev_dbg(hdev->dev, "%s: ---- CONTROL REGISTERS ----\n", prefix);
- DUMPREG(HDMI_INTC_FLAG);
- DUMPREG(HDMI_INTC_CON);
- DUMPREG(HDMI_HPD_STATUS);
- DUMPREG(HDMI_PHY_RSTOUT);
- DUMPREG(HDMI_PHY_VPLL);
- DUMPREG(HDMI_PHY_CMU);
- DUMPREG(HDMI_CORE_RSTOUT);
-
- dev_dbg(hdev->dev, "%s: ---- CORE REGISTERS ----\n", prefix);
- DUMPREG(HDMI_CON_0);
- DUMPREG(HDMI_CON_1);
- DUMPREG(HDMI_CON_2);
- DUMPREG(HDMI_SYS_STATUS);
- DUMPREG(HDMI_PHY_STATUS);
- DUMPREG(HDMI_STATUS_EN);
- DUMPREG(HDMI_HPD);
- DUMPREG(HDMI_MODE_SEL);
- DUMPREG(HDMI_HPD_GEN);
- DUMPREG(HDMI_DC_CONTROL);
- DUMPREG(HDMI_VIDEO_PATTERN_GEN);
-
- dev_dbg(hdev->dev, "%s: ---- CORE SYNC REGISTERS ----\n", prefix);
- DUMPREG(HDMI_H_BLANK_0);
- DUMPREG(HDMI_H_BLANK_1);
- DUMPREG(HDMI_V_BLANK_0);
- DUMPREG(HDMI_V_BLANK_1);
- DUMPREG(HDMI_V_BLANK_2);
- DUMPREG(HDMI_H_V_LINE_0);
- DUMPREG(HDMI_H_V_LINE_1);
- DUMPREG(HDMI_H_V_LINE_2);
- DUMPREG(HDMI_VSYNC_POL);
- DUMPREG(HDMI_INT_PRO_MODE);
- DUMPREG(HDMI_V_BLANK_F_0);
- DUMPREG(HDMI_V_BLANK_F_1);
- DUMPREG(HDMI_V_BLANK_F_2);
- DUMPREG(HDMI_H_SYNC_GEN_0);
- DUMPREG(HDMI_H_SYNC_GEN_1);
- DUMPREG(HDMI_H_SYNC_GEN_2);
- DUMPREG(HDMI_V_SYNC_GEN_1_0);
- DUMPREG(HDMI_V_SYNC_GEN_1_1);
- DUMPREG(HDMI_V_SYNC_GEN_1_2);
- DUMPREG(HDMI_V_SYNC_GEN_2_0);
- DUMPREG(HDMI_V_SYNC_GEN_2_1);
- DUMPREG(HDMI_V_SYNC_GEN_2_2);
- DUMPREG(HDMI_V_SYNC_GEN_3_0);
- DUMPREG(HDMI_V_SYNC_GEN_3_1);
- DUMPREG(HDMI_V_SYNC_GEN_3_2);
-
- dev_dbg(hdev->dev, "%s: ---- TG REGISTERS ----\n", prefix);
- DUMPREG(HDMI_TG_CMD);
- DUMPREG(HDMI_TG_H_FSZ_L);
- DUMPREG(HDMI_TG_H_FSZ_H);
- DUMPREG(HDMI_TG_HACT_ST_L);
- DUMPREG(HDMI_TG_HACT_ST_H);
- DUMPREG(HDMI_TG_HACT_SZ_L);
- DUMPREG(HDMI_TG_HACT_SZ_H);
- DUMPREG(HDMI_TG_V_FSZ_L);
- DUMPREG(HDMI_TG_V_FSZ_H);
- DUMPREG(HDMI_TG_VSYNC_L);
- DUMPREG(HDMI_TG_VSYNC_H);
- DUMPREG(HDMI_TG_VSYNC2_L);
- DUMPREG(HDMI_TG_VSYNC2_H);
- DUMPREG(HDMI_TG_VACT_ST_L);
- DUMPREG(HDMI_TG_VACT_ST_H);
- DUMPREG(HDMI_TG_VACT_SZ_L);
- DUMPREG(HDMI_TG_VACT_SZ_H);
- DUMPREG(HDMI_TG_FIELD_CHG_L);
- DUMPREG(HDMI_TG_FIELD_CHG_H);
- DUMPREG(HDMI_TG_VACT_ST2_L);
- DUMPREG(HDMI_TG_VACT_ST2_H);
- DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
- DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
- DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
- DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
- DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
- DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
- DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
- DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
-#undef DUMPREG
-}
-
-static const struct hdmi_timings hdmi_timings_480p = {
- .hact = { .beg = 138, .end = 858 },
- .hsyn_pol = 1,
- .hsyn = { .beg = 16, .end = 16 + 62 },
- .interlaced = 0,
- .vact[0] = { .beg = 42 + 3, .end = 522 + 3 },
- .vsyn_pol = 1,
- .vsyn[0] = { .beg = 6 + 3, .end = 12 + 3},
-};
-
-static const struct hdmi_timings hdmi_timings_576p50 = {
- .hact = { .beg = 144, .end = 864 },
- .hsyn_pol = 1,
- .hsyn = { .beg = 12, .end = 12 + 64 },
- .interlaced = 0,
- .vact[0] = { .beg = 44 + 5, .end = 620 + 5 },
- .vsyn_pol = 1,
- .vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
-};
-
-static const struct hdmi_timings hdmi_timings_720p60 = {
- .hact = { .beg = 370, .end = 1650 },
- .hsyn_pol = 0,
- .hsyn = { .beg = 110, .end = 110 + 40 },
- .interlaced = 0,
- .vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
- .vsyn_pol = 0,
- .vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
-};
-
-static const struct hdmi_timings hdmi_timings_720p50 = {
- .hact = { .beg = 700, .end = 1980 },
- .hsyn_pol = 0,
- .hsyn = { .beg = 440, .end = 440 + 40 },
- .interlaced = 0,
- .vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
- .vsyn_pol = 0,
- .vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
-};
-
-static const struct hdmi_timings hdmi_timings_1080p24 = {
- .hact = { .beg = 830, .end = 2750 },
- .hsyn_pol = 0,
- .hsyn = { .beg = 638, .end = 638 + 44 },
- .interlaced = 0,
- .vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
- .vsyn_pol = 0,
- .vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
-};
-
-static const struct hdmi_timings hdmi_timings_1080p60 = {
- .hact = { .beg = 280, .end = 2200 },
- .hsyn_pol = 0,
- .hsyn = { .beg = 88, .end = 88 + 44 },
- .interlaced = 0,
- .vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
- .vsyn_pol = 0,
- .vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
-};
-
-static const struct hdmi_timings hdmi_timings_1080i60 = {
- .hact = { .beg = 280, .end = 2200 },
- .hsyn_pol = 0,
- .hsyn = { .beg = 88, .end = 88 + 44 },
- .interlaced = 1,
- .vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
- .vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
- .vsyn_pol = 0,
- .vsyn_off = 1100,
- .vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
- .vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
-};
-
-static const struct hdmi_timings hdmi_timings_1080i50 = {
- .hact = { .beg = 720, .end = 2640 },
- .hsyn_pol = 0,
- .hsyn = { .beg = 528, .end = 528 + 44 },
- .interlaced = 1,
- .vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
- .vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
- .vsyn_pol = 0,
- .vsyn_off = 1320,
- .vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
- .vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
-};
-
-static const struct hdmi_timings hdmi_timings_1080p50 = {
- .hact = { .beg = 720, .end = 2640 },
- .hsyn_pol = 0,
- .hsyn = { .beg = 528, .end = 528 + 44 },
- .interlaced = 0,
- .vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
- .vsyn_pol = 0,
- .vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
-};
-
-/* default hdmi_timings index of the timings configured on probe */
-#define HDMI_DEFAULT_TIMINGS_IDX (0)
-
-static const struct {
- bool reduced_fps;
- const struct v4l2_dv_timings dv_timings;
- const struct hdmi_timings *hdmi_timings;
-} hdmi_timings[] = {
- { false, V4L2_DV_BT_CEA_720X480P59_94, &hdmi_timings_480p },
- { false, V4L2_DV_BT_CEA_720X576P50, &hdmi_timings_576p50 },
- { false, V4L2_DV_BT_CEA_1280X720P50, &hdmi_timings_720p50 },
- { true, V4L2_DV_BT_CEA_1280X720P60, &hdmi_timings_720p60 },
- { false, V4L2_DV_BT_CEA_1920X1080P24, &hdmi_timings_1080p24 },
- { false, V4L2_DV_BT_CEA_1920X1080P30, &hdmi_timings_1080p60 },
- { false, V4L2_DV_BT_CEA_1920X1080P50, &hdmi_timings_1080p50 },
- { false, V4L2_DV_BT_CEA_1920X1080I50, &hdmi_timings_1080i50 },
- { false, V4L2_DV_BT_CEA_1920X1080I60, &hdmi_timings_1080i60 },
- { false, V4L2_DV_BT_CEA_1920X1080P60, &hdmi_timings_1080p60 },
-};
-
-static int hdmi_streamon(struct hdmi_device *hdev)
-{
- struct device *dev = hdev->dev;
- struct hdmi_resources *res = &hdev->res;
- int ret, tries;
-
- dev_dbg(dev, "%s\n", __func__);
-
- ret = hdmi_conf_apply(hdev);
- if (ret)
- return ret;
-
- ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
- if (ret)
- return ret;
-
- /* waiting for HDMIPHY's PLL to get to steady state */
- for (tries = 100; tries; --tries) {
- u32 val = hdmi_read(hdev, HDMI_PHY_STATUS);
- if (val & HDMI_PHY_STATUS_READY)
- break;
- mdelay(1);
- }
- /* steady state not achieved */
- if (tries == 0) {
- dev_err(dev, "hdmiphy's pll could not reach steady state.\n");
- v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
- hdmi_dumpregs(hdev, "hdmiphy - s_stream");
- return -EIO;
- }
-
- /* starting MHL */
- ret = v4l2_subdev_call(hdev->mhl_sd, video, s_stream, 1);
- if (hdev->mhl_sd && ret) {
- v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
- hdmi_dumpregs(hdev, "mhl - s_stream");
- return -EIO;
- }
-
- /* hdmiphy clock is used for HDMI in streaming mode */
- clk_disable(res->sclk_hdmi);
- clk_set_parent(res->sclk_hdmi, res->sclk_hdmiphy);
- clk_enable(res->sclk_hdmi);
-
- /* enable HDMI and timing generator */
- hdmi_write_mask(hdev, HDMI_CON_0, ~0, HDMI_EN);
- hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_EN);
- hdmi_dumpregs(hdev, "streamon");
- return 0;
-}
-
-static int hdmi_streamoff(struct hdmi_device *hdev)
-{
- struct device *dev = hdev->dev;
- struct hdmi_resources *res = &hdev->res;
-
- dev_dbg(dev, "%s\n", __func__);
-
- hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_EN);
- hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_EN);
-
- /* pixel(vpll) clock is used for HDMI in config mode */
- clk_disable(res->sclk_hdmi);
- clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
- clk_enable(res->sclk_hdmi);
-
- v4l2_subdev_call(hdev->mhl_sd, video, s_stream, 0);
- v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
-
- hdmi_dumpregs(hdev, "streamoff");
- return 0;
-}
-
-static int hdmi_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
- struct device *dev = hdev->dev;
-
- dev_dbg(dev, "%s(%d)\n", __func__, enable);
- if (enable)
- return hdmi_streamon(hdev);
- return hdmi_streamoff(hdev);
-}
-
-static int hdmi_resource_poweron(struct hdmi_resources *res)
-{
- int ret;
-
- /* turn HDMI power on */
- ret = regulator_bulk_enable(res->regul_count, res->regul_bulk);
- if (ret < 0)
- return ret;
- /* power-on hdmi physical interface */
- clk_enable(res->hdmiphy);
- /* use VPP as parent clock; HDMIPHY is not working yet */
- clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
- /* turn clocks on */
- clk_enable(res->sclk_hdmi);
-
- return 0;
-}
-
-static void hdmi_resource_poweroff(struct hdmi_resources *res)
-{
- /* turn clocks off */
- clk_disable(res->sclk_hdmi);
- /* power-off hdmiphy */
- clk_disable(res->hdmiphy);
- /* turn HDMI power off */
- regulator_bulk_disable(res->regul_count, res->regul_bulk);
-}
-
-static int hdmi_s_power(struct v4l2_subdev *sd, int on)
-{
- struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
- int ret;
-
- if (on)
- ret = pm_runtime_get_sync(hdev->dev);
- else
- ret = pm_runtime_put_sync(hdev->dev);
- /* only values < 0 indicate errors */
- return ret < 0 ? ret : 0;
-}
-
-static int hdmi_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
-{
- struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
- struct device *dev = hdev->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_timings); i++)
- if (v4l2_match_dv_timings(&hdmi_timings[i].dv_timings,
- timings, 0, false))
- break;
- if (i == ARRAY_SIZE(hdmi_timings)) {
- dev_err(dev, "timings not supported\n");
- return -EINVAL;
- }
- hdev->cur_conf = hdmi_timings[i].hdmi_timings;
- hdev->cur_conf_dirty = 1;
- hdev->cur_timings = *timings;
- if (!hdmi_timings[i].reduced_fps)
- hdev->cur_timings.bt.flags &= ~V4L2_DV_FL_CAN_REDUCE_FPS;
- return 0;
-}
-
-static int hdmi_g_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
-{
- *timings = sd_to_hdmi_dev(sd)->cur_timings;
- return 0;
-}
-
-static int hdmi_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
- const struct hdmi_timings *t = hdev->cur_conf;
-
- dev_dbg(hdev->dev, "%s\n", __func__);
- if (!hdev->cur_conf)
- return -EINVAL;
- if (format->pad)
- return -EINVAL;
-
- memset(fmt, 0, sizeof(*fmt));
- fmt->width = t->hact.end - t->hact.beg;
- fmt->height = t->vact[0].end - t->vact[0].beg;
- fmt->code = MEDIA_BUS_FMT_FIXED; /* means RGB888 */
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
- if (t->interlaced) {
- fmt->field = V4L2_FIELD_INTERLACED;
- fmt->height *= 2;
- } else {
- fmt->field = V4L2_FIELD_NONE;
- }
- return 0;
-}
-
-static int hdmi_enum_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_enum_dv_timings *timings)
-{
- if (timings->pad != 0)
- return -EINVAL;
- if (timings->index >= ARRAY_SIZE(hdmi_timings))
- return -EINVAL;
- timings->timings = hdmi_timings[timings->index].dv_timings;
- if (!hdmi_timings[timings->index].reduced_fps)
- timings->timings.bt.flags &= ~V4L2_DV_FL_CAN_REDUCE_FPS;
- return 0;
-}
-
-static int hdmi_dv_timings_cap(struct v4l2_subdev *sd,
- struct v4l2_dv_timings_cap *cap)
-{
- struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
-
- if (cap->pad != 0)
- return -EINVAL;
-
- /* Let the phy fill in the pixelclock range */
- v4l2_subdev_call(hdev->phy_sd, pad, dv_timings_cap, cap);
- cap->type = V4L2_DV_BT_656_1120;
- cap->bt.min_width = 720;
- cap->bt.max_width = 1920;
- cap->bt.min_height = 480;
- cap->bt.max_height = 1080;
- cap->bt.standards = V4L2_DV_BT_STD_CEA861;
- cap->bt.capabilities = V4L2_DV_BT_CAP_INTERLACED |
- V4L2_DV_BT_CAP_PROGRESSIVE;
- return 0;
-}
-
-static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
- .s_power = hdmi_s_power,
-};
-
-static const struct v4l2_subdev_video_ops hdmi_sd_video_ops = {
- .s_dv_timings = hdmi_s_dv_timings,
- .g_dv_timings = hdmi_g_dv_timings,
- .s_stream = hdmi_s_stream,
-};
-
-static const struct v4l2_subdev_pad_ops hdmi_sd_pad_ops = {
- .enum_dv_timings = hdmi_enum_dv_timings,
- .dv_timings_cap = hdmi_dv_timings_cap,
- .get_fmt = hdmi_get_fmt,
-};
-
-static const struct v4l2_subdev_ops hdmi_sd_ops = {
- .core = &hdmi_sd_core_ops,
- .video = &hdmi_sd_video_ops,
- .pad = &hdmi_sd_pad_ops,
-};
-
-static int hdmi_runtime_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
-
- dev_dbg(dev, "%s\n", __func__);
- v4l2_subdev_call(hdev->mhl_sd, core, s_power, 0);
- hdmi_resource_poweroff(&hdev->res);
- /* flag that device context is lost */
- hdev->cur_conf_dirty = 1;
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
- int ret;
-
- dev_dbg(dev, "%s\n", __func__);
-
- ret = hdmi_resource_poweron(&hdev->res);
- if (ret < 0)
- return ret;
-
- /* starting MHL */
- ret = v4l2_subdev_call(hdev->mhl_sd, core, s_power, 1);
- if (hdev->mhl_sd && ret)
- goto fail;
-
- dev_dbg(dev, "poweron succeed\n");
-
- return 0;
-
-fail:
- hdmi_resource_poweroff(&hdev->res);
- dev_err(dev, "poweron failed\n");
-
- return ret;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
-static void hdmi_resource_clear_clocks(struct hdmi_resources *res)
-{
- res->hdmi = ERR_PTR(-EINVAL);
- res->sclk_hdmi = ERR_PTR(-EINVAL);
- res->sclk_pixel = ERR_PTR(-EINVAL);
- res->sclk_hdmiphy = ERR_PTR(-EINVAL);
- res->hdmiphy = ERR_PTR(-EINVAL);
-}
-
-static void hdmi_resources_cleanup(struct hdmi_device *hdev)
-{
- struct hdmi_resources *res = &hdev->res;
-
- dev_dbg(hdev->dev, "HDMI resource cleanup\n");
- /* put clocks, power */
- if (res->regul_count)
- regulator_bulk_free(res->regul_count, res->regul_bulk);
- /* kfree is NULL-safe */
- kfree(res->regul_bulk);
- if (!IS_ERR(res->hdmiphy))
- clk_put(res->hdmiphy);
- if (!IS_ERR(res->sclk_hdmiphy))
- clk_put(res->sclk_hdmiphy);
- if (!IS_ERR(res->sclk_pixel))
- clk_put(res->sclk_pixel);
- if (!IS_ERR(res->sclk_hdmi))
- clk_put(res->sclk_hdmi);
- if (!IS_ERR(res->hdmi))
- clk_put(res->hdmi);
- memset(res, 0, sizeof(*res));
- hdmi_resource_clear_clocks(res);
-}
-
-static int hdmi_resources_init(struct hdmi_device *hdev)
-{
- struct device *dev = hdev->dev;
- struct hdmi_resources *res = &hdev->res;
- static char *supply[] = {
- "hdmi-en",
- "vdd",
- "vdd_osc",
- "vdd_pll",
- };
- int i, ret;
-
- dev_dbg(dev, "HDMI resource init\n");
-
- memset(res, 0, sizeof(*res));
- hdmi_resource_clear_clocks(res);
-
- /* get clocks, power */
- res->hdmi = clk_get(dev, "hdmi");
- if (IS_ERR(res->hdmi)) {
- dev_err(dev, "failed to get clock 'hdmi'\n");
- goto fail;
- }
- res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
- if (IS_ERR(res->sclk_hdmi)) {
- dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
- goto fail;
- }
- res->sclk_pixel = clk_get(dev, "sclk_pixel");
- if (IS_ERR(res->sclk_pixel)) {
- dev_err(dev, "failed to get clock 'sclk_pixel'\n");
- goto fail;
- }
- res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
- if (IS_ERR(res->sclk_hdmiphy)) {
- dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
- goto fail;
- }
- res->hdmiphy = clk_get(dev, "hdmiphy");
- if (IS_ERR(res->hdmiphy)) {
- dev_err(dev, "failed to get clock 'hdmiphy'\n");
- goto fail;
- }
- res->regul_bulk = kcalloc(ARRAY_SIZE(supply),
- sizeof(res->regul_bulk[0]), GFP_KERNEL);
- if (!res->regul_bulk) {
- dev_err(dev, "failed to get memory for regulators\n");
- goto fail;
- }
- for (i = 0; i < ARRAY_SIZE(supply); ++i) {
- res->regul_bulk[i].supply = supply[i];
- res->regul_bulk[i].consumer = NULL;
- }
-
- ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
- if (ret) {
- dev_err(dev, "failed to get regulators\n");
- goto fail;
- }
- res->regul_count = ARRAY_SIZE(supply);
-
- return 0;
-fail:
- dev_err(dev, "HDMI resource init - failed\n");
- hdmi_resources_cleanup(hdev);
- return -ENODEV;
-}
-
-static int hdmi_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct i2c_adapter *adapter;
- struct v4l2_subdev *sd;
- struct hdmi_device *hdmi_dev = NULL;
- struct s5p_hdmi_platform_data *pdata = dev->platform_data;
- int ret;
-
- dev_dbg(dev, "probe start\n");
-
- if (!pdata) {
- dev_err(dev, "platform data is missing\n");
- ret = -ENODEV;
- goto fail;
- }
-
- hdmi_dev = devm_kzalloc(&pdev->dev, sizeof(*hdmi_dev), GFP_KERNEL);
- if (!hdmi_dev) {
- dev_err(dev, "out of memory\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- hdmi_dev->dev = dev;
-
- ret = hdmi_resources_init(hdmi_dev);
- if (ret)
- goto fail;
-
- /* mapping HDMI registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail_init;
- }
-
- hdmi_dev->regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (hdmi_dev->regs == NULL) {
- dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail_init;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(dev, "get interrupt resource failed.\n");
- ret = -ENXIO;
- goto fail_init;
- }
-
- ret = devm_request_irq(&pdev->dev, res->start, hdmi_irq_handler, 0,
- "hdmi", hdmi_dev);
- if (ret) {
- dev_err(dev, "request interrupt failed.\n");
- goto fail_init;
- }
- hdmi_dev->irq = res->start;
-
- /* setting v4l2 name to prevent WARN_ON in v4l2_device_register */
- strlcpy(hdmi_dev->v4l2_dev.name, dev_name(dev),
- sizeof(hdmi_dev->v4l2_dev.name));
-	/* passing a NULL parent device prevents the v4l2 core from erasing drvdata */
- ret = v4l2_device_register(NULL, &hdmi_dev->v4l2_dev);
- if (ret) {
- dev_err(dev, "could not register v4l2 device.\n");
- goto fail_init;
- }
-
- /* testing if hdmiphy info is present */
- if (!pdata->hdmiphy_info) {
- dev_err(dev, "hdmiphy info is missing in platform data\n");
- ret = -ENXIO;
- goto fail_vdev;
- }
-
- adapter = i2c_get_adapter(pdata->hdmiphy_bus);
- if (adapter == NULL) {
- dev_err(dev, "hdmiphy adapter request failed\n");
- ret = -ENXIO;
- goto fail_vdev;
- }
-
- hdmi_dev->phy_sd = v4l2_i2c_new_subdev_board(&hdmi_dev->v4l2_dev,
- adapter, pdata->hdmiphy_info, NULL);
-	/* whether the call failed or not, the adapter is no longer needed */
- i2c_put_adapter(adapter);
- if (hdmi_dev->phy_sd == NULL) {
- dev_err(dev, "missing subdev for hdmiphy\n");
- ret = -ENODEV;
- goto fail_vdev;
- }
-
- /* initialization of MHL interface if present */
- if (pdata->mhl_info) {
- adapter = i2c_get_adapter(pdata->mhl_bus);
- if (adapter == NULL) {
- dev_err(dev, "MHL adapter request failed\n");
- ret = -ENXIO;
- goto fail_vdev;
- }
-
- hdmi_dev->mhl_sd = v4l2_i2c_new_subdev_board(
- &hdmi_dev->v4l2_dev, adapter,
- pdata->mhl_info, NULL);
-		/* whether the call failed or not, the adapter is no longer needed */
- i2c_put_adapter(adapter);
- if (hdmi_dev->mhl_sd == NULL) {
- dev_err(dev, "missing subdev for MHL\n");
- ret = -ENODEV;
- goto fail_vdev;
- }
- }
-
- clk_enable(hdmi_dev->res.hdmi);
-
- pm_runtime_enable(dev);
-
- sd = &hdmi_dev->sd;
- v4l2_subdev_init(sd, &hdmi_sd_ops);
- sd->owner = THIS_MODULE;
-
- strlcpy(sd->name, "s5p-hdmi", sizeof(sd->name));
- hdmi_dev->cur_timings =
- hdmi_timings[HDMI_DEFAULT_TIMINGS_IDX].dv_timings;
-	/* FIXME: fallback when timings are missing is not supported */
- hdmi_dev->cur_conf =
- hdmi_timings[HDMI_DEFAULT_TIMINGS_IDX].hdmi_timings;
- hdmi_dev->cur_conf_dirty = 1;
-
-	/* storing subdev for calls that only have access to struct device */
- dev_set_drvdata(dev, sd);
-
- dev_info(dev, "probe successful\n");
-
- return 0;
-
-fail_vdev:
- v4l2_device_unregister(&hdmi_dev->v4l2_dev);
-
-fail_init:
- hdmi_resources_cleanup(hdmi_dev);
-
-fail:
- dev_err(dev, "probe failed\n");
- return ret;
-}
-
-static int hdmi_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct hdmi_device *hdmi_dev = sd_to_hdmi_dev(sd);
-
- pm_runtime_disable(dev);
- clk_disable(hdmi_dev->res.hdmi);
- v4l2_device_unregister(&hdmi_dev->v4l2_dev);
- disable_irq(hdmi_dev->irq);
- hdmi_resources_cleanup(hdmi_dev);
- dev_info(dev, "remove successful\n");
-
- return 0;
-}
-
-static struct platform_driver hdmi_driver __refdata = {
- .probe = hdmi_probe,
- .remove = hdmi_remove,
- .id_table = hdmi_driver_types,
- .driver = {
- .name = "s5p-hdmi",
- .pm = &hdmi_pm_ops,
- }
-};
-
-module_platform_driver(hdmi_driver);
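
[Editor's note] The init paths in the deleted hdmi_drv.c above lean on the kernel's goto-based unwind idiom: each failed acquisition jumps to a label that releases only what was already acquired, in reverse order. A minimal userspace sketch of that pattern, with illustrative names only (malloc/free stand in for ioremap/request_irq):

#include <stdio.h>
#include <stdlib.h>

static int acquire_two(void)
{
	char *regs, *irq_cookie;

	regs = malloc(64);		/* stands in for ioremap() */
	if (!regs)
		goto fail;

	irq_cookie = malloc(64);	/* stands in for request_irq() */
	if (!irq_cookie)
		goto fail_regs;

	/* ... the device would be usable here ... */
	free(irq_cookie);
	free(regs);
	return 0;

fail_regs:
	free(regs);
fail:
	fprintf(stderr, "acquire failed\n");
	return -1;
}

int main(void)
{
	return acquire_two() ? 1 : 0;
}
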
diff --git a/drivers/media/platform/s5p-tv/hdmiphy_drv.c b/drivers/media/platform/s5p-tv/hdmiphy_drv.c
deleted file mode 100644
index aae652351aa8..000000000000
--- a/drivers/media/platform/s5p-tv/hdmiphy_drv.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * Samsung HDMI Physical interface driver
- *
- * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
- * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/err.h>
-
-#include <media/v4l2-subdev.h>
-
-MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung HDMI Physical interface driver");
-MODULE_LICENSE("GPL");
-
-struct hdmiphy_conf {
- unsigned long pixclk;
- const u8 *data;
-};
-
-struct hdmiphy_ctx {
- struct v4l2_subdev sd;
- const struct hdmiphy_conf *conf_tab;
-};
-
-static const struct hdmiphy_conf hdmiphy_conf_s5pv210[] = {
- { .pixclk = 27000000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
- 0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
- },
- { .pixclk = 27027000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
- 0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
- },
- { .pixclk = 74176000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
- 0x6D, 0x10, 0x01, 0x52, 0xEF, 0xF3, 0x54, 0xB9,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
- },
- { .pixclk = 74250000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
- 0x6A, 0x10, 0x01, 0x52, 0xFF, 0xF1, 0x54, 0xBA,
- 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
- },
- { /* end marker */ }
-};
-
-static const struct hdmiphy_conf hdmiphy_conf_exynos4210[] = {
- { .pixclk = 27000000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
- 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
- },
- { .pixclk = 27027000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
- 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
- },
- { .pixclk = 74176000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
- 0x6D, 0x10, 0x01, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
- },
- { .pixclk = 74250000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
- 0x6A, 0x10, 0x01, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
- 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
- },
- { .pixclk = 148352000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
- 0x6D, 0x18, 0x00, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x11, 0x40, 0xA5, 0x26, 0x02, 0x00, 0x00, 0x00, }
- },
- { .pixclk = 148500000, .data = (u8 [32]) {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
- 0x6A, 0x18, 0x00, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
- 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x11, 0x40, 0xA4, 0x26, 0x02, 0x00, 0x00, 0x00, }
- },
- { /* end marker */ }
-};
-
-static const struct hdmiphy_conf hdmiphy_conf_exynos4212[] = {
- { .pixclk = 27000000, .data = (u8 [32]) {
- 0x01, 0x11, 0x2D, 0x75, 0x00, 0x01, 0x00, 0x08,
- 0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
- 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
- 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
- },
- { .pixclk = 27027000, .data = (u8 [32]) {
- 0x01, 0x91, 0x2D, 0x72, 0x00, 0x64, 0x12, 0x08,
- 0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
- 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
- 0x54, 0xE2, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
- },
- { .pixclk = 74176000, .data = (u8 [32]) {
- 0x01, 0x91, 0x3E, 0x35, 0x00, 0x5B, 0xDE, 0x08,
- 0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
- 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
- 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
- },
- { .pixclk = 74250000, .data = (u8 [32]) {
- 0x01, 0x91, 0x3E, 0x35, 0x00, 0x40, 0xF0, 0x08,
- 0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
- 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
- 0x54, 0xA4, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
- },
- { .pixclk = 148500000, .data = (u8 [32]) {
- 0x01, 0x91, 0x3E, 0x15, 0x00, 0x40, 0xF0, 0x08,
- 0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
- 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0xA4,
- 0x54, 0x4A, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
- },
- { /* end marker */ }
-};
-
-static const struct hdmiphy_conf hdmiphy_conf_exynos4412[] = {
- { .pixclk = 27000000, .data = (u8 [32]) {
- 0x01, 0x11, 0x2D, 0x75, 0x40, 0x01, 0x00, 0x08,
- 0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
- 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
- },
- { .pixclk = 27027000, .data = (u8 [32]) {
- 0x01, 0x91, 0x2D, 0x72, 0x40, 0x64, 0x12, 0x08,
- 0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
- 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
- },
- { .pixclk = 74176000, .data = (u8 [32]) {
- 0x01, 0x91, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0x08,
- 0x81, 0x20, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
- 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
- },
- { .pixclk = 74250000, .data = (u8 [32]) {
- 0x01, 0x91, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08,
- 0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
- 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
- },
- { .pixclk = 148500000, .data = (u8 [32]) {
- 0x01, 0x91, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08,
- 0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
- 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
- 0x54, 0x4B, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
- },
- { /* end marker */ }
-};
-
-static inline struct hdmiphy_ctx *sd_to_ctx(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct hdmiphy_ctx, sd);
-}
-
-static const u8 *hdmiphy_find_conf(unsigned long pixclk,
- const struct hdmiphy_conf *conf)
-{
- for (; conf->pixclk; ++conf)
- if (conf->pixclk == pixclk)
- return conf->data;
- return NULL;
-}
-
-static int hdmiphy_s_power(struct v4l2_subdev *sd, int on)
-{
- /* to be implemented */
- return 0;
-}
-
-static int hdmiphy_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings)
-{
- const u8 *data;
- u8 buffer[32];
- int ret;
- struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct device *dev = &client->dev;
- unsigned long pixclk = timings->bt.pixelclock;
-
- dev_info(dev, "s_dv_timings\n");
- if ((timings->bt.flags & V4L2_DV_FL_REDUCED_FPS) && pixclk == 74250000)
- pixclk = 74176000;
- data = hdmiphy_find_conf(pixclk, ctx->conf_tab);
- if (!data) {
- dev_err(dev, "format not supported\n");
- return -EINVAL;
- }
-
- /* storing configuration to the device */
- memcpy(buffer, data, 32);
- ret = i2c_master_send(client, buffer, 32);
- if (ret != 32) {
- dev_err(dev, "failed to configure HDMIPHY via I2C\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static int hdmiphy_dv_timings_cap(struct v4l2_subdev *sd,
- struct v4l2_dv_timings_cap *cap)
-{
- if (cap->pad != 0)
- return -EINVAL;
-
- cap->type = V4L2_DV_BT_656_1120;
- /* The phy only determines the pixelclock, leave the other values
- * at 0 to signify that we have no information for them. */
- cap->bt.min_pixelclock = 27000000;
- cap->bt.max_pixelclock = 148500000;
- return 0;
-}
-
-static int hdmiphy_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct device *dev = &client->dev;
- u8 buffer[2];
- int ret;
-
- dev_info(dev, "s_stream(%d)\n", enable);
-	/* switch between configuration and operation mode */
- buffer[0] = 0x1f;
- buffer[1] = enable ? 0x80 : 0x00;
-
- ret = i2c_master_send(client, buffer, 2);
- if (ret != 2) {
- dev_err(dev, "stream (%d) failed\n", enable);
- return -EIO;
- }
- return 0;
-}
-
-static const struct v4l2_subdev_core_ops hdmiphy_core_ops = {
- .s_power = hdmiphy_s_power,
-};
-
-static const struct v4l2_subdev_video_ops hdmiphy_video_ops = {
- .s_dv_timings = hdmiphy_s_dv_timings,
- .s_stream = hdmiphy_s_stream,
-};
-
-static const struct v4l2_subdev_pad_ops hdmiphy_pad_ops = {
- .dv_timings_cap = hdmiphy_dv_timings_cap,
-};
-
-static const struct v4l2_subdev_ops hdmiphy_ops = {
- .core = &hdmiphy_core_ops,
- .video = &hdmiphy_video_ops,
- .pad = &hdmiphy_pad_ops,
-};
-
-static int hdmiphy_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct hdmiphy_ctx *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->conf_tab = (struct hdmiphy_conf *)id->driver_data;
- v4l2_i2c_subdev_init(&ctx->sd, client, &hdmiphy_ops);
-
- dev_info(&client->dev, "probe successful\n");
- return 0;
-}
-
-static int hdmiphy_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
-
- kfree(ctx);
- dev_info(&client->dev, "remove successful\n");
-
- return 0;
-}
-
-static const struct i2c_device_id hdmiphy_id[] = {
- { "hdmiphy", (unsigned long)hdmiphy_conf_exynos4210 },
- { "hdmiphy-s5pv210", (unsigned long)hdmiphy_conf_s5pv210 },
- { "hdmiphy-exynos4210", (unsigned long)hdmiphy_conf_exynos4210 },
- { "hdmiphy-exynos4212", (unsigned long)hdmiphy_conf_exynos4212 },
- { "hdmiphy-exynos4412", (unsigned long)hdmiphy_conf_exynos4412 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, hdmiphy_id);
-
-static struct i2c_driver hdmiphy_driver = {
- .driver = {
- .name = "s5p-hdmiphy",
- },
- .probe = hdmiphy_probe,
- .remove = hdmiphy_remove,
- .id_table = hdmiphy_id,
-};
-
-module_i2c_driver(hdmiphy_driver);
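
[Editor's note] The deleted hdmiphy driver selects a per-SoC register recipe by walking a table terminated with a zeroed end marker; the table pointer itself arrives through i2c_device_id.driver_data. A self-contained mirror of the lookup, with struct and data reduced for illustration (names are not the driver's):

#include <stdio.h>

struct conf {
	unsigned long pixclk;
	const unsigned char *data;
};

static const unsigned char cfg_27m[32] = { 0x01, 0x05 /* ... */ };

static const struct conf table[] = {
	{ .pixclk = 27000000, .data = cfg_27m },
	{ /* end marker */ }
};

static const unsigned char *find_conf(unsigned long pixclk,
				      const struct conf *c)
{
	for (; c->pixclk; ++c)	/* stop at the zeroed end marker */
		if (c->pixclk == pixclk)
			return c->data;
	return NULL;
}

int main(void)
{
	printf("27 MHz: %s\n", find_conf(27000000, table) ? "hit" : "miss");
	printf("74.25 MHz: %s\n", find_conf(74250000, table) ? "hit" : "miss");
	return 0;
}
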
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
deleted file mode 100644
index 869f0ce86f6e..000000000000
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License,
- * or (at your option) any later version
- */
-
-#ifndef SAMSUNG_MIXER_H
-#define SAMSUNG_MIXER_H
-
-#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
- #define DEBUG
-#endif
-
-#include <linux/fb.h>
-#include <linux/irqreturn.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <media/v4l2-device.h>
-#include <media/videobuf2-v4l2.h>
-
-#include "regs-mixer.h"
-
-/** maximum number of output interfaces */
-#define MXR_MAX_OUTPUTS 2
-/** maximum number of input interfaces (layers) */
-#define MXR_MAX_LAYERS 3
-#define MXR_DRIVER_NAME "s5p-mixer"
-/** maximum number of planes per layer */
-#define MXR_MAX_PLANES 2
-
-#define MXR_ENABLE 1
-#define MXR_DISABLE 0
-
-/** description of a macroblock for packed formats */
-struct mxr_block {
-	/** horizontal number of pixels in macroblock */
-	unsigned int width;
-	/** vertical number of pixels in macroblock */
-	unsigned int height;
- /** size of block in bytes */
- unsigned int size;
-};
-
-/** description of supported format */
-struct mxr_format {
- /** format name/mnemonic */
- const char *name;
- /** fourcc identifier */
- u32 fourcc;
- /** colorspace identifier */
- enum v4l2_colorspace colorspace;
- /** number of planes in image data */
- int num_planes;
- /** description of block for each plane */
- struct mxr_block plane[MXR_MAX_PLANES];
- /** number of subframes in image data */
- int num_subframes;
-	/** specifies to which subframe a given plane belongs */
- int plane2subframe[MXR_MAX_PLANES];
- /** internal code, driver dependent */
- unsigned long cookie;
-};
-
-/** description of crop configuration for image */
-struct mxr_crop {
- /** width of layer in pixels */
- unsigned int full_width;
- /** height of layer in pixels */
- unsigned int full_height;
- /** horizontal offset of first pixel to be displayed */
- unsigned int x_offset;
- /** vertical offset of first pixel to be displayed */
- unsigned int y_offset;
- /** width of displayed data in pixels */
- unsigned int width;
- /** height of displayed data in pixels */
- unsigned int height;
-	/** indicates which fields are present in the buffer */
- unsigned int field;
-};
-
-/** stages of geometry operations */
-enum mxr_geometry_stage {
- MXR_GEOMETRY_SINK,
- MXR_GEOMETRY_COMPOSE,
- MXR_GEOMETRY_CROP,
- MXR_GEOMETRY_SOURCE,
-};
-
-/* flag indicating that offset should be 0 */
-#define MXR_NO_OFFSET 0x80000000
-
-/** description of transformation from source to destination image */
-struct mxr_geometry {
- /** cropping for source image */
- struct mxr_crop src;
- /** cropping for destination image */
- struct mxr_crop dst;
-	/** layer-dependent description of horizontal scaling */
-	unsigned int x_ratio;
-	/** layer-dependent description of vertical scaling */
-	unsigned int y_ratio;
-};
-
-/** instance of a buffer */
-struct mxr_buffer {
- /** common v4l buffer stuff -- must be first */
- struct vb2_v4l2_buffer vb;
- /** node for layer's lists */
- struct list_head list;
-};
-
-
-/** internal states of layer */
-enum mxr_layer_state {
-	/** layer is not shown */
- MXR_LAYER_IDLE = 0,
- /** layer is shown */
- MXR_LAYER_STREAMING,
- /** state before STREAMOFF is finished */
- MXR_LAYER_STREAMING_FINISH,
-};
-
-/** forward declarations */
-struct mxr_device;
-struct mxr_layer;
-
-/** callback for layers operation */
-struct mxr_layer_ops {
- /* TODO: try to port it to subdev API */
- /** handler for resource release function */
- void (*release)(struct mxr_layer *);
- /** setting buffer to HW */
- void (*buffer_set)(struct mxr_layer *, struct mxr_buffer *);
- /** setting format and geometry in HW */
- void (*format_set)(struct mxr_layer *);
- /** streaming stop/start */
- void (*stream_set)(struct mxr_layer *, int);
- /** adjusting geometry */
- void (*fix_geometry)(struct mxr_layer *,
- enum mxr_geometry_stage, unsigned long);
-};
-
-/** layer instance, a single window and content displayed on output */
-struct mxr_layer {
- /** parent mixer device */
- struct mxr_device *mdev;
- /** layer index (unique identifier) */
- int idx;
- /** callbacks for layer methods */
- struct mxr_layer_ops ops;
- /** format array */
- const struct mxr_format **fmt_array;
- /** size of format array */
- unsigned long fmt_array_size;
-
- /** lock for protection of list and state fields */
- spinlock_t enq_slock;
- /** list for enqueued buffers */
- struct list_head enq_list;
- /** buffer currently owned by hardware in temporary registers */
- struct mxr_buffer *update_buf;
- /** buffer currently owned by hardware in shadow registers */
- struct mxr_buffer *shadow_buf;
- /** state of layer IDLE/STREAMING */
- enum mxr_layer_state state;
-
- /** mutex for protection of fields below */
- struct mutex mutex;
- /** handler for video node */
- struct video_device vfd;
- /** queue for output buffers */
- struct vb2_queue vb_queue;
- /** current image format */
- const struct mxr_format *fmt;
- /** current geometry of image */
- struct mxr_geometry geo;
-};
-
-/** description of mixer's output interface */
-struct mxr_output {
- /** name of output */
- char name[32];
- /** output subdev */
- struct v4l2_subdev *sd;
- /** cookie used for configuration of registers */
- int cookie;
-};
-
-/** specifies source of output subdevs */
-struct mxr_output_conf {
- /** name of output (connector) */
- char *output_name;
- /** name of module that generates output subdev */
- char *module_name;
-	/** cookie needed by mixer HW */
- int cookie;
-};
-
-struct clk;
-struct regulator;
-
-/** auxiliary resources used by the mixer */
-struct mxr_resources {
- /** interrupt index */
- int irq;
- /** pointer to Mixer registers */
- void __iomem *mxr_regs;
- /** pointer to Video Processor registers */
- void __iomem *vp_regs;
-	/** other resources, should be used under mxr_device.mutex */
- struct clk *mixer;
- struct clk *vp;
- struct clk *sclk_mixer;
- struct clk *sclk_hdmi;
- struct clk *sclk_dac;
-};
-
-/* event flags used */
-enum mxr_devide_flags {
- MXR_EVENT_VSYNC = 0,
- MXR_EVENT_TOP = 1,
-};
-
-/** drivers instance */
-struct mxr_device {
- /** master device */
- struct device *dev;
- /** state of each layer */
- struct mxr_layer *layer[MXR_MAX_LAYERS];
- /** state of each output */
- struct mxr_output *output[MXR_MAX_OUTPUTS];
- /** number of registered outputs */
- int output_cnt;
-
- /* video resources */
-
- /** V4L2 device */
- struct v4l2_device v4l2_dev;
- /** event wait queue */
- wait_queue_head_t event_queue;
- /** state flags */
- unsigned long event_flags;
-
- /** spinlock for protection of registers */
- spinlock_t reg_slock;
-
- /** mutex for protection of fields below */
- struct mutex mutex;
-	/** number of entities dependent on output configuration */
- int n_output;
- /** number of users that do streaming */
- int n_streamer;
- /** index of current output */
- int current_output;
-	/** auxiliary resources used by the mixer */
- struct mxr_resources res;
-};
-
-/** transform device structure into mixer device */
-static inline struct mxr_device *to_mdev(struct device *dev)
-{
- struct v4l2_device *vdev = dev_get_drvdata(dev);
- return container_of(vdev, struct mxr_device, v4l2_dev);
-}
-
-/** get current output data, should be called under mdev's mutex */
-static inline struct mxr_output *to_output(struct mxr_device *mdev)
-{
- return mdev->output[mdev->current_output];
-}
-
-/** get current output subdev, should be called under mdev's mutex */
-static inline struct v4l2_subdev *to_outsd(struct mxr_device *mdev)
-{
- struct mxr_output *out = to_output(mdev);
- return out ? out->sd : NULL;
-}
-
-/** forward declaration for mixer platform data */
-struct mxr_platform_data;
-
-/** acquiring common video resources */
-int mxr_acquire_video(struct mxr_device *mdev,
- struct mxr_output_conf *output_cont, int output_count);
-
-/** releasing common video resources */
-void mxr_release_video(struct mxr_device *mdev);
-
-struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
-struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
-struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
- int idx, char *name, const struct mxr_layer_ops *ops);
-
-void mxr_base_layer_release(struct mxr_layer *layer);
-void mxr_layer_release(struct mxr_layer *layer);
-
-int mxr_base_layer_register(struct mxr_layer *layer);
-void mxr_base_layer_unregister(struct mxr_layer *layer);
-
-unsigned long mxr_get_plane_size(const struct mxr_block *blk,
- unsigned int width, unsigned int height);
-
-/** adds new consumer for mixer's power */
-int __must_check mxr_power_get(struct mxr_device *mdev);
-/** removes consumer for mixer's power */
-void mxr_power_put(struct mxr_device *mdev);
-/** adds a new client for output configuration */
-void mxr_output_get(struct mxr_device *mdev);
-/** removes a client for output configuration */
-void mxr_output_put(struct mxr_device *mdev);
-/** adds a new client for streaming */
-void mxr_streamer_get(struct mxr_device *mdev);
-/** removes a client for streaming */
-void mxr_streamer_put(struct mxr_device *mdev);
-/** returns format of data delivered to current output */
-void mxr_get_mbus_fmt(struct mxr_device *mdev,
- struct v4l2_mbus_framefmt *mbus_fmt);
-
-/* Debug */
-
-#define mxr_err(mdev, fmt, ...) dev_err(mdev->dev, fmt, ##__VA_ARGS__)
-#define mxr_warn(mdev, fmt, ...) dev_warn(mdev->dev, fmt, ##__VA_ARGS__)
-#define mxr_info(mdev, fmt, ...) dev_info(mdev->dev, fmt, ##__VA_ARGS__)
-
-#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
- #define mxr_dbg(mdev, fmt, ...) dev_dbg(mdev->dev, fmt, ##__VA_ARGS__)
-#else
- #define mxr_dbg(mdev, fmt, ...) do { (void) mdev; } while (0)
-#endif
-
-/* accessing Mixer's and Video Processor's registers */
-
-void mxr_vsync_set_update(struct mxr_device *mdev, int en);
-void mxr_reg_reset(struct mxr_device *mdev);
-irqreturn_t mxr_irq_handler(int irq, void *dev_data);
-void mxr_reg_s_output(struct mxr_device *mdev, int cookie);
-void mxr_reg_streamon(struct mxr_device *mdev);
-void mxr_reg_streamoff(struct mxr_device *mdev);
-int mxr_reg_wait4vsync(struct mxr_device *mdev);
-void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
- struct v4l2_mbus_framefmt *fmt);
-void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en);
-void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr);
-void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
- const struct mxr_format *fmt, const struct mxr_geometry *geo);
-
-void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en);
-void mxr_reg_vp_buffer(struct mxr_device *mdev,
- dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2]);
-void mxr_reg_vp_format(struct mxr_device *mdev,
- const struct mxr_format *fmt, const struct mxr_geometry *geo);
-void mxr_reg_dump(struct mxr_device *mdev);
-
-#endif /* SAMSUNG_MIXER_H */
-
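
[Editor's note] The to_mdev()/to_outsd() helpers in the deleted mixer.h recover the driver instance from a pointer to an embedded member via container_of(). A userspace sketch of the same trick, using simplified stand-in structs:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct v4l2_dev_model { int id; };

struct mxr_dev_model {
	int cookie;
	struct v4l2_dev_model v4l2_dev;	/* embedded, as in mxr_device */
};

int main(void)
{
	struct mxr_dev_model mdev = { .cookie = 42 };
	struct v4l2_dev_model *vdev = &mdev.v4l2_dev;
	struct mxr_dev_model *back =
		container_of(vdev, struct mxr_dev_model, v4l2_dev);

	printf("cookie = %d\n", back->cookie);	/* prints 42 */
	return 0;
}
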
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
deleted file mode 100644
index 8a5d19469ddc..000000000000
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License,
- * or (at your option) any later version
- */
-
-#include "mixer.h"
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/fb.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-
-MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung MIXER");
-MODULE_LICENSE("GPL");
-
-/* --------- DRIVER PARAMETERS ---------- */
-
-static struct mxr_output_conf mxr_output_conf[] = {
- {
- .output_name = "S5P HDMI connector",
- .module_name = "s5p-hdmi",
- .cookie = 1,
- },
- {
- .output_name = "S5P SDO connector",
- .module_name = "s5p-sdo",
- .cookie = 0,
- },
-};
-
-void mxr_get_mbus_fmt(struct mxr_device *mdev,
- struct v4l2_mbus_framefmt *mbus_fmt)
-{
- struct v4l2_subdev *sd;
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- int ret;
-
- mutex_lock(&mdev->mutex);
- sd = to_outsd(mdev);
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- *mbus_fmt = fmt.format;
- WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
- mutex_unlock(&mdev->mutex);
-}
-
-void mxr_streamer_get(struct mxr_device *mdev)
-{
- mutex_lock(&mdev->mutex);
- ++mdev->n_streamer;
- mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
- if (mdev->n_streamer == 1) {
- struct v4l2_subdev *sd = to_outsd(mdev);
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mbus_fmt = &fmt.format;
- struct mxr_resources *res = &mdev->res;
- int ret;
-
- if (to_output(mdev)->cookie == 0)
- clk_set_parent(res->sclk_mixer, res->sclk_dac);
- else
- clk_set_parent(res->sclk_mixer, res->sclk_hdmi);
- mxr_reg_s_output(mdev, to_output(mdev)->cookie);
-
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
- ret = v4l2_subdev_call(sd, video, s_stream, 1);
- WARN(ret, "starting stream failed for output %s\n", sd->name);
-
- mxr_reg_set_mbus_fmt(mdev, mbus_fmt);
- mxr_reg_streamon(mdev);
- ret = mxr_reg_wait4vsync(mdev);
- WARN(ret, "failed to get vsync (%d) from output\n", ret);
- }
- mutex_unlock(&mdev->mutex);
- mxr_reg_dump(mdev);
- /* FIXME: what to do when streaming fails? */
-}
-
-void mxr_streamer_put(struct mxr_device *mdev)
-{
- mutex_lock(&mdev->mutex);
- --mdev->n_streamer;
- mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
- if (mdev->n_streamer == 0) {
- int ret;
- struct v4l2_subdev *sd = to_outsd(mdev);
-
- mxr_reg_streamoff(mdev);
- /* vsync applies Mixer setup */
- ret = mxr_reg_wait4vsync(mdev);
- WARN(ret, "failed to get vsync (%d) from output\n", ret);
- ret = v4l2_subdev_call(sd, video, s_stream, 0);
- WARN(ret, "stopping stream failed for output %s\n", sd->name);
- }
- WARN(mdev->n_streamer < 0, "negative number of streamers (%d)\n",
- mdev->n_streamer);
- mutex_unlock(&mdev->mutex);
- mxr_reg_dump(mdev);
-}
-
-void mxr_output_get(struct mxr_device *mdev)
-{
- mutex_lock(&mdev->mutex);
- ++mdev->n_output;
- mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
- /* turn on auxiliary driver */
- if (mdev->n_output == 1)
- v4l2_subdev_call(to_outsd(mdev), core, s_power, 1);
- mutex_unlock(&mdev->mutex);
-}
-
-void mxr_output_put(struct mxr_device *mdev)
-{
- mutex_lock(&mdev->mutex);
- --mdev->n_output;
- mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
-	/* turn off auxiliary driver */
- if (mdev->n_output == 0)
- v4l2_subdev_call(to_outsd(mdev), core, s_power, 0);
- WARN(mdev->n_output < 0, "negative number of output users (%d)\n",
- mdev->n_output);
- mutex_unlock(&mdev->mutex);
-}
-
-int mxr_power_get(struct mxr_device *mdev)
-{
- int ret = pm_runtime_get_sync(mdev->dev);
-
-	/* returning 1 means that power is already enabled,
-	 * so zero should be returned to indicate success */
- if (ret < 0)
- return ret;
- return 0;
-}
-
-void mxr_power_put(struct mxr_device *mdev)
-{
- pm_runtime_put_sync(mdev->dev);
-}
-
-/* --------- RESOURCE MANAGEMENT -------------*/
-
-static int mxr_acquire_plat_resources(struct mxr_device *mdev,
- struct platform_device *pdev)
-{
- struct resource *res;
- int ret;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
- if (res == NULL) {
- mxr_err(mdev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
- }
-
- mdev->res.mxr_regs = ioremap(res->start, resource_size(res));
- if (mdev->res.mxr_regs == NULL) {
- mxr_err(mdev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
- if (res == NULL) {
- mxr_err(mdev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail_mxr_regs;
- }
-
- mdev->res.vp_regs = ioremap(res->start, resource_size(res));
- if (mdev->res.vp_regs == NULL) {
- mxr_err(mdev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail_mxr_regs;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
- if (res == NULL) {
- mxr_err(mdev, "get interrupt resource failed.\n");
- ret = -ENXIO;
- goto fail_vp_regs;
- }
-
- ret = request_irq(res->start, mxr_irq_handler, 0, "s5p-mixer", mdev);
- if (ret) {
- mxr_err(mdev, "request interrupt failed.\n");
- goto fail_vp_regs;
- }
- mdev->res.irq = res->start;
-
- return 0;
-
-fail_vp_regs:
- iounmap(mdev->res.vp_regs);
-
-fail_mxr_regs:
- iounmap(mdev->res.mxr_regs);
-
-fail:
- return ret;
-}
-
-static void mxr_resource_clear_clocks(struct mxr_resources *res)
-{
- res->mixer = ERR_PTR(-EINVAL);
- res->vp = ERR_PTR(-EINVAL);
- res->sclk_mixer = ERR_PTR(-EINVAL);
- res->sclk_hdmi = ERR_PTR(-EINVAL);
- res->sclk_dac = ERR_PTR(-EINVAL);
-}
-
-static void mxr_release_plat_resources(struct mxr_device *mdev)
-{
- free_irq(mdev->res.irq, mdev);
- iounmap(mdev->res.vp_regs);
- iounmap(mdev->res.mxr_regs);
-}
-
-static void mxr_release_clocks(struct mxr_device *mdev)
-{
- struct mxr_resources *res = &mdev->res;
-
- if (!IS_ERR(res->sclk_dac))
- clk_put(res->sclk_dac);
- if (!IS_ERR(res->sclk_hdmi))
- clk_put(res->sclk_hdmi);
- if (!IS_ERR(res->sclk_mixer))
- clk_put(res->sclk_mixer);
- if (!IS_ERR(res->vp))
- clk_put(res->vp);
- if (!IS_ERR(res->mixer))
- clk_put(res->mixer);
-}
-
-static int mxr_acquire_clocks(struct mxr_device *mdev)
-{
- struct mxr_resources *res = &mdev->res;
- struct device *dev = mdev->dev;
-
- mxr_resource_clear_clocks(res);
-
- res->mixer = clk_get(dev, "mixer");
- if (IS_ERR(res->mixer)) {
- mxr_err(mdev, "failed to get clock 'mixer'\n");
- goto fail;
- }
- res->vp = clk_get(dev, "vp");
- if (IS_ERR(res->vp)) {
- mxr_err(mdev, "failed to get clock 'vp'\n");
- goto fail;
- }
- res->sclk_mixer = clk_get(dev, "sclk_mixer");
- if (IS_ERR(res->sclk_mixer)) {
- mxr_err(mdev, "failed to get clock 'sclk_mixer'\n");
- goto fail;
- }
- res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
- if (IS_ERR(res->sclk_hdmi)) {
- mxr_err(mdev, "failed to get clock 'sclk_hdmi'\n");
- goto fail;
- }
- res->sclk_dac = clk_get(dev, "sclk_dac");
- if (IS_ERR(res->sclk_dac)) {
- mxr_err(mdev, "failed to get clock 'sclk_dac'\n");
- goto fail;
- }
-
- return 0;
-fail:
- mxr_release_clocks(mdev);
- return -ENODEV;
-}
-
-static int mxr_acquire_resources(struct mxr_device *mdev,
- struct platform_device *pdev)
-{
-	int ret;
-
-	ret = mxr_acquire_plat_resources(mdev, pdev);
-	if (ret)
-		goto fail;
-
- ret = mxr_acquire_clocks(mdev);
- if (ret)
- goto fail_plat;
-
- mxr_info(mdev, "resources acquired\n");
- return 0;
-
-fail_plat:
- mxr_release_plat_resources(mdev);
-fail:
- mxr_err(mdev, "resources acquire failed\n");
- return ret;
-}
-
-static void mxr_release_resources(struct mxr_device *mdev)
-{
- mxr_release_clocks(mdev);
- mxr_release_plat_resources(mdev);
- memset(&mdev->res, 0, sizeof(mdev->res));
- mxr_resource_clear_clocks(&mdev->res);
-}
-
-static void mxr_release_layers(struct mxr_device *mdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mdev->layer); ++i)
- if (mdev->layer[i])
- mxr_layer_release(mdev->layer[i]);
-}
-
-static int mxr_acquire_layers(struct mxr_device *mdev,
- struct mxr_platform_data *pdata)
-{
- mdev->layer[0] = mxr_graph_layer_create(mdev, 0);
- mdev->layer[1] = mxr_graph_layer_create(mdev, 1);
- mdev->layer[2] = mxr_vp_layer_create(mdev, 0);
-
- if (!mdev->layer[0] || !mdev->layer[1] || !mdev->layer[2]) {
- mxr_err(mdev, "failed to acquire layers\n");
- goto fail;
- }
-
- return 0;
-
-fail:
- mxr_release_layers(mdev);
- return -ENODEV;
-}
-
-/* ---------- POWER MANAGEMENT ----------- */
-
-static int mxr_runtime_resume(struct device *dev)
-{
- struct mxr_device *mdev = to_mdev(dev);
- struct mxr_resources *res = &mdev->res;
- int ret;
-
- mxr_dbg(mdev, "resume - start\n");
- mutex_lock(&mdev->mutex);
- /* turn clocks on */
- ret = clk_prepare_enable(res->mixer);
- if (ret < 0) {
- dev_err(mdev->dev, "clk_prepare_enable(mixer) failed\n");
- goto fail;
- }
- ret = clk_prepare_enable(res->vp);
- if (ret < 0) {
- dev_err(mdev->dev, "clk_prepare_enable(vp) failed\n");
- goto fail_mixer;
- }
- ret = clk_prepare_enable(res->sclk_mixer);
- if (ret < 0) {
- dev_err(mdev->dev, "clk_prepare_enable(sclk_mixer) failed\n");
- goto fail_vp;
- }
- /* apply default configuration */
- mxr_reg_reset(mdev);
- mxr_dbg(mdev, "resume - finished\n");
-
- mutex_unlock(&mdev->mutex);
- return 0;
-
-fail_vp:
- clk_disable_unprepare(res->vp);
-fail_mixer:
- clk_disable_unprepare(res->mixer);
-fail:
- mutex_unlock(&mdev->mutex);
- dev_err(mdev->dev, "resume failed\n");
- return ret;
-}
-
-static int mxr_runtime_suspend(struct device *dev)
-{
- struct mxr_device *mdev = to_mdev(dev);
- struct mxr_resources *res = &mdev->res;
- mxr_dbg(mdev, "suspend - start\n");
- mutex_lock(&mdev->mutex);
- /* turn clocks off */
- clk_disable_unprepare(res->sclk_mixer);
- clk_disable_unprepare(res->vp);
- clk_disable_unprepare(res->mixer);
- mutex_unlock(&mdev->mutex);
- mxr_dbg(mdev, "suspend - finished\n");
- return 0;
-}
-
-static const struct dev_pm_ops mxr_pm_ops = {
- .runtime_suspend = mxr_runtime_suspend,
- .runtime_resume = mxr_runtime_resume,
-};
-
-/* --------- DRIVER INITIALIZATION ---------- */
-
-static int mxr_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct mxr_platform_data *pdata = dev->platform_data;
- struct mxr_device *mdev;
- int ret;
-
- /* mdev does not exist yet so no mxr_dbg is used */
- dev_info(dev, "probe start\n");
-
- mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev) {
- dev_err(dev, "not enough memory.\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- /* setup pointer to master device */
- mdev->dev = dev;
-
- mutex_init(&mdev->mutex);
- spin_lock_init(&mdev->reg_slock);
- init_waitqueue_head(&mdev->event_queue);
-
- /* acquire resources: regs, irqs, clocks, regulators */
- ret = mxr_acquire_resources(mdev, pdev);
- if (ret)
- goto fail_mem;
-
- /* configure resources for video output */
- ret = mxr_acquire_video(mdev, mxr_output_conf,
- ARRAY_SIZE(mxr_output_conf));
- if (ret)
- goto fail_resources;
-
- /* configure layers */
- ret = mxr_acquire_layers(mdev, pdata);
- if (ret)
- goto fail_video;
-
- pm_runtime_enable(dev);
-
- mxr_info(mdev, "probe successful\n");
- return 0;
-
-fail_video:
- mxr_release_video(mdev);
-
-fail_resources:
- mxr_release_resources(mdev);
-
-fail_mem:
- kfree(mdev);
-
-fail:
- dev_info(dev, "probe failed\n");
- return ret;
-}
-
-static int mxr_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct mxr_device *mdev = to_mdev(dev);
-
- pm_runtime_disable(dev);
-
- mxr_release_layers(mdev);
- mxr_release_video(mdev);
- mxr_release_resources(mdev);
-
- kfree(mdev);
-
- dev_info(dev, "remove successful\n");
- return 0;
-}
-
-static struct platform_driver mxr_driver __refdata = {
- .probe = mxr_probe,
- .remove = mxr_remove,
- .driver = {
- .name = MXR_DRIVER_NAME,
- .pm = &mxr_pm_ops,
- }
-};
-
-static int __init mxr_init(void)
-{
- int i, ret;
- static const char banner[] __initconst =
- "Samsung TV Mixer driver, "
- "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
- pr_info("%s\n", banner);
-
- /* Loading auxiliary modules */
- for (i = 0; i < ARRAY_SIZE(mxr_output_conf); ++i)
- request_module(mxr_output_conf[i].module_name);
-
- ret = platform_driver_register(&mxr_driver);
- if (ret != 0) {
- pr_err("s5p-tv: registration of MIXER driver failed\n");
-		return ret;
- }
-
- return 0;
-}
-module_init(mxr_init);
-
-static void __exit mxr_exit(void)
-{
- platform_driver_unregister(&mxr_driver);
-}
-module_exit(mxr_exit);
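
[Editor's note] mixer_drv.c pairs every *_get() with a *_put() and acts only on the first and last user: the first streamer powers the pipeline up, the last one shuts it down. A toy model of that counted get/put scheme (locking elided; the driver holds mdev->mutex around both):

#include <stdio.h>

static int n_users;

static void power(int on)
{
	printf("power %s\n", on ? "on" : "off");
}

static void user_get(void)
{
	if (++n_users == 1)	/* first user turns the resource on */
		power(1);
}

static void user_put(void)
{
	if (--n_users == 0)	/* last user turns it off */
		power(0);
	if (n_users < 0)
		fprintf(stderr, "unbalanced put\n");
}

int main(void)
{
	user_get();	/* power on */
	user_get();	/* no-op */
	user_put();	/* no-op */
	user_put();	/* power off */
	return 0;
}
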
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
deleted file mode 100644
index d4d2564f7de7..000000000000
--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License,
- * or (at your option) any later version
- */
-
-#include "mixer.h"
-
-#include <media/videobuf2-dma-contig.h>
-
-/* FORMAT DEFINITIONS */
-
-static const struct mxr_format mxr_fb_fmt_rgb565 = {
- .name = "RGB565",
- .fourcc = V4L2_PIX_FMT_RGB565,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .num_planes = 1,
- .plane = {
- { .width = 1, .height = 1, .size = 2 },
- },
- .num_subframes = 1,
- .cookie = 4,
-};
-
-static const struct mxr_format mxr_fb_fmt_argb1555 = {
- .name = "ARGB1555",
- .num_planes = 1,
- .fourcc = V4L2_PIX_FMT_RGB555,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .plane = {
- { .width = 1, .height = 1, .size = 2 },
- },
- .num_subframes = 1,
- .cookie = 5,
-};
-
-static const struct mxr_format mxr_fb_fmt_argb4444 = {
- .name = "ARGB4444",
- .num_planes = 1,
- .fourcc = V4L2_PIX_FMT_RGB444,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .plane = {
- { .width = 1, .height = 1, .size = 2 },
- },
- .num_subframes = 1,
- .cookie = 6,
-};
-
-static const struct mxr_format mxr_fb_fmt_argb8888 = {
- .name = "ARGB8888",
- .fourcc = V4L2_PIX_FMT_BGR32,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .num_planes = 1,
- .plane = {
- { .width = 1, .height = 1, .size = 4 },
- },
- .num_subframes = 1,
- .cookie = 7,
-};
-
-static const struct mxr_format *mxr_graph_format[] = {
- &mxr_fb_fmt_rgb565,
- &mxr_fb_fmt_argb1555,
- &mxr_fb_fmt_argb4444,
- &mxr_fb_fmt_argb8888,
-};
-
-/* AUXILIARY CALLBACKS */
-
-static void mxr_graph_layer_release(struct mxr_layer *layer)
-{
- mxr_base_layer_unregister(layer);
- mxr_base_layer_release(layer);
-}
-
-static void mxr_graph_buffer_set(struct mxr_layer *layer,
- struct mxr_buffer *buf)
-{
- dma_addr_t addr = 0;
-
- if (buf)
- addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
- mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
-}
-
-static void mxr_graph_stream_set(struct mxr_layer *layer, int en)
-{
- mxr_reg_graph_layer_stream(layer->mdev, layer->idx, en);
-}
-
-static void mxr_graph_format_set(struct mxr_layer *layer)
-{
- mxr_reg_graph_format(layer->mdev, layer->idx,
- layer->fmt, &layer->geo);
-}
-
-static inline unsigned int closest(unsigned int x, unsigned int a,
- unsigned int b, unsigned long flags)
-{
- unsigned int mid = (a + b) / 2;
-
- /* choosing closest value with constraints according to table:
- * -------------+-----+-----+-----+-------+
- * flags | 0 | LE | GE | LE|GE |
- * -------------+-----+-----+-----+-------+
- * x <= a | a | a | a | a |
- * a < x <= mid | a | a | b | a |
- * mid < x < b | b | a | b | b |
- * b <= x | b | b | b | b |
- * -------------+-----+-----+-----+-------+
- */
-
- /* remove all non-constraint flags */
- flags &= V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE;
-
- if (x <= a)
- return a;
- if (x >= b)
- return b;
- if (flags == V4L2_SEL_FLAG_LE)
- return a;
- if (flags == V4L2_SEL_FLAG_GE)
- return b;
- if (x <= mid)
- return a;
- return b;
-}
-
-static inline unsigned int do_center(unsigned int center,
- unsigned int size, unsigned int upper, unsigned int flags)
-{
- unsigned int lower;
-
- if (flags & MXR_NO_OFFSET)
- return 0;
-
- lower = center - min(center, size / 2);
- return min(lower, upper - size);
-}
-
-static void mxr_graph_fix_geometry(struct mxr_layer *layer,
- enum mxr_geometry_stage stage, unsigned long flags)
-{
- struct mxr_geometry *geo = &layer->geo;
- struct mxr_crop *src = &geo->src;
- struct mxr_crop *dst = &geo->dst;
- unsigned int x_center, y_center;
-
- switch (stage) {
-
- case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
- flags = 0;
- /* fall through */
-
- case MXR_GEOMETRY_COMPOSE:
- /* remember center of the area */
- x_center = dst->x_offset + dst->width / 2;
- y_center = dst->y_offset + dst->height / 2;
-		/* round up/down to a multiple of 2 depending on flags */
- if (flags & V4L2_SEL_FLAG_LE) {
- dst->width = round_down(dst->width, 2);
- dst->height = round_down(dst->height, 2);
- } else {
- dst->width = round_up(dst->width, 2);
- dst->height = round_up(dst->height, 2);
- }
-		/* ensure that compose rect is inside the display area */
- dst->width = min(dst->width, dst->full_width);
- dst->height = min(dst->height, dst->full_height);
-
- /* ensure that compose is reachable using 2x scaling */
- dst->width = min(dst->width, 2 * src->full_width);
- dst->height = min(dst->height, 2 * src->full_height);
-
- /* setup offsets */
- dst->x_offset = do_center(x_center, dst->width,
- dst->full_width, flags);
- dst->y_offset = do_center(y_center, dst->height,
- dst->full_height, flags);
- flags = 0;
- /* fall through */
-
- case MXR_GEOMETRY_CROP:
- /* remember center of the area */
- x_center = src->x_offset + src->width / 2;
- y_center = src->y_offset + src->height / 2;
- /* ensure that cropping area lies inside the buffer */
- if (src->full_width < dst->width)
- src->width = dst->width / 2;
- else
- src->width = closest(src->width, dst->width / 2,
- dst->width, flags);
-
- if (src->width == dst->width)
- geo->x_ratio = 0;
- else
- geo->x_ratio = 1;
-
- if (src->full_height < dst->height)
- src->height = dst->height / 2;
- else
- src->height = closest(src->height, dst->height / 2,
- dst->height, flags);
-
- if (src->height == dst->height)
- geo->y_ratio = 0;
- else
- geo->y_ratio = 1;
-
- /* setup offsets */
- src->x_offset = do_center(x_center, src->width,
- src->full_width, flags);
- src->y_offset = do_center(y_center, src->height,
- src->full_height, flags);
- flags = 0;
- /* fall through */
- case MXR_GEOMETRY_SOURCE:
- src->full_width = clamp_val(src->full_width,
- src->width + src->x_offset, 32767);
- src->full_height = clamp_val(src->full_height,
- src->height + src->y_offset, 2047);
- }
-}
-
-/* PUBLIC API */
-
-struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
-{
- struct mxr_layer *layer;
- int ret;
- const struct mxr_layer_ops ops = {
- .release = mxr_graph_layer_release,
- .buffer_set = mxr_graph_buffer_set,
- .stream_set = mxr_graph_stream_set,
- .format_set = mxr_graph_format_set,
- .fix_geometry = mxr_graph_fix_geometry,
- };
- char name[32];
-
- sprintf(name, "graph%d", idx);
-
- layer = mxr_base_layer_create(mdev, idx, name, &ops);
- if (layer == NULL) {
- mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
- goto fail;
- }
-
- layer->fmt_array = mxr_graph_format;
- layer->fmt_array_size = ARRAY_SIZE(mxr_graph_format);
-
- ret = mxr_base_layer_register(layer);
- if (ret)
- goto fail_layer;
-
- return layer;
-
-fail_layer:
- mxr_base_layer_release(layer);
-
-fail:
- return NULL;
-}
-
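
[Editor's note] A runnable mirror of the closest() helper above, showing how the LE/GE selection flags bias the rounding of a crop dimension; the flag values below are stand-ins for the V4L2_SEL_FLAG_* constants:

#include <stdio.h>

#define FLAG_LE 1UL
#define FLAG_GE 2UL

static unsigned int closest(unsigned int x, unsigned int a,
			    unsigned int b, unsigned long flags)
{
	unsigned int mid = (a + b) / 2;

	flags &= FLAG_LE | FLAG_GE;
	if (x <= a)
		return a;
	if (x >= b)
		return b;
	if (flags == FLAG_LE)
		return a;
	if (flags == FLAG_GE)
		return b;
	return x <= mid ? a : b;	/* unconstrained: pick the nearer */
}

int main(void)
{
	/* crop width for x = 500 between dst/2 = 360 and dst = 720 */
	printf("%u %u %u\n",
	       closest(500, 360, 720, 0),		/* 360 (500 <= 540) */
	       closest(500, 360, 720, FLAG_LE),		/* 360 */
	       closest(500, 360, 720, FLAG_GE));	/* 720 */
	return 0;
}
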
diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
deleted file mode 100644
index a0ec14a1da13..000000000000
--- a/drivers/media/platform/s5p-tv/mixer_reg.c
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License,
- * or (at your option) any later version
- */
-
-#include "mixer.h"
-#include "regs-mixer.h"
-#include "regs-vp.h"
-
-#include <linux/delay.h>
-
-/* Register access subroutines */
-
-static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
-{
- return readl(mdev->res.vp_regs + reg_id);
-}
-
-static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
-{
- writel(val, mdev->res.vp_regs + reg_id);
-}
-
-static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
- u32 val, u32 mask)
-{
- u32 old = vp_read(mdev, reg_id);
-
- val = (val & mask) | (old & ~mask);
- writel(val, mdev->res.vp_regs + reg_id);
-}
-
-static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
-{
- return readl(mdev->res.mxr_regs + reg_id);
-}
-
-static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
-{
- writel(val, mdev->res.mxr_regs + reg_id);
-}
-
-static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
- u32 val, u32 mask)
-{
- u32 old = mxr_read(mdev, reg_id);
-
- val = (val & mask) | (old & ~mask);
- writel(val, mdev->res.mxr_regs + reg_id);
-}
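
/* [Editor's sketch, not part of the removed file] The *_write_mask()
 * helpers above are a plain read-modify-write, touching only the bits
 * covered by 'mask'; a standalone mirror: */
#include <inttypes.h>
#include <stdio.h>

static uint32_t write_mask(uint32_t old, uint32_t val, uint32_t mask)
{
	return (val & mask) | (old & ~mask);
}

int main(void)
{
	/* set bits 4..7 to 0xA without disturbing the other bits */
	printf("0x%08" PRIX32 "\n", write_mask(0x12345678u, 0xA0u, 0xF0u));
	return 0;	/* prints 0x123456A8 */
}
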
-
-void mxr_vsync_set_update(struct mxr_device *mdev, int en)
-{
- /* block update on vsync */
- mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
- MXR_STATUS_SYNC_ENABLE);
- vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
-}
-
-static void __mxr_reg_vp_reset(struct mxr_device *mdev)
-{
-	int tries;
-
- vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
- for (tries = 100; tries; --tries) {
- /* waiting until VP_SRESET_PROCESSING is 0 */
- if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
- break;
- mdelay(10);
- }
- WARN(tries == 0, "failed to reset Video Processor\n");
-}
-
-static void mxr_reg_vp_default_filter(struct mxr_device *mdev);
-
-void mxr_reg_reset(struct mxr_device *mdev)
-{
- unsigned long flags;
- u32 val; /* value stored to register */
-
- spin_lock_irqsave(&mdev->reg_slock, flags);
- mxr_vsync_set_update(mdev, MXR_DISABLE);
-
- /* set output in RGB888 mode */
- mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);
-
- /* 16 beat burst in DMA */
- mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
- MXR_STATUS_BURST_MASK);
-
- /* setting default layer priority: layer1 > video > layer0
- * because typical usage scenario would be
- * layer0 - framebuffer
- * video - video overlay
- * layer1 - OSD
- */
- val = MXR_LAYER_CFG_GRP0_VAL(1);
- val |= MXR_LAYER_CFG_VP_VAL(2);
- val |= MXR_LAYER_CFG_GRP1_VAL(3);
- mxr_write(mdev, MXR_LAYER_CFG, val);
-
- /* use dark gray background color */
- mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
- mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
- mxr_write(mdev, MXR_BG_COLOR2, 0x808080);
-
- /* setting graphical layers */
-
-	val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no color keying */
- val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
- val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
-
- /* the same configuration for both layers */
- mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
- mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);
-
- /* configuration of Video Processor Registers */
- __mxr_reg_vp_reset(mdev);
- mxr_reg_vp_default_filter(mdev);
-
- /* enable all interrupts */
- mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);
-
- mxr_vsync_set_update(mdev, MXR_ENABLE);
- spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
- const struct mxr_format *fmt, const struct mxr_geometry *geo)
-{
- u32 val;
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->reg_slock, flags);
- mxr_vsync_set_update(mdev, MXR_DISABLE);
-
- /* setup format */
- mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
- MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);
-
- /* setup geometry */
- mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
- val = MXR_GRP_WH_WIDTH(geo->src.width);
- val |= MXR_GRP_WH_HEIGHT(geo->src.height);
- val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
- val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
- mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);
-
- /* setup offsets in source image */
- val = MXR_GRP_SXY_SX(geo->src.x_offset);
- val |= MXR_GRP_SXY_SY(geo->src.y_offset);
- mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);
-
- /* setup offsets in display image */
- val = MXR_GRP_DXY_DX(geo->dst.x_offset);
- val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
- mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);
-
- mxr_vsync_set_update(mdev, MXR_ENABLE);
- spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_vp_format(struct mxr_device *mdev,
- const struct mxr_format *fmt, const struct mxr_geometry *geo)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->reg_slock, flags);
- mxr_vsync_set_update(mdev, MXR_DISABLE);
-
- vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);
-
- /* setting size of input image */
- vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
- VP_IMG_VSIZE(geo->src.full_height));
-	/* chroma height has to be halved to avoid chroma distortions */
- vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
- VP_IMG_VSIZE(geo->src.full_height / 2));
-
- vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
- vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
- vp_write(mdev, VP_SRC_H_POSITION,
- VP_SRC_H_POSITION_VAL(geo->src.x_offset));
- vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);
-
- vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
- vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
- if (geo->dst.field == V4L2_FIELD_INTERLACED) {
- vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
- vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
- } else {
- vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
- vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
- }
-
- vp_write(mdev, VP_H_RATIO, geo->x_ratio);
- vp_write(mdev, VP_V_RATIO, geo->y_ratio);
-
- vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
-
- mxr_vsync_set_update(mdev, MXR_ENABLE);
- spin_unlock_irqrestore(&mdev->reg_slock, flags);
-
-}
-
-void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
-{
- u32 val = addr ? ~0 : 0;
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->reg_slock, flags);
- mxr_vsync_set_update(mdev, MXR_DISABLE);
-
- if (idx == 0)
- mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
- else
- mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
- mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);
-
- mxr_vsync_set_update(mdev, MXR_ENABLE);
- spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_vp_buffer(struct mxr_device *mdev,
- dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
-{
- u32 val = luma_addr[0] ? ~0 : 0;
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->reg_slock, flags);
- mxr_vsync_set_update(mdev, MXR_DISABLE);
-
- mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
- vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
- /* TODO: fix tiled mode */
- vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
- vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
- vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
- vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);
-
- mxr_vsync_set_update(mdev, MXR_ENABLE);
- spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-static void mxr_irq_layer_handle(struct mxr_layer *layer)
-{
-	struct list_head *head;
-	struct mxr_buffer *done;
-
-	/* skip non-existing layer */
-	if (layer == NULL)
-		return;
-
-	head = &layer->enq_list;
-
- spin_lock(&layer->enq_slock);
- if (layer->state == MXR_LAYER_IDLE)
- goto done;
-
- done = layer->shadow_buf;
- layer->shadow_buf = layer->update_buf;
-
- if (list_empty(head)) {
- if (layer->state != MXR_LAYER_STREAMING)
- layer->update_buf = NULL;
- } else {
- struct mxr_buffer *next;
- next = list_first_entry(head, struct mxr_buffer, list);
- list_del(&next->list);
- layer->update_buf = next;
- }
-
- layer->ops.buffer_set(layer, layer->update_buf);
-
- if (done && done != layer->shadow_buf)
- vb2_buffer_done(&done->vb.vb2_buf, VB2_BUF_STATE_DONE);
-
-done:
- spin_unlock(&layer->enq_slock);
-}
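
/* [Editor's sketch, not part of the removed file] A toy model of the
 * per-vsync shadow/update buffer rotation in mxr_irq_layer_handle()
 * above; the enqueue list is reduced to one slot and locking is
 * elided (the driver holds enq_slock): */
#include <stdio.h>

struct buf { int id; };

struct layer_model {
	struct buf *shadow;	/* currently latched by hardware */
	struct buf *update;	/* written to staging registers */
	struct buf *queued;	/* next buffer waiting */
};

static struct buf *rotate(struct layer_model *l)
{
	struct buf *done = l->shadow;

	l->shadow = l->update;		/* staging regs were latched */
	if (l->queued) {		/* promote the queued buffer */
		l->update = l->queued;
		l->queued = NULL;
	}
	/* a buffer is finished only once hardware stops scanning it */
	return (done && done != l->shadow) ? done : NULL;
}

int main(void)
{
	struct buf a = { 1 }, b = { 2 }, c = { 3 };
	struct layer_model l = { .shadow = &a, .update = &b, .queued = &c };
	struct buf *done = rotate(&l);

	printf("done=%d shadow=%d update=%d\n",
	       done ? done->id : 0, l.shadow->id, l.update->id);
	return 0;	/* prints done=1 shadow=2 update=3 */
}
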
-
-irqreturn_t mxr_irq_handler(int irq, void *dev_data)
-{
- struct mxr_device *mdev = dev_data;
- u32 i, val;
-
- spin_lock(&mdev->reg_slock);
- val = mxr_read(mdev, MXR_INT_STATUS);
-
- /* wake up process waiting for VSYNC */
- if (val & MXR_INT_STATUS_VSYNC) {
- set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
- /* toggle TOP field event if working in interlaced mode */
- if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
- change_bit(MXR_EVENT_TOP, &mdev->event_flags);
- wake_up(&mdev->event_queue);
-		/* the vsync interrupt uses different bits for read and clear */
- val &= ~MXR_INT_STATUS_VSYNC;
- val |= MXR_INT_CLEAR_VSYNC;
- }
-
- /* clear interrupts */
- mxr_write(mdev, MXR_INT_STATUS, val);
-
- spin_unlock(&mdev->reg_slock);
-	/* bail out on non-vsync events */
- if (~val & MXR_INT_CLEAR_VSYNC)
- return IRQ_HANDLED;
- /* skip layer update on bottom field */
- if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
- return IRQ_HANDLED;
- for (i = 0; i < MXR_MAX_LAYERS; ++i)
- mxr_irq_layer_handle(mdev->layer[i]);
- return IRQ_HANDLED;
-}
-
-void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
-{
- u32 val;
-
- val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
- mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
-}
-
-void mxr_reg_streamon(struct mxr_device *mdev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->reg_slock, flags);
- /* single write -> no need to block vsync update */
-
- /* start MIXER */
- mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
- set_bit(MXR_EVENT_TOP, &mdev->event_flags);
-
- spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_streamoff(struct mxr_device *mdev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->reg_slock, flags);
- /* single write -> no need to block vsync update */
-
- /* stop MIXER */
- mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
-
- spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-int mxr_reg_wait4vsync(struct mxr_device *mdev)
-{
- long time_left;
-
- clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
- /* TODO: consider adding interruptible */
- time_left = wait_event_timeout(mdev->event_queue,
- test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
- msecs_to_jiffies(1000));
- if (time_left > 0)
- return 0;
- mxr_warn(mdev, "no vsync detected - timeout\n");
- return -ETIME;
-}
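
mxr_reg_wait4vsync() is the classic clear-then-wait pattern: the event bit is cleared first, so a vsync firing between the clear and the sleep still wakes the waiter. A userspace analog with pthreads, all names hypothetical:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool vsync_seen;

/* the IRQ analog: a producer thread calls this on each vsync */
static void vsync_fire(void)
{
        pthread_mutex_lock(&lock);
        vsync_seen = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}

/* returns 0 on vsync, ETIMEDOUT after ~1 s, mirroring -ETIME above */
static int wait4vsync(void)
{
        struct timespec ts;
        int err = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 1;

        pthread_mutex_lock(&lock);
        vsync_seen = false;                /* clear_bit() */
        while (!vsync_seen && err == 0)    /* wait_event_timeout() */
                err = pthread_cond_timedwait(&cond, &lock, &ts);
        pthread_mutex_unlock(&lock);
        return vsync_seen ? 0 : ETIMEDOUT;
}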
-
-void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
- struct v4l2_mbus_framefmt *fmt)
-{
- u32 val = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->reg_slock, flags);
- mxr_vsync_set_update(mdev, MXR_DISABLE);
-
- /* selecting colorspace accepted by output */
- if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
- val |= MXR_CFG_OUT_YUV444;
- else
- val |= MXR_CFG_OUT_RGB888;
-
- /* choosing between interlace and progressive mode */
- if (fmt->field == V4L2_FIELD_INTERLACED)
- val |= MXR_CFG_SCAN_INTERLACE;
- else
- val |= MXR_CFG_SCAN_PROGRASSIVE;
-
- /* choosing between proper HD and SD mode */
- if (fmt->height == 480)
- val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
- else if (fmt->height == 576)
- val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
- else if (fmt->height == 720)
- val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
- else if (fmt->height == 1080)
- val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
- else
- WARN(1, "unrecognized mbus height %u!\n", fmt->height);
-
- mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
- MXR_CFG_OUT_MASK);
-
- val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
- vp_write_mask(mdev, VP_MODE, val,
- VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);
-
- mxr_vsync_set_update(mdev, MXR_ENABLE);
- spin_unlock_irqrestore(&mdev->reg_slock, flags);
-}
-
-void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
-{
- /* no extra actions need to be done */
-}
-
-void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
-{
- /* no extra actions need to be done */
-}
-
-static const u8 filter_y_horiz_tap8[] = {
- 0, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 0, 0, 0,
- 0, 2, 4, 5, 6, 6, 6, 6,
- 6, 5, 5, 4, 3, 2, 1, 1,
- 0, -6, -12, -16, -18, -20, -21, -20,
- -20, -18, -16, -13, -10, -8, -5, -2,
- 127, 126, 125, 121, 114, 107, 99, 89,
- 79, 68, 57, 46, 35, 25, 16, 8,
-};
-
-static const u8 filter_y_vert_tap4[] = {
- 0, -3, -6, -8, -8, -8, -8, -7,
- -6, -5, -4, -3, -2, -1, -1, 0,
- 127, 126, 124, 118, 111, 102, 92, 81,
- 70, 59, 48, 37, 27, 19, 11, 5,
- 0, 5, 11, 19, 27, 37, 48, 59,
- 70, 81, 92, 102, 111, 118, 124, 126,
- 0, 0, -1, -1, -2, -3, -4, -5,
- -6, -7, -8, -8, -8, -8, -6, -3,
-};
-
-static const u8 filter_cr_horiz_tap4[] = {
- 0, -3, -6, -8, -8, -8, -8, -7,
- -6, -5, -4, -3, -2, -1, -1, 0,
- 127, 126, 124, 118, 111, 102, 92, 81,
- 70, 59, 48, 37, 27, 19, 11, 5,
-};
-
-static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
- int reg_id, const u8 *data, unsigned int size)
-{
- /* assure 4-byte alignment */
- BUG_ON(size & 3);
- for (; size; size -= 4, reg_id += 4, data += 4) {
- u32 val = (data[0] << 24) | (data[1] << 16) |
- (data[2] << 8) | data[3];
- vp_write(mdev, reg_id, val);
- }
-}
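
The loop above packs four consecutive 8-bit filter taps into each 32-bit register, most significant byte first. A compilable sketch of the packing (pack_taps is a hypothetical helper):

#include <assert.h>
#include <stdint.h>

static uint32_t pack_taps(const uint8_t d[4])
{
        return ((uint32_t)d[0] << 24) | ((uint32_t)d[1] << 16) |
               ((uint32_t)d[2] << 8) | (uint32_t)d[3];
}

int main(void)
{
        /* the third row of filter_y_horiz_tap8 above starts 0, 2, 4, 5 */
        const uint8_t row[4] = { 0, 2, 4, 5 };

        assert(pack_taps(row) == 0x00020405);
        return 0;
}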
-
-static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
-{
- mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
- filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
- mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
- filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
- mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
- filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
-}
-
-static void mxr_reg_mxr_dump(struct mxr_device *mdev)
-{
-#define DUMPREG(reg_id) \
-do { \
- mxr_dbg(mdev, #reg_id " = %08x\n", \
- (u32)readl(mdev->res.mxr_regs + reg_id)); \
-} while (0)
-
- DUMPREG(MXR_STATUS);
- DUMPREG(MXR_CFG);
- DUMPREG(MXR_INT_EN);
- DUMPREG(MXR_INT_STATUS);
-
- DUMPREG(MXR_LAYER_CFG);
- DUMPREG(MXR_VIDEO_CFG);
-
- DUMPREG(MXR_GRAPHIC0_CFG);
- DUMPREG(MXR_GRAPHIC0_BASE);
- DUMPREG(MXR_GRAPHIC0_SPAN);
- DUMPREG(MXR_GRAPHIC0_WH);
- DUMPREG(MXR_GRAPHIC0_SXY);
- DUMPREG(MXR_GRAPHIC0_DXY);
-
- DUMPREG(MXR_GRAPHIC1_CFG);
- DUMPREG(MXR_GRAPHIC1_BASE);
- DUMPREG(MXR_GRAPHIC1_SPAN);
- DUMPREG(MXR_GRAPHIC1_WH);
- DUMPREG(MXR_GRAPHIC1_SXY);
- DUMPREG(MXR_GRAPHIC1_DXY);
-#undef DUMPREG
-}
-
-static void mxr_reg_vp_dump(struct mxr_device *mdev)
-{
-#define DUMPREG(reg_id) \
-do { \
- mxr_dbg(mdev, #reg_id " = %08x\n", \
- (u32) readl(mdev->res.vp_regs + reg_id)); \
-} while (0)
-
- DUMPREG(VP_ENABLE);
- DUMPREG(VP_SRESET);
- DUMPREG(VP_SHADOW_UPDATE);
- DUMPREG(VP_FIELD_ID);
- DUMPREG(VP_MODE);
- DUMPREG(VP_IMG_SIZE_Y);
- DUMPREG(VP_IMG_SIZE_C);
- DUMPREG(VP_PER_RATE_CTRL);
- DUMPREG(VP_TOP_Y_PTR);
- DUMPREG(VP_BOT_Y_PTR);
- DUMPREG(VP_TOP_C_PTR);
- DUMPREG(VP_BOT_C_PTR);
- DUMPREG(VP_ENDIAN_MODE);
- DUMPREG(VP_SRC_H_POSITION);
- DUMPREG(VP_SRC_V_POSITION);
- DUMPREG(VP_SRC_WIDTH);
- DUMPREG(VP_SRC_HEIGHT);
- DUMPREG(VP_DST_H_POSITION);
- DUMPREG(VP_DST_V_POSITION);
- DUMPREG(VP_DST_WIDTH);
- DUMPREG(VP_DST_HEIGHT);
- DUMPREG(VP_H_RATIO);
- DUMPREG(VP_V_RATIO);
-
-#undef DUMPREG
-}
-
-void mxr_reg_dump(struct mxr_device *mdev)
-{
- mxr_reg_mxr_dump(mdev);
- mxr_reg_vp_dump(mdev);
-}
-
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
deleted file mode 100644
index ee74e2b44d69..000000000000
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ /dev/null
@@ -1,1130 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License,
- * or (at your option) any later version.
- */
-
-#define pr_fmt(fmt) "s5p-tv (mixer): " fmt
-
-#include "mixer.h"
-
-#include <media/v4l2-ioctl.h>
-#include <linux/videodev2.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/timer.h>
-#include <media/videobuf2-dma-contig.h>
-
-static int find_reg_callback(struct device *dev, void *p)
-{
- struct v4l2_subdev **sd = p;
-
- *sd = dev_get_drvdata(dev);
- /* non-zero value stops iteration */
- return 1;
-}
-
-static struct v4l2_subdev *find_and_register_subdev(
- struct mxr_device *mdev, char *module_name)
-{
- struct device_driver *drv;
- struct v4l2_subdev *sd = NULL;
- int ret;
-
- /* TODO: add waiting until probe is finished */
- drv = driver_find(module_name, &platform_bus_type);
- if (!drv) {
- mxr_warn(mdev, "module %s is missing\n", module_name);
- return NULL;
- }
- /* the driver's refcount was increased, so it is safe to iterate over its devices */
- ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
- /* ret == 0 means that find_reg_callback was never executed */
- if (sd == NULL) {
- mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
- goto done;
- }
- /* v4l2_device_register_subdev detects if sd is NULL */
- ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
- if (ret) {
- mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
- sd = NULL;
- }
-
-done:
- return sd;
-}
-
-int mxr_acquire_video(struct mxr_device *mdev,
- struct mxr_output_conf *output_conf, int output_count)
-{
- struct device *dev = mdev->dev;
- struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
- int i;
- int ret = 0;
- struct v4l2_subdev *sd;
-
- strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
- /* prepare context for V4L2 device */
- ret = v4l2_device_register(dev, v4l2_dev);
- if (ret) {
- mxr_err(mdev, "could not register v4l2 device.\n");
- goto fail;
- }
-
- vb2_dma_contig_set_max_seg_size(mdev->dev, DMA_BIT_MASK(32));
-
- /* registering outputs */
- mdev->output_cnt = 0;
- for (i = 0; i < output_count; ++i) {
- struct mxr_output_conf *conf = &output_conf[i];
- struct mxr_output *out;
-
- sd = find_and_register_subdev(mdev, conf->module_name);
- /* on failure, try the next output */
- if (sd == NULL)
- continue;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
- if (out == NULL) {
- mxr_err(mdev, "no memory for '%s'\n",
- conf->output_name);
- ret = -ENOMEM;
- /* registered subdevs are removed in fail_v4l2_dev */
- goto fail_output;
- }
- strlcpy(out->name, conf->output_name, sizeof(out->name));
- out->sd = sd;
- out->cookie = conf->cookie;
- mdev->output[mdev->output_cnt++] = out;
- mxr_info(mdev, "added output '%s' from module '%s'\n",
- conf->output_name, conf->module_name);
- /* stop once the maximal number of outputs is reached */
- if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
- break;
- }
-
- if (mdev->output_cnt == 0) {
- mxr_err(mdev, "failed to register any output\n");
- ret = -ENODEV;
- /* skipping fail_output because there is nothing to free */
- goto fail_v4l2_dev;
- }
-
- return 0;
-
-fail_output:
- /* kfree is NULL-safe */
- for (i = 0; i < mdev->output_cnt; ++i)
- kfree(mdev->output[i]);
- memset(mdev->output, 0, sizeof(mdev->output));
-
-fail_v4l2_dev:
- /* NOTE: automatically unregister all subdevs */
- v4l2_device_unregister(v4l2_dev);
-
-fail:
- return ret;
-}
-
-void mxr_release_video(struct mxr_device *mdev)
-{
- int i;
-
- /* kfree is NULL-safe */
- for (i = 0; i < mdev->output_cnt; ++i)
- kfree(mdev->output[i]);
-
- vb2_dma_contig_clear_max_seg_size(mdev->dev);
- v4l2_device_unregister(&mdev->v4l2_dev);
-}
-
-static int mxr_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct mxr_layer *layer = video_drvdata(file);
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
- strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
- strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
- sprintf(cap->bus_info, "%d", layer->idx);
- cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
- return 0;
-}
-
-static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
-{
- mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
- geo->src.full_width, geo->src.full_height);
- mxr_dbg(mdev, "src.size = (%u, %u)\n",
- geo->src.width, geo->src.height);
- mxr_dbg(mdev, "src.offset = (%u, %u)\n",
- geo->src.x_offset, geo->src.y_offset);
- mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
- geo->dst.full_width, geo->dst.full_height);
- mxr_dbg(mdev, "dst.size = (%u, %u)\n",
- geo->dst.width, geo->dst.height);
- mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
- geo->dst.x_offset, geo->dst.y_offset);
- mxr_dbg(mdev, "ratio = (%u, %u)\n",
- geo->x_ratio, geo->y_ratio);
-}
-
-static void mxr_layer_default_geo(struct mxr_layer *layer)
-{
- struct mxr_device *mdev = layer->mdev;
- struct v4l2_mbus_framefmt mbus_fmt;
-
- memset(&layer->geo, 0, sizeof(layer->geo));
-
- mxr_get_mbus_fmt(mdev, &mbus_fmt);
-
- layer->geo.dst.full_width = mbus_fmt.width;
- layer->geo.dst.full_height = mbus_fmt.height;
- layer->geo.dst.width = layer->geo.dst.full_width;
- layer->geo.dst.height = layer->geo.dst.full_height;
- layer->geo.dst.field = mbus_fmt.field;
-
- layer->geo.src.full_width = mbus_fmt.width;
- layer->geo.src.full_height = mbus_fmt.height;
- layer->geo.src.width = layer->geo.src.full_width;
- layer->geo.src.height = layer->geo.src.full_height;
-
- mxr_geometry_dump(mdev, &layer->geo);
- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
- mxr_geometry_dump(mdev, &layer->geo);
-}
-
-static void mxr_layer_update_output(struct mxr_layer *layer)
-{
- struct mxr_device *mdev = layer->mdev;
- struct v4l2_mbus_framefmt mbus_fmt;
-
- mxr_get_mbus_fmt(mdev, &mbus_fmt);
- /* checking if update is needed */
- if (layer->geo.dst.full_width == mbus_fmt.width &&
- layer->geo.dst.full_height == mbus_fmt.height)
- return;
-
- layer->geo.dst.full_width = mbus_fmt.width;
- layer->geo.dst.full_height = mbus_fmt.height;
- layer->geo.dst.field = mbus_fmt.field;
- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
-
- mxr_geometry_dump(mdev, &layer->geo);
-}
-
-static const struct mxr_format *find_format_by_fourcc(
- struct mxr_layer *layer, unsigned long fourcc);
-static const struct mxr_format *find_format_by_index(
- struct mxr_layer *layer, unsigned long index);
-
-static int mxr_enum_fmt(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
- const struct mxr_format *fmt;
-
- mxr_dbg(mdev, "%s\n", __func__);
- fmt = find_format_by_index(layer, f->index);
- if (fmt == NULL)
- return -EINVAL;
-
- strlcpy(f->description, fmt->name, sizeof(f->description));
- f->pixelformat = fmt->fourcc;
-
- return 0;
-}
-
-static unsigned int divup(unsigned int dividend, unsigned int divisor)
-{
- return (dividend + divisor - 1) / divisor;
-}
-
-unsigned long mxr_get_plane_size(const struct mxr_block *blk,
- unsigned int width, unsigned int height)
-{
- unsigned int bl_width = divup(width, blk->width);
- unsigned int bl_height = divup(height, blk->height);
-
- return bl_width * bl_height * blk->size;
-}
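
Worked example of the block arithmetic above, using the NV12MT tiled format defined in mixer_vp_layer.c below (luma stored as 128x32-pixel macroblocks of 4096 bytes): a 1920x1080 luma plane needs divup(1920, 128) = 15 by divup(1080, 32) = 34 blocks, i.e. 15 * 34 * 4096 = 2088960 bytes. As a compilable check:

#include <assert.h>

static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
        return (dividend + divisor - 1) / divisor;
}

int main(void)
{
        /* NV12MT luma: 128x32 blocks, 4096 bytes each */
        unsigned int bw = divup(1920, 128); /* 15 */
        unsigned int bh = divup(1080, 32);  /* 34, since 1080/32 rounds up */

        assert(bw * bh * 4096 == 2088960);
        return 0;
}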
-
-static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
- const struct mxr_format *fmt, u32 width, u32 height)
-{
- int i;
-
- /* nothing to fill in */
- if (!planes)
- return;
-
- memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
- for (i = 0; i < fmt->num_planes; ++i) {
- struct v4l2_plane_pix_format *plane = planes
- + fmt->plane2subframe[i];
- const struct mxr_block *blk = &fmt->plane[i];
- u32 bl_width = divup(width, blk->width);
- u32 bl_height = divup(height, blk->height);
- u32 sizeimage = bl_width * bl_height * blk->size;
- u32 bytesperline = bl_width * blk->size / blk->height;
-
- plane->sizeimage += sizeimage;
- plane->bytesperline = max(plane->bytesperline, bytesperline);
- }
-}
-
-static int mxr_g_fmt(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
- pix->width = layer->geo.src.full_width;
- pix->height = layer->geo.src.full_height;
- pix->field = V4L2_FIELD_NONE;
- pix->pixelformat = layer->fmt->fourcc;
- pix->colorspace = layer->fmt->colorspace;
- mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
-
- return 0;
-}
-
-static int mxr_s_fmt(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct mxr_layer *layer = video_drvdata(file);
- const struct mxr_format *fmt;
- struct v4l2_pix_format_mplane *pix;
- struct mxr_device *mdev = layer->mdev;
- struct mxr_geometry *geo = &layer->geo;
-
- mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
-
- pix = &f->fmt.pix_mp;
- fmt = find_format_by_fourcc(layer, pix->pixelformat);
- if (fmt == NULL) {
- mxr_warn(mdev, "not recognized fourcc: %08x\n",
- pix->pixelformat);
- return -EINVAL;
- }
- layer->fmt = fmt;
- /* set source size to highest accepted value */
- geo->src.full_width = max(geo->dst.full_width, pix->width);
- geo->src.full_height = max(geo->dst.full_height, pix->height);
- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
- mxr_geometry_dump(mdev, &layer->geo);
- /* set cropping to total visible screen */
- geo->src.width = pix->width;
- geo->src.height = pix->height;
- geo->src.x_offset = 0;
- geo->src.y_offset = 0;
- /* assure consistency of geometry */
- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
- mxr_geometry_dump(mdev, &layer->geo);
- /* set full size to lowest possible value */
- geo->src.full_width = 0;
- geo->src.full_height = 0;
- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
- mxr_geometry_dump(mdev, &layer->geo);
-
- /* returning results */
- mxr_g_fmt(file, priv, f);
-
- return 0;
-}
-
-static int mxr_g_selection(struct file *file, void *fh,
- struct v4l2_selection *s)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_geometry *geo = &layer->geo;
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- return -EINVAL;
-
- switch (s->target) {
- case V4L2_SEL_TGT_CROP:
- s->r.left = geo->src.x_offset;
- s->r.top = geo->src.y_offset;
- s->r.width = geo->src.width;
- s->r.height = geo->src.height;
- break;
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_BOUNDS:
- s->r.left = 0;
- s->r.top = 0;
- s->r.width = geo->src.full_width;
- s->r.height = geo->src.full_height;
- break;
- case V4L2_SEL_TGT_COMPOSE:
- case V4L2_SEL_TGT_COMPOSE_PADDED:
- s->r.left = geo->dst.x_offset;
- s->r.top = geo->dst.y_offset;
- s->r.width = geo->dst.width;
- s->r.height = geo->dst.height;
- break;
- case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- s->r.left = 0;
- s->r.top = 0;
- s->r.width = geo->dst.full_width;
- s->r.height = geo->dst.full_height;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* returns 1 if rectangle 'a' is inside 'b' */
-static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
-{
- if (a->left < b->left)
- return 0;
- if (a->top < b->top)
- return 0;
- if (a->left + a->width > b->left + b->width)
- return 0;
- if (a->top + a->height > b->top + b->height)
- return 0;
- return 1;
-}
-
-static int mxr_s_selection(struct file *file, void *fh,
- struct v4l2_selection *s)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_geometry *geo = &layer->geo;
- struct mxr_crop *target = NULL;
- enum mxr_geometry_stage stage;
- struct mxr_geometry tmp;
- struct v4l2_rect res;
-
- memset(&res, 0, sizeof(res));
-
- mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
- s->r.width, s->r.height, s->r.left, s->r.top);
-
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- return -EINVAL;
-
- switch (s->target) {
- /* ignore read-only targets */
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_BOUNDS:
- res.width = geo->src.full_width;
- res.height = geo->src.full_height;
- break;
-
- /* ignore read-only targets */
- case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- res.width = geo->dst.full_width;
- res.height = geo->dst.full_height;
- break;
-
- case V4L2_SEL_TGT_CROP:
- target = &geo->src;
- stage = MXR_GEOMETRY_CROP;
- break;
- case V4L2_SEL_TGT_COMPOSE:
- case V4L2_SEL_TGT_COMPOSE_PADDED:
- target = &geo->dst;
- stage = MXR_GEOMETRY_COMPOSE;
- break;
- default:
- return -EINVAL;
- }
- /* apply change and update geometry if needed */
- if (target) {
- /* back up the current geometry in case setup fails */
- memcpy(&tmp, geo, sizeof(tmp));
-
- /* apply requested selection */
- target->x_offset = s->r.left;
- target->y_offset = s->r.top;
- target->width = s->r.width;
- target->height = s->r.height;
-
- layer->ops.fix_geometry(layer, stage, s->flags);
-
- /* retrieve the updated selection rectangle */
- res.left = target->x_offset;
- res.top = target->y_offset;
- res.width = target->width;
- res.height = target->height;
-
- mxr_geometry_dump(layer->mdev, &layer->geo);
- }
-
- /* checking if the rectangle satisfies constraints */
- if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
- goto fail;
- if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
- goto fail;
-
- /* return result rectangle */
- s->r = res;
-
- return 0;
-fail:
- /* restore old geometry, which is not touched if target is NULL */
- if (target)
- memcpy(geo, &tmp, sizeof(tmp));
- return -ERANGE;
-}
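
The two flag checks before the fail label encode the V4L2 selection contract: V4L2_SEL_FLAG_LE demands a result no larger than requested, V4L2_SEL_FLAG_GE one no smaller. A self-contained restatement (struct rect mirrors struct v4l2_rect; this is a sketch, not the uapi):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct rect { int32_t left, top; uint32_t width, height; };

static bool rect_inside(const struct rect *a, const struct rect *b)
{
        return a->left >= b->left && a->top >= b->top &&
               a->left + (int32_t)a->width <= b->left + (int32_t)b->width &&
               a->top + (int32_t)a->height <= b->top + (int32_t)b->height;
}

int main(void)
{
        struct rect req = { 0, 0, 720, 576 };
        struct rect res = { 0, 0, 720, 578 }; /* driver rounded height up */

        assert(rect_inside(&req, &res));  /* GE holds: result grew */
        assert(!rect_inside(&res, &req)); /* LE would fail here */
        return 0;
}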
-
-static int mxr_enum_dv_timings(struct file *file, void *fh,
- struct v4l2_enum_dv_timings *timings)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
- int ret;
-
- timings->pad = 0;
-
- /* lock protects from changing sd_out */
- mutex_lock(&mdev->mutex);
- ret = v4l2_subdev_call(to_outsd(mdev), pad, enum_dv_timings, timings);
- mutex_unlock(&mdev->mutex);
-
- return ret ? -EINVAL : 0;
-}
-
-static int mxr_s_dv_timings(struct file *file, void *fh,
- struct v4l2_dv_timings *timings)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
- int ret;
-
- /* lock protects from changing sd_out */
- mutex_lock(&mdev->mutex);
-
- /* timings change cannot be done while there is an entity
- * dependent on output configuration
- */
- if (mdev->n_output > 0) {
- mutex_unlock(&mdev->mutex);
- return -EBUSY;
- }
-
- ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_timings, timings);
-
- mutex_unlock(&mdev->mutex);
-
- mxr_layer_update_output(layer);
-
- /* any failure should return EINVAL according to V4L2 doc */
- return ret ? -EINVAL : 0;
-}
-
-static int mxr_g_dv_timings(struct file *file, void *fh,
- struct v4l2_dv_timings *timings)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
- int ret;
-
- /* lock protects from changing sd_out */
- mutex_lock(&mdev->mutex);
- ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_timings, timings);
- mutex_unlock(&mdev->mutex);
-
- return ret ? -EINVAL : 0;
-}
-
-static int mxr_dv_timings_cap(struct file *file, void *fh,
- struct v4l2_dv_timings_cap *cap)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
- int ret;
-
- cap->pad = 0;
-
- /* lock protects from changing sd_out */
- mutex_lock(&mdev->mutex);
- ret = v4l2_subdev_call(to_outsd(mdev), pad, dv_timings_cap, cap);
- mutex_unlock(&mdev->mutex);
-
- return ret ? -EINVAL : 0;
-}
-
-static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
- int ret;
-
- /* lock protects from changing sd_out */
- mutex_lock(&mdev->mutex);
-
- /* standard change cannot be done while there is an entity
- * dependent on output configuration
- */
- if (mdev->n_output > 0) {
- mutex_unlock(&mdev->mutex);
- return -EBUSY;
- }
-
- ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, norm);
-
- mutex_unlock(&mdev->mutex);
-
- mxr_layer_update_output(layer);
-
- return ret ? -EINVAL : 0;
-}
-
-static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
- int ret;
-
- /* lock protects from changing sd_out */
- mutex_lock(&mdev->mutex);
- ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
- mutex_unlock(&mdev->mutex);
-
- return ret ? -EINVAL : 0;
-}
-
-static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
- struct mxr_output *out;
- struct v4l2_subdev *sd;
-
- if (a->index >= mdev->output_cnt)
- return -EINVAL;
- out = mdev->output[a->index];
- BUG_ON(out == NULL);
- sd = out->sd;
- strlcpy(a->name, out->name, sizeof(a->name));
-
- /* try to obtain supported tv norms */
- v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
- a->capabilities = 0;
- if (sd->ops->video && sd->ops->video->s_dv_timings)
- a->capabilities |= V4L2_OUT_CAP_DV_TIMINGS;
- if (sd->ops->video && sd->ops->video->s_std_output)
- a->capabilities |= V4L2_OUT_CAP_STD;
- a->type = V4L2_OUTPUT_TYPE_ANALOG;
-
- return 0;
-}
-
-static int mxr_s_output(struct file *file, void *fh, unsigned int i)
-{
- struct video_device *vfd = video_devdata(file);
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
-
- if (i >= mdev->output_cnt || mdev->output[i] == NULL)
- return -EINVAL;
-
- mutex_lock(&mdev->mutex);
- if (mdev->n_output > 0) {
- mutex_unlock(&mdev->mutex);
- return -EBUSY;
- }
- mdev->current_output = i;
- vfd->tvnorms = 0;
- v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
- &vfd->tvnorms);
- mutex_unlock(&mdev->mutex);
-
- /* update layers geometry */
- mxr_layer_update_output(layer);
-
- mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);
-
- return 0;
-}
-
-static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
-
- mutex_lock(&mdev->mutex);
- *p = mdev->current_output;
- mutex_unlock(&mdev->mutex);
-
- return 0;
-}
-
-static int mxr_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct mxr_layer *layer = video_drvdata(file);
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- return vb2_reqbufs(&layer->vb_queue, p);
-}
-
-static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct mxr_layer *layer = video_drvdata(file);
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- return vb2_querybuf(&layer->vb_queue, p);
-}
-
-static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct mxr_layer *layer = video_drvdata(file);
-
- mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
- return vb2_qbuf(&layer->vb_queue, p);
-}
-
-static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct mxr_layer *layer = video_drvdata(file);
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
-}
-
-static int mxr_expbuf(struct file *file, void *priv,
- struct v4l2_exportbuffer *eb)
-{
- struct mxr_layer *layer = video_drvdata(file);
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- return vb2_expbuf(&layer->vb_queue, eb);
-}
-
-static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct mxr_layer *layer = video_drvdata(file);
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- return vb2_streamon(&layer->vb_queue, i);
-}
-
-static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct mxr_layer *layer = video_drvdata(file);
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- return vb2_streamoff(&layer->vb_queue, i);
-}
-
-static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
- .vidioc_querycap = mxr_querycap,
- /* format handling */
- .vidioc_enum_fmt_vid_out_mplane = mxr_enum_fmt,
- .vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
- .vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
- /* buffer control */
- .vidioc_reqbufs = mxr_reqbufs,
- .vidioc_querybuf = mxr_querybuf,
- .vidioc_qbuf = mxr_qbuf,
- .vidioc_dqbuf = mxr_dqbuf,
- .vidioc_expbuf = mxr_expbuf,
- /* Streaming control */
- .vidioc_streamon = mxr_streamon,
- .vidioc_streamoff = mxr_streamoff,
- /* DV Timings functions */
- .vidioc_enum_dv_timings = mxr_enum_dv_timings,
- .vidioc_s_dv_timings = mxr_s_dv_timings,
- .vidioc_g_dv_timings = mxr_g_dv_timings,
- .vidioc_dv_timings_cap = mxr_dv_timings_cap,
- /* analog TV standard functions */
- .vidioc_s_std = mxr_s_std,
- .vidioc_g_std = mxr_g_std,
- /* Output handling */
- .vidioc_enum_output = mxr_enum_output,
- .vidioc_s_output = mxr_s_output,
- .vidioc_g_output = mxr_g_output,
- /* selection ioctls */
- .vidioc_g_selection = mxr_g_selection,
- .vidioc_s_selection = mxr_s_selection,
-};
-
-static int mxr_video_open(struct file *file)
-{
- struct mxr_layer *layer = video_drvdata(file);
- struct mxr_device *mdev = layer->mdev;
- int ret = 0;
-
- mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
- if (mutex_lock_interruptible(&layer->mutex))
- return -ERESTARTSYS;
- /* assure device probe is finished */
- wait_for_device_probe();
- /* creating context for file descriptor */
- ret = v4l2_fh_open(file);
- if (ret) {
- mxr_err(mdev, "v4l2_fh_open failed\n");
- goto unlock;
- }
-
- /* nothing more to do if the layer is already initialized */
- if (!v4l2_fh_is_singular_file(file))
- goto unlock;
-
- /* FIXME: should power be enabled on open? */
- ret = mxr_power_get(mdev);
- if (ret) {
- mxr_err(mdev, "power on failed\n");
- goto fail_fh_open;
- }
-
- ret = vb2_queue_init(&layer->vb_queue);
- if (ret != 0) {
- mxr_err(mdev, "failed to initialize vb2 queue\n");
- goto fail_power;
- }
- /* set default format, first on the list */
- layer->fmt = layer->fmt_array[0];
- /* setup default geometry */
- mxr_layer_default_geo(layer);
- mutex_unlock(&layer->mutex);
-
- return 0;
-
-fail_power:
- mxr_power_put(mdev);
-
-fail_fh_open:
- v4l2_fh_release(file);
-
-unlock:
- mutex_unlock(&layer->mutex);
-
- return ret;
-}
-
-static unsigned int
-mxr_video_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct mxr_layer *layer = video_drvdata(file);
- unsigned int res;
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
- mutex_lock(&layer->mutex);
- res = vb2_poll(&layer->vb_queue, file, wait);
- mutex_unlock(&layer->mutex);
- return res;
-}
-
-static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct mxr_layer *layer = video_drvdata(file);
- int ret;
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
-
- if (mutex_lock_interruptible(&layer->mutex))
- return -ERESTARTSYS;
- ret = vb2_mmap(&layer->vb_queue, vma);
- mutex_unlock(&layer->mutex);
- return ret;
-}
-
-static int mxr_video_release(struct file *file)
-{
- struct mxr_layer *layer = video_drvdata(file);
-
- mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
- mutex_lock(&layer->mutex);
- if (v4l2_fh_is_singular_file(file)) {
- vb2_queue_release(&layer->vb_queue);
- mxr_power_put(layer->mdev);
- }
- v4l2_fh_release(file);
- mutex_unlock(&layer->mutex);
- return 0;
-}
-
-static const struct v4l2_file_operations mxr_fops = {
- .owner = THIS_MODULE,
- .open = mxr_video_open,
- .poll = mxr_video_poll,
- .mmap = mxr_video_mmap,
- .release = mxr_video_release,
- .unlocked_ioctl = video_ioctl2,
-};
-
-static int queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
- struct device *alloc_devs[])
-{
- struct mxr_layer *layer = vb2_get_drv_priv(vq);
- const struct mxr_format *fmt = layer->fmt;
- int i;
- struct mxr_device *mdev = layer->mdev;
- struct v4l2_plane_pix_format planes[3];
-
- mxr_dbg(mdev, "%s\n", __func__);
- /* checking if format was configured */
- if (fmt == NULL)
- return -EINVAL;
- mxr_dbg(mdev, "fmt = %s\n", fmt->name);
- mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
- layer->geo.src.full_height);
-
- *nplanes = fmt->num_subframes;
- for (i = 0; i < fmt->num_subframes; ++i) {
- sizes[i] = planes[i].sizeimage;
- mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
- }
-
- if (*nbuffers == 0)
- *nbuffers = 1;
-
- return 0;
-}
-
-static void buf_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct mxr_buffer *buffer = container_of(vbuf, struct mxr_buffer, vb);
- struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
- struct mxr_device *mdev = layer->mdev;
- unsigned long flags;
-
- spin_lock_irqsave(&layer->enq_slock, flags);
- list_add_tail(&buffer->list, &layer->enq_list);
- spin_unlock_irqrestore(&layer->enq_slock, flags);
-
- mxr_dbg(mdev, "queuing buffer\n");
-}
-
-static int start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct mxr_layer *layer = vb2_get_drv_priv(vq);
- struct mxr_device *mdev = layer->mdev;
- unsigned long flags;
-
- mxr_dbg(mdev, "%s\n", __func__);
-
- /* block any changes in output configuration */
- mxr_output_get(mdev);
-
- mxr_layer_update_output(layer);
- layer->ops.format_set(layer);
- /* enabling layer in hardware */
- spin_lock_irqsave(&layer->enq_slock, flags);
- layer->state = MXR_LAYER_STREAMING;
- spin_unlock_irqrestore(&layer->enq_slock, flags);
-
- layer->ops.stream_set(layer, MXR_ENABLE);
- mxr_streamer_get(mdev);
-
- return 0;
-}
-
-static void mxr_watchdog(unsigned long arg)
-{
- struct mxr_layer *layer = (struct mxr_layer *) arg;
- struct mxr_device *mdev = layer->mdev;
- unsigned long flags;
-
- mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);
-
- spin_lock_irqsave(&layer->enq_slock, flags);
-
- if (layer->update_buf == layer->shadow_buf)
- layer->update_buf = NULL;
- if (layer->update_buf) {
- vb2_buffer_done(&layer->update_buf->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- layer->update_buf = NULL;
- }
- if (layer->shadow_buf) {
- vb2_buffer_done(&layer->shadow_buf->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- layer->shadow_buf = NULL;
- }
- spin_unlock_irqrestore(&layer->enq_slock, flags);
-}
-
-static void stop_streaming(struct vb2_queue *vq)
-{
- struct mxr_layer *layer = vb2_get_drv_priv(vq);
- struct mxr_device *mdev = layer->mdev;
- unsigned long flags;
- struct timer_list watchdog;
- struct mxr_buffer *buf, *buf_tmp;
-
- mxr_dbg(mdev, "%s\n", __func__);
-
- spin_lock_irqsave(&layer->enq_slock, flags);
-
- /* reset list */
- layer->state = MXR_LAYER_STREAMING_FINISH;
-
- /* mark all queued buffers as done */
- list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- }
-
- spin_unlock_irqrestore(&layer->enq_slock, flags);
-
- /* give 1 second to complete the last buffers */
- setup_timer_on_stack(&watchdog, mxr_watchdog,
- (unsigned long)layer);
- mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));
-
- /* wait until all buffers have reached the done state */
- vb2_wait_for_all_buffers(vq);
-
- /* stop timer if all synchronization is done */
- del_timer_sync(&watchdog);
- destroy_timer_on_stack(&watchdog);
-
- /* stopping hardware */
- spin_lock_irqsave(&layer->enq_slock, flags);
- layer->state = MXR_LAYER_IDLE;
- spin_unlock_irqrestore(&layer->enq_slock, flags);
-
- /* disabling layer in hardware */
- layer->ops.stream_set(layer, MXR_DISABLE);
- /* remove one streamer */
- mxr_streamer_put(mdev);
- /* allow changes in output configuration */
- mxr_output_put(mdev);
-}
-
-static struct vb2_ops mxr_video_qops = {
- .queue_setup = queue_setup,
- .buf_queue = buf_queue,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
- .start_streaming = start_streaming,
- .stop_streaming = stop_streaming,
-};
-
-/* FIXME: try to move these functions into mxr_base_layer_create */
-int mxr_base_layer_register(struct mxr_layer *layer)
-{
- struct mxr_device *mdev = layer->mdev;
- int ret;
-
- ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
- if (ret)
- mxr_err(mdev, "failed to register video device\n");
- else
- mxr_info(mdev, "registered layer %s as /dev/video%d\n",
- layer->vfd.name, layer->vfd.num);
- return ret;
-}
-
-void mxr_base_layer_unregister(struct mxr_layer *layer)
-{
- video_unregister_device(&layer->vfd);
-}
-
-void mxr_layer_release(struct mxr_layer *layer)
-{
- if (layer->ops.release)
- layer->ops.release(layer);
-}
-
-void mxr_base_layer_release(struct mxr_layer *layer)
-{
- kfree(layer);
-}
-
-static void mxr_vfd_release(struct video_device *vdev)
-{
- pr_info("video device release\n");
-}
-
-struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
- int idx, char *name, const struct mxr_layer_ops *ops)
-{
- struct mxr_layer *layer;
-
- layer = kzalloc(sizeof(*layer), GFP_KERNEL);
- if (layer == NULL) {
- mxr_err(mdev, "not enough memory for layer.\n");
- goto fail;
- }
-
- layer->mdev = mdev;
- layer->idx = idx;
- layer->ops = *ops;
-
- spin_lock_init(&layer->enq_slock);
- INIT_LIST_HEAD(&layer->enq_list);
- mutex_init(&layer->mutex);
-
- layer->vfd = (struct video_device) {
- .minor = -1,
- .release = mxr_vfd_release,
- .fops = &mxr_fops,
- .vfl_dir = VFL_DIR_TX,
- .ioctl_ops = &mxr_ioctl_ops,
- };
- strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
-
- video_set_drvdata(&layer->vfd, layer);
- layer->vfd.lock = &layer->mutex;
- layer->vfd.v4l2_dev = &mdev->v4l2_dev;
-
- layer->vb_queue = (struct vb2_queue) {
- .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
- .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
- .drv_priv = layer,
- .buf_struct_size = sizeof(struct mxr_buffer),
- .ops = &mxr_video_qops,
- .min_buffers_needed = 1,
- .mem_ops = &vb2_dma_contig_memops,
- .lock = &layer->mutex,
- .dev = mdev->dev,
- };
-
- return layer;
-
-fail:
- return NULL;
-}
-
-static const struct mxr_format *find_format_by_fourcc(
- struct mxr_layer *layer, unsigned long fourcc)
-{
- int i;
-
- for (i = 0; i < layer->fmt_array_size; ++i)
- if (layer->fmt_array[i]->fourcc == fourcc)
- return layer->fmt_array[i];
- return NULL;
-}
-
-static const struct mxr_format *find_format_by_index(
- struct mxr_layer *layer, unsigned long index)
-{
- if (index >= layer->fmt_array_size)
- return NULL;
- return layer->fmt_array[index];
-}
-
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
deleted file mode 100644
index 6fa6f673f53b..000000000000
--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Samsung TV Mixer driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License,
- * or (at your option) any later version.
- */
-
-#include "mixer.h"
-
-#include "regs-vp.h"
-
-#include <media/videobuf2-dma-contig.h>
-
-/* FORMAT DEFINITIONS */
-static const struct mxr_format mxr_fmt_nv12 = {
- .name = "NV12",
- .fourcc = V4L2_PIX_FMT_NV12,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .num_planes = 2,
- .plane = {
- { .width = 1, .height = 1, .size = 1 },
- { .width = 2, .height = 2, .size = 2 },
- },
- .num_subframes = 1,
- .cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
-};
-
-static const struct mxr_format mxr_fmt_nv21 = {
- .name = "NV21",
- .fourcc = V4L2_PIX_FMT_NV21,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .num_planes = 2,
- .plane = {
- { .width = 1, .height = 1, .size = 1 },
- { .width = 2, .height = 2, .size = 2 },
- },
- .num_subframes = 1,
- .cookie = VP_MODE_NV21 | VP_MODE_MEM_LINEAR,
-};
-
-static const struct mxr_format mxr_fmt_nv12m = {
- .name = "NV12 (mplane)",
- .fourcc = V4L2_PIX_FMT_NV12M,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .num_planes = 2,
- .plane = {
- { .width = 1, .height = 1, .size = 1 },
- { .width = 2, .height = 2, .size = 2 },
- },
- .num_subframes = 2,
- .plane2subframe = {0, 1},
- .cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
-};
-
-static const struct mxr_format mxr_fmt_nv12mt = {
- .name = "NV12 tiled (mplane)",
- .fourcc = V4L2_PIX_FMT_NV12MT,
- .colorspace = V4L2_COLORSPACE_JPEG,
- .num_planes = 2,
- .plane = {
- { .width = 128, .height = 32, .size = 4096 },
- { .width = 128, .height = 32, .size = 2048 },
- },
- .num_subframes = 2,
- .plane2subframe = {0, 1},
- .cookie = VP_MODE_NV12 | VP_MODE_MEM_TILED,
-};
-
-static const struct mxr_format *mxr_video_format[] = {
- &mxr_fmt_nv12,
- &mxr_fmt_nv21,
- &mxr_fmt_nv12m,
- &mxr_fmt_nv12mt,
-};
-
-/* AUXILIARY CALLBACKS */
-
-static void mxr_vp_layer_release(struct mxr_layer *layer)
-{
- mxr_base_layer_unregister(layer);
- mxr_base_layer_release(layer);
-}
-
-static void mxr_vp_buffer_set(struct mxr_layer *layer,
- struct mxr_buffer *buf)
-{
- dma_addr_t luma_addr[2] = {0, 0};
- dma_addr_t chroma_addr[2] = {0, 0};
-
- if (buf == NULL) {
- mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
- return;
- }
- luma_addr[0] = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
- if (layer->fmt->num_subframes == 2) {
- chroma_addr[0] =
- vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 1);
- } else {
- /* FIXME: mxr_get_plane_size computes an integer division,
- * which is slow and should not be performed in interrupt context */
- chroma_addr[0] = luma_addr[0] + mxr_get_plane_size(
- &layer->fmt->plane[0], layer->geo.src.full_width,
- layer->geo.src.full_height);
- }
- if (layer->fmt->cookie & VP_MODE_MEM_TILED) {
- luma_addr[1] = luma_addr[0] + 0x40;
- chroma_addr[1] = chroma_addr[0] + 0x40;
- } else {
- luma_addr[1] = luma_addr[0] + layer->geo.src.full_width;
- chroma_addr[1] = chroma_addr[0];
- }
- mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
-}
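
For the linear layout the address arithmetic above works out as follows: the bottom field of an interlaced frame starts one full line below the top field (one byte per pixel for 8-bit luma), and the single-buffer NV12 fallback places chroma immediately after the luma plane. A small check with hypothetical addresses:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t luma0 = 0x20000000;        /* hypothetical DMA address */
        unsigned int full_width = 1920;     /* bytes per luma line at 8bpp */
        uint64_t plane = 1920u * 1080u;     /* NV12 luma plane, 1x1x1 blocks */

        uint64_t luma1 = luma0 + full_width; /* bottom field: next line */
        uint64_t chroma0 = luma0 + plane;    /* chroma follows the luma plane */

        assert(luma1 - luma0 == 1920);
        assert(chroma0 - luma0 == 2073600);
        return 0;
}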
-
-static void mxr_vp_stream_set(struct mxr_layer *layer, int en)
-{
- mxr_reg_vp_layer_stream(layer->mdev, en);
-}
-
-static void mxr_vp_format_set(struct mxr_layer *layer)
-{
- mxr_reg_vp_format(layer->mdev, layer->fmt, &layer->geo);
-}
-
-static inline unsigned int do_center(unsigned int center,
- unsigned int size, unsigned int upper, unsigned int flags)
-{
- unsigned int lower;
-
- if (flags & MXR_NO_OFFSET)
- return 0;
-
- lower = center - min(center, size / 2);
- return min(lower, upper - size);
-}
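
do_center() keeps the middle of the old rectangle fixed while clamping the new one into [0, upper). A compilable restatement without the flags path (center_offset is a hypothetical rename):

#include <assert.h>

static unsigned int center_offset(unsigned int center, unsigned int size,
                                  unsigned int upper)
{
        unsigned int half = size / 2;
        unsigned int lower = center - (center < half ? center : half);
        unsigned int room = upper - size;

        return lower < room ? lower : room;
}

int main(void)
{
        /* a 300-wide window centered on 512 inside [0, 720) starts at 362 */
        assert(center_offset(512, 300, 720) == 362);
        /* clamped to the right edge when the center sits too far out */
        assert(center_offset(700, 300, 720) == 420);
        return 0;
}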
-
-static void mxr_vp_fix_geometry(struct mxr_layer *layer,
- enum mxr_geometry_stage stage, unsigned long flags)
-{
- struct mxr_geometry *geo = &layer->geo;
- struct mxr_crop *src = &geo->src;
- struct mxr_crop *dst = &geo->dst;
- unsigned long x_center, y_center;
-
- switch (stage) {
-
- case MXR_GEOMETRY_SINK: /* nothing to be fixed here */
- case MXR_GEOMETRY_COMPOSE:
- /* remember center of the area */
- x_center = dst->x_offset + dst->width / 2;
- y_center = dst->y_offset + dst->height / 2;
-
- /* ensure that compose is reachable using 16x scaling */
- dst->width = clamp(dst->width, 8U, 16 * src->full_width);
- dst->height = clamp(dst->height, 1U, 16 * src->full_height);
-
- /* setup offsets */
- dst->x_offset = do_center(x_center, dst->width,
- dst->full_width, flags);
- dst->y_offset = do_center(y_center, dst->height,
- dst->full_height, flags);
- flags = 0; /* remove possible MXR_NO_OFFSET flag */
- /* fall through */
- case MXR_GEOMETRY_CROP:
- /* remember center of the area */
- x_center = src->x_offset + src->width / 2;
- y_center = src->y_offset + src->height / 2;
-
- /* ensure scaling is between 0.25x .. 16x */
- src->width = clamp(src->width, round_up(dst->width / 16, 4),
- dst->width * 4);
- src->height = clamp(src->height, round_up(dst->height / 16, 4),
- dst->height * 4);
-
- /* hardware limits */
- src->width = clamp(src->width, 32U, 2047U);
- src->height = clamp(src->height, 4U, 2047U);
-
- /* setup offsets */
- src->x_offset = do_center(x_center, src->width,
- src->full_width, flags);
- src->y_offset = do_center(y_center, src->height,
- src->full_height, flags);
-
- /* setting scaling ratio */
- geo->x_ratio = (src->width << 16) / dst->width;
- geo->y_ratio = (src->height << 16) / dst->height;
- /* fall through */
-
- case MXR_GEOMETRY_SOURCE:
- src->full_width = clamp(src->full_width,
- ALIGN(src->width + src->x_offset, 8), 8192U);
- src->full_height = clamp(src->full_height,
- src->height + src->y_offset, 8192U);
- }
-}
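
The ratios computed in the CROP stage are 16.16 fixed point, so 0x10000 means 1:1 scaling and larger values mean downscaling. A quick check of the arithmetic:

#include <assert.h>

int main(void)
{
        /* 2x downscale: 1920 source pixels onto 960 destination pixels */
        assert((1920u << 16) / 960u == 0x20000);
        /* 1.5x upscale: 720 -> 1080 gives ~0.667 in 16.16 */
        assert((720u << 16) / 1080u == 0xAAAA);
        return 0;
}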
-
-/* PUBLIC API */
-
-struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
-{
- struct mxr_layer *layer;
- int ret;
- const struct mxr_layer_ops ops = {
- .release = mxr_vp_layer_release,
- .buffer_set = mxr_vp_buffer_set,
- .stream_set = mxr_vp_stream_set,
- .format_set = mxr_vp_format_set,
- .fix_geometry = mxr_vp_fix_geometry,
- };
- char name[32];
-
- sprintf(name, "video%d", idx);
-
- layer = mxr_base_layer_create(mdev, idx, name, &ops);
- if (layer == NULL) {
- mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
- goto fail;
- }
-
- layer->fmt_array = mxr_video_format;
- layer->fmt_array_size = ARRAY_SIZE(mxr_video_format);
-
- ret = mxr_base_layer_register(layer);
- if (ret)
- goto fail_layer;
-
- return layer;
-
-fail_layer:
- mxr_base_layer_release(layer);
-
-fail:
- return NULL;
-}
-
diff --git a/drivers/media/platform/s5p-tv/regs-hdmi.h b/drivers/media/platform/s5p-tv/regs-hdmi.h
deleted file mode 100644
index a889d1f57f28..000000000000
--- a/drivers/media/platform/s5p-tv/regs-hdmi.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-hdmi.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * HDMI register header file for Samsung TVOUT driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef SAMSUNG_REGS_HDMI_H
-#define SAMSUNG_REGS_HDMI_H
-
-/*
- * Register part
-*/
-
-#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
-#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
-#define HDMI_TG_BASE(x) ((x) + 0x00050000)
-
-/* Control registers */
-#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
-#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
-#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
-#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
-#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018)
-#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C)
-#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
-
-/* Core registers */
-#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
-#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
-#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
-#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
-#define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014)
-#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
-#define HDMI_HPD HDMI_CORE_BASE(0x0030)
-#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
-#define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
-#define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
-#define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
-#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
-#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
-#define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
-#define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
-#define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
-#define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
-#define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
-#define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
-#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
-#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
-#define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
-#define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
-#define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
-#define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
-#define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
-#define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
-#define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
-#define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
-#define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
-#define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
-#define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
-#define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
-#define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
-#define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
-#define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
-#define HDMI_AVI_CON HDMI_CORE_BASE(0x0300)
-#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
-#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0)
-#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
-#define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8)
-
-/* Timing generator registers */
-#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
-#define HDMI_TG_H_FSZ_L HDMI_TG_BASE(0x0018)
-#define HDMI_TG_H_FSZ_H HDMI_TG_BASE(0x001C)
-#define HDMI_TG_HACT_ST_L HDMI_TG_BASE(0x0020)
-#define HDMI_TG_HACT_ST_H HDMI_TG_BASE(0x0024)
-#define HDMI_TG_HACT_SZ_L HDMI_TG_BASE(0x0028)
-#define HDMI_TG_HACT_SZ_H HDMI_TG_BASE(0x002C)
-#define HDMI_TG_V_FSZ_L HDMI_TG_BASE(0x0030)
-#define HDMI_TG_V_FSZ_H HDMI_TG_BASE(0x0034)
-#define HDMI_TG_VSYNC_L HDMI_TG_BASE(0x0038)
-#define HDMI_TG_VSYNC_H HDMI_TG_BASE(0x003C)
-#define HDMI_TG_VSYNC2_L HDMI_TG_BASE(0x0040)
-#define HDMI_TG_VSYNC2_H HDMI_TG_BASE(0x0044)
-#define HDMI_TG_VACT_ST_L HDMI_TG_BASE(0x0048)
-#define HDMI_TG_VACT_ST_H HDMI_TG_BASE(0x004C)
-#define HDMI_TG_VACT_SZ_L HDMI_TG_BASE(0x0050)
-#define HDMI_TG_VACT_SZ_H HDMI_TG_BASE(0x0054)
-#define HDMI_TG_FIELD_CHG_L HDMI_TG_BASE(0x0058)
-#define HDMI_TG_FIELD_CHG_H HDMI_TG_BASE(0x005C)
-#define HDMI_TG_VACT_ST2_L HDMI_TG_BASE(0x0060)
-#define HDMI_TG_VACT_ST2_H HDMI_TG_BASE(0x0064)
-#define HDMI_TG_VSYNC_TOP_HDMI_L HDMI_TG_BASE(0x0078)
-#define HDMI_TG_VSYNC_TOP_HDMI_H HDMI_TG_BASE(0x007C)
-#define HDMI_TG_VSYNC_BOT_HDMI_L HDMI_TG_BASE(0x0080)
-#define HDMI_TG_VSYNC_BOT_HDMI_H HDMI_TG_BASE(0x0084)
-#define HDMI_TG_FIELD_TOP_HDMI_L HDMI_TG_BASE(0x0088)
-#define HDMI_TG_FIELD_TOP_HDMI_H HDMI_TG_BASE(0x008C)
-#define HDMI_TG_FIELD_BOT_HDMI_L HDMI_TG_BASE(0x0090)
-#define HDMI_TG_FIELD_BOT_HDMI_H HDMI_TG_BASE(0x0094)
-
-/*
- * Bit definition part
- */
-
-/* HDMI_INTC_CON */
-#define HDMI_INTC_EN_GLOBAL (1 << 6)
-#define HDMI_INTC_EN_HPD_PLUG (1 << 3)
-#define HDMI_INTC_EN_HPD_UNPLUG (1 << 2)
-
-/* HDMI_INTC_FLAG */
-#define HDMI_INTC_FLAG_HPD_PLUG (1 << 3)
-#define HDMI_INTC_FLAG_HPD_UNPLUG (1 << 2)
-
-/* HDMI_PHY_RSTOUT */
-#define HDMI_PHY_SW_RSTOUT (1 << 0)
-
-/* HDMI_CORE_RSTOUT */
-#define HDMI_CORE_SW_RSTOUT (1 << 0)
-
-/* HDMI_CON_0 */
-#define HDMI_BLUE_SCR_EN (1 << 5)
-#define HDMI_EN (1 << 0)
-
-/* HDMI_CON_2 */
-#define HDMI_DVI_PERAMBLE_EN (1 << 5)
-#define HDMI_DVI_BAND_EN (1 << 1)
-
-/* HDMI_PHY_STATUS */
-#define HDMI_PHY_STATUS_READY (1 << 0)
-
-/* HDMI_MODE_SEL */
-#define HDMI_MODE_HDMI_EN (1 << 1)
-#define HDMI_MODE_DVI_EN (1 << 0)
-#define HDMI_MODE_MASK (3 << 0)
-
-/* HDMI_TG_CMD */
-#define HDMI_TG_FIELD_EN (1 << 1)
-#define HDMI_TG_EN (1 << 0)
-
-#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/media/platform/s5p-tv/regs-mixer.h b/drivers/media/platform/s5p-tv/regs-mixer.h
deleted file mode 100644
index 158abb43d0a4..000000000000
--- a/drivers/media/platform/s5p-tv/regs-mixer.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Mixer register header file for Samsung Mixer driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-#ifndef SAMSUNG_REGS_MIXER_H
-#define SAMSUNG_REGS_MIXER_H
-
-/*
- * Register part
- */
-#define MXR_STATUS 0x0000
-#define MXR_CFG 0x0004
-#define MXR_INT_EN 0x0008
-#define MXR_INT_STATUS 0x000C
-#define MXR_LAYER_CFG 0x0010
-#define MXR_VIDEO_CFG 0x0014
-#define MXR_GRAPHIC0_CFG 0x0020
-#define MXR_GRAPHIC0_BASE 0x0024
-#define MXR_GRAPHIC0_SPAN 0x0028
-#define MXR_GRAPHIC0_SXY 0x002C
-#define MXR_GRAPHIC0_WH 0x0030
-#define MXR_GRAPHIC0_DXY 0x0034
-#define MXR_GRAPHIC0_BLANK 0x0038
-#define MXR_GRAPHIC1_CFG 0x0040
-#define MXR_GRAPHIC1_BASE 0x0044
-#define MXR_GRAPHIC1_SPAN 0x0048
-#define MXR_GRAPHIC1_SXY 0x004C
-#define MXR_GRAPHIC1_WH 0x0050
-#define MXR_GRAPHIC1_DXY 0x0054
-#define MXR_GRAPHIC1_BLANK 0x0058
-#define MXR_BG_CFG 0x0060
-#define MXR_BG_COLOR0 0x0064
-#define MXR_BG_COLOR1 0x0068
-#define MXR_BG_COLOR2 0x006C
-
-/* for parametrized access to layer registers */
-#define MXR_GRAPHIC_CFG(i) (0x0020 + (i) * 0x20)
-#define MXR_GRAPHIC_BASE(i) (0x0024 + (i) * 0x20)
-#define MXR_GRAPHIC_SPAN(i) (0x0028 + (i) * 0x20)
-#define MXR_GRAPHIC_SXY(i) (0x002C + (i) * 0x20)
-#define MXR_GRAPHIC_WH(i) (0x0030 + (i) * 0x20)
-#define MXR_GRAPHIC_DXY(i) (0x0034 + (i) * 0x20)
-
-/*
- * Bit definition part
- */
-
-/* generates mask for range of bits */
-#define MXR_MASK(high_bit, low_bit) \
- (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
-
-#define MXR_MASK_VAL(val, high_bit, low_bit) \
- (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
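
Worked example of the mask helpers: MXR_MASK(11, 8) expands to ((2 << 3) - 1) << 8 = 0xF00, and MXR_MASK_VAL() shifts a value into that field while truncating excess bits. Compilable check:

#include <assert.h>

#define MXR_MASK(high_bit, low_bit) \
        (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
#define MXR_MASK_VAL(val, high_bit, low_bit) \
        (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))

int main(void)
{
        assert(MXR_MASK(11, 8) == 0xF00);
        assert(MXR_MASK_VAL(5, 11, 8) == 0x500);
        assert(MXR_MASK_VAL(0x1F, 11, 8) == 0xF00); /* excess bits dropped */
        return 0;
}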
-
-/* bits for MXR_STATUS */
-#define MXR_STATUS_16_BURST (1 << 7)
-#define MXR_STATUS_BURST_MASK (1 << 7)
-#define MXR_STATUS_SYNC_ENABLE (1 << 2)
-#define MXR_STATUS_REG_RUN (1 << 0)
-
-/* bits for MXR_CFG */
-#define MXR_CFG_OUT_YUV444 (0 << 8)
-#define MXR_CFG_OUT_RGB888 (1 << 8)
-#define MXR_CFG_OUT_MASK (1 << 8)
-#define MXR_CFG_DST_SDO (0 << 7)
-#define MXR_CFG_DST_HDMI (1 << 7)
-#define MXR_CFG_DST_MASK (1 << 7)
-#define MXR_CFG_SCAN_HD_720 (0 << 6)
-#define MXR_CFG_SCAN_HD_1080 (1 << 6)
-#define MXR_CFG_GRP1_ENABLE (1 << 5)
-#define MXR_CFG_GRP0_ENABLE (1 << 4)
-#define MXR_CFG_VP_ENABLE (1 << 3)
-#define MXR_CFG_SCAN_INTERLACE (0 << 2)
-#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
-#define MXR_CFG_SCAN_NTSC (0 << 1)
-#define MXR_CFG_SCAN_PAL (1 << 1)
-#define MXR_CFG_SCAN_SD (0 << 0)
-#define MXR_CFG_SCAN_HD (1 << 0)
-#define MXR_CFG_SCAN_MASK 0x47
-
-/* bits for MXR_GRAPHICn_CFG */
-#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
-#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
-#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
-#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
-#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
-
-/* bits for MXR_GRAPHICn_WH */
-#define MXR_GRP_WH_H_SCALE(x) MXR_MASK_VAL(x, 28, 28)
-#define MXR_GRP_WH_V_SCALE(x) MXR_MASK_VAL(x, 12, 12)
-#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
-#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
-
-/* bits for MXR_GRAPHICn_SXY */
-#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
-#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
-
-/* bits for MXR_GRAPHICn_DXY */
-#define MXR_GRP_DXY_DX(x) MXR_MASK_VAL(x, 26, 16)
-#define MXR_GRP_DXY_DY(x) MXR_MASK_VAL(x, 10, 0)
-
-/* bits for MXR_INT_EN */
-#define MXR_INT_EN_VSYNC (1 << 11)
-#define MXR_INT_EN_ALL (0x0f << 8)
-
-/* bit for MXR_INT_STATUS */
-#define MXR_INT_CLEAR_VSYNC (1 << 11)
-#define MXR_INT_STATUS_VSYNC (1 << 0)
-
-/* bit for MXR_LAYER_CFG */
-#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
-#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
-#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
-
-#endif /* SAMSUNG_REGS_MIXER_H */
-
diff --git a/drivers/media/platform/s5p-tv/regs-sdo.h b/drivers/media/platform/s5p-tv/regs-sdo.h
deleted file mode 100644
index 6f22fbfe2f6c..000000000000
--- a/drivers/media/platform/s5p-tv/regs-sdo.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* drivers/media/platform/s5p-tv/regs-sdo.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * SDO register description file
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SAMSUNG_REGS_SDO_H
-#define SAMSUNG_REGS_SDO_H
-
-/*
- * Register part
- */
-
-#define SDO_CLKCON 0x0000
-#define SDO_CONFIG 0x0008
-#define SDO_VBI 0x0014
-#define SDO_DAC 0x003C
-#define SDO_CCCON 0x0180
-#define SDO_IRQ 0x0280
-#define SDO_IRQMASK 0x0284
-#define SDO_VERSION 0x03D8
-
-/*
- * Bit definition part
- */
-
-/* SDO Clock Control Register (SDO_CLKCON) */
-#define SDO_TVOUT_SW_RESET (1 << 4)
-#define SDO_TVOUT_CLOCK_READY (1 << 1)
-#define SDO_TVOUT_CLOCK_ON (1 << 0)
-
-/* SDO Video Standard Configuration Register (SDO_CONFIG) */
-#define SDO_PROGRESSIVE (1 << 4)
-#define SDO_NTSC_M 0
-#define SDO_PAL_M 1
-#define SDO_PAL_BGHID 2
-#define SDO_PAL_N 3
-#define SDO_PAL_NC 4
-#define SDO_NTSC_443 8
-#define SDO_PAL_60 9
-#define SDO_STANDARD_MASK 0xf
-
-/* SDO VBI Configuration Register (SDO_VBI) */
-#define SDO_CVBS_WSS_INS (1 << 14)
-#define SDO_CVBS_CLOSED_CAPTION_MASK (3 << 12)
-
-/* SDO DAC Configuration Register (SDO_DAC) */
-#define SDO_POWER_ON_DAC (1 << 0)
-
-/* SDO Color Compensation On/Off Control (SDO_CCCON) */
-#define SDO_COMPENSATION_BHS_ADJ_OFF (1 << 4)
-#define SDO_COMPENSATION_CVBS_COMP_OFF (1 << 0)
-
-/* SDO Interrupt Request Register (SDO_IRQ) */
-#define SDO_VSYNC_IRQ_PEND (1 << 0)
-
-#endif /* SAMSUNG_REGS_SDO_H */
diff --git a/drivers/media/platform/s5p-tv/regs-vp.h b/drivers/media/platform/s5p-tv/regs-vp.h
deleted file mode 100644
index 6c63984e11e8..000000000000
--- a/drivers/media/platform/s5p-tv/regs-vp.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Video processor register header file for Samsung Mixer driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SAMSUNG_REGS_VP_H
-#define SAMSUNG_REGS_VP_H
-
-/*
- * Register part
- */
-
-#define VP_ENABLE 0x0000
-#define VP_SRESET 0x0004
-#define VP_SHADOW_UPDATE 0x0008
-#define VP_FIELD_ID 0x000C
-#define VP_MODE 0x0010
-#define VP_IMG_SIZE_Y 0x0014
-#define VP_IMG_SIZE_C 0x0018
-#define VP_PER_RATE_CTRL 0x001C
-#define VP_TOP_Y_PTR 0x0028
-#define VP_BOT_Y_PTR 0x002C
-#define VP_TOP_C_PTR 0x0030
-#define VP_BOT_C_PTR 0x0034
-#define VP_ENDIAN_MODE 0x03CC
-#define VP_SRC_H_POSITION 0x0044
-#define VP_SRC_V_POSITION 0x0048
-#define VP_SRC_WIDTH 0x004C
-#define VP_SRC_HEIGHT 0x0050
-#define VP_DST_H_POSITION 0x0054
-#define VP_DST_V_POSITION 0x0058
-#define VP_DST_WIDTH 0x005C
-#define VP_DST_HEIGHT 0x0060
-#define VP_H_RATIO 0x0064
-#define VP_V_RATIO 0x0068
-#define VP_POLY8_Y0_LL 0x006C
-#define VP_POLY4_Y0_LL 0x00EC
-#define VP_POLY4_C0_LL 0x012C
-
-/*
- * Bit definition part
- */
-
-/* generates mask for range of bits */
-
-#define VP_MASK(high_bit, low_bit) \
- (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
-
-#define VP_MASK_VAL(val, high_bit, low_bit) \
- (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
-
- /* VP_ENABLE */
-#define VP_ENABLE_ON (1 << 0)
-
-/* VP_SRESET */
-#define VP_SRESET_PROCESSING (1 << 0)
-
-/* VP_SHADOW_UPDATE */
-#define VP_SHADOW_UPDATE_ENABLE (1 << 0)
-
-/* VP_MODE */
-#define VP_MODE_NV12 (0 << 6)
-#define VP_MODE_NV21 (1 << 6)
-#define VP_MODE_LINE_SKIP (1 << 5)
-#define VP_MODE_MEM_LINEAR (0 << 4)
-#define VP_MODE_MEM_TILED (1 << 4)
-#define VP_MODE_FMT_MASK (5 << 4)
-#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
-#define VP_MODE_2D_IPC (1 << 1)
-
-/* VP_IMG_SIZE_Y */
-/* VP_IMG_SIZE_C */
-#define VP_IMG_HSIZE(x) VP_MASK_VAL(x, 29, 16)
-#define VP_IMG_VSIZE(x) VP_MASK_VAL(x, 13, 0)
-
-/* VP_SRC_H_POSITION */
-#define VP_SRC_H_POSITION_VAL(x) VP_MASK_VAL(x, 14, 4)
-
-/* VP_ENDIAN_MODE */
-#define VP_ENDIAN_MODE_LITTLE (1 << 0)
-
-#endif /* SAMSUNG_REGS_VP_H */
diff --git a/drivers/media/platform/s5p-tv/sdo_drv.c b/drivers/media/platform/s5p-tv/sdo_drv.c
deleted file mode 100644
index c75d4354d182..000000000000
--- a/drivers/media/platform/s5p-tv/sdo_drv.c
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- * Samsung Standard Definition Output (SDO) driver
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *
- * Tomasz Stanislawski, <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published
- * by the Free Software Foundation; either version 2 of the License,
- * or (at your option) any later version.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-
-#include <media/v4l2-subdev.h>
-
-#include "regs-sdo.h"
-
-MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung Standard Definition Output (SDO)");
-MODULE_LICENSE("GPL");
-
-#define SDO_DEFAULT_STD V4L2_STD_PAL
-
-struct sdo_format {
- v4l2_std_id id;
- /* all modes are 720 pixels wide */
- unsigned int height;
- unsigned int cookie;
-};
-
-struct sdo_device {
- /** pointer to device parent */
- struct device *dev;
- /** base address of SDO registers */
- void __iomem *regs;
- /** SDO interrupt */
- unsigned int irq;
- /** DAC source clock */
- struct clk *sclk_dac;
- /** DAC clock */
- struct clk *dac;
- /** DAC physical interface */
- struct clk *dacphy;
- /** clock for control of VPLL */
- struct clk *fout_vpll;
- /** vpll rate before sdo stream was on */
- unsigned long vpll_rate;
- /** regulator for SDO IP power */
- struct regulator *vdac;
- /** regulator for SDO plug detection */
- struct regulator *vdet;
- /** subdev used as device interface */
- struct v4l2_subdev sd;
- /** current format */
- const struct sdo_format *fmt;
-};
-
-static inline struct sdo_device *sd_to_sdev(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct sdo_device, sd);
-}
-
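-/*
- * Read-modify-write helper: only the bits selected by @mask are taken
- * from @value; all other bits keep their current register contents.
- * E.g. sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC) sets just
- * the DAC power bit.
- */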
-static inline
-void sdo_write_mask(struct sdo_device *sdev, u32 reg_id, u32 value, u32 mask)
-{
- u32 old = readl(sdev->regs + reg_id);
- value = (value & mask) | (old & ~mask);
- writel(value, sdev->regs + reg_id);
-}
-
-static inline
-void sdo_write(struct sdo_device *sdev, u32 reg_id, u32 value)
-{
- writel(value, sdev->regs + reg_id);
-}
-
-static inline
-u32 sdo_read(struct sdo_device *sdev, u32 reg_id)
-{
- return readl(sdev->regs + reg_id);
-}
-
-static irqreturn_t sdo_irq_handler(int irq, void *dev_data)
-{
- struct sdo_device *sdev = dev_data;
-
- /* clear interrupt */
- sdo_write_mask(sdev, SDO_IRQ, ~0, SDO_VSYNC_IRQ_PEND);
- return IRQ_HANDLED;
-}
-
-static void sdo_reg_debug(struct sdo_device *sdev)
-{
-#define DBGREG(reg_id) \
- dev_info(sdev->dev, #reg_id " = %08x\n", \
- sdo_read(sdev, reg_id))
-
- DBGREG(SDO_CLKCON);
- DBGREG(SDO_CONFIG);
- DBGREG(SDO_VBI);
- DBGREG(SDO_DAC);
- DBGREG(SDO_IRQ);
- DBGREG(SDO_IRQMASK);
- DBGREG(SDO_VERSION);
-}
-
-static const struct sdo_format sdo_format[] = {
- { V4L2_STD_PAL_N, .height = 576, .cookie = SDO_PAL_N },
- { V4L2_STD_PAL_Nc, .height = 576, .cookie = SDO_PAL_NC },
- { V4L2_STD_PAL_M, .height = 480, .cookie = SDO_PAL_M },
- { V4L2_STD_PAL_60, .height = 480, .cookie = SDO_PAL_60 },
- { V4L2_STD_NTSC_443, .height = 480, .cookie = SDO_NTSC_443 },
- { V4L2_STD_PAL, .height = 576, .cookie = SDO_PAL_BGHID },
- { V4L2_STD_NTSC_M, .height = 480, .cookie = SDO_NTSC_M },
-};
-
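-/*
- * Standards are matched by bit overlap: the first table entry that
- * intersects the requested standard set wins, so table order decides
- * the priority for composite requests such as V4L2_STD_625_50.
- */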
-static const struct sdo_format *sdo_find_format(v4l2_std_id id)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(sdo_format); ++i)
- if (sdo_format[i].id & id)
- return &sdo_format[i];
- return NULL;
-}
-
-static int sdo_g_tvnorms_output(struct v4l2_subdev *sd, v4l2_std_id *std)
-{
- *std = V4L2_STD_NTSC_M | V4L2_STD_PAL_M | V4L2_STD_PAL |
- V4L2_STD_PAL_N | V4L2_STD_PAL_Nc |
- V4L2_STD_NTSC_443 | V4L2_STD_PAL_60;
- return 0;
-}
-
-static int sdo_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
-{
- struct sdo_device *sdev = sd_to_sdev(sd);
- const struct sdo_format *fmt;
- fmt = sdo_find_format(std);
- if (fmt == NULL)
- return -EINVAL;
- sdev->fmt = fmt;
- return 0;
-}
-
-static int sdo_g_std_output(struct v4l2_subdev *sd, v4l2_std_id *std)
-{
- *std = sd_to_sdev(sd)->fmt->id;
- return 0;
-}
-
-static int sdo_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct sdo_device *sdev = sd_to_sdev(sd);
-
- if (!sdev->fmt)
- return -ENXIO;
- if (format->pad)
- return -EINVAL;
- /* all modes are 720 pixels wide */
- fmt->width = 720;
- fmt->height = sdev->fmt->height;
- fmt->code = MEDIA_BUS_FMT_FIXED;
- fmt->field = V4L2_FIELD_INTERLACED;
- fmt->colorspace = V4L2_COLORSPACE_JPEG;
- return 0;
-}
-
-static int sdo_s_power(struct v4l2_subdev *sd, int on)
-{
- struct sdo_device *sdev = sd_to_sdev(sd);
- struct device *dev = sdev->dev;
- int ret;
-
- dev_info(dev, "sdo_s_power(%d)\n", on);
-
- if (on)
- ret = pm_runtime_get_sync(dev);
- else
- ret = pm_runtime_put_sync(dev);
-
- /* only values < 0 indicate errors */
- return ret < 0 ? ret : 0;
-}
-
-static int sdo_streamon(struct sdo_device *sdev)
-{
- int ret;
-
- /* set proper clock for Timing Generator */
- sdev->vpll_rate = clk_get_rate(sdev->fout_vpll);
- ret = clk_set_rate(sdev->fout_vpll, 54000000);
- if (ret < 0) {
- dev_err(sdev->dev, "Failed to set vpll rate\n");
- return ret;
- }
- dev_info(sdev->dev, "fout_vpll.rate = %lu\n",
- clk_get_rate(sdev->fout_vpll));
- /* enable clock in SDO */
- sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON);
- ret = clk_prepare_enable(sdev->dacphy);
- if (ret < 0) {
- dev_err(sdev->dev, "clk_prepare_enable(dacphy) failed\n");
- goto fail;
- }
- /* enable DAC */
- sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC);
- sdo_reg_debug(sdev);
- return 0;
-
-fail:
- sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
- clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
- return ret;
-}
-
-static int sdo_streamoff(struct sdo_device *sdev)
-{
- int tries;
-
- sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC);
- clk_disable_unprepare(sdev->dacphy);
- sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
- for (tries = 100; tries; --tries) {
- if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY)
- break;
- mdelay(1);
- }
- if (tries == 0)
- dev_err(sdev->dev, "failed to stop streaming\n");
- clk_set_rate(sdev->fout_vpll, sdev->vpll_rate);
- return tries ? 0 : -EIO;
-}
-
-static int sdo_s_stream(struct v4l2_subdev *sd, int on)
-{
- struct sdo_device *sdev = sd_to_sdev(sd);
- return on ? sdo_streamon(sdev) : sdo_streamoff(sdev);
-}
-
-static const struct v4l2_subdev_core_ops sdo_sd_core_ops = {
- .s_power = sdo_s_power,
-};
-
-static const struct v4l2_subdev_video_ops sdo_sd_video_ops = {
- .s_std_output = sdo_s_std_output,
- .g_std_output = sdo_g_std_output,
- .g_tvnorms_output = sdo_g_tvnorms_output,
- .s_stream = sdo_s_stream,
-};
-
-static const struct v4l2_subdev_pad_ops sdo_sd_pad_ops = {
- .get_fmt = sdo_get_fmt,
-};
-
-static const struct v4l2_subdev_ops sdo_sd_ops = {
- .core = &sdo_sd_core_ops,
- .video = &sdo_sd_video_ops,
- .pad = &sdo_sd_pad_ops,
-};
-
-static int sdo_runtime_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct sdo_device *sdev = sd_to_sdev(sd);
-
- dev_info(dev, "suspend\n");
- regulator_disable(sdev->vdet);
- regulator_disable(sdev->vdac);
- clk_disable_unprepare(sdev->sclk_dac);
- return 0;
-}
-
-static int sdo_runtime_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct sdo_device *sdev = sd_to_sdev(sd);
- int ret;
-
- dev_info(dev, "resume\n");
-
- ret = clk_prepare_enable(sdev->sclk_dac);
- if (ret < 0)
- return ret;
-
- ret = regulator_enable(sdev->vdac);
- if (ret < 0)
- goto dac_clk_dis;
-
- ret = regulator_enable(sdev->vdet);
- if (ret < 0)
- goto vdac_r_dis;
-
- /* software reset */
- sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_SW_RESET);
- mdelay(10);
- sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_SW_RESET);
-
- /* setting TV mode */
- sdo_write_mask(sdev, SDO_CONFIG, sdev->fmt->cookie, SDO_STANDARD_MASK);
- /* XXX: forcing interlaced mode using undocumented bit */
- sdo_write_mask(sdev, SDO_CONFIG, 0, SDO_PROGRESSIVE);
- /* turn all VBI off */
- sdo_write_mask(sdev, SDO_VBI, 0, SDO_CVBS_WSS_INS |
- SDO_CVBS_CLOSED_CAPTION_MASK);
- /* turn all post processing off */
- sdo_write_mask(sdev, SDO_CCCON, ~0, SDO_COMPENSATION_BHS_ADJ_OFF |
- SDO_COMPENSATION_CVBS_COMP_OFF);
- sdo_reg_debug(sdev);
- return 0;
-
-vdac_r_dis:
- regulator_disable(sdev->vdac);
-dac_clk_dis:
- clk_disable_unprepare(sdev->sclk_dac);
- return ret;
-}
-
-static const struct dev_pm_ops sdo_pm_ops = {
- .runtime_suspend = sdo_runtime_suspend,
- .runtime_resume = sdo_runtime_resume,
-};
-
-static int sdo_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct sdo_device *sdev;
- struct resource *res;
- int ret = 0;
- struct clk *sclk_vpll;
-
- dev_info(dev, "probe start\n");
- sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
- if (!sdev) {
- dev_err(dev, "not enough memory.\n");
- ret = -ENOMEM;
- goto fail;
- }
- sdev->dev = dev;
-
- /* mapping registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(dev, "get memory resource failed.\n");
- ret = -ENXIO;
- goto fail;
- }
-
- sdev->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (sdev->regs == NULL) {
- dev_err(dev, "register mapping failed.\n");
- ret = -ENXIO;
- goto fail;
- }
-
- /* acquiring interrupt */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(dev, "get interrupt resource failed.\n");
- ret = -ENXIO;
- goto fail;
- }
- ret = devm_request_irq(&pdev->dev, res->start, sdo_irq_handler, 0,
- "s5p-sdo", sdev);
- if (ret) {
- dev_err(dev, "request interrupt failed.\n");
- goto fail;
- }
- sdev->irq = res->start;
-
- /* acquire clocks */
- sdev->sclk_dac = clk_get(dev, "sclk_dac");
- if (IS_ERR(sdev->sclk_dac)) {
- dev_err(dev, "failed to get clock 'sclk_dac'\n");
- ret = PTR_ERR(sdev->sclk_dac);
- goto fail;
- }
- sdev->dac = clk_get(dev, "dac");
- if (IS_ERR(sdev->dac)) {
- dev_err(dev, "failed to get clock 'dac'\n");
- ret = PTR_ERR(sdev->dac);
- goto fail_sclk_dac;
- }
- sdev->dacphy = clk_get(dev, "dacphy");
- if (IS_ERR(sdev->dacphy)) {
- dev_err(dev, "failed to get clock 'dacphy'\n");
- ret = PTR_ERR(sdev->dacphy);
- goto fail_dac;
- }
- sclk_vpll = clk_get(dev, "sclk_vpll");
- if (IS_ERR(sclk_vpll)) {
- dev_err(dev, "failed to get clock 'sclk_vpll'\n");
- ret = PTR_ERR(sclk_vpll);
- goto fail_dacphy;
- }
- clk_set_parent(sdev->sclk_dac, sclk_vpll);
- clk_put(sclk_vpll);
- sdev->fout_vpll = clk_get(dev, "fout_vpll");
- if (IS_ERR(sdev->fout_vpll)) {
- dev_err(dev, "failed to get clock 'fout_vpll'\n");
- ret = PTR_ERR(sdev->fout_vpll);
- goto fail_dacphy;
- }
- dev_info(dev, "fout_vpll.rate = %lu\n", clk_get_rate(sclk_vpll));
-
- /* acquire regulator */
- sdev->vdac = devm_regulator_get(dev, "vdd33a_dac");
- if (IS_ERR(sdev->vdac)) {
- dev_err(dev, "failed to get regulator 'vdac'\n");
- ret = PTR_ERR(sdev->vdac);
- goto fail_fout_vpll;
- }
- sdev->vdet = devm_regulator_get(dev, "vdet");
- if (IS_ERR(sdev->vdet)) {
- dev_err(dev, "failed to get regulator 'vdet'\n");
- ret = PTR_ERR(sdev->vdet);
- goto fail_fout_vpll;
- }
-
- /* enable gate for dac clock, because mixer uses it */
- ret = clk_prepare_enable(sdev->dac);
- if (ret < 0) {
- dev_err(dev, "clk_prepare_enable(dac) failed\n");
- goto fail_fout_vpll;
- }
-
- /* configure power management */
- pm_runtime_enable(dev);
-
- /* configuration of interface subdevice */
- v4l2_subdev_init(&sdev->sd, &sdo_sd_ops);
- sdev->sd.owner = THIS_MODULE;
- strlcpy(sdev->sd.name, "s5p-sdo", sizeof(sdev->sd.name));
-
- /* set default format */
- sdev->fmt = sdo_find_format(SDO_DEFAULT_STD);
- BUG_ON(sdev->fmt == NULL);
-
- /* keeping subdev in device's private for use by other drivers */
- dev_set_drvdata(dev, &sdev->sd);
-
- dev_info(dev, "probe succeeded\n");
- return 0;
-
-fail_fout_vpll:
- clk_put(sdev->fout_vpll);
-fail_dacphy:
- clk_put(sdev->dacphy);
-fail_dac:
- clk_put(sdev->dac);
-fail_sclk_dac:
- clk_put(sdev->sclk_dac);
-fail:
- dev_info(dev, "probe failed\n");
- return ret;
-}
-
-static int sdo_remove(struct platform_device *pdev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
- struct sdo_device *sdev = sd_to_sdev(sd);
-
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(sdev->dac);
- clk_put(sdev->fout_vpll);
- clk_put(sdev->dacphy);
- clk_put(sdev->dac);
- clk_put(sdev->sclk_dac);
-
- dev_info(&pdev->dev, "remove successful\n");
- return 0;
-}
-
-static struct platform_driver sdo_driver __refdata = {
- .probe = sdo_probe,
- .remove = sdo_remove,
- .driver = {
- .name = "s5p-sdo",
- .pm = &sdo_pm_ops,
- }
-};
-
-module_platform_driver(sdo_driver);
diff --git a/drivers/media/platform/s5p-tv/sii9234_drv.c b/drivers/media/platform/s5p-tv/sii9234_drv.c
deleted file mode 100644
index 0a97f9ab4f76..000000000000
--- a/drivers/media/platform/s5p-tv/sii9234_drv.c
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Samsung MHL interface driver
- *
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/freezer.h>
-#include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/regulator/machine.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/media/sii9234.h>
-#include <media/v4l2-subdev.h>
-
-MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
-MODULE_DESCRIPTION("Samsung MHL interface driver");
-MODULE_LICENSE("GPL");
-
-struct sii9234_context {
- struct i2c_client *client;
- struct regulator *power;
- int gpio_n_reset;
- struct v4l2_subdev sd;
-};
-
-static inline struct sii9234_context *sd_to_context(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct sii9234_context, sd);
-}
-
-static inline int sii9234_readb(struct i2c_client *client, int addr)
-{
- return i2c_smbus_read_byte_data(client, addr);
-}
-
-static inline int sii9234_writeb(struct i2c_client *client, int addr, int value)
-{
- return i2c_smbus_write_byte_data(client, addr, value);
-}
-
-static inline int sii9234_writeb_mask(struct i2c_client *client, int addr,
- int value, int mask)
-{
- int ret;
-
- ret = i2c_smbus_read_byte_data(client, addr);
- if (ret < 0)
- return ret;
- ret = (ret & ~mask) | (value & mask);
- return i2c_smbus_write_byte_data(client, addr, ret);
-}
-
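-/*
- * Indexed register access: the 16-bit address is split into a page byte
- * (written to register 0xbc) and an offset byte (register 0xbd); the
- * data itself is then transferred through the window register 0xbe.
- */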
-static inline int sii9234_readb_idx(struct i2c_client *client, int addr)
-{
- int ret;
- ret = i2c_smbus_write_byte_data(client, 0xbc, addr >> 8);
- if (ret < 0)
- return ret;
- ret = i2c_smbus_write_byte_data(client, 0xbd, addr & 0xff);
- if (ret < 0)
- return ret;
- return i2c_smbus_read_byte_data(client, 0xbe);
-}
-
-static inline int sii9234_writeb_idx(struct i2c_client *client, int addr,
- int value)
-{
- int ret;
- ret = i2c_smbus_write_byte_data(client, 0xbc, addr >> 8);
- if (ret < 0)
- return ret;
- ret = i2c_smbus_write_byte_data(client, 0xbd, addr & 0xff);
- if (ret < 0)
- return ret;
- ret = i2c_smbus_write_byte_data(client, 0xbe, value);
- return ret;
-}
-
-static inline int sii9234_writeb_idx_mask(struct i2c_client *client, int addr,
- int value, int mask)
-{
- int ret;
-
- ret = sii9234_readb_idx(client, addr);
- if (ret < 0)
- return ret;
- ret = (ret & ~mask) | (value & mask);
- return sii9234_writeb_idx(client, addr, ret);
-}
-
-static int sii9234_reset(struct sii9234_context *ctx)
-{
- struct i2c_client *client = ctx->client;
- struct device *dev = &client->dev;
- int ret, tries;
-
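- /* pulse the active-low reset line: high -> low -> high, 1 ms apart */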
- gpio_direction_output(ctx->gpio_n_reset, 1);
- mdelay(1);
- gpio_direction_output(ctx->gpio_n_reset, 0);
- mdelay(1);
- gpio_direction_output(ctx->gpio_n_reset, 1);
- mdelay(1);
-
- /* going to TPI mode */
- ret = sii9234_writeb(client, 0xc7, 0);
- if (ret < 0) {
- dev_err(dev, "failed to set TPI mode\n");
- return ret;
- }
- for (tries = 0; tries < 100; ++tries) {
- ret = sii9234_readb(client, 0x1b);
- if (ret > 0)
- break;
- if (ret < 0) {
- dev_err(dev, "failed to reset device\n");
- return -EIO;
- }
- mdelay(1);
- }
- if (tries == 100) {
- dev_err(dev, "maximal number of tries reached\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static int sii9234_verify_version(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- int family, rev, tpi_rev, dev_id, sub_id, hdcp, id;
-
- family = sii9234_readb(client, 0x1b);
- rev = sii9234_readb(client, 0x1c) & 0x0f;
- tpi_rev = sii9234_readb(client, 0x1d) & 0x7f;
- dev_id = sii9234_readb_idx(client, 0x0103);
- sub_id = sii9234_readb_idx(client, 0x0102);
- hdcp = sii9234_readb(client, 0x30);
-
- if (family < 0 || rev < 0 || tpi_rev < 0 || dev_id < 0 ||
- sub_id < 0 || hdcp < 0) {
- dev_err(dev, "failed to read chip's version\n");
- return -EIO;
- }
-
- id = (dev_id << 8) | sub_id;
-
- dev_info(dev, "chip: SiL%02x family: %02x, rev: %02x\n",
- id, family, rev);
- dev_info(dev, "tpi_rev:%02x, hdcp: %02x\n", tpi_rev, hdcp);
- if (id != 0x9234) {
- dev_err(dev, "not supported chip\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
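-/*
- * Each row is { page, offset, value }; sii9234_set_internal() below
- * folds page and offset into an indexed-register address and writes
- * the value through the window registers.
- */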
-static u8 data[][3] = {
-/* setup from driver created by doonsoo45.kim */
- { 0x01, 0x05, 0x04 }, /* Enable Auto soft reset on SCDT = 0 */
- { 0x01, 0x08, 0x35 }, /* Power Up TMDS Tx Core */
- { 0x01, 0x0d, 0x1c }, /* HDMI Transcode mode enable */
- { 0x01, 0x2b, 0x01 }, /* Enable HDCP Compliance workaround */
- { 0x01, 0x79, 0x40 }, /* daniel test...MHL_INT */
- { 0x01, 0x80, 0x34 }, /* Enable Rx PLL Clock Value */
- { 0x01, 0x90, 0x27 }, /* Enable CBUS discovery */
- { 0x01, 0x91, 0xe5 }, /* Skip RGND detection */
- { 0x01, 0x92, 0x46 }, /* Force MHD mode */
- { 0x01, 0x93, 0xdc }, /* Disable CBUS pull-up during RGND measurement */
- { 0x01, 0x94, 0x66 }, /* 1.8V CBUS VTH & GND threshold */
- { 0x01, 0x95, 0x31 }, /* RGND block & single discovery attempt */
- { 0x01, 0x96, 0x22 }, /* use 1K and 2K setting */
- { 0x01, 0xa0, 0x10 }, /* SIMG: Term mode */
- { 0x01, 0xa1, 0xfc }, /* Disable internal Mobile HD driver */
- { 0x01, 0xa3, 0xfa }, /* SIMG: Output Swing default EB, 3x Clk Mult */
- { 0x01, 0xa5, 0x80 }, /* SIMG: RGND Hysteresis, 3x mode for Beast */
- { 0x01, 0xa6, 0x0c }, /* SIMG: Swing Offset */
- { 0x02, 0x3d, 0x3f }, /* Power up CVCC 1.2V core */
- { 0x03, 0x00, 0x00 }, /* SIMG: correcting HW default */
- { 0x03, 0x11, 0x01 }, /* Enable TxPLL Clock */
- { 0x03, 0x12, 0x15 }, /* Enable Tx Clock Path & Equalizer */
- { 0x03, 0x13, 0x60 }, /* SIMG: Set termination value */
- { 0x03, 0x14, 0xf0 }, /* SIMG: Change CKDT level */
- { 0x03, 0x17, 0x07 }, /* SIMG: PLL Calrefsel */
- { 0x03, 0x1a, 0x20 }, /* VCO Cal */
- { 0x03, 0x22, 0xe0 }, /* SIMG: Auto EQ */
- { 0x03, 0x23, 0xc0 }, /* SIMG: Auto EQ */
- { 0x03, 0x24, 0xa0 }, /* SIMG: Auto EQ */
- { 0x03, 0x25, 0x80 }, /* SIMG: Auto EQ */
- { 0x03, 0x26, 0x60 }, /* SIMG: Auto EQ */
- { 0x03, 0x27, 0x40 }, /* SIMG: Auto EQ */
- { 0x03, 0x28, 0x20 }, /* SIMG: Auto EQ */
- { 0x03, 0x29, 0x00 }, /* SIMG: Auto EQ */
- { 0x03, 0x31, 0x0b }, /* SIMG: Rx PLL BW value from I2C BW ~ 4MHz */
- { 0x03, 0x45, 0x06 }, /* SIMG: DPLL Mode */
- { 0x03, 0x4b, 0x06 }, /* SIMG: Correcting HW default */
- { 0x03, 0x4c, 0xa0 }, /* Manual zone control */
- { 0x03, 0x4d, 0x02 }, /* SIMG: PLL Mode Value (order is important) */
-};
-
-static int sii9234_set_internal(struct sii9234_context *ctx)
-{
- struct i2c_client *client = ctx->client;
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(data); ++i) {
- int addr = (data[i][0] << 8) | data[i][1];
- ret = sii9234_writeb_idx(client, addr, data[i][2]);
- if (ret < 0)
- return ret;
- }
- return 0;
-}
-
-static int sii9234_runtime_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct sii9234_context *ctx = sd_to_context(sd);
- struct i2c_client *client = ctx->client;
-
- dev_info(dev, "suspend start\n");
-
- sii9234_writeb_mask(client, 0x1e, 3, 3);
- regulator_disable(ctx->power);
-
- return 0;
-}
-
-static int sii9234_runtime_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct sii9234_context *ctx = sd_to_context(sd);
- struct i2c_client *client = ctx->client;
- int ret;
-
- dev_info(dev, "resume start\n");
- ret = regulator_enable(ctx->power);
- if (ret < 0)
- return ret;
-
- ret = sii9234_reset(ctx);
- if (ret)
- goto fail;
-
- /* enable TPI */
- ret = sii9234_writeb_mask(client, 0x1e, 1, 0);
- if (ret < 0)
- goto fail;
- ret = sii9234_set_internal(ctx);
- if (ret < 0)
- goto fail;
-
- return 0;
-
-fail:
- dev_err(dev, "failed to resume\n");
- regulator_disable(ctx->power);
-
- return ret;
-}
-
-static const struct dev_pm_ops sii9234_pm_ops = {
- .runtime_suspend = sii9234_runtime_suspend,
- .runtime_resume = sii9234_runtime_resume,
-};
-
-static int sii9234_s_power(struct v4l2_subdev *sd, int on)
-{
- struct sii9234_context *ctx = sd_to_context(sd);
- int ret;
-
- if (on)
- ret = pm_runtime_get_sync(&ctx->client->dev);
- else
- ret = pm_runtime_put(&ctx->client->dev);
- /* only values < 0 indicate errors */
- return ret < 0 ? ret : 0;
-}
-
-static int sii9234_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct sii9234_context *ctx = sd_to_context(sd);
-
- /* enable/disable TMDS output */
- sii9234_writeb_mask(ctx->client, 0x1a, enable ? 0 : ~0, 1 << 4);
- return 0;
-}
-
-static const struct v4l2_subdev_core_ops sii9234_core_ops = {
- .s_power = sii9234_s_power,
-};
-
-static const struct v4l2_subdev_video_ops sii9234_video_ops = {
- .s_stream = sii9234_s_stream,
-};
-
-static const struct v4l2_subdev_ops sii9234_ops = {
- .core = &sii9234_core_ops,
- .video = &sii9234_video_ops,
-};
-
-static int sii9234_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct device *dev = &client->dev;
- struct sii9234_platform_data *pdata = dev->platform_data;
- struct sii9234_context *ctx;
- int ret;
-
- ctx = devm_kzalloc(&client->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- dev_err(dev, "out of memory\n");
- ret = -ENOMEM;
- goto fail;
- }
- ctx->client = client;
-
- ctx->power = devm_regulator_get(dev, "hdmi-en");
- if (IS_ERR(ctx->power)) {
- dev_err(dev, "failed to acquire regulator hdmi-en\n");
- return PTR_ERR(ctx->power);
- }
-
- ctx->gpio_n_reset = pdata->gpio_n_reset;
- ret = devm_gpio_request(dev, ctx->gpio_n_reset, "MHL_RST");
- if (ret) {
- dev_err(dev, "failed to acquire MHL_RST gpio\n");
- return ret;
- }
-
- v4l2_i2c_subdev_init(&ctx->sd, client, &sii9234_ops);
-
- pm_runtime_enable(dev);
-
- /* enable device */
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto fail_pm;
-
- /* verify chip version */
- ret = sii9234_verify_version(client);
- if (ret)
- goto fail_pm_get;
-
- /* stop processing */
- pm_runtime_put(dev);
-
- dev_info(dev, "probe successful\n");
-
- return 0;
-
-fail_pm_get:
- pm_runtime_put_sync(dev);
-
-fail_pm:
- pm_runtime_disable(dev);
-
-fail:
- dev_err(dev, "probe failed\n");
-
- return ret;
-}
-
-static int sii9234_remove(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
-
- pm_runtime_disable(dev);
-
- dev_info(dev, "remove successful\n");
-
- return 0;
-}
-
-
-static const struct i2c_device_id sii9234_id[] = {
- { "SII9234", 0 },
- { },
-};
-
-MODULE_DEVICE_TABLE(i2c, sii9234_id);
-static struct i2c_driver sii9234_driver = {
- .driver = {
- .name = "sii9234",
- .pm = &sii9234_pm_ops,
- },
- .probe = sii9234_probe,
- .remove = sii9234_remove,
- .id_table = sii9234_id,
-};
-
-module_i2c_driver(sii9234_driver);
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index e1f39b4cf1cd..ef2a519bcd4c 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -362,7 +362,7 @@ static void sh_vou_stop_streaming(struct vb2_queue *vq)
spin_unlock_irqrestore(&vou_dev->lock, flags);
}
-static struct vb2_ops sh_vou_qops = {
+static const struct vb2_ops sh_vou_qops = {
.queue_setup = sh_vou_queue_setup,
.buf_prepare = sh_vou_buf_prepare,
.buf_queue = sh_vou_buf_queue,
@@ -937,7 +937,10 @@ static int sh_vou_s_selection(struct file *file, void *fh,
{
struct v4l2_rect *rect = &sel->r;
struct sh_vou_device *vou_dev = video_drvdata(file);
- struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
+ struct v4l2_subdev_selection sd_sel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_COMPOSE,
+ };
struct v4l2_pix_format *pix = &vou_dev->pix;
struct sh_vou_geometry geo;
struct v4l2_subdev_format format = {
@@ -978,14 +981,14 @@ static int sh_vou_s_selection(struct file *file, void *fh,
geo.in_height = pix->height;
/* Configure the encoder one-to-one, position at 0, ignore errors */
- sd_crop.c.width = geo.output.width;
- sd_crop.c.height = geo.output.height;
+ sd_sel.r.width = geo.output.width;
+ sd_sel.r.height = geo.output.height;
/*
- * We first issue a S_CROP, so that the subsequent S_FMT delivers the
+ * We first issue an S_SELECTION, so that the subsequent S_FMT delivers the
* final encoder configuration.
*/
- v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
- s_crop, &sd_crop);
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
+ set_selection, NULL, &sd_sel);
format.format.width = geo.output.width;
format.format.height = geo.output.height;
ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 39f66414f621..86d74788544f 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -17,31 +17,6 @@ config SOC_CAMERA_PLATFORM
help
This is a generic SoC camera platform driver, useful for testing
-config VIDEO_PXA27x
- tristate "PXA27x Quick Capture Interface driver"
- depends on VIDEO_DEV && PXA27x && SOC_CAMERA
- select VIDEOBUF_DMA_SG
- select SG_SPLIT
- ---help---
- This is a v4l2 driver for the PXA27x Quick Capture Interface
-
-config VIDEO_RCAR_VIN_OLD
- tristate "R-Car Video Input (VIN) support (DEPRECATED)"
- depends on VIDEO_DEV && SOC_CAMERA
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on HAS_DMA
- select VIDEOBUF2_DMA_CONTIG
- select SOC_CAMERA_SCALE_CROP
- ---help---
- This is a v4l2 driver for the R-Car VIN Interface
-
-config VIDEO_SH_MOBILE_CSI2
- tristate "SuperH Mobile MIPI CSI-2 Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK
- depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
- ---help---
- This is a v4l2 driver for the SuperH MIPI CSI-2 Interface
-
config VIDEO_SH_MOBILE_CEU
tristate "SuperH Mobile CEU Interface driver"
depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
index 7703cb7ce456..7633a0f2f66f 100644
--- a/drivers/media/platform/soc_camera/Makefile
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -7,7 +7,4 @@ obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
# soc-camera host drivers have to be linked after camera drivers
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
-obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
-obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
-obj-$(CONFIG_VIDEO_RCAR_VIN_OLD) += rcar_vin.o
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 30211f6b4483..46de657c3e6d 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -536,7 +536,7 @@ static void stop_streaming(struct vb2_queue *vq)
pm_runtime_put(ici->v4l2_dev.dev);
}
-static struct vb2_ops isi_video_qops = {
+static const struct vb2_ops isi_video_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
deleted file mode 100644
index 9c137522c660..000000000000
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ /dev/null
@@ -1,1970 +0,0 @@
-/*
- * SoC-camera host driver for Renesas R-Car VIN unit
- *
- * Copyright (C) 2011-2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
- *
- * Based on V4L2 Driver for SuperH Mobile CEU interface "sh_mobile_ceu_camera.c"
- *
- * Copyright (C) 2008 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/videodev2.h>
-
-#include <media/soc_camera.h>
-#include <media/drv-intf/soc_mediabus.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-mediabus.h>
-#include <media/v4l2-of.h>
-#include <media/v4l2-subdev.h>
-#include <media/videobuf2-dma-contig.h>
-
-#include "soc_scale_crop.h"
-
-#define DRV_NAME "rcar_vin"
-
-/* Register offsets for R-Car VIN */
-#define VNMC_REG 0x00 /* Video n Main Control Register */
-#define VNMS_REG 0x04 /* Video n Module Status Register */
-#define VNFC_REG 0x08 /* Video n Frame Capture Register */
-#define VNSLPRC_REG 0x0C /* Video n Start Line Pre-Clip Register */
-#define VNELPRC_REG 0x10 /* Video n End Line Pre-Clip Register */
-#define VNSPPRC_REG 0x14 /* Video n Start Pixel Pre-Clip Register */
-#define VNEPPRC_REG 0x18 /* Video n End Pixel Pre-Clip Register */
-#define VNSLPOC_REG 0x1C /* Video n Start Line Post-Clip Register */
-#define VNELPOC_REG 0x20 /* Video n End Line Post-Clip Register */
-#define VNSPPOC_REG 0x24 /* Video n Start Pixel Post-Clip Register */
-#define VNEPPOC_REG 0x28 /* Video n End Pixel Post-Clip Register */
-#define VNIS_REG 0x2C /* Video n Image Stride Register */
-#define VNMB_REG(m) (0x30 + ((m) << 2)) /* Video n Memory Base m Register */
-#define VNIE_REG 0x40 /* Video n Interrupt Enable Register */
-#define VNINTS_REG 0x44 /* Video n Interrupt Status Register */
-#define VNSI_REG 0x48 /* Video n Scanline Interrupt Register */
-#define VNMTC_REG 0x4C /* Video n Memory Transfer Control Register */
-#define VNYS_REG 0x50 /* Video n Y Scale Register */
-#define VNXS_REG 0x54 /* Video n X Scale Register */
-#define VNDMR_REG 0x58 /* Video n Data Mode Register */
-#define VNDMR2_REG 0x5C /* Video n Data Mode Register 2 */
-#define VNUVAOF_REG 0x60 /* Video n UV Address Offset Register */
-#define VNC1A_REG 0x80 /* Video n Coefficient Set C1A Register */
-#define VNC1B_REG 0x84 /* Video n Coefficient Set C1B Register */
-#define VNC1C_REG 0x88 /* Video n Coefficient Set C1C Register */
-#define VNC2A_REG 0x90 /* Video n Coefficient Set C2A Register */
-#define VNC2B_REG 0x94 /* Video n Coefficient Set C2B Register */
-#define VNC2C_REG 0x98 /* Video n Coefficient Set C2C Register */
-#define VNC3A_REG 0xA0 /* Video n Coefficient Set C3A Register */
-#define VNC3B_REG 0xA4 /* Video n Coefficient Set C3B Register */
-#define VNC3C_REG 0xA8 /* Video n Coefficient Set C3C Register */
-#define VNC4A_REG 0xB0 /* Video n Coefficient Set C4A Register */
-#define VNC4B_REG 0xB4 /* Video n Coefficient Set C4B Register */
-#define VNC4C_REG 0xB8 /* Video n Coefficient Set C4C Register */
-#define VNC5A_REG 0xC0 /* Video n Coefficient Set C5A Register */
-#define VNC5B_REG 0xC4 /* Video n Coefficient Set C5B Register */
-#define VNC5C_REG 0xC8 /* Video n Coefficient Set C5C Register */
-#define VNC6A_REG 0xD0 /* Video n Coefficient Set C6A Register */
-#define VNC6B_REG 0xD4 /* Video n Coefficient Set C6B Register */
-#define VNC6C_REG 0xD8 /* Video n Coefficient Set C6C Register */
-#define VNC7A_REG 0xE0 /* Video n Coefficient Set C7A Register */
-#define VNC7B_REG 0xE4 /* Video n Coefficient Set C7B Register */
-#define VNC7C_REG 0xE8 /* Video n Coefficient Set C7C Register */
-#define VNC8A_REG 0xF0 /* Video n Coefficient Set C8A Register */
-#define VNC8B_REG 0xF4 /* Video n Coefficient Set C8B Register */
-#define VNC8C_REG 0xF8 /* Video n Coefficient Set C8C Register */
-
-/* Register bit fields for R-Car VIN */
-/* Video n Main Control Register bits */
-#define VNMC_FOC (1 << 21)
-#define VNMC_YCAL (1 << 19)
-#define VNMC_INF_YUV8_BT656 (0 << 16)
-#define VNMC_INF_YUV8_BT601 (1 << 16)
-#define VNMC_INF_YUV10_BT656 (2 << 16)
-#define VNMC_INF_YUV10_BT601 (3 << 16)
-#define VNMC_INF_YUV16 (5 << 16)
-#define VNMC_INF_RGB888 (6 << 16)
-#define VNMC_VUP (1 << 10)
-#define VNMC_IM_ODD (0 << 3)
-#define VNMC_IM_ODD_EVEN (1 << 3)
-#define VNMC_IM_EVEN (2 << 3)
-#define VNMC_IM_FULL (3 << 3)
-#define VNMC_BPS (1 << 1)
-#define VNMC_ME (1 << 0)
-
-/* Video n Module Status Register bits */
-#define VNMS_FBS_MASK (3 << 3)
-#define VNMS_FBS_SHIFT 3
-#define VNMS_AV (1 << 1)
-#define VNMS_CA (1 << 0)
-
-/* Video n Frame Capture Register bits */
-#define VNFC_C_FRAME (1 << 1)
-#define VNFC_S_FRAME (1 << 0)
-
-/* Video n Interrupt Enable Register bits */
-#define VNIE_FIE (1 << 4)
-#define VNIE_EFE (1 << 1)
-
-/* Video n Data Mode Register bits */
-#define VNDMR_EXRGB (1 << 8)
-#define VNDMR_BPSM (1 << 4)
-#define VNDMR_DTMD_YCSEP (1 << 1)
-#define VNDMR_DTMD_ARGB (1 << 0)
-
-/* Video n Data Mode Register 2 bits */
-#define VNDMR2_VPS (1 << 30)
-#define VNDMR2_HPS (1 << 29)
-#define VNDMR2_FTEV (1 << 17)
-#define VNDMR2_VLV(n) (((n) & 0xf) << 12)
-
-#define VIN_MAX_WIDTH 2048
-#define VIN_MAX_HEIGHT 2048
-
-#define TIMEOUT_MS 100
-
-#define RCAR_VIN_HSYNC_ACTIVE_LOW (1 << 0)
-#define RCAR_VIN_VSYNC_ACTIVE_LOW (1 << 1)
-#define RCAR_VIN_BT601 (1 << 2)
-#define RCAR_VIN_BT656 (1 << 3)
-
-enum chip_id {
- RCAR_GEN3,
- RCAR_GEN2,
- RCAR_H1,
- RCAR_M1,
- RCAR_E1,
-};
-
-struct vin_coeff {
- unsigned short xs_value;
- u32 coeff_set[24];
-};
-
-static const struct vin_coeff vin_coeff_set[] = {
- { 0x0000, {
- 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000 },
- },
- { 0x1000, {
- 0x000fa400, 0x000fa400, 0x09625902,
- 0x000003f8, 0x00000403, 0x3de0d9f0,
- 0x001fffed, 0x00000804, 0x3cc1f9c3,
- 0x001003de, 0x00000c01, 0x3cb34d7f,
- 0x002003d2, 0x00000c00, 0x3d24a92d,
- 0x00200bca, 0x00000bff, 0x3df600d2,
- 0x002013cc, 0x000007ff, 0x3ed70c7e,
- 0x00100fde, 0x00000000, 0x3f87c036 },
- },
- { 0x1200, {
- 0x002ffff1, 0x002ffff1, 0x02a0a9c8,
- 0x002003e7, 0x001ffffa, 0x000185bc,
- 0x002007dc, 0x000003ff, 0x3e52859c,
- 0x00200bd4, 0x00000002, 0x3d53996b,
- 0x00100fd0, 0x00000403, 0x3d04ad2d,
- 0x00000bd5, 0x00000403, 0x3d35ace7,
- 0x3ff003e4, 0x00000801, 0x3dc674a1,
- 0x3fffe800, 0x00000800, 0x3e76f461 },
- },
- { 0x1400, {
- 0x00100be3, 0x00100be3, 0x04d1359a,
- 0x00000fdb, 0x002003ed, 0x0211fd93,
- 0x00000fd6, 0x002003f4, 0x0002d97b,
- 0x000007d6, 0x002ffffb, 0x3e93b956,
- 0x3ff003da, 0x001003ff, 0x3db49926,
- 0x3fffefe9, 0x00100001, 0x3d655cee,
- 0x3fffd400, 0x00000003, 0x3d65f4b6,
- 0x000fb421, 0x00000402, 0x3dc6547e },
- },
- { 0x1600, {
- 0x00000bdd, 0x00000bdd, 0x06519578,
- 0x3ff007da, 0x00000be3, 0x03c24973,
- 0x3ff003d9, 0x00000be9, 0x01b30d5f,
- 0x3ffff7df, 0x001003f1, 0x0003c542,
- 0x000fdfec, 0x001003f7, 0x3ec4711d,
- 0x000fc400, 0x002ffffd, 0x3df504f1,
- 0x001fa81a, 0x002ffc00, 0x3d957cc2,
- 0x002f8c3c, 0x00100000, 0x3db5c891 },
- },
- { 0x1800, {
- 0x3ff003dc, 0x3ff003dc, 0x0791e558,
- 0x000ff7dd, 0x3ff007de, 0x05328554,
- 0x000fe7e3, 0x3ff00be2, 0x03232546,
- 0x000fd7ee, 0x000007e9, 0x0143bd30,
- 0x001fb800, 0x000007ee, 0x00044511,
- 0x002fa015, 0x000007f4, 0x3ef4bcee,
- 0x002f8832, 0x001003f9, 0x3e4514c7,
- 0x001f7853, 0x001003fd, 0x3de54c9f },
- },
- { 0x1a00, {
- 0x000fefe0, 0x000fefe0, 0x08721d3c,
- 0x001fdbe7, 0x000ffbde, 0x0652a139,
- 0x001fcbf0, 0x000003df, 0x0463292e,
- 0x002fb3ff, 0x3ff007e3, 0x0293a91d,
- 0x002f9c12, 0x3ff00be7, 0x01241905,
- 0x001f8c29, 0x000007ed, 0x3fe470eb,
- 0x000f7c46, 0x000007f2, 0x3f04b8ca,
- 0x3fef7865, 0x000007f6, 0x3e74e4a8 },
- },
- { 0x1c00, {
- 0x001fd3e9, 0x001fd3e9, 0x08f23d26,
- 0x002fbff3, 0x001fe3e4, 0x0712ad23,
- 0x002fa800, 0x000ff3e0, 0x05631d1b,
- 0x001f9810, 0x000ffbe1, 0x03b3890d,
- 0x000f8c23, 0x000003e3, 0x0233e8fa,
- 0x3fef843b, 0x000003e7, 0x00f430e4,
- 0x3fbf8456, 0x3ff00bea, 0x00046cc8,
- 0x3f8f8c72, 0x3ff00bef, 0x3f3490ac },
- },
- { 0x1e00, {
- 0x001fbbf4, 0x001fbbf4, 0x09425112,
- 0x001fa800, 0x002fc7ed, 0x0792b110,
- 0x000f980e, 0x001fdbe6, 0x0613110a,
- 0x3fff8c20, 0x001fe7e3, 0x04a368fd,
- 0x3fcf8c33, 0x000ff7e2, 0x0343b8ed,
- 0x3f9f8c4a, 0x000fffe3, 0x0203f8da,
- 0x3f5f9c61, 0x000003e6, 0x00e428c5,
- 0x3f1fb07b, 0x000003eb, 0x3fe440af },
- },
- { 0x2000, {
- 0x000fa400, 0x000fa400, 0x09625902,
- 0x3fff980c, 0x001fb7f5, 0x0812b0ff,
- 0x3fdf901c, 0x001fc7ed, 0x06b2fcfa,
- 0x3faf902d, 0x001fd3e8, 0x055348f1,
- 0x3f7f983f, 0x001fe3e5, 0x04038ce3,
- 0x3f3fa454, 0x001fefe3, 0x02e3c8d1,
- 0x3f0fb86a, 0x001ff7e4, 0x01c3e8c0,
- 0x3ecfd880, 0x000fffe6, 0x00c404ac },
- },
- { 0x2200, {
- 0x3fdf9c0b, 0x3fdf9c0b, 0x09725cf4,
- 0x3fbf9818, 0x3fffa400, 0x0842a8f1,
- 0x3f8f9827, 0x000fb3f7, 0x0702f0ec,
- 0x3f5fa037, 0x000fc3ef, 0x05d330e4,
- 0x3f2fac49, 0x001fcfea, 0x04a364d9,
- 0x3effc05c, 0x001fdbe7, 0x038394ca,
- 0x3ecfdc6f, 0x001fe7e6, 0x0273b0bb,
- 0x3ea00083, 0x001fefe6, 0x0183c0a9 },
- },
- { 0x2400, {
- 0x3f9fa014, 0x3f9fa014, 0x098260e6,
- 0x3f7f9c23, 0x3fcf9c0a, 0x08629ce5,
- 0x3f4fa431, 0x3fefa400, 0x0742d8e1,
- 0x3f1fb440, 0x3fffb3f8, 0x062310d9,
- 0x3eefc850, 0x000fbbf2, 0x050340d0,
- 0x3ecfe062, 0x000fcbec, 0x041364c2,
- 0x3ea00073, 0x001fd3ea, 0x03037cb5,
- 0x3e902086, 0x001fdfe8, 0x022388a5 },
- },
- { 0x2600, {
- 0x3f5fa81e, 0x3f5fa81e, 0x096258da,
- 0x3f3fac2b, 0x3f8fa412, 0x088290d8,
- 0x3f0fbc38, 0x3fafa408, 0x0772c8d5,
- 0x3eefcc47, 0x3fcfa800, 0x0672f4ce,
- 0x3ecfe456, 0x3fefaffa, 0x05531cc6,
- 0x3eb00066, 0x3fffbbf3, 0x047334bb,
- 0x3ea01c77, 0x000fc7ee, 0x039348ae,
- 0x3ea04486, 0x000fd3eb, 0x02b350a1 },
- },
- { 0x2800, {
- 0x3f2fb426, 0x3f2fb426, 0x094250ce,
- 0x3f0fc032, 0x3f4fac1b, 0x086284cd,
- 0x3eefd040, 0x3f7fa811, 0x0782acc9,
- 0x3ecfe84c, 0x3f9fa807, 0x06a2d8c4,
- 0x3eb0005b, 0x3fbfac00, 0x05b2f4bc,
- 0x3eb0186a, 0x3fdfb3fa, 0x04c308b4,
- 0x3eb04077, 0x3fefbbf4, 0x03f31ca8,
- 0x3ec06884, 0x000fbff2, 0x03031c9e },
- },
- { 0x2a00, {
- 0x3f0fc42d, 0x3f0fc42d, 0x090240c4,
- 0x3eefd439, 0x3f2fb822, 0x08526cc2,
- 0x3edfe845, 0x3f4fb018, 0x078294bf,
- 0x3ec00051, 0x3f6fac0f, 0x06b2b4bb,
- 0x3ec0185f, 0x3f8fac07, 0x05e2ccb4,
- 0x3ec0386b, 0x3fafac00, 0x0502e8ac,
- 0x3ed05c77, 0x3fcfb3fb, 0x0432f0a3,
- 0x3ef08482, 0x3fdfbbf6, 0x0372f898 },
- },
- { 0x2c00, {
- 0x3eefdc31, 0x3eefdc31, 0x08e238b8,
- 0x3edfec3d, 0x3f0fc828, 0x082258b9,
- 0x3ed00049, 0x3f1fc01e, 0x077278b6,
- 0x3ed01455, 0x3f3fb815, 0x06c294b2,
- 0x3ed03460, 0x3f5fb40d, 0x0602acac,
- 0x3ef0506c, 0x3f7fb006, 0x0542c0a4,
- 0x3f107476, 0x3f9fb400, 0x0472c89d,
- 0x3f309c80, 0x3fbfb7fc, 0x03b2cc94 },
- },
- { 0x2e00, {
- 0x3eefec37, 0x3eefec37, 0x088220b0,
- 0x3ee00041, 0x3effdc2d, 0x07f244ae,
- 0x3ee0144c, 0x3f0fd023, 0x07625cad,
- 0x3ef02c57, 0x3f1fc81a, 0x06c274a9,
- 0x3f004861, 0x3f3fbc13, 0x060288a6,
- 0x3f20686b, 0x3f5fb80c, 0x05529c9e,
- 0x3f408c74, 0x3f6fb805, 0x04b2ac96,
- 0x3f80ac7e, 0x3f8fb800, 0x0402ac8e },
- },
- { 0x3000, {
- 0x3ef0003a, 0x3ef0003a, 0x084210a6,
- 0x3ef01045, 0x3effec32, 0x07b228a7,
- 0x3f00284e, 0x3f0fdc29, 0x073244a4,
- 0x3f104058, 0x3f0fd420, 0x06a258a2,
- 0x3f305c62, 0x3f2fc818, 0x0612689d,
- 0x3f508069, 0x3f3fc011, 0x05728496,
- 0x3f80a072, 0x3f4fc00a, 0x04d28c90,
- 0x3fc0c07b, 0x3f6fbc04, 0x04429088 },
- },
- { 0x3200, {
- 0x3f00103e, 0x3f00103e, 0x07f1fc9e,
- 0x3f102447, 0x3f000035, 0x0782149d,
- 0x3f203c4f, 0x3f0ff02c, 0x07122c9c,
- 0x3f405458, 0x3f0fe424, 0x06924099,
- 0x3f607061, 0x3f1fd41d, 0x06024c97,
- 0x3f909068, 0x3f2fcc16, 0x05726490,
- 0x3fc0b070, 0x3f3fc80f, 0x04f26c8a,
- 0x0000d077, 0x3f4fc409, 0x04627484 },
- },
- { 0x3400, {
- 0x3f202040, 0x3f202040, 0x07a1e898,
- 0x3f303449, 0x3f100c38, 0x0741fc98,
- 0x3f504c50, 0x3f10002f, 0x06e21495,
- 0x3f706459, 0x3f1ff028, 0x06722492,
- 0x3fa08060, 0x3f1fe421, 0x05f2348f,
- 0x3fd09c67, 0x3f1fdc19, 0x05824c89,
- 0x0000bc6e, 0x3f2fd014, 0x04f25086,
- 0x0040dc74, 0x3f3fcc0d, 0x04825c7f },
- },
- { 0x3600, {
- 0x3f403042, 0x3f403042, 0x0761d890,
- 0x3f504848, 0x3f301c3b, 0x0701f090,
- 0x3f805c50, 0x3f200c33, 0x06a2008f,
- 0x3fa07458, 0x3f10002b, 0x06520c8d,
- 0x3fd0905e, 0x3f1ff424, 0x05e22089,
- 0x0000ac65, 0x3f1fe81d, 0x05823483,
- 0x0030cc6a, 0x3f2fdc18, 0x04f23c81,
- 0x0080e871, 0x3f2fd412, 0x0482407c },
- },
- { 0x3800, {
- 0x3f604043, 0x3f604043, 0x0721c88a,
- 0x3f80544a, 0x3f502c3c, 0x06d1d88a,
- 0x3fb06851, 0x3f301c35, 0x0681e889,
- 0x3fd08456, 0x3f30082f, 0x0611fc88,
- 0x00009c5d, 0x3f200027, 0x05d20884,
- 0x0030b863, 0x3f2ff421, 0x05621880,
- 0x0070d468, 0x3f2fe81b, 0x0502247c,
- 0x00c0ec6f, 0x3f2fe015, 0x04a22877 },
- },
- { 0x3a00, {
- 0x3f904c44, 0x3f904c44, 0x06e1b884,
- 0x3fb0604a, 0x3f70383e, 0x0691c885,
- 0x3fe07451, 0x3f502c36, 0x0661d483,
- 0x00009055, 0x3f401831, 0x0601ec81,
- 0x0030a85b, 0x3f300c2a, 0x05b1f480,
- 0x0070c061, 0x3f300024, 0x0562047a,
- 0x00b0d867, 0x3f3ff41e, 0x05020c77,
- 0x00f0f46b, 0x3f2fec19, 0x04a21474 },
- },
- { 0x3c00, {
- 0x3fb05c43, 0x3fb05c43, 0x06c1b07e,
- 0x3fe06c4b, 0x3f902c3f, 0x0681c081,
- 0x0000844f, 0x3f703838, 0x0631cc7d,
- 0x00309855, 0x3f602433, 0x05d1d47e,
- 0x0060b459, 0x3f50142e, 0x0581e47b,
- 0x00a0c85f, 0x3f400828, 0x0531f078,
- 0x00e0e064, 0x3f300021, 0x0501fc73,
- 0x00b0fc6a, 0x3f3ff41d, 0x04a20873 },
- },
- { 0x3e00, {
- 0x3fe06444, 0x3fe06444, 0x0681a07a,
- 0x00007849, 0x3fc0503f, 0x0641b07a,
- 0x0020904d, 0x3fa0403a, 0x05f1c07a,
- 0x0060a453, 0x3f803034, 0x05c1c878,
- 0x0090b858, 0x3f70202f, 0x0571d477,
- 0x00d0d05d, 0x3f501829, 0x0531e073,
- 0x0110e462, 0x3f500825, 0x04e1e471,
- 0x01510065, 0x3f40001f, 0x04a1f06d },
- },
- { 0x4000, {
- 0x00007044, 0x00007044, 0x06519476,
- 0x00208448, 0x3fe05c3f, 0x0621a476,
- 0x0050984d, 0x3fc04c3a, 0x05e1b075,
- 0x0080ac52, 0x3fa03c35, 0x05a1b875,
- 0x00c0c056, 0x3f803030, 0x0561c473,
- 0x0100d45b, 0x3f70202b, 0x0521d46f,
- 0x0140e860, 0x3f601427, 0x04d1d46e,
- 0x01810064, 0x3f500822, 0x0491dc6b },
- },
- { 0x5000, {
- 0x0110a442, 0x0110a442, 0x0551545e,
- 0x0140b045, 0x00e0983f, 0x0531585f,
- 0x0160c047, 0x00c08c3c, 0x0511645e,
- 0x0190cc4a, 0x00908039, 0x04f1685f,
- 0x01c0dc4c, 0x00707436, 0x04d1705e,
- 0x0200e850, 0x00506833, 0x04b1785b,
- 0x0230f453, 0x00305c30, 0x0491805a,
- 0x02710056, 0x0010542d, 0x04718059 },
- },
- { 0x6000, {
- 0x01c0bc40, 0x01c0bc40, 0x04c13052,
- 0x01e0c841, 0x01a0b43d, 0x04c13851,
- 0x0210cc44, 0x0180a83c, 0x04a13453,
- 0x0230d845, 0x0160a03a, 0x04913c52,
- 0x0260e047, 0x01409838, 0x04714052,
- 0x0280ec49, 0x01208c37, 0x04514c50,
- 0x02b0f44b, 0x01008435, 0x04414c50,
- 0x02d1004c, 0x00e07c33, 0x0431544f },
- },
- { 0x7000, {
- 0x0230c83e, 0x0230c83e, 0x04711c4c,
- 0x0250d03f, 0x0210c43c, 0x0471204b,
- 0x0270d840, 0x0200b83c, 0x0451244b,
- 0x0290dc42, 0x01e0b43a, 0x0441244c,
- 0x02b0e443, 0x01c0b038, 0x0441284b,
- 0x02d0ec44, 0x01b0a438, 0x0421304a,
- 0x02f0f445, 0x0190a036, 0x04213449,
- 0x0310f847, 0x01709c34, 0x04213848 },
- },
- { 0x8000, {
- 0x0280d03d, 0x0280d03d, 0x04310c48,
- 0x02a0d43e, 0x0270c83c, 0x04311047,
- 0x02b0dc3e, 0x0250c83a, 0x04311447,
- 0x02d0e040, 0x0240c03a, 0x04211446,
- 0x02e0e840, 0x0220bc39, 0x04111847,
- 0x0300e842, 0x0210b438, 0x04012445,
- 0x0310f043, 0x0200b037, 0x04012045,
- 0x0330f444, 0x01e0ac36, 0x03f12445 },
- },
- { 0xefff, {
- 0x0340dc3a, 0x0340dc3a, 0x03b0ec40,
- 0x0340e03a, 0x0330e039, 0x03c0f03e,
- 0x0350e03b, 0x0330dc39, 0x03c0ec3e,
- 0x0350e43a, 0x0320dc38, 0x03c0f43e,
- 0x0360e43b, 0x0320d839, 0x03b0f03e,
- 0x0360e83b, 0x0310d838, 0x03c0fc3b,
- 0x0370e83b, 0x0310d439, 0x03a0f83d,
- 0x0370e83c, 0x0300d438, 0x03b0fc3c },
- }
-};
-
-enum rcar_vin_state {
- STOPPED = 0,
- RUNNING,
- STOPPING,
-};
-
-struct rcar_vin_priv {
- void __iomem *base;
- spinlock_t lock;
- int sequence;
- /* State of the VIN module in capturing mode */
- enum rcar_vin_state state;
- struct soc_camera_host ici;
- struct list_head capture;
-#define MAX_BUFFER_NUM 3
- struct vb2_v4l2_buffer *queue_buf[MAX_BUFFER_NUM];
- enum v4l2_field field;
- unsigned int pdata_flags;
- unsigned int vb_count;
- unsigned int nr_hw_slots;
- bool request_to_stop;
- struct completion capture_stop;
- enum chip_id chip;
-};
-
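-/*
- * Continuous frame capture is used only when userspace queues more
- * buffers than there are hardware address slots (MAX_BUFFER_NUM);
- * otherwise frames are captured one at a time, see rcar_vin_capture().
- */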
-#define is_continuous_transfer(priv) ((priv)->vb_count > MAX_BUFFER_NUM)
-
-struct rcar_vin_buffer {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
-};
-
-#define to_buf_list(vb2_buffer) (&container_of(vb2_buffer, \
- struct rcar_vin_buffer, \
- vb)->list)
-
-struct rcar_vin_cam {
- /* VIN offsets within the camera output, before the VIN scaler */
- unsigned int vin_left;
- unsigned int vin_top;
- /* Client output, as seen by the VIN */
- unsigned int width;
- unsigned int height;
- /* User window from S_FMT */
- unsigned int out_width;
- unsigned int out_height;
- /*
- * User window from S_CROP / G_CROP, produced by client cropping and
- * scaling, VIN scaling and VIN cropping, mapped back onto the client
- * input window
- */
- struct v4l2_rect subrect;
- /* Camera cropping rectangle */
- struct v4l2_rect rect;
- const struct soc_mbus_pixelfmt *extra_fmt;
-};
-
-/*
- * .queue_setup() is called to check whether the driver can accept the requested
- * number of buffers and to fill in plane sizes for the current frame format if
- * required.
- */
-static int rcar_vin_videobuf_setup(struct vb2_queue *vq,
- unsigned int *count,
- unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
-
- if (!vq->num_buffers)
- priv->sequence = 0;
-
- if (!*count)
- *count = 2;
- priv->vb_count = *count;
-
- /* Number of hardware slots */
- if (is_continuous_transfer(priv))
- priv->nr_hw_slots = MAX_BUFFER_NUM;
- else
- priv->nr_hw_slots = 1;
-
- if (*num_planes)
- return sizes[0] < icd->sizeimage ? -EINVAL : 0;
-
- sizes[0] = icd->sizeimage;
- *num_planes = 1;
-
- dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);
-
- return 0;
-}
-
-static int rcar_vin_setup(struct rcar_vin_priv *priv)
-{
- struct soc_camera_device *icd = priv->ici.icd;
- struct rcar_vin_cam *cam = icd->host_priv;
- u32 vnmc, dmr, interrupts;
- bool progressive = false, output_is_yuv = false, input_is_yuv = false;
-
- switch (priv->field) {
- case V4L2_FIELD_TOP:
- vnmc = VNMC_IM_ODD;
- break;
- case V4L2_FIELD_BOTTOM:
- vnmc = VNMC_IM_EVEN;
- break;
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- vnmc = VNMC_IM_FULL;
- break;
- case V4L2_FIELD_INTERLACED_BT:
- vnmc = VNMC_IM_FULL | VNMC_FOC;
- break;
- case V4L2_FIELD_NONE:
- if (is_continuous_transfer(priv)) {
- vnmc = VNMC_IM_ODD_EVEN;
- progressive = true;
- } else {
- vnmc = VNMC_IM_ODD;
- }
- break;
- default:
- vnmc = VNMC_IM_ODD;
- break;
- }
-
- /* input interface */
- switch (icd->current_fmt->code) {
- case MEDIA_BUS_FMT_YUYV8_1X16:
- /* BT.601/BT.1358 16bit YCbCr422 */
- vnmc |= VNMC_INF_YUV16;
- input_is_yuv = true;
- break;
- case MEDIA_BUS_FMT_YUYV8_2X8:
- /* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
- vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
- VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
- input_is_yuv = true;
- break;
- case MEDIA_BUS_FMT_RGB888_1X24:
- vnmc |= VNMC_INF_RGB888;
- break;
- case MEDIA_BUS_FMT_YUYV10_2X10:
- /* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
- vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
- VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
- input_is_yuv = true;
- break;
- default:
- break;
- }
-
- /* output format */
- switch (icd->current_fmt->host_fmt->fourcc) {
- case V4L2_PIX_FMT_NV16:
- iowrite32(ALIGN(cam->width * cam->height, 0x80),
- priv->base + VNUVAOF_REG);
- dmr = VNDMR_DTMD_YCSEP;
- output_is_yuv = true;
- break;
- case V4L2_PIX_FMT_YUYV:
- dmr = VNDMR_BPSM;
- output_is_yuv = true;
- break;
- case V4L2_PIX_FMT_UYVY:
- dmr = 0;
- output_is_yuv = true;
- break;
- case V4L2_PIX_FMT_RGB555X:
- dmr = VNDMR_DTMD_ARGB;
- break;
- case V4L2_PIX_FMT_RGB565:
- dmr = 0;
- break;
- case V4L2_PIX_FMT_RGB32:
- if (priv->chip != RCAR_GEN2 && priv->chip != RCAR_H1 &&
- priv->chip != RCAR_E1)
- goto e_format;
-
- dmr = VNDMR_EXRGB;
- break;
- case V4L2_PIX_FMT_ARGB32:
- if (priv->chip != RCAR_GEN3)
- goto e_format;
-
- dmr = VNDMR_EXRGB | VNDMR_DTMD_ARGB;
- break;
- default:
- goto e_format;
- }
-
- /* Always update on field change */
- vnmc |= VNMC_VUP;
-
- /* If input and output use the same colorspace, use bypass mode */
- if (input_is_yuv == output_is_yuv)
- vnmc |= VNMC_BPS;
-
- /* progressive or interlaced mode */
- interrupts = progressive ? VNIE_FIE : VNIE_EFE;
-
- /* ack interrupts */
- iowrite32(interrupts, priv->base + VNINTS_REG);
- /* enable interrupts */
- iowrite32(interrupts, priv->base + VNIE_REG);
- /* start capturing */
- iowrite32(dmr, priv->base + VNDMR_REG);
- iowrite32(vnmc | VNMC_ME, priv->base + VNMC_REG);
-
- return 0;
-
-e_format:
- dev_warn(icd->parent, "Invalid fourcc format (0x%x)\n",
- icd->current_fmt->host_fmt->fourcc);
- return -EINVAL;
-}
-
-static void rcar_vin_capture(struct rcar_vin_priv *priv)
-{
- if (is_continuous_transfer(priv))
- /* Continuous Frame Capture Mode */
- iowrite32(VNFC_C_FRAME, priv->base + VNFC_REG);
- else
- /* Single Frame Capture Mode */
- iowrite32(VNFC_S_FRAME, priv->base + VNFC_REG);
-}
-
-static void rcar_vin_request_capture_stop(struct rcar_vin_priv *priv)
-{
- priv->state = STOPPING;
-
- /* set continuous & single transfer off */
- iowrite32(0, priv->base + VNFC_REG);
- /* disable capture (release DMA buffer), reset */
- iowrite32(ioread32(priv->base + VNMC_REG) & ~VNMC_ME,
- priv->base + VNMC_REG);
-
- /* update the status if stopped already */
- if (!(ioread32(priv->base + VNMS_REG) & VNMS_CA))
- priv->state = STOPPED;
-}
-
-static int rcar_vin_get_free_hw_slot(struct rcar_vin_priv *priv)
-{
- int slot;
-
- for (slot = 0; slot < priv->nr_hw_slots; slot++)
- if (priv->queue_buf[slot] == NULL)
- return slot;
-
- return -1;
-}
-
-static int rcar_vin_hw_ready(struct rcar_vin_priv *priv)
-{
- /* Ensure all HW slots are filled */
- return rcar_vin_get_free_hw_slot(priv) < 0 ? 1 : 0;
-}
-
-/* Moves a buffer from the queue to the HW slots */
-static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
-{
- struct vb2_v4l2_buffer *vbuf;
- dma_addr_t phys_addr_top;
- int slot;
-
- if (list_empty(&priv->capture))
- return 0;
-
- /* Find a free HW slot */
- slot = rcar_vin_get_free_hw_slot(priv);
- if (slot < 0)
- return 0;
-
- vbuf = &list_entry(priv->capture.next,
- struct rcar_vin_buffer, list)->vb;
- list_del_init(to_buf_list(vbuf));
- priv->queue_buf[slot] = vbuf;
- phys_addr_top = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
- iowrite32(phys_addr_top, priv->base + VNMB_REG(slot));
-
- return 1;
-}
-
-static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
- unsigned long size;
-
- size = icd->sizeimage;
-
- if (vb2_plane_size(vb, 0) < size) {
- dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
- vb->index, vb2_plane_size(vb, 0), size);
- goto error;
- }
-
- vb2_set_plane_payload(vb, 0, size);
-
- dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
- vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
-
- spin_lock_irq(&priv->lock);
-
- list_add_tail(to_buf_list(vbuf), &priv->capture);
- rcar_vin_fill_hw_slot(priv);
-
- /* If we weren't running, and have enough buffers, start capturing! */
- if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) {
- if (rcar_vin_setup(priv)) {
- /* Submit error */
- list_del_init(to_buf_list(vbuf));
- spin_unlock_irq(&priv->lock);
- goto error;
- }
- priv->request_to_stop = false;
- init_completion(&priv->capture_stop);
- priv->state = RUNNING;
- rcar_vin_capture(priv);
- }
-
- spin_unlock_irq(&priv->lock);
-
- return;
-
-error:
- vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
-}
-
-/*
- * Wait for capture to stop and for all in-flight buffers to be finished
- * with by the video hardware. Must be called with &priv->lock held.
- */
-static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv)
-{
- while (priv->state != STOPPED) {
- /* issue stop if running */
- if (priv->state == RUNNING)
- rcar_vin_request_capture_stop(priv);
-
- /* wait until capturing has been stopped */
- if (priv->state == STOPPING) {
- priv->request_to_stop = true;
- spin_unlock_irq(&priv->lock);
- if (!wait_for_completion_timeout(
- &priv->capture_stop,
- msecs_to_jiffies(TIMEOUT_MS)))
- priv->state = STOPPED;
- spin_lock_irq(&priv->lock);
- }
- }
-}
-
-static void rcar_vin_stop_streaming(struct vb2_queue *vq)
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
- struct list_head *buf_head, *tmp;
- int i;
-
- spin_lock_irq(&priv->lock);
- rcar_vin_wait_stop_streaming(priv);
-
- for (i = 0; i < MAX_BUFFER_NUM; i++) {
- if (priv->queue_buf[i]) {
- vb2_buffer_done(&priv->queue_buf[i]->vb2_buf,
- VB2_BUF_STATE_ERROR);
- priv->queue_buf[i] = NULL;
- }
- }
-
- list_for_each_safe(buf_head, tmp, &priv->capture) {
- vb2_buffer_done(&list_entry(buf_head,
- struct rcar_vin_buffer, list)->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- list_del_init(buf_head);
- }
- spin_unlock_irq(&priv->lock);
-}
-
-static struct vb2_ops rcar_vin_vb2_ops = {
- .queue_setup = rcar_vin_videobuf_setup,
- .buf_queue = rcar_vin_videobuf_queue,
- .stop_streaming = rcar_vin_stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
-
-static irqreturn_t rcar_vin_irq(int irq, void *data)
-{
- struct rcar_vin_priv *priv = data;
- u32 int_status;
- bool can_run = false, hw_stopped;
- int slot;
- unsigned int handled = 0;
-
- spin_lock(&priv->lock);
-
- int_status = ioread32(priv->base + VNINTS_REG);
- if (!int_status)
- goto done;
- /* ack interrupts */
- iowrite32(int_status, priv->base + VNINTS_REG);
- handled = 1;
-
- /* nothing to do if capture status is 'STOPPED' */
- if (priv->state == STOPPED)
- goto done;
-
- hw_stopped = !(ioread32(priv->base + VNMS_REG) & VNMS_CA);
-
- if (!priv->request_to_stop) {
- if (is_continuous_transfer(priv))
- slot = (ioread32(priv->base + VNMS_REG) &
- VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
- else
- slot = 0;
-
- priv->queue_buf[slot]->field = priv->field;
- priv->queue_buf[slot]->sequence = priv->sequence++;
- priv->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
- vb2_buffer_done(&priv->queue_buf[slot]->vb2_buf,
- VB2_BUF_STATE_DONE);
- priv->queue_buf[slot] = NULL;
-
- if (priv->state != STOPPING)
- can_run = rcar_vin_fill_hw_slot(priv);
-
- if (hw_stopped || !can_run) {
- priv->state = STOPPED;
- } else if (is_continuous_transfer(priv) &&
- list_empty(&priv->capture) &&
- priv->state == RUNNING) {
- /*
- * Continuous capturing requires an explicit stop
- * operation when there is no buffer to be set into
- * the VnMBm registers.
- */
- rcar_vin_request_capture_stop(priv);
- } else {
- rcar_vin_capture(priv);
- }
-
- } else if (hw_stopped) {
- priv->state = STOPPED;
- priv->request_to_stop = false;
- complete(&priv->capture_stop);
- }
-
-done:
- spin_unlock(&priv->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static int rcar_vin_add_device(struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
- int i;
-
- for (i = 0; i < MAX_BUFFER_NUM; i++)
- priv->queue_buf[i] = NULL;
-
- pm_runtime_get_sync(ici->v4l2_dev.dev);
-
- dev_dbg(icd->parent, "R-Car VIN driver attached to camera %d\n",
- icd->devnum);
-
- return 0;
-}
-
-static void rcar_vin_remove_device(struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
- struct vb2_v4l2_buffer *vbuf;
- int i;
-
- /* disable capture, disable interrupts */
- iowrite32(ioread32(priv->base + VNMC_REG) & ~VNMC_ME,
- priv->base + VNMC_REG);
- iowrite32(0, priv->base + VNIE_REG);
-
- priv->state = STOPPED;
- priv->request_to_stop = false;
-
- /* make sure any active buffers are cancelled */
- spin_lock_irq(&priv->lock);
- for (i = 0; i < MAX_BUFFER_NUM; i++) {
- vbuf = priv->queue_buf[i];
- if (vbuf) {
- list_del_init(to_buf_list(vbuf));
- vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
- }
- }
- spin_unlock_irq(&priv->lock);
-
- pm_runtime_put(ici->v4l2_dev.dev);
-
- dev_dbg(icd->parent, "R-Car VIN driver detached from camera %d\n",
- icd->devnum);
-}
-
-static void set_coeff(struct rcar_vin_priv *priv, unsigned short xs)
-{
- int i;
- const struct vin_coeff *p_prev_set = NULL;
- const struct vin_coeff *p_set = NULL;
-
- /* Look for suitable coefficient values */
- for (i = 0; i < ARRAY_SIZE(vin_coeff_set); i++) {
- p_prev_set = p_set;
- p_set = &vin_coeff_set[i];
-
- if (xs < p_set->xs_value)
- break;
- }
-
- /* Use previous value if its XS value is closer */
- if (p_prev_set && p_set &&
- xs - p_prev_set->xs_value < p_set->xs_value - xs)
- p_set = p_prev_set;
-
- /* Set coefficient registers */
- iowrite32(p_set->coeff_set[0], priv->base + VNC1A_REG);
- iowrite32(p_set->coeff_set[1], priv->base + VNC1B_REG);
- iowrite32(p_set->coeff_set[2], priv->base + VNC1C_REG);
-
- iowrite32(p_set->coeff_set[3], priv->base + VNC2A_REG);
- iowrite32(p_set->coeff_set[4], priv->base + VNC2B_REG);
- iowrite32(p_set->coeff_set[5], priv->base + VNC2C_REG);
-
- iowrite32(p_set->coeff_set[6], priv->base + VNC3A_REG);
- iowrite32(p_set->coeff_set[7], priv->base + VNC3B_REG);
- iowrite32(p_set->coeff_set[8], priv->base + VNC3C_REG);
-
- iowrite32(p_set->coeff_set[9], priv->base + VNC4A_REG);
- iowrite32(p_set->coeff_set[10], priv->base + VNC4B_REG);
- iowrite32(p_set->coeff_set[11], priv->base + VNC4C_REG);
-
- iowrite32(p_set->coeff_set[12], priv->base + VNC5A_REG);
- iowrite32(p_set->coeff_set[13], priv->base + VNC5B_REG);
- iowrite32(p_set->coeff_set[14], priv->base + VNC5C_REG);
-
- iowrite32(p_set->coeff_set[15], priv->base + VNC6A_REG);
- iowrite32(p_set->coeff_set[16], priv->base + VNC6B_REG);
- iowrite32(p_set->coeff_set[17], priv->base + VNC6C_REG);
-
- iowrite32(p_set->coeff_set[18], priv->base + VNC7A_REG);
- iowrite32(p_set->coeff_set[19], priv->base + VNC7B_REG);
- iowrite32(p_set->coeff_set[20], priv->base + VNC7C_REG);
-
- iowrite32(p_set->coeff_set[21], priv->base + VNC8A_REG);
- iowrite32(p_set->coeff_set[22], priv->base + VNC8B_REG);
- iowrite32(p_set->coeff_set[23], priv->base + VNC8C_REG);
-}
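/*
 * set_coeff() above picks, from a table sorted by ascending xs_value, the
 * entry nearest to the requested scaling factor. The same selection over a
 * plain sorted array, as a self-contained sketch (assumes n > 0):
 */
static size_t nearest_index(const unsigned int *table, size_t n,
			    unsigned int xs)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (xs < table[i])
			break;
	if (i == n)
		return n - 1;		/* beyond the last entry */
	if (i > 0 && xs - table[i - 1] < table[i] - xs)
		return i - 1;		/* the previous entry is closer */
	return i;
}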
-
-/* rect is guaranteed not to exceed the scaled camera rectangle */
-static int rcar_vin_set_rect(struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_cam *cam = icd->host_priv;
- struct rcar_vin_priv *priv = ici->priv;
- unsigned int left_offset, top_offset;
- unsigned char dsize = 0;
- struct v4l2_rect *cam_subrect = &cam->subrect;
- u32 value;
-
- dev_dbg(icd->parent, "Crop %ux%u@%u:%u\n",
- icd->user_width, icd->user_height, cam->vin_left, cam->vin_top);
-
- left_offset = cam->vin_left;
- top_offset = cam->vin_top;
-
- if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_RGB32 &&
- priv->chip == RCAR_E1)
- dsize = 1;
-
- dev_dbg(icd->parent, "Cam %ux%u@%u:%u\n",
- cam->width, cam->height, cam->vin_left, cam->vin_top);
- dev_dbg(icd->parent, "Cam subrect %ux%u@%u:%u\n",
- cam_subrect->width, cam_subrect->height,
- cam_subrect->left, cam_subrect->top);
-
- /* Set Start/End Pixel/Line Pre-Clip */
- iowrite32(left_offset << dsize, priv->base + VNSPPRC_REG);
- iowrite32((left_offset + cam_subrect->width - 1) << dsize,
- priv->base + VNEPPRC_REG);
- switch (priv->field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- iowrite32(top_offset / 2, priv->base + VNSLPRC_REG);
- iowrite32((top_offset + cam_subrect->height) / 2 - 1,
- priv->base + VNELPRC_REG);
- break;
- default:
- iowrite32(top_offset, priv->base + VNSLPRC_REG);
- iowrite32(top_offset + cam_subrect->height - 1,
- priv->base + VNELPRC_REG);
- break;
- }
-
- /* Set scaling coefficient */
- value = 0;
- if (cam_subrect->height != cam->out_height)
- value = (4096 * cam_subrect->height) / cam->out_height;
- dev_dbg(icd->parent, "YS Value: %x\n", value);
- iowrite32(value, priv->base + VNYS_REG);
-
- value = 0;
- if (cam_subrect->width != cam->out_width)
- value = (4096 * cam_subrect->width) / cam->out_width;
-
- /* Horizontal upscaling is limited to double size */
- if (0 < value && value < 2048)
- value = 2048;
-
- dev_dbg(icd->parent, "XS Value: %x\n", value);
- iowrite32(value, priv->base + VNXS_REG);
-
- /* Horizontal upscaling is carried out by scaling down from double size */
- if (value < 4096)
- value *= 2;
-
- set_coeff(priv, value);
-
- /* Set Start/End Pixel/Line Post-Clip */
- iowrite32(0, priv->base + VNSPPOC_REG);
- iowrite32(0, priv->base + VNSLPOC_REG);
- iowrite32((cam->out_width - 1) << dsize, priv->base + VNEPPOC_REG);
- switch (priv->field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- iowrite32(cam->out_height / 2 - 1,
- priv->base + VNELPOC_REG);
- break;
- default:
- iowrite32(cam->out_height - 1, priv->base + VNELPOC_REG);
- break;
- }
-
- iowrite32(ALIGN(cam->out_width, 0x10), priv->base + VNIS_REG);
-
- return 0;
-}
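/*
 * The VNYS/VNXS values programmed above are fixed-point ratios in which
 * 4096 means 1:1 and zero leaves the scaler off. A worked sketch under
 * that convention: scaling 960 input lines to 480 output lines yields
 * 4096 * 960 / 480 = 8192, i.e. 2:1 downscaling.
 */
static u32 scale_ratio(u32 in, u32 out)
{
	if (in == out)
		return 0;		/* identity: disable scaling */
	return (4096 * in) / out;
}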
-
-static void capture_stop_preserve(struct rcar_vin_priv *priv, u32 *vnmc)
-{
- *vnmc = ioread32(priv->base + VNMC_REG);
- /* module disable */
- iowrite32(*vnmc & ~VNMC_ME, priv->base + VNMC_REG);
-}
-
-static void capture_restore(struct rcar_vin_priv *priv, u32 vnmc)
-{
- unsigned long timeout = jiffies + 10 * HZ;
-
- /*
- * Wait until the end of the current frame. It can take a long time,
- * but if it has been aborted by an MRST1 reset, it should exit sooner.
- */
- while ((ioread32(priv->base + VNMS_REG) & VNMS_AV) &&
- time_before(jiffies, timeout))
- msleep(1);
-
- if (time_after(jiffies, timeout)) {
- dev_err(priv->ici.v4l2_dev.dev,
- "Timeout waiting for frame end! Interface problem?\n");
- return;
- }
-
- iowrite32(vnmc, priv->base + VNMC_REG);
-}
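/*
 * A sketch of the bounded-poll idiom in capture_restore() above, with a
 * hypothetical register and mask: take a jiffies deadline once, poll with
 * time_before(), then re-check the condition to tell success from timeout.
 */
static int example_poll_clear(void __iomem *reg, u32 mask, unsigned int secs)
{
	unsigned long timeout = jiffies + secs * HZ;

	while ((ioread32(reg) & mask) && time_before(jiffies, timeout))
		msleep(1);

	return (ioread32(reg) & mask) ? -ETIMEDOUT : 0;
}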
-
-#define VIN_MBUS_FLAGS (V4L2_MBUS_MASTER | \
- V4L2_MBUS_PCLK_SAMPLE_RISING | \
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
- V4L2_MBUS_HSYNC_ACTIVE_LOW | \
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
- V4L2_MBUS_VSYNC_ACTIVE_LOW | \
- V4L2_MBUS_DATA_ACTIVE_HIGH)
-
-static int rcar_vin_set_bus_param(struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_mbus_config cfg;
- unsigned long common_flags;
- u32 vnmc;
- u32 val;
- int ret;
-
- capture_stop_preserve(priv, &vnmc);
-
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
- if (!ret) {
- common_flags = soc_mbus_config_compatible(&cfg, VIN_MBUS_FLAGS);
- if (!common_flags) {
- dev_warn(icd->parent,
- "MBUS flags incompatible: camera 0x%x, host 0x%x\n",
- cfg.flags, VIN_MBUS_FLAGS);
- return -EINVAL;
- }
- } else if (ret != -ENOIOCTLCMD) {
- return ret;
- } else {
- common_flags = VIN_MBUS_FLAGS;
- }
-
- /* Make choices based on platform preferences */
- if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
- if (priv->pdata_flags & RCAR_VIN_HSYNC_ACTIVE_LOW)
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
- }
-
- if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
- if (priv->pdata_flags & RCAR_VIN_VSYNC_ACTIVE_LOW)
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
- }
-
- cfg.flags = common_flags;
- ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
-
- val = VNDMR2_FTEV | VNDMR2_VLV(1);
- if (!(common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
- val |= VNDMR2_VPS;
- if (!(common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
- val |= VNDMR2_HPS;
- iowrite32(val, priv->base + VNDMR2_REG);
-
- ret = rcar_vin_set_rect(icd);
- if (ret < 0)
- return ret;
-
- capture_restore(priv, vnmc);
-
- return 0;
-}
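/*
 * A minimal sketch of the polarity negotiation performed above: when both
 * polarities of a sync line survive soc_mbus_config_compatible(), the
 * platform preference breaks the tie. Shown here for HSYNC only.
 */
static unsigned long pick_hsync(unsigned long common, bool plat_active_low)
{
	if ((common & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
	    (common & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
		if (plat_active_low)
			common &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
		else
			common &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
	}
	return common;
}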
-
-static int rcar_vin_try_bus_param(struct soc_camera_device *icd,
- unsigned char buswidth)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_mbus_config cfg;
- int ret;
-
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
- if (ret == -ENOIOCTLCMD)
- return 0;
- else if (ret)
- return ret;
-
- if (buswidth > 24)
- return -EINVAL;
-
- /* check whether there are common mbus flags */
- ret = soc_mbus_config_compatible(&cfg, VIN_MBUS_FLAGS);
- if (ret)
- return 0;
-
- dev_warn(icd->parent,
- "MBUS flags incompatible: camera 0x%x, host 0x%x\n",
- cfg.flags, VIN_MBUS_FLAGS);
-
- return -EINVAL;
-}
-
-static bool rcar_vin_packing_supported(const struct soc_mbus_pixelfmt *fmt)
-{
- return fmt->packing == SOC_MBUS_PACKING_NONE ||
- (fmt->bits_per_sample > 8 &&
- fmt->packing == SOC_MBUS_PACKING_EXTEND16);
-}
-
-static const struct soc_mbus_pixelfmt rcar_vin_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_NV16,
- .name = "NV16",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_2X8_PADHI,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .name = "YUYV",
- .bits_per_sample = 16,
- .packing = SOC_MBUS_PACKING_NONE,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PACKED,
- },
- {
- .fourcc = V4L2_PIX_FMT_UYVY,
- .name = "UYVY",
- .bits_per_sample = 16,
- .packing = SOC_MBUS_PACKING_NONE,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PACKED,
- },
- {
- .fourcc = V4L2_PIX_FMT_RGB565,
- .name = "RGB565",
- .bits_per_sample = 16,
- .packing = SOC_MBUS_PACKING_NONE,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PACKED,
- },
- {
- .fourcc = V4L2_PIX_FMT_RGB555X,
- .name = "ARGB1555",
- .bits_per_sample = 16,
- .packing = SOC_MBUS_PACKING_NONE,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PACKED,
- },
- {
- .fourcc = V4L2_PIX_FMT_RGB32,
- .name = "RGB888",
- .bits_per_sample = 32,
- .packing = SOC_MBUS_PACKING_NONE,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PACKED,
- },
- {
- .fourcc = V4L2_PIX_FMT_ARGB32,
- .name = "ARGB8888",
- .bits_per_sample = 32,
- .packing = SOC_MBUS_PACKING_NONE,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PACKED,
- },
-};
-
-static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
- struct soc_camera_format_xlate *xlate)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->parent;
- int ret, k, n;
- int formats = 0;
- struct rcar_vin_cam *cam;
- struct v4l2_subdev_mbus_code_enum code = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .index = idx,
- };
- const struct soc_mbus_pixelfmt *fmt;
-
- ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
- if (ret < 0)
- return 0;
-
- fmt = soc_mbus_get_fmtdesc(code.code);
- if (!fmt) {
- dev_warn(dev, "unsupported format code #%u: %d\n", idx, code.code);
- return 0;
- }
-
- ret = rcar_vin_try_bus_param(icd, fmt->bits_per_sample);
- if (ret < 0)
- return 0;
-
- if (!icd->host_priv) {
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &fmt.format;
- struct v4l2_rect rect;
- struct device *dev = icd->parent;
- int shift;
-
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
-
- /* Cache current client geometry */
- ret = soc_camera_client_g_rect(sd, &rect);
- if (ret == -ENOIOCTLCMD) {
- /* Sensor driver doesn't support cropping */
- rect.left = 0;
- rect.top = 0;
- rect.width = mf->width;
- rect.height = mf->height;
- } else if (ret < 0) {
- return ret;
- }
-
- /*
- * If the sensor proposes a format that is too large, try smaller ones:
- * 1280x960, 640x480, 320x240
- */
- for (shift = 0; shift < 3; shift++) {
- if (mf->width <= VIN_MAX_WIDTH &&
- mf->height <= VIN_MAX_HEIGHT)
- break;
-
- mf->width = 1280 >> shift;
- mf->height = 960 >> shift;
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd),
- pad, set_fmt, NULL,
- &fmt);
- if (ret < 0)
- return ret;
- }
-
- if (shift == 3) {
- dev_err(dev,
- "Failed to configure the client below %ux%u\n",
- mf->width, mf->height);
- return -EIO;
- }
-
- dev_dbg(dev, "camera fmt %ux%u\n", mf->width, mf->height);
-
- cam = kzalloc(sizeof(*cam), GFP_KERNEL);
- if (!cam)
- return -ENOMEM;
- /*
- * We are called with the current camera crop;
- * initialise the subrect with it
- */
- cam->rect = rect;
- cam->subrect = rect;
- cam->width = mf->width;
- cam->height = mf->height;
- cam->out_width = mf->width;
- cam->out_height = mf->height;
-
- icd->host_priv = cam;
- } else {
- cam = icd->host_priv;
- }
-
- /* Beginning of a pass */
- if (!idx)
- cam->extra_fmt = NULL;
-
- switch (code.code) {
- case MEDIA_BUS_FMT_YUYV8_1X16:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_YUYV10_2X10:
- case MEDIA_BUS_FMT_RGB888_1X24:
- if (cam->extra_fmt)
- break;
-
- /* Add all our formats that can be generated by VIN */
- cam->extra_fmt = rcar_vin_formats;
-
- n = ARRAY_SIZE(rcar_vin_formats);
- formats += n;
- for (k = 0; xlate && k < n; k++, xlate++) {
- xlate->host_fmt = &rcar_vin_formats[k];
- xlate->code = code.code;
- dev_dbg(dev, "Providing format %s using code %d\n",
- rcar_vin_formats[k].name, code.code);
- }
- break;
- default:
- if (!rcar_vin_packing_supported(fmt))
- return 0;
-
- dev_dbg(dev, "Providing format %s in pass-through mode\n",
- fmt->name);
- break;
- }
-
- /* Generic pass-through */
- formats++;
- if (xlate) {
- xlate->host_fmt = fmt;
- xlate->code = code.code;
- xlate++;
- }
-
- return formats;
-}
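/*
 * get_formats() above is written for the two-pass soc-camera protocol:
 * the core first calls it with xlate == NULL so the host only counts the
 * translations it can offer, then calls it again with an array to fill.
 * A stripped-down sketch of that contract:
 */
static int example_get_formats(const struct soc_mbus_pixelfmt *fmts, int n,
			       u32 code,
			       struct soc_camera_format_xlate *xlate)
{
	int k;

	for (k = 0; xlate && k < n; k++, xlate++) {
		xlate->host_fmt = &fmts[k];
		xlate->code = code;
	}
	return n;	/* the count is returned whether or not it was stored */
}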
-
-static void rcar_vin_put_formats(struct soc_camera_device *icd)
-{
- kfree(icd->host_priv);
- icd->host_priv = NULL;
-}
-
-static int rcar_vin_set_crop(struct soc_camera_device *icd,
- const struct v4l2_crop *a)
-{
- struct v4l2_crop a_writable = *a;
- const struct v4l2_rect *rect = &a_writable.c;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
- struct v4l2_crop cam_crop;
- struct rcar_vin_cam *cam = icd->host_priv;
- struct v4l2_rect *cam_rect = &cam_crop.c;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->parent;
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &fmt.format;
- u32 vnmc;
- int ret, i;
-
- dev_dbg(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
- rect->left, rect->top);
-
- /* During camera cropping its output window can change too, stop VIN */
- capture_stop_preserve(priv, &vnmc);
- dev_dbg(dev, "VNMC_REG 0x%x\n", vnmc);
-
- /* Apply iterative camera S_CROP for new input window. */
- ret = soc_camera_client_s_crop(sd, &a_writable, &cam_crop,
- &cam->rect, &cam->subrect);
- if (ret < 0)
- return ret;
-
- dev_dbg(dev, "camera cropped to %ux%u@%u:%u\n",
- cam_rect->width, cam_rect->height,
- cam_rect->left, cam_rect->top);
-
- /* On success cam_crop contains current camera crop */
-
- /* Retrieve camera output window */
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
- if (ret < 0)
- return ret;
-
- if (mf->width > VIN_MAX_WIDTH || mf->height > VIN_MAX_HEIGHT)
- return -EINVAL;
-
- /* Cache camera output window */
- cam->width = mf->width;
- cam->height = mf->height;
-
- icd->user_width = cam->width;
- icd->user_height = cam->height;
-
- cam->vin_left = rect->left & ~1;
- cam->vin_top = rect->top & ~1;
-
- /* Use VIN cropping to crop to the new window. */
- ret = rcar_vin_set_rect(icd);
- if (ret < 0)
- return ret;
-
- cam->subrect = *rect;
-
- dev_dbg(dev, "VIN cropped to %ux%u@%u:%u\n",
- icd->user_width, icd->user_height,
- cam->vin_left, cam->vin_top);
-
- /* Restore capture */
- for (i = 0; i < MAX_BUFFER_NUM; i++) {
- if (priv->queue_buf[i] && priv->state == STOPPED) {
- vnmc |= VNMC_ME;
- break;
- }
- }
- capture_restore(priv, vnmc);
-
- /* Return ret even if only the camera cropping succeeded */
- return ret;
-}
-
-static int rcar_vin_get_crop(struct soc_camera_device *icd,
- struct v4l2_crop *a)
-{
- struct rcar_vin_cam *cam = icd->host_priv;
-
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->c = cam->subrect;
-
- return 0;
-}
-
-/* Similar to set_crop multistage iterative algorithm */
-static int rcar_vin_set_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct rcar_vin_cam *cam = icd->host_priv;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_mbus_framefmt mf;
- struct device *dev = icd->parent;
- __u32 pixfmt = pix->pixelformat;
- const struct soc_camera_format_xlate *xlate;
- unsigned int vin_sub_width = 0, vin_sub_height = 0;
- int ret;
- bool can_scale;
- enum v4l2_field field;
- v4l2_std_id std;
-
- dev_dbg(dev, "S_FMT(pix=0x%x, %ux%u)\n",
- pixfmt, pix->width, pix->height);
-
- switch (pix->field) {
- default:
- pix->field = V4L2_FIELD_NONE;
- /* fall-through */
- case V4L2_FIELD_NONE:
- case V4L2_FIELD_TOP:
- case V4L2_FIELD_BOTTOM:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- field = pix->field;
- break;
- case V4L2_FIELD_INTERLACED:
- /* Query the standard if _TB/_BT is not explicitly specified */
- ret = v4l2_subdev_call(sd, video, querystd, &std);
- if (ret == -ENOIOCTLCMD) {
- field = V4L2_FIELD_NONE;
- } else if (ret < 0) {
- return ret;
- } else {
- field = std & V4L2_STD_625_50 ?
- V4L2_FIELD_INTERLACED_TB :
- V4L2_FIELD_INTERLACED_BT;
- }
- break;
- }
-
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
- if (!xlate) {
- dev_warn(dev, "Format %x not found\n", pixfmt);
- return -EINVAL;
- }
- /* Calculate client output geometry */
- soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf,
- 12);
- mf.field = pix->field;
- mf.colorspace = pix->colorspace;
- mf.code = xlate->code;
-
- switch (pixfmt) {
- case V4L2_PIX_FMT_RGB32:
- can_scale = priv->chip != RCAR_E1;
- break;
- case V4L2_PIX_FMT_ARGB32:
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_RGB555X:
- can_scale = true;
- break;
- default:
- can_scale = false;
- break;
- }
-
- dev_dbg(dev, "request camera output %ux%u\n", mf.width, mf.height);
-
- ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect,
- &mf, &vin_sub_width, &vin_sub_height,
- can_scale, 12);
-
- /* Done with the camera. Now see if we can improve the result */
- dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
- ret, mf.width, mf.height, pix->width, pix->height);
-
- if (ret == -ENOIOCTLCMD)
- dev_dbg(dev, "Sensor doesn't support scaling\n");
- else if (ret < 0)
- return ret;
-
- if (mf.code != xlate->code)
- return -EINVAL;
-
- /* Prepare VIN crop */
- cam->width = mf.width;
- cam->height = mf.height;
-
- /* Use VIN scaling to scale to the requested user window. */
-
- /* We cannot scale up */
- if (pix->width > vin_sub_width)
- vin_sub_width = pix->width;
-
- if (pix->height > vin_sub_height)
- vin_sub_height = pix->height;
-
- pix->colorspace = mf.colorspace;
-
- if (!can_scale) {
- pix->width = vin_sub_width;
- pix->height = vin_sub_height;
- }
-
- /*
- * We have calculated CFLCR; the actual configuration will be performed
- * in rcar_vin_set_bus_param()
- */
-
- dev_dbg(dev, "W: %u : %u, H: %u : %u\n",
- vin_sub_width, pix->width, vin_sub_height, pix->height);
-
- cam->out_width = pix->width;
- cam->out_height = pix->height;
-
- icd->current_fmt = xlate;
-
- priv->field = field;
-
- return 0;
-}
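/*
 * The V4L2_FIELD_INTERLACED case above derives the field order from the
 * detected analog standard: 625-line/50 Hz sources are top-field first,
 * 525-line/60 Hz sources bottom-field first. As a one-liner:
 */
static enum v4l2_field field_from_std(v4l2_std_id std)
{
	return (std & V4L2_STD_625_50) ? V4L2_FIELD_INTERLACED_TB
				       : V4L2_FIELD_INTERLACED_BT;
}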
-
-static int rcar_vin_try_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
-{
- const struct soc_camera_format_xlate *xlate;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_subdev_pad_config pad_cfg;
- struct v4l2_subdev_format format = {
- .which = V4L2_SUBDEV_FORMAT_TRY,
- };
- struct v4l2_mbus_framefmt *mf = &format.format;
- __u32 pixfmt = pix->pixelformat;
- int width, height;
- int ret;
-
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
- if (!xlate) {
- xlate = icd->current_fmt;
- dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
- pixfmt, xlate->host_fmt->fourcc);
- pixfmt = xlate->host_fmt->fourcc;
- pix->pixelformat = pixfmt;
- pix->colorspace = icd->colorspace;
- }
-
- /* FIXME: calculate using depth and bus width */
- v4l_bound_align_image(&pix->width, 2, VIN_MAX_WIDTH, 1,
- &pix->height, 4, VIN_MAX_HEIGHT, 2, 0);
-
- width = pix->width;
- height = pix->height;
-
- /* let soc-camera calculate these values */
- pix->bytesperline = 0;
- pix->sizeimage = 0;
-
- /* limit to sensor capabilities */
- mf->width = pix->width;
- mf->height = pix->height;
- mf->field = pix->field;
- mf->code = xlate->code;
- mf->colorspace = pix->colorspace;
-
- ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
- pad, set_fmt, &pad_cfg, &format);
- if (ret < 0)
- return ret;
-
- /* Adjust only if VIN cannot scale */
- if (pix->width > mf->width * 2)
- pix->width = mf->width * 2;
- if (pix->height > mf->height * 3)
- pix->height = mf->height * 3;
-
- pix->field = mf->field;
- pix->colorspace = mf->colorspace;
-
- if (pixfmt == V4L2_PIX_FMT_NV16) {
- /* FIXME: check against rect_max after converting soc-camera */
- /* We can scale precisely, so we need a bigger image from the camera */
- if (pix->width < width || pix->height < height) {
- /*
- * We presume the sensor behaves sanely, i.e. if
- * asked for a bigger rectangle, it will not return a
- * smaller one.
- */
- mf->width = VIN_MAX_WIDTH;
- mf->height = VIN_MAX_HEIGHT;
- ret = v4l2_device_call_until_err(sd->v4l2_dev,
- soc_camera_grp_id(icd),
- pad, set_fmt, &pad_cfg,
- &format);
- if (ret < 0) {
- dev_err(icd->parent,
- "client try_fmt() = %d\n", ret);
- return ret;
- }
- }
- /* We will scale exactly */
- if (mf->width > width)
- pix->width = width;
- if (mf->height > height)
- pix->height = height;
- }
-
- return ret;
-}
-
-static unsigned int rcar_vin_poll(struct file *file, poll_table *pt)
-{
- struct soc_camera_device *icd = file->private_data;
-
- return vb2_poll(&icd->vb2_vidq, file, pt);
-}
-
-static int rcar_vin_querycap(struct soc_camera_host *ici,
- struct v4l2_capability *cap)
-{
- strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
- snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s%d", DRV_NAME, ici->nr);
-
- return 0;
-}
-
-static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
- struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-
- vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- vq->io_modes = VB2_MMAP | VB2_USERPTR;
- vq->drv_priv = icd;
- vq->ops = &rcar_vin_vb2_ops;
- vq->mem_ops = &vb2_dma_contig_memops;
- vq->buf_struct_size = sizeof(struct rcar_vin_buffer);
- vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vq->lock = &ici->host_lock;
- vq->dev = ici->v4l2_dev.dev;
-
- return vb2_queue_init(vq);
-}
-
-static struct soc_camera_host_ops rcar_vin_host_ops = {
- .owner = THIS_MODULE,
- .add = rcar_vin_add_device,
- .remove = rcar_vin_remove_device,
- .get_formats = rcar_vin_get_formats,
- .put_formats = rcar_vin_put_formats,
- .get_crop = rcar_vin_get_crop,
- .set_crop = rcar_vin_set_crop,
- .try_fmt = rcar_vin_try_fmt,
- .set_fmt = rcar_vin_set_fmt,
- .poll = rcar_vin_poll,
- .querycap = rcar_vin_querycap,
- .set_bus_param = rcar_vin_set_bus_param,
- .init_videobuf2 = rcar_vin_init_videobuf2,
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id rcar_vin_of_table[] = {
- { .compatible = "renesas,vin-r8a7795", .data = (void *)RCAR_GEN3 },
- { .compatible = "renesas,vin-r8a7794", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,vin-r8a7793", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
- { .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
- { .compatible = "renesas,vin-r8a7778", .data = (void *)RCAR_M1 },
- { .compatible = "renesas,rcar-gen3-vin", .data = (void *)RCAR_GEN3 },
- { .compatible = "renesas,rcar-gen2-vin", .data = (void *)RCAR_GEN2 },
- { },
-};
-MODULE_DEVICE_TABLE(of, rcar_vin_of_table);
-#endif
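/*
 * Each compatible entry above carries the SoC generation in .data, which
 * probe() below recovers through of_match_device(). The same lookup in
 * isolation (the RCAR_GEN2 fallback is only an assumption of this sketch;
 * the driver instead falls back to platform data):
 */
static enum chip_id example_chip_id(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(rcar_vin_of_table, dev);

	return match ? (enum chip_id)match->data : RCAR_GEN2;
}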
-
-static int rcar_vin_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match = NULL;
- struct rcar_vin_priv *priv;
- struct v4l2_of_endpoint ep;
- struct device_node *np;
- struct resource *mem;
- unsigned int pdata_flags;
- int irq, ret;
-
- match = of_match_device(of_match_ptr(rcar_vin_of_table), &pdev->dev);
-
- np = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
- if (!np) {
- dev_err(&pdev->dev, "could not find endpoint\n");
- return -EINVAL;
- }
-
- ret = v4l2_of_parse_endpoint(np, &ep);
- if (ret) {
- dev_err(&pdev->dev, "could not parse endpoint\n");
- return ret;
- }
-
- if (ep.bus_type == V4L2_MBUS_BT656)
- pdata_flags = RCAR_VIN_BT656;
- else {
- pdata_flags = 0;
- if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- pdata_flags |= RCAR_VIN_HSYNC_ACTIVE_LOW;
- if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- pdata_flags |= RCAR_VIN_VSYNC_ACTIVE_LOW;
- }
-
- of_node_put(np);
-
- dev_dbg(&pdev->dev, "pdata_flags = %08x\n", pdata_flags);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL)
- return -EINVAL;
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(struct rcar_vin_priv),
- GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
-
- ret = devm_request_irq(&pdev->dev, irq, rcar_vin_irq, IRQF_SHARED,
- dev_name(&pdev->dev), priv);
- if (ret)
- return ret;
-
- priv->ici.priv = priv;
- priv->ici.v4l2_dev.dev = &pdev->dev;
- priv->ici.drv_name = dev_name(&pdev->dev);
- priv->ici.ops = &rcar_vin_host_ops;
-
- priv->pdata_flags = pdata_flags;
- if (!match) {
- priv->ici.nr = pdev->id;
- priv->chip = pdev->id_entry->driver_data;
- } else {
- priv->ici.nr = of_alias_get_id(pdev->dev.of_node, "vin");
- priv->chip = (enum chip_id)match->data;
- }
-
- spin_lock_init(&priv->lock);
- INIT_LIST_HEAD(&priv->capture);
-
- priv->state = STOPPED;
-
- pm_suspend_ignore_children(&pdev->dev, true);
- pm_runtime_enable(&pdev->dev);
-
- ret = soc_camera_host_register(&priv->ici);
- if (ret)
- goto cleanup;
-
- return 0;
-
-cleanup:
- pm_runtime_disable(&pdev->dev);
-
- return ret;
-}
-
-static int rcar_vin_remove(struct platform_device *pdev)
-{
- struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
-
- soc_camera_host_unregister(soc_host);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-static struct platform_driver rcar_vin_driver = {
- .probe = rcar_vin_probe,
- .remove = rcar_vin_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = of_match_ptr(rcar_vin_of_table),
- },
-};
-
-module_platform_driver(rcar_vin_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:rcar_vin");
-MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 02b519dde42a..a15bfb5aea47 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -41,7 +41,6 @@
#include <media/v4l2-dev.h>
#include <media/soc_camera.h>
#include <media/drv-intf/sh_mobile_ceu.h>
-#include <media/drv-intf/sh_mobile_csi2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-mediabus.h>
#include <media/drv-intf/soc_mediabus.h>
@@ -99,11 +98,6 @@ struct sh_mobile_ceu_buffer {
struct sh_mobile_ceu_dev {
struct soc_camera_host ici;
- /* Asynchronous CSI2 linking */
- struct v4l2_async_subdev *csi2_asd;
- struct v4l2_subdev *csi2_sd;
- /* Synchronous probing compatibility */
- struct platform_device *csi2_pdev;
unsigned int irq;
void __iomem *base;
@@ -140,7 +134,7 @@ struct sh_mobile_ceu_cam {
unsigned int width;
unsigned int height;
/*
- * User window from S_CROP / G_CROP, produced by client cropping and
+ * User window from S_SELECTION / G_SELECTION, produced by client cropping and
* scaling, CEU scaling and CEU cropping, mapped back onto the client
* input window
*/
@@ -470,7 +464,7 @@ static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
sh_mobile_ceu_soft_reset(pcdev);
}
-static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
+static const struct vb2_ops sh_mobile_ceu_videobuf_ops = {
.queue_setup = sh_mobile_ceu_videobuf_setup,
.buf_prepare = sh_mobile_ceu_videobuf_prepare,
.buf_queue = sh_mobile_ceu_videobuf_queue,
@@ -517,74 +511,20 @@ out:
return IRQ_HANDLED;
}
-static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
-{
- struct v4l2_subdev *sd;
-
- if (pcdev->csi2_sd)
- return pcdev->csi2_sd;
-
- if (pcdev->csi2_asd) {
- char name[] = "sh-mobile-csi2";
- v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev)
- if (!strncmp(name, sd->name, sizeof(name) - 1)) {
- pcdev->csi2_sd = sd;
- return sd;
- }
- }
-
- return NULL;
-}
-
-static struct v4l2_subdev *csi2_subdev(struct sh_mobile_ceu_dev *pcdev,
- struct soc_camera_device *icd)
-{
- struct v4l2_subdev *sd = pcdev->csi2_sd;
-
- return sd && sd->grp_id == soc_camera_grp_id(icd) ? sd : NULL;
-}
-
static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
- int ret;
-
- if (csi2_sd) {
- csi2_sd->grp_id = soc_camera_grp_id(icd);
- v4l2_set_subdev_hostdata(csi2_sd, icd);
- }
-
- ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
- if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
- return ret;
-
- /*
- * -ENODEV is special: either csi2_sd == NULL or the CSI-2 driver
- * has not found this soc-camera device among its clients
- */
- if (csi2_sd && ret == -ENODEV)
- csi2_sd->grp_id = 0;
-
dev_info(icd->parent,
- "SuperH Mobile CEU%s driver attached to camera %d\n",
- csi2_sd && csi2_sd->grp_id ? "/CSI-2" : "", icd->devnum);
+ "SuperH Mobile CEU driver attached to camera %d\n",
+ icd->devnum);
return 0;
}
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
-
dev_info(icd->parent,
"SuperH Mobile CEU driver detached from camera %d\n",
icd->devnum);
-
- v4l2_subdev_call(csi2_sd, core, s_power, 0);
}
/* Called with .host_lock held */
@@ -704,12 +644,6 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
cdwdr_width *= 2;
}
- /* CSI2 special configuration */
- if (csi2_subdev(pcdev, icd)) {
- in_width = ((in_width - 2) * 2);
- left_offset *= 2;
- }
-
/* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
camor = left_offset | (top_offset << 16);
@@ -758,13 +692,6 @@ static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
ceu_write(pcdev, CAPSR, capsr);
}
-/* Find the bus subdevice driver, e.g., CSI2 */
-static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
- struct soc_camera_device *icd)
-{
- return csi2_subdev(pcdev, icd) ? : soc_camera_to_subdev(icd);
-}
-
#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER | \
V4L2_MBUS_PCLK_SAMPLE_RISING | \
V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
@@ -778,7 +705,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
unsigned long value, common_flags = CEU_BUS_FLAGS;
@@ -866,9 +793,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
- if (csi2_subdev(pcdev, icd)) /* CSI2 mode */
- value |= 3 << 12;
- else if (pcdev->is_16bit)
+ if (pcdev->is_16bit)
value |= 1 << 12;
else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT)
value |= 2 << 12;
@@ -923,9 +848,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
unsigned char buswidth)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
unsigned long common_flags = CEU_BUS_FLAGS;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
int ret;
@@ -1046,12 +969,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
return 0;
}
- if (!csi2_subdev(pcdev, icd)) {
- /* Are there any restrictions in the CSI-2 case? */
- ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
- if (ret < 0)
- return 0;
- }
+ ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
+ if (ret < 0)
+ return 0;
if (!icd->host_priv) {
struct v4l2_subdev_format fmt = {
@@ -1189,17 +1109,16 @@ static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd)
* Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of
* scaling and cropping algorithms and for the meaning of the steps referenced here.
*/
-static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
- const struct v4l2_crop *a)
+static int sh_mobile_ceu_set_selection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
{
- struct v4l2_crop a_writable = *a;
- const struct v4l2_rect *rect = &a_writable.c;
+ struct v4l2_rect *rect = &sel->r;
struct device *dev = icd->parent;
struct soc_camera_host *ici = to_soc_camera_host(dev);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- struct v4l2_crop cam_crop;
+ struct v4l2_selection cam_sel;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct v4l2_rect *cam_rect = &cam_crop.c;
+ struct v4l2_rect *cam_rect = &cam_sel.r;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -1211,7 +1130,7 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
u32 capsr, cflcr;
int ret;
- dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
+ dev_geo(dev, "S_SELECTION(%ux%u@%u:%u)\n", rect->width, rect->height,
rect->left, rect->top);
/* During camera cropping its output window can change too, stop CEU */
@@ -1219,10 +1138,10 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
/*
- * 1. - 2. Apply iterative camera S_CROP for new input window, read back
+ * 1. - 2. Apply iterative camera S_SELECTION for new input window, read back
* actual camera rectangle.
*/
- ret = soc_camera_client_s_crop(sd, &a_writable, &cam_crop,
+ ret = soc_camera_client_s_selection(sd, sel, &cam_sel,
&cam->rect, &cam->subrect);
if (ret < 0)
return ret;
@@ -1331,13 +1250,12 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
return ret;
}
-static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
- struct v4l2_crop *a)
+static int sh_mobile_ceu_get_selection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->c = cam->subrect;
+ sel->r = cam->subrect;
return 0;
}
@@ -1579,8 +1497,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
return ret;
}
-static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
- const struct v4l2_crop *a)
+static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -1599,7 +1517,7 @@ static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
"Client failed to stop the stream: %d\n", ret);
else
/* Do the crop, if it fails, there's nothing more we can do */
- sh_mobile_ceu_set_crop(icd, a);
+ sh_mobile_ceu_set_selection(icd, sel);
dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
@@ -1680,9 +1598,9 @@ static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
.clock_stop = sh_mobile_ceu_clock_stop,
.get_formats = sh_mobile_ceu_get_formats,
.put_formats = sh_mobile_ceu_put_formats,
- .get_crop = sh_mobile_ceu_get_crop,
- .set_crop = sh_mobile_ceu_set_crop,
- .set_livecrop = sh_mobile_ceu_set_livecrop,
+ .get_selection = sh_mobile_ceu_get_selection,
+ .set_selection = sh_mobile_ceu_set_selection,
+ .set_liveselection = sh_mobile_ceu_set_liveselection,
.set_fmt = sh_mobile_ceu_set_fmt,
.try_fmt = sh_mobile_ceu_try_fmt,
.poll = sh_mobile_ceu_poll,
@@ -1721,12 +1639,11 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
unsigned int irq;
- int err, i;
+ int err;
struct bus_wait wait = {
.completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
.notifier.notifier_call = bus_notify,
};
- struct sh_mobile_ceu_companion *csi2;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -1821,132 +1738,16 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
if (pcdev->pdata && pcdev->pdata->asd_sizes) {
- struct v4l2_async_subdev **asd;
- char name[] = "sh-mobile-csi2";
- int j;
-
- /*
- * CSI2 interfacing: several groups can use CSI2, pick up the
- * first one
- */
- asd = pcdev->pdata->asd;
- for (j = 0; pcdev->pdata->asd_sizes[j]; j++) {
- for (i = 0; i < pcdev->pdata->asd_sizes[j]; i++, asd++) {
- dev_dbg(&pdev->dev, "%s(): subdev #%d, type %u\n",
- __func__, i, (*asd)->match_type);
- if ((*asd)->match_type == V4L2_ASYNC_MATCH_DEVNAME &&
- !strncmp(name, (*asd)->match.device_name.name,
- sizeof(name) - 1)) {
- pcdev->csi2_asd = *asd;
- break;
- }
- }
- if (pcdev->csi2_asd)
- break;
- }
-
pcdev->ici.asd = pcdev->pdata->asd;
pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes;
}
- /* Legacy CSI2 interfacing */
- csi2 = pcdev->pdata ? pcdev->pdata->csi2 : NULL;
- if (csi2) {
- /*
- * TODO: remove this once all users are converted to
- * asynchronous CSI2 probing. If it has to be kept, csi2
- * platform device resources have to be added, using
- * platform_device_add_resources()
- */
- struct platform_device *csi2_pdev =
- platform_device_alloc("sh-mobile-csi2", csi2->id);
- struct sh_csi2_pdata *csi2_pdata = csi2->platform_data;
-
- if (!csi2_pdev) {
- err = -ENOMEM;
- goto exit_free_clk;
- }
-
- pcdev->csi2_pdev = csi2_pdev;
-
- err = platform_device_add_data(csi2_pdev, csi2_pdata,
- sizeof(*csi2_pdata));
- if (err < 0)
- goto exit_pdev_put;
-
- csi2_pdev->resource = csi2->resource;
- csi2_pdev->num_resources = csi2->num_resources;
-
- err = platform_device_add(csi2_pdev);
- if (err < 0)
- goto exit_pdev_put;
-
- wait.dev = &csi2_pdev->dev;
-
- err = bus_register_notifier(&platform_bus_type, &wait.notifier);
- if (err < 0)
- goto exit_pdev_unregister;
-
- /*
- * From this point on, the driver module will not unload until
- * we complete the completion.
- */
-
- if (!csi2_pdev->dev.driver) {
- complete(&wait.completion);
- /* Either too late, or probing failed */
- bus_unregister_notifier(&platform_bus_type, &wait.notifier);
- err = -ENXIO;
- goto exit_pdev_unregister;
- }
-
- /*
- * The module is still loaded; in the worst case it is hanging
- * in device release on our completion. So dereferencing the
- * "owner" _now_ is safe!
- */
-
- err = try_module_get(csi2_pdev->dev.driver->owner);
-
- /* Let notifier complete, if it has been locked */
- complete(&wait.completion);
- bus_unregister_notifier(&platform_bus_type, &wait.notifier);
- if (!err) {
- err = -ENODEV;
- goto exit_pdev_unregister;
- }
-
- pcdev->csi2_sd = platform_get_drvdata(csi2_pdev);
- }
-
err = soc_camera_host_register(&pcdev->ici);
if (err)
- goto exit_csi2_unregister;
-
- if (csi2) {
- err = v4l2_device_register_subdev(&pcdev->ici.v4l2_dev,
- pcdev->csi2_sd);
- dev_dbg(&pdev->dev, "%s(): ret(register_subdev) = %d\n",
- __func__, err);
- if (err < 0)
- goto exit_host_unregister;
- /* v4l2_device_register_subdev() took a reference too */
- module_put(pcdev->csi2_sd->owner);
- }
+ goto exit_free_clk;
return 0;
-exit_host_unregister:
- soc_camera_host_unregister(&pcdev->ici);
-exit_csi2_unregister:
- if (csi2) {
- module_put(pcdev->csi2_pdev->dev.driver->owner);
-exit_pdev_unregister:
- platform_device_del(pcdev->csi2_pdev);
-exit_pdev_put:
- pcdev->csi2_pdev->resource = NULL;
- platform_device_put(pcdev->csi2_pdev);
- }
exit_free_clk:
pm_runtime_disable(&pdev->dev);
exit_release_mem:
@@ -1958,21 +1759,11 @@ exit_release_mem:
static int sh_mobile_ceu_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
- struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
- struct sh_mobile_ceu_dev, ici);
- struct platform_device *csi2_pdev = pcdev->csi2_pdev;
soc_camera_host_unregister(soc_host);
pm_runtime_disable(&pdev->dev);
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
dma_release_declared_memory(&pdev->dev);
- if (csi2_pdev && csi2_pdev->dev.driver) {
- struct module *csi2_drv = csi2_pdev->dev.driver->owner;
- platform_device_del(csi2_pdev);
- csi2_pdev->resource = NULL;
- platform_device_put(csi2_pdev);
- module_put(csi2_drv);
- }
return 0;
}
@@ -2012,8 +1803,6 @@ static struct platform_driver sh_mobile_ceu_driver = {
static int __init sh_mobile_ceu_init(void)
{
- /* Whatever return code */
- request_module("sh_mobile_csi2");
return platform_driver_register(&sh_mobile_ceu_driver);
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_csi2.c b/drivers/media/platform/soc_camera/sh_mobile_csi2.c
deleted file mode 100644
index 09b18365a4b1..000000000000
--- a/drivers/media/platform/soc_camera/sh_mobile_csi2.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Driver for the SH-Mobile MIPI CSI-2 unit
- *
- * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/videodev2.h>
-#include <linux/module.h>
-
-#include <media/drv-intf/sh_mobile_ceu.h>
-#include <media/drv-intf/sh_mobile_csi2.h>
-#include <media/soc_camera.h>
-#include <media/drv-intf/soc_mediabus.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-dev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-mediabus.h>
-#include <media/v4l2-subdev.h>
-
-#define SH_CSI2_TREF 0x00
-#define SH_CSI2_SRST 0x04
-#define SH_CSI2_PHYCNT 0x08
-#define SH_CSI2_CHKSUM 0x0C
-#define SH_CSI2_VCDT 0x10
-
-struct sh_csi2 {
- struct v4l2_subdev subdev;
- unsigned int irq;
- unsigned long mipi_flags;
- void __iomem *base;
- struct platform_device *pdev;
- struct sh_csi2_client_config *client;
-};
-
-static void sh_csi2_hwinit(struct sh_csi2 *priv);
-
-static int sh_csi2_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
- struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- struct v4l2_mbus_framefmt *mf = &format->format;
- u32 tmp = (priv->client->channel & 3) << 8;
-
- if (format->pad)
- return -EINVAL;
-
- if (mf->width > 8188)
- mf->width = 8188;
- else if (mf->width & 1)
- mf->width &= ~1;
-
- switch (pdata->type) {
- case SH_CSI2C:
- switch (mf->code) {
- case MEDIA_BUS_FMT_UYVY8_2X8: /* YUV422 */
- case MEDIA_BUS_FMT_YUYV8_1_5X8: /* YUV420 */
- case MEDIA_BUS_FMT_Y8_1X8: /* RAW8 */
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- break;
- default:
- /* All MIPI CSI-2 devices must support one of the primary formats */
- mf->code = MEDIA_BUS_FMT_YUYV8_2X8;
- }
- break;
- case SH_CSI2I:
- switch (mf->code) {
- case MEDIA_BUS_FMT_Y8_1X8: /* RAW8 */
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- case MEDIA_BUS_FMT_SBGGR10_1X10: /* RAW10 */
- case MEDIA_BUS_FMT_SBGGR12_1X12: /* RAW12 */
- break;
- default:
- /* All MIPI CSI-2 devices must support one of the primary formats */
- mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
- }
- break;
- }
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *mf;
- return 0;
- }
-
- if (mf->width > 8188 || mf->width & 1)
- return -EINVAL;
-
- switch (mf->code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- tmp |= 0x1e; /* YUV422 8 bit */
- break;
- case MEDIA_BUS_FMT_YUYV8_1_5X8:
- tmp |= 0x18; /* YUV420 8 bit */
- break;
- case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
- tmp |= 0x21; /* RGB555 */
- break;
- case MEDIA_BUS_FMT_RGB565_2X8_BE:
- tmp |= 0x22; /* RGB565 */
- break;
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- tmp |= 0x2a; /* RAW8 */
- break;
- default:
- return -EINVAL;
- }
-
- iowrite32(tmp, priv->base + SH_CSI2_VCDT);
-
- return 0;
-}
-
-static int sh_csi2_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
-{
- struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-
- if (!priv->mipi_flags) {
- struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
- struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
- struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- unsigned long common_flags, csi2_flags;
- struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,};
- int ret;
-
- /* Check if we can support this camera */
- csi2_flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK |
- V4L2_MBUS_CSI2_1_LANE;
-
- switch (pdata->type) {
- case SH_CSI2C:
- if (priv->client->lanes != 1)
- csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
- break;
- case SH_CSI2I:
- switch (priv->client->lanes) {
- default:
- csi2_flags |= V4L2_MBUS_CSI2_4_LANE;
- case 3:
- csi2_flags |= V4L2_MBUS_CSI2_3_LANE;
- case 2:
- csi2_flags |= V4L2_MBUS_CSI2_2_LANE;
- }
- }
-
- ret = v4l2_subdev_call(client_sd, video, g_mbus_config, &client_cfg);
- if (ret == -ENOIOCTLCMD)
- common_flags = csi2_flags;
- else if (!ret)
- common_flags = soc_mbus_config_compatible(&client_cfg,
- csi2_flags);
- else
- common_flags = 0;
-
- if (!common_flags)
- return -EINVAL;
-
- /* All good: camera MIPI configuration supported */
- priv->mipi_flags = common_flags;
- }
-
- if (cfg) {
- cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING |
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
- V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH;
- cfg->type = V4L2_MBUS_PARALLEL;
- }
-
- return 0;
-}
-
-static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
- const struct v4l2_mbus_config *cfg)
-{
- struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
- struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
- struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
- struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,};
- int ret = sh_csi2_g_mbus_config(sd, NULL);
-
- if (ret < 0)
- return ret;
-
- pm_runtime_get_sync(&priv->pdev->dev);
-
- sh_csi2_hwinit(priv);
-
- client_cfg.flags = priv->mipi_flags;
-
- return v4l2_subdev_call(client_sd, video, s_mbus_config, &client_cfg);
-}
-
-static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
- .g_mbus_config = sh_csi2_g_mbus_config,
- .s_mbus_config = sh_csi2_s_mbus_config,
-};
-
-static struct v4l2_subdev_pad_ops sh_csi2_subdev_pad_ops = {
- .set_fmt = sh_csi2_set_fmt,
-};
-
-static void sh_csi2_hwinit(struct sh_csi2 *priv)
-{
- struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- __u32 tmp = 0x10; /* Enable MIPI CSI clock lane */
-
- /* Reflect registers immediately */
- iowrite32(0x00000001, priv->base + SH_CSI2_TREF);
- /* reset the CSI2 hardware */
- iowrite32(0x00000001, priv->base + SH_CSI2_SRST);
- udelay(5);
- iowrite32(0x00000000, priv->base + SH_CSI2_SRST);
-
- switch (pdata->type) {
- case SH_CSI2C:
- if (priv->client->lanes == 1)
- tmp |= 1;
- else
- /* Default - both lanes */
- tmp |= 3;
- break;
- case SH_CSI2I:
- if (!priv->client->lanes || priv->client->lanes > 4)
- /* Default - all 4 lanes */
- tmp |= 0xf;
- else
- tmp |= (1 << priv->client->lanes) - 1;
- }
-
- if (priv->client->phy == SH_CSI2_PHY_MAIN)
- tmp |= 0x8000;
-
- iowrite32(tmp, priv->base + SH_CSI2_PHYCNT);
-
- tmp = 0;
- if (pdata->flags & SH_CSI2_ECC)
- tmp |= 2;
- if (pdata->flags & SH_CSI2_CRC)
- tmp |= 1;
- iowrite32(tmp, priv->base + SH_CSI2_CHKSUM);
-}
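/*
 * The PHYCNT value above enables one bit per active data lane. The bit
 * arithmetic in isolation: (1 << lanes) - 1 turns a lane count into a
 * contiguous low mask, e.g. 1 -> 0x1, 2 -> 0x3, 3 -> 0x7.
 */
static u32 lane_mask(unsigned int lanes)
{
	if (!lanes || lanes > 4)
		return 0xf;		/* out of range: default to all four */
	return (1 << lanes) - 1;
}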
-
-static int sh_csi2_client_connect(struct sh_csi2 *priv)
-{
- struct device *dev = v4l2_get_subdevdata(&priv->subdev);
- struct sh_csi2_pdata *pdata = dev->platform_data;
- struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
- int i;
-
- if (priv->client)
- return -EBUSY;
-
- for (i = 0; i < pdata->num_clients; i++)
- if ((pdata->clients[i].pdev &&
- &pdata->clients[i].pdev->dev == icd->pdev) ||
- (icd->control &&
- !strcmp(pdata->clients[i].name, dev_name(icd->control))))
- break;
-
- dev_dbg(dev, "%s(%p): found #%d\n", __func__, dev, i);
-
- if (i == pdata->num_clients)
- return -ENODEV;
-
- priv->client = pdata->clients + i;
-
- return 0;
-}
-
-static void sh_csi2_client_disconnect(struct sh_csi2 *priv)
-{
- if (!priv->client)
- return;
-
- priv->client = NULL;
-
- pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
-}
-
-static int sh_csi2_s_power(struct v4l2_subdev *sd, int on)
-{
- struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-
- if (on)
- return sh_csi2_client_connect(priv);
-
- sh_csi2_client_disconnect(priv);
- return 0;
-}
-
-static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops = {
- .s_power = sh_csi2_s_power,
-};
-
-static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
- .core = &sh_csi2_subdev_core_ops,
- .video = &sh_csi2_subdev_video_ops,
- .pad = &sh_csi2_subdev_pad_ops,
-};
-
-static int sh_csi2_probe(struct platform_device *pdev)
-{
- struct resource *res;
- unsigned int irq;
- int ret;
- struct sh_csi2 *priv;
- /* Platform data specifies the PHY, lanes, ECC, CRC */
- struct sh_csi2_pdata *pdata = pdev->dev.platform_data;
-
- if (!pdata)
- return -EINVAL;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_csi2), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Interrupt unused so far */
- irq = platform_get_irq(pdev, 0);
-
- if (!res || (int)irq <= 0) {
- dev_err(&pdev->dev, "Not enough CSI2 platform resources.\n");
- return -ENODEV;
- }
-
- /* TODO: Add support for CSI2I. Careful: different register layout! */
- if (pdata->type != SH_CSI2C) {
- dev_err(&pdev->dev, "Only CSI2C supported ATM.\n");
- return -EINVAL;
- }
-
- priv->irq = irq;
-
- priv->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
-
- priv->pdev = pdev;
- priv->subdev.owner = THIS_MODULE;
- priv->subdev.dev = &pdev->dev;
- platform_set_drvdata(pdev, &priv->subdev);
-
- v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops);
- v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
-
- snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.mipi-csi",
- dev_name(&pdev->dev));
-
- ret = v4l2_async_register_subdev(&priv->subdev);
- if (ret < 0)
- return ret;
-
- pm_runtime_enable(&pdev->dev);
-
- dev_dbg(&pdev->dev, "CSI2 probed.\n");
-
- return 0;
-}
-
-static int sh_csi2_remove(struct platform_device *pdev)
-{
- struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
- struct sh_csi2 *priv = container_of(subdev, struct sh_csi2, subdev);
-
- v4l2_async_unregister_subdev(&priv->subdev);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-static struct platform_driver __refdata sh_csi2_pdrv = {
- .remove = sh_csi2_remove,
- .probe = sh_csi2_probe,
- .driver = {
- .name = "sh-mobile-csi2",
- },
-};
-
-module_platform_driver(sh_csi2_pdrv);
-
-MODULE_DESCRIPTION("SH-Mobile MIPI CSI-2 driver");
-MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sh-mobile-csi2");
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 46c7186f7867..edd1c1de4e33 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -581,7 +581,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n",
pixfmtstr(pix->pixelformat), pix->width, pix->height);
- /* We always call try_fmt() before set_fmt() or set_crop() */
+ /* We always call try_fmt() before set_fmt() or set_selection() */
ret = soc_camera_try_fmt(icd, f);
if (ret < 0)
return ret;
@@ -1025,72 +1025,6 @@ static int soc_camera_streamoff(struct file *file, void *priv,
return ret;
}
-static int soc_camera_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *a)
-{
- struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-
- return ici->ops->cropcap(icd, a);
-}
-
-static int soc_camera_g_crop(struct file *file, void *fh,
- struct v4l2_crop *a)
-{
- struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- int ret;
-
- ret = ici->ops->get_crop(icd, a);
-
- return ret;
-}
-
-/*
- * According to the V4L2 API, drivers shall not update the struct v4l2_crop
- * argument with the actual geometry; instead, the user shall use G_CROP to
- * retrieve it.
- */
-static int soc_camera_s_crop(struct file *file, void *fh,
- const struct v4l2_crop *a)
-{
- struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- const struct v4l2_rect *rect = &a->c;
- struct v4l2_crop current_crop;
- int ret;
-
- if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- dev_dbg(icd->pdev, "S_CROP(%ux%u@%u:%u)\n",
- rect->width, rect->height, rect->left, rect->top);
-
- current_crop.type = a->type;
-
- /* If get_crop fails, we'll let host and/or client drivers decide */
- ret = ici->ops->get_crop(icd, &current_crop);
-
- /* Prohibit window size change with initialised buffers */
- if (ret < 0) {
- dev_err(icd->pdev,
- "S_CROP denied: getting current crop failed\n");
- } else if ((a->c.width == current_crop.c.width &&
- a->c.height == current_crop.c.height) ||
- !is_streaming(ici, icd)) {
- /* same size or not streaming - use .set_crop() */
- ret = ici->ops->set_crop(icd, a);
- } else if (ici->ops->set_livecrop) {
- ret = ici->ops->set_livecrop(icd, a);
- } else {
- dev_err(icd->pdev,
- "S_CROP denied: queue initialised and sizes differ\n");
- ret = -EBUSY;
- }
-
- return ret;
-}
-
static int soc_camera_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
@@ -1101,9 +1035,6 @@ static int soc_camera_g_selection(struct file *file, void *fh,
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (!ici->ops->get_selection)
- return -ENOTTY;
-
return ici->ops->get_selection(icd, s);
}
@@ -1135,10 +1066,11 @@ static int soc_camera_s_selection(struct file *file, void *fh,
return -EBUSY;
}
- if (!ici->ops->set_selection)
- return -ENOTTY;
-
- ret = ici->ops->set_selection(icd, s);
+ if (s->target == V4L2_SEL_TGT_CROP && is_streaming(ici, icd) &&
+ ici->ops->set_liveselection)
+ ret = ici->ops->set_liveselection(icd, s);
+ else
+ ret = ici->ops->set_selection(icd, s);
if (!ret &&
s->target == V4L2_SEL_TGT_COMPOSE) {
icd->user_width = s->r.width;
@@ -1881,23 +1813,40 @@ static int soc_camera_remove(struct soc_camera_device *icd)
return 0;
}
-static int default_cropcap(struct soc_camera_device *icd,
- struct v4l2_cropcap *a)
+static int default_g_selection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- return v4l2_subdev_call(sd, video, cropcap, a);
-}
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ };
+ int ret;
-static int default_g_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- return v4l2_subdev_call(sd, video, g_crop, a);
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
+ if (ret)
+ return ret;
+ sel->r = sdsel.r;
+ return 0;
}
-static int default_s_crop(struct soc_camera_device *icd, const struct v4l2_crop *a)
+static int default_s_selection(struct soc_camera_device *icd,
+ struct v4l2_selection *sel)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- return v4l2_subdev_call(sd, video, s_crop, a);
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ .flags = sel->flags,
+ .r = sel->r,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+ if (ret)
+ return ret;
+ sel->r = sdsel.r;
+ return 0;
}
static int default_g_parm(struct soc_camera_device *icd,
@@ -1968,12 +1917,10 @@ int soc_camera_host_register(struct soc_camera_host *ici)
!ici->v4l2_dev.dev)
return -EINVAL;
- if (!ici->ops->set_crop)
- ici->ops->set_crop = default_s_crop;
- if (!ici->ops->get_crop)
- ici->ops->get_crop = default_g_crop;
- if (!ici->ops->cropcap)
- ici->ops->cropcap = default_cropcap;
+ if (!ici->ops->set_selection)
+ ici->ops->set_selection = default_s_selection;
+ if (!ici->ops->get_selection)
+ ici->ops->get_selection = default_g_selection;
if (!ici->ops->set_parm)
ici->ops->set_parm = default_s_parm;
if (!ici->ops->get_parm)
@@ -2126,9 +2073,6 @@ static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
.vidioc_expbuf = soc_camera_expbuf,
.vidioc_streamon = soc_camera_streamon,
.vidioc_streamoff = soc_camera_streamoff,
- .vidioc_cropcap = soc_camera_cropcap,
- .vidioc_g_crop = soc_camera_g_crop,
- .vidioc_s_crop = soc_camera_s_crop,
.vidioc_g_selection = soc_camera_g_selection,
.vidioc_s_selection = soc_camera_s_selection,
.vidioc_g_parm = soc_camera_g_parm,
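With VIDIOC_CROPCAP, VIDIOC_G_CROP and VIDIOC_S_CROP gone from the ioctl table, applications reach the same functionality through the selection API (the v4l2 core emulates the legacy crop ioctls on top of the g_selection/s_selection handlers). A minimal userspace sketch of the replacement call, assuming an already-open capture node and an illustrative 640x480 rectangle; nothing below is part of the patch itself:

    /* hedged sketch: the selection API replaces the removed crop ioctls */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_capture_crop(int fd)
    {
            struct v4l2_selection sel;

            memset(&sel, 0, sizeof(sel));
            sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            sel.target = V4L2_SEL_TGT_CROP;   /* active crop rectangle */
            sel.r.left = 0;
            sel.r.top = 0;
            sel.r.width = 640;
            sel.r.height = 480;

            /* the driver may adjust sel.r to the closest supported rectangle */
            return ioctl(fd, VIDIOC_S_SELECTION, &sel);
    }

Unlike S_CROP, which was specified not to update its argument, S_SELECTION returns the possibly adjusted rectangle in sel.r, so no separate G_CROP round trip is needed.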
diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c
index a51d2a42998c..534d6c3c6d60 100644
--- a/drivers/media/platform/soc_camera/soc_camera_platform.c
+++ b/drivers/media/platform/soc_camera/soc_camera_platform.c
@@ -76,35 +76,27 @@ static int soc_camera_platform_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int soc_camera_platform_g_crop(struct v4l2_subdev *sd,
- struct v4l2_crop *a)
-{
- struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
-
- a->c.left = 0;
- a->c.top = 0;
- a->c.width = p->format.width;
- a->c.height = p->format.height;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- return 0;
-}
-
-static int soc_camera_platform_cropcap(struct v4l2_subdev *sd,
- struct v4l2_cropcap *a)
+static int soc_camera_platform_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
- a->bounds.left = 0;
- a->bounds.top = 0;
- a->bounds.width = p->format.width;
- a->bounds.height = p->format.height;
- a->defrect = a->bounds;
- a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- a->pixelaspect.numerator = 1;
- a->pixelaspect.denominator = 1;
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
- return 0;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = p->format.width;
+ sel->r.height = p->format.height;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
@@ -120,13 +112,12 @@ static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd,
static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
.s_stream = soc_camera_platform_s_stream,
- .cropcap = soc_camera_platform_cropcap,
- .g_crop = soc_camera_platform_g_crop,
.g_mbus_config = soc_camera_platform_g_mbus_config,
};
static const struct v4l2_subdev_pad_ops platform_subdev_pad_ops = {
.enum_mbus_code = soc_camera_platform_enum_mbus_code,
+ .get_selection = soc_camera_platform_get_selection,
.get_fmt = soc_camera_platform_fill_fmt,
.set_fmt = soc_camera_platform_fill_fmt,
};
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index bda29bc1b933..f77252d6ccd3 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -40,24 +40,22 @@ static bool is_inside(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
/* Get and store current client crop */
int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
{
- struct v4l2_crop crop;
- struct v4l2_cropcap cap;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP,
+ };
int ret;
- crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = v4l2_subdev_call(sd, video, g_crop, &crop);
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
if (!ret) {
- *rect = crop.c;
+ *rect = sdsel.r;
return ret;
}
- /* Camera driver doesn't support .g_crop(), assume default rectangle */
- cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+ sdsel.target = V4L2_SEL_TGT_CROP_DEFAULT;
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
if (!ret)
- *rect = cap.defrect;
+ *rect = sdsel.r;
return ret;
}
@@ -93,17 +91,27 @@ static void update_subrect(struct v4l2_rect *rect, struct v4l2_rect *subrect)
* 2. if (1) failed, try to double the client image until we get one big enough
* 3. if (2) failed, try to request the maximum image
*/
-int soc_camera_client_s_crop(struct v4l2_subdev *sd,
- struct v4l2_crop *crop, struct v4l2_crop *cam_crop,
+int soc_camera_client_s_selection(struct v4l2_subdev *sd,
+ struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
struct v4l2_rect *target_rect, struct v4l2_rect *subrect)
{
- struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ .flags = sel->flags,
+ .r = sel->r,
+ };
+ struct v4l2_subdev_selection bounds = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ struct v4l2_rect *rect = &sel->r, *cam_rect = &cam_sel->r;
struct device *dev = sd->v4l2_dev->dev;
- struct v4l2_cropcap cap;
int ret;
unsigned int width, height;
- v4l2_subdev_call(sd, video, s_crop, crop);
+ v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+ sel->r = sdsel.r;
ret = soc_camera_client_g_rect(sd, cam_rect);
if (ret < 0)
return ret;
@@ -113,29 +121,29 @@ int soc_camera_client_s_crop(struct v4l2_subdev *sd,
* be within camera cropcap bounds
*/
if (!memcmp(rect, cam_rect, sizeof(*rect))) {
- /* Even if camera S_CROP failed, but camera rectangle matches */
- dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n",
+ /* Even if camera S_SELECTION failed, the camera rectangle matches */
+ dev_dbg(dev, "Camera S_SELECTION successful for %dx%d@%d:%d\n",
rect->width, rect->height, rect->left, rect->top);
*target_rect = *cam_rect;
return 0;
}
/* Try to fix cropping, that camera hasn't managed to set */
- dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n",
+ dev_geo(dev, "Fix camera S_SELECTION for %dx%d@%d:%d to %dx%d@%d:%d\n",
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top,
rect->width, rect->height, rect->left, rect->top);
/* We need sensor maximum rectangle */
- ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &bounds);
if (ret < 0)
return ret;
/* Put user requested rectangle within sensor bounds */
- soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
- cap.bounds.width);
- soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
- cap.bounds.height);
+ soc_camera_limit_side(&rect->left, &rect->width, sdsel.r.left, 2,
+ bounds.r.width);
+ soc_camera_limit_side(&rect->top, &rect->height, sdsel.r.top, 4,
+ bounds.r.height);
/*
* Popular special case - some cameras can only handle fixed sizes like
@@ -150,7 +158,7 @@ int soc_camera_client_s_crop(struct v4l2_subdev *sd,
*/
while (!ret && (is_smaller(cam_rect, rect) ||
is_inside(cam_rect, rect)) &&
- (cap.bounds.width > width || cap.bounds.height > height)) {
+ (bounds.r.width > width || bounds.r.height > height)) {
width *= 2;
height *= 2;
@@ -168,36 +176,40 @@ int soc_camera_client_s_crop(struct v4l2_subdev *sd,
* Instead we just drop to the left and top bounds.
*/
if (cam_rect->left > rect->left)
- cam_rect->left = cap.bounds.left;
+ cam_rect->left = bounds.r.left;
if (cam_rect->left + cam_rect->width < rect->left + rect->width)
cam_rect->width = rect->left + rect->width -
cam_rect->left;
if (cam_rect->top > rect->top)
- cam_rect->top = cap.bounds.top;
+ cam_rect->top = bounds.r.top;
if (cam_rect->top + cam_rect->height < rect->top + rect->height)
cam_rect->height = rect->top + rect->height -
cam_rect->top;
- v4l2_subdev_call(sd, video, s_crop, cam_crop);
+ sdsel.r = *cam_rect;
+ v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+ *cam_rect = sdsel.r;
ret = soc_camera_client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret,
+ dev_geo(dev, "Camera S_SELECTION %d for %dx%d@%d:%d\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
- /* S_CROP must not modify the rectangle */
+ /* S_SELECTION must not modify the rectangle */
if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
/*
* The camera failed to configure a suitable cropping,
* we cannot use the current rectangle, set to max
*/
- *cam_rect = cap.bounds;
- v4l2_subdev_call(sd, video, s_crop, cam_crop);
+ sdsel.r = bounds.r;
+ v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
+ *cam_rect = sdsel.r;
+
ret = soc_camera_client_g_rect(sd, cam_rect);
- dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret,
+ dev_geo(dev, "Camera S_SELECTION %d for max %dx%d@%d:%d\n", ret,
cam_rect->width, cam_rect->height,
cam_rect->left, cam_rect->top);
}
@@ -209,7 +221,7 @@ int soc_camera_client_s_crop(struct v4l2_subdev *sd,
return ret;
}
-EXPORT_SYMBOL(soc_camera_client_s_crop);
+EXPORT_SYMBOL(soc_camera_client_s_selection);
/* Iterative set_fmt, also updates cached client crop on success */
static int client_set_fmt(struct soc_camera_device *icd,
@@ -221,7 +233,10 @@ static int client_set_fmt(struct soc_camera_device *icd,
struct device *dev = icd->parent;
struct v4l2_mbus_framefmt *mf = &format->format;
unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
- struct v4l2_cropcap cap;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
bool host_1to1;
int ret;
@@ -243,16 +258,14 @@ static int client_set_fmt(struct soc_camera_device *icd,
if (!host_can_scale)
goto update_cache;
- cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = v4l2_subdev_call(sd, video, cropcap, &cap);
+ ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
if (ret < 0)
return ret;
- if (max_width > cap.bounds.width)
- max_width = cap.bounds.width;
- if (max_height > cap.bounds.height)
- max_height = cap.bounds.height;
+ if (max_width > sdsel.r.width)
+ max_width = sdsel.r.width;
+ if (max_height > sdsel.r.height)
+ max_height = sdsel.r.height;
/* Camera set a format, but geometry is not precise, try to improve */
tmp_w = mf->width;
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.h b/drivers/media/platform/soc_camera/soc_scale_crop.h
index 184a30dff541..9ca469312a1f 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.h
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.h
@@ -16,7 +16,7 @@
struct soc_camera_device;
-struct v4l2_crop;
+struct v4l2_selection;
struct v4l2_mbus_framefmt;
struct v4l2_pix_format;
struct v4l2_rect;
@@ -31,8 +31,8 @@ static inline unsigned int soc_camera_shift_scale(unsigned int size,
#define soc_camera_calc_scale(in, shift, out) soc_camera_shift_scale(in, shift, out)
int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect);
-int soc_camera_client_s_crop(struct v4l2_subdev *sd,
- struct v4l2_crop *crop, struct v4l2_crop *cam_crop,
+int soc_camera_client_s_selection(struct v4l2_subdev *sd,
+ struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
struct v4l2_rect *target_rect, struct v4l2_rect *subrect);
int soc_camera_client_scale(struct soc_camera_device *icd,
struct v4l2_rect *rect, struct v4l2_rect *subrect,
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 3b1ac687d0df..45f82b5ddd77 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -527,7 +527,7 @@ static void bdisp_stop_streaming(struct vb2_queue *q)
pm_runtime_put(ctx->bdisp_dev->dev);
}
-static struct vb2_ops bdisp_qops = {
+static const struct vb2_ops bdisp_qops = {
.queue_setup = bdisp_queue_setup,
.buf_prepare = bdisp_buf_prepare,
.buf_queue = bdisp_buf_queue,
diff --git a/drivers/media/platform/sti/hva/Makefile b/drivers/media/platform/sti/hva/Makefile
new file mode 100644
index 000000000000..ffb69cebaef3
--- /dev/null
+++ b/drivers/media/platform/sti/hva/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VIDEO_STI_HVA) := st-hva.o
+st-hva-y := hva-v4l2.o hva-hw.o hva-mem.o hva-h264.o
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c
new file mode 100644
index 000000000000..8cc8467c0cd3
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-h264.c
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "hva.h"
+#include "hva-hw.h"
+
+#define MAX_SPS_PPS_SIZE 128
+
+#define BITSTREAM_OFFSET_MASK 0x7F
+
+/* video max size */
+#define H264_MAX_SIZE_W 1920
+#define H264_MAX_SIZE_H 1920
+
+/* macroblock count (width & height) */
+#define MB_W(w) ((w + 0xF) / 0x10)
+#define MB_H(h) ((h + 0xF) / 0x10)
+
+/* formula to get temporal or spatial data size */
+#define DATA_SIZE(w, h) (MB_W(w) * MB_H(h) * 16)
+
+#define SEARCH_WINDOW_BUFFER_MAX_SIZE(w) ((4 * MB_W(w) + 42) * 256 * 3 / 2)
+#define CABAC_CONTEXT_BUFFER_MAX_SIZE(w) (MB_W(w) * 16)
+#define CTX_MB_BUFFER_MAX_SIZE(w) (MB_W(w) * 16 * 8)
+#define SLICE_HEADER_SIZE (4 * 16)
+#define BRC_DATA_SIZE (5 * 16)
+
+/* source buffer copy in YUV 420 MB-tiled format with size=16*256*3/2 */
+#define CURRENT_WINDOW_BUFFER_MAX_SIZE (16 * 256 * 3 / 2)
+
+/*
+ * 4 lines of pixels (in Luma, Chroma blue and Chroma red) of top MB
+ * for deblocking with size=4*16*MBx*2
+ */
+#define LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(w) (4 * 16 * MB_W(w) * 2)
+
+/* factor for bitrate and cpb buffer size max values if profile >= high */
+#define H264_FACTOR_HIGH 1200
+
+/* factor for bitrate and cpb buffer size max values if profile < high */
+#define H264_FACTOR_BASELINE 1000
+
+/* number of bytes for NALU_TYPE_FILLER_DATA header and footer */
+#define H264_FILLER_DATA_SIZE 6
+
+struct h264_profile {
+ enum v4l2_mpeg_video_h264_level level;
+ u32 max_mb_per_seconds;
+ u32 max_frame_size;
+ u32 max_bitrate;
+ u32 max_cpb_size;
+ u32 min_comp_ratio;
+};
+
+static const struct h264_profile h264_infos_list[] = {
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_0, 1485, 99, 64, 175, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1B, 1485, 99, 128, 350, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_1, 3000, 396, 192, 500, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_2, 6000, 396, 384, 1000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_3, 11880, 396, 768, 2000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_2_0, 11880, 396, 2000, 2000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_2_1, 19800, 792, 4000, 4000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_2_2, 20250, 1620, 4000, 4000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_3_0, 40500, 1620, 10000, 10000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_3_1, 108000, 3600, 14000, 14000, 4},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_3_2, 216000, 5120, 20000, 20000, 4},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_4_0, 245760, 8192, 20000, 25000, 4},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_4_1, 245760, 8192, 50000, 62500, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_4_2, 522240, 8704, 50000, 62500, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 589824, 22080, 135000, 135000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_5_1, 983040, 36864, 240000, 240000, 2}
+};
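+
+/*
+ * Note: the table above is indexed directly by
+ * enum v4l2_mpeg_video_h264_level, whose values start at 0 for
+ * level 1.0 and follow the same row order as this array.
+ */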
+
+enum hva_brc_type {
+ BRC_TYPE_NONE = 0,
+ BRC_TYPE_CBR = 1,
+ BRC_TYPE_VBR = 2,
+ BRC_TYPE_VBR_LOW_DELAY = 3
+};
+
+enum hva_entropy_coding_mode {
+ CAVLC = 0,
+ CABAC = 1
+};
+
+enum hva_picture_coding_type {
+ PICTURE_CODING_TYPE_I = 0,
+ PICTURE_CODING_TYPE_P = 1,
+ PICTURE_CODING_TYPE_B = 2
+};
+
+enum hva_h264_sampling_mode {
+ SAMPLING_MODE_NV12 = 0,
+ SAMPLING_MODE_UYVY = 1,
+ SAMPLING_MODE_RGB3 = 3,
+ SAMPLING_MODE_XRGB4 = 4,
+ SAMPLING_MODE_NV21 = 8,
+ SAMPLING_MODE_VYUY = 9,
+ SAMPLING_MODE_BGR3 = 11,
+ SAMPLING_MODE_XBGR4 = 12,
+ SAMPLING_MODE_RGBX4 = 20,
+ SAMPLING_MODE_BGRX4 = 28
+};
+
+enum hva_h264_nalu_type {
+ NALU_TYPE_UNKNOWN = 0,
+ NALU_TYPE_SLICE = 1,
+ NALU_TYPE_SLICE_DPA = 2,
+ NALU_TYPE_SLICE_DPB = 3,
+ NALU_TYPE_SLICE_DPC = 4,
+ NALU_TYPE_SLICE_IDR = 5,
+ NALU_TYPE_SEI = 6,
+ NALU_TYPE_SPS = 7,
+ NALU_TYPE_PPS = 8,
+ NALU_TYPE_AU_DELIMITER = 9,
+ NALU_TYPE_SEQ_END = 10,
+ NALU_TYPE_STREAM_END = 11,
+ NALU_TYPE_FILLER_DATA = 12,
+ NALU_TYPE_SPS_EXT = 13,
+ NALU_TYPE_PREFIX_UNIT = 14,
+ NALU_TYPE_SUBSET_SPS = 15,
+ NALU_TYPE_SLICE_AUX = 19,
+ NALU_TYPE_SLICE_EXT = 20
+};
+
+enum hva_h264_sei_payload_type {
+ SEI_BUFFERING_PERIOD = 0,
+ SEI_PICTURE_TIMING = 1,
+ SEI_STEREO_VIDEO_INFO = 21,
+ SEI_FRAME_PACKING_ARRANGEMENT = 45
+};
+
+/**
+ * struct hva_h264_stereo_video_sei - stereo video info SEI payload
+ */
+struct hva_h264_stereo_video_sei {
+ u8 field_views_flag;
+ u8 top_field_is_left_view_flag;
+ u8 current_frame_is_left_view_flag;
+ u8 next_frame_is_second_view_flag;
+ u8 left_view_self_contained_flag;
+ u8 right_view_self_contained_flag;
+};
+
+/**
+ * struct hva_h264_td - task descriptor for the H264 encoder
+ *
+ * @frame_width: width in pixels of the buffer containing the input frame
+ * @frame_height: height in pixels of the buffer containing the input frame
+ * @frame_num: the parameter to be written in the slice header
+ * @picture_coding_type: type I, P or B
+ * @pic_order_cnt_type: POC mode, as defined in H264 std : can be 0,1,2
+ * @first_picture_in_sequence: flag telling to encoder that this is the
+ * first picture in a video sequence.
+ * Used for VBR
+ * @slice_size_type: 0 = no constraint to close the slice
+ * 1 = a slice is closed as soon as the slice_mb_size limit
+ * is reached
+ * 2 = a slice is closed as soon as the slice_byte_size limit
+ * is reached
+ * 3 = a slice is closed as soon as either the slice_byte_size
+ * limit or the slice_mb_size limit is reached
+ * @slice_mb_size: defines the slice size in number of macroblocks
+ * (used when slice_size_type=1 or slice_size_type=3)
+ * @ir_param_option: defines the number of macroblocks per frame to be
+ * refreshed by the AIR algorithm OR the refresh period
+ * of the CIR algorithm
+ * @intra_refresh_type: selects the intra refresh algorithm.
+ * Disabled=0 / Adaptive=1 / Cyclic=2
+ * @use_constrained_intra_flag: constrained_intra_pred_flag from PPS
+ * @transform_mode: controls the use of 4x4/8x8 transform mode
+ * @disable_deblocking_filter_idc:
+ * 0: specifies that all luma and chroma block edges of
+ * the slice are filtered.
+ * 1: specifies that deblocking is disabled for all block
+ * edges of the slice.
+ * 2: specifies that all luma and chroma block edges of
+ * the slice are filtered with exception of the block edges
+ * that coincide with slice boundaries
+ * @slice_alpha_c0_offset_div2: to be written in slice header,
+ * controls deblocking
+ * @slice_beta_offset_div2: to be written in slice header,
+ * controls deblocking
+ * @encoder_complexity: encoder complexity control (IME).
+ * 0 = I_16x16, P_16x16, Full ME Complexity
+ * 1 = I_16x16, I_NxN, P_16x16, Full ME Complexity
+ * 2 = I_16x16, I_NXN, P_16x16, P_WxH, Full ME Complexity
+ * 4 = I_16x16, P_16x16, Reduced ME Complexity
+ * 5 = I_16x16, I_NxN, P_16x16, Reduced ME Complexity
+ * 6 = I_16x16, I_NXN, P_16x16, P_WxH, Reduced ME Complexity
+ * @chroma_qp_index_offset: coming from picture parameter set
+ * (PPS see [H.264 STD] 7.4.2.2)
+ * @entropy_coding_mode: entropy coding mode.
+ * 0 = CAVLC
+ * 1 = CABAC
+ * @brc_type: selects the bit-rate control algorithm
+ * 0 = constant Qp, (no BRC)
+ * 1 = CBR
+ * 2 = VBR
+ * @quant: quantization param used in case of fixed QP encoding (no BRC)
+ * @non_vcl_nalu_size: size of non-VCL NALUs (SPS, PPS, filler),
+ * used by BRC
+ * @cpb_buffer_size: size of Coded Picture Buffer, used by BRC
+ * @bit_rate: target bitrate, for BRC
+ * @qp_min: min QP threshold
+ * @qp_max: max QP threshold
+ * @framerate_num: target framerate numerator, used by BRC
+ * @framerate_den: target framerate denominator, used by BRC
+ * @delay: End-to-End Initial Delay
+ * @strict_hrd_compliancy: flag for strict HRD compliance (1);
+ * may impact encoding quality
+ * @addr_source_buffer: address of input frame buffer for current frame
+ * @addr_fwd_ref_buffer: address of reference frame buffer
+ * @addr_rec_buffer: address of reconstructed frame buffer
+ * @addr_output_bitstream_start: output bitstream start address
+ * @addr_output_bitstream_end: output bitstream end address
+ * @addr_external_sw: address of external search window
+ * @addr_lctx: address of context picture buffer
+ * @addr_local_rec_buffer: address of local reconstructed buffer
+ * @addr_spatial_context: address of spatial context buffer
+ * @bitstream_offset: offset in bits between aligned bitstream start
+ * address and first bit to be written by HVA.
+ * Range value is [0..63]
+ * @sampling_mode: input picture format.
+ * 0: YUV420 semi_planar Interleaved
+ * 1: YUV422 raster Interleaved
+ * @addr_param_out: address of output parameters structure
+ * @addr_scaling_matrix: address to the coefficient of
+ * the inverse scaling matrix
+ * @addr_scaling_matrix_dir: address to the coefficient of
+ * the direct scaling matrix
+ * @addr_cabac_context_buffer: address of cabac context buffer
+ * @gmv_x: input information about the horizontal global displacement of
+ * the encoded frame versus the previous one
+ * @gmv_y: input information about the vertical global displacement of
+ * the encoded frame versus the previous one
+ * @window_width: width in pixels of the window to be encoded inside
+ * the input frame
+ * @window_height: height in pixels of the window to be encoded inside
+ * the input frame
+ * @window_horizontal_offset: horizontal offset in pels for input window
+ * within input frame
+ * @window_vertical_offset: vertical offset in pels for input window
+ * within input frame
+ * @addr_roi: Map of QP offset for the Region of Interest algorithm and
+ * also used for Error map.
+ * Bit 0-6 used for qp offset (value -64 to 63).
+ * Bit 7 used to force intra
+ * @addr_slice_header: address to slice header
+ * @slice_header_size_in_bits: size in bits of the Slice header
+ * @slice_header_offset0: Slice header offset where to insert
+ * first_Mb_in_slice
+ * @slice_header_offset1: Slice header offset where to insert
+ * slice_qp_delta
+ * @slice_header_offset2: Slice header offset where to insert
+ * num_MBs_in_slice
+ * @slice_synchro_enable: enable "slice ready" interrupt after each slice
+ * @max_slice_number: Maximum number of slice in a frame
+ * (0 is strictly forbidden)
+ * @rgb2_yuv_y_coeff: Four coefficients (C0C1C2C3) to convert from RGB to
+ * YUV for the Y component.
+ * Y = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @rgb2_yuv_u_coeff: four coefficients (C0C1C2C3) to convert from RGB to
+ * YUV for the U (Cb) component.
+ * U = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @rgb2_yuv_v_coeff: four coefficients (C0C1C2C3) to convert from RGB to
+ * YUV for the V (Cr) component.
+ * V = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @slice_byte_size: maximum slice size in bytes
+ * (used when slice_size_type=2 or slice_size_type=3)
+ * @max_air_intra_mb_nb: Maximum number of intra macroblock in a frame
+ * for the AIR algorithm
+ * @brc_no_skip: Disable skipping in the Bitrate Controller
+ * @addr_brc_in_out_parameter: address of static buffer for BRC parameters
+ */
+struct hva_h264_td {
+ u16 frame_width;
+ u16 frame_height;
+ u32 frame_num;
+ u16 picture_coding_type;
+ u16 reserved1;
+ u16 pic_order_cnt_type;
+ u16 first_picture_in_sequence;
+ u16 slice_size_type;
+ u16 reserved2;
+ u32 slice_mb_size;
+ u16 ir_param_option;
+ u16 intra_refresh_type;
+ u16 use_constrained_intra_flag;
+ u16 transform_mode;
+ u16 disable_deblocking_filter_idc;
+ s16 slice_alpha_c0_offset_div2;
+ s16 slice_beta_offset_div2;
+ u16 encoder_complexity;
+ s16 chroma_qp_index_offset;
+ u16 entropy_coding_mode;
+ u16 brc_type;
+ u16 quant;
+ u32 non_vcl_nalu_size;
+ u32 cpb_buffer_size;
+ u32 bit_rate;
+ u16 qp_min;
+ u16 qp_max;
+ u16 framerate_num;
+ u16 framerate_den;
+ u16 delay;
+ u16 strict_hrd_compliancy;
+ u32 addr_source_buffer;
+ u32 addr_fwd_ref_buffer;
+ u32 addr_rec_buffer;
+ u32 addr_output_bitstream_start;
+ u32 addr_output_bitstream_end;
+ u32 addr_external_sw;
+ u32 addr_lctx;
+ u32 addr_local_rec_buffer;
+ u32 addr_spatial_context;
+ u16 bitstream_offset;
+ u16 sampling_mode;
+ u32 addr_param_out;
+ u32 addr_scaling_matrix;
+ u32 addr_scaling_matrix_dir;
+ u32 addr_cabac_context_buffer;
+ u32 reserved3;
+ u32 reserved4;
+ s16 gmv_x;
+ s16 gmv_y;
+ u16 window_width;
+ u16 window_height;
+ u16 window_horizontal_offset;
+ u16 window_vertical_offset;
+ u32 addr_roi;
+ u32 addr_slice_header;
+ u16 slice_header_size_in_bits;
+ u16 slice_header_offset0;
+ u16 slice_header_offset1;
+ u16 slice_header_offset2;
+ u32 reserved5;
+ u32 reserved6;
+ u16 reserved7;
+ u16 reserved8;
+ u16 slice_synchro_enable;
+ u16 max_slice_number;
+ u32 rgb2_yuv_y_coeff;
+ u32 rgb2_yuv_u_coeff;
+ u32 rgb2_yuv_v_coeff;
+ u32 slice_byte_size;
+ u16 max_air_intra_mb_nb;
+ u16 brc_no_skip;
+ u32 addr_temporal_context;
+ u32 addr_brc_in_out_parameter;
+};
+
+/**
+ * struct hva_h264_slice_po - per-slice output parameters
+ *
+ * @slice_size: slice size
+ * @slice_start_time: start time
+ * @slice_end_time: end time
+ * @slice_num: slice number
+ */
+struct hva_h264_slice_po {
+ u32 slice_size;
+ u32 slice_start_time;
+ u32 slice_end_time;
+ u32 slice_num;
+};
+
+/**
+ * struct hva_h264_po - frame output parameters
+ *
+ * @bitstream_size: bitstream size
+ * @dct_bitstream_size: dct bitstream size
+ * @stuffing_bits: number of stuffing bits inserted by the encoder
+ * @removal_time: removal time of current frame (number of 1/framerate ticks)
+ * @hvc_start_time: hvc start time
+ * @hvc_stop_time: hvc stop time
+ * @slice_count: slice count
+ */
+struct hva_h264_po {
+ u32 bitstream_size;
+ u32 dct_bitstream_size;
+ u32 stuffing_bits;
+ u32 removal_time;
+ u32 hvc_start_time;
+ u32 hvc_stop_time;
+ u32 slice_count;
+ u32 reserved0;
+ struct hva_h264_slice_po slice_params[16];
+};
+
+struct hva_h264_task {
+ struct hva_h264_td td;
+ struct hva_h264_po po;
+};
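+
+/*
+ * Note: the task descriptor and its output parameters share a single
+ * DMA buffer: the hardware reads the td at the buffer base and writes
+ * the po at offsetof(struct hva_h264_task, po), which is the address
+ * programmed into td->addr_param_out by hva_h264_prepare_task().
+ */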
+
+/**
+ * struct hva_h264_ctx - encoder context buffers
+ *
+ * @seq_info: sequence information buffer
+ * @ref_frame: reference frame buffer
+ * @rec_frame: reconstructed frame buffer
+ * @task: task descriptor
+ */
+struct hva_h264_ctx {
+ struct hva_buffer *seq_info;
+ struct hva_buffer *ref_frame;
+ struct hva_buffer *rec_frame;
+ struct hva_buffer *task;
+};
+
+static int hva_h264_fill_slice_header(struct hva_ctx *pctx,
+ u8 *slice_header_addr,
+ struct hva_controls *ctrls,
+ int frame_num,
+ u16 *header_size,
+ u16 *header_offset0,
+ u16 *header_offset1,
+ u16 *header_offset2)
+{
+ /*
+ * with this HVA hardware version, part of the slice header is computed
+ * by the host and part by the hardware.
+ * The host part is precomputed and made available through this array.
+ */
+ struct device *dev = ctx_to_dev(pctx);
+ int cabac = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC;
+ const unsigned char slice_header[] = { 0x00, 0x00, 0x00, 0x01,
+ 0x41, 0x34, 0x07, 0x00};
+ int idr_pic_id = frame_num % 2;
+ enum hva_picture_coding_type type;
+ u32 frame_order = frame_num % ctrls->gop_size;
+
+ if (!(frame_num % ctrls->gop_size))
+ type = PICTURE_CODING_TYPE_I;
+ else
+ type = PICTURE_CODING_TYPE_P;
+
+ memcpy(slice_header_addr, slice_header, sizeof(slice_header));
+
+ *header_size = 56;
+ *header_offset0 = 40;
+ *header_offset1 = 13;
+ *header_offset2 = 0;
+
+ if (type == PICTURE_CODING_TYPE_I) {
+ slice_header_addr[4] = 0x65;
+ slice_header_addr[5] = 0x11;
+
+ /* toggle the I frame */
+ if ((frame_num / ctrls->gop_size) % 2) {
+ *header_size += 4;
+ *header_offset1 += 4;
+ slice_header_addr[6] = 0x04;
+ slice_header_addr[7] = 0x70;
+
+ } else {
+ *header_size += 2;
+ *header_offset1 += 2;
+ slice_header_addr[6] = 0x09;
+ slice_header_addr[7] = 0xC0;
+ }
+ } else {
+ if (ctrls->entropy_mode == cabac) {
+ *header_size += 1;
+ *header_offset1 += 1;
+ slice_header_addr[7] = 0x80;
+ }
+ /*
+ * update slice header with P frame order
+ * frame order is limited to 16 (coded on 4bits only)
+ */
+ slice_header_addr[5] += ((frame_order & 0x0C) >> 2);
+ slice_header_addr[6] += ((frame_order & 0x03) << 6);
+ }
+
+ dev_dbg(dev,
+ "%s %s slice header order %d idrPicId %d header size %d\n",
+ pctx->name, __func__, frame_order, idr_pic_id, *header_size);
+ return 0;
+}
+
+static int hva_h264_fill_data_nal(struct hva_ctx *pctx,
+ unsigned int stuffing_bytes, u8 *addr,
+ unsigned int stream_size, unsigned int *size)
+{
+ struct device *dev = ctx_to_dev(pctx);
+ const u8 start[] = { 0x00, 0x00, 0x00, 0x01 };
+
+ dev_dbg(dev, "%s %s stuffing bytes %d\n", pctx->name, __func__,
+ stuffing_bytes);
+
+ if ((*size + stuffing_bytes + H264_FILLER_DATA_SIZE) > stream_size) {
+ dev_dbg(dev, "%s %s too many stuffing bytes %d\n",
+ pctx->name, __func__, stuffing_bytes);
+ return 0;
+ }
+
+ /* start code */
+ memcpy(addr + *size, start, sizeof(start));
+ *size += sizeof(start);
+
+ /* nal_unit_type */
+ addr[*size] = NALU_TYPE_FILLER_DATA;
+ *size += 1;
+
+ memset(addr + *size, 0xff, stuffing_bytes);
+ *size += stuffing_bytes;
+
+ addr[*size] = 0x80;
+ *size += 1;
+
+ return 0;
+}
+
+static int hva_h264_fill_sei_nal(struct hva_ctx *pctx,
+ enum hva_h264_sei_payload_type type,
+ u8 *addr, u32 *size)
+{
+ struct device *dev = ctx_to_dev(pctx);
+ const u8 start[] = { 0x00, 0x00, 0x00, 0x01 };
+ struct hva_h264_stereo_video_sei info;
+ u8 offset = 7;
+ u8 msg = 0;
+
+ /* start code */
+ memcpy(addr + *size, start, sizeof(start));
+ *size += sizeof(start);
+
+ /* nal_unit_type */
+ addr[*size] = NALU_TYPE_SEI;
+ *size += 1;
+
+ /* payload type */
+ addr[*size] = type;
+ *size += 1;
+
+ switch (type) {
+ case SEI_STEREO_VIDEO_INFO:
+ memset(&info, 0, sizeof(info));
+
+ /* set to top/bottom frame packing arrangement */
+ info.field_views_flag = 1;
+ info.top_field_is_left_view_flag = 1;
+
+ /* payload size */
+ addr[*size] = 1;
+ *size += 1;
+
+ /* payload */
+ msg = info.field_views_flag << offset--;
+
+ if (info.field_views_flag) {
+ msg |= info.top_field_is_left_view_flag <<
+ offset--;
+ } else {
+ msg |= info.current_frame_is_left_view_flag <<
+ offset--;
+ msg |= info.next_frame_is_second_view_flag <<
+ offset--;
+ }
+ msg |= info.left_view_self_contained_flag << offset--;
+ msg |= info.right_view_self_contained_flag << offset--;
+
+ addr[*size] = msg;
+ *size += 1;
+
+ addr[*size] = 0x80;
+ *size += 1;
+
+ return 0;
+ case SEI_BUFFERING_PERIOD:
+ case SEI_PICTURE_TIMING:
+ case SEI_FRAME_PACKING_ARRANGEMENT:
+ default:
+ dev_err(dev, "%s sei nal type not supported %d\n",
+ pctx->name, type);
+ return -EINVAL;
+ }
+}
+
+static int hva_h264_prepare_task(struct hva_ctx *pctx,
+ struct hva_h264_task *task,
+ struct hva_frame *frame,
+ struct hva_stream *stream)
+{
+ struct hva_dev *hva = ctx_to_hdev(pctx);
+ struct device *dev = ctx_to_dev(pctx);
+ struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct hva_buffer *seq_info = ctx->seq_info;
+ struct hva_buffer *fwd_ref_frame = ctx->ref_frame;
+ struct hva_buffer *loc_rec_frame = ctx->rec_frame;
+ struct hva_h264_td *td = &task->td;
+ struct hva_controls *ctrls = &pctx->ctrls;
+ struct v4l2_fract *time_per_frame = &pctx->ctrls.time_per_frame;
+ int cavlc = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
+ u32 frame_num = pctx->stream_num;
+ u32 addr_esram = hva->esram_addr;
+ enum v4l2_mpeg_video_h264_level level;
+ dma_addr_t paddr = 0;
+ u8 *slice_header_vaddr;
+ u32 frame_width = frame->info.aligned_width;
+ u32 frame_height = frame->info.aligned_height;
+ u32 max_cpb_buffer_size;
+ unsigned int payload = stream->bytesused;
+ u32 max_bitrate;
+
+ /* check width and height parameters */
+ if ((frame_width > max(H264_MAX_SIZE_W, H264_MAX_SIZE_H)) ||
+ (frame_height > max(H264_MAX_SIZE_W, H264_MAX_SIZE_H))) {
+ dev_err(dev,
+ "%s width(%d) or height(%d) exceeds limits (%dx%d)\n",
+ pctx->name, frame_width, frame_height,
+ H264_MAX_SIZE_W, H264_MAX_SIZE_H);
+ return -EINVAL;
+ }
+
+ level = ctrls->level;
+
+ memset(td, 0, sizeof(struct hva_h264_td));
+
+ td->frame_width = frame_width;
+ td->frame_height = frame_height;
+
+ /* set the encoding window to the full aligned frame */
+ td->window_width = frame_width;
+ td->window_height = frame_height;
+ td->window_horizontal_offset = 0;
+ td->window_vertical_offset = 0;
+
+ td->first_picture_in_sequence = (!frame_num) ? 1 : 0;
+
+ /* pic_order_cnt_type hard coded to '2' as only I & P frames */
+ td->pic_order_cnt_type = 2;
+
+ /* use_constrained_intra_flag set to false for better coding efficiency */
+ td->use_constrained_intra_flag = false;
+ td->brc_type = (ctrls->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ ? BRC_TYPE_CBR : BRC_TYPE_VBR;
+
+ td->entropy_coding_mode = (ctrls->entropy_mode == cavlc) ? CAVLC :
+ CABAC;
+
+ td->bit_rate = ctrls->bitrate;
+
+ /* set framerate: framerate = 1 / time_per_frame */
+ if (time_per_frame->numerator >= 536) {
+ /*
+ * due to a hardware bug, framerate denominator can't exceed
+ * 536 (BRC overflow). Compute the nearest integer framerate
+ */
+ td->framerate_den = 1;
+ td->framerate_num = (time_per_frame->denominator +
+ (time_per_frame->numerator >> 1) - 1) /
+ time_per_frame->numerator;
+
+ /*
+ * update bitrate to introduce a correction due to
+ * the new framerate
+ * new bitrate = (old bitrate * new framerate) / old framerate
+ */
+ td->bit_rate /= time_per_frame->numerator;
+ td->bit_rate *= time_per_frame->denominator;
+ td->bit_rate /= td->framerate_num;
+ } else {
+ td->framerate_den = time_per_frame->numerator;
+ td->framerate_num = time_per_frame->denominator;
+ }
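+
+ /*
+ * e.g. time_per_frame = 1000/30000 (30 fps): the numerator is
+ * >= 536, so framerate_num = (30000 + 500 - 1) / 1000 = 30 and
+ * framerate_den = 1; the bitrate rescaling is a no-op here since
+ * the reduction is exact.
+ */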
+
+ /* compute maximum bitrate depending on profile */
+ if (ctrls->profile >= V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+ max_bitrate = h264_infos_list[level].max_bitrate *
+ H264_FACTOR_HIGH;
+ else
+ max_bitrate = h264_infos_list[level].max_bitrate *
+ H264_FACTOR_BASELINE;
+
+ /* check if bitrate doesn't exceed max size */
+ if (td->bit_rate > max_bitrate) {
+ dev_dbg(dev,
+ "%s bitrate (%d) larger than level and profile allow, clip to %d\n",
+ pctx->name, td->bit_rate, max_bitrate);
+ td->bit_rate = max_bitrate;
+ }
+
+ /* convert cpb_buffer_size in bits */
+ td->cpb_buffer_size = ctrls->cpb_size * 8000;
+
+ /* compute maximum cpb buffer size depending on profile */
+ if (ctrls->profile >= V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+ max_cpb_buffer_size =
+ h264_infos_list[level].max_cpb_size * H264_FACTOR_HIGH;
+ else
+ max_cpb_buffer_size =
+ h264_infos_list[level].max_cpb_size * H264_FACTOR_BASELINE;
+
+ /* check if cpb buffer size doesn't exceed max size */
+ if (td->cpb_buffer_size > max_cpb_buffer_size) {
+ dev_dbg(dev,
+ "%s cpb size larger than level %d allows, clip to %d\n",
+ pctx->name, td->cpb_buffer_size, max_cpb_buffer_size);
+ td->cpb_buffer_size = max_cpb_buffer_size;
+ }
+
+ /* enable skipping in the Bitrate Controller */
+ td->brc_no_skip = 0;
+
+ /* initial delay */
+ if ((ctrls->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) &&
+ td->bit_rate)
+ td->delay = 1000 * (td->cpb_buffer_size / td->bit_rate);
+ else
+ td->delay = 0;
+
+ switch (frame->info.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ td->sampling_mode = SAMPLING_MODE_NV12;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ td->sampling_mode = SAMPLING_MODE_NV21;
+ break;
+ default:
+ dev_err(dev, "%s invalid source pixel format\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ /*
+ * fill color converter matrix (RGB to YUV)
+ * Y = 0.299 R + 0.587 G + 0.114 B
+ * Cb = -0.1687 R - 0.3313 G + 0.5 B + 128
+ * Cr = 0.5 R - 0.4187 G - 0.0813 B + 128
+ */
+ td->rgb2_yuv_y_coeff = 0x12031008;
+ td->rgb2_yuv_u_coeff = 0x800EF7FB;
+ td->rgb2_yuv_v_coeff = 0x80FEF40E;
+
+ /* enable/disable transform mode */
+ td->transform_mode = ctrls->dct8x8;
+
+ /* encoder complexity fixed to 2, ENCODE_I_16x16_I_NxN_P_16x16_P_WxH */
+ td->encoder_complexity = 2;
+
+ /* quant fixed to 28, the default VBR value */
+ td->quant = 28;
+
+ if (td->framerate_den == 0) {
+ dev_err(dev, "%s invalid framerate\n", pctx->name);
+ return -EINVAL;
+ }
+
+ /* if automatic framerate, deactivate bitrate controller */
+ if (td->framerate_num == 0)
+ td->brc_type = 0;
+
+ /* strict HRD compliance fixed to true */
+ td->strict_hrd_compliancy = 1;
+
+ /* set minimum & maximum quantizers */
+ td->qp_min = clamp_val(ctrls->qpmin, 0, 51);
+ td->qp_max = clamp_val(ctrls->qpmax, 0, 51);
+
+ td->addr_source_buffer = frame->paddr;
+ td->addr_fwd_ref_buffer = fwd_ref_frame->paddr;
+ td->addr_rec_buffer = loc_rec_frame->paddr;
+
+ td->addr_output_bitstream_end = (u32)stream->paddr + stream->size;
+
+ td->addr_output_bitstream_start = (u32)stream->paddr;
+ td->bitstream_offset = (((u32)stream->paddr & 0xF) << 3) &
+ BITSTREAM_OFFSET_MASK;
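+
+ /*
+ * e.g. a stream buffer at a physical address ending in 0x4:
+ * bitstream_offset = 0x4 << 3 = 32 bits, so HVA starts writing
+ * 4 bytes past the aligned bitstream start address.
+ */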
+
+ td->addr_param_out = (u32)ctx->task->paddr +
+ offsetof(struct hva_h264_task, po);
+
+ /* swap spatial and temporal context */
+ if (frame_num % 2) {
+ paddr = seq_info->paddr;
+ td->addr_spatial_context = ALIGN(paddr, 0x100);
+ paddr = seq_info->paddr + DATA_SIZE(frame_width,
+ frame_height);
+ td->addr_temporal_context = ALIGN(paddr, 0x100);
+ } else {
+ paddr = seq_info->paddr;
+ td->addr_temporal_context = ALIGN(paddr, 0x100);
+ paddr = seq_info->paddr + DATA_SIZE(frame_width,
+ frame_height);
+ td->addr_spatial_context = ALIGN(paddr, 0x100);
+ }
+
+ paddr = seq_info->paddr + 2 * DATA_SIZE(frame_width, frame_height);
+
+ td->addr_brc_in_out_parameter = ALIGN(paddr, 0x100);
+
+ paddr = td->addr_brc_in_out_parameter + BRC_DATA_SIZE;
+ td->addr_slice_header = ALIGN(paddr, 0x100);
+ td->addr_external_sw = ALIGN(addr_esram, 0x100);
+
+ addr_esram += SEARCH_WINDOW_BUFFER_MAX_SIZE(frame_width);
+ td->addr_local_rec_buffer = ALIGN(addr_esram, 0x100);
+
+ addr_esram += LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(frame_width);
+ td->addr_lctx = ALIGN(addr_esram, 0x100);
+
+ addr_esram += CTX_MB_BUFFER_MAX_SIZE(max(frame_width, frame_height));
+ td->addr_cabac_context_buffer = ALIGN(addr_esram, 0x100);
+
+ if (!(frame_num % ctrls->gop_size)) {
+ td->picture_coding_type = PICTURE_CODING_TYPE_I;
+ stream->vbuf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ } else {
+ td->picture_coding_type = PICTURE_CODING_TYPE_P;
+ stream->vbuf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ }
+
+ /* fill the slice header part */
+ slice_header_vaddr = seq_info->vaddr + (td->addr_slice_header -
+ seq_info->paddr);
+
+ hva_h264_fill_slice_header(pctx, slice_header_vaddr, ctrls, frame_num,
+ &td->slice_header_size_in_bits,
+ &td->slice_header_offset0,
+ &td->slice_header_offset1,
+ &td->slice_header_offset2);
+
+ td->chroma_qp_index_offset = 2;
+ td->slice_synchro_enable = 0;
+ td->max_slice_number = 1;
+
+ /*
+ * check the sps/pps header size for key frame only
+ * sps/pps header was previously filled by libv4l
+ * during qbuf of stream buffer
+ */
+ if ((stream->vbuf.flags & V4L2_BUF_FLAG_KEYFRAME) &&
+ (payload > MAX_SPS_PPS_SIZE)) {
+ dev_err(dev, "%s invalid sps/pps size %d\n", pctx->name,
+ payload);
+ return -EINVAL;
+ }
+
+ if (!(stream->vbuf.flags & V4L2_BUF_FLAG_KEYFRAME))
+ payload = 0;
+
+ /* add SEI nal (video stereo info) */
+ if (ctrls->sei_fp && hva_h264_fill_sei_nal(pctx, SEI_STEREO_VIDEO_INFO,
+ (u8 *)stream->vaddr,
+ &payload)) {
+ dev_err(dev, "%s fail to get SEI nal\n", pctx->name);
+ return -EINVAL;
+ }
+
+ /* fill size of non-VCL NAL units (SPS, PPS, filler and SEI) */
+ td->non_vcl_nalu_size = payload * 8;
+
+ /* compute bitstream offset & new start address of bitstream */
+ td->addr_output_bitstream_start += ((payload >> 4) << 4);
+ td->bitstream_offset += (payload - ((payload >> 4) << 4)) * 8;
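+
+ /*
+ * e.g. payload = 37 header bytes: the start address advances by
+ * the 16-byte aligned part (32 bytes) and bitstream_offset grows
+ * by the remaining 5 bytes * 8 = 40 bits.
+ */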
+
+ stream->bytesused = payload;
+
+ return 0;
+}
+
+static unsigned int hva_h264_get_stream_size(struct hva_h264_task *task)
+{
+ struct hva_h264_po *po = &task->po;
+
+ return po->bitstream_size;
+}
+
+static u32 hva_h264_get_stuffing_bytes(struct hva_h264_task *task)
+{
+ struct hva_h264_po *po = &task->po;
+
+ return po->stuffing_bits >> 3;
+}
+
+static int hva_h264_open(struct hva_ctx *pctx)
+{
+ struct device *dev = ctx_to_dev(pctx);
+ struct hva_h264_ctx *ctx;
+ struct hva_dev *hva = ctx_to_hdev(pctx);
+ u32 frame_width = pctx->frameinfo.aligned_width;
+ u32 frame_height = pctx->frameinfo.aligned_height;
+ u32 size;
+ int ret;
+
+ /* check esram size necessary to encode a frame */
+ size = SEARCH_WINDOW_BUFFER_MAX_SIZE(frame_width) +
+ LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(frame_width) +
+ CTX_MB_BUFFER_MAX_SIZE(max(frame_width, frame_height)) +
+ CABAC_CONTEXT_BUFFER_MAX_SIZE(frame_width);
+
+ if (hva->esram_size < size) {
+ dev_err(dev, "%s not enough esram (max:%d request:%d)\n",
+ pctx->name, hva->esram_size, size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* allocate context for codec */
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* allocate sequence info buffer */
+ ret = hva_mem_alloc(pctx,
+ 2 * DATA_SIZE(frame_width, frame_height) +
+ SLICE_HEADER_SIZE +
+ BRC_DATA_SIZE,
+ "hva sequence info",
+ &ctx->seq_info);
+ if (ret) {
+ dev_err(dev,
+ "%s failed to allocate sequence info buffer\n",
+ pctx->name);
+ goto err_ctx;
+ }
+
+ /* allocate reference frame buffer */
+ ret = hva_mem_alloc(pctx,
+ frame_width * frame_height * 3 / 2,
+ "hva reference frame",
+ &ctx->ref_frame);
+ if (ret) {
+ dev_err(dev, "%s failed to allocate reference frame buffer\n",
+ pctx->name);
+ goto err_seq_info;
+ }
+
+ /* allocate reconstructed frame buffer */
+ ret = hva_mem_alloc(pctx,
+ frame_width * frame_height * 3 / 2,
+ "hva reconstructed frame",
+ &ctx->rec_frame);
+ if (ret) {
+ dev_err(dev,
+ "%s failed to allocate reconstructed frame buffer\n",
+ pctx->name);
+ goto err_ref_frame;
+ }
+
+ /* allocate task descriptor */
+ ret = hva_mem_alloc(pctx,
+ sizeof(struct hva_h264_task),
+ "hva task descriptor",
+ &ctx->task);
+ if (ret) {
+ dev_err(dev,
+ "%s failed to allocate task descriptor\n",
+ pctx->name);
+ goto err_rec_frame;
+ }
+
+ pctx->priv = (void *)ctx;
+
+ return 0;
+
+err_rec_frame:
+ hva_mem_free(pctx, ctx->rec_frame);
+err_ref_frame:
+ hva_mem_free(pctx, ctx->ref_frame);
+err_seq_info:
+ hva_mem_free(pctx, ctx->seq_info);
+err_ctx:
+ devm_kfree(dev, ctx);
+err:
+ return ret;
+}
+
+static int hva_h264_close(struct hva_ctx *pctx)
+{
+ struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct device *dev = ctx_to_dev(pctx);
+
+ if (ctx->seq_info)
+ hva_mem_free(pctx, ctx->seq_info);
+
+ if (ctx->ref_frame)
+ hva_mem_free(pctx, ctx->ref_frame);
+
+ if (ctx->rec_frame)
+ hva_mem_free(pctx, ctx->rec_frame);
+
+ if (ctx->task)
+ hva_mem_free(pctx, ctx->task);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
+ struct hva_stream *stream)
+{
+ struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr;
+ struct hva_buffer *tmp_frame;
+ u32 stuffing_bytes = 0;
+ int ret = 0;
+
+ ret = hva_h264_prepare_task(pctx, task, frame, stream);
+ if (ret)
+ goto err;
+
+ ret = hva_hw_execute_task(pctx, H264_ENC, ctx->task);
+ if (ret)
+ goto err;
+
+ pctx->stream_num++;
+ stream->bytesused += hva_h264_get_stream_size(task);
+
+ stuffing_bytes = hva_h264_get_stuffing_bytes(task);
+
+ if (stuffing_bytes)
+ hva_h264_fill_data_nal(pctx, stuffing_bytes,
+ (u8 *)stream->vaddr,
+ stream->size,
+ &stream->bytesused);
+
+ /* switch reference & reconstructed frame */
+ tmp_frame = ctx->ref_frame;
+ ctx->ref_frame = ctx->rec_frame;
+ ctx->rec_frame = tmp_frame;
+
+ return 0;
+err:
+ stream->bytesused = 0;
+ return ret;
+}
+
+const struct hva_enc nv12h264enc = {
+ .name = "H264(NV12)",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .streamformat = V4L2_PIX_FMT_H264,
+ .max_width = H264_MAX_SIZE_W,
+ .max_height = H264_MAX_SIZE_H,
+ .open = hva_h264_open,
+ .close = hva_h264_close,
+ .encode = hva_h264_encode,
+};
+
+const struct hva_enc nv21h264enc = {
+ .name = "H264(NV21)",
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .streamformat = V4L2_PIX_FMT_H264,
+ .max_width = H264_MAX_SIZE_W,
+ .max_height = H264_MAX_SIZE_H,
+ .open = hva_h264_open,
+ .close = hva_h264_close,
+ .encode = hva_h264_encode,
+};
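The two descriptors differ only in the accepted pixel format; the ops define the encoder lifecycle that the v4l2 glue drives. A rough sketch of the expected call order, with error handling trimmed and buffer setup assumed to be done by hva-v4l2.c (the wrapper function is illustrative):

    /* illustrative only: the real sequencing lives in hva-v4l2.c */
    static int encode_session(struct hva_ctx *ctx, const struct hva_enc *enc,
                              struct hva_frame *frame, struct hva_stream *stream)
    {
            int ret = enc->open(ctx);       /* once per streaming session */

            if (ret)
                    return ret;
            ret = enc->encode(ctx, frame, stream);  /* once per frame */
            enc->close(ctx);                /* at streamoff/release */
            return ret;
    }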
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
new file mode 100644
index 000000000000..d341d4994528
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "hva.h"
+#include "hva-hw.h"
+
+/* HVA register offsets */
+#define HVA_HIF_REG_RST 0x0100U
+#define HVA_HIF_REG_RST_ACK 0x0104U
+#define HVA_HIF_REG_MIF_CFG 0x0108U
+#define HVA_HIF_REG_HEC_MIF_CFG 0x010CU
+#define HVA_HIF_REG_CFL 0x0110U
+#define HVA_HIF_FIFO_CMD 0x0114U
+#define HVA_HIF_FIFO_STS 0x0118U
+#define HVA_HIF_REG_SFL 0x011CU
+#define HVA_HIF_REG_IT_ACK 0x0120U
+#define HVA_HIF_REG_ERR_IT_ACK 0x0124U
+#define HVA_HIF_REG_LMI_ERR 0x0128U
+#define HVA_HIF_REG_EMI_ERR 0x012CU
+#define HVA_HIF_REG_HEC_MIF_ERR 0x0130U
+#define HVA_HIF_REG_HEC_STS 0x0134U
+#define HVA_HIF_REG_HVC_STS 0x0138U
+#define HVA_HIF_REG_HJE_STS 0x013CU
+#define HVA_HIF_REG_CNT 0x0140U
+#define HVA_HIF_REG_HEC_CHKSYN_DIS 0x0144U
+#define HVA_HIF_REG_CLK_GATING 0x0148U
+#define HVA_HIF_REG_VERSION 0x014CU
+#define HVA_HIF_REG_BSM 0x0150U
+
+/* define value for version id register (HVA_HIF_REG_VERSION) */
+#define VERSION_ID_MASK 0x0000FFFF
+
+/* define values for BSM register (HVA_HIF_REG_BSM) */
+#define BSM_CFG_VAL1 0x0003F000
+#define BSM_CFG_VAL2 0x003F0000
+
+/* define values for memory interface register (HVA_HIF_REG_MIF_CFG) */
+#define MIF_CFG_VAL1 0x04460446
+#define MIF_CFG_VAL2 0x04460806
+#define MIF_CFG_VAL3 0x00000000
+
+/* define value for HEC memory interface register (HVA_HIF_REG_MIF_CFG) */
+#define HEC_MIF_CFG_VAL 0x000000C4
+
+/* Bits definition for clock gating register (HVA_HIF_REG_CLK_GATING) */
+#define CLK_GATING_HVC BIT(0)
+#define CLK_GATING_HEC BIT(1)
+#define CLK_GATING_HJE BIT(2)
+
+/* fixed hva clock rate */
+#define CLK_RATE 300000000
+
+/* fix delay for pmruntime */
+#define AUTOSUSPEND_DELAY_MS 3
+
+/*
+ * hw encode error values
+ * NO_ERROR: Success, Task OK
+ * H264_BITSTREAM_OVERSIZE: VECH264 Bitstream size > bitstream buffer
+ * H264_FRAME_SKIPPED: VECH264 Frame skipped (refers to CPB Buffer Size)
+ * H264_SLICE_LIMIT_SIZE: VECH264 MB > slice limit size
+ * H264_MAX_SLICE_NUMBER: VECH264 max slice number reached
+ * H264_SLICE_READY: VECH264 Slice ready
+ * TASK_LIST_FULL: HVA/FPC task list full
+ * (discard latest transform command)
+ * UNKNOWN_COMMAND: Transform command not known by HVA/FPC
+ * WRONG_CODEC_OR_RESOLUTION: Wrong Codec or Resolution Selection
+ * NO_INT_COMPLETION: Time-out on interrupt completion
+ * LMI_ERR: Local Memory Interface Error
+ * EMI_ERR: External Memory Interface Error
+ * HECMI_ERR: HEC Memory Interface Error
+ */
+enum hva_hw_error {
+ NO_ERROR = 0x0,
+ H264_BITSTREAM_OVERSIZE = 0x2,
+ H264_FRAME_SKIPPED = 0x4,
+ H264_SLICE_LIMIT_SIZE = 0x5,
+ H264_MAX_SLICE_NUMBER = 0x7,
+ H264_SLICE_READY = 0x8,
+ TASK_LIST_FULL = 0xF0,
+ UNKNOWN_COMMAND = 0xF1,
+ WRONG_CODEC_OR_RESOLUTION = 0xF4,
+ NO_INT_COMPLETION = 0x100,
+ LMI_ERR = 0x101,
+ EMI_ERR = 0x102,
+ HECMI_ERR = 0x103,
+};
+
+static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
+{
+ struct hva_dev *hva = data;
+
+ /* read status registers */
+ hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
+ hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
+
+ /* acknowledge the interrupt */
+ writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
+{
+ struct hva_dev *hva = arg;
+ struct device *dev = hva_to_dev(hva);
+ u32 status = hva->sts_reg & 0xFF;
+ u8 ctx_id = 0;
+ struct hva_ctx *ctx = NULL;
+
+ dev_dbg(dev, "%s %s: status: 0x%02x fifo level: 0x%02x\n",
+ HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
+
+ /*
+ * status: task_id[31:16] client_id[15:8] status[7:0]
+ * the context identifier is retrieved from the client identifier
+ */
+ ctx_id = (hva->sts_reg & 0xFF00) >> 8;
+ if (ctx_id >= HVA_MAX_INSTANCES) {
+ dev_err(dev, "%s %s: bad context identifier: %d\n",
+ ctx->name, __func__, ctx_id);
+ ctx->hw_err = true;
+ goto out;
+ }
+
+ ctx = hva->instances[ctx_id];
+ if (!ctx)
+ goto out;
+
+ switch (status) {
+ case NO_ERROR:
+ dev_dbg(dev, "%s %s: no error\n",
+ ctx->name, __func__);
+ ctx->hw_err = false;
+ break;
+ case H264_SLICE_READY:
+ dev_dbg(dev, "%s %s: h264 slice ready\n",
+ ctx->name, __func__);
+ ctx->hw_err = false;
+ break;
+ case H264_FRAME_SKIPPED:
+ dev_dbg(dev, "%s %s: h264 frame skipped\n",
+ ctx->name, __func__);
+ ctx->hw_err = false;
+ break;
+ case H264_BITSTREAM_OVERSIZE:
+ dev_err(dev, "%s %s:h264 bitstream oversize\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case H264_SLICE_LIMIT_SIZE:
+ dev_err(dev, "%s %s: h264 slice limit size is reached\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case H264_MAX_SLICE_NUMBER:
+ dev_err(dev, "%s %s: h264 max slice number is reached\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case TASK_LIST_FULL:
+ dev_err(dev, "%s %s:task list full\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case UNKNOWN_COMMAND:
+ dev_err(dev, "%s %s: command not known\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case WRONG_CODEC_OR_RESOLUTION:
+ dev_err(dev, "%s %s: wrong codec or resolution\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ default:
+ dev_err(dev, "%s %s: status not recognized\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ }
+out:
+ complete(&hva->interrupt);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
+{
+ struct hva_dev *hva = data;
+
+ /* read status registers */
+ hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
+ hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
+
+ /* read error registers */
+ hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
+ hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
+ hva->hec_mif_err_reg = readl_relaxed(hva->regs +
+ HVA_HIF_REG_HEC_MIF_ERR);
+
+ /* acknowledge the interrupt */
+ writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
+{
+ struct hva_dev *hva = arg;
+ struct device *dev = hva_to_dev(hva);
+ u8 ctx_id = 0;
+ struct hva_ctx *ctx;
+
+ dev_dbg(dev, "%s status: 0x%02x fifo level: 0x%02x\n",
+ HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
+
+ /*
+ * status: task_id[31:16] client_id[15:8] status[7:0]
+ * the context identifier is retrieved from the client identifier
+ */
+ ctx_id = (hva->sts_reg & 0xFF00) >> 8;
+ if (ctx_id >= HVA_MAX_INSTANCES) {
+ dev_err(dev, "%s bad context identifier: %d\n", HVA_PREFIX,
+ ctx_id);
+ goto out;
+ }
+
+ ctx = hva->instances[ctx_id];
+ if (!ctx)
+ goto out;
+
+ if (hva->lmi_err_reg) {
+ dev_err(dev, "%s local memory interface error: 0x%08x\n",
+ ctx->name, hva->lmi_err_reg);
+ ctx->hw_err = true;
+ }
+
+ if (hva->emi_err_reg) {
+ dev_err(dev, "%s external memory interface error: 0x%08x\n",
+ ctx->name, hva->emi_err_reg);
+ ctx->hw_err = true;
+ }
+
+ if (hva->hec_mif_err_reg) {
+ dev_err(dev, "%s hec memory interface error: 0x%08x\n",
+ ctx->name, hva->hec_mif_err_reg);
+ ctx->hw_err = true;
+ }
+out:
+ complete(&hva->interrupt);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
+{
+ struct device *dev = hva_to_dev(hva);
+ unsigned long int version;
+
+ if (pm_runtime_get_sync(dev) < 0) {
+ dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
+ return HVA_VERSION_UNKNOWN;
+ }
+
+ version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
+ VERSION_ID_MASK;
+
+ pm_runtime_put_autosuspend(dev);
+
+ switch (version) {
+ case HVA_VERSION_V400:
+ dev_dbg(dev, "%s IP hardware version 0x%lx\n",
+ HVA_PREFIX, version);
+ break;
+ default:
+ dev_err(dev, "%s unknown IP hardware version 0x%lx\n",
+ HVA_PREFIX, version);
+ version = HVA_VERSION_UNKNOWN;
+ break;
+ }
+
+ return version;
+}
+
+int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *regs;
+ struct resource *esram;
+ int ret;
+
+ WARN_ON(!hva);
+
+ /* get memory for registers */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hva->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR_OR_NULL(hva->regs)) {
+ dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
+ return PTR_ERR(hva->regs);
+ }
+
+ /* get memory for esram */
+ esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!esram) {
+ dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
+ return -ENODEV;
+ }
+ hva->esram_addr = esram->start;
+ hva->esram_size = resource_size(esram);
+
+ dev_info(dev, "%s esram reserved for address: 0x%x size:%d\n",
+ HVA_PREFIX, hva->esram_addr, hva->esram_size);
+
+ /* get clock resource */
+ hva->clk = devm_clk_get(dev, "clk_hva");
+ if (IS_ERR(hva->clk)) {
+ dev_err(dev, "%s failed to get clock\n", HVA_PREFIX);
+ return PTR_ERR(hva->clk);
+ }
+
+ ret = clk_prepare(hva->clk);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to prepare clock\n", HVA_PREFIX);
+ hva->clk = ERR_PTR(-EINVAL);
+ return ret;
+ }
+
+ /* get status interrupt resource */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to get status IRQ\n", HVA_PREFIX);
+ goto err_clk;
+ }
+ hva->irq_its = ret;
+
+ ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
+ hva_hw_its_irq_thread,
+ IRQF_ONESHOT,
+ "hva_its_irq", hva);
+ if (ret) {
+ dev_err(dev, "%s failed to install status IRQ 0x%x\n",
+ HVA_PREFIX, hva->irq_its);
+ goto err_clk;
+ }
+ disable_irq(hva->irq_its);
+
+ /* get error interrupt resource */
+ ret = platform_get_irq(pdev, 1);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to get error IRQ\n", HVA_PREFIX);
+ goto err_clk;
+ }
+ hva->irq_err = ret;
+
+ ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
+ hva_hw_err_irq_thread,
+ IRQF_ONESHOT,
+ "hva_err_irq", hva);
+ if (ret) {
+ dev_err(dev, "%s failed to install error IRQ 0x%x\n",
+ HVA_PREFIX, hva->irq_err);
+ goto err_clk;
+ }
+ disable_irq(hva->irq_err);
+
+ /* initialise protection mutex */
+ mutex_init(&hva->protect_mutex);
+
+ /* initialise completion signal */
+ init_completion(&hva->interrupt);
+
+ /* initialise runtime power management */
+ pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
+ goto err_clk;
+ }
+
+ /* check IP hardware version */
+ hva->ip_version = hva_hw_get_ip_version(hva);
+
+ if (hva->ip_version == HVA_VERSION_UNKNOWN) {
+ ret = -EINVAL;
+ goto err_pm;
+ }
+
+ dev_info(dev, "%s found hva device (version 0x%lx)\n", HVA_PREFIX,
+ hva->ip_version);
+
+ return 0;
+
+err_pm:
+ pm_runtime_put(dev);
+err_clk:
+ if (hva->clk)
+ clk_unprepare(hva->clk);
+
+ return ret;
+}
+
+void hva_hw_remove(struct hva_dev *hva)
+{
+ struct device *dev = hva_to_dev(hva);
+
+ disable_irq(hva->irq_its);
+ disable_irq(hva->irq_err);
+
+ pm_runtime_put_autosuspend(dev);
+ pm_runtime_disable(dev);
+}
+
+int hva_hw_runtime_suspend(struct device *dev)
+{
+ struct hva_dev *hva = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hva->clk);
+
+ return 0;
+}
+
+int hva_hw_runtime_resume(struct device *dev)
+{
+ struct hva_dev *hva = dev_get_drvdata(dev);
+
+ if (clk_prepare_enable(hva->clk)) {
+ dev_err(hva->dev, "%s failed to prepare hva clk\n",
+ HVA_PREFIX);
+ return -EINVAL;
+ }
+
+ if (clk_set_rate(hva->clk, CLK_RATE)) {
+ dev_err(dev, "%s failed to set clock frequency\n",
+ HVA_PREFIX);
+ clk_disable_unprepare(hva->clk);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
+ struct hva_buffer *task)
+{
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = hva_to_dev(hva);
+ u8 client_id = ctx->id;
+ int ret;
+ u32 reg = 0;
+
+ mutex_lock(&hva->protect_mutex);
+
+ /* enable irqs */
+ enable_irq(hva->irq_its);
+ enable_irq(hva->irq_err);
+
+ if (pm_runtime_get_sync(dev) < 0) {
+ dev_err(dev, "%s failed to get pm_runtime\n", ctx->name);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
+ switch (cmd) {
+ case H264_ENC:
+ reg |= CLK_GATING_HVC;
+ break;
+ default:
+ dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
+ ret = -EFAULT;
+ goto out;
+ }
+ writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
+
+ dev_dbg(dev, "%s %s: write configuration registers\n", ctx->name,
+ __func__);
+
+ /* byte swap config */
+ writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);
+
+ /* define Max Opcode Size and Max Message Size for LMI and EMI */
+ writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
+ writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);
+
+ /*
+ * command FIFO: task_id[31:16] client_id[15:8] command_type[7:0]
+ * the context identifier is provided as client identifier to the
+ * hardware, and is retrieved in the interrupt functions from the
+ * status register
+ */
+ dev_dbg(dev, "%s %s: send task (cmd: %d, task_desc: %pad)\n",
+ ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
+ writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
+ writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);
+
+ if (!wait_for_completion_timeout(&hva->interrupt,
+ msecs_to_jiffies(2000))) {
+ dev_err(dev, "%s %s: time out on completion\n", ctx->name,
+ __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* get encoding status */
+ ret = ctx->hw_err ? -EFAULT : 0;
+
+out:
+ disable_irq(hva->irq_its);
+ disable_irq(hva->irq_err);
+
+ switch (cmd) {
+ case H264_ENC:
+ reg &= ~CLK_GATING_HVC;
+ writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
+ break;
+ default:
+ dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
+ }
+
+ pm_runtime_put_autosuspend(dev);
+ mutex_unlock(&hva->protect_mutex);
+
+ return ret;
+}
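A note on the command word written to HVA_HIF_FIFO_CMD above: per the in-code comment, the 32-bit word packs the command type in bits [7:0] and the client (context) identifier in bits [15:8]; bits [31:16] are reserved for a task identifier, left at zero here. A minimal sketch of the packing, plus an unpack helper assuming the status word read back in the interrupt handlers mirrors this layout (the helper names are illustrative, not from the patch):

    /* sketch: pack/unpack of the HVA command FIFO word
     * layout: task_id[31:16] client_id[15:8] command_type[7:0] */
    static inline u32 hva_pack_cmd(u8 client_id, enum hva_hw_cmd_type cmd)
    {
            return ((u32)client_id << 8) | ((u32)cmd & 0xff);
    }

    static inline u8 hva_status_to_client_id(u32 sts)
    {
            /* assumption: the status word mirrors the command layout */
            return (sts >> 8) & 0xff;
    }

The driver itself computes the same word inline as cmd + (client_id << 8), which is equivalent since the command type and the shifted client id occupy disjoint bit ranges.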
diff --git a/drivers/media/platform/sti/hva/hva-hw.h b/drivers/media/platform/sti/hva/hva-hw.h
new file mode 100644
index 000000000000..efb45b927524
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-hw.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef HVA_HW_H
+#define HVA_HW_H
+
+#include "hva-mem.h"
+
+/* HVA Versions */
+#define HVA_VERSION_UNKNOWN 0x000
+#define HVA_VERSION_V400 0x400
+
+/* HVA command types */
+enum hva_hw_cmd_type {
+ /* RESERVED = 0x00 */
+ /* RESERVED = 0x01 */
+ H264_ENC = 0x02,
+ /* RESERVED = 0x03 */
+ /* RESERVED = 0x04 */
+ /* RESERVED = 0x05 */
+ /* RESERVED = 0x06 */
+ /* RESERVED = 0x07 */
+ REMOVE_CLIENT = 0x08,
+ FREEZE_CLIENT = 0x09,
+ START_CLIENT = 0x0A,
+ FREEZE_ALL = 0x0B,
+ START_ALL = 0x0C,
+ REMOVE_ALL = 0x0D
+};
+
+int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva);
+void hva_hw_remove(struct hva_dev *hva);
+int hva_hw_runtime_suspend(struct device *dev);
+int hva_hw_runtime_resume(struct device *dev);
+int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
+ struct hva_buffer *task);
+
+#endif /* HVA_HW_H */
diff --git a/drivers/media/platform/sti/hva/hva-mem.c b/drivers/media/platform/sti/hva/hva-mem.c
new file mode 100644
index 000000000000..649f66007ad6
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-mem.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "hva.h"
+#include "hva-mem.h"
+
+int hva_mem_alloc(struct hva_ctx *ctx, u32 size, const char *name,
+ struct hva_buffer **buf)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ struct hva_buffer *b;
+ dma_addr_t paddr;
+ void *base;
+
+ b = devm_kzalloc(dev, sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
+ DMA_ATTR_WRITE_COMBINE);
+ if (!base) {
+ dev_err(dev, "%s %s: dma_alloc_attrs failed for %s (size=%d)\n",
+ ctx->name, __func__, name, size);
+ devm_kfree(dev, b);
+ return -ENOMEM;
+ }
+
+ b->size = size;
+ b->paddr = paddr;
+ b->vaddr = base;
+ b->name = name;
+
+ dev_dbg(dev,
+ "%s allocate %d bytes of HW memory @(virt=%p, phy=%pad): %s\n",
+ ctx->name, size, b->vaddr, &b->paddr, b->name);
+
+ /* return hva buffer to user */
+ *buf = b;
+
+ return 0;
+}
+
+void hva_mem_free(struct hva_ctx *ctx, struct hva_buffer *buf)
+{
+ struct device *dev = ctx_to_dev(ctx);
+
+ dev_dbg(dev,
+ "%s free %d bytes of HW memory @(virt=%p, phy=%pad): %s\n",
+ ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);
+
+ dma_free_attrs(dev, buf->size, buf->vaddr, buf->paddr,
+ DMA_ATTR_WRITE_COMBINE);
+
+ devm_kfree(dev, buf);
+}
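The allocator above hands back a struct hva_buffer carrying both the CPU view (vaddr) and the device view (paddr) of one write-combined DMA buffer. A minimal kernel-side usage sketch, assuming the driver's own headers are included; the buffer size and name are illustrative, not from the patch:

    /* sketch: allocate, fill and release a task descriptor buffer */
    static int example_task_buffer(struct hva_ctx *ctx)
    {
            struct hva_buffer *task;
            int ret;

            ret = hva_mem_alloc(ctx, 1024, "task descriptor", &task);
            if (ret)
                    return ret;

            /* CPU writes the descriptor through the kernel mapping... */
            memset(task->vaddr, 0, task->size);

            /* ...hardware would consume it through task->paddr, then: */
            hva_mem_free(ctx, task);

            return 0;
    }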
diff --git a/drivers/media/platform/sti/hva/hva-mem.h b/drivers/media/platform/sti/hva/hva-mem.h
new file mode 100644
index 000000000000..a95c728a45e6
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-mem.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef HVA_MEM_H
+#define HVA_MEM_H
+
+/**
+ * struct hva_buffer - hva buffer
+ *
+ * @name: name of requester
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @size: size of buffer
+ */
+struct hva_buffer {
+ const char *name;
+ dma_addr_t paddr;
+ void *vaddr;
+ u32 size;
+};
+
+int hva_mem_alloc(struct hva_ctx *ctx,
+ u32 size,
+ const char *name,
+ struct hva_buffer **buf);
+
+void hva_mem_free(struct hva_ctx *ctx,
+ struct hva_buffer *buf);
+
+#endif /* HVA_MEM_H */
diff --git a/drivers/media/platform/sti/hva/hva-v4l2.c b/drivers/media/platform/sti/hva/hva-v4l2.c
new file mode 100644
index 000000000000..6bf3c8588230
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva-v4l2.c
@@ -0,0 +1,1415 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "hva.h"
+#include "hva-hw.h"
+
+#define HVA_NAME "st-hva"
+
+#define MIN_FRAMES 1
+#define MIN_STREAMS 1
+
+#define HVA_MIN_WIDTH 32
+#define HVA_MAX_WIDTH 1920
+#define HVA_MIN_HEIGHT 32
+#define HVA_MAX_HEIGHT 1920
+
+/* HVA requires a 16x16 pixels alignment for frames */
+#define HVA_WIDTH_ALIGNMENT 16
+#define HVA_HEIGHT_ALIGNMENT 16
+
+#define HVA_DEFAULT_WIDTH HVA_MIN_WIDTH
+#define HVA_DEFAULT_HEIGHT HVA_MIN_HEIGHT
+#define HVA_DEFAULT_FRAME_NUM 1
+#define HVA_DEFAULT_FRAME_DEN 30
+
+#define to_type_str(type) ((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT ? \
+ "frame" : "stream")
+
+
+/* registry of available encoders */
+static const struct hva_enc *hva_encoders[] = {
+ &nv12h264enc,
+ &nv21h264enc,
+};
+
+static inline int frame_size(u32 w, u32 h, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ return (w * h * 3) / 2;
+ default:
+ return 0;
+ }
+}
+
+static inline int frame_stride(u32 w, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ return w;
+ default:
+ return 0;
+ }
+}
+
+static inline int frame_alignment(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ /* multiple of 2 */
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static inline int estimated_stream_size(u32 w, u32 h)
+{
+ /*
+ * HVA only encodes in YUV420 format, regardless of the frame format.
+ * A compression ratio of 2 is assumed: thus, the maximum size
+ * of a stream is estimated at ((width x height x 3 / 2) / 2)
+ */
+ return (w * h * 3) / 4;
+}
+
+static void set_default_params(struct hva_ctx *ctx)
+{
+ struct hva_frameinfo *frameinfo = &ctx->frameinfo;
+ struct hva_streaminfo *streaminfo = &ctx->streaminfo;
+
+ frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+ frameinfo->width = HVA_DEFAULT_WIDTH;
+ frameinfo->height = HVA_DEFAULT_HEIGHT;
+ frameinfo->aligned_width = ALIGN(frameinfo->width,
+ HVA_WIDTH_ALIGNMENT);
+ frameinfo->aligned_height = ALIGN(frameinfo->height,
+ HVA_HEIGHT_ALIGNMENT);
+ frameinfo->size = frame_size(frameinfo->aligned_width,
+ frameinfo->aligned_height,
+ frameinfo->pixelformat);
+
+ streaminfo->streamformat = V4L2_PIX_FMT_H264;
+ streaminfo->width = HVA_DEFAULT_WIDTH;
+ streaminfo->height = HVA_DEFAULT_HEIGHT;
+
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ ctx->max_stream_size = estimated_stream_size(streaminfo->width,
+ streaminfo->height);
+}
+
+static const struct hva_enc *hva_find_encoder(struct hva_ctx *ctx,
+ u32 pixelformat,
+ u32 streamformat)
+{
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ const struct hva_enc *enc;
+ unsigned int i;
+
+ for (i = 0; i < hva->nb_of_encoders; i++) {
+ enc = hva->encoders[i];
+ if ((enc->pixelformat == pixelformat) &&
+ (enc->streamformat == streamformat))
+ return enc;
+ }
+
+ return NULL;
+}
+
+static void register_format(u32 format, u32 formats[], u32 *nb_of_formats)
+{
+ u32 i;
+ bool found = false;
+
+ for (i = 0; i < *nb_of_formats; i++) {
+ if (format == formats[i]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ formats[(*nb_of_formats)++] = format;
+}
+
+static void register_formats(struct hva_dev *hva)
+{
+ unsigned int i;
+
+ for (i = 0; i < hva->nb_of_encoders; i++) {
+ register_format(hva->encoders[i]->pixelformat,
+ hva->pixelformats,
+ &hva->nb_of_pixelformats);
+
+ register_format(hva->encoders[i]->streamformat,
+ hva->streamformats,
+ &hva->nb_of_streamformats);
+ }
+}
+
+static void register_encoders(struct hva_dev *hva)
+{
+ struct device *dev = hva_to_dev(hva);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hva_encoders); i++) {
+ if (hva->nb_of_encoders >= HVA_MAX_ENCODERS) {
+ dev_dbg(dev,
+ "%s failed to register %s encoder (%d maximum reached)\n",
+ HVA_PREFIX, hva_encoders[i]->name,
+ HVA_MAX_ENCODERS);
+ return;
+ }
+
+ hva->encoders[hva->nb_of_encoders++] = hva_encoders[i];
+ dev_info(dev, "%s %s encoder registered\n", HVA_PREFIX,
+ hva_encoders[i]->name);
+ }
+}
+
+static int hva_open_encoder(struct hva_ctx *ctx, u32 streamformat,
+ u32 pixelformat, struct hva_enc **penc)
+{
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ struct hva_enc *enc;
+ int ret;
+
+ /* find an encoder which can deal with these formats */
+ enc = (struct hva_enc *)hva_find_encoder(ctx, pixelformat,
+ streamformat);
+ if (!enc) {
+ dev_err(dev, "%s no encoder found matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&pixelformat, (char *)&streamformat);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s one encoder matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&pixelformat, (char *)&streamformat);
+
+ /* update instance name */
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:%4.4s]",
+ hva->instance_id, (char *)&streamformat);
+
+ /* open encoder instance */
+ ret = enc->open(ctx);
+ if (ret) {
+ dev_err(dev, "%s failed to open encoder instance (%d)\n",
+ ctx->name, ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s %s encoder opened\n", ctx->name, enc->name);
+
+ *penc = enc;
+
+ return ret;
+}
+
+/*
+ * V4L2 ioctl operations
+ */
+
+static int hva_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ strlcpy(cap->driver, HVA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, hva->vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ hva->pdev->name);
+
+ return 0;
+}
+
+static int hva_enum_fmt_stream(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ if (unlikely(f->index >= hva->nb_of_streamformats))
+ return -EINVAL;
+
+ f->pixelformat = hva->streamformats[f->index];
+
+ return 0;
+}
+
+static int hva_enum_fmt_frame(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ if (unlikely(f->index >= hva->nb_of_pixelformats))
+ return -EINVAL;
+
+ f->pixelformat = hva->pixelformats[f->index];
+
+ return 0;
+}
+
+static int hva_g_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_streaminfo *streaminfo = &ctx->streaminfo;
+
+ f->fmt.pix.width = streaminfo->width;
+ f->fmt.pix.height = streaminfo->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
+ f->fmt.pix.pixelformat = streaminfo->streamformat;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = ctx->max_stream_size;
+
+ return 0;
+}
+
+static int hva_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_frameinfo *frameinfo = &ctx->frameinfo;
+
+ f->fmt.pix.width = frameinfo->width;
+ f->fmt.pix.height = frameinfo->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
+ f->fmt.pix.pixelformat = frameinfo->pixelformat;
+ f->fmt.pix.bytesperline = frame_stride(frameinfo->aligned_width,
+ frameinfo->pixelformat);
+ f->fmt.pix.sizeimage = frameinfo->size;
+
+ return 0;
+}
+
+static int hva_try_fmt_stream(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 streamformat = pix->pixelformat;
+ const struct hva_enc *enc;
+ u32 width, height;
+ u32 stream_size;
+
+ enc = hva_find_encoder(ctx, ctx->frameinfo.pixelformat, streamformat);
+ if (!enc) {
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (CAPTURE): unsupported format %.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return -EINVAL;
+ }
+
+ width = pix->width;
+ height = pix->height;
+ if (ctx->flags & HVA_FLAG_FRAMEINFO) {
+ /*
+ * if the frame resolution is already fixed, only allow the
+ * same stream resolution
+ */
+ pix->width = ctx->frameinfo.width;
+ pix->height = ctx->frameinfo.height;
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit frame resolution\n",
+ ctx->name, width, height,
+ pix->width, pix->height);
+ } else {
+ /* adjust width & height */
+ v4l_bound_align_image(&pix->width,
+ HVA_MIN_WIDTH, enc->max_width,
+ 0,
+ &pix->height,
+ HVA_MIN_HEIGHT, enc->max_height,
+ 0,
+ 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height,
+ pix->width, pix->height);
+ }
+
+ stream_size = estimated_stream_size(pix->width, pix->height);
+ if (pix->sizeimage < stream_size)
+ pix->sizeimage = stream_size;
+
+ pix->bytesperline = 0;
+ pix->colorspace = ctx->colorspace;
+ pix->xfer_func = ctx->xfer_func;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int hva_try_fmt_frame(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 pixelformat = pix->pixelformat;
+ const struct hva_enc *enc;
+ u32 width, height;
+
+ enc = hva_find_encoder(ctx, pixelformat, ctx->streaminfo.streamformat);
+ if (!enc) {
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (OUTPUT): unsupported format %.4s\n",
+ ctx->name, (char *)&pixelformat);
+ return -EINVAL;
+ }
+
+ /* adjust width & height */
+ width = pix->width;
+ height = pix->height;
+ v4l_bound_align_image(&pix->width,
+ HVA_MIN_WIDTH, HVA_MAX_WIDTH,
+ frame_alignment(pixelformat) - 1,
+ &pix->height,
+ HVA_MIN_HEIGHT, HVA_MAX_HEIGHT,
+ frame_alignment(pixelformat) - 1,
+ 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (OUTPUT): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height, pix->width, pix->height);
+
+ width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
+ height = ALIGN(pix->height, HVA_HEIGHT_ALIGNMENT);
+
+ if (!pix->colorspace) {
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ pix->bytesperline = frame_stride(width, pixelformat);
+ pix->sizeimage = frame_size(width, height, pixelformat);
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = hva_try_fmt_stream(file, fh, f);
+ if (ret) {
+ dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): unsupported format %.4s\n",
+ ctx->name, (char *)&f->fmt.pix.pixelformat);
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): queue busy\n",
+ ctx->name);
+ return -EBUSY;
+ }
+
+ ctx->max_stream_size = f->fmt.pix.sizeimage;
+ ctx->streaminfo.width = f->fmt.pix.width;
+ ctx->streaminfo.height = f->fmt.pix.height;
+ ctx->streaminfo.streamformat = f->fmt.pix.pixelformat;
+ ctx->flags |= HVA_FLAG_STREAMINFO;
+
+ return 0;
+}
+
+static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = hva_try_fmt_frame(file, fh, f);
+ if (ret) {
+ dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): unsupported format %.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): queue busy\n", ctx->name);
+ return -EBUSY;
+ }
+
+ ctx->colorspace = pix->colorspace;
+ ctx->xfer_func = pix->xfer_func;
+ ctx->ycbcr_enc = pix->ycbcr_enc;
+ ctx->quantization = pix->quantization;
+
+ ctx->frameinfo.aligned_width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
+ ctx->frameinfo.aligned_height = ALIGN(pix->height,
+ HVA_HEIGHT_ALIGNMENT);
+ ctx->frameinfo.size = pix->sizeimage;
+ ctx->frameinfo.pixelformat = pix->pixelformat;
+ ctx->frameinfo.width = pix->width;
+ ctx->frameinfo.height = pix->height;
+ ctx->flags |= HVA_FLAG_FRAMEINFO;
+
+ return 0;
+}
+
+static int hva_g_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ sp->parm.output.timeperframe.numerator = time_per_frame->numerator;
+ sp->parm.output.timeperframe.denominator =
+ time_per_frame->denominator;
+
+ return 0;
+}
+
+static int hva_s_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (!sp->parm.output.timeperframe.numerator ||
+ !sp->parm.output.timeperframe.denominator)
+ return hva_g_parm(file, fh, sp);
+
+ sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ time_per_frame->numerator = sp->parm.output.timeperframe.numerator;
+ time_per_frame->denominator =
+ sp->parm.output.timeperframe.denominator;
+
+ return 0;
+}
+
+static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ /*
+ * depending on the targeted compressed video format, the
+ * capture buffer might contain headers (e.g. H.264 SPS/PPS)
+ * filled in by the driver client; the size of this data is
+ * copied from the bytesused field of the V4L2 buffer into the
+ * bytesused field of the hva stream buffer
+ */
+ struct vb2_queue *vq;
+ struct hva_stream *stream;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, buf->type);
+
+ if (buf->index >= vq->num_buffers) {
+ dev_dbg(dev, "%s buffer index %d out of range (%d)\n",
+ ctx->name, buf->index, vq->num_buffers);
+ return -EINVAL;
+ }
+
+ stream = (struct hva_stream *)vq->bufs[buf->index];
+ stream->bytesused = buf->bytesused;
+ }
+
+ return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
+}
+
+/* V4L2 ioctl ops */
+static const struct v4l2_ioctl_ops hva_ioctl_ops = {
+ .vidioc_querycap = hva_querycap,
+ .vidioc_enum_fmt_vid_cap = hva_enum_fmt_stream,
+ .vidioc_enum_fmt_vid_out = hva_enum_fmt_frame,
+ .vidioc_g_fmt_vid_cap = hva_g_fmt_stream,
+ .vidioc_g_fmt_vid_out = hva_g_fmt_frame,
+ .vidioc_try_fmt_vid_cap = hva_try_fmt_stream,
+ .vidioc_try_fmt_vid_out = hva_try_fmt_frame,
+ .vidioc_s_fmt_vid_cap = hva_s_fmt_stream,
+ .vidioc_s_fmt_vid_out = hva_s_fmt_frame,
+ .vidioc_g_parm = hva_g_parm,
+ .vidioc_s_parm = hva_s_parm,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = hva_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * V4L2 control operations
+ */
+
+static int hva_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hva_ctx *ctx = container_of(ctrl->handler, struct hva_ctx,
+ ctrl_handler);
+ struct device *dev = ctx_to_dev(ctx);
+
+ dev_dbg(dev, "%s S_CTRL: id = %d, val = %d\n", ctx->name,
+ ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ ctx->ctrls.bitrate_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctx->ctrls.gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctx->ctrls.bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ ctx->ctrls.aspect = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ ctx->ctrls.profile = ctrl->val;
+ if (ctx->flags & HVA_FLAG_STREAMINFO)
+ snprintf(ctx->streaminfo.profile,
+ sizeof(ctx->streaminfo.profile),
+ "%s profile",
+ v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ ctx->ctrls.level = ctrl->val;
+ if (ctx->flags & HVA_FLAG_STREAMINFO)
+ snprintf(ctx->streaminfo.level,
+ sizeof(ctx->streaminfo.level),
+ "level %s",
+ v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ ctx->ctrls.entropy_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+ ctx->ctrls.cpb_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ ctx->ctrls.dct8x8 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ ctx->ctrls.qpmin = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ ctx->ctrls.qpmax = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ ctx->ctrls.vui_sar = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ ctx->ctrls.vui_sar_idc = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING:
+ ctx->ctrls.sei_fp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ ctx->ctrls.sei_fp_type = ctrl->val;
+ break;
+ default:
+ dev_dbg(dev, "%s S_CTRL: invalid control (id = %d)\n",
+ ctx->name, ctrl->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* V4L2 control ops */
+static const struct v4l2_ctrl_ops hva_ctrl_ops = {
+ .s_ctrl = hva_s_ctrl,
+};
+
+static int hva_ctrls_setup(struct hva_ctx *ctx)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ u64 mask;
+ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type sei_fp_type =
+ V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 15);
+
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ 0,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 1, 60, 1, 16);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ 1000, 60000000, 1000, 20000000);
+
+ mask = ~(1 << V4L2_MPEG_VIDEO_ASPECT_1x1);
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_ASPECT,
+ V4L2_MPEG_VIDEO_ASPECT_1x1,
+ mask,
+ V4L2_MPEG_VIDEO_ASPECT_1x1);
+
+ mask = ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH));
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH,
+ mask,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+ 0,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ 0,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+ 1, 10000, 1, 3000);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+ 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ 0, 51, 1, 5);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ 0, 51, 1, 51);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
+ 0, 1, 1, 1);
+
+ mask = ~(1 << V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1,
+ mask,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING,
+ 0, 1, 1, 0);
+
+ mask = ~(1 << sei_fp_type);
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE,
+ sei_fp_type,
+ mask,
+ sei_fp_type);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ dev_dbg(dev, "%s controls setup failed (%d)\n",
+ ctx->name, err);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ /* set default time per frame */
+ ctx->ctrls.time_per_frame.numerator = HVA_DEFAULT_FRAME_NUM;
+ ctx->ctrls.time_per_frame.denominator = HVA_DEFAULT_FRAME_DEN;
+
+ return 0;
+}
+
+/*
+ * mem-to-mem operations
+ */
+
+static void hva_run_work(struct work_struct *work)
+{
+ struct hva_ctx *ctx = container_of(work, struct hva_ctx, run_work);
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct hva_enc *enc = ctx->enc;
+ struct hva_frame *frame;
+ struct hva_stream *stream;
+ int ret;
+
+ /* protect instance against reentrancy */
+ mutex_lock(&ctx->lock);
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ frame = to_hva_frame(src_buf);
+ stream = to_hva_stream(dst_buf);
+ frame->vbuf.sequence = ctx->frame_num++;
+
+ ret = enc->encode(ctx, frame, stream);
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, stream->bytesused);
+ if (ret) {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ /* propagate frame timestamp */
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->field = V4L2_FIELD_NONE;
+ dst_buf->sequence = ctx->stream_num - 1;
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ v4l2_m2m_job_finish(ctx->hva_dev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void hva_device_run(void *priv)
+{
+ struct hva_ctx *ctx = priv;
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ queue_work(hva->work_queue, &ctx->run_work);
+}
+
+static void hva_job_abort(void *priv)
+{
+ struct hva_ctx *ctx = priv;
+ struct device *dev = ctx_to_dev(ctx);
+
+ dev_dbg(dev, "%s aborting job\n", ctx->name);
+
+ ctx->aborting = true;
+}
+
+static int hva_job_ready(void *priv)
+{
+ struct hva_ctx *ctx = priv;
+ struct device *dev = ctx_to_dev(ctx);
+
+ if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
+ dev_dbg(dev, "%s job not ready: no frame buffers\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+ dev_dbg(dev, "%s job not ready: no stream buffers\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (ctx->aborting) {
+ dev_dbg(dev, "%s job not ready: aborting\n", ctx->name);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* mem-to-mem ops */
+static const struct v4l2_m2m_ops hva_m2m_ops = {
+ .device_run = hva_device_run,
+ .job_abort = hva_job_abort,
+ .job_ready = hva_job_ready,
+};
+
+/*
+ * VB2 queue operations
+ */
+
+static int hva_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+ struct device *dev = ctx_to_dev(ctx);
+ unsigned int size;
+
+ dev_dbg(dev, "%s %s queue setup: num_buffers %d\n", ctx->name,
+ to_type_str(vq->type), *num_buffers);
+
+ size = vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ?
+ ctx->frameinfo.size : ctx->max_stream_size;
+
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ /* only one plane supported */
+ *num_planes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int hva_buf_prepare(struct vb2_buffer *vb)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = ctx_to_dev(ctx);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ struct hva_frame *frame = to_hva_frame(vbuf);
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_dbg(dev,
+ "%s frame[%d] prepare: %d field not supported\n",
+ ctx->name, vb->index, vbuf->field);
+ return -EINVAL;
+ }
+
+ if (!frame->prepared) {
+ /* get memory addresses */
+ frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ frame->paddr = vb2_dma_contig_plane_dma_addr(
+ &vbuf->vb2_buf, 0);
+ frame->info = ctx->frameinfo;
+ frame->prepared = true;
+
+ dev_dbg(dev,
+ "%s frame[%d] prepared; virt=%p, phy=%pad\n",
+ ctx->name, vb->index,
+ frame->vaddr, &frame->paddr);
+ }
+ } else {
+ struct hva_stream *stream = to_hva_stream(vbuf);
+
+ if (!stream->prepared) {
+ /* get memory addresses */
+ stream->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ stream->paddr = vb2_dma_contig_plane_dma_addr(
+ &vbuf->vb2_buf, 0);
+ stream->size = vb2_plane_size(&vbuf->vb2_buf, 0);
+ stream->prepared = true;
+
+ dev_dbg(dev,
+ "%s stream[%d] prepared; virt=%p, phy=%pad\n",
+ ctx->name, vb->index,
+ stream->vaddr, &stream->paddr);
+ }
+ }
+
+ return 0;
+}
+
+static void hva_buf_queue(struct vb2_buffer *vb)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (ctx->fh.m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int hva_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ struct vb2_v4l2_buffer *vbuf;
+ int ret;
+ unsigned int i;
+ bool found = false;
+
+ dev_dbg(dev, "%s %s start streaming\n", ctx->name,
+ to_type_str(vq->type));
+
+ /* open encoder once start_streaming has been called on both queues */
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->cap_q_ctx.q))
+ return 0;
+ } else {
+ if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->out_q_ctx.q))
+ return 0;
+ }
+
+ /* store the instance context in the instances array */
+ for (i = 0; i < HVA_MAX_INSTANCES; i++) {
+ if (!hva->instances[i]) {
+ hva->instances[i] = ctx;
+ /* save the context identifier in the context */
+ ctx->id = i;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(dev, "%s maximum instances reached\n", ctx->name);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hva->nb_of_instances++;
+
+ if (!ctx->enc) {
+ ret = hva_open_encoder(ctx,
+ ctx->streaminfo.streamformat,
+ ctx->frameinfo.pixelformat,
+ &ctx->enc);
+ if (ret < 0)
+ goto err_ctx;
+ }
+
+ return 0;
+
+err_ctx:
+ hva->instances[ctx->id] = NULL;
+ hva->nb_of_instances--;
+err:
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /* return all pending buffers to vb2 (in queued state) */
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ } else {
+ /* return all pending buffers to vb2 (in queued state) */
+ while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ }
+
+ return ret;
+}
+
+static void hva_stop_streaming(struct vb2_queue *vq)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ const struct hva_enc *enc = ctx->enc;
+ struct vb2_v4l2_buffer *vbuf;
+
+ dev_dbg(dev, "%s %s stop streaming\n", ctx->name,
+ to_type_str(vq->type));
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /* return all pending buffers to vb2 (in error state) */
+ ctx->frame_num = 0;
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ } else {
+ /* return all pending buffers to vb2 (in error state) */
+ ctx->stream_num = 0;
+ while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+
+ if ((V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) ||
+ (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) {
+ dev_dbg(dev, "%s %s out=%d cap=%d\n",
+ ctx->name, to_type_str(vq->type),
+ vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q),
+ vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q));
+ return;
+ }
+
+ /* close encoder once stop_streaming has been called on both queues */
+ if (enc) {
+ dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
+ enc->close(ctx);
+ ctx->enc = NULL;
+
+ /* clear instance context in instances array */
+ hva->instances[ctx->id] = NULL;
+ hva->nb_of_instances--;
+ }
+
+ ctx->aborting = false;
+}
+
+/* VB2 queue ops */
+static const struct vb2_ops hva_qops = {
+ .queue_setup = hva_queue_setup,
+ .buf_prepare = hva_buf_prepare,
+ .buf_queue = hva_buf_queue,
+ .start_streaming = hva_start_streaming,
+ .stop_streaming = hva_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * V4L2 file operations
+ */
+
+static int queue_init(struct hva_ctx *ctx, struct vb2_queue *vq)
+{
+ vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vq->drv_priv = ctx;
+ vq->ops = &hva_qops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ vq->lock = &ctx->hva_dev->lock;
+
+ return vb2_queue_init(vq);
+}
+
+static int hva_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct hva_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->buf_struct_size = sizeof(struct hva_frame);
+ src_vq->min_buffers_needed = MIN_FRAMES;
+ src_vq->dev = ctx->hva_dev->dev;
+
+ ret = queue_init(ctx, src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->buf_struct_size = sizeof(struct hva_stream);
+ dst_vq->min_buffers_needed = MIN_STREAMS;
+ dst_vq->dev = ctx->hva_dev->dev;
+
+ return queue_init(ctx, dst_vq);
+}
+
+static int hva_open(struct file *file)
+{
+ struct hva_dev *hva = video_drvdata(file);
+ struct device *dev = hva_to_dev(hva);
+ struct hva_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ctx->hva_dev = hva;
+
+ INIT_WORK(&ctx->run_work, hva_run_work);
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ret = hva_ctrls_setup(ctx);
+ if (ret) {
+ dev_err(dev, "%s failed to set up controls\n",
+ HVA_PREFIX);
+ goto err_fh;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ mutex_init(&ctx->lock);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(hva->m2m_dev, ctx,
+ &hva_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ dev_err(dev, "%s failed to initialize m2m context (%d)\n",
+ HVA_PREFIX, ret);
+ goto err_ctrls;
+ }
+
+ /* set the instance name */
+ mutex_lock(&hva->lock);
+ hva->instance_id++;
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:----]",
+ hva->instance_id);
+ mutex_unlock(&hva->lock);
+
+ /* default parameters for frame and stream */
+ set_default_params(ctx);
+
+ dev_info(dev, "%s encoder instance created\n", ctx->name);
+
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+err_fh:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+out:
+ return ret;
+}
+
+static int hva_release(struct file *file)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ const struct hva_enc *enc = ctx->enc;
+
+ if (enc) {
+ dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
+ enc->close(ctx);
+ ctx->enc = NULL;
+
+ /* clear instance context in instances array */
+ hva->instances[ctx->id] = NULL;
+ hva->nb_of_instances--;
+ }
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ dev_info(dev, "%s encoder instance released\n", ctx->name);
+
+ kfree(ctx);
+
+ return 0;
+}
+
+/* V4L2 file ops */
+static const struct v4l2_file_operations hva_fops = {
+ .owner = THIS_MODULE,
+ .open = hva_open,
+ .release = hva_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+ .poll = v4l2_m2m_fop_poll,
+};
+
+/*
+ * Platform device operations
+ */
+
+static int hva_register_device(struct hva_dev *hva)
+{
+ int ret;
+ struct video_device *vdev;
+ struct device *dev;
+
+ if (!hva)
+ return -ENODEV;
+ dev = hva_to_dev(hva);
+
+ hva->m2m_dev = v4l2_m2m_init(&hva_m2m_ops);
+ if (IS_ERR(hva->m2m_dev)) {
+ dev_err(dev, "%s failed to initialize v4l2-m2m device\n",
+ HVA_PREFIX);
+ ret = PTR_ERR(hva->m2m_dev);
+ goto err;
+ }
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ dev_err(dev, "%s failed to allocate video device\n",
+ HVA_PREFIX);
+ ret = -ENOMEM;
+ goto err_m2m_release;
+ }
+
+ vdev->fops = &hva_fops;
+ vdev->ioctl_ops = &hva_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->lock = &hva->lock;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ vdev->v4l2_dev = &hva->v4l2_dev;
+ snprintf(vdev->name, sizeof(vdev->name), "%s%lx", HVA_NAME,
+ hva->ip_version);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(dev, "%s failed to register video device\n",
+ HVA_PREFIX);
+ goto err_vdev_release;
+ }
+
+ hva->vdev = vdev;
+ video_set_drvdata(vdev, hva);
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+err_m2m_release:
+ v4l2_m2m_release(hva->m2m_dev);
+err:
+ return ret;
+}
+
+static void hva_unregister_device(struct hva_dev *hva)
+{
+ if (!hva)
+ return;
+
+ if (hva->m2m_dev)
+ v4l2_m2m_release(hva->m2m_dev);
+
+ video_unregister_device(hva->vdev);
+}
+
+static int hva_probe(struct platform_device *pdev)
+{
+ struct hva_dev *hva;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ hva = devm_kzalloc(dev, sizeof(*hva), GFP_KERNEL);
+ if (!hva) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hva->dev = dev;
+ hva->pdev = pdev;
+ platform_set_drvdata(pdev, hva);
+
+ mutex_init(&hva->lock);
+
+ /* probe hardware */
+ ret = hva_hw_probe(pdev, hva);
+ if (ret)
+ goto err;
+
+ /* register all available encoders */
+ register_encoders(hva);
+
+ /* register all supported formats */
+ register_formats(hva);
+
+ /* register on V4L2 */
+ ret = v4l2_device_register(dev, &hva->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "%s %s failed to register V4L2 device\n",
+ HVA_PREFIX, HVA_NAME);
+ goto err_hw;
+ }
+
+ hva->work_queue = create_workqueue(HVA_NAME);
+ if (!hva->work_queue) {
+ dev_err(dev, "%s %s failed to allocate work queue\n",
+ HVA_PREFIX, HVA_NAME);
+ ret = -ENOMEM;
+ goto err_v4l2;
+ }
+
+ /* register device */
+ ret = hva_register_device(hva);
+ if (ret)
+ goto err_work_queue;
+
+ dev_info(dev, "%s %s registered as /dev/video%d\n", HVA_PREFIX,
+ HVA_NAME, hva->vdev->num);
+
+ return 0;
+
+err_work_queue:
+ destroy_workqueue(hva->work_queue);
+err_v4l2:
+ v4l2_device_unregister(&hva->v4l2_dev);
+err_hw:
+ hva_hw_remove(hva);
+err:
+ return ret;
+}
+
+static int hva_remove(struct platform_device *pdev)
+{
+ struct hva_dev *hva = platform_get_drvdata(pdev);
+ struct device *dev = hva_to_dev(hva);
+
+ hva_unregister_device(hva);
+
+ destroy_workqueue(hva->work_queue);
+
+ hva_hw_remove(hva);
+
+ v4l2_device_unregister(&hva->v4l2_dev);
+
+ dev_info(dev, "%s %s removed\n", HVA_PREFIX, pdev->name);
+
+ return 0;
+}
+
+/* PM ops */
+static const struct dev_pm_ops hva_pm_ops = {
+ .runtime_suspend = hva_hw_runtime_suspend,
+ .runtime_resume = hva_hw_runtime_resume,
+};
+
+static const struct of_device_id hva_match_types[] = {
+ {
+ .compatible = "st,st-hva",
+ },
+ { /* end node */ }
+};
+
+MODULE_DEVICE_TABLE(of, hva_match_types);
+
+static struct platform_driver hva_driver = {
+ .probe = hva_probe,
+ .remove = hva_remove,
+ .driver = {
+ .name = HVA_NAME,
+ .of_match_table = hva_match_types,
+ .pm = &hva_pm_ops,
+ },
+};
+
+module_platform_driver(hva_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics HVA video encoder V4L2 driver");
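The file above registers a standard V4L2 memory-to-memory encoder, so the generic m2m userspace sequence applies: set the OUTPUT format to a raw NV12/NV21 frame, the CAPTURE format to H.264, allocate and queue buffers on both queues, then stream. A condensed userspace sketch of the format negotiation step, with buffer handling summarized in a comment (device node, resolution and error handling are illustrative; this is the generic V4L2 m2m flow, not code from the patch):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* sketch: NV12 -> H.264 format negotiation on an st-hva m2m node */
    static int setup_encoder(const char *node)
    {
            struct v4l2_format fmt;
            int fd = open(node, O_RDWR);

            if (fd < 0)
                    return -1;

            /* raw frames are fed on the OUTPUT queue */
            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
            fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
            fmt.fmt.pix.width = 1280;
            fmt.fmt.pix.height = 720;
            ioctl(fd, VIDIOC_S_FMT, &fmt);

            /* the encoded stream comes back on the CAPTURE queue */
            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
            ioctl(fd, VIDIOC_S_FMT, &fmt);

            /* then: VIDIOC_REQBUFS + mmap on both queues, VIDIOC_QBUF a
             * filled frame and an empty stream buffer, VIDIOC_STREAMON on
             * both queues, and VIDIOC_DQBUF the encoded stream buffer */
            return fd;
    }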
diff --git a/drivers/media/platform/sti/hva/hva.h b/drivers/media/platform/sti/hva/hva.h
new file mode 100644
index 000000000000..caa580825541
--- /dev/null
+++ b/drivers/media/platform/sti/hva/hva.h
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef HVA_H
+#define HVA_H
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
+
+#define fh_to_ctx(f) (container_of(f, struct hva_ctx, fh))
+
+#define hva_to_dev(h) ((h)->dev)
+
+#define ctx_to_dev(c) ((c)->hva_dev->dev)
+
+#define ctx_to_hdev(c) ((c)->hva_dev)
+
+#define HVA_PREFIX "[---:----]"
+
+extern const struct hva_enc nv12h264enc;
+extern const struct hva_enc nv21h264enc;
+
+/**
+ * struct hva_frameinfo - information about hva frame
+ *
+ * @pixelformat: fourcc code for uncompressed video format
+ * @width: width of frame
+ * @height: height of frame
+ * @aligned_width: width of frame (with encoder alignment constraint)
+ * @aligned_height: height of frame (with encoder alignment constraint)
+ * @size: maximum size in bytes required for data
+ */
+struct hva_frameinfo {
+ u32 pixelformat;
+ u32 width;
+ u32 height;
+ u32 aligned_width;
+ u32 aligned_height;
+ u32 size;
+};
+
+/**
+ * struct hva_streaminfo - information about hva stream
+ *
+ * @streamformat: fourcc code of compressed video format (H.264...)
+ * @width: width of stream
+ * @height: height of stream
+ * @profile: profile string
+ * @level: level string
+ */
+struct hva_streaminfo {
+ u32 streamformat;
+ u32 width;
+ u32 height;
+ u8 profile[32];
+ u8 level[32];
+};
+
+/**
+ * struct hva_controls - hva controls set
+ *
+ * @time_per_frame: time per frame in seconds
+ * @bitrate_mode: bitrate mode (constant bitrate or variable bitrate)
+ * @gop_size: group of pictures size
+ * @bitrate: bitrate (in bps)
+ * @aspect: video aspect
+ * @profile: H.264 profile
+ * @level: H.264 level
+ * @entropy_mode: H.264 entropy mode (CABAC or CAVLC)
+ * @cpb_size: coded picture buffer size (in kB)
+ * @dct8x8: transform mode 8x8 enable
+ * @qpmin: minimum quantizer
+ * @qpmax: maximum quantizer
+ * @vui_sar: pixel aspect ratio enable
+ * @vui_sar_idc: pixel aspect ratio identifier
+ * @sei_fp: sei frame packing arrangement enable
+ * @sei_fp_type: sei frame packing arrangement type
+ */
+struct hva_controls {
+ struct v4l2_fract time_per_frame;
+ enum v4l2_mpeg_video_bitrate_mode bitrate_mode;
+ u32 gop_size;
+ u32 bitrate;
+ enum v4l2_mpeg_video_aspect aspect;
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_level level;
+ enum v4l2_mpeg_video_h264_entropy_mode entropy_mode;
+ u32 cpb_size;
+ bool dct8x8;
+ u32 qpmin;
+ u32 qpmax;
+ bool vui_sar;
+ enum v4l2_mpeg_video_h264_vui_sar_idc vui_sar_idc;
+ bool sei_fp;
+ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type sei_fp_type;
+};
+
+/**
+ * struct hva_frame - hva frame buffer (output)
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the frame belongs to
+ * @info: frame information (width, height, format, alignment...)
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @prepared: true if vaddr/paddr are resolved
+ */
+struct hva_frame {
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head list;
+ struct hva_frameinfo info;
+ dma_addr_t paddr;
+ void *vaddr;
+ bool prepared;
+};
+
+/*
+ * to_hva_frame() - cast struct vb2_v4l2_buffer * to struct hva_frame *
+ */
+#define to_hva_frame(vb) \
+ container_of(vb, struct hva_frame, vbuf)
+
+/**
+ * struct hva_stream - hva stream buffer (capture)
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the stream belongs to
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @prepared: true if vaddr/paddr are resolved
+ * @size: size of the buffer in bytes
+ * @bytesused: number of bytes occupied by data in the buffer
+ */
+struct hva_stream {
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head list;
+ dma_addr_t paddr;
+ void *vaddr;
+ bool prepared;
+ unsigned int size;
+ unsigned int bytesused;
+};
+
+/*
+ * to_hva_stream() - cast struct vb2_v4l2_buffer * to struct hva_stream *
+ */
+#define to_hva_stream(vb) \
+ container_of(vb, struct hva_stream, vbuf)
+
+struct hva_dev;
+struct hva_enc;
+
+/**
+ * struct hva_ctx - context of hva instance
+ *
+ * @hva_dev: the device that this instance is associated with
+ * @fh: V4L2 file handle
+ * @ctrl_handler: V4L2 controls handler
+ * @ctrls: hva controls set
+ * @id: instance identifier
+ * @aborting: true if current job aborted
+ * @name: instance name (debug purpose)
+ * @run_work: encode work
+ * @lock: mutex used to lock access of this context
+ * @flags: validity of streaminfo and frameinfo fields
+ * @frame_num: frame number
+ * @stream_num: stream number
+ * @max_stream_size: maximum size in bytes required for stream data
+ * @colorspace: colorspace identifier
+ * @xfer_func: transfer function identifier
+ * @ycbcr_enc: Y'CbCr encoding identifier
+ * @quantization: quantization identifier
+ * @streaminfo: stream properties
+ * @frameinfo: frame properties
+ * @enc: current encoder
+ * @priv: private codec data for this instance, allocated
+ * by the encoder at @open time
+ * @hw_err: true if hardware error detected
+ */
+struct hva_ctx {
+ struct hva_dev *hva_dev;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct hva_controls ctrls;
+ u8 id;
+ bool aborting;
+ char name[100];
+ struct work_struct run_work;
+ /* mutex protecting this data structure */
+ struct mutex lock;
+ u32 flags;
+ u32 frame_num;
+ u32 stream_num;
+ u32 max_stream_size;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ struct hva_streaminfo streaminfo;
+ struct hva_frameinfo frameinfo;
+ struct hva_enc *enc;
+ void *priv;
+ bool hw_err;
+};
+
+#define HVA_FLAG_STREAMINFO 0x0001
+#define HVA_FLAG_FRAMEINFO 0x0002
+
+#define HVA_MAX_INSTANCES 16
+#define HVA_MAX_ENCODERS 10
+#define HVA_MAX_FORMATS HVA_MAX_ENCODERS
+
+/**
+ * struct hva_dev - abstraction for hva entity
+ *
+ * @v4l2_dev: V4L2 device
+ * @vdev: video device
+ * @pdev: platform device
+ * @dev: device
+ * @lock: mutex used for critical sections & V4L2 ops
+ * serialization
+ * @m2m_dev: memory-to-memory V4L2 device information
+ * @instances: opened instances
+ * @nb_of_instances: number of opened instances
+ * @instance_id: rolling counter identifying an instance (debug purpose)
+ * @regs: register io memory access
+ * @esram_addr: esram address
+ * @esram_size: esram size
+ * @clk: hva clock
+ * @irq_its: status interrupt
+ * @irq_err: error interrupt
+ * @work_queue: work queue to handle the encode jobs
+ * @protect_mutex: mutex used to lock access of hardware
+ * @interrupt: completion interrupt
+ * @ip_version: IP hardware version
+ * @encoders: registered encoders
+ * @nb_of_encoders: number of registered encoders
+ * @pixelformats: supported uncompressed video formats
+ * @nb_of_pixelformats: number of supported uncompressed video formats
+ * @streamformats: supported compressed video formats
+ * @nb_of_streamformats: number of supported compressed video formats
+ * @sfl_reg: status fifo level register value
+ * @sts_reg: status register value
+ * @lmi_err_reg: local memory interface error register value
+ * @emi_err_reg: external memory interface error register value
+ * @hec_mif_err_reg: HEC memory interface error register value
+ */
+struct hva_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct platform_device *pdev;
+ struct device *dev;
+ /* mutex protecting vb2_queue structure */
+ struct mutex lock;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct hva_ctx *instances[HVA_MAX_INSTANCES];
+ unsigned int nb_of_instances;
+ unsigned int instance_id;
+ void __iomem *regs;
+ u32 esram_addr;
+ u32 esram_size;
+ struct clk *clk;
+ int irq_its;
+ int irq_err;
+ struct workqueue_struct *work_queue;
+ /* mutex protecting hardware access */
+ struct mutex protect_mutex;
+ struct completion interrupt;
+ unsigned long ip_version;
+ const struct hva_enc *encoders[HVA_MAX_ENCODERS];
+ u32 nb_of_encoders;
+ u32 pixelformats[HVA_MAX_FORMATS];
+ u32 nb_of_pixelformats;
+ u32 streamformats[HVA_MAX_FORMATS];
+ u32 nb_of_streamformats;
+ u32 sfl_reg;
+ u32 sts_reg;
+ u32 lmi_err_reg;
+ u32 emi_err_reg;
+ u32 hec_mif_err_reg;
+};
+
+/**
+ * struct hva_enc - hva encoder
+ *
+ * @name: encoder name
+ * @streamformat: fourcc code for compressed video format (H.264...)
+ * @pixelformat: fourcc code for uncompressed video format
+ * @max_width: maximum width of frame for this encoder
+ * @max_height: maximum height of frame for this encoder
+ * @open: open encoder
+ * @close: close encoder
+ * @encode: encode a frame (struct hva_frame) in a stream
+ * (struct hva_stream)
+ */
+struct hva_enc {
+ const char *name;
+ u32 streamformat;
+ u32 pixelformat;
+ u32 max_width;
+ u32 max_height;
+ int (*open)(struct hva_ctx *ctx);
+ int (*close)(struct hva_ctx *ctx);
+ int (*encode)(struct hva_ctx *ctx, struct hva_frame *frame,
+ struct hva_stream *stream);
+};
+
+#endif /* HVA_H */
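To make the sizing rules used with this header concrete: a 1280x720 NV12 frame is already 16x16 aligned, so the frame buffer needs 1280 x 720 x 3 / 2 = 1,382,400 bytes, and the worst-case stream estimate (2:1 compression of the YUV420 data) is half of that, 691,200 bytes. A standalone sketch of the same arithmetic, with ALIGN reproduced here as the kernel defines it for power-of-two alignment:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int w = ALIGN(1280, 16); /* 1280: already aligned */
            unsigned int h = ALIGN(720, 16);  /* 720: already aligned */

            printf("frame:  %u bytes\n", w * h * 3 / 2); /* NV12 -> 1382400 */
            printf("stream: %u bytes\n", w * h * 3 / 4); /* estimate -> 691200 */

            return 0;
    }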
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index e967fcfdc1d8..44323cb5d287 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -1379,7 +1379,7 @@ static void cal_stop_streaming(struct vb2_queue *vq)
cal_runtime_put(ctx->dev);
}
-static struct vb2_ops cal_video_qops = {
+static const struct vb2_ops cal_video_qops = {
.queue_setup = cal_queue_setup,
.buf_prepare = cal_buffer_prepare,
.buf_queue = cal_buffer_queue,
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 55a1458ac783..0189f7f7cb03 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -1878,7 +1878,7 @@ static void vpe_stop_streaming(struct vb2_queue *q)
vpdma_dump_regs(ctx->dev->vpdma);
}
-static struct vb2_ops vpe_qops = {
+static const struct vb2_ops vpe_qops = {
.queue_setup = vpe_queue_setup,
.buf_prepare = vpe_buf_prepare,
.buf_queue = vpe_buf_queue,
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index cd0ff4a66fdc..a98f679bd88d 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -815,7 +815,7 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
}
}
-static struct vb2_ops vim2m_qops = {
+static const struct vb2_ops vim2m_qops = {
.queue_setup = vim2m_queue_setup,
.buf_prepare = vim2m_buf_prepare,
.buf_queue = vim2m_buf_queue,
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 7f937136c3f5..5464fefbaab9 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -163,38 +163,38 @@ const struct v4l2_rect vivid_max_rect = {
static const u8 vivid_hdmi_edid[256] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- 0x63, 0x3a, 0xaa, 0x55, 0x00, 0x00, 0x00, 0x00,
- 0x0a, 0x18, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
- 0x0e, 0x00, 0xb2, 0xa0, 0x57, 0x49, 0x9b, 0x26,
- 0x10, 0x48, 0x4f, 0x2f, 0xcf, 0x00, 0x31, 0x59,
+ 0x31, 0xd8, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00,
+ 0x22, 0x1a, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x2f, 0xcf, 0x00, 0x31, 0x59,
0x45, 0x59, 0x81, 0x80, 0x81, 0x40, 0x90, 0x40,
- 0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x02, 0x3a,
- 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
- 0x46, 0x00, 0x10, 0x09, 0x00, 0x00, 0x00, 0x1e,
+ 0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x08, 0xe8,
+ 0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58,
+ 0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
- 0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 'v',
- '4', 'l', '2', '-', 'h', 'd', 'm', 'i',
- 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x10,
+ 0x87, 0x3c, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x76,
+ 0x69, 0x76, 0x69, 0x64, 0x0a, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf0,
-
- 0x02, 0x03, 0x1a, 0xc0, 0x48, 0xa2, 0x10, 0x04,
- 0x02, 0x01, 0x21, 0x14, 0x13, 0x23, 0x09, 0x07,
- 0x07, 0x65, 0x03, 0x0c, 0x00, 0x10, 0x00, 0xe2,
- 0x00, 0x2a, 0x01, 0x1d, 0x00, 0x80, 0x51, 0xd0,
- 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x1e, 0x8c, 0x0a, 0xd0, 0x8a,
- 0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
+
+ 0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
+ 0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
+ 0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
+ 0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
+ 0x0c, 0x00, 0x10, 0x00, 0x00, 0x78, 0x21, 0x00,
+ 0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
+ 0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
+ 0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
+ 0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
+ 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
+ 0x1e, 0x1a, 0x36, 0x80, 0xa0, 0x70, 0x38, 0x1f,
+ 0x40, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32,
+ 0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
+ 0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
+ 0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27,
};
static int vidioc_querycap(struct file *file, void *priv,
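
Both 128-byte EDID blocks are replaced here, so the final byte of each block changes as well (0x7b for the base block, 0x27 for the CTA extension): an EDID block is valid only when its 128 bytes sum to 0 modulo 256, and the last byte is the checksum that enforces this. A small standalone sketch of that rule (not vivid code):

#include <stdint.h>
#include <stdio.h>

/* An EDID block is valid when the sum of all 128 bytes, including
 * the trailing checksum byte, is 0 mod 256. This computes the byte
 * that makes a block of 127 data bytes valid. */
static uint8_t edid_checksum(const uint8_t block[127])
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < 127; i++)
		sum += block[i];

	return (uint8_t)((256 - (sum & 0xff)) & 0xff);
}

int main(void)
{
	/* Start of a hypothetical base block: the fixed EDID header. */
	uint8_t block[127] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	printf("checksum byte: 0x%02x\n", edid_checksum(block));
	return 0;
}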
@@ -839,6 +839,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR |
V4L2_CAP_READWRITE;
+ ret = -ENOMEM;
/* initialize the test pattern generator */
tpg_init(&dev->tpg, 640, 360);
if (tpg_alloc(&dev->tpg, MAX_ZOOM * MAX_WIDTH))
@@ -1033,8 +1034,10 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
*/
dev->cec_workqueue =
alloc_ordered_workqueue("vivid-%03d-cec", WQ_MEM_RECLAIM, inst);
- if (!dev->cec_workqueue)
+ if (!dev->cec_workqueue) {
+ ret = -ENOMEM;
goto unreg_dev;
+ }
/* start creating the vb2 queues */
if (dev->has_vid_cap) {
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index b98089c95ef5..aceb38d9f7e7 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -761,7 +761,7 @@ static const char * const vivid_ctrl_ycbcr_enc_strings[] = {
"Rec. 709",
"xvYCC 601",
"xvYCC 709",
- "sYCC",
+ "",
"BT.2020",
"BT.2020 Constant Luminance",
"SMPTE 240M",
@@ -773,6 +773,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_ycbcr_enc = {
.id = VIVID_CID_YCBCR_ENC,
.name = "Y'CbCr Encoding",
.type = V4L2_CTRL_TYPE_MENU,
+ .menu_skip_mask = 1 << 5,
.max = ARRAY_SIZE(vivid_ctrl_ycbcr_enc_strings) - 2,
.qmenu = vivid_ctrl_ycbcr_enc_strings,
};
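
The blanked string and the new menu_skip_mask work together: in a V4L2 menu control, setting bit i of the skip mask hides menu index i from enumeration while the indices of the remaining entries stay stable, so existing applications keep working. A hedged sketch of the mask arithmetic, using a hypothetical menu rather than the vivid strings:

#include <stdio.h>

int main(void)
{
	/* A hypothetical eight-entry menu; entry 5 has been retired. */
	const char *menu[8] = {
		"entry 0", "entry 1", "entry 2", "entry 3",
		"entry 4", "", "entry 6", "entry 7",
	};
	/* Bit i set in the skip mask hides menu index i from
	 * enumeration; the other indices are unchanged. */
	unsigned long skip_mask = 1UL << 5;
	unsigned int i;

	for (i = 0; i < 8; i++) {
		if (skip_mask & (1UL << i))
			continue;
		printf("%u: %s\n", i, menu[i]);
	}
	return 0;
}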
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index d404a7ce33a4..d5c84ecf2027 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -823,7 +823,7 @@ int vivid_vid_cap_g_selection(struct file *file, void *priv,
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (vivid_is_webcam(dev))
- return -EINVAL;
+ return -ENODATA;
sel->r.left = sel->r.top = 0;
switch (sel->target) {
@@ -872,7 +872,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (vivid_is_webcam(dev))
- return -EINVAL;
+ return -ENODATA;
switch (s->target) {
case V4L2_SEL_TGT_CROP:
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 06a2ec7e5ad4..b23fa879a9aa 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -53,6 +53,7 @@ struct vsp1_uds;
struct vsp1_device_info {
u32 version;
+ const char *model;
unsigned int gen;
unsigned int features;
unsigned int rpf_count;
@@ -65,6 +66,7 @@ struct vsp1_device_info {
struct vsp1_device {
struct device *dev;
const struct vsp1_device_info *info;
+ u32 version;
void __iomem *mmio;
struct rcar_fcp_device *fcp;
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index 8268b87727a7..ee8355c28f94 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -142,10 +142,15 @@ static int bru_set_format(struct v4l2_subdev *subdev,
struct vsp1_bru *bru = to_bru(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&bru->entity.lock);
config = vsp1_entity_get_pad_config(&bru->entity, cfg, fmt->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
bru_try_format(bru, config, fmt->pad, &fmt->format);
@@ -174,7 +179,9 @@ static int bru_set_format(struct v4l2_subdev *subdev,
}
}
- return 0;
+done:
+ mutex_unlock(&bru->entity.lock);
+ return ret;
}
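
This hunk introduces the locking pattern used throughout the series: take the entity mutex before fetching the pad configuration, and funnel every exit through a single done label so the unlock can never be skipped. A generic standalone sketch of the idiom (pthread-based, with hypothetical names):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int have_config; /* stands in for vsp1_entity_get_pad_config() */

static int set_format(int request_valid)
{
	int ret = 0;

	pthread_mutex_lock(&lock);

	/* Every failure path jumps to done so the unlock always runs. */
	if (!have_config) {
		ret = -EINVAL;
		goto done;
	}

	if (!request_valid) {
		ret = -EINVAL;
		goto done;
	}

	/* ... apply the format under the lock ... */

done:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("no config: %d\n", set_format(1));
	have_config = 1;
	printf("ok: %d\n", set_format(1));
	return 0;
}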
static int bru_get_selection(struct v4l2_subdev *subdev,
@@ -201,7 +208,9 @@ static int bru_get_selection(struct v4l2_subdev *subdev,
if (!config)
return -EINVAL;
+ mutex_lock(&bru->entity.lock);
sel->r = *bru_get_compose(bru, config, sel->pad);
+ mutex_unlock(&bru->entity.lock);
return 0;
default:
@@ -217,6 +226,7 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *compose;
+ int ret = 0;
if (sel->pad == bru->entity.source_pad)
return -EINVAL;
@@ -224,11 +234,16 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_COMPOSE)
return -EINVAL;
+ mutex_lock(&bru->entity.lock);
+
config = vsp1_entity_get_pad_config(&bru->entity, cfg, sel->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
- /* The compose rectangle top left corner must be inside the output
+ /*
+ * The compose rectangle top left corner must be inside the output
* frame.
*/
format = vsp1_entity_get_pad_format(&bru->entity, config,
@@ -246,7 +261,9 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
compose = bru_get_compose(bru, config, sel->pad);
*compose = sel->r;
- return 0;
+done:
+ mutex_unlock(&bru->entity.lock);
+ return ret;
}
static const struct v4l2_subdev_pad_ops bru_pad_ops = {
@@ -269,14 +286,15 @@ static const struct v4l2_subdev_ops bru_ops = {
static void bru_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl, bool full)
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
{
struct vsp1_bru *bru = to_bru(&entity->subdev);
struct v4l2_mbus_framefmt *format;
unsigned int flags;
unsigned int i;
- if (!full)
+ if (params != VSP1_ENTITY_PARAMS_INIT)
return;
format = vsp1_entity_get_pad_format(&bru->entity, bru->entity.config,
diff --git a/drivers/media/platform/vsp1/vsp1_clu.c b/drivers/media/platform/vsp1/vsp1_clu.c
index b63d2dbe5ea3..f2fb26e5ab4e 100644
--- a/drivers/media/platform/vsp1/vsp1_clu.c
+++ b/drivers/media/platform/vsp1/vsp1_clu.c
@@ -148,10 +148,15 @@ static int clu_set_format(struct v4l2_subdev *subdev,
struct vsp1_clu *clu = to_clu(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&clu->entity.lock);
config = vsp1_entity_get_pad_config(&clu->entity, cfg, fmt->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
@@ -164,7 +169,7 @@ static int clu_set_format(struct v4l2_subdev *subdev,
if (fmt->pad == CLU_PAD_SOURCE) {
/* The CLU output format can't be modified. */
fmt->format = *format;
- return 0;
+ goto done;
}
format->code = fmt->format.code;
@@ -182,7 +187,9 @@ static int clu_set_format(struct v4l2_subdev *subdev,
CLU_PAD_SOURCE);
*format = fmt->format;
- return 0;
+done:
+ mutex_unlock(&clu->entity.lock);
+ return ret;
}
/* -----------------------------------------------------------------------------
@@ -207,42 +214,51 @@ static const struct v4l2_subdev_ops clu_ops = {
static void clu_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl, bool full)
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
{
struct vsp1_clu *clu = to_clu(&entity->subdev);
struct vsp1_dl_body *dlb;
unsigned long flags;
u32 ctrl = VI6_CLU_CTRL_AAI | VI6_CLU_CTRL_MVS | VI6_CLU_CTRL_EN;
- /* The format can't be changed during streaming, only verify it at
- * stream start and store the information internally for future partial
- * reconfiguration calls.
- */
- if (full) {
+ switch (params) {
+ case VSP1_ENTITY_PARAMS_INIT: {
+ /*
+ * The format can't be changed during streaming, so verify it once
+ * at setup time and store the information internally for future
+ * runtime configuration calls.
+ */
struct v4l2_mbus_framefmt *format;
format = vsp1_entity_get_pad_format(&clu->entity,
clu->entity.config,
CLU_PAD_SINK);
clu->yuv_mode = format->code == MEDIA_BUS_FMT_AYUV8_1X32;
- return;
+ break;
}
- /* 2D mode can only be used with the YCbCr pixel encoding. */
- if (clu->mode == V4L2_CID_VSP1_CLU_MODE_2D && clu->yuv_mode)
- ctrl |= VI6_CLU_CTRL_AX1I_2D | VI6_CLU_CTRL_AX2I_2D
- | VI6_CLU_CTRL_OS0_2D | VI6_CLU_CTRL_OS1_2D
- | VI6_CLU_CTRL_OS2_2D | VI6_CLU_CTRL_M2D;
+ case VSP1_ENTITY_PARAMS_PARTITION:
+ break;
+
+ case VSP1_ENTITY_PARAMS_RUNTIME:
+ /* 2D mode can only be used with the YCbCr pixel encoding. */
+ if (clu->mode == V4L2_CID_VSP1_CLU_MODE_2D && clu->yuv_mode)
+ ctrl |= VI6_CLU_CTRL_AX1I_2D | VI6_CLU_CTRL_AX2I_2D
+ | VI6_CLU_CTRL_OS0_2D | VI6_CLU_CTRL_OS1_2D
+ | VI6_CLU_CTRL_OS2_2D | VI6_CLU_CTRL_M2D;
- vsp1_clu_write(clu, dl, VI6_CLU_CTRL, ctrl);
+ vsp1_clu_write(clu, dl, VI6_CLU_CTRL, ctrl);
- spin_lock_irqsave(&clu->lock, flags);
- dlb = clu->clu;
- clu->clu = NULL;
- spin_unlock_irqrestore(&clu->lock, flags);
+ spin_lock_irqsave(&clu->lock, flags);
+ dlb = clu->clu;
+ clu->clu = NULL;
+ spin_unlock_irqrestore(&clu->lock, flags);
- if (dlb)
- vsp1_dl_list_add_fragment(dl, dlb);
+ if (dlb)
+ vsp1_dl_list_add_fragment(dl, dlb);
+ break;
+ }
}
static const struct vsp1_entity_operations clu_entity_ops = {
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 37c3518aa2a8..ad545aff4e35 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -21,7 +21,6 @@
#include "vsp1_dl.h"
#define VSP1_DL_NUM_ENTRIES 256
-#define VSP1_DL_NUM_LISTS 3
#define VSP1_DLH_INT_ENABLE (1 << 1)
#define VSP1_DLH_AUTO_START (1 << 0)
@@ -71,6 +70,7 @@ struct vsp1_dl_body {
* @dma: DMA address for the header
* @body0: first display list body
* @fragments: list of extra display list bodies
+ * @chain: entry in the display list partition chain
*/
struct vsp1_dl_list {
struct list_head list;
@@ -81,6 +81,9 @@ struct vsp1_dl_list {
struct vsp1_dl_body body0;
struct list_head fragments;
+
+ bool has_chain;
+ struct list_head chain;
};
enum vsp1_dl_mode {
@@ -262,7 +265,6 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
memset(dl->header, 0, sizeof(*dl->header));
dl->header->lists[0].addr = dl->body0.dma;
- dl->header->flags = VSP1_DLH_INT_ENABLE;
}
return dl;
@@ -293,6 +295,12 @@ struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
if (!list_empty(&dlm->free)) {
dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
list_del(&dl->list);
+
+ /*
+ * The display list chain must be initialised so that list_empty()
+ * is true for any display list that is not part of a chain.
+ */
+ INIT_LIST_HEAD(&dl->chain);
}
spin_unlock_irqrestore(&dlm->lock, flags);
@@ -303,10 +311,24 @@ struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
/* This function must be called with the display list manager lock held.*/
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
+ struct vsp1_dl_list *dl_child;
+
if (!dl)
return;
- /* We can't free fragments here as DMA memory can only be freed in
+ /*
+ * Release any linked display-lists which were chained for a single
+ * hardware operation.
+ */
+ if (dl->has_chain) {
+ list_for_each_entry(dl_child, &dl->chain, chain)
+ __vsp1_dl_list_put(dl_child);
+ }
+
+ dl->has_chain = false;
+
+ /*
+ * We can't free fragments here as DMA memory can only be freed in
* interruptible context. Move all fragments to the display list
* manager's list of fragments to be freed, they will be
* garbage-collected by the work queue.
@@ -383,6 +405,76 @@ int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
return 0;
}
+/**
+ * vsp1_dl_list_add_chain - Add a display list to a chain
+ * @head: The head display list
+ * @dl: The new display list
+ *
+ * Add a display list to an existing display list chain. The chained lists
+ * will be automatically processed by the hardware without intervention from
+ * the CPU. A display list end interrupt will only complete after the last
+ * display list in the chain has completed processing.
+ *
+ * Adding a display list to a chain passes ownership of the display list to
+ * the head display list item. The chain is released when the head dl item is
+ * put back with __vsp1_dl_list_put().
+ *
+ * Chained display lists are only usable in header mode. Attempts to add a
+ * display list to a chain in header-less mode will return an error.
+ */
+int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
+ struct vsp1_dl_list *dl)
+{
+ /* Chained lists are only available in header mode. */
+ if (head->dlm->mode != VSP1_DL_MODE_HEADER)
+ return -EINVAL;
+
+ head->has_chain = true;
+ list_add_tail(&dl->chain, &head->chain);
+ return 0;
+}
+
+static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
+{
+ struct vsp1_dl_header_list *hdr = dl->header->lists;
+ struct vsp1_dl_body *dlb;
+ unsigned int num_lists = 0;
+
+ /*
+ * Fill the header with the display list bodies' addresses and sizes.
+ * The address of the first body was already filled in when the display
+ * list was allocated.
+ */
+
+ hdr->num_bytes = dl->body0.num_entries
+ * sizeof(*dl->header->lists);
+
+ list_for_each_entry(dlb, &dl->fragments, list) {
+ num_lists++;
+ hdr++;
+
+ hdr->addr = dlb->dma;
+ hdr->num_bytes = dlb->num_entries
+ * sizeof(*dl->header->lists);
+ }
+
+ dl->header->num_lists = num_lists;
+
+ /*
+ * If this display list is part of a chain and is not its last entry,
+ * point the header at the next display list so that the hardware
+ * queues it automatically.
+ */
+ if (!list_empty(&dl->chain) && !is_last) {
+ struct vsp1_dl_list *next = list_next_entry(dl, chain);
+
+ dl->header->next_header = next->dma;
+ dl->header->flags = VSP1_DLH_AUTO_START;
+ } else {
+ dl->header->flags = VSP1_DLH_INT_ENABLE;
+ }
+}
+
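The header chaining works entirely in hardware-visible memory: every header except the last points at the DMA address of the next one and carries the auto-start flag, and only the tail header requests the frame-end interrupt. A simplified host-side sketch of that layout; the struct and flag values are illustrative, not the exact VSP1 register format:

#include <stdint.h>
#include <stdio.h>

#define DLH_INT_ENABLE (1 << 1)
#define DLH_AUTO_START (1 << 0)

struct dl_header {
	uint32_t next_header; /* DMA address of the next header, if chained */
	uint32_t flags;
};

/* Link an array of headers the way vsp1_dl_list_fill_header() does:
 * auto-start everything but the tail, interrupt only on the tail. */
static void fill_chain(struct dl_header *hdr, const uint32_t *dma, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (i < n - 1) {
			hdr[i].next_header = dma[i + 1];
			hdr[i].flags = DLH_AUTO_START;
		} else {
			hdr[i].next_header = 0;
			hdr[i].flags = DLH_INT_ENABLE;
		}
	}
}

int main(void)
{
	struct dl_header hdr[3];
	const uint32_t dma[3] = { 0x1000, 0x2000, 0x3000 };
	int i;

	fill_chain(hdr, dma, 3);
	for (i = 0; i < 3; i++)
		printf("hdr[%d]: next=0x%04x flags=%u\n",
		       i, hdr[i].next_header, hdr[i].flags);
	return 0;
}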
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
struct vsp1_dl_manager *dlm = dl->dlm;
@@ -393,30 +485,26 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
spin_lock_irqsave(&dlm->lock, flags);
if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
- struct vsp1_dl_header_list *hdr = dl->header->lists;
- struct vsp1_dl_body *dlb;
- unsigned int num_lists = 0;
+ struct vsp1_dl_list *dl_child;
- /* Fill the header with the display list bodies addresses and
- * sizes. The address of the first body has already been filled
- * when the display list was allocated.
- *
+ /*
* In header mode the caller guarantees that the hardware is
* idle at this point.
*/
- hdr->num_bytes = dl->body0.num_entries
- * sizeof(*dl->header->lists);
- list_for_each_entry(dlb, &dl->fragments, list) {
- num_lists++;
- hdr++;
+ /* Fill the header for the head and chained display lists. */
+ vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
- hdr->addr = dlb->dma;
- hdr->num_bytes = dlb->num_entries
- * sizeof(*dl->header->lists);
+ list_for_each_entry(dl_child, &dl->chain, chain) {
+ bool last = list_is_last(&dl_child->chain, &dl->chain);
+
+ vsp1_dl_list_fill_header(dl_child, last);
}
- dl->header->num_lists = num_lists;
+ /*
+ * Commit the head display list to hardware. Chained headers
+ * will auto-start.
+ */
vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
dlm->active = dl;
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h
index de387cd4d745..7131aa3c5978 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.h
+++ b/drivers/media/platform/vsp1/vsp1_dl.h
@@ -41,5 +41,6 @@ void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb);
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data);
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
struct vsp1_dl_body *dlb);
+int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, struct vsp1_dl_list *dl);
#endif /* __VSP1_DL_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index fe9665e57b3b..cd209dccff1b 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -276,17 +276,18 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
}
dev_dbg(vsp1->dev,
- "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad } zpos %u\n",
+ "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad, %pad } zpos %u\n",
__func__, rpf_index,
cfg->src.left, cfg->src.top, cfg->src.width, cfg->src.height,
cfg->dst.left, cfg->dst.top, cfg->dst.width, cfg->dst.height,
cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1],
- cfg->zpos);
+ &cfg->mem[2], cfg->zpos);
- /* Store the format, stride, memory buffer address, crop and compose
+ /*
+ * Store the format, stride, memory buffer address, crop and compose
+ * rectangles and Z-order position for the input.
*/
- fmtinfo = vsp1_get_format_info(cfg->pixelformat);
+ fmtinfo = vsp1_get_format_info(vsp1, cfg->pixelformat);
if (!fmtinfo) {
 dev_dbg(vsp1->dev, "Unsupported pixel format %08x for RPF\n",
cfg->pixelformat);
@@ -301,7 +302,7 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
rpf->mem.addr[0] = cfg->mem[0];
rpf->mem.addr[1] = cfg->mem[1];
- rpf->mem.addr[2] = 0;
+ rpf->mem.addr[2] = cfg->mem[2];
vsp1->drm->inputs[rpf_index].crop = cfg->src;
vsp1->drm->inputs[rpf_index].compose = cfg->dst;
@@ -492,16 +493,13 @@ void vsp1_du_atomic_flush(struct device *dev)
vsp1_entity_route_setup(entity, pipe->dl);
if (entity->ops->configure) {
- entity->ops->configure(entity, pipe, pipe->dl, true);
- entity->ops->configure(entity, pipe, pipe->dl, false);
+ entity->ops->configure(entity, pipe, pipe->dl,
+ VSP1_ENTITY_PARAMS_INIT);
+ entity->ops->configure(entity, pipe, pipe->dl,
+ VSP1_ENTITY_PARAMS_RUNTIME);
+ entity->ops->configure(entity, pipe, pipe->dl,
+ VSP1_ENTITY_PARAMS_PARTITION);
}
-
- /* The memory buffer address must be applied after configuring
- * the RPF to make sure the crop offset are computed.
- */
- if (entity->type == VSP1_ENTITY_RPF)
- vsp1_rwpf_set_memory(to_rwpf(&entity->subdev),
- pipe->dl);
}
vsp1_dl_list_commit(pipe->dl);
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index cc316d281687..57c713a4e1df 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -60,7 +60,7 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
- if (status & VI6_WFP_IRQ_STA_FRE) {
+ if (status & VI6_WFP_IRQ_STA_DFE) {
vsp1_pipeline_frame_end(wpf->pipe);
ret = IRQ_HANDLED;
}
@@ -220,7 +220,8 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
int ret;
mdev->dev = vsp1->dev;
- strlcpy(mdev->model, "VSP1", sizeof(mdev->model));
+ mdev->hw_revision = vsp1->version;
+ strlcpy(mdev->model, vsp1->info->model, sizeof(mdev->model));
snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
dev_name(mdev->dev));
media_device_init(mdev);
@@ -559,6 +560,7 @@ static const struct dev_pm_ops vsp1_pm_ops = {
static const struct vsp1_device_info vsp1_device_infos[] = {
{
.version = VI6_IP_VERSION_MODEL_VSPS_H2,
+ .model = "VSP1-S",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
| VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
@@ -569,6 +571,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPR_H2,
+ .model = "VSP1-R",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
@@ -578,6 +581,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
+ .model = "VSP1-D",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
.rpf_count = 4,
@@ -587,6 +591,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPS_M2,
+ .model = "VSP1-S",
.gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
| VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
@@ -596,7 +601,30 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.num_bru_inputs = 4,
.uapi = true,
}, {
+ .version = VI6_IP_VERSION_MODEL_VSPS_V2H,
+ .model = "VSP1V-S",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
+ | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 4,
+ .uds_count = 1,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V2H,
+ .model = "VSP1V-D",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
+ | VSP1_HAS_LIF,
+ .rpf_count = 4,
+ .uds_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
.version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
+ .model = "VSP2-I",
.gen = 3,
.features = VSP1_HAS_CLU | VSP1_HAS_LUT | VSP1_HAS_SRU
| VSP1_HAS_WPF_HFLIP | VSP1_HAS_WPF_VFLIP,
@@ -606,6 +634,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
+ .model = "VSP2-BD",
.gen = 3,
.features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
@@ -614,6 +643,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
+ .model = "VSP2-BC",
.gen = 3,
.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
| VSP1_HAS_WPF_VFLIP,
@@ -623,6 +653,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
+ .model = "VSP2-D",
.gen = 3,
.features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
@@ -638,7 +669,6 @@ static int vsp1_probe(struct platform_device *pdev)
struct resource *irq;
struct resource *io;
unsigned int i;
- u32 version;
int ret;
vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL);
@@ -689,11 +719,11 @@ static int vsp1_probe(struct platform_device *pdev)
if (ret < 0)
goto done;
- version = vsp1_read(vsp1, VI6_IP_VERSION);
+ vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
pm_runtime_put_sync(&pdev->dev);
for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
- if ((version & VI6_IP_VERSION_MODEL_MASK) ==
+ if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
vsp1_device_infos[i].version) {
vsp1->info = &vsp1_device_infos[i];
break;
@@ -701,12 +731,13 @@ static int vsp1_probe(struct platform_device *pdev)
}
if (!vsp1->info) {
- dev_err(&pdev->dev, "unsupported IP version 0x%08x\n", version);
+ dev_err(&pdev->dev, "unsupported IP version 0x%08x\n",
+ vsp1->version);
ret = -ENXIO;
goto done;
}
- dev_dbg(&pdev->dev, "IP version 0x%08x\n", version);
+ dev_dbg(&pdev->dev, "IP version 0x%08x\n", vsp1->version);
 /* Instantiate entities */
ret = vsp1_create_entities(vsp1);
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 4cf6cc719c00..da673495c222 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -51,6 +51,9 @@ void vsp1_entity_route_setup(struct vsp1_entity *source,
* @cfg: the TRY pad configuration
* @which: configuration selector (ACTIVE or TRY)
*
+ * When called with which set to V4L2_SUBDEV_FORMAT_ACTIVE the caller must hold
+ * the entity lock to access the returned configuration.
+ *
* Return the pad configuration requested by the which argument. The TRY
* configuration is passed explicitly to the function through the cfg argument
* and simply returned when requested. The ACTIVE configuration comes from the
@@ -160,7 +163,9 @@ int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
if (!config)
return -EINVAL;
+ mutex_lock(&entity->lock);
fmt->format = *vsp1_entity_get_pad_format(entity, config, fmt->pad);
+ mutex_unlock(&entity->lock);
return 0;
}
@@ -204,8 +209,10 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
if (!config)
return -EINVAL;
+ mutex_lock(&entity->lock);
format = vsp1_entity_get_pad_format(entity, config, 0);
code->code = format->code;
+ mutex_unlock(&entity->lock);
}
return 0;
@@ -235,6 +242,7 @@ int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
struct vsp1_entity *entity = to_vsp1_entity(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
config = vsp1_entity_get_pad_config(entity, cfg, fse->which);
if (!config)
@@ -242,8 +250,12 @@ int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
format = vsp1_entity_get_pad_format(entity, config, fse->pad);
- if (fse->index || fse->code != format->code)
- return -EINVAL;
+ mutex_lock(&entity->lock);
+
+ if (fse->index || fse->code != format->code) {
+ ret = -EINVAL;
+ goto done;
+ }
if (fse->pad == 0) {
fse->min_width = min_width;
@@ -260,7 +272,9 @@ int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
fse->max_height = format->height;
}
- return 0;
+done:
+ mutex_unlock(&entity->lock);
+ return ret;
}
/* -----------------------------------------------------------------------------
@@ -358,6 +372,8 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
if (i == ARRAY_SIZE(vsp1_routes))
return -EINVAL;
+ mutex_init(&entity->lock);
+
entity->vsp1 = vsp1;
entity->source_pad = num_pads - 1;
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index b43457fd2c43..901146f807b9 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -14,7 +14,7 @@
#define __VSP1_ENTITY_H__
#include <linux/list.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <media/v4l2-subdev.h>
@@ -35,6 +35,18 @@ enum vsp1_entity_type {
VSP1_ENTITY_WPF,
};
+/**
+ * enum vsp1_entity_params - Entity configuration parameters class
+ * @VSP1_ENTITY_PARAMS_INIT - Initial parameters
+ * @VSP1_ENTITY_PARAMS_PARTITION - Per-image partition parameters
+ * @VSP1_ENTITY_PARAMS_RUNTIME - Runtime-configurable parameters
+ */
+enum vsp1_entity_params {
+ VSP1_ENTITY_PARAMS_INIT,
+ VSP1_ENTITY_PARAMS_PARTITION,
+ VSP1_ENTITY_PARAMS_RUNTIME,
+};
+
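The three-state enum replaces the old bool full argument, so a configure callback can dispatch per parameter class and callers invoke it once per class. A hedged standalone sketch of such a dispatcher, with hypothetical names:

#include <stdio.h>

enum entity_params {
	PARAMS_INIT,      /* stream-start-only setup */
	PARAMS_PARTITION, /* per-image-partition setup */
	PARAMS_RUNTIME,   /* per-frame adjustable setup */
};

static void demo_configure(enum entity_params params)
{
	switch (params) {
	case PARAMS_INIT:
		puts("write stream-invariant registers");
		break;
	case PARAMS_PARTITION:
		puts("write partition window registers");
		break;
	case PARAMS_RUNTIME:
		puts("write per-frame registers");
		break;
	}
}

int main(void)
{
	/* Callers run each class explicitly, in the order that
	 * vsp1_du_atomic_flush() and the video pipeline use. */
	demo_configure(PARAMS_INIT);
	demo_configure(PARAMS_RUNTIME);
	demo_configure(PARAMS_PARTITION);
	return 0;
}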
#define VSP1_ENTITY_MAX_INPUTS 5 /* For the BRU */
/*
@@ -63,17 +75,16 @@ struct vsp1_route {
/**
* struct vsp1_entity_operations - Entity operations
* @destroy: Destroy the entity.
- * @set_memory: Setup memory buffer access. This operation applies the settings
- * stored in the rwpf mem field to the display list. Valid for RPF
- * and WPF only.
* @configure: Setup the hardware based on the entity state (pipeline, formats,
* selection rectangles, ...)
+ * @max_width: Return the max supported width of data that the entity can
+ * process in a single operation.
*/
struct vsp1_entity_operations {
void (*destroy)(struct vsp1_entity *);
- void (*set_memory)(struct vsp1_entity *, struct vsp1_dl_list *dl);
void (*configure)(struct vsp1_entity *, struct vsp1_pipeline *,
- struct vsp1_dl_list *, bool);
+ struct vsp1_dl_list *, enum vsp1_entity_params);
+ unsigned int (*max_width)(struct vsp1_entity *, struct vsp1_pipeline *);
};
struct vsp1_entity {
@@ -96,6 +107,8 @@ struct vsp1_entity {
struct v4l2_subdev subdev;
struct v4l2_subdev_pad_config *config;
+
+ struct mutex lock; /* Protects the pad config */
};
static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index 6e5077beb38c..94316afc54ff 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -71,10 +71,15 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
struct vsp1_hsit *hsit = to_hsit(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&hsit->entity.lock);
config = vsp1_entity_get_pad_config(&hsit->entity, cfg, fmt->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
format = vsp1_entity_get_pad_format(&hsit->entity, config, fmt->pad);
@@ -83,7 +88,7 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
* modified.
*/
fmt->format = *format;
- return 0;
+ goto done;
}
format->code = hsit->inverse ? MEDIA_BUS_FMT_AHSV8888_1X32
@@ -104,7 +109,9 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
: MEDIA_BUS_FMT_AHSV8888_1X32;
- return 0;
+done:
+ mutex_unlock(&hsit->entity.lock);
+ return ret;
}
static const struct v4l2_subdev_pad_ops hsit_pad_ops = {
@@ -125,11 +132,12 @@ static const struct v4l2_subdev_ops hsit_ops = {
static void hsit_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl, bool full)
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
{
struct vsp1_hsit *hsit = to_hsit(&entity->subdev);
- if (!full)
+ if (params != VSP1_ENTITY_PARAMS_INIT)
return;
if (hsit->inverse)
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index a720063f38c5..e32acae1fc6e 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -66,10 +66,15 @@ static int lif_set_format(struct v4l2_subdev *subdev,
struct vsp1_lif *lif = to_lif(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&lif->entity.lock);
config = vsp1_entity_get_pad_config(&lif->entity, cfg, fmt->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
@@ -83,7 +88,7 @@ static int lif_set_format(struct v4l2_subdev *subdev,
* format.
*/
fmt->format = *format;
- return 0;
+ goto done;
}
format->code = fmt->format.code;
@@ -101,7 +106,9 @@ static int lif_set_format(struct v4l2_subdev *subdev,
LIF_PAD_SOURCE);
*format = fmt->format;
- return 0;
+done:
+ mutex_unlock(&lif->entity.lock);
+ return ret;
}
static const struct v4l2_subdev_pad_ops lif_pad_ops = {
@@ -122,7 +129,8 @@ static const struct v4l2_subdev_ops lif_ops = {
static void lif_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl, bool full)
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
{
const struct v4l2_mbus_framefmt *format;
struct vsp1_lif *lif = to_lif(&entity->subdev);
@@ -130,7 +138,7 @@ static void lif_configure(struct vsp1_entity *entity,
unsigned int obth = 400;
unsigned int lbth = 200;
- if (!full)
+ if (params != VSP1_ENTITY_PARAMS_INIT)
return;
format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index dc31de9602ba..c67cc60db0db 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -124,10 +124,15 @@ static int lut_set_format(struct v4l2_subdev *subdev,
struct vsp1_lut *lut = to_lut(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&lut->entity.lock);
config = vsp1_entity_get_pad_config(&lut->entity, cfg, fmt->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
@@ -140,7 +145,7 @@ static int lut_set_format(struct v4l2_subdev *subdev,
if (fmt->pad == LUT_PAD_SOURCE) {
/* The LUT output format can't be modified. */
fmt->format = *format;
- return 0;
+ goto done;
}
format->code = fmt->format.code;
@@ -158,7 +163,9 @@ static int lut_set_format(struct v4l2_subdev *subdev,
LUT_PAD_SOURCE);
*format = fmt->format;
- return 0;
+done:
+ mutex_unlock(&lut->entity.lock);
+ return ret;
}
/* -----------------------------------------------------------------------------
@@ -183,24 +190,31 @@ static const struct v4l2_subdev_ops lut_ops = {
static void lut_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl, bool full)
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
{
struct vsp1_lut *lut = to_lut(&entity->subdev);
struct vsp1_dl_body *dlb;
unsigned long flags;
- if (full) {
+ switch (params) {
+ case VSP1_ENTITY_PARAMS_INIT:
vsp1_lut_write(lut, dl, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
- return;
- }
+ break;
- spin_lock_irqsave(&lut->lock, flags);
- dlb = lut->lut;
- lut->lut = NULL;
- spin_unlock_irqrestore(&lut->lock, flags);
+ case VSP1_ENTITY_PARAMS_PARTITION:
+ break;
+
+ case VSP1_ENTITY_PARAMS_RUNTIME:
+ spin_lock_irqsave(&lut->lock, flags);
+ dlb = lut->lut;
+ lut->lut = NULL;
+ spin_unlock_irqrestore(&lut->lock, flags);
- if (dlb)
- vsp1_dl_list_add_fragment(dl, dlb);
+ if (dlb)
+ vsp1_dl_list_add_fragment(dl, dlb);
+ break;
+ }
}
static const struct vsp1_entity_operations lut_entity_ops = {
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 3e75fb3fcace..756ca4ea7668 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -136,17 +136,23 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
3, { 8, 8, 8 }, false, true, 1, 1, false },
};
-/*
+/**
* vsp1_get_format_info - Retrieve format information for a 4CC
+ * @vsp1: the VSP1 device
* @fourcc: the format 4CC
*
* Return a pointer to the format information structure corresponding to the
* given V4L2 format 4CC, or NULL if no corresponding format can be found.
*/
-const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc)
+const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
+ u32 fourcc)
{
unsigned int i;
+ /* Special case, the VYUY format is supported on Gen2 only. */
+ if (vsp1->info->gen != 2 && fourcc == V4L2_PIX_FMT_VYUY)
+ return NULL;
+
for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
const struct vsp1_format_info *info = &vsp1_video_formats[i];
@@ -365,6 +371,7 @@ void vsp1_pipelines_suspend(struct vsp1_device *vsp1)
void vsp1_pipelines_resume(struct vsp1_device *vsp1)
{
+ unsigned long flags;
unsigned int i;
/* Resume all running pipelines. */
@@ -379,7 +386,9 @@ void vsp1_pipelines_resume(struct vsp1_device *vsp1)
if (pipe == NULL)
continue;
+ spin_lock_irqsave(&pipe->irqlock, flags);
if (vsp1_pipeline_ready(pipe))
vsp1_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
}
}
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
index d20d997b1fda..ac4ad2655551 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -77,6 +77,9 @@ enum vsp1_pipeline_state {
* @uds_input: entity at the input of the UDS, if the UDS is present
* @entities: list of entities in the pipeline
* @dl: display list associated with the pipeline
+ * @div_size: The maximum allowed partition size for the pipeline
+ * @partitions: The number of partitions used to process one frame
+ * @current_partition: The partition number currently being configured
*/
struct vsp1_pipeline {
struct media_pipeline pipe;
@@ -104,6 +107,11 @@ struct vsp1_pipeline {
struct list_head entities;
struct vsp1_dl_list *dl;
+
+ unsigned int div_size;
+ unsigned int partitions;
+ struct v4l2_rect partition;
+ unsigned int current_partition;
};
void vsp1_pipeline_reset(struct vsp1_pipeline *pipe);
@@ -122,6 +130,7 @@ void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
void vsp1_pipelines_suspend(struct vsp1_device *vsp1);
void vsp1_pipelines_resume(struct vsp1_device *vsp1);
-const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc);
+const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
+ u32 fourcc);
#endif /* __VSP1_PIPE_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 3b03007ba625..47b1dee044fb 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -660,6 +660,8 @@
#define VI6_IP_VERSION_MODEL_VSPR_H2 (0x0a << 8)
#define VI6_IP_VERSION_MODEL_VSPD_GEN2 (0x0b << 8)
#define VI6_IP_VERSION_MODEL_VSPS_M2 (0x0c << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_V2H (0x12 << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_V2H (0x13 << 8)
#define VI6_IP_VERSION_MODEL_VSPI_GEN3 (0x14 << 8)
#define VI6_IP_VERSION_MODEL_VSPBD_GEN3 (0x15 << 8)
#define VI6_IP_VERSION_MODEL_VSPBC_GEN3 (0x16 << 8)
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 388838913205..b2e34a800ffa 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -46,34 +46,22 @@ static const struct v4l2_subdev_ops rpf_ops = {
* VSP1 Entity Operations
*/
-static void rpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
-{
- struct vsp1_rwpf *rpf = entity_to_rwpf(entity);
-
- vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_Y,
- rpf->mem.addr[0] + rpf->offsets[0]);
- vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C0,
- rpf->mem.addr[1] + rpf->offsets[1]);
- vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C1,
- rpf->mem.addr[2] + rpf->offsets[1]);
-}
-
static void rpf_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl, bool full)
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
{
struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
const struct v4l2_pix_format_mplane *format = &rpf->format;
const struct v4l2_mbus_framefmt *source_format;
const struct v4l2_mbus_framefmt *sink_format;
- const struct v4l2_rect *crop;
unsigned int left = 0;
unsigned int top = 0;
u32 pstride;
u32 infmt;
- if (!full) {
+ if (params == VSP1_ENTITY_PARAMS_RUNTIME) {
vsp1_rpf_write(rpf, dl, VI6_RPF_VRTCOL_SET,
rpf->alpha << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
vsp1_rpf_write(rpf, dl, VI6_RPF_MULT_ALPHA, rpf->mult_alpha |
@@ -83,34 +71,80 @@ static void rpf_configure(struct vsp1_entity *entity,
return;
}
- /* Source size, stride and crop offsets.
- *
- * The crop offsets correspond to the location of the crop rectangle top
- * left corner in the plane buffer. Only two offsets are needed, as
- * planes 2 and 3 always have identical strides.
- */
- crop = vsp1_rwpf_get_crop(rpf, rpf->entity.config);
+ if (params == VSP1_ENTITY_PARAMS_PARTITION) {
+ unsigned int offsets[2];
+ struct v4l2_rect crop;
+
+ /*
+ * Source size and crop offsets.
+ *
+ * The crop offsets correspond to the location of the crop
+ * rectangle top left corner in the plane buffer. Only two
+ * offsets are needed, as planes 2 and 3 always have identical
+ * strides.
+ */
+ crop = *vsp1_rwpf_get_crop(rpf, rpf->entity.config);
+
+ /*
+ * Partition Algorithm Control
+ *
+ * The partition algorithm can split this frame into multiple
+ * slices. We must scale our partition window based on the pipe
+ * configuration to match the destination partition window.
+ * To achieve this, we adjust our crop to provide a 'sub-crop'
+ * matching the expected partition window. Only 'left' and
+ * 'width' need to be adjusted.
+ */
+ if (pipe->partitions > 1) {
+ const struct v4l2_mbus_framefmt *output;
+ struct vsp1_entity *wpf = &pipe->output->entity;
+ unsigned int input_width = crop.width;
+
+ /*
+ * Scale the partition window based on the configuration
+ * of the pipeline.
+ */
+ output = vsp1_entity_get_pad_format(wpf, wpf->config,
+ RWPF_PAD_SOURCE);
- vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_BSIZE,
- (crop->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
- (crop->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
- vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_ESIZE,
- (crop->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
- (crop->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+ crop.width = pipe->partition.width * input_width
+ / output->width;
+ crop.left += pipe->partition.left * input_width
+ / output->width;
+ }
+
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_BSIZE,
+ (crop.width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
+ (crop.height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_ESIZE,
+ (crop.width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
+ (crop.height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+
+ offsets[0] = crop.top * format->plane_fmt[0].bytesperline
+ + crop.left * fmtinfo->bpp[0] / 8;
+
+ if (format->num_planes > 1)
+ offsets[1] = crop.top * format->plane_fmt[1].bytesperline
+ + crop.left / fmtinfo->hsub
+ * fmtinfo->bpp[1] / 8;
+ else
+ offsets[1] = 0;
+
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_Y,
+ rpf->mem.addr[0] + offsets[0]);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C0,
+ rpf->mem.addr[1] + offsets[1]);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C1,
+ rpf->mem.addr[2] + offsets[1]);
+ return;
+ }
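
The sub-crop arithmetic above maps the destination partition window back into source coordinates. For example, a 1024-pixel-wide crop feeding a 2048-pixel-wide WPF output turns a destination window of left 512, width 512 into a source sub-crop of left += 256, width 256. A small standalone sketch of that mapping (hypothetical names):

#include <stdio.h>

/* Map a destination partition window into source crop coordinates,
 * mirroring the ratio used in rpf_configure(). */
static void scale_partition(unsigned int in_width, unsigned int out_width,
			    unsigned int part_left, unsigned int part_width,
			    unsigned int *crop_left, unsigned int *crop_width)
{
	*crop_width = part_width * in_width / out_width;
	*crop_left += part_left * in_width / out_width;
}

int main(void)
{
	unsigned int left = 0, width;

	/* 1024-wide source upscaled to 2048; second 512-wide partition. */
	scale_partition(1024, 2048, 512, 512, &left, &width);
	printf("sub-crop: left=%u width=%u\n", left, width); /* 256, 256 */
	return 0;
}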
- rpf->offsets[0] = crop->top * format->plane_fmt[0].bytesperline
- + crop->left * fmtinfo->bpp[0] / 8;
+ /* Stride */
pstride = format->plane_fmt[0].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
-
- if (format->num_planes > 1) {
- rpf->offsets[1] = crop->top * format->plane_fmt[1].bytesperline
- + crop->left * fmtinfo->bpp[1] / 8;
+ if (format->num_planes > 1)
pstride |= format->plane_fmt[1].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
- } else {
- rpf->offsets[1] = 0;
- }
vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_PSTRIDE, pstride);
@@ -215,7 +249,6 @@ static void rpf_configure(struct vsp1_entity *entity,
}
static const struct vsp1_entity_operations rpf_entity_ops = {
- .set_memory = rpf_set_memory,
.configure = rpf_configure,
};
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 8d461b375e91..66e4d7ea31d6 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -66,11 +66,15 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
- struct v4l2_rect *crop;
+ int ret = 0;
+
+ mutex_lock(&rwpf->entity.lock);
config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, fmt->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
@@ -85,7 +89,7 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
*/
format->code = fmt->format.code;
fmt->format = *format;
- return 0;
+ goto done;
}
format->code = fmt->format.code;
@@ -98,19 +102,25 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
fmt->format = *format;
- /* Update the sink crop rectangle. */
- crop = vsp1_rwpf_get_crop(rwpf, config);
- crop->left = 0;
- crop->top = 0;
- crop->width = fmt->format.width;
- crop->height = fmt->format.height;
+ if (rwpf->entity.type == VSP1_ENTITY_RPF) {
+ struct v4l2_rect *crop;
+
+ /* Update the sink crop rectangle. */
+ crop = vsp1_rwpf_get_crop(rwpf, config);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+ }
/* Propagate the format to the source pad. */
format = vsp1_entity_get_pad_format(&rwpf->entity, config,
RWPF_PAD_SOURCE);
*format = fmt->format;
- return 0;
+done:
+ mutex_unlock(&rwpf->entity.lock);
+ return ret;
}
static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
@@ -120,14 +130,22 @@ static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
- /* Cropping is implemented on the sink pad. */
- if (sel->pad != RWPF_PAD_SINK)
+ /*
+ * Cropping is only supported on the RPF and is implemented on the sink
+ * pad.
+ */
+ if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK)
return -EINVAL;
+ mutex_lock(&rwpf->entity.lock);
+
config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
@@ -144,10 +162,13 @@ static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- return 0;
+done:
+ mutex_unlock(&rwpf->entity.lock);
+ return ret;
}
static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
@@ -158,21 +179,27 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
+ int ret = 0;
- /* Cropping is implemented on the sink pad. */
- if (sel->pad != RWPF_PAD_SINK)
+ /*
+ * Cropping is only supported on the RPF and is implemented on the sink
+ * pad.
+ */
+ if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK)
return -EINVAL;
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
+ mutex_lock(&rwpf->entity.lock);
+
config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
- /* Make sure the crop rectangle is entirely contained in the image. The
- * WPF top and left offsets are limited to 255.
- */
+ /* Make sure the crop rectangle is entirely contained in the image. */
format = vsp1_entity_get_pad_format(&rwpf->entity, config,
RWPF_PAD_SINK);
@@ -188,10 +215,6 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
- if (rwpf->entity.type == VSP1_ENTITY_WPF) {
- sel->r.left = min_t(unsigned int, sel->r.left, 255);
- sel->r.top = min_t(unsigned int, sel->r.top, 255);
- }
sel->r.width = min_t(unsigned int, sel->r.width,
format->width - sel->r.left);
sel->r.height = min_t(unsigned int, sel->r.height,
@@ -206,7 +229,9 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
format->width = crop->width;
format->height = crop->height;
- return 0;
+done:
+ mutex_unlock(&rwpf->entity.lock);
+ return ret;
}
const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index cb20484e80da..1c98aff3da5d 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -61,7 +61,6 @@ struct vsp1_rwpf {
unsigned int active;
} flip;
- unsigned int offsets[2];
struct vsp1_rwpf_memory mem;
struct vsp1_dl_manager *dlm;
@@ -86,17 +85,5 @@ extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
struct v4l2_subdev_pad_config *config);
-/**
- * vsp1_rwpf_set_memory - Configure DMA addresses for a [RW]PF
- * @rwpf: the [RW]PF instance
- * @dl: the display list
- *
- * This function applies the cached memory buffer address to the display list.
- */
-static inline void vsp1_rwpf_set_memory(struct vsp1_rwpf *rwpf,
- struct vsp1_dl_list *dl)
-{
- rwpf->entity.ops->set_memory(&rwpf->entity, dl);
-}
#endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index 47f5e0cea2ce..b4e568a3b4ed 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -128,6 +128,7 @@ static int sru_enum_frame_size(struct v4l2_subdev *subdev,
struct vsp1_sru *sru = to_sru(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
config = vsp1_entity_get_pad_config(&sru->entity, cfg, fse->which);
if (!config)
@@ -135,8 +136,12 @@ static int sru_enum_frame_size(struct v4l2_subdev *subdev,
format = vsp1_entity_get_pad_format(&sru->entity, config, SRU_PAD_SINK);
- if (fse->index || fse->code != format->code)
- return -EINVAL;
+ mutex_lock(&sru->entity.lock);
+
+ if (fse->index || fse->code != format->code) {
+ ret = -EINVAL;
+ goto done;
+ }
if (fse->pad == SRU_PAD_SINK) {
fse->min_width = SRU_MIN_SIZE;
@@ -156,7 +161,9 @@ static int sru_enum_frame_size(struct v4l2_subdev *subdev,
}
}
- return 0;
+done:
+ mutex_unlock(&sru->entity.lock);
+ return ret;
}
static void sru_try_format(struct vsp1_sru *sru,
@@ -217,10 +224,15 @@ static int sru_set_format(struct v4l2_subdev *subdev,
struct vsp1_sru *sru = to_sru(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&sru->entity.lock);
config = vsp1_entity_get_pad_config(&sru->entity, cfg, fmt->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
sru_try_format(sru, config, fmt->pad, &fmt->format);
@@ -236,7 +248,9 @@ static int sru_set_format(struct v4l2_subdev *subdev,
sru_try_format(sru, config, SRU_PAD_SOURCE, format);
}
- return 0;
+done:
+ mutex_unlock(&sru->entity.lock);
+ return ret;
}
static const struct v4l2_subdev_pad_ops sru_pad_ops = {
@@ -257,7 +271,8 @@ static const struct v4l2_subdev_ops sru_ops = {
static void sru_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl, bool full)
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
{
const struct vsp1_sru_param *param;
struct vsp1_sru *sru = to_sru(&entity->subdev);
@@ -265,7 +280,7 @@ static void sru_configure(struct vsp1_entity *entity,
struct v4l2_mbus_framefmt *output;
u32 ctrl0;
- if (!full)
+ if (params != VSP1_ENTITY_PARAMS_INIT)
return;
input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
@@ -291,8 +306,27 @@ static void sru_configure(struct vsp1_entity *entity,
vsp1_sru_write(sru, dl, VI6_SRU_CTRL2, param->ctrl2);
}
+static unsigned int sru_max_width(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_sru *sru = to_sru(&entity->subdev);
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SOURCE);
+
+ if (input->width != output->width)
+ return 512;
+ else
+ return 256;
+}
+
static const struct vsp1_entity_operations sru_entity_ops = {
.configure = sru_configure,
+ .max_width = sru_max_width,
};
/* -----------------------------------------------------------------------------
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index 652dcd895022..da8f89a31ea4 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -18,6 +18,7 @@
#include "vsp1.h"
#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
#include "vsp1_uds.h"
#define UDS_MIN_SIZE 4U
@@ -133,6 +134,7 @@ static int uds_enum_frame_size(struct v4l2_subdev *subdev,
struct vsp1_uds *uds = to_uds(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
config = vsp1_entity_get_pad_config(&uds->entity, cfg, fse->which);
if (!config)
@@ -141,8 +143,12 @@ static int uds_enum_frame_size(struct v4l2_subdev *subdev,
format = vsp1_entity_get_pad_format(&uds->entity, config,
UDS_PAD_SINK);
- if (fse->index || fse->code != format->code)
- return -EINVAL;
+ mutex_lock(&uds->entity.lock);
+
+ if (fse->index || fse->code != format->code) {
+ ret = -EINVAL;
+ goto done;
+ }
if (fse->pad == UDS_PAD_SINK) {
fse->min_width = UDS_MIN_SIZE;
@@ -156,7 +162,9 @@ static int uds_enum_frame_size(struct v4l2_subdev *subdev,
&fse->max_height);
}
- return 0;
+done:
+ mutex_unlock(&uds->entity.lock);
+ return ret;
}
static void uds_try_format(struct vsp1_uds *uds,
@@ -202,10 +210,15 @@ static int uds_set_format(struct v4l2_subdev *subdev,
struct vsp1_uds *uds = to_uds(subdev);
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&uds->entity.lock);
config = vsp1_entity_get_pad_config(&uds->entity, cfg, fmt->which);
- if (!config)
- return -EINVAL;
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
uds_try_format(uds, config, fmt->pad, &fmt->format);
@@ -221,7 +234,9 @@ static int uds_set_format(struct v4l2_subdev *subdev,
uds_try_format(uds, config, UDS_PAD_SOURCE, format);
}
- return 0;
+done:
+ mutex_unlock(&uds->entity.lock);
+ return ret;
}
/* -----------------------------------------------------------------------------
@@ -246,7 +261,8 @@ static const struct v4l2_subdev_ops uds_ops = {
static void uds_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl, bool full)
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
{
struct vsp1_uds *uds = to_uds(&entity->subdev);
const struct v4l2_mbus_framefmt *output;
@@ -255,7 +271,16 @@ static void uds_configure(struct vsp1_entity *entity,
unsigned int vscale;
bool multitap;
- if (!full)
+ if (params == VSP1_ENTITY_PARAMS_PARTITION) {
+ const struct v4l2_rect *clip = &pipe->partition;
+
+ vsp1_uds_write(uds, dl, VI6_UDS_CLIP_SIZE,
+ (clip->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
+ (clip->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
+ return;
+ }
+
+ if (params != VSP1_ENTITY_PARAMS_INIT)
return;
input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
@@ -287,17 +312,39 @@ static void uds_configure(struct vsp1_entity *entity,
(uds_passband_width(vscale)
<< VI6_UDS_PASS_BWIDTH_V_SHIFT));
- /* Set the scaling ratios and the output size. */
+ /* Set the scaling ratios. */
vsp1_uds_write(uds, dl, VI6_UDS_SCALE,
(hscale << VI6_UDS_SCALE_HFRAC_SHIFT) |
(vscale << VI6_UDS_SCALE_VFRAC_SHIFT));
- vsp1_uds_write(uds, dl, VI6_UDS_CLIP_SIZE,
- (output->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
- (output->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
+}
+
+static unsigned int uds_max_width(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ const struct v4l2_mbus_framefmt *output;
+ const struct v4l2_mbus_framefmt *input;
+ unsigned int hscale;
+
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+ hscale = output->width / input->width;
+
+ if (hscale <= 2)
+ return 256;
+ else if (hscale <= 4)
+ return 512;
+ else if (hscale <= 8)
+ return 1024;
+ else
+ return 2048;
}
static const struct vsp1_entity_operations uds_entity_ops = {
.configure = uds_configure,
+ .max_width = uds_max_width,
};
/* -----------------------------------------------------------------------------
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 9fb4fc26a359..d351b9c768d2 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -117,9 +117,9 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
/* Retrieve format information and select the default format if the
* requested format isn't supported.
*/
- info = vsp1_get_format_info(pix->pixelformat);
+ info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
if (info == NULL)
- info = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT);
+ info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);
pix->pixelformat = info->fourcc;
pix->colorspace = V4L2_COLORSPACE_SRGB;
@@ -169,6 +169,113 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
}
/* -----------------------------------------------------------------------------
+ * VSP1 Partition Algorithm support
+ */
+
+static void vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_entity *entity;
+ unsigned int div_size;
+
+ format = vsp1_entity_get_pad_format(&pipe->output->entity,
+ pipe->output->entity.config,
+ RWPF_PAD_SOURCE);
+ div_size = format->width;
+
+ /* Gen2 hardware doesn't require image partitioning. */
+ if (vsp1->info->gen == 2) {
+ pipe->div_size = div_size;
+ pipe->partitions = 1;
+ return;
+ }
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ unsigned int entity_max = VSP1_VIDEO_MAX_WIDTH;
+
+ if (entity->ops->max_width) {
+ entity_max = entity->ops->max_width(entity, pipe);
+ if (entity_max)
+ div_size = min(div_size, entity_max);
+ }
+ }
+
+ pipe->div_size = div_size;
+ pipe->partitions = DIV_ROUND_UP(format->width, div_size);
+}
+
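The reduction above computes div_size as the tightest per-entity width limit, then derives the partition count from it: for a 2048-pixel output passing through an entity limited to 512 pixels, div_size becomes 512 and DIV_ROUND_UP(2048, 512) gives 4 partitions. A standalone sketch under those assumed limits:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical per-entity width limits along one pipeline. */
	const unsigned int limits[] = { 2048, 512, 1024 };
	unsigned int width = 2048;
	unsigned int div_size = width;
	unsigned int i;

	/* Take the smallest non-zero limit, as setup_partitions does. */
	for (i = 0; i < sizeof(limits) / sizeof(limits[0]); i++)
		if (limits[i] && limits[i] < div_size)
			div_size = limits[i];

	printf("div_size=%u partitions=%u\n",
	       div_size, DIV_ROUND_UP(width, div_size));
	return 0;
}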
+/**
+ * vsp1_video_partition - Calculate the active partition output window
+ *
+ * @pipe: the pipeline
+ * @div_size: pre-determined maximum partition division size
+ * @index: partition index
+ *
+ * Returns a v4l2_rect describing the partition window.
+ */
+static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe,
+ unsigned int div_size,
+ unsigned int index)
+{
+ const struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect partition;
+ unsigned int modulus;
+
+ format = vsp1_entity_get_pad_format(&pipe->output->entity,
+ pipe->output->entity.config,
+ RWPF_PAD_SOURCE);
+
+ /* A single partition simply processes the output size in full. */
+ if (pipe->partitions <= 1) {
+ partition.left = 0;
+ partition.top = 0;
+ partition.width = format->width;
+ partition.height = format->height;
+ return partition;
+ }
+
+ /* Initialise the partition with sane starting conditions. */
+ partition.left = index * div_size;
+ partition.top = 0;
+ partition.width = div_size;
+ partition.height = format->height;
+
+ modulus = format->width % div_size;
+
+ /*
+ * We need to prevent the last partition from being smaller than the
+ * *minimum* width of the hardware capabilities.
+ *
+ * If the modulus is less than half of the partition size,
+ * the penultimate partition is reduced to half, which is added
+ * to the final partition: |1234|1234|1234|12|341|
+ * to prevent this: |1234|1234|1234|1234|1|.
+ */
+ if (modulus) {
+ /*
+ * pipe->partitions is 1-based, whilst index is 0-based.
+ * Normalise this locally.
+ */
+ unsigned int partitions = pipe->partitions - 1;
+
+ if (modulus < div_size / 2) {
+ if (index == partitions - 1) {
+ /* Halve the penultimate partition. */
+ partition.width = div_size / 2;
+ } else if (index == partitions) {
+ /* Increase the final partition. */
+ partition.width = (div_size / 2) + modulus;
+ partition.left -= div_size / 2;
+ }
+ } else if (index == partitions) {
+ partition.width = modulus;
+ }
+ }
+
+ return partition;
+}
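
The window arithmetic can be checked with a minimal userspace model of the function above; the 1040-pixel frame and 256-pixel division size are made-up numbers chosen so that the modulus (16) falls below half the division size:

#include <stdio.h>

struct rect { unsigned int left, width; };

static struct rect partition(unsigned int fmt_width, unsigned int div_size,
                             unsigned int partitions, unsigned int index)
{
        struct rect p = { index * div_size, div_size };
        unsigned int modulus = fmt_width % div_size;
        unsigned int last = partitions - 1;     /* 0-based final index */

        if (partitions <= 1) {
                p.left = 0;
                p.width = fmt_width;
                return p;
        }
        if (!modulus)
                return p;

        if (modulus < div_size / 2) {
                if (index == last - 1)
                        p.width = div_size / 2;         /* halve penultimate */
                else if (index == last) {
                        p.width = div_size / 2 + modulus;
                        p.left -= div_size / 2;         /* grow the final one */
                }
        } else if (index == last) {
                p.width = modulus;
        }
        return p;
}

int main(void)
{
        unsigned int w = 1040, div = 256;
        unsigned int n = (w + div - 1) / div, i;

        for (i = 0; i < n; i++) {
                struct rect p = partition(w, div, n, i);

                printf("partition %u: left=%u width=%u\n", i, p.left, p.width);
        }
        /* the last two windows become 768/128 and 896/144, not 1024/16 */
        return 0;
}
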
+
+/* -----------------------------------------------------------------------------
* Pipeline Management
*/
@@ -234,44 +341,81 @@ static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
{
struct vsp1_video *video = rwpf->video;
struct vsp1_vb2_buffer *buf;
- unsigned long flags;
buf = vsp1_video_complete_buffer(video);
if (buf == NULL)
return;
- spin_lock_irqsave(&pipe->irqlock, flags);
-
video->rwpf->mem = buf->mem;
pipe->buffers_ready |= 1 << video->pipe_index;
+}
- spin_unlock_irqrestore(&pipe->irqlock, flags);
+static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl)
+{
+ struct vsp1_entity *entity;
+
+ pipe->partition = vsp1_video_partition(pipe, pipe->div_size,
+ pipe->current_partition);
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ if (entity->ops->configure)
+ entity->ops->configure(entity, pipe, dl,
+ VSP1_ENTITY_PARAMS_PARTITION);
+ }
}
static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
struct vsp1_entity *entity;
- unsigned int i;
if (!pipe->dl)
pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
+ /*
+ * Start with the runtime parameters as the configure operation can
+ * compute/cache information needed when configuring partitions. This
+ * is the case with flipping in the WPF.
+ */
list_for_each_entry(entity, &pipe->entities, list_pipe) {
if (entity->ops->configure)
- entity->ops->configure(entity, pipe, pipe->dl, false);
+ entity->ops->configure(entity, pipe, pipe->dl,
+ VSP1_ENTITY_PARAMS_RUNTIME);
}
- for (i = 0; i < vsp1->info->rpf_count; ++i) {
- struct vsp1_rwpf *rwpf = pipe->inputs[i];
+ /* Run the first partition */
+ pipe->current_partition = 0;
+ vsp1_video_pipeline_run_partition(pipe, pipe->dl);
- if (rwpf)
- vsp1_rwpf_set_memory(rwpf, pipe->dl);
- }
+ /* Process consecutive partitions as necessary */
+ for (pipe->current_partition = 1;
+ pipe->current_partition < pipe->partitions;
+ pipe->current_partition++) {
+ struct vsp1_dl_list *dl;
- if (!pipe->lif)
- vsp1_rwpf_set_memory(pipe->output, pipe->dl);
+ /*
+ * Partition configuration operations will utilise
+ * the pipe->current_partition variable to determine
+ * the work they should complete.
+ */
+ dl = vsp1_dl_list_get(pipe->output->dlm);
+
+ /*
+ * An incomplete chain will still function, but will output only
+ * the partitions that had a dl available. The frame end
+ * interrupt will be marked on the last dl in the chain.
+ */
+ if (!dl) {
+ dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
+ break;
+ }
+
+ vsp1_video_pipeline_run_partition(pipe, dl);
+ vsp1_dl_list_add_chain(pipe->dl, dl);
+ }
+ /* Complete, and commit the head display list. */
vsp1_dl_list_commit(pipe->dl);
pipe->dl = NULL;
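
The run loop above reduces to a simple pattern: the head display list carries partition 0, each further partition gets its own list chained behind it, and the whole chain is committed once. A toy model of that structure (plain linked list, no hardware):

#include <stdio.h>
#include <stdlib.h>

struct dl {
        unsigned int partition;
        struct dl *next;        /* chained towards the frame-end interrupt */
};

int main(void)
{
        unsigned int partitions = 3, i;
        struct dl *head = calloc(1, sizeof(*head));
        struct dl *tail = head, *cur;

        if (!head)
                return 1;
        head->partition = 0;                /* first partition on the head */

        for (i = 1; i < partitions; i++) {  /* chain one list per partition */
                struct dl *dl = calloc(1, sizeof(*dl));

                if (!dl)
                        break;              /* incomplete frame, as noted */
                dl->partition = i;
                tail->next = dl;
                tail = dl;
        }

        /* "committing" the head hands the whole chain over at once */
        for (cur = head; cur; cur = cur->next)
                printf("run partition %u\n", cur->partition);

        while (head) {
                cur = head->next;
                free(head);
                head = cur;
        }
        return 0;
}
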
@@ -285,6 +429,8 @@ static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
unsigned long flags;
unsigned int i;
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
/* Complete buffers on all video nodes. */
for (i = 0; i < vsp1->info->rpf_count; ++i) {
if (!pipe->inputs[i])
@@ -295,8 +441,6 @@ static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
vsp1_video_frame_end(pipe, pipe->output);
- spin_lock_irqsave(&pipe->irqlock, flags);
-
state = pipe->state;
pipe->state = VSP1_PIPELINE_STOPPED;
@@ -607,6 +751,9 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
struct vsp1_entity *entity;
+ /* Determine this pipeline's sizes for image partitioning support. */
+ vsp1_video_pipeline_setup_partitions(pipe);
+
/* Prepare the display list. */
pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
if (!pipe->dl)
@@ -634,7 +781,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
vsp1_entity_route_setup(entity, pipe->dl);
if (entity->ops->configure)
- entity->ops->configure(entity, pipe, pipe->dl, true);
+ entity->ops->configure(entity, pipe, pipe->dl,
+ VSP1_ENTITY_PARAMS_INIT);
}
return 0;
@@ -675,6 +823,14 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
unsigned long flags;
int ret;
+ /*
+ * Clear the buffers ready flag to make sure the device won't be started
+ * by a QBUF on the video node on the other side of the pipeline.
+ */
+ spin_lock_irqsave(&video->irqlock, flags);
+ pipe->buffers_ready &= ~(1 << video->pipe_index);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
mutex_lock(&pipe->lock);
if (--pipe->stream_count == pipe->num_inputs) {
/* Stop the pipeline. */
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 31983169c24a..7c48f81cd5c1 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -173,58 +173,28 @@ static void vsp1_wpf_destroy(struct vsp1_entity *entity)
vsp1_dlm_destroy(wpf->dlm);
}
-static void wpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
-{
- struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
- const struct v4l2_pix_format_mplane *format = &wpf->format;
- struct vsp1_rwpf_memory mem = wpf->mem;
- unsigned int flip = wpf->flip.active;
- unsigned int offset;
-
- /* Update the memory offsets based on flipping configuration. The
- * destination addresses point to the locations where the VSP starts
- * writing to memory, which can be different corners of the image
- * depending on vertical flipping. Horizontal flipping is handled
- * through a line buffer and doesn't modify the start address.
- */
- if (flip & BIT(WPF_CTRL_VFLIP)) {
- mem.addr[0] += (format->height - 1)
- * format->plane_fmt[0].bytesperline;
-
- if (format->num_planes > 1) {
- offset = (format->height / wpf->fmtinfo->vsub - 1)
- * format->plane_fmt[1].bytesperline;
- mem.addr[1] += offset;
- mem.addr[2] += offset;
- }
- }
-
- vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
- vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
- vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
-}
-
static void wpf_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl, bool full)
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
{
struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
struct vsp1_device *vsp1 = wpf->entity.vsp1;
const struct v4l2_mbus_framefmt *source_format;
const struct v4l2_mbus_framefmt *sink_format;
- const struct v4l2_rect *crop;
unsigned int i;
u32 outfmt = 0;
u32 srcrpf = 0;
- if (!full) {
+ if (params == VSP1_ENTITY_PARAMS_RUNTIME) {
const unsigned int mask = BIT(WPF_CTRL_VFLIP)
| BIT(WPF_CTRL_HFLIP);
+ unsigned long flags;
- spin_lock(&wpf->flip.lock);
+ spin_lock_irqsave(&wpf->flip.lock, flags);
wpf->flip.active = (wpf->flip.active & ~mask)
| (wpf->flip.pending & mask);
- spin_unlock(&wpf->flip.lock);
+ spin_unlock_irqrestore(&wpf->flip.lock, flags);
outfmt = (wpf->alpha << VI6_WPF_OUTFMT_PDV_SHIFT) | wpf->outfmt;
@@ -237,17 +207,6 @@ static void wpf_configure(struct vsp1_entity *entity,
return;
}
- /* Cropping */
- crop = vsp1_rwpf_get_crop(wpf, wpf->entity.config);
-
- vsp1_wpf_write(wpf, dl, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
- (crop->left << VI6_WPF_SZCLIP_OFST_SHIFT) |
- (crop->width << VI6_WPF_SZCLIP_SIZE_SHIFT));
- vsp1_wpf_write(wpf, dl, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
- (crop->top << VI6_WPF_SZCLIP_OFST_SHIFT) |
- (crop->height << VI6_WPF_SZCLIP_SIZE_SHIFT));
-
- /* Format */
sink_format = vsp1_entity_get_pad_format(&wpf->entity,
wpf->entity.config,
RWPF_PAD_SINK);
@@ -255,6 +214,80 @@ static void wpf_configure(struct vsp1_entity *entity,
wpf->entity.config,
RWPF_PAD_SOURCE);
+ if (params == VSP1_ENTITY_PARAMS_PARTITION) {
+ const struct v4l2_pix_format_mplane *format = &wpf->format;
+ struct vsp1_rwpf_memory mem = wpf->mem;
+ unsigned int flip = wpf->flip.active;
+ unsigned int width = source_format->width;
+ unsigned int height = source_format->height;
+ unsigned int offset;
+
+ /*
+ * Cropping. The partition algorithm can split the image into
+ * multiple slices.
+ */
+ if (pipe->partitions > 1)
+ width = pipe->partition.width;
+
+ vsp1_wpf_write(wpf, dl, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
+ (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (width << VI6_WPF_SZCLIP_SIZE_SHIFT));
+ vsp1_wpf_write(wpf, dl, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
+ (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (height << VI6_WPF_SZCLIP_SIZE_SHIFT));
+
+ if (pipe->lif)
+ return;
+
+ /*
+ * Update the memory offsets based on flipping configuration.
+ * The destination addresses point to the locations where the
+ * VSP starts writing to memory, which can be different corners
+ * of the image depending on vertical flipping.
+ */
+ if (pipe->partitions > 1) {
+ const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
+
+ /*
+ * Horizontal flipping is handled through a line buffer
+ * and doesn't modify the start address, but still needs
+ * to be handled when image partitioning is in effect to
+ * order the partitions correctly.
+ */
+ if (flip & BIT(WPF_CTRL_HFLIP))
+ offset = format->width - pipe->partition.left
+ - pipe->partition.width;
+ else
+ offset = pipe->partition.left;
+
+ mem.addr[0] += offset * fmtinfo->bpp[0] / 8;
+ if (format->num_planes > 1) {
+ mem.addr[1] += offset / fmtinfo->hsub
+ * fmtinfo->bpp[1] / 8;
+ mem.addr[2] += offset / fmtinfo->hsub
+ * fmtinfo->bpp[2] / 8;
+ }
+ }
+
+ if (flip & BIT(WPF_CTRL_VFLIP)) {
+ mem.addr[0] += (format->height - 1)
+ * format->plane_fmt[0].bytesperline;
+
+ if (format->num_planes > 1) {
+ offset = (format->height / wpf->fmtinfo->vsub - 1)
+ * format->plane_fmt[1].bytesperline;
+ mem.addr[1] += offset;
+ mem.addr[2] += offset;
+ }
+ }
+
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
+ return;
+ }
+
+ /* Format */
if (!pipe->lif) {
const struct v4l2_pix_format_mplane *format = &wpf->format;
const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
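
A standalone check of the flip-aware address offsets computed in the partition branch above; the frame geometry and the 32 bpp single-plane format are hypothetical example values:

#include <stdio.h>

int main(void)
{
        unsigned int fmt_width = 1024, fmt_height = 600;  /* frame */
        unsigned int part_left = 256, part_width = 256;   /* window */
        unsigned int bpp = 32;                            /* plane 0 */
        unsigned int stride = fmt_width * bpp / 8;
        unsigned long addr = 0;                   /* plane 0 base address */
        int hflip = 1, vflip = 1;
        unsigned int offset;

        /* horizontal flip reverses the partition order in memory */
        offset = hflip ? fmt_width - part_left - part_width : part_left;
        addr += (unsigned long)offset * bpp / 8;

        /* vertical flip starts writing at the last line */
        if (vflip)
                addr += (unsigned long)(fmt_height - 1) * stride;

        printf("plane 0 byte offset: %lu\n", addr);
        /* 512 * 4 + 599 * 4096 = 2455552 */
        return 0;
}
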
@@ -318,12 +351,11 @@ static void wpf_configure(struct vsp1_entity *entity,
/* Enable interrupts */
vsp1_dl_list_write(dl, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
vsp1_dl_list_write(dl, VI6_WPF_IRQ_ENB(wpf->entity.index),
- VI6_WFP_IRQ_ENB_FREE);
+ VI6_WFP_IRQ_ENB_DFEE);
}
static const struct vsp1_entity_operations wpf_entity_ops = {
.destroy = vsp1_wpf_destroy,
- .set_memory = wpf_set_memory,
.configure = wpf_configure,
};
@@ -360,7 +392,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
return ERR_PTR(ret);
/* Initialize the display list manager. */
- wpf->dlm = vsp1_dlm_create(vsp1, index, 4);
+ wpf->dlm = vsp1_dlm_create(vsp1, index, 64);
if (!wpf->dlm) {
ret = -ENOMEM;
goto error;
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 7ae1a134b1ff..1d5836c3fb7a 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -474,7 +474,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq)
spin_unlock_irq(&dma->queued_lock);
}
-static struct vb2_ops xvip_dma_queue_qops = {
+static const struct vb2_ops xvip_dma_queue_qops = {
.queue_setup = xvip_dma_queue_setup,
.buf_prepare = xvip_dma_buffer_prepare,
.buf_queue = xvip_dma_buffer_queue,
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 471d6a8ae8a4..ee0470a3196b 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -509,7 +509,6 @@ static SIMPLE_DEV_PM_OPS(si470x_i2c_pm, si470x_i2c_suspend, si470x_i2c_resume);
static struct i2c_driver si470x_i2c_driver = {
.driver = {
.name = "si470x",
- .owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
.pm = &si470x_i2c_pm,
#endif
diff --git a/drivers/media/radio/si4713/radio-usb-si4713.c b/drivers/media/radio/si4713/radio-usb-si4713.c
index 5146be2a1a50..e5e5a1672bdb 100644
--- a/drivers/media/radio/si4713/radio-usb-si4713.c
+++ b/drivers/media/radio/si4713/radio-usb-si4713.c
@@ -402,7 +402,7 @@ static u32 si4713_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm si4713_algo = {
+static const struct i2c_algorithm si4713_algo = {
.master_xfer = si4713_transfer,
.functionality = si4713_functionality,
};
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index e0c531fa01da..5cf983be07a2 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -203,7 +203,8 @@ static int igorplugusb_probe(struct usb_interface *intf,
* This device can only store 36 pulses + spaces, which is not enough
* for the NEC protocol and many others.
*/
- rc->allowed_protocols = RC_BIT_ALL & ~(RC_BIT_NEC | RC_BIT_RC6_6A_20 |
+ rc->allowed_protocols = RC_BIT_ALL & ~(RC_BIT_NEC | RC_BIT_NECX |
+ RC_BIT_NEC32 | RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE |
RC_BIT_SONY20 | RC_BIT_MCE_KBD | RC_BIT_SANYO);
diff --git a/drivers/media/rc/img-ir/img-ir-nec.c b/drivers/media/rc/img-ir/img-ir-nec.c
index 27a7ea8f1260..09314933ea08 100644
--- a/drivers/media/rc/img-ir/img-ir-nec.c
+++ b/drivers/media/rc/img-ir/img-ir-nec.c
@@ -34,19 +34,21 @@ static int img_ir_nec_scancode(int len, u64 raw, u64 enabled_protocols,
bitrev8(addr_inv) << 16 |
bitrev8(data) << 8 |
bitrev8(data_inv);
+ request->protocol = RC_TYPE_NEC32;
} else if ((addr_inv ^ addr) != 0xff) {
/* Extended NEC */
/* scan encoding: AAaaDD */
request->scancode = addr << 16 |
addr_inv << 8 |
data;
+ request->protocol = RC_TYPE_NECX;
} else {
/* Normal NEC */
/* scan encoding: AADD */
request->scancode = addr << 8 |
data;
+ request->protocol = RC_TYPE_NEC;
}
- request->protocol = RC_TYPE_NEC;
return IMG_IR_SCANCODE;
}
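
The three scancode layouts above differ only in how the four NEC bytes are packed. A small self-contained illustration (the example bytes are made up; the dispatch order mirrors the branches above, with non-complementary command bytes selecting the 32-bit form):

#include <stdio.h>
#include <stdint.h>

static uint8_t bitrev8(uint8_t b)
{
        b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;
        b = (b & 0xcc) >> 2 | (b & 0x33) << 2;
        b = (b & 0xaa) >> 1 | (b & 0x55) << 1;
        return b;
}

int main(void)
{
        uint8_t addr = 0x04, addr_inv = 0xfb;   /* complementary pair */
        uint8_t data = 0x08, data_inv = 0xf7;   /* complementary pair */
        unsigned int code;

        if ((data ^ data_inv) != 0xff) {
                /* 32-bit NEC: all four bytes, bit-reversed */
                code = (unsigned int)bitrev8(addr) << 24 |
                       bitrev8(addr_inv) << 16 |
                       bitrev8(data) << 8 |
                       bitrev8(data_inv);
                printf("NEC32: 0x%08x\n", code);
        } else if ((addr ^ addr_inv) != 0xff) {
                /* extended NEC: 16-bit address */
                code = addr << 16 | addr_inv << 8 | data;
                printf("NECX:  0x%06x\n", code);
        } else {
                /* normal NEC: 8-bit address */
                code = addr << 8 | data;
                printf("NEC:   0x%04x\n", code);
        }
        /* with the bytes above this prints: NEC:   0x0408 */
        return 0;
}
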
@@ -109,7 +111,7 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in,
* http://wiki.altium.com/display/ADOH/NEC+Infrared+Transmission+Protocol
*/
struct img_ir_decoder img_ir_nec = {
- .type = RC_BIT_NEC,
+ .type = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32,
.control = {
.decoden = 1,
.code_type = IMG_IR_CODETYPE_PULSEDIST,
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index bea0d1eedee0..2a9d155548ab 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -49,6 +49,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct nec_dec *data = &dev->raw->nec;
u32 scancode;
+ enum rc_type rc_type;
u8 address, not_address, command, not_command;
bool send_32bits = false;
@@ -171,22 +172,25 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
* least Apple and TiVo remotes */
scancode = data->bits;
IR_dprintk(1, "NEC (modified) scancode 0x%08x\n", scancode);
+ rc_type = RC_TYPE_NEC32;
} else if ((address ^ not_address) != 0xff) {
/* Extended NEC */
scancode = address << 16 |
not_address << 8 |
command;
IR_dprintk(1, "NEC (Ext) scancode 0x%06x\n", scancode);
+ rc_type = RC_TYPE_NECX;
} else {
/* Normal NEC */
scancode = address << 8 | command;
IR_dprintk(1, "NEC scancode 0x%04x\n", scancode);
+ rc_type = RC_TYPE_NEC;
}
if (data->is_nec_x)
data->necx_repeat = true;
- rc_keydown(dev, RC_TYPE_NEC, scancode, 0);
+ rc_keydown(dev, rc_type, scancode, 0);
data->state = STATE_INACTIVE;
return 0;
}
@@ -198,7 +202,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
static struct ir_raw_handler nec_handler = {
- .protocols = RC_BIT_NEC,
+ .protocols = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32,
.decode = ir_nec_decode,
};
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index e0e2edefa651..5cc54c967a80 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -248,7 +248,7 @@ again:
toggle = 0;
break;
case 24:
- protocol = RC_BIT_RC6_6A_24;
+ protocol = RC_TYPE_RC6_6A_24;
toggle = 0;
break;
case 32:
@@ -257,7 +257,7 @@ again:
toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
} else {
- protocol = RC_BIT_RC6_6A_32;
+ protocol = RC_TYPE_RC6_6A_32;
toggle = 0;
}
break;
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index fcc3b82d1454..003fff07ade2 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -24,6 +24,7 @@
#define DRIVER_NAME "meson-ir"
+/* valid on all Meson platforms */
#define IR_DEC_LDR_ACTIVE 0x00
#define IR_DEC_LDR_IDLE 0x04
#define IR_DEC_LDR_REPEAT 0x08
@@ -32,12 +33,21 @@
#define IR_DEC_FRAME 0x14
#define IR_DEC_STATUS 0x18
#define IR_DEC_REG1 0x1c
+/* only available on Meson 8b and newer */
+#define IR_DEC_REG2 0x20
#define REG0_RATE_MASK (BIT(11) - 1)
-#define REG1_MODE_MASK (BIT(7) | BIT(8))
-#define REG1_MODE_NEC (0 << 7)
-#define REG1_MODE_GENERAL (2 << 7)
+#define DECODE_MODE_NEC 0x0
+#define DECODE_MODE_RAW 0x2
+
+/* Meson 6b uses REG1 to configure the mode */
+#define REG1_MODE_MASK GENMASK(8, 7)
+#define REG1_MODE_SHIFT 7
+
+/* Meson 8b / GXBB use REG2 to configure the mode */
+#define REG2_MODE_MASK GENMASK(3, 0)
+#define REG2_MODE_SHIFT 0
#define REG1_TIME_IV_SHIFT 16
#define REG1_TIME_IV_MASK ((BIT(13) - 1) << REG1_TIME_IV_SHIFT)
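
Both register variants come down to the same masked read-modify-write; a minimal model, assuming meson_ir_set_mask() clears the masked bits and ORs in the new field (the mask/shift values are the ones defined above):

#include <stdio.h>
#include <stdint.h>

/* simplified GENMASK(); matches the kernel macro for these small fields */
#define GENMASK(h, l)   (((1U << ((h) - (l) + 1)) - 1) << (l))

#define REG1_MODE_MASK  GENMASK(8, 7)
#define REG1_MODE_SHIFT 7
#define REG2_MODE_MASK  GENMASK(3, 0)
#define REG2_MODE_SHIFT 0
#define DECODE_MODE_RAW 0x2

static uint32_t set_mask(uint32_t reg, uint32_t mask, uint32_t value)
{
        return (reg & ~mask) | (value & mask);  /* assumed RMW behaviour */
}

int main(void)
{
        uint32_t reg1 = 0xffffffff, reg2 = 0;

        /* Meson 6b: mode field lives in REG1, bits 8:7 */
        reg1 = set_mask(reg1, REG1_MODE_MASK,
                        DECODE_MODE_RAW << REG1_MODE_SHIFT);
        /* Meson 8b / GXBB: mode field lives in REG2, bits 3:0 */
        reg2 = set_mask(reg2, REG2_MODE_MASK,
                        DECODE_MODE_RAW << REG2_MODE_SHIFT);

        printf("reg1=0x%08x reg2=0x%08x\n", reg1, reg2);
        /* reg1 ends with bits 8:7 = 10b (raw mode), reg2 = 0x00000002 */
        return 0;
}
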
@@ -158,8 +168,15 @@ static int meson_ir_probe(struct platform_device *pdev)
/* Reset the decoder */
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, REG1_RESET);
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, 0);
- /* Set general operation mode */
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK, REG1_MODE_GENERAL);
+
+ /* Set general operation mode (= raw/software decoding) */
+ if (of_device_is_compatible(node, "amlogic,meson6-ir"))
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK,
+ DECODE_MODE_RAW << REG1_MODE_SHIFT);
+ else
+ meson_ir_set_mask(ir, IR_DEC_REG2, REG2_MODE_MASK,
+ DECODE_MODE_RAW << REG2_MODE_SHIFT);
+
/* Set rate */
meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, MESON_TRATE - 1);
/* IRQ on rising and falling edges */
@@ -197,6 +214,8 @@ static int meson_ir_remove(struct platform_device *pdev)
static const struct of_device_id meson_ir_match[] = {
{ .compatible = "amlogic,meson6-ir" },
+ { .compatible = "amlogic,meson8b-ir" },
+ { .compatible = "amlogic,meson-gxbb-ir" },
{ },
};
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 00215f343819..04fedaa75612 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -769,21 +769,11 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
rawir.pulse ? "pulse" : "space", rawir.duration);
ir_raw_event_store_with_filter(nvt->rdev, &rawir);
-
- /*
- * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
- * indicates end of IR signal, but new data incoming. In both
- * cases, it means we're ready to call ir_raw_event_handle
- */
- if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
- nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
- ir_raw_event_handle(nvt->rdev);
- }
}
nvt->pkts = 0;
- nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
+ nvt_dbg("Calling ir_raw_event_handle\n");
ir_raw_event_handle(nvt->rdev);
nvt_dbg_verbose("%s done", __func__);
@@ -801,8 +791,7 @@ static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
- u8 fifocount, val;
- unsigned int b_idx;
+ u8 fifocount;
int i;
/* Get count of how many bytes to read from RX FIFO */
@@ -810,21 +799,11 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
- b_idx = nvt->pkts;
-
- /* This should never happen, but lets check anyway... */
- if (b_idx + fifocount > RX_BUF_LEN) {
- nvt_process_rx_ir_data(nvt);
- b_idx = 0;
- }
-
/* Read fifocount bytes from CIR Sample RX FIFO register */
- for (i = 0; i < fifocount; i++) {
- val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
- nvt->buf[b_idx + i] = val;
- }
+ for (i = 0; i < fifocount; i++)
+ nvt->buf[i] = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
- nvt->pkts += fifocount;
+ nvt->pkts = fifocount;
nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);
nvt_process_rx_ir_data(nvt);
@@ -886,6 +865,15 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
status = nvt_cir_reg_read(nvt, CIR_IRSTS);
iren = nvt_cir_reg_read(nvt, CIR_IREN);
+ /* At least NCT6779D creates a spurious interrupt when the
+ * logical device is being disabled.
+ */
+ if (status == 0xff && iren == 0xff) {
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+ nvt_dbg_verbose("Spurious interrupt detected");
+ return IRQ_HANDLED;
+ }
+
/* IRQ may be shared with CIR WAKE, therefore check for each
* status bit whether the related interrupt source is enabled
*/
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 144304c94606..205ecc602e34 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -26,6 +26,7 @@ static LIST_HEAD(ir_raw_client_list);
/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
+static DEFINE_MUTEX(available_protocols_lock);
static u64 available_protocols;
static int ir_raw_event_thread(void *data)
@@ -234,9 +235,9 @@ u64
ir_raw_get_allowed_protocols(void)
{
u64 protocols;
- mutex_lock(&ir_raw_handler_lock);
+ mutex_lock(&available_protocols_lock);
protocols = available_protocols;
- mutex_unlock(&ir_raw_handler_lock);
+ mutex_unlock(&available_protocols_lock);
return protocols;
}
@@ -330,7 +331,9 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
if (ir_raw_handler->raw_register)
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->dev);
+ mutex_lock(&available_protocols_lock);
available_protocols |= ir_raw_handler->protocols;
+ mutex_unlock(&available_protocols_lock);
mutex_unlock(&ir_raw_handler_lock);
return 0;
@@ -349,7 +352,9 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
if (ir_raw_handler->raw_unregister)
ir_raw_handler->raw_unregister(raw->dev);
}
+ mutex_lock(&available_protocols_lock);
available_protocols &= ~protocols;
+ mutex_unlock(&available_protocols_lock);
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 8e7f2929fa6f..d9c1f2ff7119 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -795,7 +795,9 @@ static const struct {
{ RC_BIT_UNKNOWN, "unknown", NULL },
{ RC_BIT_RC5 |
RC_BIT_RC5X, "rc-5", "ir-rc5-decoder" },
- { RC_BIT_NEC, "nec", "ir-nec-decoder" },
+ { RC_BIT_NEC |
+ RC_BIT_NECX |
+ RC_BIT_NEC32, "nec", "ir-nec-decoder" },
{ RC_BIT_RC6_0 |
RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 |
@@ -1460,6 +1462,10 @@ int rc_register_device(struct rc_dev *dev)
dev->input_dev->phys = dev->input_phys;
dev->input_dev->name = dev->input_name;
+ rc = input_register_device(dev->input_dev);
+ if (rc)
+ goto out_table;
+
/*
* Default delay of 250ms is too short for some protocols, especially
* since the timeout is currently set to 250ms. Increase it to 500ms,
@@ -1475,11 +1481,6 @@ int rc_register_device(struct rc_dev *dev)
*/
dev->input_dev->rep[REP_PERIOD] = 125;
- /* rc_open will be called here */
- rc = input_register_device(dev->input_dev);
- if (rc)
- goto out_table;
-
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
dev_info(&dev->dev, "%s as %s\n",
dev->input_name ?: "Unspecified device", path ?: "N/A");
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index ec8016d9b009..05ba47bc0b61 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -124,6 +124,41 @@
#define USB_RR3USB_PRODUCT_ID 0x0001
#define USB_RR3IIUSB_PRODUCT_ID 0x0005
+
+/*
+ * The redrat3 encodes an IR signal as a set of different lengths and
+ * a set of indices into those lengths. This sets how much two lengths
+ * must differ before they are considered distinct; the value is
+ * specified in microseconds.
+ * Default 5, value 0 to 127.
+ */
+static unsigned int length_fuzz = 5;
+module_param(length_fuzz, uint, 0644);
+MODULE_PARM_DESC(length_fuzz, "Length Fuzz (0-127)");
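
A rough illustration of that length-dictionary encoding; the merge rule below is a guess at the device's behaviour, only the fuzz threshold itself comes from this patch:

#include <stdio.h>

int main(void)
{
        unsigned int sig[] = { 560, 562, 1690, 558, 1687 }; /* us */
        unsigned int lengths[8], nlengths = 0;
        unsigned int fuzz = 5, i, j;

        for (i = 0; i < sizeof(sig) / sizeof(sig[0]); i++) {
                /* reuse an existing length if it is within fuzz us */
                for (j = 0; j < nlengths; j++)
                        if (sig[i] + fuzz >= lengths[j] &&
                            lengths[j] + fuzz >= sig[i])
                                break;
                if (j == nlengths)
                        lengths[nlengths++] = sig[i];
                printf("duration %u -> index %u\n", sig[i], j);
        }
        printf("%u distinct lengths\n", nlengths);
        /* 560/562/558 share index 0, 1690/1687 share index 1 */
        return 0;
}
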
+
+/*
+ * When receiving a continuous ir stream (for example when a user is
+ * holding a button down on a remote), this specifies the minimum size
+ * of a space at which the redrat3 sends an irdata packet to the host.
+ * Specified in milliseconds. Default value 18 ms.
+ * The value can be between 2 and 30 inclusive.
+ */
+static unsigned int minimum_pause = 18;
+module_param(minimum_pause, uint, 0644);
+MODULE_PARM_DESC(minimum_pause, "Minimum Pause in ms (2-30)");
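
The register value written for this parameter in redrat3_reset() below is (65536 - ms * 2000) / 256. The formula suggests a 16-bit counter ticking every 0.5 us whose high byte is programmed, though that reading is an inference, not something stated in the patch. Checking the default:

#include <stdio.h>

int main(void)
{
        unsigned int minimum_pause = 18;            /* ms, the default */
        unsigned int ticks = minimum_pause * 2000;  /* assumed 0.5 us ticks */
        unsigned int val = (65536 - ticks) / 256;   /* programmed high byte */

        printf("minimum_pause=%u ms -> reg=%u (0x%02x)\n",
               minimum_pause, val, val);
        /* 18 ms -> (65536 - 36000) / 256 = 115 */
        return 0;
}
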
+
+/*
+ * The carrier frequency is measured during the first pulse of the IR
+ * signal. The larger the number of periods used to measure, the more
+ * accurate the result is likely to be; however, some signals have short
+ * initial pulses, so in some cases it may be necessary to reduce this value.
+ * Default 8, value 1 to 255.
+ */
+static unsigned int periods_measure_carrier = 8;
+module_param(periods_measure_carrier, uint, 0644);
+MODULE_PARM_DESC(periods_measure_carrier, "Number of Periods to Measure Carrier (1-255)");
+
+
struct redrat3_header {
__be16 length;
__be16 transfer_type;
@@ -188,9 +223,6 @@ struct redrat3_dev {
/* usb dma */
dma_addr_t dma_in;
- /* rx signal timeout */
- u32 hw_timeout;
-
/* Is the device currently transmitting?*/
bool transmitting;
@@ -372,7 +404,7 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
/* add a trailing space */
rawir.pulse = false;
rawir.timeout = true;
- rawir.duration = US_TO_NS(rr3->hw_timeout);
+ rawir.duration = rr3->rc->timeout;
dev_dbg(dev, "storing trailing timeout with duration %d\n",
rawir.duration);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
@@ -480,7 +512,7 @@ static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutns)
struct redrat3_dev *rr3 = rc_dev->priv;
struct usb_device *udev = rr3->udev;
struct device *dev = rr3->dev;
- u32 *timeout;
+ __be32 *timeout;
int ret;
timeout = kmalloc(sizeof(*timeout), GFP_KERNEL);
@@ -495,10 +527,9 @@ static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutns)
dev_dbg(dev, "set ir parm timeout %d ret 0x%02x\n",
be32_to_cpu(*timeout), ret);
- if (ret == sizeof(*timeout)) {
- rr3->hw_timeout = timeoutns / 1000;
+ if (ret == sizeof(*timeout))
ret = 0;
- } else if (ret >= 0)
+ else if (ret >= 0)
ret = -EIO;
kfree(timeout);
@@ -529,12 +560,25 @@ static void redrat3_reset(struct redrat3_dev *rr3)
RR3_CPUCS_REG_ADDR, 0, val, len, HZ * 25);
dev_dbg(dev, "reset returned 0x%02x\n", rc);
- *val = 5;
+ *val = length_fuzz;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_LENGTH_FUZZ, 0, val, len, HZ * 25);
dev_dbg(dev, "set ir parm len fuzz %d rc 0x%02x\n", *val, rc);
+ *val = (65536 - (minimum_pause * 2000)) / 256;
+ rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ RR3_IR_IO_MIN_PAUSE, 0, val, len, HZ * 25);
+ dev_dbg(dev, "set ir parm min pause %d rc 0x%02x\n", *val, rc);
+
+ *val = periods_measure_carrier;
+ rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ RR3_IR_IO_PERIODS_MF, 0, val, len, HZ * 25);
+ dev_dbg(dev, "set ir parm periods measure carrier %d rc 0x%02x", *val,
+ rc);
+
*val = RR3_DRIVER_MAXLENS;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -889,7 +933,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->allowed_protocols = RC_BIT_ALL;
rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT);
rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT);
- rc->timeout = US_TO_NS(rr3->hw_timeout);
+ rc->timeout = US_TO_NS(redrat3_get_timeout(rr3));
rc->s_timeout = redrat3_set_timeout;
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
@@ -998,9 +1042,6 @@ static int redrat3_dev_probe(struct usb_interface *intf,
if (retval < 0)
goto error;
- /* store current hardware timeout, in µs */
- rr3->hw_timeout = redrat3_get_timeout(rr3);
-
/* default.. will get overridden by any sends with a freq defined */
rr3->carrier = 38000;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 815243c65bc3..4004260a7c69 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -499,7 +499,7 @@ static int streamzap_resume(struct usb_interface *intf)
struct streamzap_ir *sz = usb_get_intfdata(intf);
if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) {
- dev_err(sz->dev, "Error sumbiting urb\n");
+ dev_err(sz->dev, "Error submitting urb\n");
return -EIO;
}
diff --git a/drivers/media/spi/Kconfig b/drivers/media/spi/Kconfig
new file mode 100644
index 000000000000..a21f5a39a440
--- /dev/null
+++ b/drivers/media/spi/Kconfig
@@ -0,0 +1,14 @@
+if VIDEO_V4L2
+
+menu "SPI helper chips"
+ visible if !MEDIA_SUBDRV_AUTOSELECT || COMPILE_TEST
+
+config VIDEO_GS1662
+ tristate "Gennum Serializers video"
+ depends on SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ Enable the GS1662 driver which serializes video streams.
+
+endmenu
+
+endif
diff --git a/drivers/media/spi/Makefile b/drivers/media/spi/Makefile
new file mode 100644
index 000000000000..ea64013d16cc
--- /dev/null
+++ b/drivers/media/spi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_GS1662) += gs1662.o
diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
new file mode 100644
index 000000000000..d76f36233f43
--- /dev/null
+++ b/drivers/media/spi/gs1662.c
@@ -0,0 +1,478 @@
+/*
+ * GS1662 device registration.
+ *
+ * Copyright (C) 2015-2016 Nexvision
+ * Author: Charles-Antoine Couret <charles-antoine.couret@nexvision.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_device.h>
+#include <linux/ctype.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-dv-timings.h>
+#include <linux/v4l2-dv-timings.h>
+
+#define REG_STATUS 0x04
+#define REG_FORCE_FMT 0x06
+#define REG_LINES_PER_FRAME 0x12
+#define REG_WORDS_PER_LINE 0x13
+#define REG_WORDS_PER_ACT_LINE 0x14
+#define REG_ACT_LINES_PER_FRAME 0x15
+
+#define MASK_H_LOCK 0x001
+#define MASK_V_LOCK 0x002
+#define MASK_STD_LOCK 0x004
+#define MASK_FORCE_STD 0x020
+#define MASK_STD_STATUS 0x3E0
+
+#define GS_WIDTH_MIN 720
+#define GS_WIDTH_MAX 2048
+#define GS_HEIGHT_MIN 487
+#define GS_HEIGHT_MAX 1080
+#define GS_PIXELCLOCK_MIN 10519200
+#define GS_PIXELCLOCK_MAX 74250000
+
+struct gs {
+ struct spi_device *pdev;
+ struct v4l2_subdev sd;
+ struct v4l2_dv_timings current_timings;
+ int enabled;
+};
+
+struct gs_reg_fmt {
+ u16 reg_value;
+ struct v4l2_dv_timings format;
+};
+
+struct gs_reg_fmt_custom {
+ u16 reg_value;
+ __u32 width;
+ __u32 height;
+ __u64 pixelclock;
+ __u32 interlaced;
+};
+
+static const struct spi_device_id gs_id[] = {
+ { "gs1662", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, gs_id);
+
+static const struct v4l2_dv_timings fmt_cap[] = {
+ V4L2_DV_BT_SDI_720X487I60,
+ V4L2_DV_BT_CEA_720X576P50,
+ V4L2_DV_BT_CEA_1280X720P24,
+ V4L2_DV_BT_CEA_1280X720P25,
+ V4L2_DV_BT_CEA_1280X720P30,
+ V4L2_DV_BT_CEA_1280X720P50,
+ V4L2_DV_BT_CEA_1280X720P60,
+ V4L2_DV_BT_CEA_1920X1080P24,
+ V4L2_DV_BT_CEA_1920X1080P25,
+ V4L2_DV_BT_CEA_1920X1080P30,
+ V4L2_DV_BT_CEA_1920X1080I50,
+ V4L2_DV_BT_CEA_1920X1080I60,
+};
+
+static const struct gs_reg_fmt reg_fmt[] = {
+ { 0x00, V4L2_DV_BT_CEA_1280X720P60 },
+ { 0x01, V4L2_DV_BT_CEA_1280X720P60 },
+ { 0x02, V4L2_DV_BT_CEA_1280X720P30 },
+ { 0x03, V4L2_DV_BT_CEA_1280X720P30 },
+ { 0x04, V4L2_DV_BT_CEA_1280X720P50 },
+ { 0x05, V4L2_DV_BT_CEA_1280X720P50 },
+ { 0x06, V4L2_DV_BT_CEA_1280X720P25 },
+ { 0x07, V4L2_DV_BT_CEA_1280X720P25 },
+ { 0x08, V4L2_DV_BT_CEA_1280X720P24 },
+ { 0x09, V4L2_DV_BT_CEA_1280X720P24 },
+ { 0x0A, V4L2_DV_BT_CEA_1920X1080I60 },
+ { 0x0B, V4L2_DV_BT_CEA_1920X1080P30 },
+
+ /* Default value: keep this field before 0xC */
+ { 0x14, V4L2_DV_BT_CEA_1920X1080I50 },
+ { 0x0C, V4L2_DV_BT_CEA_1920X1080I50 },
+ { 0x0D, V4L2_DV_BT_CEA_1920X1080P25 },
+ { 0x0E, V4L2_DV_BT_CEA_1920X1080P25 },
+ { 0x10, V4L2_DV_BT_CEA_1920X1080P24 },
+ { 0x12, V4L2_DV_BT_CEA_1920X1080P24 },
+ { 0x16, V4L2_DV_BT_SDI_720X487I60 },
+ { 0x19, V4L2_DV_BT_SDI_720X487I60 },
+ { 0x18, V4L2_DV_BT_CEA_720X576P50 },
+ { 0x1A, V4L2_DV_BT_CEA_720X576P50 },
+
+ /* Implement the following timings before enabling them.
+ * We don't have access to these theoretical timings yet.
+ * Workaround: use functions to get and set registers for these formats.
+ */
+#if 0
+ { 0x0F, V4L2_DV_BT_XXX_1920X1080I25 }, /* SMPTE 274M */
+ { 0x11, V4L2_DV_BT_XXX_1920X1080I24 }, /* SMPTE 274M */
+ { 0x13, V4L2_DV_BT_XXX_1920X1080I25 }, /* SMPTE 274M */
+ { 0x15, V4L2_DV_BT_XXX_1920X1035I60 }, /* SMPTE 260M */
+ { 0x17, V4L2_DV_BT_SDI_720X507I60 }, /* SMPTE 125M */
+ { 0x1B, V4L2_DV_BT_SDI_720X507I60 }, /* SMPTE 125M */
+ { 0x1C, V4L2_DV_BT_XXX_2048X1080P25 }, /* SMPTE 428.1M */
+#endif
+};
+
+static const struct v4l2_dv_timings_cap gs_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(GS_WIDTH_MIN, GS_WIDTH_MAX, GS_HEIGHT_MIN,
+ GS_HEIGHT_MAX, GS_PIXELCLOCK_MIN,
+ GS_PIXELCLOCK_MAX,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_SDI,
+ V4L2_DV_BT_CAP_PROGRESSIVE
+ | V4L2_DV_BT_CAP_INTERLACED)
+};
+
+static int gs_read_register(struct spi_device *spi, u16 addr, u16 *value)
+{
+ int ret;
+ u16 buf_addr = (0x8000 | (0x0FFF & addr));
+ u16 buf_value = 0;
+ struct spi_message msg;
+ struct spi_transfer tx[] = {
+ {
+ .tx_buf = &buf_addr,
+ .len = 2,
+ .delay_usecs = 1,
+ }, {
+ .rx_buf = &buf_value,
+ .len = 2,
+ .delay_usecs = 1,
+ },
+ };
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&tx[0], &msg);
+ spi_message_add_tail(&tx[1], &msg);
+ ret = spi_sync(spi, &msg);
+
+ *value = buf_value;
+
+ return ret;
+}
+
+static int gs_write_register(struct spi_device *spi, u16 addr, u16 value)
+{
+ int ret;
+ u16 buf_addr = addr;
+ u16 buf_value = value;
+ struct spi_message msg;
+ struct spi_transfer tx[] = {
+ {
+ .tx_buf = &buf_addr,
+ .len = 2,
+ .delay_usecs = 1,
+ }, {
+ .tx_buf = &buf_value,
+ .len = 2,
+ .delay_usecs = 1,
+ },
+ };
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&tx[0], &msg);
+ spi_message_add_tail(&tx[1], &msg);
+ ret = spi_sync(spi, &msg);
+
+ return ret;
+}
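
The SPI framing used by the two helpers above is easy to model in isolation: a read clocks out a 16-bit word with bit 15 set and the 12-bit address, then clocks in one data word; a write sends the address word followed by the value. Register numbers below come from the defines above, the written value is an arbitrary example:

#include <stdio.h>
#include <stdint.h>

static void build_read_frame(uint16_t addr, uint16_t frame[1])
{
        frame[0] = 0x8000 | (addr & 0x0fff);    /* bit 15 set = read */
}

static void build_write_frame(uint16_t addr, uint16_t value,
                              uint16_t frame[2])
{
        frame[0] = addr & 0x0fff;               /* bit 15 clear = write */
        frame[1] = value;
}

int main(void)
{
        uint16_t rd[1], wr[2];

        build_read_frame(0x04, rd);             /* REG_STATUS */
        build_write_frame(0x06, 0x25, wr);      /* REG_FORCE_FMT, example */

        printf("read frame:  %04x\n", rd[0]);
        printf("write frame: %04x %04x\n", wr[0], wr[1]);
        /* read frame:  8004 / write frame: 0006 0025 */
        return 0;
}
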
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int gs_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct spi_device *spi = v4l2_get_subdevdata(sd);
+ u16 val;
+ int ret;
+
+ ret = gs_read_register(spi, reg->reg & 0xFFFF, &val);
+ reg->val = val;
+ reg->size = 2;
+ return ret;
+}
+
+static int gs_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ struct spi_device *spi = v4l2_get_subdevdata(sd);
+
+ return gs_write_register(spi, reg->reg & 0xFFFF, reg->val & 0xFFFF);
+}
+#endif
+
+static int gs_status_format(u16 status, struct v4l2_dv_timings *timings)
+{
+ int std = (status & MASK_STD_STATUS) >> 5;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_fmt); i++) {
+ if (reg_fmt[i].reg_value == std) {
+ *timings = reg_fmt[i].format;
+ return 0;
+ }
+ }
+
+ return -ERANGE;
+}
+
+static u16 get_register_timings(struct v4l2_dv_timings *timings)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_fmt); i++) {
+ if (v4l2_match_dv_timings(timings, &reg_fmt[i].format, 0,
+ false))
+ return reg_fmt[i].reg_value | MASK_FORCE_STD;
+ }
+
+ return 0x0;
+}
+
+static inline struct gs *to_gs(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct gs, sd);
+}
+
+static int gs_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct gs *gs = to_gs(sd);
+ int reg_value;
+
+ reg_value = get_register_timings(timings);
+ if (reg_value == 0x0)
+ return -EINVAL;
+
+ gs->current_timings = *timings;
+ return 0;
+}
+
+static int gs_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct gs *gs = to_gs(sd);
+
+ *timings = gs->current_timings;
+ return 0;
+}
+
+static int gs_query_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct gs *gs = to_gs(sd);
+ struct v4l2_dv_timings fmt;
+ u16 reg_value, i;
+ int ret;
+
+ if (gs->enabled)
+ return -EBUSY;
+
+ /*
+ * Check if the component detects a line, a frame or something else
+ * which looks like video signal activity.
+ */
+ for (i = 0; i < 4; i++) {
+ gs_read_register(gs->pdev, REG_LINES_PER_FRAME + i, &reg_value);
+ if (reg_value)
+ break;
+ }
+
+ /* If no register reports a video signal */
+ if (i >= 4)
+ return -ENOLINK;
+
+ gs_read_register(gs->pdev, REG_STATUS, &reg_value);
+ if (!(reg_value & MASK_H_LOCK) || !(reg_value & MASK_V_LOCK))
+ return -ENOLCK;
+ if (!(reg_value & MASK_STD_LOCK))
+ return -ERANGE;
+
+ ret = gs_status_format(reg_value, &fmt);
+
+ if (ret < 0)
+ return ret;
+
+ *timings = fmt;
+ return 0;
+}
+
+static int gs_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ if (timings->index >= ARRAY_SIZE(fmt_cap))
+ return -EINVAL;
+
+ if (timings->pad != 0)
+ return -EINVAL;
+
+ timings->timings = fmt_cap[timings->index];
+ return 0;
+}
+
+static int gs_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct gs *gs = to_gs(sd);
+ int reg_value;
+
+ if (gs->enabled == enable)
+ return 0;
+
+ gs->enabled = enable;
+
+ if (enable) {
+ /* To force the specific format */
+ reg_value = get_register_timings(&gs->current_timings);
+ return gs_write_register(gs->pdev, REG_FORCE_FMT, reg_value);
+ }
+
+ /* To re-enable auto-detection mode */
+ return gs_write_register(gs->pdev, REG_FORCE_FMT, 0x0);
+}
+
+static int gs_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct gs *gs = to_gs(sd);
+ u16 reg_value, i;
+ int ret;
+
+ /*
+ * Check if the component detects a line, a frame or something else
+ * which looks like video signal activity.
+ */
+ for (i = 0; i < 4; i++) {
+ ret = gs_read_register(gs->pdev,
+ REG_LINES_PER_FRAME + i, &reg_value);
+ if (reg_value)
+ break;
+ if (ret) {
+ *status = V4L2_IN_ST_NO_POWER;
+ return ret;
+ }
+ }
+
+ /* If no register reports a video signal */
+ if (i >= 4)
+ *status |= V4L2_IN_ST_NO_SIGNAL;
+
+ ret = gs_read_register(gs->pdev, REG_STATUS, &reg_value);
+ if (!(reg_value & MASK_H_LOCK))
+ *status |= V4L2_IN_ST_NO_H_LOCK;
+ if (!(reg_value & MASK_V_LOCK))
+ *status |= V4L2_IN_ST_NO_V_LOCK;
+ if (!(reg_value & MASK_STD_LOCK))
+ *status |= V4L2_IN_ST_NO_STD_LOCK;
+
+ return ret;
+}
+
+static int gs_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ if (cap->pad != 0)
+ return -EINVAL;
+
+ *cap = gs_timings_cap;
+ return 0;
+}
+
+/* V4L2 core operation handlers */
+static const struct v4l2_subdev_core_ops gs_core_ops = {
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = gs_g_register,
+ .s_register = gs_s_register,
+#endif
+};
+
+static const struct v4l2_subdev_video_ops gs_video_ops = {
+ .s_dv_timings = gs_s_dv_timings,
+ .g_dv_timings = gs_g_dv_timings,
+ .s_stream = gs_s_stream,
+ .g_input_status = gs_g_input_status,
+ .query_dv_timings = gs_query_dv_timings,
+};
+
+static const struct v4l2_subdev_pad_ops gs_pad_ops = {
+ .enum_dv_timings = gs_enum_dv_timings,
+ .dv_timings_cap = gs_dv_timings_cap,
+};
+
+/* V4L2 top level operation handlers */
+static const struct v4l2_subdev_ops gs_ops = {
+ .core = &gs_core_ops,
+ .video = &gs_video_ops,
+ .pad = &gs_pad_ops,
+};
+
+static int gs_probe(struct spi_device *spi)
+{
+ int ret;
+ struct gs *gs;
+ struct v4l2_subdev *sd;
+
+ gs = devm_kzalloc(&spi->dev, sizeof(struct gs), GFP_KERNEL);
+ if (!gs)
+ return -ENOMEM;
+
+ gs->pdev = spi;
+ sd = &gs->sd;
+
+ spi->mode = SPI_MODE_0;
+ spi->irq = -1;
+ spi->max_speed_hz = 10000000;
+ spi->bits_per_word = 16;
+ ret = spi_setup(spi);
+ v4l2_spi_subdev_init(sd, spi, &gs_ops);
+
+ gs->current_timings = reg_fmt[0].format;
+ gs->enabled = 0;
+
+ /* Set H_CONFIG to SMPTE timings */
+ gs_write_register(spi, 0x0, 0x300);
+
+ return ret;
+}
+
+static int gs_remove(struct spi_device *spi)
+{
+ struct v4l2_subdev *sd = spi_get_drvdata(spi);
+
+ /* gs is devm-allocated, so there is nothing to free here */
+ v4l2_device_unregister_subdev(sd);
+ return 0;
+}
+
+static struct spi_driver gs_driver = {
+ .driver = {
+ .name = "gs1662",
+ .owner = THIS_MODULE,
+ },
+
+ .probe = gs_probe,
+ .remove = gs_remove,
+ .id_table = gs_id,
+};
+
+module_spi_driver(gs_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Charles-Antoine Couret <charles-antoine.couret@nexvision.fr>");
+MODULE_DESCRIPTION("Gennum GS1662 HD/SD-SDI Serializer driver");
diff --git a/drivers/media/tuners/fc0011.h b/drivers/media/tuners/fc0011.h
index 81bb568d6943..438cf897acd1 100644
--- a/drivers/media/tuners/fc0011.h
+++ b/drivers/media/tuners/fc0011.h
@@ -1,7 +1,6 @@
#ifndef LINUX_FC0011_H_
#define LINUX_FC0011_H_
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/tuners/fc0012.h b/drivers/media/tuners/fc0012.h
index 9ad32859bab0..4a23e418daf0 100644
--- a/drivers/media/tuners/fc0012.h
+++ b/drivers/media/tuners/fc0012.h
@@ -21,7 +21,6 @@
#ifndef _FC0012_H_
#define _FC0012_H_
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
#include "fc001x-common.h"
diff --git a/drivers/media/tuners/fc0013.h b/drivers/media/tuners/fc0013.h
index e130bd7a3230..8c34105c9383 100644
--- a/drivers/media/tuners/fc0013.h
+++ b/drivers/media/tuners/fc0013.h
@@ -22,7 +22,6 @@
#ifndef _FC0013_H_
#define _FC0013_H_
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
#include "fc001x-common.h"
diff --git a/drivers/media/tuners/max2165.h b/drivers/media/tuners/max2165.h
index 5054f01a78fb..aadd9fea59e4 100644
--- a/drivers/media/tuners/max2165.h
+++ b/drivers/media/tuners/max2165.h
@@ -22,8 +22,6 @@
#ifndef __MAX2165_H__
#define __MAX2165_H__
-#include <linux/kconfig.h>
-
struct dvb_frontend;
struct i2c_adapter;
diff --git a/drivers/media/tuners/mc44s803.h b/drivers/media/tuners/mc44s803.h
index b3e614be657d..6b40df339284 100644
--- a/drivers/media/tuners/mc44s803.h
+++ b/drivers/media/tuners/mc44s803.h
@@ -22,8 +22,6 @@
#ifndef MC44S803_H
#define MC44S803_H
-#include <linux/kconfig.h>
-
struct dvb_frontend;
struct i2c_adapter;
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 7f0b9d5940db..dfec23743afe 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -2201,7 +2201,7 @@ static int mt2063_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
return 0;
}
-static struct dvb_tuner_ops mt2063_ops = {
+static const struct dvb_tuner_ops mt2063_ops = {
.info = {
.name = "MT2063 Silicon Tuner",
.frequency_min = 45000000,
diff --git a/drivers/media/tuners/mt20xx.c b/drivers/media/tuners/mt20xx.c
index 9e031040c13f..52da4671b0e0 100644
--- a/drivers/media/tuners/mt20xx.c
+++ b/drivers/media/tuners/mt20xx.c
@@ -363,7 +363,7 @@ static int mt2032_set_params(struct dvb_frontend *fe,
return ret;
}
-static struct dvb_tuner_ops mt2032_tuner_ops = {
+static const struct dvb_tuner_ops mt2032_tuner_ops = {
.set_analog_params = mt2032_set_params,
.release = microtune_release,
.get_frequency = microtune_get_frequency,
@@ -563,7 +563,7 @@ static int mt2050_set_params(struct dvb_frontend *fe,
return ret;
}
-static struct dvb_tuner_ops mt2050_tuner_ops = {
+static const struct dvb_tuner_ops mt2050_tuner_ops = {
.set_analog_params = mt2050_set_params,
.release = microtune_release,
.get_frequency = microtune_get_frequency,
diff --git a/drivers/media/tuners/mxl5005s.h b/drivers/media/tuners/mxl5005s.h
index 5764b12c5c7c..d842734f2dcd 100644
--- a/drivers/media/tuners/mxl5005s.h
+++ b/drivers/media/tuners/mxl5005s.h
@@ -23,8 +23,6 @@
#ifndef __MXL5005S_H
#define __MXL5005S_H
-#include <linux/kconfig.h>
-
#include <linux/i2c.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index f4ae04c3328a..42569c6811e6 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -794,7 +794,7 @@ static int mxl5007t_release(struct dvb_frontend *fe)
/* ------------------------------------------------------------------------- */
-static struct dvb_tuner_ops mxl5007t_tuner_ops = {
+static const struct dvb_tuner_ops mxl5007t_tuner_ops = {
.info = {
.name = "MaxLinear MxL5007T",
},
diff --git a/drivers/media/tuners/r820t.h b/drivers/media/tuners/r820t.h
index b1e5661af1c7..fdcab91405de 100644
--- a/drivers/media/tuners/r820t.h
+++ b/drivers/media/tuners/r820t.h
@@ -21,7 +21,6 @@
#ifndef R820T_H
#define R820T_H
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
enum r820t_chip {
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index 5f1a60bf7ced..76807f5b3cf8 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -17,7 +17,6 @@
#ifndef SI2157_H
#define SI2157_H
-#include <linux/kconfig.h>
#include <media/media-device.h>
#include "dvb_frontend.h"
diff --git a/drivers/media/tuners/tda18212.h b/drivers/media/tuners/tda18212.h
index e58c9096d79c..6391dafd0c9d 100644
--- a/drivers/media/tuners/tda18212.h
+++ b/drivers/media/tuners/tda18212.h
@@ -21,7 +21,6 @@
#ifndef TDA18212_H
#define TDA18212_H
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
struct tda18212_config {
diff --git a/drivers/media/tuners/tda18218.h b/drivers/media/tuners/tda18218.h
index 1eacb4f84e93..076b5f2e888d 100644
--- a/drivers/media/tuners/tda18218.h
+++ b/drivers/media/tuners/tda18218.h
@@ -21,7 +21,6 @@
#ifndef TDA18218_H
#define TDA18218_H
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
struct tda18218_config {
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index f8620741bb5f..2d50e8b1dce1 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -18,11 +18,12 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/delay.h>
-#include <linux/videodev2.h>
#include "tda18271-priv.h"
#include "tda8290.h"
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+
int tda18271_debug;
module_param_named(debug, tda18271_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debug level "
@@ -646,7 +647,7 @@ static int tda18271_calc_rf_filter_curve(struct dvb_frontend *fe)
unsigned int i;
int ret;
- tda_info("tda18271: performing RF tracking filter calibration\n");
+ tda_info("performing RF tracking filter calibration\n");
/* wait for die temperature stabilization */
msleep(200);
@@ -692,12 +693,12 @@ static int tda18271c2_rf_cal_init(struct dvb_frontend *fe)
if (tda_fail(ret))
goto fail;
- tda_info("tda18271: RF tracking filter calibration complete\n");
+ tda_info("RF tracking filter calibration complete\n");
priv->cal_initialized = true;
goto end;
fail:
- tda_info("tda18271: RF tracking filter calibration failed!\n");
+ tda_info("RF tracking filter calibration failed!\n");
end:
return ret;
}
diff --git a/drivers/media/tuners/tda18271-priv.h b/drivers/media/tuners/tda18271-priv.h
index cc80f544af34..0bcc735a0427 100644
--- a/drivers/media/tuners/tda18271-priv.h
+++ b/drivers/media/tuners/tda18271-priv.h
@@ -21,6 +21,8 @@
#ifndef __TDA18271_PRIV_H__
#define __TDA18271_PRIV_H__
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
diff --git a/drivers/media/tuners/tda827x.c b/drivers/media/tuners/tda827x.c
index edcb4a723aa1..5050ce9be423 100644
--- a/drivers/media/tuners/tda827x.c
+++ b/drivers/media/tuners/tda827x.c
@@ -818,7 +818,7 @@ static int tda827x_initial_sleep(struct dvb_frontend *fe)
return fe->ops.tuner_ops.sleep(fe);
}
-static struct dvb_tuner_ops tda827xo_tuner_ops = {
+static const struct dvb_tuner_ops tda827xo_tuner_ops = {
.info = {
.name = "Philips TDA827X",
.frequency_min = 55000000,
@@ -834,7 +834,7 @@ static struct dvb_tuner_ops tda827xo_tuner_ops = {
.get_bandwidth = tda827x_get_bandwidth,
};
-static struct dvb_tuner_ops tda827xa_tuner_ops = {
+static const struct dvb_tuner_ops tda827xa_tuner_ops = {
.info = {
.name = "Philips TDA827XA",
.frequency_min = 44000000,
diff --git a/drivers/media/tuners/tea5761.c b/drivers/media/tuners/tea5761.c
index bf78cb9fc52c..36b0b1e1d05b 100644
--- a/drivers/media/tuners/tea5761.c
+++ b/drivers/media/tuners/tea5761.c
@@ -301,7 +301,7 @@ static int tea5761_get_frequency(struct dvb_frontend *fe, u32 *frequency)
return 0;
}
-static struct dvb_tuner_ops tea5761_tuner_ops = {
+static const struct dvb_tuner_ops tea5761_tuner_ops = {
.info = {
.name = "tea5761", // Philips TEA5761HN FM Radio
},
diff --git a/drivers/media/tuners/tea5767.c b/drivers/media/tuners/tea5767.c
index 36e85d81acb2..d62a6d6b1f42 100644
--- a/drivers/media/tuners/tea5767.c
+++ b/drivers/media/tuners/tea5767.c
@@ -10,6 +10,8 @@
* from their contributions on DScaler.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -370,17 +372,18 @@ int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
{
struct tuner_i2c_props i2c = { .adap = i2c_adap, .addr = i2c_addr };
unsigned char buffer[7] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
int rc;
if ((rc = tuner_i2c_xfer_recv(&i2c, buffer, 7))< 5) {
- printk(KERN_WARNING "It is not a TEA5767. Received %i bytes.\n", rc);
+ pr_warn("It is not a TEA5767. Received %i bytes.\n", rc);
return -EINVAL;
}
/* If all bytes are the same then it's a TV tuner and not a tea5767 */
if (buffer[0] == buffer[1] && buffer[0] == buffer[2] &&
buffer[0] == buffer[3] && buffer[0] == buffer[4]) {
- printk(KERN_WARNING "All bytes are equal. It is not a TEA5767\n");
+ pr_warn("All bytes are equal. It is not a TEA5767\n");
return -EINVAL;
}
@@ -390,7 +393,7 @@ int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
* Byte 5: bit 7:0 : == 0
*/
if (((buffer[3] & 0x0f) != 0x00) || (buffer[4] != 0x00)) {
- printk(KERN_WARNING "Chip ID is not zero. It is not a TEA5767\n");
+ pr_warn("Chip ID is not zero. It is not a TEA5767\n");
return -EINVAL;
}
@@ -423,7 +426,7 @@ static int tea5767_set_config (struct dvb_frontend *fe, void *priv_cfg)
return 0;
}
-static struct dvb_tuner_ops tea5767_tuner_ops = {
+static const struct dvb_tuner_ops tea5767_tuner_ops = {
.info = {
.name = "tea5767", // Philips TEA5767HN FM Radio
},
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 8e9ce144da9a..9ba9582e7765 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -1035,7 +1035,7 @@ static int simple_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
return 0;
}
-static struct dvb_tuner_ops simple_tuner_ops = {
+static const struct dvb_tuner_ops simple_tuner_ops = {
.init = simple_init,
.sleep = simple_sleep,
.set_analog_params = simple_set_params,
diff --git a/drivers/media/tuners/xc5000.h b/drivers/media/tuners/xc5000.h
index 00ba29e21fb9..336bd49eb09b 100644
--- a/drivers/media/tuners/xc5000.h
+++ b/drivers/media/tuners/xc5000.h
@@ -22,7 +22,6 @@
#ifndef __XC5000_H__
#define __XC5000_H__
-#include <linux/kconfig.h>
#include <linux/firmware.h>
struct dvb_frontend;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 3c556ee306cd..8251942bcd12 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -605,7 +605,7 @@ static void airspy_stop_streaming(struct vb2_queue *vq)
mutex_unlock(&s->v4l2_lock);
}
-static struct vb2_ops airspy_vb2_ops = {
+static const struct vb2_ops airspy_vb2_ops = {
.queue_setup = airspy_queue_setup,
.buf_queue = airspy_buf_queue,
.start_streaming = airspy_start_streaming,
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 3d6687f0407d..1e66e7828d8f 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -344,7 +344,8 @@ int au0828_rc_register(struct au0828_dev *dev)
rc->dev.parent = &dev->usbdev->dev;
rc->driver_name = "au0828-input";
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->allowed_protocols = RC_BIT_NEC | RC_BIT_RC5;
+ rc->allowed_protocols = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 |
+ RC_BIT_RC5;
/* all done */
err = rc_register_device(rc);
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 13b8387082f2..85dd9a8e83ff 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -928,7 +928,7 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
del_timer_sync(&dev->vbi_timeout);
}
-static struct vb2_ops au0828_video_qops = {
+static const struct vb2_ops au0828_video_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index d4bdba60b0f7..52bc42da8a4c 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -73,23 +73,34 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI,
u8 request_type = (read ? USB_DIR_IN : USB_DIR_OUT) | USB_TYPE_VENDOR;
u8 wAddress = B2C2_FLEX_PCIOFFSET_TO_INTERNALADDR(wRegOffsPCI) |
(read ? 0x80 : 0);
+ int ret;
+
+ mutex_lock(&fc_usb->data_mutex);
+ if (!read)
+ memcpy(fc_usb->data, val, sizeof(*val));
- int len = usb_control_msg(fc_usb->udev,
+ ret = usb_control_msg(fc_usb->udev,
read ? B2C2_USB_CTRL_PIPE_IN : B2C2_USB_CTRL_PIPE_OUT,
request,
request_type, /* 0xc0 read or 0x40 write */
wAddress,
0,
- val,
+ fc_usb->data,
sizeof(u32),
B2C2_WAIT_FOR_OPERATION_RDW * HZ);
- if (len != sizeof(u32)) {
+ if (ret != sizeof(u32)) {
err("error while %s dword from %d (%d).", read ? "reading" :
"writing", wAddress, wRegOffsPCI);
- return -EIO;
+ if (ret >= 0)
+ ret = -EIO;
}
- return 0;
+
+ if (read && ret >= 0)
+ memcpy(val, fc_usb->data, sizeof(*val));
+ mutex_unlock(&fc_usb->data_mutex);
+
+ return ret;
}
/*
* DKT 010817 - add support for V8 memory read/write and flash update
@@ -100,9 +111,14 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
{
u8 request_type = USB_TYPE_VENDOR;
u16 wIndex;
- int nWaitTime, pipe, len;
+ int nWaitTime, pipe, ret;
wIndex = page << 8;
+ if (buflen > sizeof(fc_usb->data)) {
+ err("Buffer size bigger than max URB control message\n");
+ return -EIO;
+ }
+
switch (req) {
case B2C2_USB_READ_V8_MEM:
nWaitTime = B2C2_WAIT_FOR_OPERATION_V8READ;
@@ -127,17 +143,32 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb,
deb_v8("v8mem: %02x %02x %04x %04x, len: %d\n", request_type, req,
wAddress, wIndex, buflen);
- len = usb_control_msg(fc_usb->udev, pipe,
+ mutex_lock(&fc_usb->data_mutex);
+
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
+ memcpy(fc_usb->data, pbBuffer, buflen);
+
+ ret = usb_control_msg(fc_usb->udev, pipe,
req,
request_type,
wAddress,
wIndex,
- pbBuffer,
+ fc_usb->data,
buflen,
nWaitTime * HZ);
+ if (ret != buflen)
+ ret = -EIO;
+
+ if (ret >= 0) {
+ ret = 0;
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ memcpy(pbBuffer, fc_usb->data, buflen);
+ }
- debug_dump(pbBuffer, len, deb_v8);
- return len == buflen ? 0 : -EIO;
+ mutex_unlock(&fc_usb->data_mutex);
+
+ debug_dump(pbBuffer, ret, deb_v8);
+ return ret;
}
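
The pattern introduced here generalises: callers may pass stack memory, which must never be handed to USB/DMA, so the transfer goes through one preallocated, mutex-protected bounce buffer with copy-in before and copy-out after. A userspace sketch of the idea with a stubbed transfer function (the stub is not the real USB API):

#include <stdio.h>
#include <string.h>
#include <pthread.h>

static unsigned char bounce[80];                /* mirrors fc_usb->data[80] */
static pthread_mutex_t bounce_lock = PTHREAD_MUTEX_INITIALIZER;

static int stub_control_msg(unsigned char *buf, int len)
{
        memset(buf, 0xab, len);                 /* pretend the device answered */
        return len;
}

static int safe_read(unsigned char *out, int len)
{
        int ret;

        if (len > (int)sizeof(bounce))
                return -1;                      /* larger than the buffer */

        pthread_mutex_lock(&bounce_lock);
        ret = stub_control_msg(bounce, len);
        if (ret == len) {
                memcpy(out, bounce, len);       /* copy-out after success */
                ret = 0;
        } else {
                ret = -1;
        }
        pthread_mutex_unlock(&bounce_lock);
        return ret;
}

int main(void)
{
        unsigned char stack_buf[4];             /* fine: never handed to DMA */

        if (!safe_read(stack_buf, sizeof(stack_buf)))
                printf("read %02x %02x %02x %02x\n", stack_buf[0],
                       stack_buf[1], stack_buf[2], stack_buf[3]);
        return 0;
}

The cpia2 change further down applies the per-call kmalloc() variant of the same idea instead of a single preallocated buffer.
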
#define bytes_left_to_read_on_page(paddr,buflen) \
@@ -196,29 +227,6 @@ static int flexcop_usb_get_mac_addr(struct flexcop_device *fc, int extended)
fc->dvb_adapter.proposed_mac, 6);
}
-#if 0
-static int flexcop_usb_utility_req(struct flexcop_usb *fc_usb, int set,
- flexcop_usb_utility_function_t func, u8 extra, u16 wIndex,
- u16 buflen, u8 *pvBuffer)
-{
- u16 wValue;
- u8 request_type = (set ? USB_DIR_OUT : USB_DIR_IN) | USB_TYPE_VENDOR;
- int nWaitTime = 2,
- pipe = set ? B2C2_USB_CTRL_PIPE_OUT : B2C2_USB_CTRL_PIPE_IN, len;
- wValue = (func << 8) | extra;
-
- len = usb_control_msg(fc_usb->udev,pipe,
- B2C2_USB_UTILITY,
- request_type,
- wValue,
- wIndex,
- pvBuffer,
- buflen,
- nWaitTime * HZ);
- return len == buflen ? 0 : -EIO;
-}
-#endif
-
/* usb i2c stuff */
static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
flexcop_usb_request_t req, flexcop_usb_i2c_function_t func,
@@ -226,9 +234,14 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
{
struct flexcop_usb *fc_usb = i2c->fc->bus_specific;
u16 wValue, wIndex;
- int nWaitTime,pipe,len;
+ int nWaitTime, pipe, ret;
u8 request_type = USB_TYPE_VENDOR;
+ if (buflen > sizeof(fc_usb->data)) {
+ err("Buffer size bigger than max URB control message\n");
+ return -EIO;
+ }
+
switch (func) {
case USB_FUNC_I2C_WRITE:
case USB_FUNC_I2C_MULTIWRITE:
@@ -257,15 +270,32 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
wValue & 0xff, wValue >> 8,
wIndex & 0xff, wIndex >> 8);
- len = usb_control_msg(fc_usb->udev,pipe,
+ mutex_lock(&fc_usb->data_mutex);
+
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT)
+ memcpy(fc_usb->data, buf, buflen);
+
+ ret = usb_control_msg(fc_usb->udev, pipe,
req,
request_type,
wValue,
wIndex,
- buf,
+ fc_usb->data,
buflen,
nWaitTime * HZ);
- return len == buflen ? 0 : -EREMOTEIO;
+
+ if (ret != buflen)
+ ret = -EIO;
+
+ if (ret >= 0) {
+ ret = 0;
+ if ((request_type & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ memcpy(buf, fc_usb->data, buflen);
+ }
+
+ mutex_unlock(&fc_usb->data_mutex);
+
+ return 0;
}
/* actual bus specific access functions,
@@ -516,6 +546,7 @@ static int flexcop_usb_probe(struct usb_interface *intf,
/* general flexcop init */
fc_usb = fc->bus_specific;
fc_usb->fc_dev = fc;
+ mutex_init(&fc_usb->data_mutex);
fc->read_ibi_reg = flexcop_usb_read_ibi_reg;
fc->write_ibi_reg = flexcop_usb_write_ibi_reg;
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h
index 92529a9c4475..25ad43166e78 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.h
+++ b/drivers/media/usb/b2c2/flexcop-usb.h
@@ -29,6 +29,10 @@ struct flexcop_usb {
u8 tmp_buffer[1023+190];
int tmp_buffer_length;
+
+ /* for URB control messages */
+ u8 data[80];
+ struct mutex data_mutex;
};
#if 0
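The two flexcop hunks above establish a pattern that recurs throughout this series: usb_control_msg() may DMA directly from the buffer it is handed, so the buffer must live in kmalloc'd memory (here, embedded in the flexcop_usb state) rather than on the caller's stack, and a buffer shared between callers must be serialized with a mutex. A minimal kernel-style sketch of the read side, with invented names (struct foo_usb, foo_ctrl_read) rather than code lifted from the patch:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/usb.h>

struct foo_usb {
	struct usb_device *udev;
	struct mutex data_mutex;	/* serializes users of data[] */
	u8 data[64];			/* DMA-safe: part of a kmalloc'd state */
};

static int foo_ctrl_read(struct foo_usb *fc, u8 request, u16 value,
			 void *dst, u16 len)
{
	int ret;

	if (len > sizeof(fc->data))
		return -EINVAL;

	mutex_lock(&fc->data_mutex);
	ret = usb_control_msg(fc->udev, usb_rcvctrlpipe(fc->udev, 0),
			      request, USB_DIR_IN | USB_TYPE_VENDOR,
			      value, 0, fc->data, len, 1000);
	if (ret == len) {
		memcpy(dst, fc->data, len);	/* copy out while locked */
		ret = 0;
	} else if (ret >= 0) {
		ret = -EIO;			/* short transfer */
	}
	mutex_unlock(&fc->data_mutex);

	return ret;
}

The write direction mirrors this: copy the payload into the shared buffer before the transfer instead of out of it afterwards, exactly as flexcop_usb_readwrite_dw() now does.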
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 13620cdf0599..e9100a235831 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -545,18 +545,30 @@ static void free_sbufs(struct camera_data *cam)
static int write_packet(struct usb_device *udev,
u8 request, u8 * registers, u16 start, size_t size)
{
+ unsigned char *buf;
+ int ret;
+
if (!registers || size <= 0)
return -EINVAL;
- return usb_control_msg(udev,
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, registers, size);
+
+ ret = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
start, /* value */
0, /* index */
- registers, /* buffer */
+ buf, /* buffer */
size,
HZ);
+
+ kfree(buf);
+ return ret;
}
/****************************************************************************
@@ -567,18 +579,32 @@ static int write_packet(struct usb_device *udev,
static int read_packet(struct usb_device *udev,
u8 request, u8 * registers, u16 start, size_t size)
{
+ unsigned char *buf;
+ int ret;
+
if (!registers || size <= 0)
return -EINVAL;
- return usb_control_msg(udev,
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
request,
USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
start, /* value */
0, /* index */
- registers, /* buffer */
+ buf, /* buffer */
size,
HZ);
+
+ if (ret >= 0)
+ memcpy(registers, buf, size);
+
+ kfree(buf);
+
+ return ret;
}
/******************************************************************************
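write_packet() and read_packet() above use the lighter-weight variant of the same fix: instead of a long-lived buffer in the device state, each call bounces through a throwaway kmalloc allocation. A sketch of the write side under the same assumptions (helper name invented; kmemdup() combines the allocation and the copy-in):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int bounce_write_regs(struct usb_device *udev, u8 request, u16 start,
			     const u8 *regs, size_t size)
{
	u8 *buf;
	int ret;

	/* the caller's buffer may be on the stack; DMA needs heap memory */
	buf = kmemdup(regs, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      start, 0, buf, size, HZ);

	kfree(buf);
	return ret;	/* like the patch, this still returns the byte count */
}

No locking is needed because nothing is shared; the trade-off is one allocation per register access.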
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 4cd5fa91612f..8263c4b0610b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -635,7 +635,7 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
return vmalloc_to_page(pageptr);
}
-static struct snd_pcm_ops snd_cx231xx_pcm_capture = {
+static const struct snd_pcm_ops snd_cx231xx_pcm_capture = {
.open = snd_cx231xx_capture_open,
.close = snd_cx231xx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 491913778bcc..2f52d66b4dae 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1264,7 +1264,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
dev->board.agc_analog_digital_select_gpio,
analog_or_digital);
- return status;
+ if (status < 0)
+ return status;
+
+ return 0;
}
int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index c63248a18823..36bc25494319 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -486,7 +486,7 @@ struct cx231xx_board cx231xx_boards[] = {
.output_mode = OUT_MODE_VIP11,
.demod_xfer_mode = 0,
.ctl_pin_status_mask = 0xFFFFFFC4,
- .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */
+ .agc_analog_digital_select_gpio = 0x1c,
.tuner_sif_gpio = -1,
.tuner_scl_gpio = -1,
.tuner_sda_gpio = -1,
@@ -1186,12 +1186,12 @@ static void cx231xx_unregister_media_device(struct cx231xx *dev)
*/
void cx231xx_release_resources(struct cx231xx *dev)
{
+ cx231xx_ir_exit(dev);
+
cx231xx_release_analog_resources(dev);
cx231xx_remove_from_devlist(dev);
- cx231xx_ir_exit(dev);
-
/* Release I2C buses */
cx231xx_dev_uninit(dev);
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 8ec05cb306d8..8b099fe1d592 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -712,6 +712,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
break;
case CX231XX_BOARD_CNXT_RDE_253S:
case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
break;
case CX231XX_BOARD_HAUPPAUGE_EXETER:
@@ -738,14 +739,21 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
- errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
break;
default:
break;
}
}
- return errCode ? -EINVAL : 0;
+ if (errCode < 0) {
+ dev_err(dev->dev, "Failed to set devmode to %s: error: %i",
+ dev->mode == CX231XX_DIGITAL_MODE ? "digital" : "analog",
+ errCode);
+ return errCode;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_set_mode);
@@ -799,7 +807,7 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
case -ESHUTDOWN:
return;
default: /* error */
- cx231xx_isocdbg("urb completition error %d.\n", urb->status);
+ cx231xx_isocdbg("urb completion error %d.\n", urb->status);
break;
}
@@ -842,8 +850,11 @@ static void cx231xx_bulk_irq_callback(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
return;
+ case -EPIPE: /* stall */
+ cx231xx_isocdbg("urb completion error - device is stalled.\n");
+ return;
default: /* error */
- cx231xx_isocdbg("urb completition error %d.\n", urb->status);
+ cx231xx_isocdbg("urb completion error %d.\n", urb->status);
break;
}
@@ -867,6 +878,7 @@ void cx231xx_uninit_isoc(struct cx231xx *dev)
struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
struct urb *urb;
int i;
+ bool broken_pipe = false;
cx231xx_isocdbg("cx231xx: called cx231xx_uninit_isoc\n");
@@ -886,12 +898,19 @@ void cx231xx_uninit_isoc(struct cx231xx *dev)
transfer_buffer[i],
urb->transfer_dma);
}
+ if (urb->status == -EPIPE) {
+ broken_pipe = true;
+ }
usb_free_urb(urb);
dev->video_mode.isoc_ctl.urb[i] = NULL;
}
dev->video_mode.isoc_ctl.transfer_buffer[i] = NULL;
}
+ if (broken_pipe) {
+ cx231xx_isocdbg("Reset endpoint to recover broken pipe.");
+ usb_reset_endpoint(dev->udev, dev->video_mode.end_point_addr);
+ }
kfree(dev->video_mode.isoc_ctl.urb);
kfree(dev->video_mode.isoc_ctl.transfer_buffer);
kfree(dma_q->p_left_data);
@@ -918,6 +937,7 @@ void cx231xx_uninit_bulk(struct cx231xx *dev)
struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
struct urb *urb;
int i;
+ bool broken_pipe = false;
cx231xx_isocdbg("cx231xx: called cx231xx_uninit_bulk\n");
@@ -937,12 +957,19 @@ void cx231xx_uninit_bulk(struct cx231xx *dev)
transfer_buffer[i],
urb->transfer_dma);
}
+ if (urb->status == -EPIPE) {
+ broken_pipe = true;
+ }
usb_free_urb(urb);
dev->video_mode.bulk_ctl.urb[i] = NULL;
}
dev->video_mode.bulk_ctl.transfer_buffer[i] = NULL;
}
+ if (broken_pipe) {
+ cx231xx_isocdbg("Reset endpoint to recover broken pipe.");
+ usb_reset_endpoint(dev->udev, dev->video_mode.end_point_addr);
+ }
kfree(dev->video_mode.bulk_ctl.urb);
kfree(dev->video_mode.bulk_ctl.transfer_buffer);
kfree(dma_q->p_left_data);
@@ -1297,15 +1324,29 @@ int cx231xx_dev_init(struct cx231xx *dev)
dev->i2c_bus[2].i2c_reserve = 0;
/* register I2C buses */
- cx231xx_i2c_register(&dev->i2c_bus[0]);
- cx231xx_i2c_register(&dev->i2c_bus[1]);
- cx231xx_i2c_register(&dev->i2c_bus[2]);
+ errCode = cx231xx_i2c_register(&dev->i2c_bus[0]);
+ if (errCode < 0)
+ return errCode;
+ errCode = cx231xx_i2c_register(&dev->i2c_bus[1]);
+ if (errCode < 0)
+ return errCode;
+ errCode = cx231xx_i2c_register(&dev->i2c_bus[2]);
+ if (errCode < 0)
+ return errCode;
errCode = cx231xx_i2c_mux_create(dev);
+ if (errCode < 0) {
+ dev_err(dev->dev,
+ "%s: Failed to create I2C mux\n", __func__);
+ return errCode;
+ }
+ errCode = cx231xx_i2c_mux_register(dev, 0);
+ if (errCode < 0)
+ return errCode;
+
+ errCode = cx231xx_i2c_mux_register(dev, 1);
if (errCode < 0)
return errCode;
- cx231xx_i2c_mux_register(dev, 0);
- cx231xx_i2c_mux_register(dev, 1);
/* scan the real bus segments in the order of physical port numbers */
cx231xx_do_i2c_scan(dev, I2C_0);
@@ -1448,14 +1489,14 @@ int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val,
/* set request */
if (!request) {
if (direction)
- ven_req.bRequest = VRT_GET_GPIO; /* 0x8 gpio */
+ ven_req.bRequest = VRT_GET_GPIO; /* 0x9 gpio */
else
- ven_req.bRequest = VRT_SET_GPIO; /* 0x9 gpio */
+ ven_req.bRequest = VRT_SET_GPIO; /* 0x8 gpio */
} else {
if (direction)
- ven_req.bRequest = VRT_GET_GPIE; /* 0xa gpie */
+ ven_req.bRequest = VRT_GET_GPIE; /* 0xb gpie */
else
- ven_req.bRequest = VRT_SET_GPIE; /* 0xb gpie */
+ ven_req.bRequest = VRT_SET_GPIE; /* 0xa gpie */
}
/* set index value */
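The uninit changes above add stall recovery: when an URB completes with -EPIPE the endpoint is halted, and once every URB has been reaped the host-side halt state is cleared with usb_reset_endpoint() so the next streaming session starts clean. Condensed into a stand-alone sketch (names invented, logic as in the two hunks):

#include <linux/types.h>
#include <linux/usb.h>

static void reap_stream_urbs(struct usb_device *udev, struct urb **urbs,
			     int count, unsigned int ep_addr)
{
	bool broken_pipe = false;
	int i;

	for (i = 0; i < count; i++) {
		if (!urbs[i])
			continue;
		usb_kill_urb(urbs[i]);
		if (urbs[i]->status == -EPIPE)	/* endpoint stalled */
			broken_pipe = true;
		usb_free_urb(urbs[i]);
		urbs[i] = NULL;
	}

	if (broken_pipe)	/* one reset after teardown is enough */
		usb_reset_endpoint(udev, ep_addr);
}

The matching completion-handler change treats -EPIPE as fatal and returns without resubmitting, leaving the actual reset to the teardown path.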
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index ab2fb9fa0cd1..1417515d30eb 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -65,6 +65,7 @@ struct cx231xx_dvb {
struct dmx_frontend fe_hw;
struct dmx_frontend fe_mem;
struct dvb_net net;
+ struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
};
@@ -150,18 +151,6 @@ static struct tda18271_config pv_tda18271_config = {
.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
-static const struct si2165_config hauppauge_930C_HD_1113xx_si2165_config = {
- .i2c_addr = 0x64,
- .chip_mode = SI2165_MODE_PLL_XTAL,
- .ref_freq_Hz = 16000000,
-};
-
-static const struct si2165_config pctv_quatro_stick_1114xx_si2165_config = {
- .i2c_addr = 0x64,
- .chip_mode = SI2165_MODE_PLL_EXT,
- .ref_freq_Hz = 24000000,
-};
-
static struct lgdt3306a_config hauppauge_955q_lgdt3306a_config = {
.i2c_addr = 0x59,
.qam_if_khz = 4000,
@@ -586,8 +575,14 @@ static void unregister_dvb(struct cx231xx_dvb *dvb)
dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
dvb_dmxdev_release(&dvb->dmxdev);
dvb_dmx_release(&dvb->demux);
- client = dvb->i2c_client_tuner;
/* remove I2C tuner */
+ client = dvb->i2c_client_tuner;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+ /* remove I2C demod */
+ client = dvb->i2c_client_demod;
if (client) {
module_put(client->dev.driver->owner);
i2c_unregister_device(client);
@@ -749,19 +744,38 @@ static int dvb_init(struct cx231xx *dev)
break;
case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx:
+ {
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ struct si2165_platform_data si2165_pdata;
- dev->dvb->frontend = dvb_attach(si2165_attach,
- &hauppauge_930C_HD_1113xx_si2165_config,
- demod_i2c
- );
+ /* attach demod */
+ memset(&si2165_pdata, 0, sizeof(si2165_pdata));
+ si2165_pdata.fe = &dev->dvb->frontend;
+ si2165_pdata.chip_mode = SI2165_MODE_PLL_XTAL,
+ si2165_pdata.ref_freq_Hz = 16000000,
- if (dev->dvb->frontend == NULL) {
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2165", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2165_pdata;
+ request_module(info.type);
+ client = i2c_new_device(demod_i2c, &info);
+ if (client == NULL || client->dev.driver == NULL || dev->dvb->frontend == NULL) {
dev_err(dev->dev,
"Failed to attach SI2165 front end\n");
result = -EINVAL;
goto out_free;
}
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_demod = client;
+
dev->dvb->frontend->ops.i2c_gate_ctrl = NULL;
/* define general-purpose callback pointer */
@@ -774,27 +788,43 @@ static int dvb_init(struct cx231xx *dev)
dev->cx231xx_reset_analog_tuner = NULL;
break;
-
+ }
case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx:
{
struct i2c_client *client;
struct i2c_board_info info;
+ struct si2165_platform_data si2165_pdata;
struct si2157_config si2157_config;
- memset(&info, 0, sizeof(struct i2c_board_info));
+ /* attach demod */
+ memset(&si2165_pdata, 0, sizeof(si2165_pdata));
+ si2165_pdata.fe = &dev->dvb->frontend;
+ si2165_pdata.chip_mode = SI2165_MODE_PLL_EXT,
+ si2165_pdata.ref_freq_Hz = 24000000,
- dev->dvb->frontend = dvb_attach(si2165_attach,
- &pctv_quatro_stick_1114xx_si2165_config,
- demod_i2c
- );
-
- if (dev->dvb->frontend == NULL) {
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2165", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2165_pdata;
+ request_module(info.type);
+ client = i2c_new_device(demod_i2c, &info);
+ if (client == NULL || client->dev.driver == NULL || dev->dvb->frontend == NULL) {
dev_err(dev->dev,
"Failed to attach SI2165 front end\n");
result = -EINVAL;
goto out_free;
}
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_demod = client;
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+
dev->dvb->frontend->ops.i2c_gate_ctrl = NULL;
/* define general-purpose callback pointer */
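Both 930C board cases above replace the old dvb_attach(si2165_attach, ...) call with the I2C-client binding model: the si2165 demod is now configured through struct si2165_platform_data, so attachment means registering an i2c_client and pinning the driver module. The sequence, condensed under an invented helper name (error handling shortened; "si2165" and address 0x64 are from the patch):

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/string.h>

static struct i2c_client *demod_bind(struct i2c_adapter *adap, void *pdata)
{
	struct i2c_board_info info = {};
	struct i2c_client *client;

	strlcpy(info.type, "si2165", I2C_NAME_SIZE);
	info.addr = 0x64;
	info.platform_data = pdata;

	request_module(info.type);		/* make sure the driver is loaded */
	client = i2c_new_device(adap, &info);	/* bind it on this bus */
	if (!client)
		return NULL;
	if (!client->dev.driver) {		/* no driver bound */
		i2c_unregister_device(client);
		return NULL;
	}

	/* hold a module reference for as long as we keep the client */
	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);
		return NULL;
	}

	return client;
}

unregister_dvb() gains the symmetric release (module_put() plus i2c_unregister_device()) for the new i2c_client_demod pointer, mirroring what was already done for the tuner.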
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index 473cd3433fe5..35e9acfe63d3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -454,7 +454,7 @@ static u32 functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
}
-static struct i2c_algorithm cx231xx_algo = {
+static const struct i2c_algorithm cx231xx_algo = {
.master_xfer = cx231xx_i2c_xfer,
.functionality = functionality,
};
@@ -608,7 +608,7 @@ struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port)
case I2C_1_MUX_3:
return dev->muxc->adapter[1];
default:
- return NULL;
+ BUG();
}
}
EXPORT_SYMBOL_GPL(cx231xx_get_i2c_adap);
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 09e0f58f6bb7..941ceff9b268 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1222,6 +1222,7 @@ static int af9015_rc_query(struct dvb_usb_device *d)
/* Only process key if canary killed */
if (buf[16] != 0xff && buf[0] != 0x01) {
+ enum rc_type proto;
dev_dbg(&d->udev->dev, "%s: key pressed %*ph\n",
__func__, 4, buf + 12);
@@ -1237,11 +1238,13 @@ static int af9015_rc_query(struct dvb_usb_device *d)
/* NEC */
state->rc_keycode = RC_SCANCODE_NEC(buf[12],
buf[14]);
+ proto = RC_TYPE_NEC;
} else {
/* NEC extended*/
state->rc_keycode = RC_SCANCODE_NECX(buf[12] << 8 |
buf[13],
buf[14]);
+ proto = RC_TYPE_NECX;
}
} else {
/* 32 bit NEC */
@@ -1249,8 +1252,9 @@ static int af9015_rc_query(struct dvb_usb_device *d)
buf[13] << 16 |
buf[14] << 8 |
buf[15]);
+ proto = RC_TYPE_NEC32;
}
- rc_keydown(d->rc_dev, RC_TYPE_NEC, state->rc_keycode, 0);
+ rc_keydown(d->rc_dev, proto, state->rc_keycode, 0);
} else {
dev_dbg(&d->udev->dev, "%s: no key press\n", __func__);
/* Invalidate last keypress */
@@ -1317,7 +1321,7 @@ static int af9015_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
if (!rc->map_name)
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_BIT_NEC;
+ rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32;
rc->query = af9015_rc_query;
rc->interval = 500;
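This NEC discrimination is repeated almost verbatim in the af9035, az6007, rtl28xxu and dib0700 patches below: which NEC variant a scancode belongs to is inferred from whether the checksum bytes still hold. A condensed sketch using the byte layout of the rtl28xxu hunk (buf[0..3] = address, ~address, command, ~command; helper name invented):

#include <linux/types.h>
#include <media/rc-core.h>

static void report_nec_scancode(struct rc_dev *rc, const u8 *buf)
{
	enum rc_type proto;
	u32 scancode;

	if (buf[2] == (u8)~buf[3]) {		/* command checksum holds */
		if (buf[0] == (u8)~buf[1]) {	/* address checksum holds */
			/* NEC standard: 8-bit address, 8-bit command */
			scancode = RC_SCANCODE_NEC(buf[0], buf[2]);
			proto = RC_TYPE_NEC;
		} else {
			/* NEC extended: 16-bit address, 8-bit command */
			scancode = RC_SCANCODE_NECX(buf[0] << 8 | buf[1],
						    buf[2]);
			proto = RC_TYPE_NECX;
		}
	} else {
		/* NEC32: no checksums, all 32 bits are payload */
		scancode = RC_SCANCODE_NEC32(buf[0] << 24 | buf[1] << 16 |
					     buf[2] << 8 | buf[3]);
		proto = RC_TYPE_NEC32;
	}

	rc_keydown(rc, proto, scancode, 0);	/* toggle unused for NEC */
}

Reporting the precise variant matters because the single NEC protocol bit was split into RC_BIT_NEC, RC_BIT_NECX and RC_BIT_NEC32, as the allowed_protos updates in these same patches show.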
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index ca018cd3fcd4..8961dd732522 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1828,6 +1828,7 @@ static int af9035_rc_query(struct dvb_usb_device *d)
{
struct usb_interface *intf = d->intf;
int ret;
+ enum rc_type proto;
u32 key;
u8 buf[4];
struct usb_req req = { CMD_IR_GET, 0, 0, NULL, 4, buf };
@@ -1842,19 +1843,22 @@ static int af9035_rc_query(struct dvb_usb_device *d)
if ((buf[0] + buf[1]) == 0xff) {
/* NEC standard 16bit */
key = RC_SCANCODE_NEC(buf[0], buf[2]);
+ proto = RC_TYPE_NEC;
} else {
/* NEC extended 24bit */
key = RC_SCANCODE_NECX(buf[0] << 8 | buf[1], buf[2]);
+ proto = RC_TYPE_NECX;
}
} else {
/* NEC full code 32bit */
key = RC_SCANCODE_NEC32(buf[0] << 24 | buf[1] << 16 |
buf[2] << 8 | buf[3]);
+ proto = RC_TYPE_NEC32;
}
dev_dbg(&intf->dev, "%*ph\n", 4, buf);
- rc_keydown(d->rc_dev, RC_TYPE_NEC, key, 0);
+ rc_keydown(d->rc_dev, proto, key, 0);
return 0;
@@ -1889,7 +1893,8 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
switch (tmp) {
case 0: /* NEC */
default:
- rc->allowed_protos = RC_BIT_NEC;
+ rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX |
+ RC_BIT_NEC32;
break;
case 1: /* RC6 */
rc->allowed_protos = RC_BIT_RC6_MCE;
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 935dbaa80ef0..50c07fe7dacb 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -208,6 +208,7 @@ static int az6007_rc_query(struct dvb_usb_device *d)
{
struct az6007_device_state *st = d_to_priv(d);
unsigned code;
+ enum rc_type proto;
az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10);
@@ -215,19 +216,23 @@ static int az6007_rc_query(struct dvb_usb_device *d)
return 0;
if ((st->data[3] ^ st->data[4]) == 0xff) {
- if ((st->data[1] ^ st->data[2]) == 0xff)
+ if ((st->data[1] ^ st->data[2]) == 0xff) {
code = RC_SCANCODE_NEC(st->data[1], st->data[3]);
- else
+ proto = RC_TYPE_NEC;
+ } else {
code = RC_SCANCODE_NECX(st->data[1] << 8 | st->data[2],
st->data[3]);
+ proto = RC_TYPE_NECX;
+ }
} else {
code = RC_SCANCODE_NEC32(st->data[1] << 24 |
st->data[2] << 16 |
st->data[3] << 8 |
st->data[4]);
+ proto = RC_TYPE_NEC32;
}
- rc_keydown(d->rc_dev, RC_TYPE_NEC, code, st->data[5]);
+ rc_keydown(d->rc_dev, proto, code, st->data[5]);
return 0;
}
@@ -236,7 +241,7 @@ static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
pr_debug("Getting az6007 Remote Control properties\n");
- rc->allowed_protos = RC_BIT_NEC;
+ rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32;
rc->query = az6007_rc_query;
rc->interval = 400;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 3fbb2cd19f5e..a8e6624fbe83 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -82,8 +82,6 @@ static int dvb_usbv2_i2c_init(struct dvb_usb_device *d)
ret = i2c_add_adapter(&d->i2c_adap);
if (ret < 0) {
d->i2c_adap.algo = NULL;
- dev_err(&d->udev->dev, "%s: i2c_add_adapter() failed=%d\n",
- KBUILD_MODNAME, ret);
goto err;
}
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 3721ee63b8fb..0e8fb89896c4 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -357,7 +357,8 @@ static void lme2510_int_response(struct urb *lme_urb)
ibuf[5]);
deb_info(1, "INT Key = 0x%08x", key);
- rc_keydown(adap_to_d(adap)->rc_dev, RC_TYPE_NEC, key, 0);
+ rc_keydown(adap_to_d(adap)->rc_dev, RC_TYPE_NEC32, key,
+ 0);
break;
case 0xbb:
switch (st->tuner_config) {
@@ -1242,7 +1243,7 @@ static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
static int lme2510_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
- rc->allowed_protos = RC_BIT_NEC;
+ rc->allowed_protos = RC_BIT_NEC32;
return 0;
}
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 7065aca81252..e6eae9d88e9f 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -21,7 +21,6 @@
#ifndef __MXL111SF_DEMOD_H__
#define __MXL111SF_DEMOD_H__
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
#include "mxl111sf.h"
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 7d16252dbb71..f141dcc55cc9 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -466,7 +466,7 @@ static int mxl111sf_tuner_release(struct dvb_frontend *fe)
/* ------------------------------------------------------------------------- */
-static struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
+static const struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
.info = {
.name = "MaxLinear MxL111SF",
#if 0
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 509b55071218..e96d9a444ed1 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -21,7 +21,6 @@
#ifndef __MXL111SF_TUNER_H__
#define __MXL111SF_TUNER_H__
-#include <linux/kconfig.h>
#include "dvb_frontend.h"
#include "mxl111sf.h"
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 6643762a9ff7..c583c638e468 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1631,22 +1631,27 @@ static int rtl2831u_rc_query(struct dvb_usb_device *d)
goto err;
if (buf[4] & 0x01) {
+ enum rc_type proto;
+
if (buf[2] == (u8) ~buf[3]) {
if (buf[0] == (u8) ~buf[1]) {
/* NEC standard (16 bit) */
rc_code = RC_SCANCODE_NEC(buf[0], buf[2]);
+ proto = RC_TYPE_NEC;
} else {
/* NEC extended (24 bit) */
rc_code = RC_SCANCODE_NECX(buf[0] << 8 | buf[1],
buf[2]);
+ proto = RC_TYPE_NECX;
}
} else {
/* NEC full (32 bit) */
rc_code = RC_SCANCODE_NEC32(buf[0] << 24 | buf[1] << 16 |
buf[2] << 8 | buf[3]);
+ proto = RC_TYPE_NEC32;
}
- rc_keydown(d->rc_dev, RC_TYPE_NEC, rc_code, 0);
+ rc_keydown(d->rc_dev, proto, rc_code, 0);
ret = rtl28xxu_wr_reg(d, SYS_IRRC_SR, 1);
if (ret)
@@ -1668,7 +1673,7 @@ static int rtl2831u_get_rc_config(struct dvb_usb_device *d,
struct dvb_usb_rc *rc)
{
rc->map_name = RC_MAP_EMPTY;
- rc->allowed_protos = RC_BIT_NEC;
+ rc->allowed_protos = RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32;
rc->query = rtl2831u_rc_query;
rc->interval = 400;
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index f03b0b70c901..959fa09dfd92 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -20,10 +20,20 @@ config DVB_USB_DEBUG
Say Y if you want to enable debugging. See modinfo dvb-usb (and the
appropriate drivers) for debug levels.
+config DVB_USB_DIB3000MC
+ tristate
+ depends on DVB_USB
+ select DVB_DIB3000MC
+ help
+ This is a module with helper functions for accessing the
+ DIB3000MC from USB DVB devices. It must be a separate module
+ in case DVB_USB is built-in and DVB_DIB3000MC is a module,
+ and gets selected automatically when needed.
+
config DVB_USB_A800
tristate "AVerMedia AverTV DVB-T USB 2.0 (A800)"
depends on DVB_USB
- select DVB_DIB3000MC
+ select DVB_USB_DIB3000MC
select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
help
@@ -34,6 +44,7 @@ config DVB_USB_DIBUSB_MB
depends on DVB_USB
select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
select DVB_DIB3000MB
+ depends on DVB_DIB3000MC || !DVB_DIB3000MC
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
help
Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by
@@ -54,7 +65,7 @@ config DVB_USB_DIBUSB_MB_FAULTY
config DVB_USB_DIBUSB_MC
tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)"
depends on DVB_USB
- select DVB_DIB3000MC
+ select DVB_USB_DIB3000MC
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
help
Support for USB2.0 DVB-T receivers based on reference designs made by
@@ -72,7 +83,7 @@ config DVB_USB_DIB0700
select DVB_DIB7000P if MEDIA_SUBDRV_AUTOSELECT
select DVB_DIB7000M if MEDIA_SUBDRV_AUTOSELECT
select DVB_DIB8000 if MEDIA_SUBDRV_AUTOSELECT
- select DVB_DIB3000MC if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_USB_DIB3000MC if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TUNER_DIB0070 if MEDIA_SUBDRV_AUTOSELECT
@@ -99,7 +110,7 @@ config DVB_USB_UMT_010
tristate "HanfTek UMT-010 DVB-T USB2.0 support"
depends on DVB_USB
select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
- select DVB_DIB3000MC
+ select DVB_USB_DIB3000MC
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT
help
@@ -192,7 +203,7 @@ config DVB_USB_GP8PSK
config DVB_USB_NOVA_T_USB2
tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support"
depends on DVB_USB
- select DVB_DIB3000MC
+ select DVB_USB_DIB3000MC
select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
help
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index acdd1efd4e74..3b3f32b426d1 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_DVB_USB_VP7045) += dvb-usb-vp7045.o
dvb-usb-vp702x-objs := vp702x.o vp702x-fe.o
obj-$(CONFIG_DVB_USB_VP702X) += dvb-usb-vp702x.o
-dvb-usb-gp8psk-objs := gp8psk.o gp8psk-fe.o
+dvb-usb-gp8psk-objs := gp8psk.o
obj-$(CONFIG_DVB_USB_GP8PSK) += dvb-usb-gp8psk.o
dvb-usb-dtt200u-objs := dtt200u.o dtt200u-fe.o
@@ -16,20 +16,23 @@ obj-$(CONFIG_DVB_USB_DTT200U) += dvb-usb-dtt200u.o
dvb-usb-dibusb-common-objs := dibusb-common.o
+dvb-usb-dibusb-mc-common-objs := dibusb-mc-common.o
+obj-$(CONFIG_DVB_USB_DIB3000MC) += dvb-usb-dibusb-common.o dvb-usb-dibusb-mc-common.o
+
dvb-usb-a800-objs := a800.o
-obj-$(CONFIG_DVB_USB_A800) += dvb-usb-dibusb-common.o dvb-usb-a800.o
+obj-$(CONFIG_DVB_USB_A800) += dvb-usb-a800.o
dvb-usb-dibusb-mb-objs := dibusb-mb.o
obj-$(CONFIG_DVB_USB_DIBUSB_MB) += dvb-usb-dibusb-common.o dvb-usb-dibusb-mb.o
dvb-usb-dibusb-mc-objs := dibusb-mc.o
-obj-$(CONFIG_DVB_USB_DIBUSB_MC) += dvb-usb-dibusb-common.o dvb-usb-dibusb-mc.o
+obj-$(CONFIG_DVB_USB_DIBUSB_MC) += dvb-usb-dibusb-mc.o
dvb-usb-nova-t-usb2-objs := nova-t-usb2.o
-obj-$(CONFIG_DVB_USB_NOVA_T_USB2) += dvb-usb-dibusb-common.o dvb-usb-nova-t-usb2.o
+obj-$(CONFIG_DVB_USB_NOVA_T_USB2) += dvb-usb-nova-t-usb2.o
dvb-usb-umt-010-objs := umt-010.o
-obj-$(CONFIG_DVB_USB_UMT_010) += dvb-usb-dibusb-common.o dvb-usb-umt-010.o
+obj-$(CONFIG_DVB_USB_UMT_010) += dvb-usb-umt-010.o
dvb-usb-m920x-objs := m920x.o
obj-$(CONFIG_DVB_USB_M920X) += dvb-usb-m920x.o
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index efa782ed6e2d..7853261906b1 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -52,17 +52,15 @@ u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
struct af9005_device_state {
u8 sequence;
int led_state;
+ unsigned char data[256];
};
static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
int readwrite, int type, u8 * values, int len)
{
struct af9005_device_state *st = d->priv;
- u8 obuf[16] = { 0 };
- u8 ibuf[17] = { 0 };
- u8 command;
- int i;
- int ret;
+ u8 command, seq;
+ int i, ret;
if (len < 1) {
err("generic read/write, less than 1 byte. Makes no sense.");
@@ -73,16 +71,17 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
return -EINVAL;
}
- obuf[0] = 14; /* rest of buffer length low */
- obuf[1] = 0; /* rest of buffer length high */
+ mutex_lock(&d->data_mutex);
+ st->data[0] = 14; /* rest of buffer length low */
+ st->data[1] = 0; /* rest of buffer length high */
- obuf[2] = AF9005_REGISTER_RW; /* register operation */
- obuf[3] = 12; /* rest of buffer length */
+ st->data[2] = AF9005_REGISTER_RW; /* register operation */
+ st->data[3] = 12; /* rest of buffer length */
- obuf[4] = st->sequence++; /* sequence number */
+ st->data[4] = seq = st->sequence++; /* sequence number */
- obuf[5] = (u8) (reg >> 8); /* register address */
- obuf[6] = (u8) (reg & 0xff);
+ st->data[5] = (u8) (reg >> 8); /* register address */
+ st->data[6] = (u8) (reg & 0xff);
if (type == AF9005_OFDM_REG) {
command = AF9005_CMD_OFDM_REG;
@@ -96,51 +95,52 @@ static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
command |= readwrite;
if (readwrite == AF9005_CMD_WRITE)
for (i = 0; i < len; i++)
- obuf[8 + i] = values[i];
+ st->data[8 + i] = values[i];
else if (type == AF9005_TUNER_REG)
/* read command for tuner, the first byte contains the i2c address */
- obuf[8] = values[0];
- obuf[7] = command;
+ st->data[8] = values[0];
+ st->data[7] = command;
- ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 17, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 17, 0);
if (ret)
- return ret;
+ goto ret;
/* sanity check */
- if (ibuf[2] != AF9005_REGISTER_RW_ACK) {
+ if (st->data[2] != AF9005_REGISTER_RW_ACK) {
err("generic read/write, wrong reply code.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- if (ibuf[3] != 0x0d) {
+ if (st->data[3] != 0x0d) {
err("generic read/write, wrong length in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- if (ibuf[4] != obuf[4]) {
+ if (st->data[4] != seq) {
err("generic read/write, wrong sequence in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
/*
- Windows driver doesn't check these fields, in fact sometimes
- the register in the reply is different that what has been sent
-
- if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) {
- err("generic read/write, wrong register in reply.");
- return -EIO;
- }
- if (ibuf[7] != command) {
- err("generic read/write wrong command in reply.");
- return -EIO;
- }
+ * In theory, both input and output buffers should have
+ * identical values for st->data[5] to st->data[8].
+ * However, the Windows driver doesn't check these fields; in fact
+ * sometimes the register in the reply is different from what
+ * has been sent
*/
- if (ibuf[16] != 0x01) {
+ if (st->data[16] != 0x01) {
err("generic read/write wrong status code in reply.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
+
if (readwrite == AF9005_CMD_READ)
for (i = 0; i < len; i++)
- values[i] = ibuf[8 + i];
+ values[i] = st->data[8 + i];
- return 0;
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
@@ -464,8 +464,7 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
struct af9005_device_state *st = d->priv;
int ret, i, packet_len;
- u8 buf[64];
- u8 ibuf[64];
+ u8 seq;
if (wlen < 0) {
err("send command, wlen less than 0 bytes. Makes no sense.");
@@ -480,94 +479,97 @@ int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
return -EINVAL;
}
packet_len = wlen + 5;
- buf[0] = (u8) (packet_len & 0xff);
- buf[1] = (u8) ((packet_len & 0xff00) >> 8);
-
- buf[2] = 0x26; /* packet type */
- buf[3] = wlen + 3;
- buf[4] = st->sequence++;
- buf[5] = command;
- buf[6] = wlen;
+
+ mutex_lock(&d->data_mutex);
+
+ st->data[0] = (u8) (packet_len & 0xff);
+ st->data[1] = (u8) ((packet_len & 0xff00) >> 8);
+
+ st->data[2] = 0x26; /* packet type */
+ st->data[3] = wlen + 3;
+ st->data[4] = seq = st->sequence++;
+ st->data[5] = command;
+ st->data[6] = wlen;
for (i = 0; i < wlen; i++)
- buf[7 + i] = wbuf[i];
- ret = dvb_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0);
- if (ret)
- return ret;
- if (ibuf[2] != 0x27) {
+ st->data[7 + i] = wbuf[i];
+ ret = dvb_usb_generic_rw(d, st->data, wlen + 7, st->data, rlen + 7, 0);
+ if (st->data[2] != 0x27) {
err("send command, wrong reply code.");
- return -EIO;
- }
- if (ibuf[4] != buf[4]) {
+ ret = -EIO;
+ } else if (st->data[4] != seq) {
err("send command, wrong sequence in reply.");
- return -EIO;
- }
- if (ibuf[5] != 0x01) {
+ ret = -EIO;
+ } else if (st->data[5] != 0x01) {
err("send command, wrong status code in reply.");
- return -EIO;
- }
- if (ibuf[6] != rlen) {
+ ret = -EIO;
+ } else if (st->data[6] != rlen) {
err("send command, invalid data length in reply.");
- return -EIO;
+ ret = -EIO;
}
- for (i = 0; i < rlen; i++)
- rbuf[i] = ibuf[i + 7];
- return 0;
+ if (!ret) {
+ for (i = 0; i < rlen; i++)
+ rbuf[i] = st->data[i + 7];
+ }
+
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
int len)
{
struct af9005_device_state *st = d->priv;
- u8 obuf[16], ibuf[14];
+ u8 seq;
int ret, i;
- memset(obuf, 0, sizeof(obuf));
- memset(ibuf, 0, sizeof(ibuf));
+ mutex_lock(&d->data_mutex);
- obuf[0] = 14; /* length of rest of packet low */
- obuf[1] = 0; /* length of rest of packer high */
+ memset(st->data, 0, sizeof(st->data));
- obuf[2] = 0x2a; /* read/write eeprom */
+ st->data[0] = 14; /* length of rest of packet low */
+ st->data[1] = 0; /* length of rest of packet high */
- obuf[3] = 12; /* size */
+ st->data[2] = 0x2a; /* read/write eeprom */
- obuf[4] = st->sequence++;
+ st->data[3] = 12; /* size */
- obuf[5] = 0; /* read */
+ st->data[4] = seq = st->sequence++;
- obuf[6] = len;
- obuf[7] = address;
- ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 14, 0);
- if (ret)
- return ret;
- if (ibuf[2] != 0x2b) {
+ st->data[5] = 0; /* read */
+
+ st->data[6] = len;
+ st->data[7] = address;
+ ret = dvb_usb_generic_rw(d, st->data, 16, st->data, 14, 0);
+ if (st->data[2] != 0x2b) {
err("Read eeprom, invalid reply code");
- return -EIO;
- }
- if (ibuf[3] != 10) {
+ ret = -EIO;
+ } else if (st->data[3] != 10) {
err("Read eeprom, invalid reply length");
- return -EIO;
- }
- if (ibuf[4] != obuf[4]) {
+ ret = -EIO;
+ } else if (st->data[4] != seq) {
err("Read eeprom, wrong sequence in reply ");
- return -EIO;
- }
- if (ibuf[5] != 1) {
+ ret = -EIO;
+ } else if (st->data[5] != 1) {
err("Read eeprom, wrong status in reply ");
- return -EIO;
+ ret = -EIO;
}
- for (i = 0; i < len; i++) {
- values[i] = ibuf[6 + i];
+
+ if (!ret) {
+ for (i = 0; i < len; i++)
+ values[i] = st->data[6 + i];
}
- return 0;
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
-static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply)
+static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
+ u8 *buf, int size)
{
- u8 buf[FW_BULKOUT_SIZE + 2];
u16 checksum;
int act_len, i, ret;
- memset(buf, 0, sizeof(buf));
+
+ memset(buf, 0, size);
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
switch (type) {
@@ -720,15 +722,21 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
{
int i, packets, ret, act_len;
- u8 buf[FW_BULKOUT_SIZE + 2];
+ u8 *buf;
u8 reply;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != 0x01) {
err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
packets = fw->size / FW_BULKOUT_SIZE;
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
@@ -743,28 +751,35 @@ static int af9005_download_firmware(struct usb_device *udev, const struct firmwa
buf, FW_BULKOUT_SIZE + 2, &act_len, 1000);
if (ret) {
err("firmware download failed at packet %d with code %d", i, ret);
- return ret;
+ goto err;
}
}
- ret = af9005_boot_packet(udev, FW_CONFIRM, &reply);
+ ret = af9005_boot_packet(udev, FW_CONFIRM, &reply,
+ buf, FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != (u8) (packets & 0xff)) {
err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
- ret = af9005_boot_packet(udev, FW_BOOT, &reply);
+ ret = af9005_boot_packet(udev, FW_BOOT, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ goto err;
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply, buf,
+ FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
if (reply != 0x02) {
err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply);
- return -EIO;
+ ret = -EIO;
+ goto err;
}
- return 0;
+err:
+ kfree(buf);
+ return ret;
}
@@ -823,53 +838,59 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
{
struct af9005_device_state *st = d->priv;
int ret, len;
-
- u8 obuf[5];
- u8 ibuf[256];
+ u8 seq;
*state = REMOTE_NO_KEY_PRESSED;
if (rc_decode == NULL) {
/* it should never come here */
return 0;
}
+
+ mutex_lock(&d->data_mutex);
+
/* deb_info("rc_query\n"); */
- obuf[0] = 3; /* rest of packet length low */
- obuf[1] = 0; /* rest of packet lentgh high */
- obuf[2] = 0x40; /* read remote */
- obuf[3] = 1; /* rest of packet length */
- obuf[4] = st->sequence++; /* sequence number */
- ret = dvb_usb_generic_rw(d, obuf, 5, ibuf, 256, 0);
+ st->data[0] = 3; /* rest of packet length low */
+ st->data[1] = 0; /* rest of packet length high */
+ st->data[2] = 0x40; /* read remote */
+ st->data[3] = 1; /* rest of packet length */
+ st->data[4] = seq = st->sequence++; /* sequence number */
+ ret = dvb_usb_generic_rw(d, st->data, 5, st->data, 256, 0);
if (ret) {
err("rc query failed");
- return ret;
+ goto ret;
}
- if (ibuf[2] != 0x41) {
+ if (st->data[2] != 0x41) {
err("rc query bad header.");
- return -EIO;
- }
- if (ibuf[4] != obuf[4]) {
+ ret = -EIO;
+ goto ret;
+ } else if (st->data[4] != seq) {
err("rc query bad sequence.");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
- len = ibuf[5];
+ len = st->data[5];
if (len > 246) {
err("rc query invalid length");
- return -EIO;
+ ret = -EIO;
+ goto ret;
}
if (len > 0) {
deb_rc("rc data (%d) ", len);
- debug_dump((ibuf + 6), len, deb_rc);
- ret = rc_decode(d, &ibuf[6], len, event, state);
+ debug_dump((st->data + 6), len, deb_rc);
+ ret = rc_decode(d, &st->data[6], len, event, state);
if (ret) {
err("rc_decode failed");
- return ret;
+ goto ret;
} else {
deb_rc("rc_decode state %x event %x\n", *state, *event);
if (*state == REMOTE_KEY_REPEAT)
*event = d->last_event;
}
}
- return 0;
+
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff)
@@ -953,10 +974,16 @@ static int af9005_identify_state(struct usb_device *udev,
int *cold)
{
int ret;
- u8 reply;
- ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ u8 reply, *buf;
+
+ buf = kmalloc(FW_BULKOUT_SIZE + 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply,
+ buf, FW_BULKOUT_SIZE + 2);
if (ret)
- return ret;
+ goto err;
deb_info("result of FW_CONFIG in identify state %d\n", reply);
if (reply == 0x01)
*cold = 1;
@@ -965,7 +992,10 @@ static int af9005_identify_state(struct usb_device *udev,
else
return -EIO;
deb_info("Identify state cold = %d\n", *cold);
- return 0;
+
+err:
+ kfree(buf);
+ return ret;
}
static struct dvb_usb_device_properties af9005_properties;
@@ -974,7 +1004,7 @@ static int af9005_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return dvb_usb_device_init(intf, &af9005_properties,
- THIS_MODULE, NULL, adapter_nr);
+ THIS_MODULE, NULL, adapter_nr);
}
enum af9005_usb_table_entry {
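All of the af9005 conversions above share one subtlety: the same st->data[] is passed to dvb_usb_generic_rw() as both the write and the read buffer, so anything that must be compared against the reply (the sequence counter in particular) has to be saved in a local before the exchange. A skeleton of the locked round trip, with invented names (the 0x26/0x27 packet-type codes are from the patch; packet-length setup and most reply checks are elided):

#include <linux/mutex.h>
#include <linux/string.h>
#include "dvb-usb.h"	/* old dvb-usb core: dvb_usb_generic_rw() */

struct qux_state {
	u8 sequence;
	unsigned char data[256];	/* shared, DMA-safe exchange buffer */
};

static int qux_cmd(struct dvb_usb_device *d, u8 command,
		   const u8 *wbuf, u8 wlen, u8 *rbuf, u8 rlen)
{
	struct qux_state *st = d->priv;
	u8 seq;
	int ret;

	mutex_lock(&d->data_mutex);

	st->data[2] = 0x26;			/* command packet type */
	st->data[4] = seq = st->sequence++;	/* save before reuse */
	st->data[5] = command;
	st->data[6] = wlen;
	memcpy(&st->data[7], wbuf, wlen);

	ret = dvb_usb_generic_rw(d, st->data, wlen + 7, st->data, rlen + 7, 0);
	if (!ret && (st->data[2] != 0x27 || st->data[4] != seq))
		ret = -EIO;			/* bad reply type or sequence */
	if (!ret)
		memcpy(rbuf, &st->data[7], rlen);

	mutex_unlock(&d->data_mutex);
	return ret;
}

One visible behavior change in the patched af9005_send_command(): it no longer returns early when the transfer itself fails, so the reply checks run regardless and typically rewrite ret to -EIO.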
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 9fd1527494eb..290275bc7fde 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -41,6 +41,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct cinergyt2_state {
u8 rc_counter;
+ unsigned char data[64];
};
/* We are missing a release hook with usb_device data */
@@ -50,38 +51,57 @@ static struct dvb_usb_device_properties cinergyt2_properties;
static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
{
- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
- char result[64];
- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
- sizeof(result), 0);
+ struct dvb_usb_device *d = adap->dev;
+ struct cinergyt2_state *st = d->priv;
+ int ret;
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
+ st->data[1] = enable ? 1 : 0;
+
+ ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 64, 0);
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
{
- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
- char state[3];
- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
+ struct cinergyt2_state *st = d->priv;
+ int ret;
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_SLEEP_MODE;
+ st->data[1] = enable ? 0 : 1;
+
+ ret = dvb_usb_generic_rw(d, st->data, 2, st->data, 3, 0);
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
{
- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
- char state[3];
+ struct dvb_usb_device *d = adap->dev;
+ struct cinergyt2_state *st = d->priv;
int ret;
adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
- sizeof(state), 0);
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
+
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
if (ret < 0) {
deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
"state info\n");
}
+ mutex_unlock(&d->data_mutex);
/* Copy this pointer as we are gonna need it in the release phase */
cinergyt2_usb_device = adap->dev;
- return 0;
+ return ret;
}
static struct rc_map_table rc_map_cinergyt2_table[] = {
@@ -141,13 +161,18 @@ static int repeatable_keys[] = {
static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
struct cinergyt2_state *st = d->priv;
- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
- int i;
+ int i, ret;
*state = REMOTE_NO_KEY_PRESSED;
- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
- if (key[4] == 0xff) {
+ mutex_lock(&d->data_mutex);
+ st->data[0] = CINERGYT2_EP1_GET_RC_EVENTS;
+
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ if (st->data[4] == 0xff) {
/* key repeat */
st->rc_counter++;
if (st->rc_counter > RC_REPEAT_DELAY) {
@@ -157,34 +182,36 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
*event = d->last_event;
deb_rc("repeat key, event %x\n",
*event);
- return 0;
+ goto ret;
}
}
deb_rc("repeated key (non repeatable)\n");
}
- return 0;
+ goto ret;
}
/* hack to pass checksum on the custom field */
- key[2] = ~key[1];
- dvb_usb_nec_rc_key_to_event(d, key, event, state);
- if (key[0] != 0) {
+ st->data[2] = ~st->data[1];
+ dvb_usb_nec_rc_key_to_event(d, st->data, event, state);
+ if (st->data[0] != 0) {
if (*event != d->last_event)
st->rc_counter = 0;
- deb_rc("key: %*ph\n", 5, key);
+ deb_rc("key: %*ph\n", 5, st->data);
}
- return 0;
+
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int cinergyt2_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return dvb_usb_device_init(intf, &cinergyt2_properties,
- THIS_MODULE, NULL, adapter_nr);
+ THIS_MODULE, NULL, adapter_nr);
}
-
static struct usb_device_id cinergyt2_usb_table[] = {
{ USB_DEVICE(USB_VID_TERRATEC, 0x0038) },
{ 0 }
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index b3ec743a7a2e..2d29b4174dba 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -139,32 +139,42 @@ static uint16_t compute_tps(struct dtv_frontend_properties *op)
struct cinergyt2_fe_state {
struct dvb_frontend fe;
struct dvb_usb_device *d;
+
+ unsigned char data[64];
+ struct mutex data_mutex;
+
+ struct dvbt_get_status_msg status;
};
static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg result;
- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
- sizeof(result), 0);
+ mutex_lock(&state->data_mutex);
+ state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1,
+ state->data, sizeof(state->status), 0);
+ if (!ret)
+ memcpy(&state->status, state->data, sizeof(state->status));
+ mutex_unlock(&state->data_mutex);
+
if (ret < 0)
return ret;
*status = 0;
- if (0xffff - le16_to_cpu(result.gain) > 30)
+ if (0xffff - le16_to_cpu(state->status.gain) > 30)
*status |= FE_HAS_SIGNAL;
- if (result.lock_bits & (1 << 6))
+ if (state->status.lock_bits & (1 << 6))
*status |= FE_HAS_LOCK;
- if (result.lock_bits & (1 << 5))
+ if (state->status.lock_bits & (1 << 5))
*status |= FE_HAS_SYNC;
- if (result.lock_bits & (1 << 4))
+ if (state->status.lock_bits & (1 << 4))
*status |= FE_HAS_CARRIER;
- if (result.lock_bits & (1 << 1))
+ if (state->status.lock_bits & (1 << 1))
*status |= FE_HAS_VITERBI;
if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
@@ -177,34 +187,16 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
-
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0)
- return ret;
- *ber = le32_to_cpu(status.viterbi_error_rate);
+ *ber = le32_to_cpu(state->status.viterbi_error_rate);
return 0;
}
static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
- ret);
- return ret;
- }
- *unc = le32_to_cpu(status.uncorrected_block_count);
+ *unc = le32_to_cpu(state->status.uncorrected_block_count);
return 0;
}
@@ -212,35 +204,16 @@ static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
u16 *strength)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_signal_strength() Failed!"
- " (Error=%d)\n", ret);
- return ret;
- }
- *strength = (0xffff - le16_to_cpu(status.gain));
+ *strength = (0xffff - le16_to_cpu(state->status.gain));
return 0;
}
static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_get_status_msg status;
- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
- int ret;
- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
- sizeof(status), 0);
- if (ret < 0) {
- err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
- return ret;
- }
- *snr = (status.snr << 8) | status.snr;
+ *snr = (state->status.snr << 8) | state->status.snr;
return 0;
}
@@ -266,34 +239,36 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct cinergyt2_fe_state *state = fe->demodulator_priv;
- struct dvbt_set_parameters_msg param;
- char result[2];
+ struct dvbt_set_parameters_msg *param;
int err;
- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
- param.tps = cpu_to_le16(compute_tps(fep));
- param.freq = cpu_to_le32(fep->frequency / 1000);
- param.flags = 0;
+ mutex_lock(&state->data_mutex);
+
+ param = (void *)state->data;
+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
+ param->tps = cpu_to_le16(compute_tps(fep));
+ param->freq = cpu_to_le32(fep->frequency / 1000);
+ param->flags = 0;
switch (fep->bandwidth_hz) {
default:
case 8000000:
- param.bandwidth = 8;
+ param->bandwidth = 8;
break;
case 7000000:
- param.bandwidth = 7;
+ param->bandwidth = 7;
break;
case 6000000:
- param.bandwidth = 6;
+ param->bandwidth = 6;
break;
}
- err = dvb_usb_generic_rw(state->d,
- (char *)&param, sizeof(param),
- result, sizeof(result), 0);
+ err = dvb_usb_generic_rw(state->d, state->data, sizeof(*param),
+ state->data, 2, 0);
if (err < 0)
err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
+ mutex_unlock(&state->data_mutex);
return (err < 0) ? err : 0;
}
@@ -315,6 +290,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
s->d = d;
memcpy(&s->fe.ops, &cinergyt2_fe_ops, sizeof(struct dvb_frontend_ops));
s->fe.demodulator_priv = s;
+ mutex_init(&s->data_mutex);
return &s->fe;
}
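The frontend rework above changes the I/O shape, not just the buffering: only cinergyt2_fe_read_status() talks to the hardware, snapshotting the reply into state->status while data_mutex is held, and read_ber()/read_unc_blocks()/read_signal_strength()/read_snr() decode that cached snapshot with no USB traffic of their own. The refresh step, restated compactly from the hunk (this is the patch's own logic under a made-up helper name; types and CINERGYT2_EP1_GET_TUNER_STATUS come from cinergyT2-fe.c):

static int cinergyt2_refresh_status(struct cinergyt2_fe_state *state)
{
	int ret;

	mutex_lock(&state->data_mutex);
	state->data[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
	ret = dvb_usb_generic_rw(state->d, state->data, 1,
				 state->data, sizeof(state->status), 0);
	if (!ret)	/* snapshot while the buffer is still locked */
		memcpy(&state->status, state->data, sizeof(state->status));
	mutex_unlock(&state->data_mutex);

	return ret;
}

One consequence worth noting: the per-property callbacks now report values only as fresh as the last read_status() poll.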
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 907ac01ae297..243403081fa5 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -45,9 +45,6 @@
#include "si2168.h"
#include "si2157.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 80
-
/* debug */
static int dvb_usb_cxusb_debug;
module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
@@ -61,23 +58,27 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int cxusb_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 sndbuf[MAX_XFER_SIZE];
+ struct cxusb_state *st = d->priv;
+ int ret, wo;
- if (1 + wlen > sizeof(sndbuf)) {
- warn("i2c wr: len=%d is too big!\n",
- wlen);
+ if (1 + wlen > MAX_XFER_SIZE) {
+ warn("i2c wr: len=%d is too big!\n", wlen);
return -EOPNOTSUPP;
}
- memset(sndbuf, 0, 1+wlen);
+ wo = (rbuf == NULL || rlen == 0); /* write-only */
- sndbuf[0] = cmd;
- memcpy(&sndbuf[1], wbuf, wlen);
+ mutex_lock(&d->data_mutex);
+ st->data[0] = cmd;
+ memcpy(&st->data[1], wbuf, wlen);
if (wo)
- return dvb_usb_generic_write(d, sndbuf, 1+wlen);
+ ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
else
- return dvb_usb_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen, 0);
+ ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
+ rbuf, rlen, 0);
+
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
/* GPIO */
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 527ff7905e15..18acda19527a 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -28,10 +28,15 @@
#define CMD_ANALOG 0x50
#define CMD_DIGITAL 0x51
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 80
+
struct cxusb_state {
u8 gpio_write_state[3];
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
+
+ unsigned char data[MAX_XFER_SIZE];
};
#endif
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 26797979ebce..47ce9d5de4c6 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -213,7 +213,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
usb_rcvctrlpipe(d->udev, 0),
REQUEST_NEW_I2C_READ,
USB_TYPE_VENDOR | USB_DIR_IN,
- value, index, msg[i].buf,
+ value, index, st->buf,
msg[i].len,
USB_CTRL_GET_TIMEOUT);
if (result < 0) {
@@ -221,6 +221,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
break;
}
+ if (msg[i].len > sizeof(st->buf)) {
+ deb_info("buffer too small to fit %d bytes\n",
+ msg[i].len);
+ return -EIO;
+ }
+
+ memcpy(msg[i].buf, st->buf, msg[i].len);
+
deb_data("<<< ");
debug_dump(msg[i].buf, msg[i].len, deb_data);
@@ -238,6 +246,13 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
/* I2C ctrl + FE bus; */
st->buf[3] = ((gen_mode << 6) & 0xC0) |
((bus_mode << 4) & 0x30);
+
+ if (msg[i].len > sizeof(st->buf) - 4) {
+ deb_info("i2c message to big: %d\n",
+ msg[i].len);
+ return -EIO;
+ }
+
/* The Actual i2c payload */
memcpy(&st->buf[4], msg[i].buf, msg[i].len);
@@ -283,6 +298,11 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
/* fill in the address */
st->buf[1] = msg[i].addr << 1;
/* fill the buffer */
+ if (msg[i].len > sizeof(st->buf) - 2) {
+ deb_info("i2c xfer to big: %d\n",
+ msg[i].len);
+ return -EIO;
+ }
memcpy(&st->buf[2], msg[i].buf, msg[i].len);
/* write/read request */
@@ -292,13 +312,20 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
/* special thing in the current firmware: when length is zero the read-failed */
len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2,
- msg[i+1].buf, msg[i+1].len);
+ st->buf, msg[i + 1].len);
if (len <= 0) {
deb_info("I2C read failed on address 0x%02x\n",
msg[i].addr);
break;
}
+ if (msg[i + 1].len > sizeof(st->buf)) {
+ deb_info("i2c xfer buffer to small for %d\n",
+ msg[i].len);
+ return -EIO;
+ }
+ memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len);
+
msg[i+1].len = len;
i++;
@@ -677,7 +704,7 @@ static void dib0700_rc_urb_completion(struct urb *purb)
struct dvb_usb_device *d = purb->context;
struct dib0700_rc_response *poll_reply;
enum rc_type protocol;
- u32 uninitialized_var(keycode);
+ u32 keycode;
u8 toggle;
deb_info("%s()\n", __func__);
@@ -710,7 +737,6 @@ static void dib0700_rc_urb_completion(struct urb *purb)
switch (d->props.rc.core.protocol) {
case RC_BIT_NEC:
- protocol = RC_TYPE_NEC;
toggle = 0;
/* NEC protocol sends repeat code as 0 0 0 FF */
@@ -719,7 +745,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
poll_reply->nec.data == 0x00 &&
poll_reply->nec.not_data == 0xff) {
poll_reply->data_state = 2;
- break;
+ rc_repeat(d->rc_dev);
+ goto resubmit;
}
if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
@@ -728,16 +755,19 @@ static void dib0700_rc_urb_completion(struct urb *purb)
poll_reply->nec.not_system << 16 |
poll_reply->nec.data << 8 |
poll_reply->nec.not_data);
+ protocol = RC_TYPE_NEC32;
} else if ((poll_reply->nec.system ^ poll_reply->nec.not_system) != 0xff) {
deb_data("NEC extended protocol\n");
keycode = RC_SCANCODE_NECX(poll_reply->nec.system << 8 |
poll_reply->nec.not_system,
poll_reply->nec.data);
+ protocol = RC_TYPE_NECX;
} else {
deb_data("NEC normal protocol\n");
keycode = RC_SCANCODE_NEC(poll_reply->nec.system,
poll_reply->nec.data);
+ protocol = RC_TYPE_NEC;
}
break;
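Each dib0700 hunk above inserts the same guard: before any memcpy() between a caller-supplied i2c message and the fixed-size st->buf, the length is tested and the transfer fails with -EIO rather than overflowing the buffer or silently truncating. The idiom, reduced to a stand-alone sketch (names and size are illustrative):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define ST_BUF_SIZE 255	/* stand-in for sizeof(st->buf) */

static int bounded_copy_reply(u8 *msg_buf, u16 msg_len,
			      const u8 st_buf[ST_BUF_SIZE])
{
	if (msg_len > ST_BUF_SIZE)
		return -EIO;	/* refuse; never truncate silently */

	memcpy(msg_buf, st_buf, msg_len);
	return 0;
}

The read path needed this because the transfer now lands in st->buf (a DMA-safe buffer) instead of msg[i].buf, so the copy-out to the caller is a new step with a new bounds hazard.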
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 0857b56e652c..ef1b8ee75c57 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -508,8 +508,6 @@ static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap)
#define DEFAULT_RC_INTERVAL 50
-static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
-
/*
* This function is used only when firmware is < 1.20 version. Newer
* firmwares use bulk mode, with functions implemented at dib0700_core,
@@ -517,7 +515,6 @@ static u8 rc_request[] = { REQUEST_POLL_RC, 0 };
*/
static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
{
- u8 key[4];
enum rc_type protocol;
u32 scancode;
u8 toggle;
@@ -532,39 +529,43 @@ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d)
return 0;
}
- i = dib0700_ctrl_rd(d, rc_request, 2, key, 4);
+ st->buf[0] = REQUEST_POLL_RC;
+ st->buf[1] = 0;
+
+ i = dib0700_ctrl_rd(d, st->buf, 2, st->buf, 4);
if (i <= 0) {
err("RC Query Failed");
- return -1;
+ return -EIO;
}
/* losing half of KEY_0 events from Philips rc5 remotes.. */
- if (key[0] == 0 && key[1] == 0 && key[2] == 0 && key[3] == 0)
+ if (st->buf[0] == 0 && st->buf[1] == 0
+ && st->buf[2] == 0 && st->buf[3] == 0)
return 0;
- /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)key[3-2],(int)key[3-3],(int)key[3-1],(int)key[3]); */
+ /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)st->buf[3 - 2],(int)st->buf[3 - 3],(int)st->buf[3 - 1],(int)st->buf[3]); */
dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */
switch (d->props.rc.core.protocol) {
case RC_BIT_NEC:
/* NEC protocol sends repeat code as 0 0 0 FF */
- if ((key[3-2] == 0x00) && (key[3-3] == 0x00) &&
- (key[3] == 0xff)) {
+ if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) &&
+ (st->buf[3] == 0xff)) {
rc_repeat(d->rc_dev);
return 0;
}
protocol = RC_TYPE_NEC;
- scancode = RC_SCANCODE_NEC(key[3-2], key[3-3]);
+ scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]);
toggle = 0;
break;
default:
/* RC-5 protocol changes toggle bit on new keypress */
protocol = RC_TYPE_RC5;
- scancode = RC_SCANCODE_RC5(key[3-2], key[3-3]);
- toggle = key[3-1];
+ scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]);
+ toggle = st->buf[3 - 1];
break;
}
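For reference, the NEC repeat handling used by both RC paths reduces to recognizing the 00 00 xx ff frame and calling rc_repeat() instead of rc_keydown(). A minimal sketch, assuming the rc-core helpers and a 4-byte poll reply:

	static void handle_nec_poll(struct rc_dev *rc, const u8 *buf)
	{
		u32 scancode;

		/* NEC repeat frames arrive as 00 00 <toggle> ff */
		if (buf[0] == 0x00 && buf[1] == 0x00 && buf[3] == 0xff) {
			rc_repeat(rc);
			return;
		}
		scancode = RC_SCANCODE_NEC(buf[1], buf[0]);
		rc_keydown(rc, RC_TYPE_NEC, scancode, 0);
	}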
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 6eea4e68891d..de3ee2547479 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -9,7 +9,6 @@
* see Documentation/dvb/README.dvb-usb for more information
*/
-#include <linux/kconfig.h>
#include "dibusb.h"
/* Max transfer size done by I2C transfer functions */
@@ -63,72 +62,117 @@ EXPORT_SYMBOL(dibusb_pid_filter_ctrl);
int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 b[3];
+ u8 *b;
int ret;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP;
- ret = dvb_usb_generic_write(d,b,3);
+
+ ret = dvb_usb_generic_write(d, b, 3);
+
+ kfree(b);
+
msleep(10);
+
return ret;
}
EXPORT_SYMBOL(dibusb_power_ctrl);
int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- u8 b[3] = { 0 };
int ret;
+ u8 *b;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0)
- return ret;
+ goto ret;
if (onoff) {
b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
b[1] = 0x00;
- if ((ret = dvb_usb_generic_write(adap->dev,b,2)) < 0)
- return ret;
+ ret = dvb_usb_generic_write(adap->dev, b, 2);
+ if (ret < 0)
+ goto ret;
}
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
- return dvb_usb_generic_write(adap->dev,b,3);
+ ret = dvb_usb_generic_write(adap->dev, b, 3);
+
+ret:
+ kfree(b);
+ return ret;
}
EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- if (onoff) {
- u8 b[3] = { DIBUSB_REQ_SET_IOCTL, DIBUSB_IOCTL_CMD_POWER_MODE, DIBUSB_IOCTL_POWER_WAKEUP };
- return dvb_usb_generic_write(d,b,3);
- } else
+ u8 *b;
+ int ret;
+
+ if (!onoff)
return 0;
+
+ b = kmalloc(3, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ b[0] = DIBUSB_REQ_SET_IOCTL;
+ b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
+ b[2] = DIBUSB_IOCTL_POWER_WAKEUP;
+
+ ret = dvb_usb_generic_write(d, b, 3);
+
+ kfree(b);
+
+ return ret;
}
EXPORT_SYMBOL(dibusb2_0_power_ctrl);
static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
- u8 sndbuf[MAX_XFER_SIZE]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */
+ u8 *sndbuf;
+ int ret, wo, len;
+
/* write only ? */
- int wo = (rbuf == NULL || rlen == 0),
- len = 2 + wlen + (wo ? 0 : 2);
+ wo = (rbuf == NULL || rlen == 0);
+
+ len = 2 + wlen + (wo ? 0 : 2);
+
+ sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL);
+ if (!sndbuf)
+ return -ENOMEM;
- if (4 + wlen > sizeof(sndbuf)) {
+ if (4 + wlen > MAX_XFER_SIZE) {
warn("i2c wr: len=%d is too big!\n", wlen);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto ret;
}
sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
- memcpy(&sndbuf[2],wbuf,wlen);
+ memcpy(&sndbuf[2], wbuf, wlen);
if (!wo) {
- sndbuf[wlen+2] = (rlen >> 8) & 0xff;
- sndbuf[wlen+3] = rlen & 0xff;
+ sndbuf[wlen + 2] = (rlen >> 8) & 0xff;
+ sndbuf[wlen + 3] = rlen & 0xff;
}
- return dvb_usb_generic_rw(d,sndbuf,len,rbuf,rlen,0);
+ ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0);
+
+ret:
+ kfree(sndbuf);
+ return ret;
}
/*
@@ -184,164 +228,6 @@ int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
}
EXPORT_SYMBOL(dibusb_read_eeprom_byte);
-#if IS_ENABLED(CONFIG_DVB_DIB3000MC)
-
-/* 3000MC/P stuff */
-// Config Adjacent channels Perf -cal22
-static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
- .band_caps = BAND_VHF | BAND_UHF,
- .setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
-
- .agc1_max = 48497,
- .agc1_min = 23593,
- .agc2_max = 46531,
- .agc2_min = 24904,
-
- .agc1_pt1 = 0x65,
- .agc1_pt2 = 0x69,
-
- .agc1_slope1 = 0x51,
- .agc1_slope2 = 0x27,
-
- .agc2_pt1 = 0,
- .agc2_pt2 = 0x33,
-
- .agc2_slope1 = 0x35,
- .agc2_slope2 = 0x37,
-};
-
-static struct dib3000mc_config stk3000p_dib3000p_config = {
- &dib3000p_mt2060_agc_config,
-
- .max_time = 0x196,
- .ln_adc_level = 0x1cc7,
-
- .output_mpeg2_in_188_bytes = 1,
-
- .agc_command1 = 1,
- .agc_command2 = 1,
-};
-
-static struct dibx000_agc_config dib3000p_panasonic_agc_config = {
- .band_caps = BAND_VHF | BAND_UHF,
- .setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
-
- .agc1_max = 56361,
- .agc1_min = 22282,
- .agc2_max = 47841,
- .agc2_min = 36045,
-
- .agc1_pt1 = 0x3b,
- .agc1_pt2 = 0x6b,
-
- .agc1_slope1 = 0x55,
- .agc1_slope2 = 0x1d,
-
- .agc2_pt1 = 0,
- .agc2_pt2 = 0x0a,
-
- .agc2_slope1 = 0x95,
- .agc2_slope2 = 0x1e,
-};
-
-static struct dib3000mc_config mod3000p_dib3000p_config = {
- &dib3000p_panasonic_agc_config,
-
- .max_time = 0x51,
- .ln_adc_level = 0x1cc7,
-
- .output_mpeg2_in_188_bytes = 1,
-
- .agc_command1 = 1,
- .agc_command2 = 1,
-};
-
-int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
-{
- if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
- le16_to_cpu(adap->dev->udev->descriptor.idProduct) ==
- USB_PID_LITEON_DVB_T_WARM) {
- msleep(1000);
- }
-
- adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
- &adap->dev->i2c_adap,
- DEFAULT_DIB3000P_I2C_ADDRESS,
- &mod3000p_dib3000p_config);
- if ((adap->fe_adap[0].fe) == NULL)
- adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
- &adap->dev->i2c_adap,
- DEFAULT_DIB3000MC_I2C_ADDRESS,
- &mod3000p_dib3000p_config);
- if ((adap->fe_adap[0].fe) != NULL) {
- if (adap->priv != NULL) {
- struct dibusb_state *st = adap->priv;
- st->ops.pid_parse = dib3000mc_pid_parse;
- st->ops.pid_ctrl = dib3000mc_pid_control;
- }
- return 0;
- }
- return -ENODEV;
-}
-EXPORT_SYMBOL(dibusb_dib3000mc_frontend_attach);
-
-static struct mt2060_config stk3000p_mt2060_config = {
- 0x60
-};
-
-int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
-{
- struct dibusb_state *st = adap->priv;
- u8 a,b;
- u16 if1 = 1220;
- struct i2c_adapter *tun_i2c;
-
- // First IF calibration for Liteon Sticks
- if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
- le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_LITEON_DVB_T_WARM) {
-
- dibusb_read_eeprom_byte(adap->dev,0x7E,&a);
- dibusb_read_eeprom_byte(adap->dev,0x7F,&b);
-
- if (a == 0x00)
- if1 += b;
- else if (a == 0x80)
- if1 -= b;
- else
- warn("LITE-ON DVB-T: Strange IF1 calibration :%2X %2X\n", a, b);
-
- } else if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_DIBCOM &&
- le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_DIBCOM_MOD3001_WARM) {
- u8 desc;
- dibusb_read_eeprom_byte(adap->dev, 7, &desc);
- if (desc == 2) {
- a = 127;
- do {
- dibusb_read_eeprom_byte(adap->dev, a, &desc);
- a--;
- } while (a > 7 && (desc == 0xff || desc == 0x00));
- if (desc & 0x80)
- if1 -= (0xff - desc);
- else
- if1 += desc;
- }
- }
-
- tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1);
- if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
- /* not found - use panasonic pll parameters */
- if (dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
- return -ENOMEM;
- } else {
- st->mt2060_present = 1;
- /* set the correct parameters for the dib3000p */
- dib3000mc_set_config(adap->fe_adap[0].fe, &stk3000p_dib3000p_config);
- }
- return 0;
-}
-EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
-#endif
-
/*
* common remote control stuff
*/
@@ -478,11 +364,27 @@ EXPORT_SYMBOL(rc_map_dibusb_table);
int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- u8 key[5],cmd = DIBUSB_REQ_POLL_REMOTE;
- dvb_usb_generic_rw(d,&cmd,1,key,5,0);
- dvb_usb_nec_rc_key_to_event(d,key,event,state);
- if (key[0] != 0)
- deb_info("key: %*ph\n", 5, key);
- return 0;
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(5, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = DIBUSB_REQ_POLL_REMOTE;
+
+ ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ dvb_usb_nec_rc_key_to_event(d, buf, event, state);
+
+ if (buf[0] != 0)
+ deb_info("key: %*ph\n", 5, buf);
+
+ kfree(buf);
+
+ret:
+ return ret;
}
EXPORT_SYMBOL(dibusb_rc_query);
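All of the dibusb helpers above move their transfer buffers off the stack: USB control/bulk payloads are handed to the DMA engine, and stack memory is not guaranteed to be DMA-capable (and never is with VMAP_STACK). The shape of the fix, sketched with a hypothetical helper:

	static int send_ioctl(struct dvb_usb_device *d, u8 cmd, u8 arg)
	{
		u8 *b;
		int ret;

		b = kmalloc(3, GFP_KERNEL);	/* heap memory is DMA-safe */
		if (!b)
			return -ENOMEM;

		b[0] = DIBUSB_REQ_SET_IOCTL;
		b[1] = cmd;
		b[2] = arg;

		ret = dvb_usb_generic_write(d, b, 3);

		kfree(b);
		return ret;
	}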
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
new file mode 100644
index 000000000000..d66f56cc46a5
--- /dev/null
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -0,0 +1,168 @@
+/* Common methods for dibusb-based receivers.
+ *
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation, version 2.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+
+#include <linux/kconfig.h>
+#include "dibusb.h"
+
+/* 3000MC/P stuff */
+// Config Adjacent channels Perf -cal22
+static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
+ .band_caps = BAND_VHF | BAND_UHF,
+ .setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
+
+ .agc1_max = 48497,
+ .agc1_min = 23593,
+ .agc2_max = 46531,
+ .agc2_min = 24904,
+
+ .agc1_pt1 = 0x65,
+ .agc1_pt2 = 0x69,
+
+ .agc1_slope1 = 0x51,
+ .agc1_slope2 = 0x27,
+
+ .agc2_pt1 = 0,
+ .agc2_pt2 = 0x33,
+
+ .agc2_slope1 = 0x35,
+ .agc2_slope2 = 0x37,
+};
+
+static struct dib3000mc_config stk3000p_dib3000p_config = {
+ &dib3000p_mt2060_agc_config,
+
+ .max_time = 0x196,
+ .ln_adc_level = 0x1cc7,
+
+ .output_mpeg2_in_188_bytes = 1,
+
+ .agc_command1 = 1,
+ .agc_command2 = 1,
+};
+
+static struct dibx000_agc_config dib3000p_panasonic_agc_config = {
+ .band_caps = BAND_VHF | BAND_UHF,
+ .setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
+
+ .agc1_max = 56361,
+ .agc1_min = 22282,
+ .agc2_max = 47841,
+ .agc2_min = 36045,
+
+ .agc1_pt1 = 0x3b,
+ .agc1_pt2 = 0x6b,
+
+ .agc1_slope1 = 0x55,
+ .agc1_slope2 = 0x1d,
+
+ .agc2_pt1 = 0,
+ .agc2_pt2 = 0x0a,
+
+ .agc2_slope1 = 0x95,
+ .agc2_slope2 = 0x1e,
+};
+
+static struct dib3000mc_config mod3000p_dib3000p_config = {
+ &dib3000p_panasonic_agc_config,
+
+ .max_time = 0x51,
+ .ln_adc_level = 0x1cc7,
+
+ .output_mpeg2_in_188_bytes = 1,
+
+ .agc_command1 = 1,
+ .agc_command2 = 1,
+};
+
+int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
+ le16_to_cpu(adap->dev->udev->descriptor.idProduct) ==
+ USB_PID_LITEON_DVB_T_WARM) {
+ msleep(1000);
+ }
+
+ adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
+ &adap->dev->i2c_adap,
+ DEFAULT_DIB3000P_I2C_ADDRESS,
+ &mod3000p_dib3000p_config);
+ if ((adap->fe_adap[0].fe) == NULL)
+ adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
+ &adap->dev->i2c_adap,
+ DEFAULT_DIB3000MC_I2C_ADDRESS,
+ &mod3000p_dib3000p_config);
+ if ((adap->fe_adap[0].fe) != NULL) {
+ if (adap->priv != NULL) {
+ struct dibusb_state *st = adap->priv;
+ st->ops.pid_parse = dib3000mc_pid_parse;
+ st->ops.pid_ctrl = dib3000mc_pid_control;
+ }
+ return 0;
+ }
+ return -ENODEV;
+}
+EXPORT_SYMBOL(dibusb_dib3000mc_frontend_attach);
+
+static struct mt2060_config stk3000p_mt2060_config = {
+ 0x60
+};
+
+int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dibusb_state *st = adap->priv;
+ u8 a,b;
+ u16 if1 = 1220;
+ struct i2c_adapter *tun_i2c;
+
+ // First IF calibration for Liteon Sticks
+ if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_LITEON &&
+ le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_LITEON_DVB_T_WARM) {
+
+ dibusb_read_eeprom_byte(adap->dev,0x7E,&a);
+ dibusb_read_eeprom_byte(adap->dev,0x7F,&b);
+
+ if (a == 0x00)
+ if1 += b;
+ else if (a == 0x80)
+ if1 -= b;
+ else
+ warn("LITE-ON DVB-T: Strange IF1 calibration :%2X %2X\n", a, b);
+
+ } else if (le16_to_cpu(adap->dev->udev->descriptor.idVendor) == USB_VID_DIBCOM &&
+ le16_to_cpu(adap->dev->udev->descriptor.idProduct) == USB_PID_DIBCOM_MOD3001_WARM) {
+ u8 desc;
+ dibusb_read_eeprom_byte(adap->dev, 7, &desc);
+ if (desc == 2) {
+ a = 127;
+ do {
+ dibusb_read_eeprom_byte(adap->dev, a, &desc);
+ a--;
+ } while (a > 7 && (desc == 0xff || desc == 0x00));
+ if (desc & 0x80)
+ if1 -= (0xff - desc);
+ else
+ if1 += desc;
+ }
+ }
+
+ tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1);
+ if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
+ /* not found - use panasonic pll parameters */
+ if (dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
+ return -ENOMEM;
+ } else {
+ st->mt2060_present = 1;
+ /* set the correct parameters for the dib3000p */
+ dib3000mc_set_config(adap->fe_adap[0].fe, &stk3000p_dib3000p_config);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
diff --git a/drivers/media/usb/dvb-usb/dibusb.h b/drivers/media/usb/dvb-usb/dibusb.h
index 3f82163d8ab8..697be2a17ade 100644
--- a/drivers/media/usb/dvb-usb/dibusb.h
+++ b/drivers/media/usb/dvb-usb/dibusb.h
@@ -96,6 +96,9 @@
#define DIBUSB_IOCTL_CMD_ENABLE_STREAM 0x01
#define DIBUSB_IOCTL_CMD_DISABLE_STREAM 0x02
+/* Max transfer size done by I2C transfer functions */
+#define MAX_XFER_SIZE 64
+
struct dibusb_state {
struct dib_fe_xfer_ops ops;
int mt2060_present;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 63134335c994..4284f6984dc1 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -28,22 +28,26 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int digitv_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 vv, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 sndbuf[7],rcvbuf[7];
- memset(sndbuf,0,7); memset(rcvbuf,0,7);
+ struct digitv_state *st = d->priv;
+ int ret, wo;
- sndbuf[0] = cmd;
- sndbuf[1] = vv;
- sndbuf[2] = wo ? wlen : rlen;
+ wo = (rbuf == NULL || rlen == 0); /* write-only */
+
+ memset(st->sndbuf, 0, 7);
+ memset(st->rcvbuf, 0, 7);
+
+ st->sndbuf[0] = cmd;
+ st->sndbuf[1] = vv;
+ st->sndbuf[2] = wo ? wlen : rlen;
if (wo) {
- memcpy(&sndbuf[3],wbuf,wlen);
- dvb_usb_generic_write(d,sndbuf,7);
+ memcpy(&st->sndbuf[3], wbuf, wlen);
+ ret = dvb_usb_generic_write(d, st->sndbuf, 7);
} else {
- dvb_usb_generic_rw(d,sndbuf,7,rcvbuf,7,10);
- memcpy(rbuf,&rcvbuf[3],rlen);
+ ret = dvb_usb_generic_rw(d, st->sndbuf, 7, st->rcvbuf, 7, 10);
+ memcpy(rbuf, &st->rcvbuf[3], rlen);
}
- return 0;
+ return ret;
}
/* I2C */
diff --git a/drivers/media/usb/dvb-usb/digitv.h b/drivers/media/usb/dvb-usb/digitv.h
index 908c09f4966b..581e09c25491 100644
--- a/drivers/media/usb/dvb-usb/digitv.h
+++ b/drivers/media/usb/dvb-usb/digitv.h
@@ -5,7 +5,10 @@
#include "dvb-usb.h"
struct digitv_state {
- int is_nxt6000;
+ int is_nxt6000;
+
+ unsigned char sndbuf[7];
+ unsigned char rcvbuf[7];
};
/* protocol (from usblogging and the SDK:
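digitv takes the other route: rather than allocating on every call, the buffers become part of the per-device state, which dvb-usb kzalloc()s when the properties set size_of_priv. A minimal sketch of that arrangement (hypothetical names):

	struct my_state {
		unsigned char sndbuf[7];
		unsigned char rcvbuf[7];
	};

	/* d->priv points at a kzalloc()ed my_state, so both buffers are
	 * heap-backed and therefore DMA-safe */
	static int my_ctrl_msg(struct dvb_usb_device *d, u8 cmd)
	{
		struct my_state *st = d->priv;

		memset(st->sndbuf, 0, sizeof(st->sndbuf));
		st->sndbuf[0] = cmd;
		return dvb_usb_generic_rw(d, st->sndbuf, sizeof(st->sndbuf),
					  st->rcvbuf, sizeof(st->rcvbuf), 10);
	}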
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index c09332bd99cb..f5c042baa254 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -18,17 +18,28 @@ struct dtt200u_fe_state {
struct dtv_frontend_properties fep;
struct dvb_frontend frontend;
+
+ unsigned char data[80];
+ struct mutex data_mutex;
};
static int dtt200u_fe_read_status(struct dvb_frontend *fe,
enum fe_status *stat)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 st = GET_TUNE_STATUS, b[3];
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_TUNE_STATUS;
- dvb_usb_generic_rw(state->d,&st,1,b,3,0);
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
+ if (ret < 0) {
+ *stat = 0;
+ mutex_unlock(&state->data_mutex);
+ return ret;
+ }
- switch (b[0]) {
+ switch (state->data[0]) {
case 0x01:
*stat = FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
@@ -41,51 +52,86 @@ static int dtt200u_fe_read_status(struct dvb_frontend *fe,
*stat = 0;
break;
}
+ mutex_unlock(&state->data_mutex);
return 0;
}
static int dtt200u_fe_read_ber(struct dvb_frontend* fe, u32 *ber)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_VIT_ERR_CNT,b[3];
- dvb_usb_generic_rw(state->d,&bw,1,b,3,0);
- *ber = (b[0] << 16) | (b[1] << 8) | b[2];
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_VIT_ERR_CNT;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 3, 0);
+ if (ret >= 0)
+ *ber = (state->data[0] << 16) | (state->data[1] << 8) | state->data[2];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_RS_UNCOR_BLK_CNT,b[2];
+ int ret;
- dvb_usb_generic_rw(state->d,&bw,1,b,2,0);
- *unc = (b[0] << 8) | b[1];
- return 0;
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_RS_UNCOR_BLK_CNT;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 2, 0);
+ if (ret >= 0)
+ *unc = (state->data[0] << 8) | state->data[1];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_AGC, b;
- dvb_usb_generic_rw(state->d,&bw,1,&b,1,0);
- *strength = (b << 8) | b;
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_AGC;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
+ if (ret >= 0)
+ *strength = (state->data[0] << 8) | state->data[0];
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_read_snr(struct dvb_frontend* fe, u16 *snr)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 bw = GET_SNR,br;
- dvb_usb_generic_rw(state->d,&bw,1,&br,1,0);
- *snr = ~((br << 8) | br);
- return 0;
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = GET_SNR;
+
+ ret = dvb_usb_generic_rw(state->d, state->data, 1, state->data, 1, 0);
+ if (ret >= 0)
+ *snr = ~((state->data[0] << 8) | state->data[0]);
+
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_init(struct dvb_frontend* fe)
{
struct dtt200u_fe_state *state = fe->demodulator_priv;
- u8 b = SET_INIT;
- return dvb_usb_generic_write(state->d,&b,1);
+ int ret;
+
+ mutex_lock(&state->data_mutex);
+ state->data[0] = SET_INIT;
+
+ ret = dvb_usb_generic_write(state->d, state->data, 1);
+ mutex_unlock(&state->data_mutex);
+
+ return ret;
}
static int dtt200u_fe_sleep(struct dvb_frontend* fe)
@@ -105,39 +151,40 @@ static int dtt200u_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dtt200u_fe_state *state = fe->demodulator_priv;
- int i;
- enum fe_status st;
+ int ret;
u16 freq = fep->frequency / 250000;
- u8 bwbuf[2] = { SET_BANDWIDTH, 0 },freqbuf[3] = { SET_RF_FREQ, 0, 0 };
+ mutex_lock(&state->data_mutex);
+ state->data[0] = SET_BANDWIDTH;
switch (fep->bandwidth_hz) {
case 8000000:
- bwbuf[1] = 8;
+ state->data[1] = 8;
break;
case 7000000:
- bwbuf[1] = 7;
+ state->data[1] = 7;
break;
case 6000000:
- bwbuf[1] = 6;
+ state->data[1] = 6;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto ret;
}
- dvb_usb_generic_write(state->d,bwbuf,2);
+ ret = dvb_usb_generic_write(state->d, state->data, 2);
+ if (ret < 0)
+ goto ret;
- freqbuf[1] = freq & 0xff;
- freqbuf[2] = (freq >> 8) & 0xff;
- dvb_usb_generic_write(state->d,freqbuf,3);
+ state->data[0] = SET_RF_FREQ;
+ state->data[1] = freq & 0xff;
+ state->data[2] = (freq >> 8) & 0xff;
+ ret = dvb_usb_generic_write(state->d, state->data, 3);
+ if (ret < 0)
+ goto ret;
- for (i = 0; i < 30; i++) {
- msleep(20);
- dtt200u_fe_read_status(fe, &st);
- if (st & FE_TIMEDOUT)
- continue;
- }
-
- return 0;
+ret:
+ mutex_unlock(&state->data_mutex);
+ return ret;
}
static int dtt200u_fe_get_frontend(struct dvb_frontend* fe,
@@ -169,6 +216,7 @@ struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
deb_info("attaching frontend dtt200u\n");
state->d = d;
+ mutex_init(&state->data_mutex);
memcpy(&state->frontend.ops,&dtt200u_fe_ops,sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
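Since dtt200u-fe now reuses one data[] scratch buffer for both directions, every accessor serializes on data_mutex and unlocks on every exit path. The recurring shape, reduced to a sketch:

	static int read_u16(struct dtt200u_fe_state *state, u8 cmd, u16 *val)
	{
		int ret;

		mutex_lock(&state->data_mutex);
		state->data[0] = cmd;

		/* write one byte, read two back into the same buffer */
		ret = dvb_usb_generic_rw(state->d, state->data, 1,
					 state->data, 2, 0);
		if (ret >= 0)
			*val = (state->data[0] << 8) | state->data[1];

		mutex_unlock(&state->data_mutex);
		return ret;
	}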
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index be633ece4194..fcbff7fb0c4e 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -20,72 +20,115 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2 (or-able))." DVB_USB
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+struct dtt200u_state {
+ unsigned char data[80];
+};
+
static int dtt200u_power_ctrl(struct dvb_usb_device *d, int onoff)
{
- u8 b = SET_INIT;
+ struct dtt200u_state *st = d->priv;
+ int ret = 0;
+
+ mutex_lock(&d->data_mutex);
+
+ st->data[0] = SET_INIT;
if (onoff)
- dvb_usb_generic_write(d,&b,2);
+ ret = dvb_usb_generic_write(d, st->data, 2);
- return 0;
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int dtt200u_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
- u8 b_streaming[2] = { SET_STREAMING, onoff };
- u8 b_rst_pid = RESET_PID_FILTER;
+ struct dvb_usb_device *d = adap->dev;
+ struct dtt200u_state *st = d->priv;
+ int ret;
- dvb_usb_generic_write(adap->dev, b_streaming, 2);
+ mutex_lock(&d->data_mutex);
+ st->data[0] = SET_STREAMING;
+ st->data[1] = onoff;
- if (onoff == 0)
- dvb_usb_generic_write(adap->dev, &b_rst_pid, 1);
- return 0;
+ ret = dvb_usb_generic_write(adap->dev, st->data, 2);
+ if (ret < 0)
+ goto ret;
+
+ if (onoff)
+ goto ret;
+
+ st->data[0] = RESET_PID_FILTER;
+ ret = dvb_usb_generic_write(adap->dev, st->data, 1);
+
+ret:
+ mutex_unlock(&d->data_mutex);
+
+ return ret;
}
static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
{
- u8 b_pid[4];
+ struct dvb_usb_device *d = adap->dev;
+ struct dtt200u_state *st = d->priv;
+ int ret;
+
pid = onoff ? pid : 0;
- b_pid[0] = SET_PID_FILTER;
- b_pid[1] = index;
- b_pid[2] = pid & 0xff;
- b_pid[3] = (pid >> 8) & 0x1f;
+ mutex_lock(&d->data_mutex);
+ st->data[0] = SET_PID_FILTER;
+ st->data[1] = index;
+ st->data[2] = pid & 0xff;
+ st->data[3] = (pid >> 8) & 0x1f;
+
+ ret = dvb_usb_generic_write(adap->dev, st->data, 4);
+ mutex_unlock(&d->data_mutex);
- return dvb_usb_generic_write(adap->dev, b_pid, 4);
+ return ret;
}
static int dtt200u_rc_query(struct dvb_usb_device *d)
{
- u8 key[5],cmd = GET_RC_CODE;
+ struct dtt200u_state *st = d->priv;
u32 scancode;
+ int ret;
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = GET_RC_CODE;
- dvb_usb_generic_rw(d,&cmd,1,key,5,0);
- if (key[0] == 1) {
- scancode = key[1];
- if ((u8) ~key[1] != key[2]) {
+ ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 5, 0);
+ if (ret < 0)
+ goto ret;
+
+ if (st->data[0] == 1) {
+ enum rc_type proto = RC_TYPE_NEC;
+
+ scancode = st->data[1];
+ if ((u8) ~st->data[1] != st->data[2]) {
/* Extended NEC */
scancode = scancode << 8;
- scancode |= key[2];
+ scancode |= st->data[2];
+ proto = RC_TYPE_NECX;
}
scancode = scancode << 8;
- scancode |= key[3];
+ scancode |= st->data[3];
/* Check command checksum is ok */
- if ((u8) ~key[3] == key[4])
- rc_keydown(d->rc_dev, RC_TYPE_NEC, scancode, 0);
+ if ((u8) ~st->data[3] == st->data[4])
+ rc_keydown(d->rc_dev, proto, scancode, 0);
else
rc_keyup(d->rc_dev);
- } else if (key[0] == 2) {
+ } else if (st->data[0] == 2) {
rc_repeat(d->rc_dev);
} else {
rc_keyup(d->rc_dev);
}
- if (key[0] != 0)
- deb_info("key: %*ph\n", 5, key);
+ if (st->data[0] != 0)
+ deb_info("st->data: %*ph\n", 5, st->data);
- return 0;
+ret:
+ mutex_unlock(&d->data_mutex);
+ return ret;
}
static int dtt200u_frontend_attach(struct dvb_usb_adapter *adap)
@@ -137,6 +180,8 @@ static struct dvb_usb_device_properties dtt200u_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-dtt200u-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -187,6 +232,8 @@ static struct dvb_usb_device_properties wt220u_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-02.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -237,6 +284,8 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-fc03.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -287,6 +336,8 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-zl0353-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.adapter = {
{
@@ -337,6 +388,8 @@ static struct dvb_usb_device_properties wt220u_miglia_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-wt220u-miglia-01.fw",
+ .size_of_priv = sizeof(struct dtt200u_state),
+
.num_adapters = 1,
.generic_bulk_ctrl_endpoint = 0x01,
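The dtt200u.c conversions all funnel their error paths through a single label so the mutex is released exactly once. The pattern in isolation, as a sketch:

	static int stop_streaming(struct dvb_usb_device *d,
				  struct dtt200u_state *st)
	{
		int ret;

		mutex_lock(&d->data_mutex);

		st->data[0] = SET_STREAMING;
		st->data[1] = 0;
		ret = dvb_usb_generic_write(d, st->data, 2);
		if (ret < 0)
			goto unlock;

		st->data[0] = RESET_PID_FILTER;
		ret = dvb_usb_generic_write(d, st->data, 1);

	unlock:
		mutex_unlock(&d->data_mutex);
		return ret;
	}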
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index 3d11df41cac0..c60fb54f445f 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -31,9 +31,14 @@ module_param_named(debug, dvb_usb_dtv5100_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+struct dtv5100_state {
+ unsigned char data[80];
+};
+
static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
+ struct dtv5100_state *st = d->priv;
u8 request;
u8 type;
u16 value;
@@ -60,9 +65,10 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
}
index = (addr << 8) + wbuf[0];
+ memcpy(st->data, rbuf, rlen);
msleep(1); /* avoid I2C errors */
return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
- type, value, index, rbuf, rlen,
+ type, value, index, st->data, rlen,
DTV5100_USB_TIMEOUT);
}
@@ -176,7 +182,7 @@ static struct dvb_usb_device_properties dtv5100_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
- .size_of_priv = 0,
+ .size_of_priv = sizeof(struct dtv5100_state),
.num_adapters = 1,
.adapter = {{
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 3896ba9a4179..84308569e7dc 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -142,6 +142,7 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
{
int ret = 0;
+ mutex_init(&d->data_mutex);
mutex_init(&d->usb_mutex);
mutex_init(&d->i2c_mutex);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 639c4678c65b..107255b08b2b 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -404,8 +404,12 @@ struct dvb_usb_adapter {
* Powered is in/decremented for each call to modify the state.
* @udev: pointer to the device's struct usb_device.
*
- * @usb_mutex: semaphore of USB control messages (reading needs two messages)
- * @i2c_mutex: semaphore for i2c-transfers
+ * @data_mutex: mutex to protect the data structure used to store URB data
+ * @usb_mutex: mutex of USB control messages (reading needs two messages).
+ * Note that this mutex is taken internally by the generic
+ * URB control functions, so drivers using dvb_usb_generic_rw() and
+ * derived functions must not lock it themselves.
+ * @i2c_mutex: mutex for i2c-transfers
*
* @i2c_adap: device's i2c_adapter if it uses I2CoverUSB
*
@@ -433,6 +437,7 @@ struct dvb_usb_device {
int powered;
/* locking */
+ struct mutex data_mutex;
struct mutex usb_mutex;
/* i2c */
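The documented rule is easy to misread, so in code: a driver holds only its own data_mutex around the shared buffer; dvb_usb_generic_rw() acquires usb_mutex internally, and taking usb_mutex in the caller would deadlock. Sketch:

	static int locked_xfer(struct dvb_usb_device *d, u8 *buf,
			       int wlen, int rlen)
	{
		int ret;

		mutex_lock(&d->data_mutex);	/* protects buf contents */
		/* usb_mutex is taken inside dvb_usb_generic_rw() */
		ret = dvb_usb_generic_rw(d, buf, wlen, buf, rlen, 0);
		mutex_unlock(&d->data_mutex);

		return ret;
	}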
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 5fb0c650926e..2c720cb2fb00 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -852,7 +852,7 @@ static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
if (i && !state->initialized) {
state->initialized = 1;
/* reset board */
- dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+ return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
}
return 0;
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 5d0384dd45b5..993bb7a72985 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -15,6 +15,7 @@
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "gp8psk.h"
+#include "gp8psk-fe.h"
/* debug */
static char bcm4500_firmware[] = "dvb-usb-gp8psk-02.fw";
@@ -24,37 +25,19 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DV
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
-{
- return (gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6));
-}
-
-static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
-{
- return (gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1));
-}
-
-static void gp8psk_info(struct dvb_usb_device *d)
-{
- u8 fpga_vers, fw_vers[6];
-
- if (!gp8psk_get_fw_version(d, fw_vers))
- info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
- fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
- 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
- else
- info("failed to get FW version");
-
- if (!gp8psk_get_fpga_version(d, &fpga_vers))
- info("FPGA Version = %i", fpga_vers);
- else
- info("failed to get FPGA version");
-}
+struct gp8psk_state {
+ unsigned char data[80];
+};
-int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen)
+static int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
{
+ struct gp8psk_state *st = d->priv;
int ret = 0,try = 0;
+ if (blen > sizeof(st->data))
+ return -EIO;
+
if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
return ret;
@@ -63,7 +46,7 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
usb_rcvctrlpipe(d->udev,0),
req,
USB_TYPE_VENDOR | USB_DIR_IN,
- value,index,b,blen,
+ value, index, st->data, blen,
2000);
deb_info("reading number %d (ret: %d)\n",try,ret);
try++;
@@ -72,8 +55,10 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
if (ret < 0 || ret != blen) {
warn("usb in %d operation failed.", req);
ret = -EIO;
- } else
+ } else {
ret = 0;
+ memcpy(b, st->data, blen);
+ }
deb_xfer("in: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
debug_dump(b,blen,deb_xfer);
@@ -83,22 +68,27 @@ int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8
return ret;
}
-int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
+static int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
u16 index, u8 *b, int blen)
{
+ struct gp8psk_state *st = d->priv;
int ret;
deb_xfer("out: req. %x, val: %x, ind: %x, buffer: ",req,value,index);
debug_dump(b,blen,deb_xfer);
+ if (blen > sizeof(st->data))
+ return -EIO;
+
if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
return ret;
+ memcpy(st->data, b, blen);
if (usb_control_msg(d->udev,
usb_sndctrlpipe(d->udev,0),
req,
USB_TYPE_VENDOR | USB_DIR_OUT,
- value,index,b,blen,
+ value, index, st->data, blen,
2000) != blen) {
warn("usb out operation failed.");
ret = -EIO;
@@ -109,6 +99,34 @@ int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
return ret;
}
+
+static int gp8psk_get_fw_version(struct dvb_usb_device *d, u8 *fw_vers)
+{
+ return gp8psk_usb_in_op(d, GET_FW_VERS, 0, 0, fw_vers, 6);
+}
+
+static int gp8psk_get_fpga_version(struct dvb_usb_device *d, u8 *fpga_vers)
+{
+ return gp8psk_usb_in_op(d, GET_FPGA_VERS, 0, 0, fpga_vers, 1);
+}
+
+static void gp8psk_info(struct dvb_usb_device *d)
+{
+ u8 fpga_vers, fw_vers[6];
+
+ if (!gp8psk_get_fw_version(d, fw_vers))
+ info("FW Version = %i.%02i.%i (0x%x) Build %4i/%02i/%02i",
+ fw_vers[2], fw_vers[1], fw_vers[0], GP8PSK_FW_VERS(fw_vers),
+ 2000 + fw_vers[5], fw_vers[4], fw_vers[3]);
+ else
+ info("failed to get FW version");
+
+ if (!gp8psk_get_fpga_version(d, &fpga_vers))
+ info("FPGA Version = %i", fpga_vers);
+ else
+ info("failed to get FPGA version");
+}
+
static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
{
int ret;
@@ -143,6 +161,11 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
err("failed to load bcm4500 firmware.");
goto out_free;
}
+ if (buflen > 64) {
+ err("firmare chunk size bigger than 64 bytes.");
+ goto out_free;
+ }
+
memcpy(buf, ptr, buflen);
if (dvb_usb_generic_write(d, buf, buflen)) {
err("failed to load bcm4500 firmware.");
@@ -206,10 +229,13 @@ static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff)
return 0;
}
-int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
+static int gp8psk_bcm4500_reload(struct dvb_usb_device *d)
{
u8 buf;
int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct);
+
+ deb_xfer("reloading firmware\n");
+
/* Turn off 8psk power */
if (gp8psk_usb_in_op(d, BOOT_8PSK, 0, 0, &buf, 1))
return -EINVAL;
@@ -228,9 +254,47 @@ static int gp8psk_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return gp8psk_usb_out_op(adap->dev, ARM_TRANSFER, onoff, 0 , NULL, 0);
}
+/* Callbacks for gp8psk-fe.c */
+
+static int gp8psk_fe_in(void *priv, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_usb_in_op(d, req, value, index, b, blen);
+}
+
+static int gp8psk_fe_out(void *priv, u8 req, u16 value,
+ u16 index, u8 *b, int blen)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_usb_out_op(d, req, value, index, b, blen);
+}
+
+static int gp8psk_fe_reload(void *priv)
+{
+ struct dvb_usb_device *d = priv;
+
+ return gp8psk_bcm4500_reload(d);
+}
+
+const struct gp8psk_fe_ops gp8psk_fe_ops = {
+ .in = gp8psk_fe_in,
+ .out = gp8psk_fe_out,
+ .reload = gp8psk_fe_reload,
+};
+
static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap)
{
- adap->fe_adap[0].fe = gp8psk_fe_attach(adap->dev);
+ struct dvb_usb_device *d = adap->dev;
+ int id = le16_to_cpu(d->udev->descriptor.idProduct);
+ int is_rev1;
+
+ is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM);
+
+ adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach,
+ &gp8psk_fe_ops, d, is_rev1);
return 0;
}
@@ -265,6 +329,8 @@ static struct dvb_usb_device_properties gp8psk_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-gp8psk-01.fw",
+ .size_of_priv = sizeof(struct gp8psk_state),
+
.num_adapters = 1,
.adapter = {
{
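Splitting the frontend out of gp8psk.c means it can no longer call the USB helpers directly; it receives a callback table plus an opaque priv pointer instead. The mechanism, reduced to its essentials (mirrors the gp8psk_fe_ops table above):

	struct gp8psk_fe_ops {
		int (*in)(void *priv, u8 req, u16 value, u16 index,
			  u8 *b, int blen);
		int (*out)(void *priv, u8 req, u16 value, u16 index,
			   u8 *b, int blen);
		int (*reload)(void *priv);
	};

	/* the frontend sees only the ops table and priv, never the
	 * dvb_usb_device behind them */
	static int fe_get_fw_version(const struct gp8psk_fe_ops *ops,
				     void *priv, u8 *fw_vers)
	{
		return ops->in(priv, GET_FW_VERS, 0, 0, fw_vers, 6);
	}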
diff --git a/drivers/media/usb/dvb-usb/gp8psk.h b/drivers/media/usb/dvb-usb/gp8psk.h
index ed32b9da4843..d8975b866dee 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.h
+++ b/drivers/media/usb/dvb-usb/gp8psk.h
@@ -24,58 +24,6 @@ extern int dvb_usb_gp8psk_debug;
#define deb_info(args...) dprintk(dvb_usb_gp8psk_debug,0x01,args)
#define deb_xfer(args...) dprintk(dvb_usb_gp8psk_debug,0x02,args)
#define deb_rc(args...) dprintk(dvb_usb_gp8psk_debug,0x04,args)
-#define deb_fe(args...) dprintk(dvb_usb_gp8psk_debug,0x08,args)
-
-/* Twinhan Vendor requests */
-#define TH_COMMAND_IN 0xC0
-#define TH_COMMAND_OUT 0xC1
-
-/* gp8psk commands */
-
-#define GET_8PSK_CONFIG 0x80 /* in */
-#define SET_8PSK_CONFIG 0x81
-#define I2C_WRITE 0x83
-#define I2C_READ 0x84
-#define ARM_TRANSFER 0x85
-#define TUNE_8PSK 0x86
-#define GET_SIGNAL_STRENGTH 0x87 /* in */
-#define LOAD_BCM4500 0x88
-#define BOOT_8PSK 0x89 /* in */
-#define START_INTERSIL 0x8A /* in */
-#define SET_LNB_VOLTAGE 0x8B
-#define SET_22KHZ_TONE 0x8C
-#define SEND_DISEQC_COMMAND 0x8D
-#define SET_DVB_MODE 0x8E
-#define SET_DN_SWITCH 0x8F
-#define GET_SIGNAL_LOCK 0x90 /* in */
-#define GET_FW_VERS 0x92
-#define GET_SERIAL_NUMBER 0x93 /* in */
-#define USE_EXTRA_VOLT 0x94
-#define GET_FPGA_VERS 0x95
-#define CW3K_INIT 0x9d
-
-/* PSK_configuration bits */
-#define bm8pskStarted 0x01
-#define bm8pskFW_Loaded 0x02
-#define bmIntersilOn 0x04
-#define bmDVBmode 0x08
-#define bm22kHz 0x10
-#define bmSEL18V 0x20
-#define bmDCtuned 0x40
-#define bmArmed 0x80
-
-/* Satellite modulation modes */
-#define ADV_MOD_DVB_QPSK 0 /* DVB-S QPSK */
-#define ADV_MOD_TURBO_QPSK 1 /* Turbo QPSK */
-#define ADV_MOD_TURBO_8PSK 2 /* Turbo 8PSK (also used for Trellis 8PSK) */
-#define ADV_MOD_TURBO_16QAM 3 /* Turbo 16QAM (also used for Trellis 8PSK) */
-
-#define ADV_MOD_DCII_C_QPSK 4 /* Digicipher II Combo */
-#define ADV_MOD_DCII_I_QPSK 5 /* Digicipher II I-stream */
-#define ADV_MOD_DCII_Q_QPSK 6 /* Digicipher II Q-stream */
-#define ADV_MOD_DCII_C_OQPSK 7 /* Digicipher II offset QPSK */
-#define ADV_MOD_DSS_QPSK 8 /* DSS (DIRECTV) QPSK */
-#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
#define GET_USB_SPEED 0x07
@@ -86,15 +34,4 @@ extern int dvb_usb_gp8psk_debug;
#define PRODUCT_STRING_READ 0x0D
#define FW_BCD_VERSION_READ 0x14
-/* firmware revision id's */
-#define GP8PSK_FW_REV1 0x020604
-#define GP8PSK_FW_REV2 0x020704
-#define GP8PSK_FW_VERS(_fw_vers) ((_fw_vers)[2]<<0x10 | (_fw_vers)[1]<<0x08 | (_fw_vers)[0])
-
-extern struct dvb_frontend * gp8psk_fe_attach(struct dvb_usb_device *d);
-extern int gp8psk_usb_in_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen);
-extern int gp8psk_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value,
- u16 index, u8 *b, int blen);
-extern int gp8psk_bcm4500_reload(struct dvb_usb_device *d);
-
#endif
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index fc7569e2728d..1babd3341910 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -74,22 +74,31 @@ static struct rc_map_table rc_map_haupp_table[] = {
*/
static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- u8 key[5],cmd[2] = { DIBUSB_REQ_POLL_REMOTE, 0x35 }, data,toggle,custom;
+ u8 *buf, data, toggle, custom;
u16 raw;
- int i;
+ int i, ret;
struct dibusb_device_state *st = d->priv;
- dvb_usb_generic_rw(d,cmd,2,key,5,0);
+ buf = kmalloc(5, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = DIBUSB_REQ_POLL_REMOTE;
+ buf[1] = 0x35;
+ ret = dvb_usb_generic_rw(d, buf, 2, buf, 5, 0);
+ if (ret < 0)
+ goto ret;
*state = REMOTE_NO_KEY_PRESSED;
- switch (key[0]) {
+ switch (buf[0]) {
case DIBUSB_RC_HAUPPAUGE_KEY_PRESSED:
- raw = ((key[1] << 8) | key[2]) >> 3;
+ raw = ((buf[1] << 8) | buf[2]) >> 3;
toggle = !!(raw & 0x800);
data = raw & 0x3f;
custom = (raw >> 6) & 0x1f;
- deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",key[1],key[2],key[3],custom,data,toggle);
+ deb_rc("raw key code 0x%02x, 0x%02x, 0x%02x to c: %02x d: %02x toggle: %d\n",
+ buf[1], buf[2], buf[3], custom, data, toggle);
for (i = 0; i < ARRAY_SIZE(rc_map_haupp_table); i++) {
if (rc5_data(&rc_map_haupp_table[i]) == data &&
@@ -117,7 +126,9 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
break;
}
- return 0;
+ret:
+ kfree(buf);
+ return ret;
}
static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index c05de1b088a4..07fa08be9e99 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,48 +97,53 @@ struct pctv452e_state {
u8 c; /* transaction counter, wraps around... */
u8 initialized; /* set to 1 if 0x15 has been sent */
u16 last_rc_key;
+
+ unsigned char data[80];
};
static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
unsigned int write_len, unsigned int read_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 buf[64];
u8 id;
unsigned int rlen;
int ret;
- BUG_ON(NULL == data && 0 != (write_len | read_len));
- BUG_ON(write_len > 64 - 4);
- BUG_ON(read_len > 64 - 4);
+ if (!data || (write_len > 64 - 4) || (read_len > 64 - 4)) {
+ err("%s: transfer data invalid", __func__);
+ return -EIO;
+ }
+ mutex_lock(&state->ca_mutex);
id = state->c++;
- buf[0] = SYNC_BYTE_OUT;
- buf[1] = id;
- buf[2] = cmd;
- buf[3] = write_len;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = cmd;
+ state->data[3] = write_len;
- memcpy(buf + 4, data, write_len);
+ memcpy(state->data + 4, data, write_len);
rlen = (read_len > 0) ? 64 : 0;
- ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
- buf, rlen, /* delay_ms */ 0);
+ ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
+ state->data, rlen, /* delay_ms */ 0);
if (0 != ret)
goto failed;
ret = -EIO;
- if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
goto failed;
- memcpy(data, buf + 4, read_len);
+ memcpy(data, state->data + 4, read_len);
+ mutex_unlock(&state->ca_mutex);
return 0;
failed:
err("CI error %d; %02X %02X %02X -> %*ph.",
- ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
+ ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+ mutex_unlock(&state->ca_mutex);
return ret;
}
@@ -405,52 +410,53 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *rcv_buf, u8 rcv_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 buf[64];
u8 id;
int ret;
+ mutex_lock(&state->ca_mutex);
id = state->c++;
ret = -EINVAL;
if (snd_len > 64 - 7 || rcv_len > 64 - 7)
goto failed;
- buf[0] = SYNC_BYTE_OUT;
- buf[1] = id;
- buf[2] = PCTV_CMD_I2C;
- buf[3] = snd_len + 3;
- buf[4] = addr << 1;
- buf[5] = snd_len;
- buf[6] = rcv_len;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = PCTV_CMD_I2C;
+ state->data[3] = snd_len + 3;
+ state->data[4] = addr << 1;
+ state->data[5] = snd_len;
+ state->data[6] = rcv_len;
- memcpy(buf + 7, snd_buf, snd_len);
+ memcpy(state->data + 7, snd_buf, snd_len);
- ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
- buf, /* rcv_len */ 64,
+ ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
+ state->data, /* rcv_len */ 64,
/* delay_ms */ 0);
if (ret < 0)
goto failed;
/* TT USB protocol error. */
ret = -EIO;
- if (SYNC_BYTE_IN != buf[0] || id != buf[1])
+ if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
goto failed;
/* I2C device didn't respond as expected. */
ret = -EREMOTEIO;
- if (buf[5] < snd_len || buf[6] < rcv_len)
+ if (state->data[5] < snd_len || state->data[6] < rcv_len)
goto failed;
- memcpy(rcv_buf, buf + 7, rcv_len);
+ memcpy(rcv_buf, state->data + 7, rcv_len);
+ mutex_unlock(&state->ca_mutex);
return rcv_len;
failed:
- err("I2C error %d; %02X %02X %02X %02X %02X -> "
- "%02X %02X %02X %02X %02X.",
+ err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
- buf[0], buf[1], buf[4], buf[5], buf[6]);
+ 7, state->data);
+ mutex_unlock(&state->ca_mutex);
return ret;
}
@@ -499,8 +505,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 b0[] = { 0xaa, 0, PCTV_CMD_RESET, 1, 0 };
- u8 rx[PCTV_ANSWER_LEN];
+ u8 *rx;
int ret;
info("%s: %d\n", __func__, i);
@@ -511,6 +516,11 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
if (state->initialized)
return 0;
+ rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ mutex_lock(&state->ca_mutex);
/* hmm, where should this go? */
ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
if (ret != 0)
@@ -518,65 +528,75 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
__func__, ret);
/* this is a one-time initialization, don't know where to put it */
- b0[1] = state->c++;
+ state->data[0] = 0xaa;
+ state->data[1] = state->c++;
+ state->data[2] = PCTV_CMD_RESET;
+ state->data[3] = 1;
+ state->data[4] = 0;
/* reset board */
- ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
- return ret;
+ goto ret;
- b0[1] = state->c++;
- b0[4] = 1;
+ state->data[1] = state->c++;
+ state->data[4] = 1;
/* reset board (again?) */
- ret = dvb_usb_generic_rw(d, b0, sizeof(b0), rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
- return ret;
+ goto ret;
state->initialized = 1;
- return 0;
+ret:
+ mutex_unlock(&state->ca_mutex);
+ kfree(rx);
+ return ret;
}
static int pctv452e_rc_query(struct dvb_usb_device *d)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 b[CMD_BUFFER_SIZE];
- u8 rx[PCTV_ANSWER_LEN];
int ret, i;
- u8 id = state->c++;
+ u8 id;
+
+ mutex_lock(&state->ca_mutex);
+ id = state->c++;
/* prepare command header */
- b[0] = SYNC_BYTE_OUT;
- b[1] = id;
- b[2] = PCTV_CMD_IR;
- b[3] = 0;
+ state->data[0] = SYNC_BYTE_OUT;
+ state->data[1] = id;
+ state->data[2] = PCTV_CMD_IR;
+ state->data[3] = 0;
/* send ir request */
- ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 4,
+ state->data, PCTV_ANSWER_LEN, 0);
if (ret != 0)
- return ret;
+ goto ret;
if (debug > 3) {
- info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
- for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
- info(" %02x", rx[i+3]);
+ info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
+ for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
+ info(" %02x", state->data[i + 3]);
info("\n");
}
- if ((rx[3] == 9) && (rx[12] & 0x01)) {
+ if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
/* got a "press" event */
- state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
+ state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
if (debug > 2)
info("%s: cmd=0x%02x sys=0x%02x\n",
- __func__, rx[6], rx[7]);
+ __func__, state->data[6], state->data[7]);
rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
} else if (state->last_rc_key) {
rc_keyup(d->rc_dev);
state->last_rc_key = 0;
}
-
- return 0;
+ret:
+ mutex_unlock(&state->ca_mutex);
+ return ret;
}
static int pctv452e_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index d9f3262bf071..4706628a3ed5 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -89,9 +89,13 @@ struct technisat_usb2_state {
static int technisat_usb2_i2c_access(struct usb_device *udev,
u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
{
- u8 b[64];
+ u8 *b;
int ret, actual_length;
+ b = kmalloc(64, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
deb_i2c("i2c-access: %02x, tx: ", device_addr);
debug_dump(tx, txlen, deb_i2c);
deb_i2c(" ");
@@ -123,7 +127,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (ret < 0) {
err("i2c-error: out failed %02x = %d", device_addr, ret);
- return -ENODEV;
+ goto err;
}
ret = usb_bulk_msg(udev,
@@ -131,7 +135,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
b, 64, &actual_length, 1000);
if (ret < 0) {
err("i2c-error: in failed %02x = %d", device_addr, ret);
- return -ENODEV;
+ goto err;
}
if (b[0] != I2C_STATUS_OK) {
@@ -140,7 +144,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (!(b[0] == I2C_STATUS_NAK &&
device_addr == 0x60
/* && device_is_technisat_usb2 */))
- return -ENODEV;
+ goto err;
}
deb_i2c("status: %d, ", b[0]);
@@ -154,7 +158,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
deb_i2c("\n");
- return 0;
+err:
+ kfree(b);
+ return ret;
}
static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 78f3687772bf..e11fe46a547c 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -695,7 +695,7 @@ static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
/*
* register/unregister code and data
*/
-static struct snd_pcm_ops snd_em28xx_pcm_capture = {
+static const struct snd_pcm_ops snd_em28xx_pcm_capture = {
.open = snd_em28xx_capture_open,
.close = snd_em28xx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 1a9e1e556706..8b690ac908a4 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -855,7 +855,7 @@ static u32 functionality(struct i2c_adapter *i2c_adap)
return 0;
}
-static struct i2c_algorithm em28xx_algo = {
+static const struct i2c_algorithm em28xx_algo = {
.master_xfer = em28xx_i2c_xfer,
.functionality = functionality,
};
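A number of ops tables below gain const: they are filled with function pointers once, at build time, so declaring them const lets the compiler place them in read-only data. For example (hypothetical handlers):

	static const struct i2c_algorithm my_algo = {
		.master_xfer	= my_i2c_xfer,
		.functionality	= my_functionality,
	};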
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 7968695217f3..1f7fa059eb34 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1204,7 +1204,7 @@ buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static struct vb2_ops em28xx_video_qops = {
+static const struct vb2_ops em28xx_video_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
diff --git a/drivers/media/usb/go7007/go7007-i2c.c b/drivers/media/usb/go7007/go7007-i2c.c
index 55addfa855d4..c084bf794b56 100644
--- a/drivers/media/usb/go7007/go7007-i2c.c
+++ b/drivers/media/usb/go7007/go7007-i2c.c
@@ -191,7 +191,7 @@ static u32 go7007_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_SMBUS_BYTE_DATA;
}
-static struct i2c_algorithm go7007_algo = {
+static const struct i2c_algorithm go7007_algo = {
.smbus_xfer = go7007_smbus_xfer,
.master_xfer = go7007_i2c_master_xfer,
.functionality = go7007_functionality,
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index 14d3f8c1ce4a..ed9bcaf08d5e 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -1032,7 +1032,7 @@ static u32 go7007_usb_functionality(struct i2c_adapter *adapter)
return (I2C_FUNC_SMBUS_EMUL) & ~I2C_FUNC_SMBUS_QUICK;
}
-static struct i2c_algorithm go7007_usb_algo = {
+static const struct i2c_algorithm go7007_usb_algo = {
.master_xfer = go7007_usb_i2c_master_xfer,
.functionality = go7007_usb_functionality,
};
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index af8458996d91..4eaba0c24629 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -477,7 +477,7 @@ static void go7007_stop_streaming(struct vb2_queue *q)
go7007_write_addr(go, 0x3c82, 0x000d);
}
-static struct vb2_ops go7007_video_qops = {
+static const struct vb2_ops go7007_video_qops = {
.queue_setup = go7007_queue_setup,
.buf_queue = go7007_buf_queue,
.buf_prepare = go7007_buf_prepare,
diff --git a/drivers/media/usb/go7007/snd-go7007.c b/drivers/media/usb/go7007/snd-go7007.c
index d22d7d574672..070871fb1fc4 100644
--- a/drivers/media/usb/go7007/snd-go7007.c
+++ b/drivers/media/usb/go7007/snd-go7007.c
@@ -198,7 +198,7 @@ static struct page *go7007_snd_pcm_page(struct snd_pcm_substream *substream,
return vmalloc_to_page(substream->runtime->dma_area + offset);
}
-static struct snd_pcm_ops go7007_snd_capture_ops = {
+static const struct snd_pcm_ops go7007_snd_capture_ops = {
.open = go7007_snd_capture_open,
.close = go7007_snd_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/gspca/finepix.c b/drivers/media/usb/gspca/finepix.c
index 52bdb569760b..ae9a55d7bbbb 100644
--- a/drivers/media/usb/gspca/finepix.c
+++ b/drivers/media/usb/gspca/finepix.c
@@ -41,7 +41,6 @@ struct usb_fpix {
struct gspca_dev gspca_dev; /* !! must be the first item */
struct work_struct work_struct;
- struct workqueue_struct *work_thread;
};
/* Delay after which claim the next frame. If the delay is too small,
@@ -226,9 +225,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* Again, reset bulk in endpoint */
usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe);
- /* Start the workqueue function to do the streaming */
- dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
- queue_work(dev->work_thread, &dev->work_struct);
+ schedule_work(&dev->work_struct);
return 0;
}
@@ -241,9 +238,8 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
/* wait for the work queue to terminate */
mutex_unlock(&gspca_dev->usb_lock);
- destroy_workqueue(dev->work_thread);
+ flush_work(&dev->work_struct);
mutex_lock(&gspca_dev->usb_lock);
- dev->work_thread = NULL;
}
/* Table of supported USB devices */
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index 5b481fa43099..ac295f04bd18 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -45,7 +45,6 @@ struct sd {
const struct v4l2_pix_format *cap_mode;
/* Driver stuff */
struct work_struct work_struct;
- struct workqueue_struct *work_thread;
u8 frame_brightness;
int block_size; /* block size of camera */
int vga; /* 1 if vga cam, 0 if cif cam */
@@ -477,9 +476,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
return -1;
}
- /* Start the workqueue function to do the streaming */
- sd->work_thread = create_singlethread_workqueue(MODULE_NAME);
- queue_work(sd->work_thread, &sd->work_struct);
+ schedule_work(&sd->work_struct);
return 0;
}
@@ -493,8 +490,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
/* wait for the work queue to terminate */
mutex_unlock(&gspca_dev->usb_lock);
/* This waits for sq905c_dostream to finish */
- destroy_workqueue(dev->work_thread);
- dev->work_thread = NULL;
+ flush_work(&dev->work_struct);
mutex_lock(&gspca_dev->usb_lock);
}
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index fd1c8706d86a..d49d76ec1421 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -54,7 +54,6 @@ struct sd {
u32 exposure;
struct work_struct work;
- struct workqueue_struct *work_thread;
u32 pktsz; /* (used by pkt_scan) */
u16 npkt;
@@ -2485,7 +2484,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
sd->pktsz = sd->npkt = 0;
sd->nchg = sd->short_mark = 0;
- sd->work_thread = create_singlethread_workqueue(MODULE_NAME);
return gspca_dev->usb_err;
}
@@ -2569,12 +2567,9 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->work_thread != NULL) {
- mutex_unlock(&gspca_dev->usb_lock);
- destroy_workqueue(sd->work_thread);
- mutex_lock(&gspca_dev->usb_lock);
- sd->work_thread = NULL;
- }
+ mutex_unlock(&gspca_dev->usb_lock);
+ flush_work(&sd->work);
+ mutex_lock(&gspca_dev->usb_lock);
}
static void do_autogain(struct gspca_dev *gspca_dev)
@@ -2785,7 +2780,7 @@ marker_found:
new_qual = QUALITY_MAX;
if (new_qual != sd->quality) {
sd->quality = new_qual;
- queue_work(sd->work_thread, &sd->work);
+ schedule_work(&sd->work);
}
}
} else {
diff --git a/drivers/media/usb/gspca/vicam.c b/drivers/media/usb/gspca/vicam.c
index 103f6c4236b0..8860510c2f9c 100644
--- a/drivers/media/usb/gspca/vicam.c
+++ b/drivers/media/usb/gspca/vicam.c
@@ -47,7 +47,6 @@ MODULE_FIRMWARE(VICAM_FIRMWARE);
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
struct work_struct work_struct;
- struct workqueue_struct *work_thread;
};
/* The vicam sensor has a resolution of 512 x 244, with I believe square
@@ -278,9 +277,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
if (ret < 0)
return ret;
- /* Start the workqueue function to do the streaming */
- sd->work_thread = create_singlethread_workqueue(MODULE_NAME);
- queue_work(sd->work_thread, &sd->work_struct);
+ schedule_work(&sd->work_struct);
return 0;
}
@@ -294,8 +291,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
/* wait for the work queue to terminate */
mutex_unlock(&gspca_dev->usb_lock);
/* This waits for vicam_dostream to finish */
- destroy_workqueue(dev->work_thread);
- dev->work_thread = NULL;
+ flush_work(&dev->work_struct);
mutex_lock(&gspca_dev->usb_lock);
if (gspca_dev->present)
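The gspca drivers drop their private single-threaded workqueues: schedule_work() queues onto the system workqueue, and flush_work() in sd_stop0() gives the same "work has finished" guarantee that destroy_workqueue() provided. Sketched below, assuming INIT_WORK() was done at probe time:

	struct sd {
		struct gspca_dev gspca_dev;	/* must be the first item */
		struct work_struct work_struct;
	};

	static int sd_start(struct gspca_dev *gspca_dev)
	{
		struct sd *dev = (struct sd *)gspca_dev;

		schedule_work(&dev->work_struct);	/* runs on system_wq */
		return 0;
	}

	static void sd_stop0(struct gspca_dev *gspca_dev)
	{
		struct sd *dev = (struct sd *)gspca_dev;

		flush_work(&dev->work_struct);	/* wait for completion */
	}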
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index c2c8d12e9498..d9a525260511 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -129,7 +129,7 @@ struct hackrf_dev {
struct list_head rx_buffer_list;
struct list_head tx_buffer_list;
spinlock_t buffer_list_lock; /* Protects buffer_list */
- unsigned sequence; /* Buffer sequence counter */
+ unsigned int sequence; /* Buffer sequence counter */
unsigned int vb_full; /* vb is full and packets dropped */
unsigned int vb_empty; /* vb is empty and packets dropped */
@@ -891,7 +891,7 @@ static void hackrf_stop_streaming(struct vb2_queue *vq)
mutex_unlock(&dev->v4l2_lock);
}
-static struct vb2_ops hackrf_vb2_ops = {
+static const struct vb2_ops hackrf_vb2_ops = {
.queue_setup = hackrf_queue_setup,
.buf_queue = hackrf_buf_queue,
.start_streaming = hackrf_start_streaming,
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index a38f58c4c6bf..9b641c4d4431 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -55,7 +55,7 @@ struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
/* Our default information for ir-kbd-i2c.c to use */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_BIT_RC5;
+ init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE | RC_BIT_RC6_6A_32;
init_data->name = "HD-PVR";
init_data->polling_interval = 405; /* ms, duplicated from Windows */
hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
@@ -180,7 +180,7 @@ static u32 hdpvr_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm hdpvr_algo = {
+static const struct i2c_algorithm hdpvr_algo = {
.master_xfer = hdpvr_transfer,
.functionality = hdpvr_functionality,
};
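
[Editor's note: constifying ops tables such as i2c_algorithm, vb2_ops and snd_pcm_ops (here and in the hackrf, msi2500, pwc, s2255, stk1160, tm6000, usbtv and uvc hunks) works because the respective cores only keep const pointers to them; the tables can then live in .rodata. A hedged sketch with hypothetical names (example_xfer, example_algo):]

#include <linux/i2c.h>
#include <linux/module.h>

static int example_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
			int num)
{
	return num;			/* pretend every message completed */
}

static u32 example_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm example_algo = {	/* lives in .rodata */
	.master_xfer	= example_xfer,
	.functionality	= example_functionality,
};

static struct i2c_adapter example_adapter = {
	.owner	= THIS_MODULE,
	.name	= "example",
	.algo	= &example_algo,	/* .algo is a const pointer already */
};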
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 6d43d75493ea..474c11e1d495 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/kconfig.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 367eb7e2a31d..bb3d31e2a0b5 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -897,7 +897,7 @@ static void msi2500_stop_streaming(struct vb2_queue *vq)
mutex_unlock(&dev->v4l2_lock);
}
-static struct vb2_ops msi2500_vb2_ops = {
+static const struct vb2_ops msi2500_vb2_ops = {
.queue_setup = msi2500_queue_setup,
.buf_queue = msi2500_buf_queue,
.start_streaming = msi2500_start_streaming,
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
index 60141b16d731..23473a21319c 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw-internal.h
@@ -170,7 +170,6 @@ struct pvr2_hdw {
const struct pvr2_device_desc *hdw_desc;
/* Kernel worker thread handling */
- struct workqueue_struct *workqueue;
struct work_struct workpoll; /* Update driver state */
/* Video spigot */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index fe20fe4f2330..1eb4f7ba2967 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2624,7 +2624,6 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1;
hdw->name[cnt1] = 0;
- hdw->workqueue = create_singlethread_workqueue(hdw->name);
INIT_WORK(&hdw->workpoll,pvr2_hdw_worker_poll);
pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s",
@@ -2651,11 +2650,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
del_timer_sync(&hdw->decoder_stabilization_timer);
del_timer_sync(&hdw->encoder_run_timer);
del_timer_sync(&hdw->encoder_wait_timer);
- if (hdw->workqueue) {
- flush_workqueue(hdw->workqueue);
- destroy_workqueue(hdw->workqueue);
- hdw->workqueue = NULL;
- }
+ flush_work(&hdw->workpoll);
usb_free_urb(hdw->ctl_read_urb);
usb_free_urb(hdw->ctl_write_urb);
kfree(hdw->ctl_read_buffer);
@@ -2712,11 +2707,7 @@ void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
{
if (!hdw) return;
pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_destroy: hdw=%p",hdw);
- if (hdw->workqueue) {
- flush_workqueue(hdw->workqueue);
- destroy_workqueue(hdw->workqueue);
- hdw->workqueue = NULL;
- }
+ flush_work(&hdw->workpoll);
del_timer_sync(&hdw->quiescent_timer);
del_timer_sync(&hdw->decoder_stabilization_timer);
del_timer_sync(&hdw->encoder_run_timer);
@@ -4443,7 +4434,7 @@ static void pvr2_hdw_quiescent_timeout(unsigned long data)
hdw->state_decoder_quiescent = !0;
trace_stbit("state_decoder_quiescent",hdw->state_decoder_quiescent);
hdw->state_stale = !0;
- queue_work(hdw->workqueue,&hdw->workpoll);
+ schedule_work(&hdw->workpoll);
}
@@ -4454,7 +4445,7 @@ static void pvr2_hdw_decoder_stabilization_timeout(unsigned long data)
hdw->state_decoder_ready = !0;
trace_stbit("state_decoder_ready", hdw->state_decoder_ready);
hdw->state_stale = !0;
- queue_work(hdw->workqueue, &hdw->workpoll);
+ schedule_work(&hdw->workpoll);
}
@@ -4465,7 +4456,7 @@ static void pvr2_hdw_encoder_wait_timeout(unsigned long data)
hdw->state_encoder_waitok = !0;
trace_stbit("state_encoder_waitok",hdw->state_encoder_waitok);
hdw->state_stale = !0;
- queue_work(hdw->workqueue,&hdw->workpoll);
+ schedule_work(&hdw->workpoll);
}
@@ -4477,7 +4468,7 @@ static void pvr2_hdw_encoder_run_timeout(unsigned long data)
hdw->state_encoder_runok = !0;
trace_stbit("state_encoder_runok",hdw->state_encoder_runok);
hdw->state_stale = !0;
- queue_work(hdw->workqueue,&hdw->workpoll);
+ schedule_work(&hdw->workpoll);
}
}
@@ -4991,7 +4982,7 @@ static void pvr2_hdw_state_sched(struct pvr2_hdw *hdw)
if (hdw->state_stale) return;
hdw->state_stale = !0;
trace_stbit("state_stale",hdw->state_stale);
- queue_work(hdw->workqueue,&hdw->workpoll);
+ schedule_work(&hdw->workpoll);
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 14321d0a1833..6da5fb544817 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -596,7 +596,8 @@ static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_BIT_RC5;
+ init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
+ RC_BIT_RC6_6A_32;
init_data->name = hdw->hdw_desc->description;
/* IR Receiver */
info.addr = 0x71;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 81f788b7b242..2cc4d2b6f810 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -719,64 +719,85 @@ static int pvr2_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cap)
return ret;
}
-static int pvr2_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+static int pvr2_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct v4l2_cropcap cap;
int val = 0;
int ret;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), &val);
- if (ret != 0)
- return -EINVAL;
- crop->c.left = val;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), &val);
- if (ret != 0)
- return -EINVAL;
- crop->c.top = val;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), &val);
- if (ret != 0)
- return -EINVAL;
- crop->c.width = val;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), &val);
- if (ret != 0)
+
+ cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), &val);
+ if (ret != 0)
+ return -EINVAL;
+ sel->r.left = val;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), &val);
+ if (ret != 0)
+ return -EINVAL;
+ sel->r.top = val;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), &val);
+ if (ret != 0)
+ return -EINVAL;
+ sel->r.width = val;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), &val);
+ if (ret != 0)
+ return -EINVAL;
+ sel->r.height = val;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ ret = pvr2_hdw_get_cropcap(hdw, &cap);
+ sel->r = cap.defrect;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ ret = pvr2_hdw_get_cropcap(hdw, &cap);
+ sel->r = cap.bounds;
+ break;
+ default:
return -EINVAL;
- crop->c.height = val;
- return 0;
+ }
+ return ret;
}
-static int pvr2_s_crop(struct file *file, void *priv, const struct v4l2_crop *crop)
+static int pvr2_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int ret;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL),
- crop->c.left);
+ sel->r.left);
if (ret != 0)
return -EINVAL;
ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT),
- crop->c.top);
+ sel->r.top);
if (ret != 0)
return -EINVAL;
ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW),
- crop->c.width);
+ sel->r.width);
if (ret != 0)
return -EINVAL;
ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH),
- crop->c.height);
+ sel->r.height);
if (ret != 0)
return -EINVAL;
return 0;
@@ -798,8 +819,8 @@ static const struct v4l2_ioctl_ops pvr2_ioctl_ops = {
.vidioc_enumaudio = pvr2_enumaudio,
.vidioc_enum_input = pvr2_enum_input,
.vidioc_cropcap = pvr2_cropcap,
- .vidioc_s_crop = pvr2_s_crop,
- .vidioc_g_crop = pvr2_g_crop,
+ .vidioc_s_selection = pvr2_s_selection,
+ .vidioc_g_selection = pvr2_g_selection,
.vidioc_g_input = pvr2_g_input,
.vidioc_s_input = pvr2_s_input,
.vidioc_g_frequency = pvr2_g_frequency,
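
[Editor's note: the crop ioctls above are replaced by the selection API: one handler serves the current crop plus the default and bounds targets that VIDIOC_CROPCAP used to report. A minimal sketch of a g_selection handler, assuming a hypothetical capture device with a fixed 720x480 active area; a real driver, like pvrusb2 here, reads the rectangle from hardware instead.]

#include <linux/fs.h>
#include <linux/videodev2.h>

static int example_g_selection(struct file *file, void *priv,
			       struct v4l2_selection *sel)
{
	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:		/* current crop rectangle */
	case V4L2_SEL_TGT_CROP_DEFAULT:	/* what CROPCAP called defrect */
	case V4L2_SEL_TGT_CROP_BOUNDS:	/* what CROPCAP called bounds */
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = 720;
		sel->r.height = 480;
		return 0;
	default:
		return -EINVAL;
	}
}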
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index c4454c928776..ff657644b6b3 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -707,7 +707,7 @@ static void stop_streaming(struct vb2_queue *vq)
mutex_unlock(&pdev->v4l2_lock);
}
-static struct vb2_ops pwc_vb_queue_ops = {
+static const struct vb2_ops pwc_vb_queue_ops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 9458eb0ef66f..f7bb78c1873c 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -717,7 +717,7 @@ static void buffer_queue(struct vb2_buffer *vb)
static int start_streaming(struct vb2_queue *vq, unsigned int count);
static void stop_streaming(struct vb2_queue *vq);
-static struct vb2_ops s2255_video_qops = {
+static const struct vb2_ops s2255_video_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
@@ -1901,19 +1901,30 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request,
s32 TransferBufferLength, int bOut)
{
int r;
+ unsigned char *buf;
+
+ buf = kmalloc(TransferBufferLength, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
if (!bOut) {
r = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
Request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
- Value, Index, TransferBuffer,
+ Value, Index, buf,
TransferBufferLength, HZ * 5);
+
+ if (r >= 0)
+ memcpy(TransferBuffer, buf, TransferBufferLength);
} else {
+ memcpy(buf, TransferBuffer, TransferBufferLength);
r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- Value, Index, TransferBuffer,
+ Value, Index, buf,
TransferBufferLength, HZ * 5);
}
+ kfree(buf);
return r;
}
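
[Editor's note: this hunk, and the stkwebcam one below, fix the same bug: usb_control_msg() requires a kmalloc'ed, DMA-able buffer, so data must be bounced through a heap allocation rather than passing the caller's pointer (which may sit on the stack) straight to the USB core. A hedged sketch of the read direction, using a hypothetical example_vendor_read():]

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int example_vendor_read(struct usb_device *udev, u8 request,
			       u16 value, u16 index, void *dst, u16 len)
{
	unsigned char *buf;
	int r;

	buf = kmalloc(len, GFP_KERNEL);	/* heap memory is DMA-able */
	if (!buf)
		return -ENOMEM;

	r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
			    value, index, buf, len, 5 * HZ);
	if (r >= 0)
		memcpy(dst, buf, len);	/* copy out only on success */

	kfree(buf);
	return r;
}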
diff --git a/drivers/media/usb/stk1160/stk1160-i2c.c b/drivers/media/usb/stk1160/stk1160-i2c.c
index 850cf285ada8..3f2517be02bb 100644
--- a/drivers/media/usb/stk1160/stk1160-i2c.c
+++ b/drivers/media/usb/stk1160/stk1160-i2c.c
@@ -235,7 +235,7 @@ static u32 functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm algo = {
+static const struct i2c_algorithm algo = {
.master_xfer = stk1160_i2c_xfer,
.functionality = functionality,
};
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 5fab3bee8c74..a005d262392a 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -742,7 +742,7 @@ static void stop_streaming(struct vb2_queue *vq)
stk1160_stop_streaming(dev);
}
-static struct vb2_ops stk1160_video_qops = {
+static const struct vb2_ops stk1160_video_qops = {
.queue_setup = queue_setup,
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index db200c9d796d..22a9aae16291 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -147,20 +147,26 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
{
struct usb_device *udev = dev->udev;
+ unsigned char *buf;
int ret;
+ buf = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x00,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x00,
index,
- (u8 *) value,
+ buf,
sizeof(u8),
500);
- if (ret < 0)
- return ret;
- else
- return 0;
+ if (ret >= 0)
+ memcpy(value, buf, sizeof(u8));
+
+ kfree(buf);
+ return ret;
}
static int stk_start_stream(struct stk_camera *dev)
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index e21c7aacecb6..f16fbd1f9f51 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -388,7 +388,7 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
/*
* operators
*/
-static struct snd_pcm_ops snd_tm6000_pcm_ops = {
+static const struct snd_pcm_ops snd_tm6000_pcm_ops = {
.open = snd_tm6000_pcm_open,
.close = snd_tm6000_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 4e36e24cb3a6..4e7671a3a1e4 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -206,7 +206,7 @@ static void ttusb_dec_set_model(struct ttusb_dec *dec,
static void ttusb_dec_handle_irq( struct urb *urb)
{
- struct ttusb_dec * dec = urb->context;
+ struct ttusb_dec *dec = urb->context;
char *buffer = dec->irq_buffer;
int retval;
@@ -227,25 +227,31 @@ static void ttusb_dec_handle_irq( struct urb *urb)
goto exit;
}
- if( (buffer[0] == 0x1) && (buffer[2] == 0x15) ) {
- /* IR - Event */
- /* this is an fact a bit too simple implementation;
+ if ((buffer[0] == 0x1) && (buffer[2] == 0x15)) {
+ /*
+ * IR - Event
+ *
+ * this is in fact a bit too simple an implementation;
* the box also reports a keyrepeat signal
* (with buffer[3] == 0x40) in an interval of ~100ms.
* But to handle this correctly we would have to implement some
* kind of timer which signals a 'key up' event if no
* keyrepeat signal is received for, let's say, 200ms.
* this should/could be added later ...
- * for now lets report each signal as a key down and up*/
- dprintk("%s:rc signal:%d\n", __func__, buffer[4]);
- input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 1);
- input_sync(dec->rc_input_dev);
- input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 0);
- input_sync(dec->rc_input_dev);
+ * for now let's report each signal as a key down and up
+ */
+ if (buffer[4] - 1 < ARRAY_SIZE(rc_keys)) {
+ dprintk("%s:rc signal:%d\n", __func__, buffer[4]);
+ input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 1);
+ input_sync(dec->rc_input_dev);
+ input_report_key(dec->rc_input_dev, rc_keys[buffer[4] - 1], 0);
+ input_sync(dec->rc_input_dev);
+ }
}
-exit: retval = usb_submit_urb(urb, GFP_ATOMIC);
- if(retval)
+exit:
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
printk("%s - usb_commit_urb failed with result: %d\n",
__func__, retval);
}
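
[Editor's note: besides the style cleanup, the hunk above adds a bounds check before indexing rc_keys[] with a device-supplied byte. A sketch of the guard using a hypothetical rc_keys_example[] table; note that the unsigned comparison also makes a zero scancode fail the test after wrapping.]

#include <linux/kernel.h>

static const unsigned short rc_keys_example[] = {
	1, 2, 3,			/* stand-ins for KEY_* codes */
};

static int example_lookup_key(unsigned char scancode)
{
	unsigned int idx = scancode - 1;	/* protocol counts from 1 */

	/* a zero scancode wraps to UINT_MAX and is rejected as well */
	if (idx >= ARRAY_SIZE(rc_keys_example))
		return -EINVAL;
	return rc_keys_example[idx];
}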
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 1965ff1b1f12..9db31db7d9ac 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -332,7 +332,7 @@ static snd_pcm_uframes_t snd_usbtv_pointer(struct snd_pcm_substream *substream)
return chip->snd_buffer_pos;
}
-static struct snd_pcm_ops snd_usbtv_pcm_ops = {
+static const struct snd_pcm_ops snd_usbtv_pcm_ops = {
.open = snd_usbtv_pcm_open,
.close = snd_usbtv_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 2a089756c988..6cbe4a245c9f 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -689,7 +689,7 @@ static void usbtv_stop_streaming(struct vb2_queue *vq)
usbtv_stop(usbtv);
}
-static struct vb2_ops usbtv_vb2_ops = {
+static const struct vb2_ops usbtv_vb2_ops = {
.queue_setup = usbtv_queue_setup,
.buf_queue = usbtv_buf_queue,
.start_streaming = usbtv_start_streaming,
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 773fefb52d7a..77edd206d345 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -177,7 +177,7 @@ static void uvc_stop_streaming(struct vb2_queue *vq)
spin_unlock_irqrestore(&queue->irqlock, flags);
}
-static struct vb2_ops uvc_queue_qops = {
+static const struct vb2_ops uvc_queue_qops = {
.queue_setup = uvc_queue_setup,
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 29b3436d0910..367523a3c774 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -27,7 +27,7 @@ config VIDEO_FIXED_MINOR_RANGES
config VIDEO_PCI_SKELETON
tristate "Skeleton PCI V4L2 driver"
- depends on PCI && BUILD_DOCSRC
+ depends on PCI
depends on VIDEO_V4L2 && VIDEOBUF2_CORE
depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
---help---
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index a4b224d92572..5bada202b2d3 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -119,13 +119,6 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
return ret;
}
- ret = v4l2_subdev_call(sd, core, registered_async);
- if (ret < 0 && ret != -ENOIOCTLCMD) {
- if (notifier->unbind)
- notifier->unbind(notifier, sd, asd);
- return ret;
- }
-
if (list_empty(&notifier->waiting) && notifier->complete)
return notifier->complete(notifier);
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 5b808500e7e7..57cfe26a393f 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -291,7 +291,7 @@ struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
error:
/* If we have a client but no subdev, then something went wrong and
we must unregister the client. */
- if (spi && sd == NULL)
+ if (!sd)
spi_unregister_device(spi);
return sd;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index f7abfad9ad23..adc2147fcff7 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -361,6 +361,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Scalable Baseline",
"Scalable High",
"Scalable High Intra",
+ "Stereo High",
"Multiview High",
NULL,
};
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index e6da353b39bc..8be561ab2615 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -527,6 +527,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
@@ -573,7 +574,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
- if (is_vid) {
+ if (is_vid || is_tch) {
/* video specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
ops->vidioc_enum_fmt_vid_cap_mplane ||
@@ -662,7 +663,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
}
- if (is_vid || is_vbi || is_sdr) {
+ if (is_vid || is_vbi || is_sdr || is_tch) {
/* ioctls valid for video, vbi or sdr */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
@@ -675,7 +676,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
}
- if (is_vid || is_vbi) {
+ if (is_vid || is_vbi || is_tch) {
/* ioctls valid for video or vbi */
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
@@ -751,6 +752,10 @@ static int video_register_media_controller(struct video_device *vdev, int type)
intf_type = MEDIA_INTF_T_V4L_SWRADIO;
vdev->entity.function = MEDIA_ENT_F_IO_SWRADIO;
break;
+ case VFL_TYPE_TOUCH:
+ intf_type = MEDIA_INTF_T_V4L_TOUCH;
+ vdev->entity.function = MEDIA_ENT_F_IO_V4L;
+ break;
case VFL_TYPE_RADIO:
intf_type = MEDIA_INTF_T_V4L_RADIO;
/*
@@ -854,6 +859,9 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* Use device name 'swradio' because 'sdr' was already taken. */
name_base = "swradio";
break;
+ case VFL_TYPE_TOUCH:
+ name_base = "v4l-touch";
+ break;
default:
printk(KERN_ERR "%s called with unknown type: %d\n",
__func__, type);
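
[Editor's note: with the VFL_TYPE_TOUCH plumbing above, a touch-oriented driver can register a v4l-touch node and reuse the video-capture ioctl paths. A hedged sketch with a hypothetical example_register_touch(); most video_device setup (fops, ioctl_ops, release) is omitted.]

#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

static int example_register_touch(struct v4l2_device *v4l2_dev,
				  struct video_device *vdev)
{
	vdev->v4l2_dev = v4l2_dev;
	vdev->vfl_dir = VFL_DIR_RX;	/* touch data is capture-only */

	/* the core picks the next free "v4l-touch%d" node */
	return video_register_device(vdev, VFL_TYPE_TOUCH, -1);
}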
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 06fa5f1b2cff..62bbed76dbbc 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -160,12 +160,9 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
int err;
/* Check for valid input */
- if (v4l2_dev == NULL || sd == NULL || !sd->name[0])
+ if (!v4l2_dev || !sd || sd->v4l2_dev || !sd->name[0])
return -EINVAL;
- /* Warn if we apparently re-register a subdev */
- WARN_ON(sd->v4l2_dev != NULL);
-
/*
* The reason to acquire the module here is to avoid unloading
* a module of sub-device which is registered to a media
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 889de0a32152..730a7c392c1d 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -306,7 +306,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
bt->il_vsync, bt->il_vbackporch);
pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
- pr_info("%s: flags (0x%x):%s%s%s%s%s%s\n", dev_prefix, bt->flags,
+ pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s\n", dev_prefix, bt->flags,
(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
" REDUCED_BLANKING" : "",
((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
@@ -318,12 +318,15 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
(bt->flags & V4L2_DV_FL_HALF_LINE) ?
" HALF_LINE" : "",
(bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
- " CE_VIDEO" : "");
- pr_info("%s: standards (0x%x):%s%s%s%s\n", dev_prefix, bt->standards,
+ " CE_VIDEO" : "",
+ (bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ?
+ " FIRST_FIELD_EXTRA_LINE" : "");
+ pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards,
(bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
(bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
(bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
- (bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "");
+ (bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "",
+ (bt->standards & V4L2_DV_BT_STD_SDI) ? " SDI" : "");
}
EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 51a0fa144392..c52d94c018bb 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -924,6 +924,7 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -932,7 +933,7 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
switch (type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (is_vid && is_rx &&
+ if ((is_vid || is_tch) && is_rx &&
(ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane))
return 0;
break;
@@ -1243,6 +1244,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break;
case V4L2_SDR_FMT_CS14LE: descr = "Complex S14LE"; break;
case V4L2_SDR_FMT_RU12LE: descr = "Real U12LE"; break;
+ case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit signed deltas"; break;
+ case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit signed deltas"; break;
+ case V4L2_TCH_FMT_TU16: descr = "16-bit unsigned touch data"; break;
+ case V4L2_TCH_FMT_TU08: descr = "8-bit unsigned touch data"; break;
default:
/* Compressed formats */
@@ -1309,13 +1314,14 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
int ret = -EINVAL;
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_vid_cap))
+ if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_enum_fmt_vid_cap))
break;
ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
break;
@@ -1362,6 +1368,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
int ret;
@@ -1392,7 +1399,7 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_vid_cap))
+ if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_g_fmt_vid_cap))
break;
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
@@ -1451,6 +1458,21 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
+static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+{
+ /*
+ * The v4l2_pix_format structure contains fields that make no sense for
+ * touch. Set them to default values in this case.
+ */
+
+ p->field = V4L2_FIELD_NONE;
+ p->colorspace = V4L2_COLORSPACE_RAW;
+ p->flags = 0;
+ p->ycbcr_enc = 0;
+ p->quantization = 0;
+ p->xfer_func = 0;
+}
+
static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -1458,6 +1480,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
int ret;
@@ -1469,17 +1492,19 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap))
+ if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_s_fmt_vid_cap))
break;
CLEAR_AFTER_FIELD(p, fmt.pix);
ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (is_tch)
+ v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_cap_mplane))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_vid_overlay))
@@ -1507,7 +1532,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_mplane))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!is_tx || !is_vid || !ops->vidioc_s_fmt_vid_out_overlay))
@@ -1545,6 +1570,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
int ret;
@@ -1553,7 +1579,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
switch (p->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap))
+ if (unlikely(!is_rx || (!is_vid && !is_tch) || !ops->vidioc_try_fmt_vid_cap))
break;
CLEAR_AFTER_FIELD(p, fmt.pix);
ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
@@ -1563,7 +1589,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_cap_mplane))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_vid_overlay))
@@ -1591,7 +1617,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_mplane))
break;
- CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!is_tx || !is_vid || !ops->vidioc_try_fmt_vid_out_overlay))
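
[Editor's note: the CLEAR_AFTER_FIELD anchor moves from fmt.pix_mp to fmt.pix_mp.xfer_func, so the trailing reserved bytes of the mplane format are now zeroed while everything userspace filled in up to and including xfer_func is preserved. A sketch of the macro's shape, paraphrased from v4l2-ioctl.c rather than copied verbatim:]

#include <linux/stddef.h>
#include <linux/string.h>

/* zero every byte of *(p) that lies after 'field' */
#define CLEAR_AFTER_FIELD_EXAMPLE(p, field)				\
	memset((char *)(p) + offsetof(typeof(*(p)), field) +		\
	       sizeof((p)->field), 0,					\
	       sizeof(*(p)) - offsetof(typeof(*(p)), field) -		\
	       sizeof((p)->field))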
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 61d56c940f80..6bc27e7b2a33 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -76,9 +76,6 @@ static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
return &m2m_ctx->cap_q_ctx;
}
-/**
- * v4l2_m2m_get_vq() - return vb2_queue for the given type
- */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
@@ -92,9 +89,6 @@ struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
-/**
- * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
- */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
@@ -113,10 +107,6 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
-/**
- * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
- * return it
- */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b;
@@ -140,10 +130,6 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
* Scheduling handlers
*/
-/**
- * v4l2_m2m_get_curr_priv() - return driver private data for the currently
- * running instance or NULL if no instance is running
- */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
unsigned long flags;
@@ -188,26 +174,6 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
-/**
- * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
- * the pending job queue and add it if so.
- * @m2m_ctx: m2m context assigned to the instance to be checked
- *
- * There are three basic requirements an instance has to meet to be able to run:
- * 1) at least one source buffer has to be queued,
- * 2) at least one destination buffer has to be queued,
- * 3) streaming has to be on.
- *
- * If a queue is buffered (for example a decoder hardware ringbuffer that has
- * to be drained before doing streamoff), allow scheduling without v4l2 buffers
- * on that queue.
- *
- * There may also be additional, custom requirements. In such case the driver
- * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
- * return 1 if the instance is ready.
- * An example of the above could be an instance that requires more than one
- * src/dst buffer per transaction.
- */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
struct v4l2_m2m_dev *m2m_dev;
@@ -311,18 +277,6 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
}
}
-/**
- * v4l2_m2m_job_finish() - inform the framework that a job has been finished
- * and have it clean up
- *
- * Called by a driver to yield back the device after it has finished with it.
- * Should be called as soon as possible after reaching a state which allows
- * other instances to take control of the device.
- *
- * This function has to be called only after device_run() callback has been
- * called on the driver. To prevent recursion, it should not be called directly
- * from the device_run() callback though.
- */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
@@ -350,9 +304,6 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
-/**
- * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
- */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
@@ -370,11 +321,6 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
-/**
- * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
- *
- * See v4l2_m2m_mmap() documentation for details.
- */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
@@ -400,10 +346,6 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
-/**
- * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
- * the type
- */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
@@ -419,10 +361,6 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
-/**
- * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
- * the type
- */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
@@ -433,10 +371,6 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
-/**
- * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
- * the type
- */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
@@ -452,10 +386,6 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
-/**
- * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
- * on the type
- */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_create_buffers *create)
{
@@ -466,10 +396,6 @@ int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
-/**
- * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
- * the type
- */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_exportbuffer *eb)
{
@@ -479,9 +405,7 @@ int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
-/**
- * v4l2_m2m_streamon() - turn on streaming for a video queue
- */
+
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
@@ -497,9 +421,6 @@ int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
-/**
- * v4l2_m2m_streamoff() - turn off streaming for a video queue
- */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
@@ -540,14 +461,6 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
-/**
- * v4l2_m2m_poll() - poll replacement, for destination buffers only
- *
- * Call from the driver's poll() function. Will poll both queues. If a buffer
- * is available to dequeue (with dqbuf) from the source queue, this will
- * indicate that a non-blocking write can be performed, while read will be
- * returned in case of the destination queue.
- */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
@@ -626,16 +539,6 @@ end:
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
-/**
- * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
- *
- * Call from driver's mmap() function. Will handle mmap() for both queues
- * seamlessly for videobuffer, which will receive normal per-queue offsets and
- * proper videobuf queue pointers. The differentiation is made outside videobuf
- * by adding a predefined offset to buffers from one of the queues and
- * subtracting it before passing it back to videobuf. Only drivers (and
- * thus applications) receive modified offsets.
- */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct vm_area_struct *vma)
{
@@ -653,11 +556,6 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
-/**
- * v4l2_m2m_init() - initialize per-driver m2m data
- *
- * Usually called from driver's probe() function.
- */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
@@ -679,26 +577,12 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
-/**
- * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
- *
- * Usually called from driver's remove() function.
- */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
-/**
- * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
- * @priv - driver's instance private data
- * @m2m_dev - a previously initialized m2m_dev struct
- * @vq_init - a callback for queue type-specific initialization function to be
- * used for initializing videobuf_queues
- *
- * Usually called from driver's open() function.
- */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
void *drv_priv,
int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
@@ -744,11 +628,6 @@ err:
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
-/**
- * v4l2_m2m_ctx_release() - release m2m context
- *
- * Usually called from driver's release() function.
- */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
/* wait until the current context is dequeued from job_queue */
@@ -761,11 +640,6 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
-/**
- * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
- *
- * Call from buf_queue(), videobuf_queue_ops callback.
- */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_v4l2_buffer *vbuf)
{
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f300f060b3f3..1db0af6c7f94 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
{
unsigned long first, last;
int err, rw = 0;
+ unsigned int flags = FOLL_FORCE;
dma->direction = direction;
switch (dma->direction) {
@@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
if (NULL == dma->pages)
return -ENOMEM;
+ if (rw == READ)
+ flags |= FOLL_WRITE;
+
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
- rw == READ, 1, /* force */
- dma->pages, NULL);
+ flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
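
[Editor's note: get_user_pages() lost its separate write/force booleans in favour of a single gup_flags argument, which is what the FOLL_FORCE/FOLL_WRITE handling above implements. A hedged sketch of the new calling convention with a hypothetical example_pin_user_buffer():]

#include <linux/mm.h>

static long example_pin_user_buffer(unsigned long data, int nr_pages,
				    struct page **pages, int writable)
{
	unsigned int flags = FOLL_FORCE;

	if (writable)			/* the device writes to the pages */
		flags |= FOLL_WRITE;

	/* old form: get_user_pages(start, nr, write, force, pages, vmas) */
	return get_user_pages(data & PAGE_MASK, nr_pages, flags,
			      pages, NULL);
}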
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index ca8ffeb56d72..21900202ff83 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -198,6 +198,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
void *mem_priv;
int plane;
+ int ret = -ENOMEM;
/*
* Allocate memory for all planes in this buffer
@@ -209,8 +210,11 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
mem_priv = call_ptr_memop(vb, alloc,
q->alloc_devs[plane] ? : q->dev,
q->dma_attrs, size, dma_dir, q->gfp_flags);
- if (IS_ERR_OR_NULL(mem_priv))
+ if (IS_ERR(mem_priv)) {
+ if (mem_priv)
+ ret = PTR_ERR(mem_priv);
goto free;
+ }
/* Associate allocator private data with this plane */
vb->planes[plane].mem_priv = mem_priv;
@@ -224,7 +228,7 @@ free:
vb->planes[plane - 1].mem_priv = NULL;
}
- return -ENOMEM;
+ return ret;
}
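
[Editor's note: since the vb2 allocators return ERR_PTR() values rather than NULL, the caller can propagate the allocator's errno instead of collapsing every failure into -ENOMEM; the __qbuf_userptr change below follows the same reasoning. A minimal sketch of the pattern with hypothetical example_alloc()/example_caller():]

#include <linux/err.h>
#include <linux/slab.h>

static void *example_alloc(size_t size)
{
	void *mem = kzalloc(size, GFP_KERNEL);

	if (!mem)
		return ERR_PTR(-ENOMEM);	/* encode errno in pointer */
	return mem;
}

static int example_caller(size_t size)
{
	void *mem = example_alloc(size);

	if (IS_ERR(mem))		/* was IS_ERR_OR_NULL() + -ENOMEM */
		return PTR_ERR(mem);
	kfree(mem);
	return 0;
}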
/**
@@ -524,10 +528,6 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
return 0;
}
-/**
- * vb2_buffer_in_use() - return true if the buffer is in use and
- * the queue cannot be freed (by the means of REQBUFS(0)) call
- */
bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
unsigned int plane;
@@ -560,16 +560,6 @@ static bool __buffers_in_use(struct vb2_queue *q)
return false;
}
-/**
- * vb2_core_querybuf() - query video buffer information
- * @q: videobuf queue
- * @index: id number of the buffer
- * @pb: buffer struct passed from userspace
- *
- * Should be called from vidioc_querybuf ioctl handler in driver.
- * The passed buffer should have been verified.
- * This function fills the relevant information for the userspace.
- */
void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
{
call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
@@ -616,10 +606,6 @@ static int __verify_dmabuf_ops(struct vb2_queue *q)
return 0;
}
-/**
- * vb2_verify_memory_type() - Check whether the memory type and buffer type
- * passed to a buffer operation are compatible with the queue.
- */
int vb2_verify_memory_type(struct vb2_queue *q,
enum vb2_memory memory, unsigned int type)
{
@@ -666,30 +652,6 @@ int vb2_verify_memory_type(struct vb2_queue *q,
}
EXPORT_SYMBOL(vb2_verify_memory_type);
-/**
- * vb2_core_reqbufs() - Initiate streaming
- * @q: videobuf2 queue
- * @memory: memory type
- * @count: requested buffer count
- *
- * Should be called from vidioc_reqbufs ioctl handler of a driver.
- * This function:
- * 1) verifies streaming parameters passed from the userspace,
- * 2) sets up the queue,
- * 3) negotiates number of buffers and planes per buffer with the driver
- * to be used during streaming,
- * 4) allocates internal buffer structures (struct vb2_buffer), according to
- * the agreed parameters,
- * 5) for MMAP memory type, allocates actual video memory, using the
- * memory handling/allocation routines provided during queue initialization
- *
- * If req->count is 0, all the memory will be freed instead.
- * If the queue has been allocated previously (by a previous vb2_reqbufs) call
- * and the queue is not busy, memory will be reallocated.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_reqbufs handler in driver.
- */
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int *count)
{
@@ -815,22 +777,6 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
-/**
- * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
- * @q: videobuf2 queue
- * @memory: memory type
- * @count: requested buffer count
- * @parg: parameter passed to device driver
- *
- * Should be called from vidioc_create_bufs ioctl handler of a driver.
- * This function:
- * 1) verifies parameter sanity
- * 2) calls the .queue_setup() queue operation
- * 3) performs any necessary memory allocations
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_create_bufs handler in driver.
- */
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
unsigned int *count, unsigned requested_planes,
const unsigned requested_sizes[])
@@ -920,14 +866,6 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
-/**
- * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
- * @vb: vb2_buffer to which the plane in question belongs to
- * @plane_no: plane number for which the address is to be returned
- *
- * This function returns a kernel virtual address of a given plane if
- * such a mapping exist, NULL otherwise.
- */
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
@@ -938,17 +876,6 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
-/**
- * vb2_plane_cookie() - Return allocator specific cookie for the given plane
- * @vb: vb2_buffer to which the plane in question belongs to
- * @plane_no: plane number for which the cookie is to be returned
- *
- * This function returns an allocator specific cookie for a given plane if
- * available, NULL otherwise. The allocator should provide some simple static
- * inline function, which would convert this cookie to the allocator specific
- * type that can be used directly by the driver to access the buffer. This can
- * be for example physical address, pointer to scatter list or IOMMU mapping.
- */
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
@@ -958,26 +885,6 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
-/**
- * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
- * @vb: vb2_buffer returned from the driver
- * @state: either VB2_BUF_STATE_DONE if the operation finished successfully,
- * VB2_BUF_STATE_ERROR if the operation finished with an error or
- * VB2_BUF_STATE_QUEUED if the driver wants to requeue buffers.
- * If start_streaming fails then it should return buffers with state
- * VB2_BUF_STATE_QUEUED to put them back into the queue.
- *
- * This function should be called by the driver after a hardware operation on
- * a buffer is finished and the buffer may be returned to userspace. The driver
- * cannot use this buffer anymore until it is queued back to it by videobuf
- * by the means of buf_queue callback. Only buffers previously queued to the
- * driver by buf_queue can be passed to this function.
- *
- * While streaming a buffer can only be returned in state DONE or ERROR.
- * The start_streaming op can also return them in case the DMA engine cannot
- * be started for some reason. In that case the buffers should be returned with
- * state QUEUED.
- */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
struct vb2_queue *q = vb->vb2_queue;
@@ -1036,18 +943,6 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
-/**
- * vb2_discard_done() - discard all buffers marked as DONE
- * @q: videobuf2 queue
- *
- * This function is intended to be used with suspend/resume operations. It
- * discards all 'done' buffers as they would be too old to be requested after
- * resume.
- *
- * Drivers must stop the hardware and synchronize with interrupt handlers and/or
- * delayed works before calling this function to make sure no buffer will be
- * touched by the driver and/or hardware.
- */
void vb2_discard_done(struct vb2_queue *q)
{
struct vb2_buffer *vb;
@@ -1136,10 +1031,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
q->alloc_devs[plane] ? : q->dev,
planes[plane].m.userptr,
planes[plane].length, dma_dir);
- if (IS_ERR_OR_NULL(mem_priv)) {
+ if (IS_ERR(mem_priv)) {
dprintk(1, "failed acquiring userspace "
"memory for plane %d\n", plane);
- ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
+ ret = PTR_ERR(mem_priv);
goto err;
}
vb->planes[plane].mem_priv = mem_priv;
@@ -1228,8 +1123,10 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
planes[plane].length = dbuf->size;
if (planes[plane].length < vb->planes[plane].min_length) {
- dprintk(1, "invalid dmabuf length for plane %d\n",
- plane);
+ dprintk(1, "invalid dmabuf length %u for plane %d, "
+ "minimum length %u\n",
+ planes[plane].length, plane,
+ vb->planes[plane].min_length);
dma_buf_put(dbuf);
ret = -EINVAL;
goto err;
@@ -1271,9 +1168,10 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
vb->planes[plane].mem_priv = mem_priv;
}
- /* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but
- * really we want to do this just before the DMA, not while queueing
- * the buffer(s)..
+ /*
+ * This pins the buffer(s) with dma_buf_map_attachment(). It's done
+ * here, while queueing the buffer(s), instead of just before the DMA,
+ * so userspace knows sooner rather than later if the dma-buf map fails.
*/
for (plane = 0; plane < vb->num_planes; ++plane) {
ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
@@ -1377,22 +1275,6 @@ static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
return ret;
}
-/**
- * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
- * to the kernel
- * @q: videobuf2 queue
- * @index: id number of the buffer
- * @pb: buffer structure passed from userspace to vidioc_prepare_buf
- * handler in driver
- *
- * Should be called from vidioc_prepare_buf ioctl handler of a driver.
- * The passed buffer should have been verified.
- * This function calls buf_prepare callback in the driver (if provided),
- * in which driver-specific buffer initialization can be performed,
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_prepare_buf handler in driver.
- */
int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
{
struct vb2_buffer *vb;
@@ -1481,24 +1363,6 @@ static int vb2_start_streaming(struct vb2_queue *q)
return ret;
}
-/**
- * vb2_core_qbuf() - Queue a buffer from userspace
- * @q: videobuf2 queue
- * @index: id number of the buffer
- * @pb: buffer structure passed from userspace to vidioc_qbuf handler
- * in driver
- *
- * Should be called from vidioc_qbuf ioctl handler of a driver.
- * The passed buffer should have been verified.
- * This function:
- * 1) if necessary, calls buf_prepare callback in the driver (if provided), in
- * which driver-specific buffer initialization can be performed,
- * 2) if streaming is on, queues the buffer in driver by the means of buf_queue
- * callback for processing.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_qbuf handler in driver.
- */
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
{
struct vb2_buffer *vb;
@@ -1679,15 +1543,6 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
return ret;
}
-/**
- * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
- * @q: videobuf2 queue
- *
- * This function will wait until all buffers that have been given to the driver
- * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
- * wait_prepare, wait_finish pair. It is intended to be called with all locks
- * taken, for example from stop_streaming() callback.
- */
int vb2_wait_for_all_buffers(struct vb2_queue *q)
{
if (!q->streaming) {
@@ -1725,27 +1580,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
}
}
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q: videobuf2 queue
- * @pb: buffer structure passed from userspace to vidioc_dqbuf handler
- * in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- * buffers ready for dequeuing are present. Normally the driver
- * would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * The passed buffer should have been verified.
- * This function:
- * 1) calls buf_finish callback in the driver (if provided), in which
- * driver can perform any additional operations that may be required before
- * returning the buffer to userspace, such as cache sync,
- * 2) the buffer struct members are filled with relevant information for
- * the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
bool nonblocking)
{
@@ -1909,19 +1743,6 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);
-/**
- * vb2_queue_error() - signal a fatal error on the queue
- * @q: videobuf2 queue
- *
- * Flag that a fatal unrecoverable error has occurred and wake up all processes
- * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
- * buffers will return -EIO.
- *
- * The error flag will be cleared when cancelling the queue, either from
- * vb2_streamoff or vb2_queue_release. Drivers should thus not call this
- * function before starting the stream, otherwise the error flag will remain set
- * until the queue is released when closing the device node.
- */
void vb2_queue_error(struct vb2_queue *q)
{
q->error = 1;
@@ -1984,19 +1805,6 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
return -EINVAL;
}
-/**
- * vb2_core_expbuf() - Export a buffer as a file descriptor
- * @q: videobuf2 queue
- * @fd: file descriptor associated with DMABUF (set by driver) *
- * @type: buffer type
- * @index: id number of the buffer
- * @plane: index of the plane to be exported, 0 for single plane queues
- * @flags: flags for newly created file, currently only O_CLOEXEC is
- * supported, refer to manual of open syscall for more details
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_expbuf handler in driver.
- */
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
unsigned int index, unsigned int plane, unsigned int flags)
{
@@ -2068,25 +1876,6 @@ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
}
EXPORT_SYMBOL_GPL(vb2_core_expbuf);
-/**
- * vb2_mmap() - map video buffers into application address space
- * @q: videobuf2 queue
- * @vma: vma passed to the mmap file operation handler in the driver
- *
- * Should be called from mmap file operation handler of a driver.
- * This function maps one plane of one of the available video buffers to
- * userspace. To map whole video memory allocated on reqbufs, this function
- * has to be called once per each plane per each buffer previously allocated.
- *
- * When the userspace application calls mmap, it passes to it an offset returned
- * to it earlier by the means of vidioc_querybuf handler. That offset acts as
- * a "cookie", which is then used to identify the plane to be mapped.
- * This function finds a plane with a matching offset and a mapping is performed
- * by the means of a provided memory operation.
- *
- * The return values from this function are intended to be directly returned
- * from the mmap handler in driver.
- */
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
@@ -2188,17 +1977,6 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif
-/**
- * vb2_core_queue_init() - initialize a videobuf2 queue
- * @q: videobuf2 queue; this structure should be allocated in driver
- *
- * The vb2_queue structure should be allocated by the driver. The driver is
- * responsible of clearing it's content and setting initial values for some
- * required entries before calling this function.
- * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
- * to the struct vb2_queue description in include/media/videobuf2-core.h
- * for more information.
- */
int vb2_core_queue_init(struct vb2_queue *q)
{
/*
@@ -2228,14 +2006,6 @@ EXPORT_SYMBOL_GPL(vb2_core_queue_init);
static int __vb2_init_fileio(struct vb2_queue *q, int read);
static int __vb2_cleanup_fileio(struct vb2_queue *q);
-/**
- * vb2_core_queue_release() - stop streaming, release the queue and free memory
- * @q: videobuf2 queue
- *
- * This function stops streaming and performs necessary clean ups, including
- * freeing video buffer memory. The driver is responsible for freeing
- * the vb2_queue structure itself.
- */
void vb2_core_queue_release(struct vb2_queue *q)
{
__vb2_cleanup_fileio(q);
@@ -2246,22 +2016,6 @@ void vb2_core_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
-/**
- * vb2_core_poll() - implements poll userspace operation
- * @q: videobuf2 queue
- * @file: file argument passed to the poll file operation handler
- * @wait: wait argument passed to the poll file operation handler
- *
- * This function implements poll file operation handler for a driver.
- * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
- * be informed that the file descriptor of a video device is available for
- * reading.
- * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
- * will be reported as available for writing.
- *
- * The return values from this function are intended to be directly returned
- * from poll handler in driver.
- */
unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
poll_table *wait)
{
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 59fa204b15f3..fb6a177be461 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -141,6 +141,9 @@ static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
{
struct vb2_dc_buf *buf;
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -493,6 +496,9 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
return ERR_PTR(-EINVAL);
}
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -673,6 +679,9 @@ static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
if (dbuf->size < size)
return ERR_PTR(-EFAULT);
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index bd82d709ee82..ecff8f492c4f 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -104,11 +104,12 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
int ret;
int num_pages;
- if (WARN_ON(dev == NULL))
- return NULL;
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
- return NULL;
+ return ERR_PTR(-ENOMEM);
buf->vaddr = NULL;
buf->dma_dir = dma_dir;
@@ -166,7 +167,7 @@ fail_pages_alloc:
kfree(buf->pages);
fail_pages_array_alloc:
kfree(buf);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
static void vb2_dma_sg_put(void *buf_priv)
@@ -224,9 +225,12 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
struct sg_table *sgt;
struct frame_vector *vec;
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
- return NULL;
+ return ERR_PTR(-ENOMEM);
buf->vaddr = NULL;
buf->dev = dev;
@@ -266,7 +270,7 @@ userptr_fail_sgtable:
vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
kfree(buf);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
/*
@@ -606,6 +610,9 @@ static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
struct vb2_dma_sg_buf *buf;
struct dma_buf_attachment *dba;
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
if (dbuf->size < size)
return ERR_PTR(-EFAULT);
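
The hunks above convert the videobuf2 allocators from returning NULL on failure to returning ERR_PTR() codes, so callers can tell -EINVAL (no device) apart from -ENOMEM. A minimal, runnable sketch of the ERR_PTR encoding convention these helpers rely on (the helper names below are illustrative stand-ins for the kernel's err.h macros):

#include <stdio.h>
#include <stdlib.h>

/* Error codes live in the top 4095 values of the address space,
 * so a pointer can carry either a valid address or a -errno. */
static inline void *err_ptr(long err)      { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

static void *alloc_buf(size_t size)
{
	void *p;

	if (size == 0)
		return err_ptr(-22);		/* -EINVAL */
	p = malloc(size);
	return p ? p : err_ptr(-12);		/* -ENOMEM */
}

int main(void)
{
	void *buf = alloc_buf(0);

	if (is_err(buf))
		printf("alloc failed: %ld\n", ptr_err(buf));	/* -22 */
	else
		free(buf);
	return 0;
}
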
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 3c3b517f1d1c..1cd322e939c7 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
unsigned long first, last;
unsigned long nr;
struct frame_vector *vec;
+ unsigned int flags = FOLL_FORCE;
+
+ if (write)
+ flags |= FOLL_WRITE;
first = start >> PAGE_SHIFT;
last = (start + length - 1) >> PAGE_SHIFT;
@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
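
vb2_create_framevec() now builds a single gup-style flags word up front instead of passing separate write/force booleans down to get_vaddr_frames(). A hedged kernel-style sketch of the same flag construction (FOLL_* as defined in include/linux/mm.h of this era):

/* Sketch: derive the gup_flags bitmask from the caller's intent.
 * vb2 always uses forced access; write access is optional. */
static unsigned int vb2_gup_flags(bool write)
{
	unsigned int flags = FOLL_FORCE;

	if (write)
		flags |= FOLL_WRITE;
	return flags;
}
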
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 9cfbb6e4bc28..52ef8833f6b6 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -483,13 +483,6 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
}
EXPORT_SYMBOL(vb2_querybuf);
-/**
- * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies
- * the memory and type values.
- * @q: videobuf2 queue
- * @req: struct passed from userspace to vidioc_reqbufs handler
- * in driver
- */
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
int ret = vb2_verify_memory_type(q, req->memory, req->type);
@@ -498,21 +491,6 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
-/**
- * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_prepare_buf
- * handler in driver
- *
- * Should be called from vidioc_prepare_buf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_prepare callback in the driver (if provided), in which
- * driver-specific buffer initialization can be performed,
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_prepare_buf handler in driver.
- */
int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
{
int ret;
@@ -528,13 +506,6 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);
-/**
- * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies
- * the memory and type values.
- * @q: videobuf2 queue
- * @create: creation parameters, passed from userspace to vidioc_create_bufs
- * handler in driver
- */
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
unsigned requested_planes = 1;
@@ -586,23 +557,6 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);
-/**
- * vb2_qbuf() - Queue a buffer from userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_qbuf handler
- * in driver
- *
- * Should be called from vidioc_qbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
- * which driver-specific buffer initialization can be performed,
- * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
- * callback for processing.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_qbuf handler in driver.
- */
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
int ret;
@@ -617,27 +571,6 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
-/**
- * vb2_dqbuf() - Dequeue a buffer to the userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_dqbuf handler
- * in driver
- * @nonblocking: if true, this call will not sleep waiting for a buffer if no
- * buffers ready for dequeuing are present. Normally the driver
- * would be passing (file->f_flags & O_NONBLOCK) here
- *
- * Should be called from vidioc_dqbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_finish callback in the driver (if provided), in which
- * driver can perform any additional operations that may be required before
- * returning the buffer to userspace, such as cache sync,
- * 3) the buffer struct members are filled with relevant information for
- * the userspace.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_dqbuf handler in driver.
- */
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
int ret;
@@ -664,19 +597,6 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
-/**
- * vb2_streamon - start streaming
- * @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamon handler
- *
- * Should be called from vidioc_streamon handler of a driver.
- * This function:
- * 1) verifies current state
- * 2) passes any previously queued buffers to the driver and starts streaming
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_streamon handler in the driver.
- */
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
if (vb2_fileio_is_active(q)) {
@@ -687,21 +607,6 @@ int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
}
EXPORT_SYMBOL_GPL(vb2_streamon);
-/**
- * vb2_streamoff - stop streaming
- * @q: videobuf2 queue
- * @type: type argument passed from userspace to vidioc_streamoff handler
- *
- * Should be called from vidioc_streamoff handler of a driver.
- * This function:
- * 1) verifies current state,
- * 2) stop streaming and dequeues any queued buffers, including those previously
- * passed to the driver (after waiting for the driver to finish).
- *
- * This call can be used for pausing playback.
- * The return values from this function are intended to be directly returned
- * from vidioc_streamoff handler in the driver
- */
int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
if (vb2_fileio_is_active(q)) {
@@ -712,15 +617,6 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
}
EXPORT_SYMBOL_GPL(vb2_streamoff);
-/**
- * vb2_expbuf() - Export a buffer as a file descriptor
- * @q: videobuf2 queue
- * @eb: export buffer structure passed from userspace to vidioc_expbuf
- * handler in driver
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_expbuf handler in driver.
- */
int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
@@ -728,17 +624,6 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
}
EXPORT_SYMBOL_GPL(vb2_expbuf);
-/**
- * vb2_queue_init() - initialize a videobuf2 queue
- * @q: videobuf2 queue; this structure should be allocated in driver
- *
- * The vb2_queue structure should be allocated by the driver. The driver is
- * responsible of clearing it's content and setting initial values for some
- * required entries before calling this function.
- * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
- * to the struct vb2_queue description in include/media/videobuf2-core.h
- * for more information.
- */
int vb2_queue_init(struct vb2_queue *q)
{
/*
@@ -779,39 +664,12 @@ int vb2_queue_init(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_queue_init);
-/**
- * vb2_queue_release() - stop streaming, release the queue and free memory
- * @q: videobuf2 queue
- *
- * This function stops streaming and performs necessary clean ups, including
- * freeing video buffer memory. The driver is responsible for freeing
- * the vb2_queue structure itself.
- */
void vb2_queue_release(struct vb2_queue *q)
{
vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
-/**
- * vb2_poll() - implements poll userspace operation
- * @q: videobuf2 queue
- * @file: file argument passed to the poll file operation handler
- * @wait: wait argument passed to the poll file operation handler
- *
- * This function implements poll file operation handler for a driver.
- * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
- * be informed that the file descriptor of a video device is available for
- * reading.
- * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
- * will be reported as available for writing.
- *
- * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
- * pending events.
- *
- * The return values from this function are intended to be directly returned
- * from poll handler in driver.
- */
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
struct video_device *vfd = video_devdata(file);
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index c2820a6e164d..ab3227b75c84 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -41,7 +41,7 @@ static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
if (!buf)
- return NULL;
+ return ERR_PTR(-ENOMEM);
buf->size = size;
buf->vaddr = vmalloc_user(buf->size);
@@ -53,7 +53,7 @@ static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
if (!buf->vaddr) {
pr_debug("vmalloc of size %ld failed\n", buf->size);
kfree(buf);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
atomic_inc(&buf->refcount);
@@ -77,17 +77,20 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
struct vb2_vmalloc_buf *buf;
struct frame_vector *vec;
int n_pages, offset, i;
+ int ret = -ENOMEM;
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
- return NULL;
+ return ERR_PTR(-ENOMEM);
buf->dma_dir = dma_dir;
offset = vaddr & ~PAGE_MASK;
buf->size = size;
vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
- if (IS_ERR(vec))
+ if (IS_ERR(vec)) {
+ ret = PTR_ERR(vec);
goto fail_pfnvec_create;
+ }
buf->vec = vec;
n_pages = frame_vector_count(vec);
if (frame_vector_to_pages(vec) < 0) {
@@ -117,7 +120,7 @@ fail_map:
fail_pfnvec_create:
kfree(buf);
- return NULL;
+ return ERR_PTR(ret);
}
static void vb2_vmalloc_put_userptr(void *buf_priv)
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index f87ad6f5d2dc..b5ed3bd082b5 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -410,10 +410,7 @@ static int at91sam9_ebi_init(struct at91_ebi *ebi)
field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
- if (IS_ERR(fields->mode))
- return PTR_ERR(fields->mode);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fields->mode);
}
static int sama5d3_ebi_init(struct at91_ebi *ebi)
@@ -441,10 +438,7 @@ static int sama5d3_ebi_init(struct at91_ebi *ebi)
field.reg = SAMA5_SMC_MODE(SAMA5_SMC_GENERIC);
fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
- if (IS_ERR(fields->mode))
- return PTR_ERR(fields->mode);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fields->mode);
}
static int at91_ebi_dev_setup(struct at91_ebi *ebi, struct device_node *np,
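
PTR_ERR_OR_ZERO() used above folds the common "return the error code if this is an ERR_PTR, otherwise return 0" tail into one expression. A self-contained sketch of its open-coded equivalent (the real macro lives in include/linux/err.h):

static inline int ptr_err_or_zero(const void *ptr)
{
	/* IS_ERR(): error codes occupy the last 4095 pointer values */
	if ((unsigned long)ptr >= (unsigned long)-4095)
		return (int)(long)ptr;		/* PTR_ERR() */
	return 0;
}
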
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index 53a341f3b305..12080b05e3e6 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -53,12 +53,10 @@ static const struct of_device_id atmel_ramc_of_match[] = {
static int atmel_ramc_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct at91_ramc_caps *caps;
struct clk *clk;
- match = of_match_device(atmel_ramc_of_match, &pdev->dev);
- caps = match->data;
+ caps = of_device_get_match_data(&pdev->dev);
if (caps->has_ddrck) {
clk = devm_clk_get(&pdev->dev, "ddrck");
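
of_device_get_match_data() replaces the two-step of_match_device() lookup plus match->data dereference. A runnable userspace analogue of what the helper does (a simplified sketch, not the kernel implementation):

#include <stddef.h>
#include <string.h>

struct of_device_id { const char *compatible; const void *data; };

/* Userspace analogue: scan a match table for the device's
 * compatible string and return the associated .data pointer. */
static const void *get_match_data(const struct of_device_id *table,
				  const char *compatible)
{
	for (; table->compatible; table++)
		if (strcmp(table->compatible, compatible) == 0)
			return table->data;
	return NULL;
}
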
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index f00f3e742265..5457c361ad58 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -350,8 +350,8 @@ static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
return (time_ps + tick_ps - 1) / tick_ps;
}
-unsigned int gpmc_clk_ticks_to_ns(unsigned ticks, int cs,
- enum gpmc_clk_domain cd)
+static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
+ enum gpmc_clk_domain cd)
{
return ticks * gpmc_get_clk_period(cs, cd) / 1000;
}
@@ -2143,9 +2143,7 @@ err_child_fail:
ret = -ENODEV;
err_cs:
- if (waitpin_desc)
- gpiochip_free_own_desc(waitpin_desc);
-
+ gpiochip_free_own_desc(waitpin_desc);
err:
gpmc_cs_free(cs);
@@ -2265,7 +2263,7 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc)
gpmc->gpio_chip.get = gpmc_gpio_get;
gpmc->gpio_chip.base = -1;
- ret = gpiochip_add(&gpmc->gpio_chip);
+ ret = devm_gpiochip_add_data(gpmc->dev, &gpmc->gpio_chip, NULL);
if (ret < 0) {
dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
return ret;
@@ -2274,11 +2272,6 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc)
return 0;
}
-static void gpmc_gpio_exit(struct gpmc_device *gpmc)
-{
- gpiochip_remove(&gpmc->gpio_chip);
-}
-
static int gpmc_probe(struct platform_device *pdev)
{
int rc;
@@ -2365,15 +2358,13 @@ static int gpmc_probe(struct platform_device *pdev)
rc = gpmc_setup_irq(gpmc);
if (rc) {
dev_err(gpmc->dev, "gpmc_setup_irq failed\n");
- goto setup_irq_failed;
+ goto gpio_init_failed;
}
gpmc_probe_dt_children(pdev);
return 0;
-setup_irq_failed:
- gpmc_gpio_exit(gpmc);
gpio_init_failed:
gpmc_mem_exit();
pm_runtime_put_sync(&pdev->dev);
@@ -2387,7 +2378,6 @@ static int gpmc_remove(struct platform_device *pdev)
struct gpmc_device *gpmc = platform_get_drvdata(pdev);
gpmc_free_irq(gpmc);
- gpmc_gpio_exit(gpmc);
gpmc_mem_exit();
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index d34bc3530385..2e3cf012ef48 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
int rc;
if (!host->req) {
+ pm_runtime_get_sync(ms_dev(host));
do {
rc = memstick_next_req(msh, &host->req);
dev_dbg(ms_dev(host), "next req %d\n", rc);
@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
host->req->error);
}
} while (!rc);
+ pm_runtime_put(ms_dev(host));
}
}
@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
__func__, param, value);
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
}
out:
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(ms_dev(host));
/* power-on delay */
if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
int err;
for (;;) {
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
/* Check pending MS card changes */
@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
}
poll_again:
+ pm_runtime_put(ms_dev(host));
if (host->eject)
break;
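
Each added pm_runtime_get_sync()/pm_runtime_put() pair keeps the USB reader resumed only while a request, parameter change, or polling iteration actually touches the hardware. A kernel-style sketch of the balanced pattern (the wrapper function and its arguments are illustrative):

static void do_hw_op(struct device *dev, struct mutex *lock)
{
	pm_runtime_get_sync(dev);  /* resume the device and hold a ref */
	mutex_lock(lock);
	/* ... access registers safely here ... */
	mutex_unlock(lock);
	pm_runtime_put(dev);       /* drop the ref, allow autosuspend */
}
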
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 3228fd182a99..9ff243970e93 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -123,19 +123,6 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
.properties = apl_i2c_properties,
};
-static const struct intel_lpss_platform_info kbl_info = {
- .clk_rate = 120000000,
-};
-
-static const struct intel_lpss_platform_info kbl_uart_info = {
- .clk_rate = 120000000,
- .clk_con_id = "baudclk",
-};
-
-static const struct intel_lpss_platform_info kbl_i2c_info = {
- .clk_rate = 133000000,
-};
-
static const struct pci_device_id intel_lpss_pci_ids[] = {
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
@@ -207,15 +194,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
/* KBL-H */
- { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info },
- { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info },
- { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info },
- { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info },
- { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 41b113875d64..70c646b0097d 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev)
for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
- /* Put the device into reset state */
- writel(0, lpss->priv + LPSS_PRIV_RESETS);
-
return 0;
}
EXPORT_SYMBOL_GPL(intel_lpss_suspend);
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 43e54b7e908f..f9a8c5203873 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -86,6 +86,7 @@ enum bxtwc_irqs_level2 {
BXTWC_THRM2_IRQ,
BXTWC_BCU_IRQ,
BXTWC_ADC_IRQ,
+ BXTWC_USBC_IRQ,
BXTWC_CHGR0_IRQ,
BXTWC_CHGR1_IRQ,
BXTWC_GPIO0_IRQ,
@@ -111,7 +112,8 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = {
REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff),
REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f),
REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff),
- REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f),
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f),
REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f),
REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff),
REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f),
@@ -146,7 +148,7 @@ static struct resource adc_resources[] = {
};
static struct resource usbc_resources[] = {
- DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"),
+ DEFINE_RES_IRQ(BXTWC_USBC_IRQ),
};
static struct resource charger_resources[] = {
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 3ac486a597f3..c57e407020f1 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
clones[i]);
}
+ put_device(dev);
+
return 0;
}
EXPORT_SYMBOL(mfd_clone_cell);
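
The added put_device() fixes a reference leak: the device lookup earlier in mfd_clone_cell() returns its result with an elevated refcount, which the function never dropped. Kernel-style sketch of the rule (assuming a bus_find_device_by_name()-style lookup; the variable names are illustrative):

struct device *dev;

dev = bus_find_device_by_name(&platform_bus_type, NULL, cell_name);
if (!dev)
	return -ENODEV;
/* ... use dev ... */
put_device(dev);	/* balance the reference taken by the lookup */
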
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index cfdae8a3d779..b0c7bcdaf5df 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -851,6 +851,8 @@ static int stmpe_reset(struct stmpe *stmpe)
if (ret < 0)
return ret;
+ msleep(10);
+
timeout = jiffies + msecs_to_jiffies(100);
while (time_before(jiffies, timeout)) {
ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]);
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index f3d34b941f85..2e5233b60971 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if (ctx->status == STARTED)
goto out; /* already started */
+ /*
+ * Increment the mapped context count for the adapter. This also
+ * checks whether the adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc)
+ goto out;
+
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
@@ -239,7 +247,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
cxl_ctx_get();
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+ put_pid(ctx->glpid);
put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
+ cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
goto out;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index c466ee2b0c97..5e506c19108a 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx)
put_pid(ctx->glpid);
cxl_ctx_put();
+
+ /* Decrease the attached context count on the adapter */
+ cxl_adapter_context_put(ctx->afu->adapter);
return 0;
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 344a0ff8f8c7..a144073593fa 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -162,7 +162,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_SPAP_V 0x0000000000000001ULL
/****** CXL_PSL_Control ****************************************************/
-#define CXL_PSL_Control_tb 0x0000000000000001ULL
+#define CXL_PSL_Control_tb (0x1ull << (63-63))
+#define CXL_PSL_Control_Fr (0x1ull << (63-31))
+#define CXL_PSL_Control_Fs_MASK (0x3ull << (63-29))
+#define CXL_PSL_Control_Fs_Complete (0x3ull << (63-29))
/****** CXL_PSL_DLCNTL *****************************************************/
#define CXL_PSL_DLCNTL_D (0x1ull << (63-28))
@@ -615,6 +618,14 @@ struct cxl {
bool perst_select_user;
bool perst_same_image;
bool psl_timebase_synced;
+
+ /*
+ * Number of contexts mapped onto this card. Possible values are:
+ * >0: Number of contexts mapped and new ones can be mapped.
+ * 0: No active contexts and new ones can be mapped.
+ * -1: No contexts mapped and new ones cannot be mapped.
+ */
+ atomic_t contexts_num;
};
int cxl_pci_alloc_one_irq(struct cxl *adapter);
@@ -854,6 +865,7 @@ int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler,
int cxl_check_error(struct cxl_afu *afu);
int cxl_afu_slbia(struct cxl_afu *afu);
int cxl_tlb_slb_invalidate(struct cxl *adapter);
+int cxl_data_cache_flush(struct cxl *adapter);
int cxl_afu_disable(struct cxl_afu *afu);
int cxl_psl_purge(struct cxl_afu *afu);
@@ -940,4 +952,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev);
/* decode AFU error bits in the PSL register PSL_SERR_An */
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
+
+/*
+ * Increments the number of attached contexts on an adapter.
+ * If the adapter_context_lock is taken, returns -EBUSY.
+ */
+int cxl_adapter_context_get(struct cxl *adapter);
+
+/* Decrements the number of attached contexts on an adapter */
+void cxl_adapter_context_put(struct cxl *adapter);
+
+/* If there are no active contexts, prevent new ones from being attached */
+int cxl_adapter_context_lock(struct cxl *adapter);
+
+/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
+void cxl_adapter_context_unlock(struct cxl *adapter);
+
#endif
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 5fb9894b157f..77080cc5fa0a 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -194,6 +194,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
/*
+ * Increment the mapped context count for the adapter. This also
+ * checks whether the adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc) {
+ afu_release_irqs(ctx, ctx);
+ goto out;
+ }
+
+ /*
* We grab the PID here and not in the file open to allow for the case
* where a process (master, some daemon, etc) has opened the chardev on
* behalf of another process, so the AFU's mm gets bound to the process
@@ -205,11 +215,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
amr))) {
afu_release_irqs(ctx, ctx);
+ cxl_adapter_context_put(ctx->afu->adapter);
+ put_pid(ctx->glpid);
+ put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
goto out;
}
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 9aa58a77a24d..3e102cd6ed91 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
if ((rc = cxl_sysfs_adapter_add(adapter)))
goto err_put1;
+ /* release the context lock as the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
return adapter;
err_put1:
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index d9be23b24aa3..62e0dfb5f15b 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void)
if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
goto err2;
- return adapter;
+ /* start with context lock taken */
+ atomic_set(&adapter->contexts_num, -1);
+ return adapter;
err2:
cxl_remove_adapter_nr(adapter);
err1:
@@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu)
return 0;
}
+int cxl_adapter_context_get(struct cxl *adapter)
+{
+ int rc;
+
+ rc = atomic_inc_unless_negative(&adapter->contexts_num);
+ return rc >= 0 ? 0 : -EBUSY;
+}
+
+void cxl_adapter_context_put(struct cxl *adapter)
+{
+ atomic_dec_if_positive(&adapter->contexts_num);
+}
+
+int cxl_adapter_context_lock(struct cxl *adapter)
+{
+ int rc;
+ /* no active contexts -> contexts_num == 0 */
+ rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
+ return rc ? -EBUSY : 0;
+}
+
+void cxl_adapter_context_unlock(struct cxl *adapter)
+{
+ int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
+
+ /*
+ * When the contexts lock is taken, contexts_num == -1.
+ * If that is not the case, warn and force-reset the lock.
+ * This happens when context_unlock is requested without a
+ * prior context_lock.
+ */
+ if (val != -1) {
+ atomic_set(&adapter->contexts_num, 0);
+ WARN(1, "Adapter context unlocked with %d active contexts",
+ val);
+ }
+}
+
static int __init init_cxl(void)
{
int rc = 0;
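
The context-count lock above packs two states into one atomic: any value >= 0 counts attached contexts, while -1 means "locked, no new attaches". A runnable C11 sketch of the same protocol (illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int contexts = -1;  /* born locked, as in cxl_alloc_adapter() */

static int context_get(void)      /* cf. atomic_inc_unless_negative() */
{
	int v = atomic_load(&contexts);

	while (v >= 0)
		if (atomic_compare_exchange_weak(&contexts, &v, v + 1))
			return 0;
	return -1;                /* locked: -EBUSY in the driver */
}

static int context_lock(void)     /* only succeeds with no users */
{
	int zero = 0;

	return atomic_compare_exchange_strong(&contexts, &zero, -1) ? 0 : -1;
}

int main(void)
{
	printf("get while locked: %d\n", context_get());   /* -1 */
	atomic_store(&contexts, 0);                        /* unlock */
	printf("get after unlock: %d\n", context_get());   /* 0 */
	return 0;
}
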
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index e606fdc4bc9c..a217a74ccc98 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -290,6 +290,37 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter)
return 0;
}
+int cxl_data_cache_flush(struct cxl *adapter)
+{
+ u64 reg;
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+
+ pr_devel("Flushing data cache\n");
+
+ reg = cxl_p1_read(adapter, CXL_PSL_Control);
+ reg |= CXL_PSL_Control_Fr;
+ cxl_p1_write(adapter, CXL_PSL_Control, reg);
+
+ reg = cxl_p1_read(adapter, CXL_PSL_Control);
+ while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
+ return -EBUSY;
+ }
+
+ if (!cxl_ops->link_ok(adapter, NULL)) {
+ dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
+ return -EIO;
+ }
+ cpu_relax();
+ reg = cxl_p1_read(adapter, CXL_PSL_Control);
+ }
+
+ reg &= ~CXL_PSL_Control_Fr;
+ cxl_p1_write(adapter, CXL_PSL_Control, reg);
+ return 0;
+}
+
static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
int rc;
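
cxl_data_cache_flush() follows the standard bounded register poll: request the operation, spin on the status field, and bail out on a jiffies deadline or a dead link. Kernel-style sketch of the core loop (read_reg() and DONE are illustrative stand-ins):

unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

while (read_reg() != DONE) {
	if (time_after_eq(jiffies, timeout))
		return -EBUSY;	/* hardware never signalled completion */
	cpu_relax();		/* be polite while busy-waiting */
}
return 0;
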
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index edc458395f68..ec175ea5dfba 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -460,7 +460,7 @@ int cxl_of_probe(struct platform_device *pdev)
struct device_node *afu_np = NULL;
struct cxl *adapter = NULL;
int ret;
- int slice, slice_ok;
+ int slice = 0, slice_ok = 0;
pr_devel("in %s\n", __func__);
@@ -476,13 +476,13 @@ int cxl_of_probe(struct platform_device *pdev)
}
/* init afu */
- slice_ok = 0;
- for (afu_np = NULL, slice = 0; (afu_np = of_get_next_child(np, afu_np)); slice++) {
+ for_each_child_of_node(np, afu_np) {
if ((ret = cxl_guest_init_afu(adapter, slice, afu_np)))
dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n",
slice, ret);
else
slice_ok++;
+ slice++;
}
if (slice_ok == 0) {
@@ -490,8 +490,6 @@ int cxl_of_probe(struct platform_device *pdev)
adapter->slices = 0;
}
- if (afu_np)
- of_node_put(afu_np);
return 0;
}
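
of_get_next_child() drops the reference on the node it was handed and takes one on the node it returns, so a loop that runs to completion ends with a NULL cursor and nothing left to put; the deleted trailing of_node_put() only mattered for early exits, which this loop does not have. Kernel-style sketch of the two shapes (use() is an illustrative stand-in):

/* Before: manual iteration; 'child' would hold a reference that
 * must be dropped if the loop exited early. */
for (child = NULL; (child = of_get_next_child(np, child)); slice++)
	use(child);

/* After: the helper macro expands to the same iteration, so a loop
 * that runs to completion needs no trailing of_node_put(). */
for_each_child_of_node(np, child)
	use(child);
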
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 6f0c4ac4b649..e96be9ca4e60 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1239,6 +1239,9 @@ int cxl_pci_reset(struct cxl *adapter)
dev_info(&dev->dev, "CXL reset\n");
+ /* the adapter is about to be reset, so ignore errors */
+ cxl_data_cache_flush(adapter);
+
/* pcie_warm_reset requests a fundamental pci reset which includes a
* PERST assert/deassert. PERST triggers a loading of the image
* if "user" or "factory" is selected in sysfs */
@@ -1484,6 +1487,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
+ /* Release the context lock now that the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
return 0;
err:
@@ -1530,11 +1535,11 @@ static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
{
if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
/* Mellanox CX-4 */
- dev_info(&adapter->dev, "Device uses an XSL\n");
+ dev_info(&dev->dev, "Device uses an XSL\n");
adapter->native->sl_ops = &xsl_ops;
adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
} else {
- dev_info(&adapter->dev, "Device uses a PSL\n");
+ dev_info(&dev->dev, "Device uses a PSL\n");
adapter->native->sl_ops = &psl_ops;
}
}
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index b043c20f158f..a8b6d6a635e9 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device,
int val;
rc = sscanf(buf, "%i", &val);
- if ((rc != 1) || (val != 1))
+ if ((rc != 1) || (val != 1 && val != -1))
return -EINVAL;
- if ((rc = cxl_ops->adapter_reset(adapter)))
- return rc;
- return count;
+ /*
+ * Try to take the context-mapping lock, which is only possible
+ * when no contexts are attached to the adapter. Once taken, it
+ * also prevents any new context from being activated.
+ */
+ if (val == 1) {
+ rc = cxl_adapter_context_lock(adapter);
+ if (rc)
+ goto out;
+
+ rc = cxl_ops->adapter_reset(adapter);
+ /* If the reset failed, release the context lock */
+ if (rc)
+ cxl_adapter_context_unlock(adapter);
+
+ } else if (val == -1) {
+ /* Perform a forced adapter reset */
+ rc = cxl_ops->adapter_reset(adapter);
+ }
+
+out:
+ return rc ? rc : count;
}
static ssize_t load_image_on_perst_show(struct device *device,
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 3cdf8e1ca0ad..051b14766ef9 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -593,6 +593,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct at24_data *at24;
int err;
unsigned i, num_addresses;
+ u8 test_byte;
if (client->dev.platform_data) {
chip = *(struct at24_platform_data *)client->dev.platform_data;
@@ -743,6 +744,18 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ i2c_set_clientdata(client, at24);
+
+ /*
+ * Perform a one-byte test read to verify that the
+ * chip is functional.
+ */
+ err = at24_read(at24, 0, &test_byte, 1);
+ if (err) {
+ err = -ENODEV;
+ goto err_clients;
+ }
+
at24->nvmem_config.name = dev_name(&client->dev);
at24->nvmem_config.dev = &client->dev;
at24->nvmem_config.read_only = !writable;
@@ -764,8 +777,6 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
goto err_clients;
}
- i2c_set_clientdata(client, at24);
-
dev_info(&client->dev, "%u byte %s EEPROM, %s, %u bytes/write\n",
chip.byte_len, client->name,
writable ? "writable" : "read-only", at24->write_max);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8a679ecc8fd1..fc2794b513fa 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
rc = -EFAULT;
- goto err_out1;
+ goto err_out2;
}
}
return 0;
+ err_out2:
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+ sgl->lpage_dma_addr);
+ sgl->lpage = NULL;
+ sgl->lpage_dma_addr = 0;
err_out1:
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
+ sgl->fpage = NULL;
+ sgl->fpage_dma_addr = 0;
err_out:
__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
sgl->sgl_dma_addr);
+ sgl->sgl = NULL;
+ sgl->sgl_dma_addr = 0;
+ sgl->sgl_size = 0;
return -ENOMEM;
}
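
The genwqe fix adds an err_out2 stage so that a failed copy of the last page also unwinds the lpage allocation; each label releases exactly what was set up after the previous label, and the pointers are cleared so later frees stay safe. A runnable sketch of staged unwinding:

#include <stdlib.h>

/* Each error label frees what the step above it allocated, so a
 * failure at any point releases everything acquired so far. */
static int setup_three(void **a, void **b, void **c)
{
	*a = malloc(16);
	if (!*a)
		goto err;
	*b = malloc(16);
	if (!*b)
		goto err_a;
	*c = malloc(16);
	if (!*c)
		goto err_b;
	return 0;

err_b:
	free(*b); *b = NULL;
err_a:
	free(*a); *a = NULL;
err:
	return -1;
}
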
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 9c677f3f3c26..520f58439080 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -144,7 +144,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
if (ret) {
ret->i_ino = get_next_ino();
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
}
return ret;
}
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index e9e6ea3ab73c..75b9d4ac8b1e 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -178,7 +178,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
ret = 0;
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ if (bytes_recv < if_version_length) {
dev_err(bus->dev, "Could not read IF version\n");
ret = -EIO;
goto err;
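
The old condition only guaranteed that the reply header had arrived: a read long enough for sizeof(struct mei_nfc_reply) but shorter than if_version_length would still pass, leaving the version payload uninitialized. Comparing against the full expected length closes that. A runnable sketch of the hazard (the struct names are illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct reply_hdr { unsigned char status; };
struct version   { unsigned char major, minor; };

/* Validate header + payload: checking only sizeof(struct reply_hdr)
 * would accept a short read and leave 'ver' uninitialized. */
static int parse(const unsigned char *buf, ssize_t n, struct version *ver)
{
	const ssize_t want = sizeof(struct reply_hdr) +
			     sizeof(struct version);

	if (n < want)
		return -1;	/* short read: reject the whole message */
	memcpy(ver, buf + sizeof(struct reply_hdr), sizeof(*ver));
	return 0;
}
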
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index e6e5e55a12ed..60415a2bfcbd 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -981,11 +981,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
hisr = mei_txe_br_reg_read(hw, HISR_REG);
aliveness = mei_txe_aliveness_get(dev);
- if (hhisr & IPC_HHIER_SEC && aliveness)
+ if (hhisr & IPC_HHIER_SEC && aliveness) {
ipc_isr = mei_txe_sec_reg_read_silent(hw,
SEC_IPC_HOST_INT_STATUS_REG);
- else
+ } else {
ipc_isr = 0;
+ hhisr &= ~IPC_HHIER_SEC;
+ }
generated = generated ||
(hisr & HISR_INT_STS_MSK) ||
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index e0203b1a20fd..f806a4471eb9 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1396,8 +1396,7 @@ retry:
pinned_pages->nr_pages = get_user_pages(
(u64)addr,
nr_pages,
- !!(prot & SCIF_PROT_WRITE),
- 0,
+ (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
pinned_pages->pages,
NULL);
up_write(&mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index a2d97b9b17e3..6fb773dbcd0c 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
#else
*pageshift = PAGE_SHIFT;
#endif
- if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+ if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
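
Both call sites track the get_user_pages() API change that merged the separate write and force ints into one gup_flags argument. Kernel-style sketch of a converted call under the 4.9-era prototype (the wrapper itself is illustrative):

static long pin_user_buffer(unsigned long vaddr, int nr_pages,
			    bool writable, struct page **pages)
{
	return get_user_pages(vaddr, nr_pages,
			      writable ? FOLL_WRITE : 0,
			      pages, NULL);
}
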
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 1525870f460a..33741ad4a74a 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -283,7 +283,7 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
spin_lock(&gru->gs_asid_lock);
BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
asids->mt_ctxbitmap ^= ctxbitmap;
- gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+ gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
spin_unlock(&gru->gs_asid_lock);
spin_unlock(&gms->ms_asid_lock);
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index a8cee33ae8d2..b3fa738ae005 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -431,6 +431,12 @@ int vmci_doorbell_create(struct vmci_handle *handle,
if (vmci_handle_is_invalid(*handle)) {
u32 context_id = vmci_get_context_id();
+ if (context_id == VMCI_INVALID_ID) {
+ pr_warn("Failed to get context ID\n");
+ result = VMCI_ERROR_NO_RESOURCES;
+ goto free_mem;
+ }
+
/* Let resource code allocate a free ID for us */
new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
} else {
@@ -525,7 +531,7 @@ int vmci_doorbell_destroy(struct vmci_handle handle)
entry = container_of(resource, struct dbell_entry, resource);
- if (vmci_guest_code_active()) {
+ if (!hlist_unhashed(&entry->node)) {
int result;
dbell_index_table_remove(entry);
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 896be150e28f..d7eaf1eb11e7 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.4.0-k");
+MODULE_VERSION("1.1.5.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c3335112e68c..709a872ed484 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -46,6 +46,7 @@
#include <asm/uaccess.h>
#include "queue.h"
+#include "block.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
@@ -1786,7 +1787,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->data;
struct mmc_packed *packed = mqrq->packed;
bool do_rel_wr, do_data_tag;
- u32 *packed_cmd_hdr;
+ __le32 *packed_cmd_hdr;
u8 hdr_blocks;
u8 i = 1;
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 5a8dc5a76e0d..3678220964fe 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2347,7 +2347,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_test_req *rq = mmc_test_req_alloc();
struct mmc_host *host = test->card->host;
struct mmc_test_area *t = &test->area;
- struct mmc_async_req areq;
+ struct mmc_test_async_req test_areq = { .test = test };
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
@@ -2363,8 +2363,8 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
mrq->sbc = &rq->sbc;
mrq->cap_cmd_during_tfr = true;
- areq.mrq = mrq;
- areq.err_check = mmc_test_check_result_async;
+ test_areq.areq.mrq = mrq;
+ test_areq.areq.err_check = mmc_test_check_result_async;
mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
512, write);
@@ -2378,7 +2378,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_req(host, &areq, &ret);
+ mmc_start_req(host, &test_areq.areq, &ret);
if (ret)
goto out_free;
} else {
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 3c15a75bae86..342f1e3f301e 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -31,7 +31,7 @@ enum mmc_packed_type {
struct mmc_packed {
struct list_head list;
- u32 cmd_hdr[1024];
+ __le32 cmd_hdr[1024];
unsigned int blocks;
u8 nr_entries;
u8 retries;
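
Typing cmd_hdr as __le32 records that the packed-command header is little-endian on the medium, and lets sparse flag any access that skips the byte-swapping helpers. Kernel-style sketch of the convention (the header fields here are illustrative):

static void pack_hdr(__le32 *hdr, u32 version, u32 nr_entries)
{
	hdr[0] = cpu_to_le32(version);            /* CPU -> LE store */
	hdr[1] = cpu_to_le32(nr_entries << 16);
}

static u32 unpack_entries(const __le32 *hdr)
{
	return le32_to_cpu(hdr[1]) >> 16;         /* LE -> CPU load */
}
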
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3486bc7fbb64..df19777068a6 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -26,6 +26,8 @@
#include "mmc_ops.h"
#include "sd_ops.h"
+#define DEFAULT_CMD6_TIMEOUT_MS 500
+
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
@@ -571,6 +573,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->erased_byte = 0x0;
/* eMMC v4.5 or later */
+ card->ext_csd.generic_cmd6_time = DEFAULT_CMD6_TIMEOUT_MS;
if (card->ext_csd.rev >= 6) {
card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
@@ -1263,6 +1266,16 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+ /* If it fails, try again during the next card power cycle */
+ if (err)
+ goto out_err;
+
err = mmc_select_bus_width(card);
if (err < 0)
goto out_err;
@@ -1272,6 +1285,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
if (err)
goto out_err;
+ mmc_set_clock(host, card->ext_csd.hs_max_dtr);
+
err = mmc_switch_status(card);
if (err)
goto out_err;
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index c0bb0c793e84..dbbc4303bdd0 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -46,12 +46,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
host->pdata = pdev->dev.platform_data;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Get registers' physical base address */
- host->phy_regs = regs->start;
host->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
+ /* Get registers' physical base address */
+ host->phy_regs = regs->start;
+
platform_set_drvdata(pdev, host);
return dw_mci_probe(host);
}
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4fcbc4012ed0..50a674be6655 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2940,7 +2940,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
return ERR_PTR(-ENOMEM);
/* find reset controller when exist */
- pdata->rstc = devm_reset_control_get_optional(dev, NULL);
+ pdata->rstc = devm_reset_control_get_optional(dev, "reset");
if (IS_ERR(pdata->rstc)) {
if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index d839147e591d..44ecebd1ea8c 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -661,13 +661,13 @@ static int mxs_mmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mmc);
+ spin_lock_init(&host->lock);
+
ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
dev_name(&pdev->dev), host);
if (ret)
goto out_free_dma;
- spin_lock_init(&host->lock);
-
ret = mmc_add_host(mmc);
if (ret)
goto out_free_dma;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 4106295527b9..6e9c0f8fddb1 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
dev_dbg(sdmmc_dev(host), "%s\n", __func__);
mutex_lock(&ucr->dev_mutex);
- if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
- mutex_unlock(&ucr->dev_mutex);
- return;
- }
-
sd_set_power_mode(host, ios->power_mode);
sd_set_bus_width(host, ios->bus_width);
sd_set_timing(host, ios->timing, &host->ddr_mode);
@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
container_of(work, struct rtsx_usb_sdmmc, led_work);
struct rtsx_ucr *ucr = host->ucr;
+ pm_runtime_get_sync(sdmmc_dev(host));
mutex_lock(&ucr->dev_mutex);
if (host->led.brightness == LED_OFF)
@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
rtsx_usb_turn_on_led(ucr);
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(sdmmc_dev(host));
}
#endif
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1f54fd8755c8..7123ef96ed18 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -346,7 +346,8 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 data;
- if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE ||
+ reg == SDHCI_INT_STATUS)) {
if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
/*
* Clear and then set D3CD bit to avoid missing the
@@ -555,6 +556,25 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
esdhc_clrset_le(host, 0xffff, val, reg);
}
+static u8 esdhc_readb_le(struct sdhci_host *host, int reg)
+{
+ u8 ret;
+ u32 val;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL:
+ val = readl(host->ioaddr + reg);
+
+ ret = val & SDHCI_CTRL_LED;
+ ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK;
+ ret |= (val & ESDHC_CTRL_4BITBUS);
+ ret |= (val & ESDHC_CTRL_8BITBUS) << 3;
+ return ret;
+ }
+
+ return readb(host->ioaddr + reg);
+}
+
static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -947,6 +967,7 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
+ .read_b = esdhc_readb_le,
.write_l = esdhc_writel_le,
.write_w = esdhc_writew_le,
.write_b = esdhc_writeb_le,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 8ef44a2a2fd9..90ed2e12d345 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -647,6 +647,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
if (msm_host->pwr_irq < 0) {
dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
msm_host->pwr_irq);
+ ret = msm_host->pwr_irq;
goto clk_disable;
}
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index da8e40af6f85..410a55b1c25f 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -250,7 +250,7 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
}
-void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
+static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
{
u8 ctrl;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -265,6 +265,28 @@ void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
}
}
+static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_180:
+ /*
+ * Please don't switch to 1V8, as arasan,5.1 doesn't actually
+ * use this setting to indicate the signal voltage, and the
+ * state machine will break if we force 1V8. This is
+ * effectively a controller quirk that we work around here.
+ */
+ return 0;
+ case MMC_SIGNAL_VOLTAGE_330:
+ case MMC_SIGNAL_VOLTAGE_120:
+ /* We don't support 3V3 and 1V2 */
+ break;
+ }
+
+ return -EINVAL;
+}
+
static struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -661,6 +683,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_arasan_hs400_enhanced_strobe;
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_arasan_voltage_switch;
}
ret = sdhci_add_host(host);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 72a1f1f5180a..1d9e00a00e9f 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,6 +32,14 @@
#include "sdhci-pci.h"
#include "sdhci-pci-o2micro.h"
+static int sdhci_pci_enable_dma(struct sdhci_host *host);
+static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width);
+static void sdhci_pci_hw_reset(struct sdhci_host *host);
+static int sdhci_pci_select_drive_strength(struct sdhci_host *host,
+ struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -390,6 +398,45 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
+#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
+
+static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ int cntr;
+ u8 reg;
+
+ sdhci_set_power(host, mode, vdd);
+
+ if (mode == MMC_POWER_OFF)
+ return;
+
+ /*
+ * Bus power may fail to come up after a D3 -> D0 transition because
+ * the present state has not yet propagated. Retry for up to 2 ms.
+ */
+ for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
+ reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ if (reg & SDHCI_POWER_ON)
+ break;
+ udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
+ reg |= SDHCI_POWER_ON;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ }
+}
+
+static const struct sdhci_ops sdhci_intel_byt_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_intel_set_power,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_pci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ .select_drive_strength = sdhci_pci_select_drive_strength,
+};
+
static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.allow_runtime_pm = true,
.probe_slot = byt_emmc_probe_slot,
@@ -397,6 +444,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
+ .ops = &sdhci_intel_byt_ops,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
@@ -405,6 +453,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = byt_sdio_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
@@ -415,6 +464,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.allow_runtime_pm = true,
.own_cd_for_runtime_pm = true,
.probe_slot = byt_sd_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
};
/* Define Host controllers for Intel Merrifield platform */
@@ -1648,7 +1698,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
host->hw_name = "PCI";
- host->ops = &sdhci_pci_ops;
+ host->ops = chip->fixes && chip->fixes->ops ?
+ chip->fixes->ops :
+ &sdhci_pci_ops;
host->quirks = chip->quirks;
host->quirks2 = chip->quirks2;
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 9c7c08b93223..6bccf56bc5ff 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -65,6 +65,8 @@ struct sdhci_pci_fixes {
int (*suspend) (struct sdhci_pci_chip *);
int (*resume) (struct sdhci_pci_chip *);
+
+ const struct sdhci_ops *ops;
};
struct sdhci_pci_slot {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index dd1938d341f7..d0f5c05fbc19 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -315,7 +315,7 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
struct mmc_host *mmc = host->mmc;
u8 pwr = host->pwr;
- sdhci_set_power(host, mode, vdd);
+ sdhci_set_power_noreg(host, mode, vdd);
if (host->pwr == pwr)
return;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 48055666c655..42ef3ebb1d8c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
* host->clock is in Hz. target_timeout is in us.
* Hence, us = 1000000 * cycles / Hz. Round up.
*/
- val = 1000000 * data->timeout_clks;
+ val = 1000000ULL * data->timeout_clks;
if (do_div(val, host->clock))
target_timeout++;
target_timeout += val;
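
A side note on the hunk above: data->timeout_clks is a 32-bit quantity, so the plain decimal constant forced a 32-bit multiply. A minimal sketch of the overflow the ULL suffix avoids (values are illustrative, not from the patch):

    unsigned int timeout_clks = 65535;                   /* plausible worst case */
    unsigned int bad = 1000000 * timeout_clks;           /* wraps modulo 2^32 */
    unsigned long long good = 1000000ULL * timeout_clks; /* 65535000000, as intended */
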
@@ -1077,6 +1077,10 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
/* Initially, a command has no error */
cmd->error = 0;
+ if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+ cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd->flags |= MMC_RSP_BUSY;
+
/* Wait max 10 ms */
timeout = 10;
@@ -1390,8 +1394,8 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
-void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
u8 pwr = 0;
@@ -1455,20 +1459,17 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
mdelay(10);
}
}
-EXPORT_SYMBOL_GPL(sdhci_set_power);
+EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
-static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
- struct mmc_host *mmc = host->mmc;
-
- if (host->ops->set_power)
- host->ops->set_power(host, mode, vdd);
- else if (!IS_ERR(mmc->supply.vmmc))
- sdhci_set_power_reg(host, mode, vdd);
+ if (IS_ERR(host->mmc->supply.vmmc))
+ sdhci_set_power_noreg(host, mode, vdd);
else
- sdhci_set_power(host, mode, vdd);
+ sdhci_set_power_reg(host, mode, vdd);
}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
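
With sdhci_set_power() now handling the vmmc regulator itself and sdhci_set_power_noreg() doing pure register programming, a host driver that manages its own supply can layer a ->set_power() on top of the latter; the sdhci-pxav3 hunk earlier in this patch follows exactly this pattern. A minimal sketch (my_host_set_power() and my_regulator_update() are hypothetical; only the sdhci_* call is from this patch):

    static void my_host_set_power(struct sdhci_host *host, unsigned char mode,
                                  unsigned short vdd)
    {
            /* program SDHCI_POWER_CONTROL without touching vmmc */
            sdhci_set_power_noreg(host, mode, vdd);

            /* then drive the external supply by other means */
            my_regulator_update(host->mmc, mode, vdd);
    }
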
/*****************************************************************************\
* *
@@ -1609,7 +1610,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
- __sdhci_set_power(host, ios->power_mode, ios->vdd);
+ if (host->ops->set_power)
+ host->ops->set_power(host, ios->power_mode, ios->vdd);
+ else
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -2082,6 +2086,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (!host->tuning_done) {
pr_info(DRIVER_NAME ": Timeout waiting for Buffer Read Ready interrupt during tuning procedure, falling back to fixed sampling clock\n");
+
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl &= ~SDHCI_CTRL_TUNED_CLK;
ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
@@ -2282,10 +2290,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
mrq = host->mrqs_done[i];
- if (mrq) {
- host->mrqs_done[i] = NULL;
+ if (mrq)
break;
- }
}
if (!mrq) {
@@ -2316,6 +2322,17 @@ static bool sdhci_request_done(struct sdhci_host *host)
* upon error conditions.
*/
if (sdhci_needs_reset(host, mrq)) {
+ /*
+ * Do not finish until command and data lines are available for
+ * reset. Note there can only be one other mrq, so it cannot
+ * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+ * would both be null.
+ */
+ if (host->cmd || host->data_cmd) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
+
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
@@ -2323,10 +2340,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
- if (!host->cmd)
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- if (!host->data_cmd)
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
host->pending_reset = false;
}
@@ -2334,6 +2349,8 @@ static bool sdhci_request_done(struct sdhci_host *host)
if (!sdhci_has_requests(host))
sdhci_led_deactivate(host);
+ host->mrqs_done[i] = NULL;
+
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
@@ -2409,7 +2426,7 @@ static void sdhci_timeout_data_timer(unsigned long data)
* *
\*****************************************************************************/
-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
if (!host->cmd) {
/*
@@ -2453,11 +2470,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
return;
}
- if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
- !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
- host->cmd->opcode == MMC_STOP_TRANSMISSION)
- *mask &= ~SDHCI_INT_DATA_END;
-
if (intmask & SDHCI_INT_RESPONSE)
sdhci_finish_command(host);
}
@@ -2513,9 +2525,6 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (!host->data) {
struct mmc_command *data_cmd = host->data_cmd;
- if (data_cmd)
- host->data_cmd = NULL;
-
/*
* The "data complete" interrupt is also used to
* indicate that a busy state has ended. See comment
@@ -2523,11 +2532,13 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
*/
if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+ host->data_cmd = NULL;
data_cmd->error = -ETIMEDOUT;
sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
if (intmask & SDHCI_INT_DATA_END) {
+ host->data_cmd = NULL;
/*
* Some cards handle busy-end interrupt
* before the command completed, so make
@@ -2680,8 +2691,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
}
if (intmask & SDHCI_INT_CMD_MASK)
- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
- &intmask);
+ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
if (intmask & SDHCI_INT_DATA_MASK)
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
@@ -2914,6 +2924,10 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
spin_unlock_irqrestore(&host->lock, flags);
}
+ if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
+ mmc->ops->hs400_enhanced_strobe)
+ mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+
spin_lock_irqsave(&host->lock, flags);
host->runtime_suspended = false;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c722cd23205c..766df17fb7eb 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -683,6 +683,8 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 392f9eff5fb7..5bcc896a48c3 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -78,7 +78,7 @@ config MTD_PHYSMAP_OF_VERSATILE
bool "Support ARM Versatile physmap OF"
depends on MTD_PHYSMAP_OF
depends on MFD_SYSCON
- default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || REALVIEW_DT)
+ default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || ARCH_REALVIEW)
help
This provides some extra DT physmap parsing for the ARM Versatile
platforms, basically to add a VPP (write protection) callback so
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e3936b847c6b..d46e4adf6d2b 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -39,7 +39,6 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
-#include <linux/kconfig.h>
#include <linux/leds.h>
#include <linux/mtd/mtd.h>
@@ -376,6 +375,110 @@ static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
}
/**
+ * mtd_wunit_to_pairing_info - get pairing information of a wunit
+ * @mtd: pointer to new MTD device info structure
+ * @wunit: write unit we are interested in
+ * @info: returned pairing information
+ *
+ * Retrieve the pairing information associated with the wunit.
+ * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
+ * paired together, and where programming a page may influence the page it is
+ * paired with.
+ * The notion of page is replaced by the term wunit (write-unit) to stay
+ * consistent with the ->writesize field.
+ *
+ * The @wunit argument can be extracted from an absolute offset using
+ * mtd_offset_to_wunit(). @info is filled with the pairing information attached
+ * to @wunit.
+ *
+ * From the pairing info the MTD user can find all the wunits paired with
+ * @wunit using the following loop:
+ *
+ * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
+ * info.pair = i;
+ * mtd_pairing_info_to_wunit(mtd, &info);
+ * ...
+ * }
+ */
+int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
+ struct mtd_pairing_info *info)
+{
+ int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
+
+ if (wunit < 0 || wunit >= npairs)
+ return -EINVAL;
+
+ if (mtd->pairing && mtd->pairing->get_info)
+ return mtd->pairing->get_info(mtd, wunit, info);
+
+ info->group = 0;
+ info->pair = wunit;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
+
+/**
+ * mtd_pairing_info_to_wunit - get wunit from pairing information
+ * @mtd: pointer to new MTD device info structure
+ * @info: pairing information struct
+ *
+ * Returns a positive number representing the wunit associated with the info
+ * struct, or a negative error code.
+ *
+ * This is the reverse of mtd_wunit_to_pairing_info(), and can be used to
+ * iterate over all wunits of a given pair (see the mtd_wunit_to_pairing_info()
+ * doc).
+ *
+ * It can also be used to only program the first page of each pair (i.e.
+ * page attached to group 0), which allows one to use an MLC NAND in
+ * software-emulated SLC mode:
+ *
+ * info.group = 0;
+ * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
+ * for (info.pair = 0; info.pair < npairs; info.pair++) {
+ * wunit = mtd_pairing_info_to_wunit(mtd, &info);
+ * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
+ * mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
+ * }
+ */
+int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
+ const struct mtd_pairing_info *info)
+{
+ int ngroups = mtd_pairing_groups(mtd);
+ int npairs = mtd_wunit_per_eb(mtd) / ngroups;
+
+ if (!info || info->pair < 0 || info->pair >= npairs ||
+ info->group < 0 || info->group >= ngroups)
+ return -EINVAL;
+
+ if (mtd->pairing && mtd->pairing->get_wunit)
+ return mtd->pairing->get_wunit(mtd, info);
+
+ return info->pair;
+}
+EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
+
+/**
+ * mtd_pairing_groups - get the number of pairing groups
+ * @mtd: pointer to new MTD device info structure
+ *
+ * Returns the number of pairing groups.
+ *
+ * This number is usually equal to the number of bits exposed by a single
+ * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
+ * to iterate over all pages of a given pair.
+ */
+int mtd_pairing_groups(struct mtd_info *mtd)
+{
+ if (!mtd->pairing || !mtd->pairing->ngroups)
+ return 1;
+
+ return mtd->pairing->ngroups;
+}
+EXPORT_SYMBOL_GPL(mtd_pairing_groups);
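
Taken together, the three helpers above support the software-emulated SLC pattern described in the kerneldoc. A self-contained sketch (not part of the patch; mtd and buf are assumed in scope, and blkoffs marks the start of the target eraseblock):

    struct mtd_pairing_info info = { .group = 0 };
    int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
    loff_t blkoffs = 0;
    size_t retlen;
    int wunit;

    for (info.pair = 0; info.pair < npairs; info.pair++) {
            wunit = mtd_pairing_info_to_wunit(mtd, &info);
            if (wunit < 0)
                    break;
            /* program only the group-0 page of each pair */
            mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
                      mtd->writesize, &retlen,
                      buf + (info.pair * mtd->writesize));
    }
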
+
+/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
*
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 1f13e32556f8..fccdd49bb964 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -30,7 +30,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
-#include <linux/kconfig.h>
#include "mtdcore.h"
@@ -317,6 +316,18 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
return res;
}
+static int part_get_device(struct mtd_info *mtd)
+{
+ struct mtd_part *part = mtd_to_part(mtd);
+ return part->master->_get_device(part->master);
+}
+
+static void part_put_device(struct mtd_info *mtd)
+{
+ struct mtd_part *part = mtd_to_part(mtd);
+ part->master->_put_device(part->master);
+}
+
static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
@@ -397,6 +408,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.oobsize = master->oobsize;
slave->mtd.oobavail = master->oobavail;
slave->mtd.subpage_sft = master->subpage_sft;
+ slave->mtd.pairing = master->pairing;
slave->mtd.name = name;
slave->mtd.owner = master->owner;
@@ -463,6 +475,12 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd._block_isbad = part_block_isbad;
if (master->_block_markbad)
slave->mtd._block_markbad = part_block_markbad;
+
+ if (master->_get_device)
+ slave->mtd._get_device = part_get_device;
+ if (master->_put_device)
+ slave->mtd._put_device = part_put_device;
+
slave->mtd._erase = part_erase;
slave->master = master;
slave->offset = part->offset;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 21ff58099f3b..7b7a887b4709 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -88,11 +88,11 @@ config MTD_NAND_AMS_DELTA
Support for NAND flash on Amstrad E3 (Delta).
config MTD_NAND_OMAP2
- tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
- depends on ARCH_OMAP2PLUS
+ tristate "NAND Flash device on OMAP2, OMAP3, OMAP4 and Keystone"
+ depends on (ARCH_OMAP2PLUS || ARCH_KEYSTONE)
help
- Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
- platforms.
+ Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
+ and Keystone platforms.
config MTD_NAND_OMAP_BCH
depends on MTD_NAND_OMAP2
@@ -428,7 +428,7 @@ config MTD_NAND_ORION
config MTD_NAND_FSL_ELBC
tristate "NAND support for Freescale eLBC controllers"
- depends on PPC
+ depends on FSL_SOC
select FSL_LBC
help
Various Freescale chips, including the 8313, include a NAND Flash
@@ -438,7 +438,7 @@ config MTD_NAND_FSL_ELBC
config MTD_NAND_FSL_IFC
tristate "NAND support for Freescale IFC controller"
- depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE)
+ depends on FSL_SOC || ARCH_LAYERSCAPE
select FSL_IFC
select MEMORY
help
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 37da4236ab90..3962f55bd034 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -761,8 +761,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- spin_lock_init(&info->controller.lock);
- init_waitqueue_head(&info->controller.wq);
+ nand_hw_control_init(&info->controller);
info->device = &pdev->dev;
info->platform = plat;
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 8eb2c64df38c..9d2424bfdbf5 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1336,7 +1336,7 @@ static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
u32 *flash_cache = (u32 *)ctrl->flash_cache;
int i;
- brcmnand_soc_data_bus_prepare(ctrl->soc);
+ brcmnand_soc_data_bus_prepare(ctrl->soc, true);
/*
* Must cache the FLASH_CACHE now, since changes in
@@ -1349,7 +1349,7 @@ static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
*/
flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
- brcmnand_soc_data_bus_unprepare(ctrl->soc);
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
if (host->hwcfg.sector_size_1k)
@@ -1565,12 +1565,12 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
brcmnand_waitfunc(mtd, chip);
if (likely(buf)) {
- brcmnand_soc_data_bus_prepare(ctrl->soc);
+ brcmnand_soc_data_bus_prepare(ctrl->soc, false);
for (j = 0; j < FC_WORDS; j++, buf++)
*buf = brcmnand_read_fc(ctrl, j);
- brcmnand_soc_data_bus_unprepare(ctrl->soc);
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
}
if (oob)
@@ -1815,12 +1815,12 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
if (buf) {
- brcmnand_soc_data_bus_prepare(ctrl->soc);
+ brcmnand_soc_data_bus_prepare(ctrl->soc, false);
for (j = 0; j < FC_WORDS; j++, buf++)
brcmnand_write_fc(ctrl, j, *buf);
- brcmnand_soc_data_bus_unprepare(ctrl->soc);
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
} else if (oob) {
for (j = 0; j < FC_WORDS; j++)
brcmnand_write_fc(ctrl, j, 0xffffffff);
@@ -2370,8 +2370,7 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
init_completion(&ctrl->done);
init_completion(&ctrl->dma_done);
- spin_lock_init(&ctrl->controller.lock);
- init_waitqueue_head(&ctrl->controller.wq);
+ nand_hw_control_init(&ctrl->controller);
INIT_LIST_HEAD(&ctrl->host_list);
/* NAND register range */
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.h b/drivers/mtd/nand/brcmnand/brcmnand.h
index ef5eabba88e5..5c44cd4aba87 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/brcmnand/brcmnand.h
@@ -23,19 +23,22 @@ struct dev_pm_ops;
struct brcmnand_soc {
bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
- void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare);
+ void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
+ bool is_param);
};
-static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc)
+static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc,
+ bool is_param)
{
if (soc && soc->prepare_data_bus)
- soc->prepare_data_bus(soc, true);
+ soc->prepare_data_bus(soc, true, is_param);
}
-static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc)
+static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc,
+ bool is_param)
{
if (soc && soc->prepare_data_bus)
- soc->prepare_data_bus(soc, false);
+ soc->prepare_data_bus(soc, false, is_param);
}
static inline u32 brcmnand_readl(void __iomem *addr)
diff --git a/drivers/mtd/nand/brcmnand/iproc_nand.c b/drivers/mtd/nand/brcmnand/iproc_nand.c
index 585596c549b2..4c6ae113664d 100644
--- a/drivers/mtd/nand/brcmnand/iproc_nand.c
+++ b/drivers/mtd/nand/brcmnand/iproc_nand.c
@@ -74,7 +74,8 @@ static void iproc_nand_intc_set(struct brcmnand_soc *soc, bool en)
spin_unlock_irqrestore(&priv->idm_lock, flags);
}
-static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare)
+static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare,
+ bool is_param)
{
struct iproc_nand_soc *priv =
container_of(soc, struct iproc_nand_soc, soc);
@@ -86,10 +87,19 @@ static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare)
val = brcmnand_readl(mmio);
- if (prepare)
- val |= IPROC_NAND_APB_LE_MODE;
- else
+ /*
+ * In the case of BE or when dealing with NAND data, always configure
+ * the APB bus to LE mode before accessing the FIFO, and switch back to
+ * BE mode after the access is done.
+ */
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) || !is_param) {
+ if (prepare)
+ val |= IPROC_NAND_APB_LE_MODE;
+ else
+ val &= ~IPROC_NAND_APB_LE_MODE;
+ } else { /* when in LE accessing the parameter page, keep APB in BE */
val &= ~IPROC_NAND_APB_LE_MODE;
+ }
brcmnand_writel(val, mmio);
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 47316998017f..7af2a3cd949e 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -1249,8 +1249,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
nand->controller = &nand->hwcontrol;
- spin_lock_init(&nand->controller->lock);
- init_waitqueue_head(&nand->controller->wq);
+ nand_hw_control_init(nand->controller);
/* methods */
nand->cmdfunc = docg4_command;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 60a88f24c6b3..113f76e59937 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -879,8 +879,7 @@ static int fsl_elbc_nand_probe(struct platform_device *pdev)
}
elbc_fcm_ctrl->counter++;
- spin_lock_init(&elbc_fcm_ctrl->controller.lock);
- init_waitqueue_head(&elbc_fcm_ctrl->controller.wq);
+ nand_hw_control_init(&elbc_fcm_ctrl->controller);
fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
} else {
elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 4e9e5fd8faf3..0a177b1bfe3e 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -987,8 +987,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
ifc_nand_ctrl->addr = NULL;
fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
- spin_lock_init(&ifc_nand_ctrl->controller.lock);
- init_waitqueue_head(&ifc_nand_ctrl->controller.wq);
+ nand_hw_control_init(&ifc_nand_ctrl->controller);
} else {
ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 0f68a99fc4ad..141bd70a49c2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -161,7 +161,7 @@ int gpmi_init(struct gpmi_nand_data *this)
ret = gpmi_enable_clk(this);
if (ret)
- goto err_out;
+ return ret;
ret = gpmi_reset_block(r->gpmi_regs, false);
if (ret)
goto err_out;
@@ -197,6 +197,7 @@ int gpmi_init(struct gpmi_nand_data *this)
gpmi_disable_clk(this);
return 0;
err_out:
+ gpmi_disable_clk(this);
return ret;
}
@@ -270,7 +271,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
ret = gpmi_enable_clk(this);
if (ret)
- goto err_out;
+ return ret;
/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
@@ -308,6 +309,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
gpmi_disable_clk(this);
return 0;
err_out:
+ gpmi_disable_clk(this);
return ret;
}
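
Both gpmi hunks above converge on the same clock-handling shape: bail out directly while nothing is enabled yet, and disable the clock on every path once it is. A minimal sketch (do_hw_setup() is a hypothetical stand-in for the register programming):

    static int example_init(struct gpmi_nand_data *this)
    {
            int ret = gpmi_enable_clk(this);

            if (ret)
                    return ret;             /* nothing to undo yet */

            ret = do_hw_setup(this);        /* hypothetical helper */

            gpmi_disable_clk(this);         /* undone on success and failure */
            return ret;
    }
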
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 6e461560c6a8..6c062b8251d2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -318,7 +318,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
return -EINVAL;
}
- geo->page_size = mtd->writesize + mtd->oobsize;
+ geo->page_size = mtd->writesize + geo->metadata_size +
+ (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
geo->payload_size = mtd->writesize;
/*
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
index 175f67da25af..a39bb70175ee 100644
--- a/drivers/mtd/nand/jz4780_nand.c
+++ b/drivers/mtd/nand/jz4780_nand.c
@@ -368,9 +368,8 @@ static int jz4780_nand_probe(struct platform_device *pdev)
nfc->dev = dev;
nfc->num_banks = num_banks;
- spin_lock_init(&nfc->controller.lock);
+ nand_hw_control_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
- init_waitqueue_head(&nfc->controller.wq);
ret = jz4780_nand_init_chips(nfc, pdev);
if (ret) {
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index d54f666417e1..dbf256217b3e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -86,6 +86,8 @@ struct mtk_ecc {
struct completion done;
struct mutex lock;
u32 sectors;
+
+ u8 eccdata[112];
};
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
@@ -366,9 +368,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
u8 *data, u32 bytes)
{
dma_addr_t addr;
- u8 *p;
- u32 len, i, val;
- int ret = 0;
+ u32 len;
+ int ret;
addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
ret = dma_mapping_error(ecc->dev, addr);
@@ -393,14 +394,12 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
- p = data + bytes;
- /* write the parity bytes generated by the ECC back to the OOB region */
- for (i = 0; i < len; i++) {
- if ((i % 4) == 0)
- val = readl(ecc->regs + ECC_ENCPAR(i / 4));
- p[i] = (val >> ((i % 4) * 8)) & 0xff;
- }
+ /* write the parity bytes generated by the ECC back to temp buffer */
+ __ioread32_copy(ecc->eccdata, ecc->regs + ECC_ENCPAR(0),
+ round_up(len, 4) / sizeof(u32));
+
+ /* copy into possibly unaligned OOB region with actual length */
+ memcpy(data + bytes, ecc->eccdata, len);
timeout:
dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
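
The mtk_ecc change above replaces byte-wise unpacking of the ENCPAR registers with a word copy into the new eccdata[] bounce buffer, followed by a plain memcpy() into the (possibly unaligned) OOB area. Note that __ioread32_copy() counts in 32-bit words, hence the rounding. A sketch of the pattern with a hypothetical register block:

    static void read_parity(void __iomem *parity_regs, u8 *oob, size_t len)
    {
            u8 tmp[112];    /* large enough for the maximum parity size */

            /* whole words from MMIO into an aligned staging buffer */
            __ioread32_copy(tmp, parity_regs, round_up(len, 4) / sizeof(u32));

            /* trim to the real length; destination may be unaligned */
            memcpy(oob, tmp, len);
    }
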
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 57cbe2b83849..d7f724b24fd7 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -152,6 +152,9 @@ struct mxc_nand_devtype_data {
void (*select_chip)(struct mtd_info *mtd, int chip);
int (*correct_data)(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc);
+ int (*setup_data_interface)(struct mtd_info *mtd,
+ const struct nand_data_interface *conf,
+ bool check_only);
/*
* On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
@@ -1012,6 +1015,82 @@ static void preset_v1(struct mtd_info *mtd)
writew(0x4, NFC_V1_V2_WRPROT);
}
+static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd,
+ const struct nand_data_interface *conf,
+ bool check_only)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ int tRC_min_ns, tRC_ps, ret;
+ unsigned long rate, rate_round;
+ const struct nand_sdr_timings *timings;
+ u16 config1;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
+ config1 = readw(NFC_V1_V2_CONFIG1);
+
+ tRC_min_ns = timings->tRC_min / 1000;
+ rate = 1000000000 / tRC_min_ns;
+
+ /*
+ * For tRC < 30ns we have to use EDO mode. In this case the controller
+ * does one access per clock cycle. Otherwise the controller does one
+ * access in two clock cycles, thus we have to double the rate to the
+ * controller.
+ */
+ if (tRC_min_ns < 30) {
+ rate_round = clk_round_rate(host->clk, rate);
+ config1 |= NFC_V2_CONFIG1_ONE_CYCLE;
+ tRC_ps = 1000000000 / (rate_round / 1000);
+ } else {
+ rate *= 2;
+ rate_round = clk_round_rate(host->clk, rate);
+ config1 &= ~NFC_V2_CONFIG1_ONE_CYCLE;
+ tRC_ps = 1000000000 / (rate_round / 1000 / 2);
+ }
+
+ /*
+ * The timing values compared against are from the i.MX25 Automotive
+ * datasheet, Table 50. NFC Timing Parameters
+ */
+ if (timings->tCLS_min > tRC_ps - 1000 ||
+ timings->tCLH_min > tRC_ps - 2000 ||
+ timings->tCS_min > tRC_ps - 1000 ||
+ timings->tCH_min > tRC_ps - 2000 ||
+ timings->tWP_min > tRC_ps - 1500 ||
+ timings->tALS_min > tRC_ps ||
+ timings->tALH_min > tRC_ps - 3000 ||
+ timings->tDS_min > tRC_ps ||
+ timings->tDH_min > tRC_ps - 5000 ||
+ timings->tWC_min > 2 * tRC_ps ||
+ timings->tWH_min > tRC_ps - 2500 ||
+ timings->tRR_min > 6 * tRC_ps ||
+ timings->tRP_min > 3 * tRC_ps / 2 ||
+ timings->tRC_min > 2 * tRC_ps ||
+ timings->tREH_min > (tRC_ps / 2) - 2500) {
+ dev_dbg(host->dev, "Timing out of bounds\n");
+ return -EINVAL;
+ }
+
+ if (check_only)
+ return 0;
+
+ ret = clk_set_rate(host->clk, rate);
+ if (ret)
+ return ret;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+
+ dev_dbg(host->dev, "Setting rate to %ldHz, %s mode\n", rate_round,
+ config1 & NFC_V2_CONFIG1_ONE_CYCLE ? "One cycle (EDO)" :
+ "normal");
+
+ return 0;
+}
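
Worked numbers for the rate selection above, using tRC_min values from the ONFI table in nand_timings.c (a sketch, not part of the patch):

    int tRC_min_ns;
    unsigned long rate;

    tRC_min_ns = 100000 / 1000;             /* ONFI mode 0: 100 ns, >= 30 ns */
    rate = 2 * (1000000000 / tRC_min_ns);   /* 20 MHz, two clocks per access */

    tRC_min_ns = 20000 / 1000;              /* ONFI mode 5: 20 ns, < 30 ns */
    rate = 1000000000 / tRC_min_ns;         /* 50 MHz, EDO, one clock per access */
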
+
static void preset_v2(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
@@ -1239,6 +1318,57 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
}
}
+static int mxc_nand_onfi_set_features(struct mtd_info *mtd,
+ struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ int i;
+
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
+ return -EINVAL;
+
+ host->buf_start = 0;
+
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, subfeature_param[i]);
+
+ memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
+ host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
+ mxc_do_addr_cycle(mtd, addr, -1);
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+
+ return 0;
+}
+
+static int mxc_nand_onfi_get_features(struct mtd_info *mtd,
+ struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ int i;
+
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
+ return -EINVAL;
+
+ host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
+ mxc_do_addr_cycle(mtd, addr, -1);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+ memcpy32_fromio(host->data_buf, host->main_area0, 512);
+ host->buf_start = 0;
+
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ *subfeature_param++ = chip->read_byte(mtd);
+
+ return 0;
+}
+
/*
* The generic flash bbt descriptors overlap with our ecc
* hardware, so define some i.MX specific ones.
@@ -1327,6 +1457,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
.ooblayout = &mxc_v2_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v2,
.correct_data = mxc_nand_correct_data_v2_v3,
+ .setup_data_interface = mxc_nand_v2_setup_data_interface,
.irqpending_quirk = 0,
.needs_ip = 0,
.regs_offset = 0x1e00,
@@ -1434,7 +1565,7 @@ static const struct platform_device_id mxcnd_devtype[] = {
};
MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
-#ifdef CONFIG_OF_MTD
+#ifdef CONFIG_OF
static const struct of_device_id mxcnd_dt_ids[] = {
{
.compatible = "fsl,imx21-nand",
@@ -1513,6 +1644,8 @@ static int mxcnd_probe(struct platform_device *pdev)
this->read_word = mxc_nand_read_word;
this->write_buf = mxc_nand_write_buf;
this->read_buf = mxc_nand_read_buf;
+ this->onfi_set_features = mxc_nand_onfi_set_features;
+ this->onfi_get_features = mxc_nand_onfi_get_features;
host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk))
@@ -1533,6 +1666,8 @@ static int mxcnd_probe(struct platform_device *pdev)
if (err < 0)
return err;
+ this->setup_data_interface = host->devtype_data->setup_data_interface;
+
if (host->devtype_data->needs_ip) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 77533f7f2429..3bde96a3f7bf 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -745,7 +745,10 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
column >>= 1;
chip->cmd_ctrl(mtd, column, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
- chip->cmd_ctrl(mtd, column >> 8, ctrl);
+
+ /* Only output a single addr cycle for 8bits opcodes. */
+ if (!nand_opcode_8bits(command))
+ chip->cmd_ctrl(mtd, column >> 8, ctrl);
}
if (page_addr != -1) {
chip->cmd_ctrl(mtd, page_addr, ctrl);
@@ -948,6 +951,181 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
}
/**
+ * nand_reset_data_interface - Reset data interface and timings
+ * @chip: The NAND chip
+ *
+ * Reset the Data interface and timings to ONFI mode 0.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_reset_data_interface(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_data_interface *conf;
+ int ret;
+
+ if (!chip->setup_data_interface)
+ return 0;
+
+ /*
+ * The ONFI specification says:
+ * "
+ * To transition from NV-DDR or NV-DDR2 to the SDR data
+ * interface, the host shall use the Reset (FFh) command
+ * using SDR timing mode 0. A device in any timing mode is
+ * required to recognize Reset (FFh) command issued in SDR
+ * timing mode 0.
+ * "
+ *
+ * Configure the data interface in SDR mode and set the
+ * timings to timing mode 0.
+ */
+
+ conf = nand_get_default_data_interface();
+ ret = chip->setup_data_interface(mtd, conf, false);
+ if (ret)
+ pr_err("Failed to configure data interface to SDR timing mode 0\n");
+
+ return ret;
+}
+
+/**
+ * nand_setup_data_interface - Setup the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find and configure the best data interface and NAND timings supported by
+ * the chip and the driver.
+ * First tries to retrieve supported timing modes from ONFI information,
+ * and if the NAND chip does not support ONFI, relies on the
+ * ->onfi_timing_mode_default specified in the nand_ids table.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_setup_data_interface(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (!chip->setup_data_interface || !chip->data_interface)
+ return 0;
+
+ /*
+ * Ensure the timing mode has been changed on the chip side
+ * before changing timings on the controller side.
+ */
+ if (chip->onfi_version) {
+ u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
+ chip->onfi_timing_mode_default,
+ };
+
+ ret = chip->onfi_set_features(mtd, chip,
+ ONFI_FEATURE_ADDR_TIMING_MODE,
+ tmode_param);
+ if (ret)
+ goto err;
+ }
+
+ ret = chip->setup_data_interface(mtd, chip->data_interface, false);
+err:
+ return ret;
+}
+
+/**
+ * nand_init_data_interface - find the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find the best data interface and NAND timings supported by the chip
+ * and the driver.
+ * First tries to retrieve supported timing modes from ONFI information,
+ * and if the NAND chip does not support ONFI, relies on the
+ * ->onfi_timing_mode_default specified in the nand_ids table. After this
+ * function nand_chip->data_interface is initialized with the best timing mode
+ * available.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_init_data_interface(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int modes, mode, ret;
+
+ if (!chip->setup_data_interface)
+ return 0;
+
+ /*
+ * First try to identify the best timings from ONFI parameters and
+ * if the NAND does not support ONFI, fallback to the default ONFI
+ * timing mode.
+ */
+ modes = onfi_get_async_timing_mode(chip);
+ if (modes == ONFI_TIMING_MODE_UNKNOWN) {
+ if (!chip->onfi_timing_mode_default)
+ return 0;
+
+ modes = GENMASK(chip->onfi_timing_mode_default, 0);
+ }
+
+ chip->data_interface = kzalloc(sizeof(*chip->data_interface),
+ GFP_KERNEL);
+ if (!chip->data_interface)
+ return -ENOMEM;
+
+ for (mode = fls(modes) - 1; mode >= 0; mode--) {
+ ret = onfi_init_data_interface(chip, chip->data_interface,
+ NAND_SDR_IFACE, mode);
+ if (ret)
+ continue;
+
+ ret = chip->setup_data_interface(mtd, chip->data_interface,
+ true);
+ if (!ret) {
+ chip->onfi_timing_mode_default = mode;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void nand_release_data_interface(struct nand_chip *chip)
+{
+ kfree(chip->data_interface);
+}
+
+/**
+ * nand_reset - Reset and initialize a NAND device
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Returns 0 for success or negative error code otherwise
+ */
+int nand_reset(struct nand_chip *chip, int chipnr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_reset_data_interface(chip);
+ if (ret)
+ return ret;
+
+ /*
+ * The CS line has to be released before we can apply the new NAND
+ * interface settings, hence this weird ->select_chip() dance.
+ */
+ chip->select_chip(mtd, chipnr);
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->select_chip(mtd, -1);
+
+ chip->select_chip(mtd, chipnr);
+ ret = nand_setup_data_interface(chip);
+ chip->select_chip(mtd, -1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
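
nand_reset() and nand_setup_data_interface() above rely on the controller driver's ->setup_data_interface() callback. A minimal sketch of its expected shape (my_controller_apply_timings() is hypothetical; the mxc_nand hunk earlier in this patch is a real implementation):

    static int my_setup_data_interface(struct mtd_info *mtd,
                                       const struct nand_data_interface *conf,
                                       bool check_only)
    {
            const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

            if (IS_ERR(sdr))
                    return -ENOTSUPP;

            if (sdr->tRC_min < 30000)       /* e.g. controller cannot do EDO speeds */
                    return -EINVAL;

            if (check_only)                 /* probe support, touch nothing */
                    return 0;

            return my_controller_apply_timings(mtd, sdr);
    }
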
+
+/**
* __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
* @mtd: mtd info
* @ofs: offset to start unlock from
@@ -1016,8 +1194,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
- chip->select_chip(mtd, chipnr);
-
/*
* Reset the chip.
* If we want to check the WP through READ STATUS and check the bit 7
@@ -1025,7 +1201,9 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
* some operation can also clear the bit 7 of status register
* eg. erase/program a locked block
*/
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -1075,8 +1253,6 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
/* Shift to get chip number */
chipnr = ofs >> chip->chip_shift;
- chip->select_chip(mtd, chipnr);
-
/*
* Reset the chip.
* If we want to check the WP through READ STATUS and check the bit 7
@@ -1084,7 +1260,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
* some operation can also clear the bit 7 of status register
* eg. erase/program a locked block
*/
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -2162,7 +2340,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int ret = -ENOTSUPP;
+ int ret;
ops->retlen = 0;
@@ -2173,24 +2351,18 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- nand_get_device(mtd, FL_READING);
+ if (ops->mode != MTD_OPS_PLACE_OOB &&
+ ops->mode != MTD_OPS_AUTO_OOB &&
+ ops->mode != MTD_OPS_RAW)
+ return -ENOTSUPP;
- switch (ops->mode) {
- case MTD_OPS_PLACE_OOB:
- case MTD_OPS_AUTO_OOB:
- case MTD_OPS_RAW:
- break;
-
- default:
- goto out;
- }
+ nand_get_device(mtd, FL_READING);
if (!ops->datbuf)
ret = nand_do_read_oob(mtd, from, ops);
else
ret = nand_do_read_ops(mtd, from, ops);
-out:
nand_release_device(mtd);
return ret;
}
@@ -2777,10 +2949,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
}
chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
-
- /* Shift to get page */
- page = (int)(to >> chip->page_shift);
/*
* Reset the chip. Some chips (like the Toshiba TC5832DC found in one
@@ -2788,7 +2956,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
/* Check, if it is write protected */
if (nand_check_wp(mtd)) {
@@ -3191,8 +3364,7 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
if (!chip->controller) {
chip->controller = &chip->hwcontrol;
- spin_lock_init(&chip->controller->lock);
- init_waitqueue_head(&chip->controller->wq);
+ nand_hw_control_init(chip->controller);
}
}
@@ -3822,14 +3994,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
int i, maf_idx;
u8 id_data[8];
- /* Select the device */
- chip->select_chip(mtd, 0);
-
/*
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
* after power-up.
*/
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset(chip, 0);
+
+ /* Select the device */
+ chip->select_chip(mtd, 0);
/* Send the command for reading device ID */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
@@ -4113,6 +4285,9 @@ static int nand_dt_init(struct nand_chip *chip)
if (ecc_step > 0)
chip->ecc.size = ecc_step;
+ if (of_property_read_bool(dn, "nand-ecc-maximize"))
+ chip->ecc.options |= NAND_ECC_MAXIMIZE;
+
return 0;
}
@@ -4141,6 +4316,15 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
+ if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ /*
+ * The default ->select_chip() and ->cmdfunc() implementations
+ * both expect ->cmd_ctrl() to be populated, so we need to check
+ * that this is the case.
+ */
+ pr_err("chip.cmd_ctrl() callback is not provided\n");
+ return -EINVAL;
+ }
/* Set the default functions */
nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
@@ -4155,13 +4339,31 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
return PTR_ERR(type);
}
+ /* Initialize the ->data_interface field. */
+ ret = nand_init_data_interface(chip);
+ if (ret)
+ return ret;
+
+ /*
+ * Setup the data interface correctly on the chip and controller side.
+ * This explicit call to nand_setup_data_interface() is only required
+ * for the first die, because nand_reset() has been called before
+ * ->data_interface and ->onfi_timing_mode_default were set.
+ * For the other dies, nand_reset() will automatically switch to the
+ * best mode for us.
+ */
+ ret = nand_setup_data_interface(chip);
+ if (ret)
+ return ret;
+
chip->select_chip(mtd, -1);
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
- chip->select_chip(mtd, i);
/* See comment in nand_get_flash_type for reset */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset(chip, i);
+
+ chip->select_chip(mtd, i);
/* Send the command for reading device ID */
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
/* Read manufacturer and device IDs */
@@ -4221,6 +4423,7 @@ static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
ecc->write_page_raw = nand_write_page_raw;
ecc->read_oob = nand_read_oob_std;
ecc->write_oob = nand_write_oob_std;
+
/*
* Board driver should supply ecc.size and ecc.strength
* values to select how many bits are correctable.
@@ -4243,6 +4446,25 @@ static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
}
mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+
+ }
+
+ /*
+ * We can only maximize ECC config when the default layout is
+ * used, otherwise we don't know how many bytes can really be
+ * used.
+ */
+ if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
+ ecc->options & NAND_ECC_MAXIMIZE) {
+ int steps, bytes;
+
+ /* Always prefer 1k blocks over 512-byte ones */
+ ecc->size = 1024;
+ steps = mtd->writesize / ecc->size;
+
+ /* Reserve 2 bytes for the BBM */
+ bytes = (mtd->oobsize - 2) / steps;
+ ecc->strength = bytes * 8 / fls(8 * ecc->size);
}
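
Worked example for the NAND_ECC_MAXIMIZE computation above, assuming a common 4 KiB page with 224 bytes of OOB:

    int steps = 4096 / 1024;                  /* 4 ECC steps of 1k each */
    int bytes = (224 - 2) / steps;            /* 55 OOB bytes per step */
    int strength = bytes * 8 / fls(8 * 1024); /* 440 / 14 = 31 bits */
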
/* See nand_bch_init() for details. */
@@ -4601,18 +4823,16 @@ int nand_scan(struct mtd_info *mtd, int maxchips)
EXPORT_SYMBOL(nand_scan);
/**
- * nand_release - [NAND Interface] Free resources held by the NAND device
- * @mtd: MTD device structure
+ * nand_cleanup - [NAND Interface] Free resources held by the NAND device
+ * @chip: NAND chip object
*/
-void nand_release(struct mtd_info *mtd)
+void nand_cleanup(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
if (chip->ecc.mode == NAND_ECC_SOFT &&
chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
- mtd_device_unregister(mtd);
+ nand_release_data_interface(chip);
/* Free bad block table memory */
kfree(chip->bbt);
@@ -4624,6 +4844,18 @@ void nand_release(struct mtd_info *mtd)
& NAND_BBT_DYNAMICSTRUCT)
kfree(chip->badblock_pattern);
}
+EXPORT_SYMBOL_GPL(nand_cleanup);
+
+/**
+ * nand_release - [NAND Interface] Unregister the MTD device and free resources
+ * held by the NAND device
+ * @mtd: MTD device structure
+ */
+void nand_release(struct mtd_info *mtd)
+{
+ mtd_device_unregister(mtd);
+ nand_cleanup(mtd_to_nand(mtd));
+}
EXPORT_SYMBOL_GPL(nand_release);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 2fbb523df066..7695efea65f2 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -605,6 +605,100 @@ static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
}
/**
+ * get_bbt_block - Get the first valid eraseblock suitable to store a BBT
+ * @this: the NAND device
+ * @td: the BBT description
+ * @md: the mirror BBT descriptor
+ * @chip: the CHIP selector
+ *
+ * This function returns a positive block number pointing to a valid eraseblock
+ * suitable to store a BBT (i.e. in the range reserved for the BBT), or -ENOSPC
+ * if all blocks are already used or marked bad. If td->pages[chip] was already
+ * pointing to a valid block we re-use it; otherwise we search for the next
+ * valid one.
+ */
+static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md, int chip)
+{
+ int startblock, dir, page, numblocks, i;
+
+ /*
+ * There was already a version of the table, reuse the page. This
+ * applies for absolute placement too, as we have the page number in
+ * td->pages.
+ */
+ if (td->pages[chip] != -1)
+ return td->pages[chip] >>
+ (this->bbt_erase_shift - this->page_shift);
+
+ numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ if (!(td->options & NAND_BBT_PERCHIP))
+ numblocks *= this->numchips;
+
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+
+ /* Check, if the block is bad */
+ switch (bbt_get_entry(this, block)) {
+ case BBT_BLOCK_WORN:
+ case BBT_BLOCK_FACTORY_BAD:
+ continue;
+ }
+
+ page = block << (this->bbt_erase_shift - this->page_shift);
+
+ /* Check, if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ return block;
+ }
+
+ return -ENOSPC;
+}
+
+/**
+ * mark_bbt_block_bad - Mark one of the blocks reserved for the BBT as bad
+ * @this: the NAND device
+ * @td: the BBT description
+ * @chip: the CHIP selector
+ * @block: the BBT block to mark
+ *
+ * Blocks reserved for the BBT can become bad. This function is a helper to mark
+ * such blocks as bad. It takes care of updating the in-memory BBT, marking the
+ * block as bad using a bad block marker and invalidating the associated
+ * td->pages[] entry.
+ */
+static void mark_bbt_block_bad(struct nand_chip *this,
+ struct nand_bbt_descr *td,
+ int chip, int block)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ loff_t to;
+ int res;
+
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ to = (loff_t)block << this->bbt_erase_shift;
+ res = this->block_markbad(mtd, to);
+ if (res)
+ pr_warn("nand_bbt: error %d while marking block %d bad\n",
+ res, block);
+
+ td->pages[chip] = -1;
+}
+
+/**
* write_bbt - [GENERIC] (Re)write the bad block table
* @mtd: MTD device structure
* @buf: temporary buffer
@@ -621,7 +715,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
struct nand_chip *this = mtd_to_nand(mtd);
struct erase_info einfo;
int i, res, chip = 0;
- int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+ int bits, page, offs, numblocks, sft, sftmsk;
int nrchips, pageoffs, ooboffs;
uint8_t msk[4];
uint8_t rcode = td->reserved_block_code;
@@ -652,46 +746,21 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
}
/* Loop through the chips */
- for (; chip < nrchips; chip++) {
- /*
- * There was already a version of the table, reuse the page
- * This applies for absolute placement too, as we have the
- * page nr. in td->pages.
- */
- if (td->pages[chip] != -1) {
- page = td->pages[chip];
- goto write;
+ while (chip < nrchips) {
+ int block;
+
+ block = get_bbt_block(this, td, md, chip);
+ if (block < 0) {
+ pr_err("No space left to write bad block table\n");
+ res = block;
+ goto outerr;
}
/*
- * Automatic placement of the bad block table. Search direction
- * top -> down?
+ * get_bbt_block() returns a block number, shift the value to
+ * get a page number.
*/
- if (td->options & NAND_BBT_LASTBLOCK) {
- startblock = numblocks * (chip + 1) - 1;
- dir = -1;
- } else {
- startblock = chip * numblocks;
- dir = 1;
- }
-
- for (i = 0; i < td->maxblocks; i++) {
- int block = startblock + dir * i;
- /* Check, if the block is bad */
- switch (bbt_get_entry(this, block)) {
- case BBT_BLOCK_WORN:
- case BBT_BLOCK_FACTORY_BAD:
- continue;
- }
- page = block <<
- (this->bbt_erase_shift - this->page_shift);
- /* Check, if the block is used by the mirror table */
- if (!md || md->pages[chip] != page)
- goto write;
- }
- pr_err("No space left to write bad block table\n");
- return -ENOSPC;
- write:
+ page = block << (this->bbt_erase_shift - this->page_shift);
/* Set up shift count and masks for the flash table */
bits = td->options & NAND_BBT_NRBITS_MSK;
@@ -787,20 +856,28 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
einfo.addr = to;
einfo.len = 1 << this->bbt_erase_shift;
res = nand_erase_nand(mtd, &einfo, 1);
- if (res < 0)
- goto outerr;
+ if (res < 0) {
+ pr_warn("nand_bbt: error while erasing BBT block %d\n",
+ res);
+ mark_bbt_block_bad(this, td, chip, block);
+ continue;
+ }
res = scan_write_bbt(mtd, to, len, buf,
td->options & NAND_BBT_NO_OOB ? NULL :
&buf[len]);
- if (res < 0)
- goto outerr;
+ if (res < 0) {
+ pr_warn("nand_bbt: error while writing BBT block %d\n",
+ res);
+ mark_bbt_block_bad(this, td, chip, block);
+ continue;
+ }
pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
(unsigned long long)to, td->version[chip]);
/* Mark it as used */
- td->pages[chip] = page;
+ td->pages[chip++] = page;
}
return 0;
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index e81470a8ac67..13a587407be3 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -13,228 +13,246 @@
#include <linux/export.h>
#include <linux/mtd/nand.h>
-static const struct nand_sdr_timings onfi_sdr_timings[] = {
+static const struct nand_data_interface onfi_sdr_timings[] = {
/* Mode 0 */
{
- .tADL_min = 200000,
- .tALH_min = 20000,
- .tALS_min = 50000,
- .tAR_min = 25000,
- .tCEA_max = 100000,
- .tCEH_min = 20000,
- .tCH_min = 20000,
- .tCHZ_max = 100000,
- .tCLH_min = 20000,
- .tCLR_min = 20000,
- .tCLS_min = 50000,
- .tCOH_min = 0,
- .tCS_min = 70000,
- .tDH_min = 20000,
- .tDS_min = 40000,
- .tFEAT_max = 1000000,
- .tIR_min = 10000,
- .tITC_max = 1000000,
- .tRC_min = 100000,
- .tREA_max = 40000,
- .tREH_min = 30000,
- .tRHOH_min = 0,
- .tRHW_min = 200000,
- .tRHZ_max = 200000,
- .tRLOH_min = 0,
- .tRP_min = 50000,
- .tRST_max = 250000000000ULL,
- .tWB_max = 200000,
- .tRR_min = 40000,
- .tWC_min = 100000,
- .tWH_min = 30000,
- .tWHR_min = 120000,
- .tWP_min = 50000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tADL_min = 400000,
+ .tALH_min = 20000,
+ .tALS_min = 50000,
+ .tAR_min = 25000,
+ .tCEA_max = 100000,
+ .tCEH_min = 20000,
+ .tCH_min = 20000,
+ .tCHZ_max = 100000,
+ .tCLH_min = 20000,
+ .tCLR_min = 20000,
+ .tCLS_min = 50000,
+ .tCOH_min = 0,
+ .tCS_min = 70000,
+ .tDH_min = 20000,
+ .tDS_min = 40000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 10000,
+ .tITC_max = 1000000,
+ .tRC_min = 100000,
+ .tREA_max = 40000,
+ .tREH_min = 30000,
+ .tRHOH_min = 0,
+ .tRHW_min = 200000,
+ .tRHZ_max = 200000,
+ .tRLOH_min = 0,
+ .tRP_min = 50000,
+ .tRR_min = 40000,
+ .tRST_max = 250000000000ULL,
+ .tWB_max = 200000,
+ .tWC_min = 100000,
+ .tWH_min = 30000,
+ .tWHR_min = 120000,
+ .tWP_min = 50000,
+ .tWW_min = 100000,
+ },
},
/* Mode 1 */
{
- .tADL_min = 100000,
- .tALH_min = 10000,
- .tALS_min = 25000,
- .tAR_min = 10000,
- .tCEA_max = 45000,
- .tCEH_min = 20000,
- .tCH_min = 10000,
- .tCHZ_max = 50000,
- .tCLH_min = 10000,
- .tCLR_min = 10000,
- .tCLS_min = 25000,
- .tCOH_min = 15000,
- .tCS_min = 35000,
- .tDH_min = 10000,
- .tDS_min = 20000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 50000,
- .tREA_max = 30000,
- .tREH_min = 15000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 0,
- .tRP_min = 25000,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tWC_min = 45000,
- .tWH_min = 15000,
- .tWHR_min = 80000,
- .tWP_min = 25000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 25000,
+ .tAR_min = 10000,
+ .tCEA_max = 45000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 25000,
+ .tCOH_min = 15000,
+ .tCS_min = 35000,
+ .tDH_min = 10000,
+ .tDS_min = 20000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 50000,
+ .tREA_max = 30000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 25000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 45000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 25000,
+ .tWW_min = 100000,
+ },
},
/* Mode 2 */
{
- .tADL_min = 100000,
- .tALH_min = 10000,
- .tALS_min = 15000,
- .tAR_min = 10000,
- .tCEA_max = 30000,
- .tCEH_min = 20000,
- .tCH_min = 10000,
- .tCHZ_max = 50000,
- .tCLH_min = 10000,
- .tCLR_min = 10000,
- .tCLS_min = 15000,
- .tCOH_min = 15000,
- .tCS_min = 25000,
- .tDH_min = 5000,
- .tDS_min = 15000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 35000,
- .tREA_max = 25000,
- .tREH_min = 15000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 0,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tRP_min = 17000,
- .tWC_min = 35000,
- .tWH_min = 15000,
- .tWHR_min = 80000,
- .tWP_min = 17000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 15000,
+ .tAR_min = 10000,
+ .tCEA_max = 30000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 15000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 15000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 35000,
+ .tREA_max = 25000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tRP_min = 17000,
+ .tWC_min = 35000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 17000,
+ .tWW_min = 100000,
+ },
},
/* Mode 3 */
{
- .tADL_min = 100000,
- .tALH_min = 5000,
- .tALS_min = 10000,
- .tAR_min = 10000,
- .tCEA_max = 25000,
- .tCEH_min = 20000,
- .tCH_min = 5000,
- .tCHZ_max = 50000,
- .tCLH_min = 5000,
- .tCLR_min = 10000,
- .tCLS_min = 10000,
- .tCOH_min = 15000,
- .tCS_min = 25000,
- .tDH_min = 5000,
- .tDS_min = 10000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 30000,
- .tREA_max = 20000,
- .tREH_min = 10000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 0,
- .tRP_min = 15000,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tWC_min = 30000,
- .tWH_min = 10000,
- .tWHR_min = 80000,
- .tWP_min = 15000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 30000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 15000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 30000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 15000,
+ .tWW_min = 100000,
+ },
},
/* Mode 4 */
{
- .tADL_min = 70000,
- .tALH_min = 5000,
- .tALS_min = 10000,
- .tAR_min = 10000,
- .tCEA_max = 25000,
- .tCEH_min = 20000,
- .tCH_min = 5000,
- .tCHZ_max = 30000,
- .tCLH_min = 5000,
- .tCLR_min = 10000,
- .tCLS_min = 10000,
- .tCOH_min = 15000,
- .tCS_min = 20000,
- .tDH_min = 5000,
- .tDS_min = 10000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 25000,
- .tREA_max = 20000,
- .tREH_min = 10000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 5000,
- .tRP_min = 12000,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tWC_min = 25000,
- .tWH_min = 10000,
- .tWHR_min = 80000,
- .tWP_min = 12000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 20000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 25000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 12000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 25000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 12000,
+ .tWW_min = 100000,
+ },
},
/* Mode 5 */
{
- .tADL_min = 70000,
- .tALH_min = 5000,
- .tALS_min = 10000,
- .tAR_min = 10000,
- .tCEA_max = 25000,
- .tCEH_min = 20000,
- .tCH_min = 5000,
- .tCHZ_max = 30000,
- .tCLH_min = 5000,
- .tCLR_min = 10000,
- .tCLS_min = 10000,
- .tCOH_min = 15000,
- .tCS_min = 15000,
- .tDH_min = 5000,
- .tDS_min = 7000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 20000,
- .tREA_max = 16000,
- .tREH_min = 7000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 5000,
- .tRP_min = 10000,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tWC_min = 20000,
- .tWH_min = 7000,
- .tWHR_min = 80000,
- .tWP_min = 10000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 5000,
+ .tDS_min = 7000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 20000,
+ .tREA_max = 16000,
+ .tREH_min = 7000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 10000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 20000,
+ .tWH_min = 7000,
+ .tWHR_min = 80000,
+ .tWP_min = 10000,
+ .tWW_min = 100000,
+ },
},
};
@@ -248,6 +266,46 @@ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
return ERR_PTR(-EINVAL);
- return &onfi_sdr_timings[mode];
+ return &onfi_sdr_timings[mode].timings.sdr;
}
EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
+
+/**
+ * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
+ * a given ONFI mode
+ * @chip: The NAND chip
+ * @iface: The data interface to be initialized
+ * @type: The data interface type (only %NAND_SDR_IFACE is supported)
+ * @timing_mode: The ONFI timing mode
+ */
+int onfi_init_data_interface(struct nand_chip *chip,
+ struct nand_data_interface *iface,
+ enum nand_data_interface_type type,
+ int timing_mode)
+{
+ if (type != NAND_SDR_IFACE)
+ return -EINVAL;
+
+ if (timing_mode < 0 || timing_mode >= ARRAY_SIZE(onfi_sdr_timings))
+ return -EINVAL;
+
+ *iface = onfi_sdr_timings[timing_mode];
+
+ /*
+ * TODO: initialize timings that cannot be deduced from timing mode:
+ * tR, tPROG, tCCS, ...
+ * This information is part of the ONFI parameter page.
+ */
+
+ return 0;
+}
+EXPORT_SYMBOL(onfi_init_data_interface);
+
+/**
+ * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
+ * data interface for mode 0. This is used as the default timing after
+ * reset.
+ */
+const struct nand_data_interface *nand_get_default_data_interface(void)
+{
+ return &onfi_sdr_timings[0];
+}
+EXPORT_SYMBOL(nand_get_default_data_interface);
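As an illustration of the new API (a sketch, not part of the patch; onfi_get_async_timing_mode() and the fls() trick mirror code removed from sunxi_nand.c further down), a controller driver could derive its data interface from the chip's advertised ONFI modes and fall back to the mode 0 defaults:

	struct nand_data_interface iface;
	int mode = onfi_get_async_timing_mode(chip);

	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		/* No ONFI information: use the conservative mode 0 timings */
		iface = *nand_get_default_data_interface();
	} else {
		/* Supported modes are advertised as a bitmask; take the fastest */
		mode = fls(mode) - 1;
		if (onfi_init_data_interface(chip, &iface, NAND_SDR_IFACE, mode))
			return -EINVAL;
	}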
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 218c789ca7ab..28e6118362f7 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -218,8 +218,7 @@ static int ndfc_probe(struct platform_device *ofdev)
ndfc = &ndfc_ctrl[cs];
ndfc->chip_select = cs;
- spin_lock_init(&ndfc->ndfc_control.lock);
- init_waitqueue_head(&ndfc->ndfc_control.wq);
+ nand_hw_control_init(&ndfc->ndfc_control);
ndfc->ofdev = ofdev;
dev_set_drvdata(&ofdev->dev, ndfc);
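The conversions in this and the following drivers depend on the nand_hw_control_init() helper introduced alongside this series; its body is presumably equivalent to the two calls it replaces plus clearing the active chip pointer (a sketch, assuming the struct nand_hw_control fields):

	static inline void nand_hw_control_init(struct nand_hw_control *nfc)
	{
		nfc->active = NULL;
		spin_lock_init(&nfc->lock);
		init_waitqueue_head(&nfc->wq);
	}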
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 436dd6dc11f4..b121bf4ed73a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1810,8 +1810,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
chip->cmdfunc = nand_cmdfunc;
}
- spin_lock_init(&chip->controller->lock);
- init_waitqueue_head(&chip->controller->wq);
+ nand_hw_control_init(chip->controller);
info->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(&pdev->dev, "failed to get nand clock\n");
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index de7d28e62d4e..57d483ac5765 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -1957,8 +1957,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
INIT_LIST_HEAD(&nandc->desc_list);
INIT_LIST_HEAD(&nandc->host_list);
- spin_lock_init(&nandc->controller.lock);
- init_waitqueue_head(&nandc->controller.wq);
+ nand_hw_control_init(&nandc->controller);
return 0;
}
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d9309cf0ce2e..d459c19d78de 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -180,7 +180,7 @@ struct s3c2410_nand_info {
enum s3c_cpu_type cpu_type;
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
struct notifier_block freq_transition;
#endif
};
@@ -701,7 +701,7 @@ static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
/* cpufreq driver support */
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
@@ -977,8 +977,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- spin_lock_init(&info->controller.lock);
- init_waitqueue_head(&info->controller.wq);
+ nand_hw_control_init(&info->controller);
/* get the clock source and enable it */
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 6fa3bcd59769..442ce619b3b6 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -397,7 +397,7 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
struct dma_chan *chan;
enum dma_transfer_direction tr_dir;
dma_addr_t dma_addr;
- dma_cookie_t cookie = -EINVAL;
+ dma_cookie_t cookie;
uint32_t reg;
int ret;
@@ -423,6 +423,12 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
desc->callback = flctl_dma_complete;
desc->callback_param = flctl;
cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_warn(&flctl->pdev->dev,
+ "DMA submit failed, falling back to PIO\n");
+ goto out;
+ }
dma_async_issue_pending(chan);
} else {
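The error path added above follows the usual dmaengine convention: dmaengine_submit() returns a dma_cookie_t that is negative on failure, and dma_submit_error() maps it to zero or a negative errno. Stripped of driver context, the pattern looks like this (hypothetical helper name):

	/* Submit a prepared descriptor; a non-zero return means fall back to PIO. */
	static int submit_or_fallback(struct dma_chan *chan,
				      struct dma_async_tx_descriptor *desc)
	{
		dma_cookie_t cookie = dmaengine_submit(desc);
		int ret = dma_submit_error(cookie); /* 0 or negative errno */

		if (ret)
			return ret;

		dma_async_issue_pending(chan);
		return 0;
	}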
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index e414b31b71c1..8b8470c4e6d0 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1572,14 +1572,22 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
#define sunxi_nand_lookup_timing(l, p, c) \
_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
-static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
- const struct nand_sdr_timings *timings)
+static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd,
+ const struct nand_data_interface *conf,
+ bool check_only)
{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
+ const struct nand_sdr_timings *timings;
u32 min_clk_period = 0;
s32 tWB, tADL, tWHR, tRHW, tCAD;
long real_clk_rate;
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return -ENOTSUPP;
+
/* T1 <=> tCLS */
if (timings->tCLS_min > min_clk_period)
min_clk_period = timings->tCLS_min;
@@ -1679,6 +1687,9 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
return tRHW;
}
+ if (check_only)
+ return 0;
+
/*
* TODO: according to ONFI specs this value only applies for DDR NAND,
* but Allwinner seems to set this to 0x7. Mimic them for now.
@@ -1712,44 +1723,6 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
return 0;
}
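Note the check_only flag: the core may ask whether a configuration is achievable without applying it, so a ->setup_data_interface() hook validates first and programs the hardware only when check_only is false. A generic sketch (not the sunxi code):

	static int foo_setup_data_interface(struct mtd_info *mtd,
					    const struct nand_data_interface *conf,
					    bool check_only)
	{
		const struct nand_sdr_timings *timings = nand_get_sdr_timings(conf);

		if (IS_ERR(timings))
			return -ENOTSUPP; /* this controller only supports SDR */

		/* ... verify the controller can meet every timing in 'timings' ... */

		if (check_only)
			return 0; /* validation only: leave the registers alone */

		/* ... program the controller timing registers ... */
		return 0;
	}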
-static int sunxi_nand_chip_init_timings(struct sunxi_nand_chip *chip,
- struct device_node *np)
-{
- struct mtd_info *mtd = nand_to_mtd(&chip->nand);
- const struct nand_sdr_timings *timings;
- int ret;
- int mode;
-
- mode = onfi_get_async_timing_mode(&chip->nand);
- if (mode == ONFI_TIMING_MODE_UNKNOWN) {
- mode = chip->nand.onfi_timing_mode_default;
- } else {
- uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
- int i;
-
- mode = fls(mode) - 1;
- if (mode < 0)
- mode = 0;
-
- feature[0] = mode;
- for (i = 0; i < chip->nsels; i++) {
- chip->nand.select_chip(mtd, i);
- ret = chip->nand.onfi_set_features(mtd, &chip->nand,
- ONFI_FEATURE_ADDR_TIMING_MODE,
- feature);
- chip->nand.select_chip(mtd, -1);
- if (ret)
- return ret;
- }
- }
-
- timings = onfi_async_timing_mode_to_sdr_timings(mode);
- if (IS_ERR(timings))
- return PTR_ERR(timings);
-
- return sunxi_nand_chip_set_timings(chip, timings);
-}
-
static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
{
@@ -1814,6 +1787,35 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
int ret;
int i;
+ if (ecc->options & NAND_ECC_MAXIMIZE) {
+ int bytes;
+
+ ecc->size = 1024;
+ nsectors = mtd->writesize / ecc->size;
+
+ /* Reserve 2 bytes for the BBM */
+ bytes = (mtd->oobsize - 2) / nsectors;
+
+ /* 4 non-ECC bytes are added before each ECC section */
+ bytes -= 4;
+
+ /* and bytes has to be even. */
+ if (bytes % 2)
+ bytes--;
+
+ ecc->strength = bytes * 8 / fls(8 * ecc->size);
+
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ if (strengths[i] > ecc->strength)
+ break;
+ }
+
+ if (!i)
+ ecc->strength = 0;
+ else
+ ecc->strength = strengths[i - 1];
+ }
+
if (ecc->size != 512 && ecc->size != 1024)
return -EINVAL;
@@ -1975,7 +1977,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
struct device_node *np)
{
- const struct nand_sdr_timings *timings;
struct sunxi_nand_chip *chip;
struct mtd_info *mtd;
struct nand_chip *nand;
@@ -2065,25 +2066,11 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
nand->read_buf = sunxi_nfc_read_buf;
nand->write_buf = sunxi_nfc_write_buf;
nand->read_byte = sunxi_nfc_read_byte;
+ nand->setup_data_interface = sunxi_nfc_setup_data_interface;
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
- timings = onfi_async_timing_mode_to_sdr_timings(0);
- if (IS_ERR(timings)) {
- ret = PTR_ERR(timings);
- dev_err(dev,
- "could not retrieve timings for ONFI mode 0: %d\n",
- ret);
- return ret;
- }
-
- ret = sunxi_nand_chip_set_timings(chip, timings);
- if (ret) {
- dev_err(dev, "could not configure chip timings: %d\n", ret);
- return ret;
- }
-
ret = nand_scan_ident(mtd, nsels, NULL);
if (ret)
return ret;
@@ -2096,12 +2083,6 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
nand->options |= NAND_SUBPAGE_READ;
- ret = sunxi_nand_chip_init_timings(chip, np);
- if (ret) {
- dev_err(dev, "could not configure chip timings: %d\n", ret);
- return ret;
- }
-
ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
if (ret) {
dev_err(dev, "ECC init failed: %d\n", ret);
@@ -2175,8 +2156,7 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
return -ENOMEM;
nfc->dev = dev;
- spin_lock_init(&nfc->controller.lock);
- init_waitqueue_head(&nfc->controller.wq);
+ nand_hw_control_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
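To make the NAND_ECC_MAXIMIZE arithmetic above concrete, take a hypothetical chip with an 8 KiB page and 744 OOB bytes: nsectors = 8192 / 1024 = 8, bytes = (744 - 2) / 8 - 4 = 88 (already even), so the theoretical strength is 88 * 8 / fls(8 * 1024) = 704 / 14 = 50 bits per 1024-byte ECC step, which the loop then rounds down to the largest entry of strengths[] the controller actually supports.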
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 04d63f56baa4..0a14fda2e41b 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -303,8 +303,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
(gbusclk + 500000) / 1000000, hold, spw);
- spin_lock_init(&drvdata->hw_control.lock);
- init_waitqueue_head(&drvdata->hw_control.wq);
+ nand_hw_control_init(&drvdata->hw_control);
platform_set_drvdata(dev, drvdata);
txx9ndfmc_initialize(dev);
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 903becd31410..93ceea4f27d5 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -91,9 +91,132 @@
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
-/* Temporary variables used during scanning */
-static struct ubi_ec_hdr *ech;
-static struct ubi_vid_hdr *vidh;
+#define AV_FIND BIT(0)
+#define AV_ADD BIT(1)
+#define AV_FIND_OR_ADD (AV_FIND | AV_ADD)
+
+/**
+ * find_or_add_av - internal function to find a volume, add a volume or do
+ * both (find and add if missing).
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ * @flags: a combination of the %AV_FIND and %AV_ADD flags describing the
+ * expected operation. If only %AV_ADD is set, -EEXIST is returned
+ * if the volume already exists. If only %AV_FIND is set, NULL is
+ * returned if the volume does not exist. And if both flags are
+ * set, the helper first tries to find an existing volume, and if
+ * it does not exist it creates a new one.
+ * @created: output parameter used to inform the caller whether it's a newly
+ * created volume or not.
+ *
+ * This function returns a pointer to a volume description or an ERR_PTR if
+ * the operation failed. It can also return NULL if only %AV_FIND is set and
+ * the volume does not exist.
+ */
+static struct ubi_ainf_volume *find_or_add_av(struct ubi_attach_info *ai,
+ int vol_id, unsigned int flags,
+ bool *created)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ /* Walk the volume RB-tree to see whether this volume is already present */
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id == av->vol_id) {
+ *created = false;
+
+ if (!(flags & AV_FIND))
+ return ERR_PTR(-EEXIST);
+
+ return av;
+ }
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ if (!(flags & AV_ADD))
+ return NULL;
+
+ /* The volume is absent - add it */
+ av = kzalloc(sizeof(*av), GFP_KERNEL);
+ if (!av)
+ return ERR_PTR(-ENOMEM);
+
+ av->vol_id = vol_id;
+
+ if (vol_id > ai->highest_vol_id)
+ ai->highest_vol_id = vol_id;
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+ ai->vols_found += 1;
+ *created = true;
+ dbg_bld("added volume %d", vol_id);
+ return av;
+}
+
+/**
+ * ubi_find_or_add_av - search for a volume in the attaching information and
+ * add one if it does not exist.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ * @created: whether the volume has been created or not
+ *
+ * This function returns a pointer to the new volume description or an
+ * ERR_PTR if the operation failed.
+ */
+static struct ubi_ainf_volume *ubi_find_or_add_av(struct ubi_attach_info *ai,
+ int vol_id, bool *created)
+{
+ return find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, created);
+}
+
+/**
+ * ubi_alloc_aeb - allocate an aeb element
+ * @ai: attaching information
+ * @pnum: physical eraseblock number
+ * @ec: erase counter of the physical eraseblock
+ *
+ * Allocate an aeb object and initialize the pnum and ec information.
+ * vol_id and lnum are set to UBI_UNKNOWN, and the other fields are
+ * initialized to zero.
+ * Note that the element is not added to any list or RB tree.
+ */
+struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
+ int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return NULL;
+
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ aeb->vol_id = UBI_UNKNOWN;
+ aeb->lnum = UBI_UNKNOWN;
+
+ return aeb;
+}
+
+/**
+ * ubi_free_aeb - free an aeb element
+ * @ai: attaching information
+ * @aeb: the element to free
+ *
+ * Free an aeb object. The caller must have removed the element from any list
+ * or RB tree.
+ */
+void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb)
+{
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+}
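The flag combinations accepted by find_or_add_av() yield three distinct behaviours, which the wrappers in this file map onto (a summary sketch):

	av = find_or_add_av(ai, vol_id, AV_FIND, &created);        /* NULL if absent */
	av = find_or_add_av(ai, vol_id, AV_ADD, &created);         /* -EEXIST if present */
	av = find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, &created); /* find, else create */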
/**
* add_to_list - add physical eraseblock to a list.
@@ -131,14 +254,12 @@ static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
} else
BUG();
- aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
- aeb->pnum = pnum;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
- aeb->ec = ec;
if (to_head)
list_add(&aeb->u.list, list);
else
@@ -163,13 +284,11 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
- aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
ai->corr_peb_count += 1;
- aeb->pnum = pnum;
- aeb->ec = ec;
list_add(&aeb->u.list, &ai->corr);
return 0;
}
@@ -192,14 +311,12 @@ static int add_fastmap(struct ubi_attach_info *ai, int pnum,
{
struct ubi_ainf_peb *aeb;
- aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
- aeb->pnum = pnum;
- aeb->vol_id = be32_to_cpu(vidh->vol_id);
- aeb->sqnum = be64_to_cpu(vidh->sqnum);
- aeb->ec = ec;
+ aeb->vol_id = be32_to_cpu(vid_hdr->vol_id);
+ aeb->sqnum = be64_to_cpu(vid_hdr->sqnum);
list_add(&aeb->u.list, &ai->fastmap);
dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
@@ -294,44 +411,20 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
const struct ubi_vid_hdr *vid_hdr)
{
struct ubi_ainf_volume *av;
- struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ bool created;
ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
- /* Walk the volume RB-tree to look if this volume is already present */
- while (*p) {
- parent = *p;
- av = rb_entry(parent, struct ubi_ainf_volume, rb);
-
- if (vol_id == av->vol_id)
- return av;
-
- if (vol_id > av->vol_id)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
-
- /* The volume is absent - add it */
- av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
- if (!av)
- return ERR_PTR(-ENOMEM);
+ av = ubi_find_or_add_av(ai, vol_id, &created);
+ if (IS_ERR(av) || !created)
+ return av;
- av->highest_lnum = av->leb_count = 0;
- av->vol_id = vol_id;
- av->root = RB_ROOT;
av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
av->data_pad = be32_to_cpu(vid_hdr->data_pad);
av->compat = vid_hdr->compat;
av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
: UBI_STATIC_VOLUME;
- if (vol_id > ai->highest_vol_id)
- ai->highest_vol_id = vol_id;
- rb_link_node(&av->rb, parent, p);
- rb_insert_color(&av->rb, &ai->volumes);
- ai->vols_found += 1;
- dbg_bld("added volume %d", vol_id);
return av;
}
@@ -360,7 +453,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
{
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
- struct ubi_vid_hdr *vh = NULL;
+ struct ubi_vid_io_buf *vidb = NULL;
unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
if (sqnum2 == aeb->sqnum) {
@@ -403,12 +496,12 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
return bitflips << 1;
}
- vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
- if (!vh)
+ vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!vidb)
return -ENOMEM;
pnum = aeb->pnum;
- err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
if (err) {
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
@@ -422,7 +515,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
}
}
- vid_hdr = vh;
+ vid_hdr = ubi_get_vid_hdr(vidb);
}
/* Read the data of the copy and check the CRC */
@@ -448,7 +541,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
}
mutex_unlock(&ubi->buf_mutex);
- ubi_free_vid_hdr(ubi, vh);
+ ubi_free_vid_buf(vidb);
if (second_is_newer)
dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
@@ -460,7 +553,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
out_unlock:
mutex_unlock(&ubi->buf_mutex);
out_free_vidh:
- ubi_free_vid_hdr(ubi, vh);
+ ubi_free_vid_buf(vidb);
return err;
}
@@ -605,12 +698,10 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
if (err)
return err;
- aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
- aeb->ec = ec;
- aeb->pnum = pnum;
aeb->vol_id = vol_id;
aeb->lnum = lnum;
aeb->scrub = bitflips;
@@ -629,6 +720,21 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
}
/**
+ * ubi_add_av - add volume to the attaching information.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the new volume description or an
+ * ERR_PTR if the operation failed.
+ */
+struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id)
+{
+ bool created;
+
+ return find_or_add_av(ai, vol_id, AV_ADD, &created);
+}
+
+/**
* ubi_find_av - find volume in the attaching information.
* @ai: attaching information
* @vol_id: the requested volume ID
@@ -639,24 +745,15 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
int vol_id)
{
- struct ubi_ainf_volume *av;
- struct rb_node *p = ai->volumes.rb_node;
-
- while (p) {
- av = rb_entry(p, struct ubi_ainf_volume, rb);
-
- if (vol_id == av->vol_id)
- return av;
-
- if (vol_id > av->vol_id)
- p = p->rb_left;
- else
- p = p->rb_right;
- }
+ bool created;
- return NULL;
+ return find_or_add_av((struct ubi_attach_info *)ai, vol_id, AV_FIND,
+ &created);
}
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
+ struct list_head *list);
+
/**
* ubi_remove_av - delete attaching information about a volume.
* @ai: attaching information
@@ -664,19 +761,10 @@ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
*/
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
- struct rb_node *rb;
- struct ubi_ainf_peb *aeb;
-
dbg_bld("remove attaching information about volume %d", av->vol_id);
- while ((rb = rb_first(&av->root))) {
- aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
- rb_erase(&aeb->u.rb, &av->root);
- list_add_tail(&aeb->u.list, &ai->erase);
- }
-
rb_erase(&av->rb, &ai->volumes);
- kfree(av);
+ destroy_av(ai, av, &ai->erase);
ai->vols_found -= 1;
}
@@ -866,6 +954,9 @@ static bool vol_ignored(int vol_id)
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
int pnum, bool fast)
{
+ struct ubi_ec_hdr *ech = ai->ech;
+ struct ubi_vid_io_buf *vidb = ai->vidb;
+ struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
long long ec;
int err, bitflips = 0, vol_id = -1, ec_err = 0;
@@ -963,7 +1054,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
/* OK, we've done with the EC header, let's look at the VID header */
- err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
if (err < 0)
return err;
switch (err) {
@@ -1191,10 +1282,12 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
* destroy_av - free volume attaching information.
* @av: volume attaching information
* @ai: attaching information
+ * @list: list to move the aeb elements to if not NULL, otherwise free them
*
* This function destroys the volume attaching information.
*/
-static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av,
+ struct list_head *list)
{
struct ubi_ainf_peb *aeb;
struct rb_node *this = av->root.rb_node;
@@ -1214,7 +1307,10 @@ static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
this->rb_right = NULL;
}
- kmem_cache_free(ai->aeb_slab_cache, aeb);
+ if (list)
+ list_add_tail(&aeb->u.list, list);
+ else
+ ubi_free_aeb(ai, aeb);
}
}
kfree(av);
@@ -1232,23 +1328,23 @@ static void destroy_ai(struct ubi_attach_info *ai)
list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
list_del(&aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, aeb);
+ ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
list_del(&aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, aeb);
+ ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
list_del(&aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, aeb);
+ ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
list_del(&aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, aeb);
+ ubi_free_aeb(ai, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
list_del(&aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, aeb);
+ ubi_free_aeb(ai, aeb);
}
/* Destroy the volume RB-tree */
@@ -1269,7 +1365,7 @@ static void destroy_ai(struct ubi_attach_info *ai)
rb->rb_right = NULL;
}
- destroy_av(ai, av);
+ destroy_av(ai, av, NULL);
}
}
@@ -1297,12 +1393,12 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
err = -ENOMEM;
- ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
- if (!ech)
+ ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ai->ech)
return err;
- vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
- if (!vidh)
+ ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!ai->vidb)
goto out_ech;
for (pnum = start; pnum < ubi->peb_count; pnum++) {
@@ -1351,15 +1447,15 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (err)
goto out_vidh;
- ubi_free_vid_hdr(ubi, vidh);
- kfree(ech);
+ ubi_free_vid_buf(ai->vidb);
+ kfree(ai->ech);
return 0;
out_vidh:
- ubi_free_vid_hdr(ubi, vidh);
+ ubi_free_vid_buf(ai->vidb);
out_ech:
- kfree(ech);
+ kfree(ai->ech);
return err;
}
@@ -1411,12 +1507,12 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
if (!scan_ai)
goto out;
- ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
- if (!ech)
+ scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!scan_ai->ech)
goto out_ai;
- vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
- if (!vidh)
+ scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!scan_ai->vidb)
goto out_ech;
for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
@@ -1428,8 +1524,8 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
goto out_vidh;
}
- ubi_free_vid_hdr(ubi, vidh);
- kfree(ech);
+ ubi_free_vid_buf(scan_ai->vidb);
+ kfree(scan_ai->ech);
if (scan_ai->force_full_scan)
err = UBI_NO_FASTMAP;
@@ -1449,9 +1545,9 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
return err;
out_vidh:
- ubi_free_vid_hdr(ubi, vidh);
+ ubi_free_vid_buf(scan_ai->vidb);
out_ech:
- kfree(ech);
+ kfree(scan_ai->ech);
out_ai:
destroy_ai(scan_ai);
out:
@@ -1573,6 +1669,8 @@ out_ai:
*/
static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
+ struct ubi_vid_io_buf *vidb = ai->vidb;
+ struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb);
int pnum, err, vols_found = 0;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
@@ -1708,7 +1806,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
last_aeb = aeb;
- err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
+ err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err(ubi, "VID header is not OK (%d)",
err);
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index ebf46ad2d513..d1e6931c132f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -351,7 +351,6 @@ static int ubiblock_init_request(void *data, struct request *req,
static struct blk_mq_ops ubiblock_mq_ops = {
.queue_rq = ubiblock_queue_rq,
.init_request = ubiblock_init_request,
- .map_queue = blk_mq_map_queue,
};
static DEFINE_IDR(ubiblock_minor_idr);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 0680516bb472..85d54f37e28f 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -574,7 +574,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
for (i = ubi->vtbl_slots;
i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- kfree(ubi->volumes[i]->eba_tbl);
+ ubi_eba_replace_table(ubi->volumes[i], NULL);
kfree(ubi->volumes[i]);
}
}
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index ee2b74d1d1b5..45c329694a5e 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -416,7 +416,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
}
rsvd_bytes = (long long)vol->reserved_pebs *
- ubi->leb_size-vol->data_pad;
+ vol->usable_leb_size;
if (bytes < 0 || bytes > rsvd_bytes) {
err = -EINVAL;
break;
@@ -454,7 +454,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
/* Validate the request */
err = -EINVAL;
- if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+ if (!ubi_leb_valid(vol, req.lnum) ||
req.bytes < 0 || req.bytes > vol->usable_leb_size)
break;
@@ -485,7 +485,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- if (lnum < 0 || lnum >= vol->reserved_pebs) {
+ if (!ubi_leb_valid(vol, lnum)) {
err = -EINVAL;
break;
}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index ebf517271d29..388e46be6ad9 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -50,6 +50,30 @@
#define EBA_RESERVED_PEBS 1
/**
+ * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
+ * @pnum: the physical eraseblock number attached to the LEB
+ *
+ * This structure encodes a LEB -> PEB association. Note that the LEB
+ * number is not stored here, because it is the index used to access the
+ * entries table.
+ */
+struct ubi_eba_entry {
+ int pnum;
+};
+
+/**
+ * struct ubi_eba_table - LEB -> PEB association information
+ * @entries: the LEB to PEB mapping (one entry per LEB).
+ *
+ * This structure is private to the EBA logic and should be kept here.
+ * It encodes the LEB to PEB association table and is subject to change.
+ */
+struct ubi_eba_table {
+ struct ubi_eba_entry *entries;
+};
+
+/**
* next_sqnum - get next sequence number.
* @ubi: UBI device description object
*
@@ -84,6 +108,110 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
}
/**
+ * ubi_eba_get_ldesc - get information about a LEB
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @ldesc: the LEB descriptor to fill
+ *
+ * Used to query information about a specific LEB.
+ * It currently only returns the physical position of the LEB, but will be
+ * extended to provide more information.
+ */
+void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
+ struct ubi_eba_leb_desc *ldesc)
+{
+ ldesc->lnum = lnum;
+ ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
+}
+
+/**
+ * ubi_eba_create_table - allocate a new EBA table and initialize it with all
+ * LEBs unmapped
+ * @vol: volume the EBA table is created for
+ * @nentries: number of entries in the table
+ *
+ * Allocate a new EBA table and initialize it with all LEBs unmapped.
+ * Returns a valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
+ int nentries)
+{
+ struct ubi_eba_table *tbl;
+ int err = -ENOMEM;
+ int i;
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return ERR_PTR(-ENOMEM);
+
+ tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
+ GFP_KERNEL);
+ if (!tbl->entries)
+ goto err;
+
+ for (i = 0; i < nentries; i++)
+ tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
+
+ return tbl;
+
+err:
+ kfree(tbl->entries);
+ kfree(tbl);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * ubi_eba_destroy_table - destroy an EBA table
+ * @tbl: the table to destroy
+ *
+ * Destroy an EBA table.
+ */
+void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
+{
+ if (!tbl)
+ return;
+
+ kfree(tbl->entries);
+ kfree(tbl);
+}
+
+/**
+ * ubi_eba_copy_table - copy the EBA table attached to vol into another table
+ * @vol: volume containing the EBA table to copy
+ * @dst: destination
+ * @nentries: number of entries to copy
+ *
+ * Copy the EBA table stored in @vol into the one pointed to by @dst.
+ */
+void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
+ int nentries)
+{
+ struct ubi_eba_table *src;
+ int i;
+
+ ubi_assert(dst && vol && vol->eba_tbl);
+
+ src = vol->eba_tbl;
+
+ for (i = 0; i < nentries; i++)
+ dst->entries[i].pnum = src->entries[i].pnum;
+}
+
+/**
+ * ubi_eba_replace_table - assign a new EBA table to a volume
+ * @vol: volume whose EBA table should be replaced
+ * @tbl: new EBA table
+ *
+ * Assign a new EBA table to the volume and release the old one.
+ */
+void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
+{
+ ubi_eba_destroy_table(vol->eba_tbl);
+ vol->eba_tbl = tbl;
+}
+
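Together these helpers define the intended lifecycle for swapping a volume's table, e.g. on resize. A sketch of the expected call sequence (new_reserved_pebs is a hypothetical variable):

	struct ubi_eba_table *new_tbl;

	new_tbl = ubi_eba_create_table(vol, new_reserved_pebs);
	if (IS_ERR(new_tbl))
		return PTR_ERR(new_tbl);

	/* Preserve the old LEB -> PEB entries, then adopt the new table. */
	ubi_eba_copy_table(vol, new_tbl, vol->reserved_pebs);
	ubi_eba_replace_table(vol, new_tbl);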
+/**
* ltree_lookup - look up the lock tree.
* @ubi: UBI device description object
* @vol_id: volume ID
@@ -312,6 +440,18 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
}
/**
+ * ubi_eba_is_mapped - check if a LEB is mapped.
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ *
+ * This function returns true if the LEB is mapped, false otherwise.
+ */
+bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
+{
+ return vol->eba_tbl->entries[lnum].pnum >= 0;
+}
+
+/**
* ubi_eba_unmap_leb - un-map logical eraseblock.
* @ubi: UBI device description object
* @vol: volume description object
@@ -333,7 +473,7 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
if (err)
return err;
- pnum = vol->eba_tbl[lnum];
+ pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum < 0)
/* This logical eraseblock is already unmapped */
goto out_unlock;
@@ -341,7 +481,7 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
down_read(&ubi->fm_eba_sem);
- vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
+ vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
up_read(&ubi->fm_eba_sem);
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
@@ -373,6 +513,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check)
{
int err, pnum, scrub = 0, vol_id = vol->vol_id;
+ struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
uint32_t uninitialized_var(crc);
@@ -380,7 +521,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
if (err)
return err;
- pnum = vol->eba_tbl[lnum];
+ pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum < 0) {
/*
* The logical eraseblock is not mapped, fill the whole buffer
@@ -403,13 +544,15 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
retry:
if (check) {
- vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
- if (!vid_hdr) {
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb) {
err = -ENOMEM;
goto out_unlock;
}
- err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
if (err > 0) {
/*
@@ -455,7 +598,7 @@ retry:
ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
crc = be32_to_cpu(vid_hdr->data_crc);
- ubi_free_vid_hdr(ubi, vid_hdr);
+ ubi_free_vid_buf(vidb);
}
err = ubi_io_read_data(ubi, buf, pnum, offset, len);
@@ -492,7 +635,7 @@ retry:
return err;
out_free:
- ubi_free_vid_hdr(ubi, vid_hdr);
+ ubi_free_vid_buf(vidb);
out_unlock:
leb_read_unlock(ubi, vol_id, lnum);
return err;
@@ -554,52 +697,51 @@ int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
}
/**
- * recover_peb - recover from write failure.
- * @ubi: UBI device description object
+ * try_recover_peb - try to recover from write failure.
+ * @vol: volume description object
* @pnum: the physical eraseblock to recover
- * @vol_id: volume ID
* @lnum: logical eraseblock number
* @buf: data which was not written because of the write failure
* @offset: offset of the failed write
* @len: how many bytes should have been written
+ * @vidb: VID buffer
+ * @retry: whether the caller should retry in case of failure
*
* This function is called in case of a write failure and moves all good data
* from the potentially bad physical eraseblock to a good physical eraseblock.
* This function also writes the data which was not written due to the failure.
- * Returns new physical eraseblock number in case of success, and a negative
- * error code in case of failure.
+ * Returns 0 in case of success, and a negative error code in case of failure.
+ * In case of failure, the %retry parameter is set to false if this is a fatal
+ * error (retrying won't help), and true otherwise.
*/
-static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
- const void *buf, int offset, int len)
+static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
+ const void *buf, int offset, int len,
+ struct ubi_vid_io_buf *vidb, bool *retry)
{
- int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
- struct ubi_volume *vol = ubi->volumes[idx];
+ struct ubi_device *ubi = vol->ubi;
struct ubi_vid_hdr *vid_hdr;
+ int new_pnum, err, vol_id = vol->vol_id, data_size;
uint32_t crc;
- vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
- if (!vid_hdr)
- return -ENOMEM;
+ *retry = false;
-retry:
new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- up_read(&ubi->fm_eba_sem);
- return new_pnum;
+ err = new_pnum;
+ goto out_put;
}
ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
pnum, new_pnum);
- err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
if (err && err != UBI_IO_BITFLIPS) {
if (err > 0)
err = -EIO;
- up_read(&ubi->fm_eba_sem);
goto out_put;
}
+ vid_hdr = ubi_get_vid_hdr(vidb);
ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
mutex_lock(&ubi->buf_mutex);
@@ -608,12 +750,12 @@ retry:
/* Read everything before the area where the write failure happened */
if (offset > 0) {
err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
- if (err && err != UBI_IO_BITFLIPS) {
- up_read(&ubi->fm_eba_sem);
+ if (err && err != UBI_IO_BITFLIPS)
goto out_unlock;
- }
}
+ *retry = true;
+
memcpy(ubi->peb_buf + offset, buf, len);
data_size = offset + len;
@@ -622,50 +764,140 @@ retry:
vid_hdr->copy_flag = 1;
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
- err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
- if (err) {
- mutex_unlock(&ubi->buf_mutex);
- up_read(&ubi->fm_eba_sem);
- goto write_error;
- }
+ err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
+ if (err)
+ goto out_unlock;
err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
- if (err) {
- mutex_unlock(&ubi->buf_mutex);
- up_read(&ubi->fm_eba_sem);
- goto write_error;
- }
+out_unlock:
mutex_unlock(&ubi->buf_mutex);
- ubi_free_vid_hdr(ubi, vid_hdr);
- vol->eba_tbl[lnum] = new_pnum;
+ if (!err)
+ vol->eba_tbl->entries[lnum].pnum = new_pnum;
+
+out_put:
up_read(&ubi->fm_eba_sem);
- ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
- ubi_msg(ubi, "data was successfully recovered");
- return 0;
+ if (!err) {
+ ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ ubi_msg(ubi, "data was successfully recovered");
+ } else if (new_pnum >= 0) {
+ /*
+ * Bad luck? This physical eraseblock is bad too? Crud. Let's
+ * try to get another one.
+ */
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
+ ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
+ }
-out_unlock:
- mutex_unlock(&ubi->buf_mutex);
-out_put:
- ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
- ubi_free_vid_hdr(ubi, vid_hdr);
return err;
+}
-write_error:
- /*
- * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
- * get another one.
- */
- ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
- ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
- if (++tries > UBI_IO_RETRIES) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- return err;
+/**
+ * recover_peb - recover from write failure.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to recover
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ * @buf: data which was not written because of the write failure
+ * @offset: offset of the failed write
+ * @len: how many bytes should have been written
+ *
+ * This function is called in case of a write failure and moves all good data
+ * from the potentially bad physical eraseblock to a good physical eraseblock.
+ * This function also writes the data which was not written due to the failure.
+ * Returns 0 in case of success, and a negative error code in case of failure.
+ * This function tries %UBI_IO_RETRIES times before giving up.
+ */
+static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
+ const void *buf, int offset, int len)
+{
+ int err, idx = vol_id2idx(ubi, vol_id), tries;
+ struct ubi_volume *vol = ubi->volumes[idx];
+ struct ubi_vid_io_buf *vidb;
+
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
+ return -ENOMEM;
+
+ for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+ bool retry;
+
+ err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
+ &retry);
+ if (!err || !retry)
+ break;
+
+ ubi_msg(ubi, "try again");
}
- ubi_msg(ubi, "try again");
- goto retry;
+
+ ubi_free_vid_buf(vidb);
+
+ return err;
+}
+
+/**
+ * try_write_vid_and_data - try to write VID header and data to a new PEB.
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @vidb: the VID buffer to write
+ * @buf: buffer containing the data
+ * @offset: where to start writing data
+ * @len: how many bytes should be written
+ *
+ * This function tries to write VID header and data belonging to logical
+ * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
+ * in case of success and a negative error code in case of failure.
+ * In case of error, it is possible that something was still written to the
+ * flash media, but it may be garbage.
+ */
+static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
+ struct ubi_vid_io_buf *vidb, const void *buf,
+ int offset, int len)
+{
+ struct ubi_device *ubi = vol->ubi;
+ int pnum, opnum, err, vol_id = vol->vol_id;
+
+ pnum = ubi_wl_get_peb(ubi);
+ if (pnum < 0) {
+ err = pnum;
+ goto out_put;
+ }
+
+ opnum = vol->eba_tbl->entries[lnum].pnum;
+
+ dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
+ if (err) {
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ goto out_put;
+ }
+
+ if (len) {
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn(ubi,
+ "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+ goto out_put;
+ }
+ }
+
+ vol->eba_tbl->entries[lnum].pnum = pnum;
+
+out_put:
+ up_read(&ubi->fm_eba_sem);
+
+ if (err && pnum >= 0)
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ else if (!err && opnum >= 0)
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
+
+ return err;
}
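All three write paths below now share the same bounded retry idiom around try_write_vid_and_data(); in isolation it reads:

	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
		err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
		if (err != -EIO || !ubi->bad_allowed)
			break; /* success, non-EIO failure, or no bad-PEB handling */

		/* Each retry targets a fresh PEB, which needs a fresh sqnum. */
		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
	}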
/**
@@ -681,11 +913,13 @@ write_error:
* @vol. Returns zero in case of success and a negative error code in case
* of failure. In case of error, it is possible that something was still
* written to the flash media, but it may be garbage.
+ * This function retries %UBI_IO_RETRIES times before giving up.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len)
{
- int err, pnum, tries = 0, vol_id = vol->vol_id;
+ int err, pnum, tries, vol_id = vol->vol_id;
+ struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
if (ubi->ro_mode)
@@ -695,7 +929,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
if (err)
return err;
- pnum = vol->eba_tbl[lnum];
+ pnum = vol->eba_tbl->entries[lnum].pnum;
if (pnum >= 0) {
dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
@@ -706,23 +940,23 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
if (err == -EIO && ubi->bad_allowed)
err = recover_peb(ubi, pnum, vol_id, lnum, buf,
offset, len);
- if (err)
- ubi_ro_mode(ubi);
}
- leb_write_unlock(ubi, vol_id, lnum);
- return err;
+
+ goto out;
}
/*
* The logical eraseblock is not mapped. We have to get a free physical
* eraseblock and write the volume identifier header there first.
*/
- vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
- if (!vid_hdr) {
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb) {
leb_write_unlock(ubi, vol_id, lnum);
return -ENOMEM;
}
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
vid_hdr->vol_type = UBI_VID_DYNAMIC;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
@@ -730,67 +964,30 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
-retry:
- pnum = ubi_wl_get_peb(ubi);
- if (pnum < 0) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- leb_write_unlock(ubi, vol_id, lnum);
- up_read(&ubi->fm_eba_sem);
- return pnum;
- }
-
- dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
- len, offset, vol_id, lnum, pnum);
-
- err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
- if (err) {
- ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
- vol_id, lnum, pnum);
- up_read(&ubi->fm_eba_sem);
- goto write_error;
- }
+ for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+ err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
+ if (err != -EIO || !ubi->bad_allowed)
+ break;
- if (len) {
- err = ubi_io_write_data(ubi, buf, pnum, offset, len);
- if (err) {
- ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
- len, offset, vol_id, lnum, pnum);
- up_read(&ubi->fm_eba_sem);
- goto write_error;
- }
+ /*
+ * Fortunately, this is the first write operation to this
+ * physical eraseblock, so just put it and request a new one.
+ * We assume that if this physical eraseblock went bad, the
+ * erase code will handle that.
+ */
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
}
- vol->eba_tbl[lnum] = pnum;
- up_read(&ubi->fm_eba_sem);
-
- leb_write_unlock(ubi, vol_id, lnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return 0;
+ ubi_free_vid_buf(vidb);
-write_error:
- if (err != -EIO || !ubi->bad_allowed) {
+out:
+ if (err)
ubi_ro_mode(ubi);
- leb_write_unlock(ubi, vol_id, lnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return err;
- }
- /*
- * Fortunately, this is the first write operation to this physical
- * eraseblock, so just put it and request a new one. We assume that if
- * this physical eraseblock went bad, the erase code will handle that.
- */
- err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
- if (err || ++tries > UBI_IO_RETRIES) {
- ubi_ro_mode(ubi);
- leb_write_unlock(ubi, vol_id, lnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return err;
- }
+ leb_write_unlock(ubi, vol_id, lnum);
- vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- ubi_msg(ubi, "try another PEB");
- goto retry;
+ return err;
}
/**
@@ -818,7 +1015,8 @@ write_error:
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len, int used_ebs)
{
- int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
+ int err, tries, data_size = len, vol_id = vol->vol_id;
+ struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
@@ -831,15 +1029,15 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
else
ubi_assert(!(len & (ubi->min_io_size - 1)));
- vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
- if (!vid_hdr)
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
return -ENOMEM;
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
err = leb_write_lock(ubi, vol_id, lnum);
- if (err) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- return err;
- }
+ if (err)
+ goto out;
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
@@ -853,66 +1051,26 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->used_ebs = cpu_to_be32(used_ebs);
vid_hdr->data_crc = cpu_to_be32(crc);
-retry:
- pnum = ubi_wl_get_peb(ubi);
- if (pnum < 0) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- leb_write_unlock(ubi, vol_id, lnum);
- up_read(&ubi->fm_eba_sem);
- return pnum;
- }
+ ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
- dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
- len, vol_id, lnum, pnum, used_ebs);
+ for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+ err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
+ if (err != -EIO || !ubi->bad_allowed)
+ break;
- err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
- if (err) {
- ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
- vol_id, lnum, pnum);
- up_read(&ubi->fm_eba_sem);
- goto write_error;
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
}
- err = ubi_io_write_data(ubi, buf, pnum, 0, len);
- if (err) {
- ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
- len, pnum);
- up_read(&ubi->fm_eba_sem);
- goto write_error;
- }
-
- ubi_assert(vol->eba_tbl[lnum] < 0);
- vol->eba_tbl[lnum] = pnum;
- up_read(&ubi->fm_eba_sem);
+ if (err)
+ ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return 0;
-
-write_error:
- if (err != -EIO || !ubi->bad_allowed) {
- /*
- * This flash device does not admit of bad eraseblocks or
- * something nasty and unexpected happened. Switch to read-only
- * mode just in case.
- */
- ubi_ro_mode(ubi);
- leb_write_unlock(ubi, vol_id, lnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return err;
- }
- err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
- if (err || ++tries > UBI_IO_RETRIES) {
- ubi_ro_mode(ubi);
- leb_write_unlock(ubi, vol_id, lnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return err;
- }
+out:
+ ubi_free_vid_buf(vidb);
- vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- ubi_msg(ubi, "try another PEB");
- goto retry;
+ return err;
}
/*
@@ -935,7 +1093,8 @@ write_error:
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len)
{
- int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
+ int err, tries, vol_id = vol->vol_id;
+ struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
@@ -953,10 +1112,12 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
- vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
- if (!vid_hdr)
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
return -ENOMEM;
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
mutex_lock(&ubi->alc_mutex);
err = leb_write_lock(ubi, vol_id, lnum);
if (err)
@@ -974,70 +1135,31 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->copy_flag = 1;
vid_hdr->data_crc = cpu_to_be32(crc);
-retry:
- pnum = ubi_wl_get_peb(ubi);
- if (pnum < 0) {
- err = pnum;
- up_read(&ubi->fm_eba_sem);
- goto out_leb_unlock;
- }
-
- dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
- vol_id, lnum, vol->eba_tbl[lnum], pnum);
+ dbg_eba("change LEB %d:%d", vol_id, lnum);
- err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
- if (err) {
- ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
- vol_id, lnum, pnum);
- up_read(&ubi->fm_eba_sem);
- goto write_error;
- }
+ for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
+ err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
+ if (err != -EIO || !ubi->bad_allowed)
+ break;
- err = ubi_io_write_data(ubi, buf, pnum, 0, len);
- if (err) {
- ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
- len, pnum);
- up_read(&ubi->fm_eba_sem);
- goto write_error;
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
}
- old_pnum = vol->eba_tbl[lnum];
- vol->eba_tbl[lnum] = pnum;
- up_read(&ubi->fm_eba_sem);
-
- if (old_pnum >= 0) {
- err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
- if (err)
- goto out_leb_unlock;
- }
+ /*
+ * This flash device does not admit of bad eraseblocks or
+ * something nasty and unexpected happened. Switch to read-only
+ * mode just in case.
+ */
+ if (err)
+ ubi_ro_mode(ubi);
-out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
+
out_mutex:
mutex_unlock(&ubi->alc_mutex);
- ubi_free_vid_hdr(ubi, vid_hdr);
+ ubi_free_vid_buf(vidb);
return err;
-
-write_error:
- if (err != -EIO || !ubi->bad_allowed) {
- /*
- * This flash device does not admit of bad eraseblocks or
- * something nasty and unexpected happened. Switch to read-only
- * mode just in case.
- */
- ubi_ro_mode(ubi);
- goto out_leb_unlock;
- }
-
- err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
- if (err || ++tries > UBI_IO_RETRIES) {
- ubi_ro_mode(ubi);
- goto out_leb_unlock;
- }
-
- vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- ubi_msg(ubi, "try another PEB");
- goto retry;
}
/**
@@ -1082,12 +1204,15 @@ static int is_error_sane(int err)
* o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
- struct ubi_vid_hdr *vid_hdr)
+ struct ubi_vid_io_buf *vidb)
{
int err, vol_id, lnum, data_size, aldata_size, idx;
+ struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
struct ubi_volume *vol;
uint32_t crc;
+ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
@@ -1142,9 +1267,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* probably waiting on @ubi->move_mutex. No need to continue the work,
* cancel it.
*/
- if (vol->eba_tbl[lnum] != from) {
+ if (vol->eba_tbl->entries[lnum].pnum != from) {
dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
- vol_id, lnum, from, vol->eba_tbl[lnum]);
+ vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
@@ -1196,7 +1321,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
+ err = ubi_io_write_vid_hdr(ubi, to, vidb);
if (err) {
if (err == -EIO)
err = MOVE_TARGET_WR_ERR;
@@ -1206,7 +1331,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
/* Read the VID header back and check if it was written correctly */
- err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
+ err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
if (err) {
if (err != UBI_IO_BITFLIPS) {
ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
@@ -1229,10 +1354,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
}
- ubi_assert(vol->eba_tbl[lnum] == from);
- down_read(&ubi->fm_eba_sem);
- vol->eba_tbl[lnum] = to;
- up_read(&ubi->fm_eba_sem);
+ ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
+ vol->eba_tbl->entries[lnum].pnum = to;
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
@@ -1388,7 +1511,7 @@ out_free:
*/
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- int i, j, err, num_volumes;
+ int i, err, num_volumes;
struct ubi_ainf_volume *av;
struct ubi_volume *vol;
struct ubi_ainf_peb *aeb;
@@ -1404,35 +1527,39 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
for (i = 0; i < num_volumes; i++) {
+ struct ubi_eba_table *tbl;
+
vol = ubi->volumes[i];
if (!vol)
continue;
cond_resched();
- vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
- GFP_KERNEL);
- if (!vol->eba_tbl) {
- err = -ENOMEM;
+ tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
+ if (IS_ERR(tbl)) {
+ err = PTR_ERR(tbl);
goto out_free;
}
- for (j = 0; j < vol->reserved_pebs; j++)
- vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
+ ubi_eba_replace_table(vol, tbl);
av = ubi_find_av(ai, idx2vol_id(ubi, i));
if (!av)
continue;
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
- if (aeb->lnum >= vol->reserved_pebs)
+ if (aeb->lnum >= vol->reserved_pebs) {
/*
* This may happen in case of an unclean reboot
* during re-size.
*/
ubi_move_aeb_to_list(av, aeb, &ai->erase);
- else
- vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ } else {
+ struct ubi_eba_entry *entry;
+
+ entry = &vol->eba_tbl->entries[aeb->lnum];
+ entry->pnum = aeb->pnum;
+ }
}
}
@@ -1469,8 +1596,7 @@ out_free:
for (i = 0; i < num_volumes; i++) {
if (!ubi->volumes[i])
continue;
- kfree(ubi->volumes[i]->eba_tbl);
- ubi->volumes[i]->eba_tbl = NULL;
+ ubi_eba_replace_table(ubi->volumes[i], NULL);
}
return err;
}
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 30d3999dddba..4f0bd6b4422a 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -262,6 +262,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
int pnum;
+ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
if (pool->used == pool->size) {
/* We cannot update the fastmap here because this
* function is called in atomic context.
@@ -303,7 +305,7 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
- schedule_ubi_work(ubi, wrk);
+ __schedule_ubi_work(ubi, wrk);
return 0;
}
@@ -344,7 +346,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
spin_unlock(&ubi->wl_lock);
vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
- return schedule_erase(ubi, e, vol_id, lnum, torture);
+ return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}
/**
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 48eb55f344eb..c1f5c29e458e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -110,21 +110,23 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
 * Returns a new struct ubi_vid_io_buf on success.
* NULL indicates out of memory.
*/
-static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
- struct ubi_vid_hdr *new;
+ struct ubi_vid_io_buf *new;
+ struct ubi_vid_hdr *vh;
- new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
if (!new)
goto out;
- new->vol_type = UBI_VID_DYNAMIC;
- new->vol_id = cpu_to_be32(vol_id);
+ vh = ubi_get_vid_hdr(new);
+ vh->vol_type = UBI_VID_DYNAMIC;
+ vh->vol_id = cpu_to_be32(vol_id);
/* UBI implementations without fastmap support have to delete the
* fastmap.
*/
- new->compat = UBI_COMPAT_DELETE;
+ vh->compat = UBI_COMPAT_DELETE;
out:
return new;
@@ -145,12 +147,10 @@ static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
{
struct ubi_ainf_peb *aeb;
- aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ aeb = ubi_alloc_aeb(ai, pnum, ec);
if (!aeb)
return -ENOMEM;
- aeb->pnum = pnum;
- aeb->ec = ec;
aeb->lnum = -1;
aeb->scrub = scrub;
aeb->copy_flag = aeb->sqnum = 0;
@@ -186,40 +186,19 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
int last_eb_bytes)
{
struct ubi_ainf_volume *av;
- struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
-
- while (*p) {
- parent = *p;
- av = rb_entry(parent, struct ubi_ainf_volume, rb);
-
- if (vol_id > av->vol_id)
- p = &(*p)->rb_left;
- else if (vol_id < av->vol_id)
- p = &(*p)->rb_right;
- else
- return ERR_PTR(-EINVAL);
- }
- av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
- if (!av)
- goto out;
+ av = ubi_add_av(ai, vol_id);
+ if (IS_ERR(av))
+ return av;
- av->highest_lnum = av->leb_count = av->used_ebs = 0;
- av->vol_id = vol_id;
av->data_pad = data_pad;
av->last_data_size = last_eb_bytes;
av->compat = 0;
av->vol_type = vol_type;
- av->root = RB_ROOT;
if (av->vol_type == UBI_STATIC_VOLUME)
av->used_ebs = used_ebs;
dbg_bld("found volume (ID %i)", vol_id);
-
- rb_link_node(&av->rb, parent, p);
- rb_insert_color(&av->rb, &ai->volumes);
-
-out:
return av;
}
@@ -297,7 +276,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
*/
if (aeb->pnum == new_aeb->pnum) {
ubi_assert(aeb->lnum == new_aeb->lnum);
- kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ ubi_free_aeb(ai, new_aeb);
return 0;
}
@@ -308,13 +287,10 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
/* new_aeb is newer */
if (cmp_res & 1) {
- victim = kmem_cache_alloc(ai->aeb_slab_cache,
- GFP_KERNEL);
+ victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
if (!victim)
return -ENOMEM;
- victim->ec = aeb->ec;
- victim->pnum = aeb->pnum;
list_add_tail(&victim->u.list, &ai->erase);
if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
@@ -328,7 +304,8 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
aeb->pnum = new_aeb->pnum;
aeb->copy_flag = new_vh->copy_flag;
aeb->scrub = new_aeb->scrub;
- kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ aeb->sqnum = new_aeb->sqnum;
+ ubi_free_aeb(ai, new_aeb);
/* new_aeb is older */
} else {
@@ -370,41 +347,24 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_vid_hdr *new_vh,
struct ubi_ainf_peb *new_aeb)
{
- struct ubi_ainf_volume *av, *tmp_av = NULL;
- struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
- int found = 0;
+ int vol_id = be32_to_cpu(new_vh->vol_id);
+ struct ubi_ainf_volume *av;
- if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
- be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
- kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
+ ubi_free_aeb(ai, new_aeb);
return 0;
}
/* Find the volume this SEB belongs to */
- while (*p) {
- parent = *p;
- tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
-
- if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
- p = &(*p)->rb_left;
- else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
- p = &(*p)->rb_right;
- else {
- found = 1;
- break;
- }
- }
-
- if (found)
- av = tmp_av;
- else {
+ av = ubi_find_av(ai, vol_id);
+ if (!av) {
ubi_err(ubi, "orphaned volume in fastmap pool!");
- kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ ubi_free_aeb(ai, new_aeb);
return UBI_BAD_FASTMAP;
}
- ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+ ubi_assert(vol_id == av->vol_id);
return update_vol(ubi, ai, av, new_vh, new_aeb);
}
@@ -423,16 +383,12 @@ static void unmap_peb(struct ubi_attach_info *ai, int pnum)
struct rb_node *node, *node2;
struct ubi_ainf_peb *aeb;
- for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
- av = rb_entry(node, struct ubi_ainf_volume, rb);
-
- for (node2 = rb_first(&av->root); node2;
- node2 = rb_next(node2)) {
- aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+ ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
if (aeb->pnum == pnum) {
rb_erase(&aeb->u.rb, &av->root);
av->leb_count--;
- kmem_cache_free(ai->aeb_slab_cache, aeb);
+ ubi_free_aeb(ai, aeb);
return;
}
}
@@ -455,6 +411,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
__be32 *pebs, int pool_size, unsigned long long *max_sqnum,
struct list_head *free)
{
+ struct ubi_vid_io_buf *vb;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_ainf_peb *new_aeb;
@@ -464,12 +421,14 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (!ech)
return -ENOMEM;
- vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
- if (!vh) {
+ vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!vb) {
kfree(ech);
return -ENOMEM;
}
+ vh = ubi_get_vid_hdr(vb);
+
dbg_bld("scanning fastmap pool: size = %i", pool_size);
/*
@@ -510,15 +469,16 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto out;
}
- err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
unsigned long long ec = be64_to_cpu(ech->ec);
unmap_peb(ai, pnum);
dbg_bld("Adding PEB to free: %i", pnum);
+
if (err == UBI_IO_FF_BITFLIPS)
- add_aeb(ai, free, pnum, ec, 1);
- else
- add_aeb(ai, free, pnum, ec, 0);
+ scrub = 1;
+
+ add_aeb(ai, free, pnum, ec, scrub);
continue;
} else if (err == 0 || err == UBI_IO_BITFLIPS) {
dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -526,15 +486,12 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (err == UBI_IO_BITFLIPS)
scrub = 1;
- new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
- GFP_KERNEL);
+ new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
if (!new_aeb) {
ret = -ENOMEM;
goto out;
}
- new_aeb->ec = be64_to_cpu(ech->ec);
- new_aeb->pnum = pnum;
new_aeb->lnum = be32_to_cpu(vh->lnum);
new_aeb->sqnum = be64_to_cpu(vh->sqnum);
new_aeb->copy_flag = vh->copy_flag;
@@ -558,7 +515,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
out:
- ubi_free_vid_hdr(ubi, vh);
+ ubi_free_vid_buf(vb);
kfree(ech);
return ret;
}
@@ -750,11 +707,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
fmvhdr->vol_type,
be32_to_cpu(fmvhdr->last_eb_bytes));
- if (!av)
- goto fail_bad;
- if (PTR_ERR(av) == -EINVAL) {
- ubi_err(ubi, "volume (ID %i) already exists",
- fmvhdr->vol_id);
+ if (IS_ERR(av)) {
+ if (PTR_ERR(av) == -EEXIST)
+ ubi_err(ubi, "volume (ID %i) already exists",
+ fmvhdr->vol_id);
+
goto fail_bad;
}
@@ -841,11 +798,11 @@ fail_bad:
fail:
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
list_del(&tmp_aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ ubi_free_aeb(ai, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
list_del(&tmp_aeb->u.list);
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ ubi_free_aeb(ai, tmp_aeb);
}
return ret;
@@ -886,6 +843,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_attach_info *scan_ai)
{
struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_io_buf *vb;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_fastmap_layout *fm;
@@ -919,7 +877,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto out;
}
- ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+ ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
if (ret && ret != UBI_IO_BITFLIPS)
goto free_fm_sb;
else if (ret == UBI_IO_BITFLIPS)
@@ -961,12 +919,14 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto free_fm_sb;
}
- vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
- if (!vh) {
+ vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!vb) {
ret = -ENOMEM;
goto free_hdr;
}
+ vh = ubi_get_vid_hdr(vb);
+
for (i = 0; i < used_blocks; i++) {
int image_seq;
@@ -1009,7 +969,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto free_hdr;
}
- ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
i, pnum);
@@ -1037,8 +997,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (sqnum < be64_to_cpu(vh->sqnum))
sqnum = be64_to_cpu(vh->sqnum);
- ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
- ubi->leb_start, ubi->leb_size);
+ ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
+ pnum, 0, ubi->leb_size);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
"err: %i)", i, pnum, ret);
@@ -1099,7 +1059,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi->fm_disabled = 0;
ubi->fast_attach = 1;
- ubi_free_vid_hdr(ubi, vh);
+ ubi_free_vid_buf(vb);
kfree(ech);
out:
up_write(&ubi->fm_protect);
@@ -1108,7 +1068,7 @@ out:
return ret;
free_hdr:
- ubi_free_vid_hdr(ubi, vh);
+ ubi_free_vid_buf(vb);
kfree(ech);
free_fm_sb:
kfree(fmsb);
@@ -1136,6 +1096,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
struct ubi_fm_eba *feba;
struct ubi_wl_entry *wl_e;
struct ubi_volume *vol;
+ struct ubi_vid_io_buf *avbuf, *dvbuf;
struct ubi_vid_hdr *avhdr, *dvhdr;
struct ubi_work *ubi_wrk;
struct rb_node *tmp_rb;
@@ -1146,18 +1107,21 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
- avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
- if (!avhdr) {
+ avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!avbuf) {
ret = -ENOMEM;
goto out;
}
- dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
- if (!dvhdr) {
+ dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
+ if (!dvbuf) {
ret = -ENOMEM;
goto out_kfree;
}
+ avhdr = ubi_get_vid_hdr(avbuf);
+ dvhdr = ubi_get_vid_hdr(dvbuf);
+
seen_pebs = init_seen(ubi);
if (IS_ERR(seen_pebs)) {
ret = PTR_ERR(seen_pebs);
@@ -1306,8 +1270,12 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
ubi_assert(fm_pos <= ubi->fm_size);
- for (j = 0; j < vol->reserved_pebs; j++)
- feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+ for (j = 0; j < vol->reserved_pebs; j++) {
+ struct ubi_eba_leb_desc ldesc;
+
+ ubi_eba_get_ldesc(vol, j, &ldesc);
+ feba->pnum[j] = cpu_to_be32(ldesc.pnum);
+ }
feba->reserved_pebs = cpu_to_be32(j);
feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
@@ -1322,7 +1290,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
spin_unlock(&ubi->volumes_lock);
dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
- ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
goto out_kfree;
@@ -1343,7 +1311,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
dvhdr->lnum = cpu_to_be32(i);
dbg_bld("writing fastmap data to PEB %i sqnum %llu",
new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
- ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum);
@@ -1352,8 +1320,8 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
}
for (i = 0; i < new_fm->used_blocks; i++) {
- ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
- new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
+ ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
+ new_fm->e[i]->pnum, 0, ubi->leb_size);
if (ret) {
ubi_err(ubi, "unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum);
@@ -1368,8 +1336,8 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
dbg_bld("fastmap written!");
out_kfree:
- ubi_free_vid_hdr(ubi, avhdr);
- ubi_free_vid_hdr(ubi, dvhdr);
+ ubi_free_vid_buf(avbuf);
+ ubi_free_vid_buf(dvbuf);
free_seen(seen_pebs);
out:
return ret;
@@ -1439,7 +1407,8 @@ static int invalidate_fastmap(struct ubi_device *ubi)
int ret;
struct ubi_fastmap_layout *fm;
struct ubi_wl_entry *e;
- struct ubi_vid_hdr *vh = NULL;
+ struct ubi_vid_io_buf *vb = NULL;
+ struct ubi_vid_hdr *vh;
if (!ubi->fm)
return 0;
@@ -1451,10 +1420,12 @@ static int invalidate_fastmap(struct ubi_device *ubi)
if (!fm)
goto out;
- vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
- if (!vh)
+ vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!vb)
goto out_free_fm;
+ vh = ubi_get_vid_hdr(vb);
+
ret = -ENOSPC;
e = ubi_wl_get_fm_peb(ubi, 1);
if (!e)
@@ -1465,7 +1436,7 @@ static int invalidate_fastmap(struct ubi_device *ubi)
* to scanning mode.
*/
vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
- ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
+ ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
if (ret < 0) {
ubi_wl_put_fm_peb(ubi, e, 0, 0);
goto out_free_fm;
@@ -1477,7 +1448,7 @@ static int invalidate_fastmap(struct ubi_device *ubi)
ubi->fm = fm;
out:
- ubi_free_vid_hdr(ubi, vh);
+ ubi_free_vid_buf(vb);
return ret;
out_free_fm:
@@ -1522,22 +1493,30 @@ int ubi_update_fastmap(struct ubi_device *ubi)
struct ubi_wl_entry *tmp_e;
down_write(&ubi->fm_protect);
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_eba_sem);
ubi_refill_pools(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return 0;
}
ret = ubi_ensure_anchor_pebs(ubi);
if (ret) {
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return ret;
}
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return -ENOMEM;
}
@@ -1646,16 +1625,14 @@ int ubi_update_fastmap(struct ubi_device *ubi)
new_fm->e[0] = tmp_e;
}
- down_write(&ubi->work_sem);
- down_write(&ubi->fm_eba_sem);
ret = ubi_write_fastmap(ubi, new_fm);
- up_write(&ubi->fm_eba_sem);
- up_write(&ubi->work_sem);
if (ret)
goto err;
out_unlock:
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
return ret;
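The net effect of moving the semaphores up is one consistent critical section for the whole fastmap update; a minimal sketch of the resulting lock order, with error paths collapsed:

    down_write(&ubi->fm_protect);
    down_write(&ubi->work_sem);
    down_write(&ubi->fm_eba_sem);

    ubi_refill_pools(ubi);                  /* now covered by fm_eba_sem */
    ret = ubi_write_fastmap(ubi, new_fm);   /* no lock juggling mid-update */

    up_write(&ubi->fm_eba_sem);
    up_write(&ubi->work_sem);
    up_write(&ubi->fm_protect);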
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index ff8cafe1e5cd..b6fb8f945c21 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -502,6 +502,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
loff_t addr;
uint32_t data = 0;
struct ubi_ec_hdr ec_hdr;
+ struct ubi_vid_io_buf vidb;
/*
* Note, we cannot generally define VID header buffers on stack,
@@ -528,7 +529,10 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
goto error;
}
- err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+ ubi_init_vid_buf(ubi, &vidb, &vid_hdr);
+ ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb));
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0);
if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
err != UBI_IO_FF){
addr += ubi->vid_hdr_aloffset;
@@ -995,12 +999,11 @@ bad:
* ubi_io_read_vid_hdr - read and check a volume identifier header.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to read from
- * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
- * identifier header
+ * @vidb: the volume identifier buffer to store data in
* @verbose: be verbose if the header is corrupted or wasn't found
*
* This function reads the volume identifier header from physical eraseblock
- * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
+ * @pnum and stores it in @vidb. It also checks the CRC checksum of the read
* volume identifier header. The error codes are the same as in
* 'ubi_io_read_ec_hdr()'.
*
@@ -1008,16 +1011,16 @@ bad:
 * 'ubi_io_read_ec_hdr()', so refer to the commentaries in 'ubi_io_read_ec_hdr()'.
*/
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
- struct ubi_vid_hdr *vid_hdr, int verbose)
+ struct ubi_vid_io_buf *vidb, int verbose)
{
int err, read_err;
uint32_t crc, magic, hdr_crc;
- void *p;
+ struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
+ void *p = vidb->buffer;
dbg_io("read VID header from PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- p = (char *)vid_hdr - ubi->vid_hdr_shift;
read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
@@ -1080,23 +1083,24 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
* ubi_io_write_vid_hdr - write a volume identifier header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to write to
- * @vid_hdr: the volume identifier header to write
+ * @vidb: the volume identifier buffer to write
*
 * This function writes the volume identifier header described by @vidb to
* physical eraseblock @pnum. This function automatically fills the
- * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates
- * header CRC checksum and stores it at vid_hdr->hdr_crc.
+ * @vidb->hdr->magic and @vidb->hdr->version fields, calculates the header
+ * CRC checksum, and stores it in @vidb->hdr->hdr_crc.
*
* This function returns zero in case of success and a negative error code in
* case of failure. If %-EIO is returned, the physical eraseblock probably went
* bad.
*/
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
- struct ubi_vid_hdr *vid_hdr)
+ struct ubi_vid_io_buf *vidb)
{
+ struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
int err;
uint32_t crc;
- void *p;
+ void *p = vidb->buffer;
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
@@ -1117,7 +1121,6 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
return -EROFS;
- p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
return err;
@@ -1283,17 +1286,19 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
+ struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
void *p;
if (!ubi_dbg_chk_io(ubi))
return 0;
- vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
- if (!vid_hdr)
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
return -ENOMEM;
- p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ vid_hdr = ubi_get_vid_hdr(vidb);
+ p = vidb->buffer;
err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
ubi->vid_hdr_alsize);
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
@@ -1314,7 +1319,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
err = self_check_vid_hdr(ubi, pnum, vid_hdr);
exit:
- ubi_free_vid_hdr(ubi, vid_hdr);
+ ubi_free_vid_buf(vidb);
return err;
}
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index a9e2cef7c95c..88b1897aeb40 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -538,7 +538,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
- if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 ||
+ if (!ubi_leb_valid(vol, lnum) || offset < 0 || len < 0 ||
offset + len > vol->usable_leb_size ||
offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
return -EINVAL;
@@ -583,7 +583,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
- if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
+ if (!ubi_leb_valid(vol, lnum) || len < 0 ||
len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
return -EINVAL;
@@ -620,7 +620,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
- if (lnum < 0 || lnum >= vol->reserved_pebs)
+ if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
@@ -680,7 +680,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
- if (lnum < 0 || lnum >= vol->reserved_pebs)
+ if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
@@ -716,13 +716,13 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
- if (lnum < 0 || lnum >= vol->reserved_pebs)
+ if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
- if (vol->eba_tbl[lnum] >= 0)
+ if (ubi_eba_is_mapped(vol, lnum))
return -EBADMSG;
return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
@@ -751,13 +751,13 @@ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
- if (lnum < 0 || lnum >= vol->reserved_pebs)
+ if (!ubi_leb_valid(vol, lnum))
return -EINVAL;
if (vol->upd_marker)
return -EBADF;
- return vol->eba_tbl[lnum] >= 0;
+ return ubi_eba_is_mapped(vol, lnum);
}
EXPORT_SYMBOL_GPL(ubi_is_mapped);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index b616a115c9d3..697dbcba7371 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -167,6 +167,17 @@ enum {
};
/**
+ * struct ubi_vid_io_buf - VID buffer used to read/write VID info to/from the
+ * flash.
+ * @hdr: a pointer to the VID header stored in buffer
+ * @buffer: underlying buffer
+ */
+struct ubi_vid_io_buf {
+ struct ubi_vid_hdr *hdr;
+ void *buffer;
+};
+
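The two fields are tied together: as the ubi_init_vid_buf() helper added further down shows, @hdr always points @vid_hdr_shift bytes into @buffer, so the I/O layer can transfer the aligned region while callers keep a plain header pointer. The invariant, as a sketch:

    vidb->buffer = buf;                       /* vid_hdr_alsize bytes */
    vidb->hdr    = buf + ubi->vid_hdr_shift;  /* header lives inside it */

    /* a read then covers the shift plus the header itself: */
    ubi_io_read(ubi, vidb->buffer, pnum, ubi->vid_hdr_aloffset,
                ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);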
+/**
* struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree
* @u.list: link in the protection queue
@@ -267,6 +278,21 @@ struct ubi_fm_pool {
};
/**
+ * struct ubi_eba_leb_desc - EBA logical eraseblock descriptor
+ * @lnum: the logical eraseblock number
+ * @pnum: the physical eraseblock where the LEB can be found
+ *
+ * This structure is here to hide EBA's internals from other parts of the
+ * UBI implementation.
+ *
+ * One can query the position of a LEB by calling ubi_eba_get_ldesc().
+ */
+struct ubi_eba_leb_desc {
+ int lnum;
+ int pnum;
+};
+
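Querying a LEB's position through the accessor then looks as follows (sketch; as with the old int table, a negative pnum denotes the unmapped case):

    struct ubi_eba_leb_desc ldesc;

    ubi_eba_get_ldesc(vol, lnum, &ldesc);
    if (ldesc.pnum < 0)
            ;  /* LEB is unmapped */
    else
            ;  /* LEB ldesc.lnum lives in PEB ldesc.pnum */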
+/**
* struct ubi_volume - UBI volume description data structure.
 * @dev: device object to make use of the Linux device model
* @cdev: character device object to create character device
@@ -344,7 +370,7 @@ struct ubi_volume {
long long upd_received;
void *upd_buf;
- int *eba_tbl;
+ struct ubi_eba_table *eba_tbl;
unsigned int checked:1;
unsigned int corrupted:1;
unsigned int upd_marker:1;
@@ -724,6 +750,8 @@ struct ubi_ainf_volume {
* @ec_sum: a temporary variable used when calculating @mean_ec
* @ec_count: a temporary variable used when calculating @mean_ec
* @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ * @ech: temporary EC header. Only available during scan
+ * @vidb: temporary VID buffer. Only available during scan
*
* This data structure contains the result of attaching an MTD device and may
* be used by other UBI sub-systems to build final UBI data structures, further
@@ -752,6 +780,8 @@ struct ubi_attach_info {
uint64_t ec_sum;
int ec_count;
struct kmem_cache *aeb_slab_cache;
+ struct ubi_ec_hdr *ech;
+ struct ubi_vid_io_buf *vidb;
};
/**
@@ -792,8 +822,12 @@ extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
/* attach.c */
+struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum,
+ int ec);
+void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb);
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id);
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
int vol_id);
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
@@ -835,7 +869,21 @@ void ubi_update_reserved(struct ubi_device *ubi);
void ubi_calculate_reserved(struct ubi_device *ubi);
int ubi_check_pattern(const void *buf, uint8_t patt, int size);
+static inline bool ubi_leb_valid(struct ubi_volume *vol, int lnum)
+{
+ return lnum >= 0 && lnum < vol->reserved_pebs;
+}
+
/* eba.c */
+struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
+ int nentries);
+void ubi_eba_destroy_table(struct ubi_eba_table *tbl);
+void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
+ int nentries);
+void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl);
+void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
+ struct ubi_eba_leb_desc *ldesc);
+bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum);
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum);
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
@@ -850,7 +898,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum, const void *buf, int len);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
- struct ubi_vid_hdr *vid_hdr);
+ struct ubi_vid_io_buf *vidb);
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
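struct ubi_eba_table itself is never defined in this header; from the accesses earlier in the patch (vol->eba_tbl->entries[lnum].pnum) its layout is essentially the following sketch, though the authoritative definition stays private to eba.c:

    struct ubi_eba_entry {
            int pnum;    /* backing PEB, or UBI_LEB_UNMAPPED */
    };

    struct ubi_eba_table {
            struct ubi_eba_entry *entries;  /* one slot per reserved LEB */
    };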
@@ -885,9 +933,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
struct ubi_ec_hdr *ec_hdr);
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
- struct ubi_vid_hdr *vid_hdr, int verbose);
+ struct ubi_vid_io_buf *vidb, int verbose);
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
- struct ubi_vid_hdr *vid_hdr);
+ struct ubi_vid_io_buf *vidb);
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
@@ -1008,44 +1056,68 @@ static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
}
/**
- * ubi_zalloc_vid_hdr - allocate a volume identifier header object.
- * @ubi: UBI device description object
- * @gfp_flags: GFP flags to allocate with
- *
- * This function returns a pointer to the newly allocated and zero-filled
- * volume identifier header object in case of success and %NULL in case of
- * failure.
+ * ubi_init_vid_buf - Initialize a VID buffer
+ * @ubi: the UBI device
+ * @vidb: the VID buffer to initialize
+ * @buf: the underlying buffer
*/
-static inline struct ubi_vid_hdr *
-ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags)
+static inline void ubi_init_vid_buf(const struct ubi_device *ubi,
+ struct ubi_vid_io_buf *vidb,
+ void *buf)
{
- void *vid_hdr;
+ if (buf)
+ memset(buf, 0, ubi->vid_hdr_alsize);
- vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags);
- if (!vid_hdr)
+ vidb->buffer = buf;
+ vidb->hdr = buf + ubi->vid_hdr_shift;
+}
+
+/**
+ * ubi_alloc_vid_buf - Allocate a VID buffer
+ * @ubi: the UBI device
+ * @gfp_flags: GFP flags to use for the allocation
+ */
+static inline struct ubi_vid_io_buf *
+ubi_alloc_vid_buf(const struct ubi_device *ubi, gfp_t gfp_flags)
+{
+ struct ubi_vid_io_buf *vidb;
+ void *buf;
+
+ vidb = kzalloc(sizeof(*vidb), gfp_flags);
+ if (!vidb)
+ return NULL;
+
+ buf = kmalloc(ubi->vid_hdr_alsize, gfp_flags);
+ if (!buf) {
+ kfree(vidb);
return NULL;
+ }
- /*
- * VID headers may be stored at un-aligned flash offsets, so we shift
- * the pointer.
- */
- return vid_hdr + ubi->vid_hdr_shift;
+ ubi_init_vid_buf(ubi, vidb, buf);
+
+ return vidb;
}
/**
- * ubi_free_vid_hdr - free a volume identifier header object.
- * @ubi: UBI device description object
- * @vid_hdr: the object to free
+ * ubi_free_vid_buf - Free a VID buffer
+ * @vidb: the VID buffer to free
*/
-static inline void ubi_free_vid_hdr(const struct ubi_device *ubi,
- struct ubi_vid_hdr *vid_hdr)
+static inline void ubi_free_vid_buf(struct ubi_vid_io_buf *vidb)
{
- void *p = vid_hdr;
-
- if (!p)
+ if (!vidb)
return;
- kfree(p - ubi->vid_hdr_shift);
+ kfree(vidb->buffer);
+ kfree(vidb);
+}
+
+/**
+ * ubi_get_vid_hdr - Get the VID header attached to a VID buffer
+ * @vidb: VID buffer
+ */
+static inline struct ubi_vid_hdr *ubi_get_vid_hdr(struct ubi_vid_io_buf *vidb)
+{
+ return vidb->hdr;
}
/*
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 0138f526474a..7ac78c13dd1c 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -138,7 +138,7 @@ static void vol_release(struct device *dev)
{
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
- kfree(vol->eba_tbl);
+ ubi_eba_replace_table(vol, NULL);
kfree(vol);
}
@@ -158,6 +158,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
int i, err, vol_id = req->vol_id, do_free = 1;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
+ struct ubi_eba_table *eba_tbl = NULL;
dev_t dev;
if (ubi->ro_mode)
@@ -241,14 +242,13 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_acc;
- vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL);
- if (!vol->eba_tbl) {
- err = -ENOMEM;
+ eba_tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
+ if (IS_ERR(eba_tbl)) {
+ err = PTR_ERR(eba_tbl);
goto out_acc;
}
- for (i = 0; i < vol->reserved_pebs; i++)
- vol->eba_tbl[i] = UBI_LEB_UNMAPPED;
+ ubi_eba_replace_table(vol, eba_tbl);
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = vol->reserved_pebs;
@@ -329,7 +329,7 @@ out_cdev:
cdev_del(&vol->cdev);
out_mapping:
if (do_free)
- kfree(vol->eba_tbl);
+ ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
@@ -427,10 +427,11 @@ out_unlock:
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
- int i, err, pebs, *new_mapping;
+ int i, err, pebs;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
+ struct ubi_eba_table *new_eba_tbl = NULL;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
@@ -450,12 +451,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (reserved_pebs == vol->reserved_pebs)
return 0;
- new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL);
- if (!new_mapping)
- return -ENOMEM;
-
- for (i = 0; i < reserved_pebs; i++)
- new_mapping[i] = UBI_LEB_UNMAPPED;
+ new_eba_tbl = ubi_eba_create_table(vol, reserved_pebs);
+ if (IS_ERR(new_eba_tbl))
+ return PTR_ERR(new_eba_tbl);
spin_lock(&ubi->volumes_lock);
if (vol->ref_count > 1) {
@@ -481,10 +479,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
- for (i = 0; i < vol->reserved_pebs; i++)
- new_mapping[i] = vol->eba_tbl[i];
- kfree(vol->eba_tbl);
- vol->eba_tbl = new_mapping;
+ ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
+ ubi_eba_replace_table(vol, new_eba_tbl);
spin_unlock(&ubi->volumes_lock);
}
@@ -498,10 +494,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
- for (i = 0; i < reserved_pebs; i++)
- new_mapping[i] = vol->eba_tbl[i];
- kfree(vol->eba_tbl);
- vol->eba_tbl = new_mapping;
+ ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
+ ubi_eba_replace_table(vol, new_eba_tbl);
spin_unlock(&ubi->volumes_lock);
}
@@ -543,7 +537,7 @@ out_acc:
spin_unlock(&ubi->volumes_lock);
}
out_free:
- kfree(new_mapping);
+ ubi_eba_destroy_table(new_eba_tbl);
return err;
}
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index d85c19762160..263743e7b741 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -299,15 +299,18 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
int copy, void *vtbl)
{
int err, tries = 0;
+ struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
struct ubi_ainf_peb *new_aeb;
dbg_gen("create volume table (copy #%d)", copy + 1);
- vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
- if (!vid_hdr)
+ vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+ if (!vidb)
return -ENOMEM;
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
retry:
new_aeb = ubi_early_get_peb(ubi, ai);
if (IS_ERR(new_aeb)) {
@@ -324,7 +327,7 @@ retry:
vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
/* The EC header is already there, write the VID header */
- err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
+ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb);
if (err)
goto write_error;
@@ -338,8 +341,8 @@ retry:
* of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
- kmem_cache_free(ai->aeb_slab_cache, new_aeb);
- ubi_free_vid_hdr(ubi, vid_hdr);
+ ubi_free_aeb(ai, new_aeb);
+ ubi_free_vid_buf(vidb);
return err;
write_error:
@@ -351,9 +354,9 @@ write_error:
list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
- kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ ubi_free_aeb(ai, new_aeb);
out_free:
- ubi_free_vid_hdr(ubi, vid_hdr);
+ ubi_free_vid_buf(vidb);
return err;
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index f4533266d7b2..b5b8cd6f481c 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -580,7 +580,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* failure.
*/
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int vol_id, int lnum, int torture)
+ int vol_id, int lnum, int torture, bool nested)
{
struct ubi_work *wl_wrk;
@@ -599,7 +599,10 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
- schedule_ubi_work(ubi, wl_wrk);
+ if (nested)
+ __schedule_ubi_work(ubi, wl_wrk);
+ else
+ schedule_ubi_work(ubi, wl_wrk);
return 0;
}
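The @nested flag exists because ubi_update_fastmap() now holds work_sem for writing around the whole update, so erasures scheduled from that context must bypass the lock-taking wrapper. A sketch, assuming schedule_ubi_work() is the variant that takes work_sem around __schedule_ubi_work():

    static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
    {
            down_read(&ubi->work_sem);  /* would deadlock against the
                                         * down_write() in ubi_update_fastmap() */
            __schedule_ubi_work(ubi, wrk);
            up_read(&ubi->work_sem);
    }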
@@ -644,11 +647,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int shutdown)
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
- int vol_id = -1, lnum = -1;
+ int erase = 0, keep = 0, vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
int anchor = wrk->anchor;
#endif
struct ubi_wl_entry *e1, *e2;
+ struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
int dst_leb_clean = 0;
@@ -656,10 +660,13 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (shutdown)
return 0;
- vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
- if (!vid_hdr)
+ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
+ if (!vidb)
return -ENOMEM;
+ vid_hdr = ubi_get_vid_hdr(vidb);
+
+ down_read(&ubi->fm_eba_sem);
mutex_lock(&ubi->move_mutex);
spin_lock(&ubi->wl_lock);
ubi_assert(!ubi->move_from && !ubi->move_to);
@@ -753,7 +760,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* which is being moved was unmapped.
*/
- err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
+ err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
if (err && err != UBI_IO_BITFLIPS) {
dst_leb_clean = 1;
if (err == UBI_IO_FF) {
@@ -780,6 +787,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
e1->pnum);
scrubbing = 1;
goto out_not_moved;
+ } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
+ /*
+ * While a full scan would detect interrupted erasures at
+ * attach time, we can still face them here when the device
+ * was attached from a fastmap.
+ */
+ dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
+ e1->pnum);
+ erase = 1;
+ goto out_not_moved;
}
ubi_err(ubi, "error %d while reading VID header from PEB %d",
@@ -790,7 +807,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
- err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
+ err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
if (err) {
if (err == MOVE_CANCEL_RACE) {
/*
@@ -815,6 +832,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* Target PEB had bit-flips or write error - torture it.
*/
torture = 1;
+ keep = 1;
goto out_not_moved;
}
@@ -847,7 +865,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (scrubbing)
ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
e1->pnum, vol_id, lnum, e2->pnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
+ ubi_free_vid_buf(vidb);
spin_lock(&ubi->wl_lock);
if (!ubi->move_to_put) {
@@ -879,6 +897,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
dbg_wl("done");
mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
return 0;
/*
@@ -901,7 +920,7 @@ out_not_moved:
ubi->erroneous_peb_count += 1;
} else if (scrubbing)
wl_tree_add(e1, &ubi->scrub);
- else
+ else if (keep)
wl_tree_add(e1, &ubi->used);
if (dst_leb_clean) {
wl_tree_add(e2, &ubi->free);
@@ -913,7 +932,7 @@ out_not_moved:
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- ubi_free_vid_hdr(ubi, vid_hdr);
+ ubi_free_vid_buf(vidb);
if (dst_leb_clean) {
ensure_wear_leveling(ubi, 1);
} else {
@@ -922,7 +941,14 @@ out_not_moved:
goto out_ro;
}
+ if (erase) {
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
+ if (err)
+ goto out_ro;
+ }
+
mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
return 0;
out_error:
@@ -937,13 +963,14 @@ out_error:
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- ubi_free_vid_hdr(ubi, vid_hdr);
+ ubi_free_vid_buf(vidb);
wl_entry_destroy(ubi, e1);
wl_entry_destroy(ubi, e2);
out_ro:
ubi_ro_mode(ubi);
mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
ubi_assert(err != 0);
return err < 0 ? err : -EIO;
@@ -951,7 +978,8 @@ out_cancel:
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
mutex_unlock(&ubi->move_mutex);
- ubi_free_vid_hdr(ubi, vid_hdr);
+ up_read(&ubi->fm_eba_sem);
+ ubi_free_vid_buf(vidb);
return 0;
}
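With the two new flags, out_not_moved now distinguishes four fates for the source PEB; in condensed form (a sketch of the logic wired up above):

    if (scrubbing)
            wl_tree_add(e1, &ubi->scrub);  /* bitflips: re-read it later */
    else if (keep)
            wl_tree_add(e1, &ubi->used);   /* target PEB failed; e1 is fine */
    /* otherwise e1 is kept out of the trees entirely... */
    if (erase)
            do_sync_erase(ubi, e1, vol_id, lnum, 1);  /* ...and erased to
                                                       * repair an interrupted
                                                       * erasure */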
@@ -1073,7 +1101,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
if (err1) {
wl_entry_destroy(ubi, e);
err = err1;
@@ -1254,7 +1282,7 @@ retry:
}
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e, vol_id, lnum, torture);
+ err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
if (err) {
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->used);
@@ -1545,7 +1573,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
wl_entry_destroy(ubi, e);
goto out_free;
}
@@ -1624,7 +1652,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->ec = aeb->ec;
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
wl_entry_destroy(ubi, e);
goto out_free;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3f31ca32f52b..5fa36ebc0640 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -471,9 +471,9 @@ static int bond_check_dev_link(struct bonding *bond,
/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
mii = if_mii(&ifr);
- if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
+ if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
mii->reg_num = MII_BMSR;
- if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0)
+ if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
return mii->val_out & BMSR_LSTATUS;
}
}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 3eb7430dffbf..f8ff25c8ee2e 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -142,6 +142,9 @@ struct plx_pci_card {
#define CTI_PCI_VENDOR_ID 0x12c4
#define CTI_PCI_DEVICE_ID_CRG001 0x0900
+#define MOXA_PCI_VENDOR_ID 0x1393
+#define MOXA_PCI_DEVICE_ID 0x0100
+
static void plx_pci_reset_common(struct pci_dev *pdev);
static void plx9056_pci_reset_common(struct pci_dev *pdev);
static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
@@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = {
/* based on PLX9030 */
};
+static struct plx_pci_card_info plx_pci_card_info_moxa = {
+ "MOXA", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
static const struct pci_device_id plx_pci_tbl[] = {
{
/* Adlink PCI-7841/cPCI-7841 */
@@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = {
0, 0,
(kernel_ulong_t)&plx_pci_card_info_elcus
},
+ {
+ /* Moxa */
+ MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_moxa
+ },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 7717b19dc806..947adda3397d 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -962,9 +962,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
vl->members |= BIT(port) | BIT(cpu_port);
if (untagged)
- vl->untag |= BIT(port) | BIT(cpu_port);
+ vl->untag |= BIT(port);
else
- vl->untag &= ~(BIT(port) | BIT(cpu_port));
+ vl->untag &= ~BIT(port);
+ vl->untag &= ~BIT(cpu_port);
b53_set_vlan_entry(dev, vid, vl);
b53_fast_age_vlan(dev, vid);
@@ -973,8 +974,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
if (pvid) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
vlan->vid_end);
- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
- vlan->vid_end);
b53_fast_age_vlan(dev, vid);
}
}
@@ -984,7 +983,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- unsigned int cpu_port = dev->cpu_port;
struct b53_vlan *vl;
u16 vid;
u16 pvid;
@@ -997,8 +995,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
b53_get_vlan_entry(dev, vid, vl);
vl->members &= ~BIT(port);
- if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
- vl->members = 0;
if (pvid == vid) {
if (is5325(dev) || is5365(dev))
@@ -1007,18 +1003,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
pvid = 0;
}
- if (untagged) {
+ if (untagged)
vl->untag &= ~(BIT(port));
- if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
- vl->untag = 0;
- }
b53_set_vlan_entry(dev, vid, vl);
b53_fast_age_vlan(dev, vid);
}
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
b53_fast_age_vlan(dev, pvid);
return 0;
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index cc9e6bd83e0e..ef63d24fef81 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -17,7 +17,6 @@
*/
#include <linux/kernel.h>
-#include <linux/kconfig.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -257,6 +256,7 @@ static const struct of_device_id b53_mmap_of_table[] = {
{ .compatible = "brcm,bcm63xx-switch" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e218887f18b7..e3ee27ce13dd 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1133,6 +1133,20 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
return 0;
}
+static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
+{
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ /* For a kernel about to be kexec'd, we want to keep the GPHY on for a
+ * successful MDIO bus scan to occur. If we did turn off the GPHY
+ * before (e.g., in port_disable), this will also power it back on.
+ *
+ * Do not rely on kexec_in_progress, just power the PHY on.
+ */
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+}
+
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
@@ -1158,10 +1172,12 @@ static const struct of_device_id bcm_sf2_of_match[] = {
{ .compatible = "brcm,bcm7445-switch-v4.0" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
.remove = bcm_sf2_sw_remove,
+ .shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
.of_match_table = bcm_sf2_of_match,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index c481f104a8fe..5390ae89136c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
return num_msgs;
}
-static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
-{
- u32 data = 0x7777;
-
- xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
- xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
- xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
- xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
- xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
-}
-
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
@@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = {
.clear = xgene_enet_clear_ring,
.wr_cmd = xgene_enet_wr_cmd,
.len = xgene_enet_ring_len,
- .coalesce = xgene_enet_setup_coalescing,
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 8456337a237d..06e598c8bc16 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -55,8 +55,10 @@ enum xgene_enet_rm {
#define PREFETCH_BUF_EN BIT(21)
#define CSR_RING_ID_BUF 0x000c
#define CSR_PBM_COAL 0x0014
+#define CSR_PBM_CTICK0 0x0018
#define CSR_PBM_CTICK1 0x001c
#define CSR_PBM_CTICK2 0x0020
+#define CSR_PBM_CTICK3 0x0024
#define CSR_THRESHOLD0_SET1 0x0030
#define CSR_THRESHOLD1_SET1 0x0034
#define CSR_RING_NE_INT_MODE 0x017c
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 429f18fc5503..8158d4698734 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
}
- pdata->ring_ops->coalesce(pdata->tx_ring[0]);
+ if (pdata->ring_ops->coalesce)
+ pdata->ring_ops->coalesce(pdata->tx_ring[0]);
pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
index 2b76732add5d..af51dd5844ce 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
}
- ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+ ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);
addr >>= 8;
ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
@@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
- u32 data = 0x7777;
+ u32 data = 0x77777777;
xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
- xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
- xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
- xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
+ xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
+ xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
}
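Expanding the old v2 values shows why both changes were needed, under the hedged assumption that each CTICK register holds packed coalescing tick fields: with a 16-bit pattern, the shifted write left half of CTICK2 zeroed, and CTICK0/CTICK3 were never programmed at all.

    u32 data = 0x7777;
    /* old code: */
    xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
    /* writes 0x77770000: the low tick fields stay zero */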
struct xgene_ring_ops xgene_ring2_ops = {
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b0da9693f28a..be865b4dada2 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -460,7 +460,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
if (ndev->flags & IFF_ALLMULTI) {
arc_reg_set(priv, R_LAFL, ~0);
arc_reg_set(priv, R_LAFH, ~0);
- } else {
+ } else if (ndev->flags & IFF_MULTICAST) {
struct netdev_hw_addr *ha;
unsigned int filter[2] = { 0, 0 };
int bit;
@@ -472,6 +472,9 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
arc_reg_set(priv, R_LAFL, filter[0]);
arc_reg_set(priv, R_LAFH, filter[1]);
+ } else {
+ arc_reg_set(priv, R_LAFL, 0);
+ arc_reg_set(priv, R_LAFH, 0);
}
}
}
@@ -764,8 +767,6 @@ int arc_emac_probe(struct net_device *ndev, int interface)
ndev->netdev_ops = &arc_emac_netdev_ops;
ndev->ethtool_ops = &arc_emac_ethtool_ops;
ndev->watchdog_timeo = TX_TIMEOUT;
- /* FIXME :: no multicast support yet */
- ndev->flags &= ~IFF_MULTICAST;
priv = netdev_priv(ndev);
priv->dev = dev;
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index b047fd607b83..00c38bf151e6 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1358,6 +1358,7 @@ static const struct of_device_id nb8800_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
static int nb8800_probe(struct platform_device *pdev)
{
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index ae364c74baf3..537090952c45 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1126,7 +1126,8 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- phy_disconnect(phydev);
+ if (priv->has_phy)
+ phy_disconnect(phydev);
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 6ea0e5ff1e44..49f4cafe5438 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+
+ /* preserve ONLY bits 16-17 from current hardware value */
+ ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
+
if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
ctl &= ~BGMAC_DMA_RX_BL_MASK;
ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
ctl &= ~BGMAC_DMA_RX_PT_MASK;
ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
}
- ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
ctl |= BGMAC_DMA_RX_ENABLE;
ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
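The ordering is the whole bug here: masking with BGMAC_DMA_RX_ADDREXT_MASK used to run after the BL/PC/PT fields had been configured, wiping them out again. Masking first keeps only the address-extension bits read back from hardware and then builds the configuration on top:

    ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
    ctl &= BGMAC_DMA_RX_ADDREXT_MASK;            /* keep bits 16-17 only */
    ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;  /* then set fields */
    ctl |= BGMAC_DMA_RX_ENABLE;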
@@ -1048,7 +1051,7 @@ static void bgmac_enable(struct bgmac *bgmac)
BGMAC_DS_MM_SHIFT;
if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
- if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
+ if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
bgmac_cco_ctl_maskset(bgmac, 1, ~0,
BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
@@ -1449,7 +1452,7 @@ static int bgmac_phy_connect(struct bgmac *bgmac)
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connecton failed\n");
+ dev_err(bgmac->dev, "PHY connection failed\n");
return PTR_ERR(phy_dev);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 27f11a5d5fe2..1f7034d739b0 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -49,6 +49,7 @@
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>
+#include <linux/crash_dump.h>
#if IS_ENABLED(CONFIG_CNIC)
#define BCM_CNIC 1
@@ -271,22 +272,25 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
+ unsigned long flags;
u32 val;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
return val;
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
- spin_lock_bh(&bp->indirect_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
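spin_lock_bh() only disables softirqs and must not be taken from hardirq context, so if any path reaches these indirect-register accessors from an interrupt handler, the lock can deadlock or trigger a warning; the saved-flags variant is safe from any context. The pattern, for reference:

    unsigned long flags;

    spin_lock_irqsave(&bp->indirect_lock, flags);   /* hardirq-safe */
    BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
    val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
    spin_unlock_irqrestore(&bp->indirect_lock, flags);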
static void
@@ -304,8 +308,10 @@ bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
+ unsigned long flags;
+
offset += cid_addr;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
int i;
@@ -322,7 +328,7 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
BNX2_WR(bp, BNX2_CTX_DATA, val);
}
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
#ifdef BCM_CNIC
@@ -4759,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp)
BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
-static int
-bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+static void
+bnx2_wait_dma_complete(struct bnx2 *bp)
{
u32 val;
- int i, rc = 0;
- u8 old_port;
+ int i;
- /* Wait for the current PCI transaction to complete before
- * issuing a reset. */
+ /*
+ * Wait for the current PCI transaction to complete before
+ * issuing a reset.
+ */
if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
(BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
@@ -4791,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
}
}
+ return;
+}
+
+
+static int
+bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+{
+ u32 val;
+ int i, rc = 0;
+ u8 old_port;
+
+ /* Wait for the current PCI transaction to complete before
+ * issuing a reset. */
+ bnx2_wait_dma_complete(bp);
+
/* Wait for the firmware to tell us it is ok to issue a reset. */
bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -6356,6 +6378,10 @@ bnx2_open(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
int rc;
+ rc = bnx2_request_firmware(bp);
+ if (rc < 0)
+ goto out;
+
netif_carrier_off(dev);
bnx2_disable_int(bp);
@@ -6424,6 +6450,7 @@ open_err:
bnx2_free_irq(bp);
bnx2_free_mem(bp);
bnx2_del_napi(bp);
+ bnx2_release_firmware(bp);
goto out;
}
@@ -8570,12 +8597,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- rc = bnx2_request_firmware(bp);
- if (rc < 0)
- goto error;
-
+ /*
+ * In-flight DMA from the first kernel could still be running in the
+ * kdump kernel. The new io-page table has been created before bnx2
+ * does its reset at open time, so we have to wait for the in-flight
+ * DMA to complete to keep it from hitting the newly created io-page
+ * table.
+ */
+ if (is_kdump_kernel())
+ bnx2_wait_dma_complete(bp);
- bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8608,7 +8638,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
- bnx2_release_firmware(bp);
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 20fe6a8c35c1..0cee4c0283f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15241,7 +15241,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
bp->cyclecounter.read = bnx2x_cyclecounter_read;
bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
- bp->cyclecounter.shift = 1;
+ bp->cyclecounter.shift = 0;
bp->cyclecounter.mult = 1;
}
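[Editor's note: the kernel timecounter converts cycles to nanoseconds as ns = (delta * mult) >> shift. With mult = 1 and shift = 1 the old code halved every timestamp; shift = 0 makes the mapping the identity. A standalone arithmetic check, values assumed for illustration:]

	#include <stdint.h>
	#include <stdio.h>

	/* ns = (cycles * mult) >> shift, the timecounter formula */
	static uint64_t cc_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
	{
		return (cycles * mult) >> shift;
	}

	int main(void)
	{
		uint64_t cycles = 1000000;	/* 1e6 device clock ticks */

		/* old: shift = 1 halves the result; new: shift = 0 is identity */
		printf("shift=1: %llu ns\n", (unsigned long long)cc_to_ns(cycles, 1, 1));
		printf("shift=0: %llu ns\n", (unsigned long long)cc_to_ns(cycles, 1, 0));
		return 0;
	}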
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a9f9f3738022..e18635b2a002 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4934,6 +4934,10 @@ static void bnxt_del_napi(struct bnxt *bp)
napi_hash_del(&bnapi->napi);
netif_napi_del(&bnapi->napi);
}
+ /* We called napi_hash_del() before netif_napi_del(), so we need
+ * to respect an RCU grace period before freeing napi structures.
+ */
+ synchronize_net();
}
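[Editor's note: napi_hash_del() removes the instance from the RCU-protected busy-poll hash but returns before concurrent readers finish, so freeing the backing memory immediately could leave a poller on a freed napi. synchronize_net() waits out one grace period after all instances are unlinked. Teardown-order sketch, array form illustrative:]

	#include <linux/netdevice.h>

	static void my_del_napi(struct napi_struct *napis, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			napi_hash_del(&napis[i]);	/* RCU removal from busy-poll hash */
			netif_napi_del(&napis[i]);
		}
		/* one grace period covers every instance unhashed above */
		synchronize_net();
		/* now safe to kfree()/reuse the memory holding napis[] */
	}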
static void bnxt_init_napi(struct bnxt *bp)
@@ -6309,6 +6313,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct tc_to_netdev *ntc)
{
struct bnxt *bp = netdev_priv(dev);
+ bool sh = false;
u8 tc;
if (ntc->type != TC_SETUP_MQPRIO)
@@ -6325,12 +6330,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (netdev_get_num_tc(dev) == tc)
return 0;
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ sh = true;
+
if (tc) {
int max_rx_rings, max_tx_rings, rc;
- bool sh = false;
-
- if (bp->flags & BNXT_FLAG_SHARED_RINGS)
- sh = true;
rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
@@ -6348,7 +6352,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
}
- bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+ bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+ bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings;
if (netif_running(bp->dev))
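[Editor's note: with shared completion rings one CP ring serves a TX/RX pair, so max(tx, rx) rings suffice; in unshared mode each queue needs its own, tx + rx in total. The old code used the max unconditionally and under-allocated in unshared mode. A standalone restatement of the corrected formula, counts assumed:]

	#include <stdio.h>

	static int cp_rings_needed(int tx, int rx, int shared)
	{
		return shared ? (tx > rx ? tx : rx) : tx + rx;
	}

	int main(void)
	{
		/* e.g. 8 TX and 8 RX rings (illustrative) */
		printf("shared:   %d\n", cp_rings_needed(8, 8, 1));	/* 8 */
		printf("unshared: %d\n", cp_rings_needed(8, 8, 0));	/* 16 */
		return 0;
	}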
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ec6cd18842c3..60e2af8678bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
if (vf->flags & BNXT_VF_LINK_UP) {
/* if physical link is down, force link up on VF */
- if (phy_qcfg_resp.link ==
- PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+ if (phy_qcfg_resp.link !=
+ PORT_PHY_QCFG_RESP_LINK_LINK) {
phy_qcfg_resp.link =
PORT_PHY_QCFG_RESP_LINK_LINK;
phy_qcfg_resp.link_speed = cpu_to_le16(
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f9df4b5ae90e..f42f672b0e7e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
return 0;
hw_cons = *(tcb->hw_consumer_index);
+ rmb();
cons = tcb->consumer_index;
q_depth = tcb->q_depth;
@@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BNA_QE_INDX_INC(prod, q_depth);
tcb->producer_index = prod;
- smp_mb();
+ wmb();
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
return NETDEV_TX_OK;
@@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_tx_timestamp(skb);
bna_txq_prod_indx_doorbell(tcb);
- smp_mb();
return NETDEV_TX_OK;
}
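[Editor's note: the bnad hunks swap smp_mb() for wmb()/rmb() because the ordering here is against the device's DMA engine, not just other CPUs; smp_* barriers can compile away on UP kernels, while mandatory barriers cannot. The rmb() after reading the hw consumer index is the matching consumer-side fence, and the barrier after the doorbell write was ordering nothing and is dropped. Producer-side sketch, struct and names illustrative:]

	#include <linux/io.h>

	struct my_txq {				/* illustrative */
		u32 producer_index;
		void __iomem *doorbell;
	};

	static void my_tx_doorbell(struct my_txq *q, u32 prod)
	{
		q->producer_index = prod;	/* all descriptor stores happen before this */
		wmb();				/* order stores vs. the device's DMA reads */
		writel(prod, q->doorbell);	/* device may start fetching from here on */
	}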
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 63144bb413d1..533653bd7aec 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2673,6 +2673,12 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
lp->skb_length = skb->len;
lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(NULL, lp->skb_physaddr)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ netdev_err(dev, "%s: DMA mapping error\n", __func__);
+ return NETDEV_TX_OK;
+ }
/* Set address of the data in the Transmit Address register */
macb_writel(lp, TAR, lp->skb_physaddr);
@@ -3117,6 +3123,7 @@ static int macb_remove(struct platform_device *pdev)
if (dev->phydev)
phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
+ dev->phydev = NULL;
mdiobus_free(bp->mii_bus);
/* Shutdown the PHY if there is a GPIO reset */
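[Editor's note: the first macb hunk above adds the check that every dma_map_single() call needs; mapping can fail (e.g. under a constrained IOMMU), and using the bogus address would corrupt memory. Dropping the skb and returning NETDEV_TX_OK keeps the stack from retrying a packet the driver cannot send. Kernel-style sketch of the map-then-check idiom, names illustrative:]

	#include <linux/dma-mapping.h>
	#include <linux/netdevice.h>

	static int my_map_for_tx(struct device *dma_dev, struct net_device *ndev,
				 struct sk_buff *skb, dma_addr_t *addr)
	{
		*addr = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, *addr)) {
			dev_kfree_skb_any(skb);		/* drop; never use the address */
			ndev->stats.tx_dropped++;
			return -ENOMEM;
		}
		return 0;
	}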
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index bddb198c0b74..380a64115a98 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -693,7 +693,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
!(reg_val &
CN23XX_PKT_INPUT_CTL_QUIET) &&
- loop--) {
+ --loop) {
reg_val = octeon_read_csr64(
oct,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
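[Editor's note: the one-character liquidio fix matters because with post-decrement the test consumes one extra decrement, so a countdown loop exits with the counter at -1 and a later "if (!loop)" timeout check can never fire; pre-decrement leaves it at 0. Standalone demonstration:]

	#include <stdio.h>

	int main(void)
	{
		int loop;

		/* post-decrement: decremented even on the final failing test */
		for (loop = 3; loop--; )
			;
		printf("post-decrement exits with loop = %d\n", loop);	/* -1 */

		for (loop = 3; --loop; )
			;
		printf("pre-decrement exits with loop = %d\n", loop);	/* 0 */
		return 0;
	}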
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 30426109711c..86bd93ce2ea3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -47,7 +47,7 @@
/* Min/Max packet size */
#define NIC_HW_MIN_FRS 64
-#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
+#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */
/* Max pkinds */
#define NIC_MAX_PKIND 16
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset {
struct nicvf_hw_stats {
u64 rx_bytes;
+ u64 rx_frames;
u64 rx_ucast_frames;
u64 rx_bcast_frames;
u64 rx_mcast_frames;
- u64 rx_fcs_errors;
- u64 rx_l2_errors;
+ u64 rx_drops;
u64 rx_drop_red;
u64 rx_drop_red_bytes;
u64 rx_drop_overrun;
@@ -191,6 +191,19 @@ struct nicvf_hw_stats {
u64 rx_drop_mcast;
u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast;
+ u64 rx_fcs_errors;
+ u64 rx_l2_errors;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_ucast_frames;
+ u64 tx_bcast_frames;
+ u64 tx_mcast_frames;
+ u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+ /* CQE Rx errs */
u64 rx_bgx_truncated_pkts;
u64 rx_jabber_errs;
u64 rx_fcs_errs;
@@ -216,34 +229,30 @@ struct nicvf_hw_stats {
u64 rx_l4_pclp;
u64 rx_truncated_pkts;
- u64 tx_bytes_ok;
- u64 tx_ucast_frames_ok;
- u64 tx_bcast_frames_ok;
- u64 tx_mcast_frames_ok;
- u64 tx_drops;
-};
-
-struct nicvf_drv_stats {
- /* Rx */
- u64 rx_frames_ok;
- u64 rx_frames_64;
- u64 rx_frames_127;
- u64 rx_frames_255;
- u64 rx_frames_511;
- u64 rx_frames_1023;
- u64 rx_frames_1518;
- u64 rx_frames_jumbo;
- u64 rx_drops;
-
+ /* CQE Tx errs */
+ u64 tx_desc_fault;
+ u64 tx_hdr_cons_err;
+ u64 tx_subdesc_err;
+ u64 tx_max_size_exceeded;
+ u64 tx_imm_size_oflow;
+ u64 tx_data_seq_err;
+ u64 tx_mem_seq_err;
+ u64 tx_lock_viol;
+ u64 tx_data_fault;
+ u64 tx_tstmp_conflict;
+ u64 tx_tstmp_timeout;
+ u64 tx_mem_fault;
+ u64 tx_csum_overlap;
+ u64 tx_csum_overflow;
+
+ /* driver debug stats */
u64 rcv_buffer_alloc_failures;
-
- /* Tx */
- u64 tx_frames_ok;
- u64 tx_drops;
u64 tx_tso;
u64 tx_timeout;
u64 txq_stop;
u64 txq_wake;
+
+ struct u64_stats_sync syncp;
};
struct nicvf {
@@ -282,7 +291,6 @@ struct nicvf {
u8 node;
u8 cpi_alg;
- u16 mtu;
bool link_up;
u8 duplex;
u32 speed;
@@ -298,7 +306,7 @@ struct nicvf {
/* Stats */
struct nicvf_hw_stats hw_stats;
- struct nicvf_drv_stats drv_stats;
+ struct nicvf_drv_stats __percpu *drv_stats;
struct bgx_stats bgx_stats;
/* MSI-X */
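[Editor's note: the nic.h restructuring moves per-event counters into a __percpu nicvf_drv_stats so hot paths can bump them with this_cpu_inc() instead of contending on shared cachelines; readers sum across all possible CPUs. Condensed kernel-style sketch of the lifecycle, struct fields illustrative (netdev_alloc_pcpu_stats() requires the syncp member):]

	#include <linux/netdevice.h>
	#include <linux/percpu.h>

	struct my_drv_stats {			/* illustrative subset */
		u64 tx_tso;
		u64 txq_stop;
		struct u64_stats_sync syncp;
	};

	/* probe:    stats = netdev_alloc_pcpu_stats(struct my_drv_stats); */
	/* hot path: this_cpu_inc(stats->tx_tso);  -- lockless, per-CPU  */
	/* remove:   free_percpu(stats);                                 */

	static u64 my_sum_tx_tso(struct my_drv_stats __percpu *stats)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(stats, cpu)->tx_tso;
		return sum;
	}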
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 2bbf4cbf08b2..6677b96e1f3f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
+#include <linux/if_vlan.h>
#include "nic_reg.h"
#include "nic.h"
@@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
- if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
- dev_err(&nic->pdev->dev,
- "Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
- vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+ int bgx, lmac, lmac_cnt;
+ u64 lmac_credits;
+
+ if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
return 1;
- }
- new_frs += ETH_HLEN;
- if (new_frs <= nic->pkind.maxlen)
- return 0;
- nic->pkind.maxlen = new_frs;
- nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac += bgx * MAX_LMAC_PER_BGX;
+
+ new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
+
+ /* Update corresponding LMAC credits */
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
+ lmac_credits &= ~(0xFFFFFULL << 12);
+ lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
+
+ /* Enforce MTU in HW
+ * This config is supported only from 88xx pass 2.0 onwards.
+ */
+ if (!pass1_silicon(nic->pdev))
+ nic_reg_write(nic,
+ NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
return 0;
}
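[Editor's note: the new FRS path also resizes the LMAC credit field (bits 12 and up of NIC_PF_LMAC_0_7_CREDIT, per the hunk): the 48 KB buffer is split evenly across the LMACs on the BGX, one max frame is reserved, and the remainder is expressed in 16-byte units. Standalone restatement of that arithmetic, example values assumed:]

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t lmac_credit_field(int lmac_cnt, int frame_bytes)
	{
		/* ((per-LMAC share of 48 KB) - one max frame) in 16-byte units */
		return (uint64_t)(((48 * 1024) / lmac_cnt - frame_bytes) / 16) << 12;
	}

	int main(void)
	{
		/* e.g. 4 LMACs, 9216-byte frames: (12288 - 9216) / 16 = 192 */
		uint64_t field = lmac_credit_field(4, 9216);

		printf("credit bits: 0x%llx\n", (unsigned long long)field);
		return 0;
	}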
@@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic)
/* PKIND configuration */
nic->pkind.minlen = 0;
- nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+ nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
nic->pkind.lenerr_en = 1;
nic->pkind.rx_hdr = 0;
nic->pkind.hdr_sl = 0;
@@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic,
nic_reg_write(nic, reg_addr, 0);
}
}
+
return 0;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index edf779f5a227..80d46337cf29 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -106,6 +106,7 @@
#define NIC_PF_MPI_0_2047_CFG (0x210000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_CFG2 (0x240100)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index ad4fddb55421..432bf6be57cb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -36,11 +36,11 @@ struct nicvf_stat {
static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_bytes),
+ NICVF_HW_STAT(rx_frames),
NICVF_HW_STAT(rx_ucast_frames),
NICVF_HW_STAT(rx_bcast_frames),
NICVF_HW_STAT(rx_mcast_frames),
- NICVF_HW_STAT(rx_fcs_errors),
- NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(rx_drops),
NICVF_HW_STAT(rx_drop_red),
NICVF_HW_STAT(rx_drop_red_bytes),
NICVF_HW_STAT(rx_drop_overrun),
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_drop_mcast),
NICVF_HW_STAT(rx_drop_l3_bcast),
NICVF_HW_STAT(rx_drop_l3_mcast),
- NICVF_HW_STAT(rx_bgx_truncated_pkts),
- NICVF_HW_STAT(rx_jabber_errs),
- NICVF_HW_STAT(rx_fcs_errs),
- NICVF_HW_STAT(rx_bgx_errs),
- NICVF_HW_STAT(rx_prel2_errs),
- NICVF_HW_STAT(rx_l2_hdr_malformed),
- NICVF_HW_STAT(rx_oversize),
- NICVF_HW_STAT(rx_undersize),
- NICVF_HW_STAT(rx_l2_len_mismatch),
- NICVF_HW_STAT(rx_l2_pclp),
- NICVF_HW_STAT(rx_ip_ver_errs),
- NICVF_HW_STAT(rx_ip_csum_errs),
- NICVF_HW_STAT(rx_ip_hdr_malformed),
- NICVF_HW_STAT(rx_ip_payload_malformed),
- NICVF_HW_STAT(rx_ip_ttl_errs),
- NICVF_HW_STAT(rx_l3_pclp),
- NICVF_HW_STAT(rx_l4_malformed),
- NICVF_HW_STAT(rx_l4_csum_errs),
- NICVF_HW_STAT(rx_udp_len_errs),
- NICVF_HW_STAT(rx_l4_port_errs),
- NICVF_HW_STAT(rx_tcp_flag_errs),
- NICVF_HW_STAT(rx_tcp_offset_errs),
- NICVF_HW_STAT(rx_l4_pclp),
- NICVF_HW_STAT(rx_truncated_pkts),
- NICVF_HW_STAT(tx_bytes_ok),
- NICVF_HW_STAT(tx_ucast_frames_ok),
- NICVF_HW_STAT(tx_bcast_frames_ok),
- NICVF_HW_STAT(tx_mcast_frames_ok),
+ NICVF_HW_STAT(rx_fcs_errors),
+ NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(tx_bytes),
+ NICVF_HW_STAT(tx_frames),
+ NICVF_HW_STAT(tx_ucast_frames),
+ NICVF_HW_STAT(tx_bcast_frames),
+ NICVF_HW_STAT(tx_mcast_frames),
+ NICVF_HW_STAT(tx_drops),
};
static const struct nicvf_stat nicvf_drv_stats[] = {
- NICVF_DRV_STAT(rx_frames_ok),
- NICVF_DRV_STAT(rx_frames_64),
- NICVF_DRV_STAT(rx_frames_127),
- NICVF_DRV_STAT(rx_frames_255),
- NICVF_DRV_STAT(rx_frames_511),
- NICVF_DRV_STAT(rx_frames_1023),
- NICVF_DRV_STAT(rx_frames_1518),
- NICVF_DRV_STAT(rx_frames_jumbo),
- NICVF_DRV_STAT(rx_drops),
+ NICVF_DRV_STAT(rx_bgx_truncated_pkts),
+ NICVF_DRV_STAT(rx_jabber_errs),
+ NICVF_DRV_STAT(rx_fcs_errs),
+ NICVF_DRV_STAT(rx_bgx_errs),
+ NICVF_DRV_STAT(rx_prel2_errs),
+ NICVF_DRV_STAT(rx_l2_hdr_malformed),
+ NICVF_DRV_STAT(rx_oversize),
+ NICVF_DRV_STAT(rx_undersize),
+ NICVF_DRV_STAT(rx_l2_len_mismatch),
+ NICVF_DRV_STAT(rx_l2_pclp),
+ NICVF_DRV_STAT(rx_ip_ver_errs),
+ NICVF_DRV_STAT(rx_ip_csum_errs),
+ NICVF_DRV_STAT(rx_ip_hdr_malformed),
+ NICVF_DRV_STAT(rx_ip_payload_malformed),
+ NICVF_DRV_STAT(rx_ip_ttl_errs),
+ NICVF_DRV_STAT(rx_l3_pclp),
+ NICVF_DRV_STAT(rx_l4_malformed),
+ NICVF_DRV_STAT(rx_l4_csum_errs),
+ NICVF_DRV_STAT(rx_udp_len_errs),
+ NICVF_DRV_STAT(rx_l4_port_errs),
+ NICVF_DRV_STAT(rx_tcp_flag_errs),
+ NICVF_DRV_STAT(rx_tcp_offset_errs),
+ NICVF_DRV_STAT(rx_l4_pclp),
+ NICVF_DRV_STAT(rx_truncated_pkts),
+
+ NICVF_DRV_STAT(tx_desc_fault),
+ NICVF_DRV_STAT(tx_hdr_cons_err),
+ NICVF_DRV_STAT(tx_subdesc_err),
+ NICVF_DRV_STAT(tx_max_size_exceeded),
+ NICVF_DRV_STAT(tx_imm_size_oflow),
+ NICVF_DRV_STAT(tx_data_seq_err),
+ NICVF_DRV_STAT(tx_mem_seq_err),
+ NICVF_DRV_STAT(tx_lock_viol),
+ NICVF_DRV_STAT(tx_data_fault),
+ NICVF_DRV_STAT(tx_tstmp_conflict),
+ NICVF_DRV_STAT(tx_tstmp_timeout),
+ NICVF_DRV_STAT(tx_mem_fault),
+ NICVF_DRV_STAT(tx_csum_overlap),
+ NICVF_DRV_STAT(tx_csum_overflow),
+
NICVF_DRV_STAT(rcv_buffer_alloc_failures),
- NICVF_DRV_STAT(tx_frames_ok),
NICVF_DRV_STAT(tx_tso),
- NICVF_DRV_STAT(tx_drops),
NICVF_DRV_STAT(tx_timeout),
NICVF_DRV_STAT(txq_stop),
NICVF_DRV_STAT(txq_wake),
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct nicvf *nic = netdev_priv(netdev);
- int stat;
- int sqs;
+ int stat;
+ u64 tmp_stats;
+ int sqs, cpu;
nicvf_update_stats(nic);
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
for (stat = 0; stat < nicvf_n_hw_stats; stat++)
*(data++) = ((u64 *)&nic->hw_stats)
[nicvf_hw_stats[stat].index];
- for (stat = 0; stat < nicvf_n_drv_stats; stat++)
- *(data++) = ((u64 *)&nic->drv_stats)
- [nicvf_drv_stats[stat].index];
+ for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
+ tmp_stats = 0;
+ for_each_possible_cpu(cpu)
+ tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
+ [nicvf_drv_stats[stat].index];
+ *(data++) = tmp_stats;
+ }
nicvf_get_qset_stats(nic, stats, &data);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 45a13f718863..8a37012c9c89 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
return qidx;
}
-static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
- struct sk_buff *skb)
-{
- if (skb->len <= 64)
- nic->drv_stats.rx_frames_64++;
- else if (skb->len <= 127)
- nic->drv_stats.rx_frames_127++;
- else if (skb->len <= 255)
- nic->drv_stats.rx_frames_255++;
- else if (skb->len <= 511)
- nic->drv_stats.rx_frames_511++;
- else if (skb->len <= 1023)
- nic->drv_stats.rx_frames_1023++;
- else if (skb->len <= 1518)
- nic->drv_stats.rx_frames_1518++;
- else
- nic->drv_stats.rx_frames_jumbo++;
-}
-
/* The Cavium ThunderX network controller can *only* be found in SoCs
* containing the ThunderX ARM64 CPU implementation. All accesses to the device
* registers on this platform are implicitly strongly ordered with respect
@@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev,
static int nicvf_init_resources(struct nicvf *nic)
{
int err;
- union nic_mbx mbx = {};
-
- mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
/* Enable Qset */
nicvf_qset_config(nic, true);
@@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic)
return err;
}
- /* Send VF config done msg to PF */
- nicvf_write_to_mbx(nic, &mbx);
-
return 0;
}
static void nicvf_snd_pkt_handler(struct net_device *netdev,
- struct cmp_queue *cq,
struct cqe_send_t *cqe_tx,
int cqe_type, int budget,
unsigned int *tx_pkts, unsigned int *tx_bytes)
@@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
__func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
cqe_tx->sqe_ptr, hdr->subdesc_cnt);
- nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+ nicvf_check_cqe_tx_errs(nic, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
if (skb) {
/* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
}
- nicvf_set_rx_frame_cnt(nic, skb);
-
nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
@@ -703,7 +675,7 @@ loop:
work_done++;
break;
case CQE_TYPE_SEND:
- nicvf_snd_pkt_handler(netdev, cq,
+ nicvf_snd_pkt_handler(netdev,
(void *)cq_desc, CQE_TYPE_SEND,
budget, &tx_pkts, &tx_bytes);
tx_done++;
@@ -740,7 +712,7 @@ done:
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
- nic->drv_stats.txq_wake++;
+ this_cpu_inc(nic->drv_stats->txq_wake);
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit queue wakeup SQ%d\n",
@@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
netif_tx_stop_queue(txq);
- nic->drv_stats.txq_stop++;
+ this_cpu_inc(nic->drv_stats->txq_stop);
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit ring full, stopping SQ%d\n",
@@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev)
return 0;
}
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+ union nic_mbx mbx = {};
+
+ mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
int nicvf_open(struct net_device *netdev)
{
- int err, qidx;
+ int cpu, err, qidx;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct nicvf_cq_poll *cq_poll = NULL;
-
- nic->mtu = netdev->mtu;
+ union nic_mbx mbx = {};
netif_carrier_off(netdev);
@@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev)
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
- /* Configure receive side scaling */
- if (!nic->sqs_mode)
+ /* Configure receive side scaling and MTU */
+ if (!nic->sqs_mode) {
nicvf_rss_init(nic);
+ if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+ goto cleanup;
+
+ /* Clear percpu stats */
+ for_each_possible_cpu(cpu)
+ memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
+ sizeof(struct nicvf_drv_stats));
+ }
err = nicvf_register_interrupts(nic);
if (err)
@@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev)
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
- nic->drv_stats.txq_stop = 0;
- nic->drv_stats.txq_wake = 0;
+ /* Send VF config done msg to PF */
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ nicvf_write_to_mbx(nic, &mbx);
return 0;
cleanup:
@@ -1297,17 +1288,6 @@ napi_del:
return err;
}
-static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
-{
- union nic_mbx mbx = {};
-
- mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
- mbx.frs.max_frs = mtu;
- mbx.frs.vf_id = nic->vf_id;
-
- return nicvf_send_msg_to_pf(nic, &mbx);
-}
-
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
@@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
if (new_mtu < NIC_HW_MIN_FRS)
return -EINVAL;
+ netdev->mtu = new_mtu;
+
+ if (!netif_running(netdev))
+ return 0;
+
if (nicvf_update_hw_max_frs(nic, new_mtu))
return -EINVAL;
- netdev->mtu = new_mtu;
- nic->mtu = new_mtu;
return 0;
}
@@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
void nicvf_update_stats(struct nicvf *nic)
{
- int qidx;
+ int qidx, cpu;
+ u64 tmp_stats = 0;
struct nicvf_hw_stats *stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+ struct nicvf_drv_stats *drv_stats;
struct queue_set *qs = nic->qs;
#define GET_RX_STATS(reg) \
@@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic)
stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
- stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
- stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
- stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
- stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+ stats->tx_bytes = GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
stats->tx_drops = GET_TX_STATS(TX_DROP);
- drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
- stats->tx_bcast_frames_ok +
- stats->tx_mcast_frames_ok;
- drv_stats->rx_frames_ok = stats->rx_ucast_frames +
- stats->rx_bcast_frames +
- stats->rx_mcast_frames;
- drv_stats->rx_drops = stats->rx_drop_red +
- stats->rx_drop_overrun;
- drv_stats->tx_drops = stats->tx_drops;
+ /* On T88 pass 2.0, the dummy SQE added for TSO notification
+ * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
+ * to by the dummy SQE, and the tx_drops counter gets
+ * incremented. Subtracting tx_drops from the tx_tso counter
+ * gives the exact tx_drops count.
+ */
+ if (nic->t88 && nic->hw_tso) {
+ for_each_possible_cpu(cpu) {
+ drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
+ tmp_stats += drv_stats->tx_tso;
+ }
+ stats->tx_drops = tmp_stats - stats->tx_drops;
+ }
+ stats->tx_frames = stats->tx_ucast_frames +
+ stats->tx_bcast_frames +
+ stats->tx_mcast_frames;
+ stats->rx_frames = stats->rx_ucast_frames +
+ stats->rx_bcast_frames +
+ stats->rx_mcast_frames;
+ stats->rx_drops = stats->rx_drop_red +
+ stats->rx_drop_overrun;
/* Update RQ and SQ stats */
for (qidx = 0; qidx < qs->rq_cnt; qidx++)
@@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
{
struct nicvf *nic = netdev_priv(netdev);
struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
nicvf_update_stats(nic);
stats->rx_bytes = hw_stats->rx_bytes;
- stats->rx_packets = drv_stats->rx_frames_ok;
- stats->rx_dropped = drv_stats->rx_drops;
+ stats->rx_packets = hw_stats->rx_frames;
+ stats->rx_dropped = hw_stats->rx_drops;
stats->multicast = hw_stats->rx_mcast_frames;
- stats->tx_bytes = hw_stats->tx_bytes_ok;
- stats->tx_packets = drv_stats->tx_frames_ok;
- stats->tx_dropped = drv_stats->tx_drops;
+ stats->tx_bytes = hw_stats->tx_bytes;
+ stats->tx_packets = hw_stats->tx_frames;
+ stats->tx_dropped = hw_stats->tx_drops;
return stats;
}
@@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
netdev_warn(dev, "%s: Transmit timed out, resetting\n",
dev->name);
- nic->drv_stats.tx_timeout++;
+ this_cpu_inc(nic->drv_stats->tx_timeout);
schedule_work(&nic->reset_task);
}
@@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_netdev;
}
+ nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
+ if (!nic->drv_stats) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
err = nicvf_set_qset_resources(nic);
if (err)
goto err_free_netdev;
@@ -1648,6 +1649,8 @@ err_unregister_interrupts:
nicvf_unregister_interrupts(nic);
err_free_netdev:
pci_set_drvdata(pdev, NULL);
+ if (nic->drv_stats)
+ free_percpu(nic->drv_stats);
free_netdev(netdev);
err_release_regions:
pci_release_regions(pdev);
@@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev)
unregister_netdev(pnetdev);
nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL);
+ if (nic->drv_stats)
+ free_percpu(nic->drv_stats);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a4fc50155881..747ef0882976 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
- nic->drv_stats.rcv_buffer_alloc_failures++;
+ this_cpu_inc(nic->pnicvf->drv_stats->
+ rcv_buffer_alloc_failures);
return -ENOMEM;
}
nic->rb_page_offset = 0;
@@ -270,7 +271,8 @@ refill:
rbdr_idx, new_rb);
next_rbdr:
/* Re-enable RBDR interrupts only if buffer allocation is success */
- if (!nic->rb_alloc_fail && rbdr->enable)
+ if (!nic->rb_alloc_fail && rbdr->enable &&
+ netif_running(nic->pnicvf->netdev))
nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
if (rbdr_idx)
@@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
+ struct sk_buff *skb;
+
if (!sq)
return;
if (!sq->dmem.base)
@@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
sq->dmem.q_len * TSO_HEADER_SIZE,
sq->tso_hdrs, sq->tso_hdrs_phys);
+ /* Free pending skbs in the queue */
+ smp_rmb();
+ while (sq->head != sq->tail) {
+ skb = (struct sk_buff *)sq->skbuff[sq->head];
+ if (skb)
+ dev_kfree_skb_any(skb);
+ sq->head++;
+ sq->head &= (sq->dmem.q_len - 1);
+ }
kfree(sq->skbuff);
nicvf_free_q_desc_mem(nic, &sq->dmem);
}
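[Editor's note: the added loop in nicvf_free_snd_queue() drains skbs still sitting between head and tail so queue teardown does not leak them; the ring length is a power of two, so the index wraps with a mask rather than a modulo. Standalone model of the drain, with free() standing in for dev_kfree_skb_any():]

	#include <stdio.h>
	#include <stdlib.h>

	#define QLEN 8		/* must be a power of two for the mask to work */

	int main(void)
	{
		void *skbuff[QLEN] = { NULL };
		unsigned int head = 6, tail = 2;	/* wrapped: 6,7,0,1 pending */

		skbuff[6] = malloc(1); skbuff[7] = malloc(1);
		skbuff[0] = malloc(1); skbuff[1] = malloc(1);

		while (head != tail) {
			if (skbuff[head])
				free(skbuff[head]);	/* dev_kfree_skb_any() in the driver */
			head = (head + 1) & (QLEN - 1);	/* wrap: q_len is 2^n */
		}
		printf("drained up to tail=%u\n", tail);
		return 0;
	}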
@@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
union nic_mbx mbx = {};
- /* Reset all RXQ's stats */
+ /* Reset all RQ/SQ and VF stats */
mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+ mbx.reset_stat.rx_stat_mask = 0x3FFF;
+ mbx.reset_stat.tx_stat_mask = 0x1F;
mbx.reset_stat.rq_stat_mask = 0xFFFF;
+ mbx.reset_stat.sq_stat_mask = 0xFFFF;
nicvf_send_msg_to_pf(nic, &mbx);
}
@@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
nicvf_send_msg_to_pf(nic, &mbx);
- nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
- if (!nic->sqs_mode)
+ if (!nic->sqs_mode && (qidx == 0)) {
+ /* Enable checking L3/L4 length and TCP/UDP checksums */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
+ (BIT(24) | BIT(23) | BIT(21)));
nicvf_config_vlan_stripping(nic, nic->netdev->features);
+ }
/* Enable Receive queue */
memset(&rq_cfg, 0, sizeof(struct rq_cfg));
@@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
- nic->drv_stats.tx_tso++;
+ this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
}
@@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
- nic->drv_stats.tx_tso++;
+ this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
return 1;
}
@@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
- struct nicvf_hw_stats *stats = &nic->hw_stats;
-
if (!cqe_rx->err_level && !cqe_rx->err_opcode)
return 0;
@@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL:
- stats->rx_bgx_truncated_pkts++;
+ this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
break;
case CQ_RX_ERROP_RE_JABBER:
- stats->rx_jabber_errs++;
+ this_cpu_inc(nic->drv_stats->rx_jabber_errs);
break;
case CQ_RX_ERROP_RE_FCS:
- stats->rx_fcs_errs++;
+ this_cpu_inc(nic->drv_stats->rx_fcs_errs);
break;
case CQ_RX_ERROP_RE_RX_CTL:
- stats->rx_bgx_errs++;
+ this_cpu_inc(nic->drv_stats->rx_bgx_errs);
break;
case CQ_RX_ERROP_PREL2_ERR:
- stats->rx_prel2_errs++;
+ this_cpu_inc(nic->drv_stats->rx_prel2_errs);
break;
case CQ_RX_ERROP_L2_MAL:
- stats->rx_l2_hdr_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
break;
case CQ_RX_ERROP_L2_OVERSIZE:
- stats->rx_oversize++;
+ this_cpu_inc(nic->drv_stats->rx_oversize);
break;
case CQ_RX_ERROP_L2_UNDERSIZE:
- stats->rx_undersize++;
+ this_cpu_inc(nic->drv_stats->rx_undersize);
break;
case CQ_RX_ERROP_L2_LENMISM:
- stats->rx_l2_len_mismatch++;
+ this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
break;
case CQ_RX_ERROP_L2_PCLP:
- stats->rx_l2_pclp++;
+ this_cpu_inc(nic->drv_stats->rx_l2_pclp);
break;
case CQ_RX_ERROP_IP_NOT:
- stats->rx_ip_ver_errs++;
+ this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
break;
case CQ_RX_ERROP_IP_CSUM_ERR:
- stats->rx_ip_csum_errs++;
+ this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
break;
case CQ_RX_ERROP_IP_MAL:
- stats->rx_ip_hdr_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
break;
case CQ_RX_ERROP_IP_MALD:
- stats->rx_ip_payload_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
break;
case CQ_RX_ERROP_IP_HOP:
- stats->rx_ip_ttl_errs++;
+ this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
break;
case CQ_RX_ERROP_L3_PCLP:
- stats->rx_l3_pclp++;
+ this_cpu_inc(nic->drv_stats->rx_l3_pclp);
break;
case CQ_RX_ERROP_L4_MAL:
- stats->rx_l4_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_l4_malformed);
break;
case CQ_RX_ERROP_L4_CHK:
- stats->rx_l4_csum_errs++;
+ this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
break;
case CQ_RX_ERROP_UDP_LEN:
- stats->rx_udp_len_errs++;
+ this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
break;
case CQ_RX_ERROP_L4_PORT:
- stats->rx_l4_port_errs++;
+ this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
break;
case CQ_RX_ERROP_TCP_FLAG:
- stats->rx_tcp_flag_errs++;
+ this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
break;
case CQ_RX_ERROP_TCP_OFFSET:
- stats->rx_tcp_offset_errs++;
+ this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
break;
case CQ_RX_ERROP_L4_PCLP:
- stats->rx_l4_pclp++;
+ this_cpu_inc(nic->drv_stats->rx_l4_pclp);
break;
case CQ_RX_ERROP_RBDR_TRUNC:
- stats->rx_truncated_pkts++;
+ this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
break;
}
@@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
}
/* Check for errors in the send cmp.queue entry */
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
- struct cmp_queue_stats *stats = &cq->stats;
-
switch (cqe_tx->send_status) {
case CQ_TX_ERROP_GOOD:
- stats->tx.good++;
return 0;
case CQ_TX_ERROP_DESC_FAULT:
- stats->tx.desc_fault++;
+ this_cpu_inc(nic->drv_stats->tx_desc_fault);
break;
case CQ_TX_ERROP_HDR_CONS_ERR:
- stats->tx.hdr_cons_err++;
+ this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
break;
case CQ_TX_ERROP_SUBDC_ERR:
- stats->tx.subdesc_err++;
+ this_cpu_inc(nic->drv_stats->tx_subdesc_err);
+ break;
+ case CQ_TX_ERROP_MAX_SIZE_VIOL:
+ this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
break;
case CQ_TX_ERROP_IMM_SIZE_OFLOW:
- stats->tx.imm_size_oflow++;
+ this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
break;
case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
- stats->tx.data_seq_err++;
+ this_cpu_inc(nic->drv_stats->tx_data_seq_err);
break;
case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
- stats->tx.mem_seq_err++;
+ this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
break;
case CQ_TX_ERROP_LOCK_VIOL:
- stats->tx.lock_viol++;
+ this_cpu_inc(nic->drv_stats->tx_lock_viol);
break;
case CQ_TX_ERROP_DATA_FAULT:
- stats->tx.data_fault++;
+ this_cpu_inc(nic->drv_stats->tx_data_fault);
break;
case CQ_TX_ERROP_TSTMP_CONFLICT:
- stats->tx.tstmp_conflict++;
+ this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
break;
case CQ_TX_ERROP_TSTMP_TIMEOUT:
- stats->tx.tstmp_timeout++;
+ this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
break;
case CQ_TX_ERROP_MEM_FAULT:
- stats->tx.mem_fault++;
+ this_cpu_inc(nic->drv_stats->tx_mem_fault);
break;
case CQ_TX_ERROP_CK_OVERLAP:
- stats->tx.csum_overlap++;
+ this_cpu_inc(nic->drv_stats->tx_csum_overlap);
break;
case CQ_TX_ERROP_CK_OFLOW:
- stats->tx.csum_overflow++;
+ this_cpu_inc(nic->drv_stats->tx_csum_overflow);
break;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 869f3386028b..2e3c940c1093 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E {
CQ_TX_ERROP_DESC_FAULT = 0x10,
CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
@@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E {
CQ_TX_ERROP_ENUM_LAST = 0x8a,
};
-struct cmp_queue_stats {
- struct tx_stats {
- u64 good;
- u64 desc_fault;
- u64 hdr_cons_err;
- u64 subdesc_err;
- u64 imm_size_oflow;
- u64 data_seq_err;
- u64 mem_seq_err;
- u64 lock_viol;
- u64 data_fault;
- u64 tstmp_conflict;
- u64 tstmp_timeout;
- u64 mem_fault;
- u64 csum_overlap;
- u64 csum_overflow;
- } tx;
-} ____cacheline_aligned_in_smp;
-
enum RQ_SQ_STATS {
RQ_SQ_STATS_OCTS,
RQ_SQ_STATS_PKTS,
@@ -241,7 +223,6 @@ struct cmp_queue {
spinlock_t lock; /* lock to serialize processing CQEs */
void *desc;
struct q_desc_mem dmem;
- struct cmp_queue_stats stats;
int irq;
} ____cacheline_aligned_in_smp;
@@ -336,6 +317,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 8bbaedbb7b94..050e21fbb147 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
- bgx->bgx_id =
- (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id = (pci_resource_start(pdev,
+ PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
bgx->max_lmac = MAX_LMAC_PER_BGX;
bgx_vnic[bgx->bgx_id] = bgx;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index d59c71e4a000..01cc7c859131 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -28,6 +28,8 @@
#define MAX_DMAC_PER_LMAC 8
#define MAX_FRAME_SIZE 9216
+#define BGX_ID_MASK 0x3
+
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
/* Registers */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 28e653e9c856..2125903043fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -350,6 +350,7 @@ struct adapter_params {
unsigned int nsched_cls; /* number of traffic classes */
unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
unsigned int max_ird_adapter; /* Max read depth per adapter */
+ bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
};
/* State needed to monitor the forward progress of SGE Ingress DMA activities
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index cf147ca419a8..57eb4e1345cb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3668,6 +3668,12 @@ static int adap_init0(struct adapter *adap)
adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
}
+ /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
+ params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
+
/*
* Get device capabilities so we can determine what resources we need
* to manage.
@@ -4051,7 +4057,7 @@ static void cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
if (n10g) {
- i = num_online_cpus();
+ i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
s->ofldqsets = roundup(i, adap->params.nports);
} else {
s->ofldqsets = adap->params.nports;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index b4b2d20aab3c..2471ff465d5c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -135,15 +135,17 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
}
static int alloc_uld_rxqs(struct adapter *adap,
- struct sge_uld_rxq_info *rxq_info,
- unsigned int nq, unsigned int offset, bool lro)
+ struct sge_uld_rxq_info *rxq_info, bool lro)
{
struct sge *s = &adap->sge;
- struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
- unsigned short *ids = rxq_info->rspq_id + offset;
- unsigned int per_chan = nq / adap->params.nports;
+ unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq;
+ unsigned short *ids = rxq_info->rspq_id;
unsigned int bmap_idx = 0;
- int i, err, msi_idx;
+ unsigned int per_chan;
+ int i, err, msi_idx, que_idx = 0;
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
if (adap->flags & USING_MSIX)
msi_idx = 1;
@@ -151,12 +153,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
msi_idx = -((int)s->intrq.abs_id + 1);
for (i = 0; i < nq; i++, q++) {
+ if (i == rxq_info->nrxq) {
+ /* start allocation of concentrator queues */
+ per_chan = rxq_info->nciq / adap->params.nports;
+ que_idx = 0;
+ }
+
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
+ adap->port[que_idx++ / per_chan],
msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
@@ -165,29 +173,19 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (err)
goto freeout;
if (msi_idx >= 0)
- rxq_info->msix_tbl[i + offset] = bmap_idx;
+ rxq_info->msix_tbl[i] = bmap_idx;
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
}
return 0;
freeout:
- q = rxq_info->uldrxq + offset;
+ q = rxq_info->uldrxq;
for ( ; i; i--, q++) {
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
}
-
- /* We need to free rxq also in case of ciq allocation failure */
- if (offset) {
- q = rxq_info->uldrxq + offset;
- for ( ; i; i--, q++) {
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
- }
- }
return err;
}
@@ -205,9 +203,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
return -ENOMEM;
}
- ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
- !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
- rxq_info->nrxq, lro));
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
if (adap->flags & FULL_INIT_DONE &&
@@ -550,6 +546,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->max_ird_adapter = adap->params.max_ird_adapter;
lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
lld->nodeid = dev_to_node(adap->pdev_dev);
+ lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
}
static void uld_attach(struct adapter *adap, unsigned int uld)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 47bd14f602db..2996793b1aaa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -308,6 +308,7 @@ struct cxgb4_lld_info {
unsigned int iscsi_llimit; /* chip's iscsi region llimit */
void **iscsi_ppm; /* iscsi page pod manager */
int nodeid; /* device numa node id */
+ bool fr_nsmr_tpte_wr_support; /* FW supports FR_NSMR_TPTE_WR */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 539de764bbd3..cbd68a8fe2e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -210,8 +210,10 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
- if (err)
+ if (err) {
+ t4_free_mem(qe);
goto out;
+ }
/* Bind queue to specified class */
memset(qe, 0, sizeof(*qe));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1e74fd6085df..e19a0ca8e5dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2951,7 +2951,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
- napi_hash_del(&rq->napi);
netif_napi_del(&rq->napi);
rq->netdev = NULL;
rq->cntxt_id = rq->abs_id = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 50812a1d67bd..df1573c4a659 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -178,9 +178,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6005),
CH_PCI_ID_TABLE_FENTRY(0x6006),
CH_PCI_ID_TABLE_FENTRY(0x6007),
+ CH_PCI_ID_TABLE_FENTRY(0x6008),
CH_PCI_ID_TABLE_FENTRY(0x6009),
CH_PCI_ID_TABLE_FENTRY(0x600d),
- CH_PCI_ID_TABLE_FENTRY(0x6010),
CH_PCI_ID_TABLE_FENTRY(0x6011),
CH_PCI_ID_TABLE_FENTRY(0x6014),
CH_PCI_ID_TABLE_FENTRY(0x6015),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 4b58b32105f7..8d9e4b7a8e84 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -100,6 +100,7 @@ enum fw_wr_opcodes {
FW_RI_RECV_WR = 0x17,
FW_RI_BIND_MW_WR = 0x18,
FW_RI_FR_NSMR_WR = 0x19,
+ FW_RI_FR_NSMR_TPTE_WR = 0x20,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_ISCSI_TX_DATA_WR = 0x45,
FW_CRYPTO_LOOKASIDE_WR = 0X6d,
@@ -1121,6 +1122,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
FW_PARAMS_PARAM_DEV_FWCACHE = 0x18,
+ FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
};
/*
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index e572a527b18d..36bc2c71fba9 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -169,19 +169,28 @@ int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
struct vnic_dev *vdev = rq->vdev;
+ int i;
- iowrite32(0, &rq->ctrl->enable);
+ /* Due to a race condition with clearing RQ "mini-cache" in hw, we need
+ * to disable the RQ twice to guarantee that stale descriptors are not
+ * used when this RQ is re-enabled.
+ */
+ for (i = 0; i < 2; i++) {
+ iowrite32(0, &rq->ctrl->enable);
- /* Wait for HW to ACK disable request */
- for (wait = 0; wait < 1000; wait++) {
- if (!(ioread32(&rq->ctrl->running)))
- return 0;
- udelay(10);
- }
+ /* Wait for HW to ACK disable request */
+ for (wait = 20000; wait > 0; wait--)
+ if (!ioread32(&rq->ctrl->running))
+ break;
+ if (!wait) {
+ vdev_neterr(vdev, "Failed to disable RQ[%d]\n",
+ rq->index);
- vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index);
+ return -ETIMEDOUT;
+ }
+ }
- return -ETIMEDOUT;
+ return 0;
}
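[Editor's note: the enic rewrite counts the wait down so the post-loop "if (!wait)" cleanly distinguishes an acked disable from a timeout, then wraps the whole disable in a two-pass loop to flush the hardware's RQ mini-cache. Standalone skeleton of the countdown-poll idiom, with the hardware read stubbed so it runs:]

	#include <stdio.h>

	static int hw_running = 3;	/* stub: "HW" acks after a few polls */

	static int rq_still_running(void) { return hw_running-- > 0; }

	int main(void)
	{
		int wait;

		for (wait = 20000; wait > 0; wait--)
			if (!rq_still_running())
				break;

		if (!wait) {
			fprintf(stderr, "disable timed out\n");
			return 1;	/* -ETIMEDOUT in the driver */
		}
		printf("acked with %d polls to spare\n", wait);
		return 0;
	}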
void vnic_rq_clean(struct vnic_rq *rq,
@@ -212,6 +221,11 @@ void vnic_rq_clean(struct vnic_rq *rq,
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
iowrite32(fetch_index, &rq->ctrl->posted_index);
+ /* Anytime we write fetch_index, we need to re-write 0 to rq->enable
+ * to re-sync internal VIC state.
+ */
+ iowrite32(0, &rq->ctrl->enable);
+
vnic_dev_clear_desc_ring(&rq->ring);
}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 9cffe48be156..1fb5d7239254 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2728,6 +2728,26 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
return 0;
}
+#define NCSI_UPDATE_LOG "NCSI section update is not supported in FW ver %s\n"
+static bool be_fw_ncsi_supported(char *ver)
+{
+ int v1[4] = {3, 102, 148, 0}; /* Min ver that supports NCSI FW */
+ int v2[4];
+ int i;
+
+ if (sscanf(ver, "%d.%d.%d.%d", &v2[0], &v2[1], &v2[2], &v2[3]) != 4)
+ return false;
+
+ for (i = 0; i < 4; i++) {
+ if (v1[i] < v2[i])
+ return true;
+ else if (v1[i] > v2[i])
+ return false;
+ }
+
+ return true;
+}
+
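[Editor's note: be_fw_ncsi_supported() parses "a.b.c.d" and compares field by field against the 3.102.148.0 minimum, returning true when the firmware is at least that version; the old memcmp() compared strings lexically and mis-orders fields of different widths (e.g. "3.9" vs "3.102"). Standalone equivalent of the comparison:]

	#include <stdio.h>

	/* Return nonzero when "ver" >= min[4], field by field */
	static int ver_at_least(const char *ver, const int min[4])
	{
		int v[4], i;

		if (sscanf(ver, "%d.%d.%d.%d", &v[0], &v[1], &v[2], &v[3]) != 4)
			return 0;
		for (i = 0; i < 4; i++) {
			if (v[i] > min[i])
				return 1;
			if (v[i] < min[i])
				return 0;
		}
		return 1;	/* exactly equal */
	}

	int main(void)
	{
		const int min[4] = { 3, 102, 148, 0 };

		printf("%d\n", ver_at_least("3.102.148.0", min));	/* 1 */
		printf("%d\n", ver_at_least("3.9.200.0", min));		/* 0: 9 < 102 */
		printf("%d\n", ver_at_least("11.0.0.0", min));		/* 1 */
		return 0;
	}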
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
const struct firmware *fw,
@@ -2805,8 +2825,10 @@ static int be_flash_BEx(struct be_adapter *adapter,
continue;
if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
- memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+ !be_fw_ncsi_supported(adapter->fw_ver)) {
+ dev_info(dev, NCSI_UPDATE_LOG, adapter->fw_ver);
continue;
+ }
if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
!phy_flashing_required(adapter))
@@ -3527,6 +3549,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
(BIT_MASK(16) - 1);
+ /* For BEx, since GET_FUNC_CONFIG command is not
+ * supported, we read funcnum here as a workaround.
+ */
+ if (BEx_chip(adapter))
+ adapter->pf_num = attribs->hba_attribs.pci_funcnum;
}
err:
@@ -4950,7 +4977,7 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
{
int status;
- if (BEx_chip(adapter))
+ if (BE2_chip(adapter))
return -EOPNOTSUPP;
status = __be_cmd_set_logical_link_config(adapter, link_state,
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1bd82bcb3be5..09da2d82c2f0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1720,7 +1720,11 @@ struct mgmt_hba_attribs {
u32 rsvd2[55];
u8 rsvd3[3];
u8 phy_port;
- u32 rsvd4[13];
+ u32 rsvd4[15];
+ u8 rsvd5[2];
+ u8 pci_funcnum;
+ u8 rsvd6;
+ u32 rsvd7[6];
} __packed;
struct mgmt_controller_attrib {
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 92942c84d329..36e4232ed6b8 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005-2016 Broadcom.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index dcb930a52613..93aa2939142a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -724,14 +724,24 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}
+static int be_gso_hdr_len(struct sk_buff *skb)
+{
+ if (skb->encapsulation)
+ return skb_inner_transport_offset(skb) +
+ inner_tcp_hdrlen(skb);
+ return skb_transport_offset(skb) + tcp_hdrlen(skb);
+}
+
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
struct be_tx_stats *stats = tx_stats(txo);
- u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
+ u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
+ /* Account for headers which get duplicated in TSO pkt */
+ u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
u64_stats_update_begin(&stats->sync);
stats->tx_reqs++;
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += skb->len + dup_hdr_len;
stats->tx_pkts += tx_pkts;
if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
stats->tx_vxlan_offload_pkts += tx_pkts;
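[Editor's note: for a TSO skb the wire traffic is the payload plus one copy of the headers per segment, but skb->len counts the headers only once; the hunk above adds hdr_len * (gso_segs - 1) so tx_bytes reflects what actually hit the wire. Standalone arithmetic check, sizes assumed for illustration:]

	#include <stdio.h>

	int main(void)
	{
		/* illustrative TSO packet: 64 KB payload, 66-byte eth+IP+TCP header */
		unsigned int hdr_len = 66;
		unsigned int skb_len = hdr_len + 65536;
		unsigned int gso_segs = 46;	/* 65536 / 1448 MSS, rounded up */

		unsigned int wire_bytes = skb_len + hdr_len * (gso_segs - 1);

		printf("skb->len=%u, on the wire=%u\n", skb_len, wire_bytes);
		return 0;
	}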
@@ -2803,7 +2813,6 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
- napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
free_cpumask_var(eqo->affinity_mask);
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f928e6f79c89..223f35cc034c 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -669,6 +669,7 @@ static const struct of_device_id nps_enet_dt_ids[] = {
{ .compatible = "ezchip,nps-mgt-enet" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
static struct platform_driver nps_enet_driver = {
.probe = nps_enet_probe,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 48a033e64423..5aa9d4ded214 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1430,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_put(skb, pkt_len - 4);
data = skb->data;
+ if (!is_copybreak && need_swap)
+ swap_buffer(data, pkt_len);
+
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC)
data = skb_pull_inline(skb, 2);
#endif
- if (!is_copybreak && need_swap)
- swap_buffer(data, pkt_len);
-
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
if (fep->bufdesc_ex)
diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
index 51fd2e6c1b84..60491779e49f 100644
--- a/drivers/net/ethernet/freescale/fman/Makefile
+++ b/drivers/net/ethernet/freescale/fman/Makefile
@@ -1,7 +1,9 @@
subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman
-obj-y += fsl_fman.o fsl_fman_mac.o fsl_mac.o
+obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
+obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
+obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
-fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_port.o
-fsl_fman_mac-objs := fman_dtsec.o fman_memac.o fman_tgec.o
-fsl_mac-objs += mac.o
+fsl_fman-objs := fman_muram.o fman.o fman_sp.o
+fsl_fman_port-objs := fman_port.o
+fsl_mac-objs := mac.o fman_dtsec.o fman_memac.o fman_tgec.o
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 1de2e1e51c2b..dafd9e1baba2 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -618,7 +618,7 @@ struct fman {
unsigned long cam_offset;
size_t cam_size;
/* Fifo in MURAM */
- int fifo_offset;
+ unsigned long fifo_offset;
size_t fifo_size;
u32 liodn_base[64];
@@ -2036,7 +2036,7 @@ static int fman_init(struct fman *fman)
/* allocate MURAM for FIFO according to total size */
fman->fifo_offset = fman_muram_alloc(fman->muram,
fman->state->total_fifo_size);
- if (IS_ERR_VALUE(fman->cam_offset)) {
+ if (IS_ERR_VALUE(fman->fifo_offset)) {
free_init_resources(fman);
dev_err(fman->dev, "%s: MURAM alloc for BMI FIFO failed\n",
__func__);
@@ -2115,6 +2115,7 @@ void fman_register_intr(struct fman *fman, enum fman_event_modules module,
fman->intr_mng[event].isr_cb = isr_cb;
fman->intr_mng[event].src_handle = src_arg;
}
+EXPORT_SYMBOL(fman_register_intr);
/**
* fman_unregister_intr
@@ -2138,6 +2139,7 @@ void fman_unregister_intr(struct fman *fman, enum fman_event_modules module,
fman->intr_mng[event].isr_cb = NULL;
fman->intr_mng[event].src_handle = NULL;
}
+EXPORT_SYMBOL(fman_unregister_intr);
/**
* fman_set_port_params
@@ -2241,6 +2243,7 @@ return_err:
spin_unlock_irqrestore(&fman->spinlock, flags);
return err;
}
+EXPORT_SYMBOL(fman_set_port_params);
/**
* fman_reset_mac
@@ -2310,6 +2313,7 @@ int fman_reset_mac(struct fman *fman, u8 mac_id)
return 0;
}
+EXPORT_SYMBOL(fman_reset_mac);
/**
* fman_set_mac_max_frame
@@ -2327,8 +2331,7 @@ int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
* or equal to the port's max
*/
if ((!fman->state->port_mfl[mac_id]) ||
- (fman->state->port_mfl[mac_id] &&
- (mfl <= fman->state->port_mfl[mac_id]))) {
+ (mfl <= fman->state->port_mfl[mac_id])) {
fman->state->mac_mfl[mac_id] = mfl;
} else {
dev_warn(fman->dev, "%s: MAC max_frame_length is larger than Port max_frame_length\n",
@@ -2337,6 +2340,7 @@ int fman_set_mac_max_frame(struct fman *fman, u8 mac_id, u16 mfl)
}
return 0;
}
+EXPORT_SYMBOL(fman_set_mac_max_frame);
/**
* fman_get_clock_freq
@@ -2363,6 +2367,7 @@ u32 fman_get_bmi_max_fifo_size(struct fman *fman)
{
return fman->state->bmi_max_fifo_size;
}
+EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
/**
* fman_get_revision
@@ -2384,6 +2389,7 @@ void fman_get_revision(struct fman *fman, struct fman_rev_info *rev_info)
FPM_REV1_MAJOR_SHIFT);
rev_info->minor = tmp & FPM_REV1_MINOR_MASK;
}
+EXPORT_SYMBOL(fman_get_revision);
/**
* fman_get_qman_channel_id
@@ -2419,6 +2425,7 @@ u32 fman_get_qman_channel_id(struct fman *fman, u32 port_id)
return fman->state->qman_channel_base + i;
}
+EXPORT_SYMBOL(fman_get_qman_channel_id);
/**
* fman_get_mem_region
@@ -2432,6 +2439,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
{
return fman->state->res;
}
+EXPORT_SYMBOL(fman_get_mem_region);
/* Bootargs defines */
/* Extra headroom for RX buffers - Default, min and max */
@@ -2453,7 +2461,7 @@ struct resource *fman_get_mem_region(struct fman *fman)
* particular forwarding scenarios that add extra headers to the
* forwarded frame.
*/
-int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
+static int fsl_fm_rx_extra_headroom = FSL_FM_RX_EXTRA_HEADROOM;
module_param(fsl_fm_rx_extra_headroom, int, 0);
MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
@@ -2466,7 +2474,7 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
* Could be overridden once, at boot-time, via the
* fm_set_max_frm() callback.
*/
-int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
+static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
module_param(fsl_fm_max_frm, int, 0);
MODULE_PARM_DESC(fsl_fm_max_frm, "Maximum frame size, across all interfaces");
@@ -2538,6 +2546,7 @@ struct fman *fman_bind(struct device *fm_dev)
{
return (struct fman *)(dev_get_drvdata(get_device(fm_dev)));
}
+EXPORT_SYMBOL(fman_bind);
static irqreturn_t fman_err_irq(int irq, void *handle)
{
@@ -2727,8 +2736,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
struct fman *fman;
struct device_node *fm_node, *muram_node;
struct resource *res;
- const u32 *u32_prop;
- int lenp, err, irq;
+ u32 val, range[2];
+ int err, irq;
struct clk *clk;
u32 clk_rate;
phys_addr_t phys_base_addr;
@@ -2740,16 +2749,13 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fm_node = of_node_get(of_dev->dev.of_node);
- u32_prop = (const u32 *)of_get_property(fm_node, "cell-index", &lenp);
- if (!u32_prop) {
- dev_err(&of_dev->dev, "%s: of_get_property(%s, cell-index) failed\n",
+ err = of_property_read_u32(fm_node, "cell-index", &val);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n",
__func__, fm_node->full_name);
goto fman_node_put;
}
- if (WARN_ON(lenp != sizeof(u32)))
- goto fman_node_put;
-
- fman->dts_params.id = (u8)fdt32_to_cpu(u32_prop[0]);
+ fman->dts_params.id = (u8)val;
/* Get the FM interrupt */
res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
@@ -2796,18 +2802,15 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Rounding to MHz */
fman->dts_params.clk_freq = DIV_ROUND_UP(clk_rate, 1000000);
- u32_prop = (const u32 *)of_get_property(fm_node,
- "fsl,qman-channel-range",
- &lenp);
- if (!u32_prop) {
- dev_err(&of_dev->dev, "%s: of_get_property(%s, fsl,qman-channel-range) failed\n",
+ err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range",
+ &range[0], 2);
+ if (err) {
+ dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n",
__func__, fm_node->full_name);
goto fman_node_put;
}
- if (WARN_ON(lenp != sizeof(u32) * 2))
- goto fman_node_put;
- fman->dts_params.qman_channel_base = fdt32_to_cpu(u32_prop[0]);
- fman->dts_params.num_of_qman_channels = fdt32_to_cpu(u32_prop[1]);
+ fman->dts_params.qman_channel_base = range[0];
+ fman->dts_params.num_of_qman_channels = range[1];
/* Get the MURAM base address and size */
muram_node = of_find_matching_node(fm_node, fman_muram_match);
@@ -2858,7 +2861,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dts_params.base_addr =
devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (fman->dts_params.base_addr == 0) {
+ if (!fman->dts_params.base_addr) {
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
@@ -2930,7 +2933,7 @@ static const struct of_device_id fman_match[] = {
{}
};
-MODULE_DEVICE_TABLE(of, fm_match);
+MODULE_DEVICE_TABLE(of, fman_match);
static struct platform_driver fman_driver = {
.driver = {
@@ -2940,4 +2943,25 @@ static struct platform_driver fman_driver = {
.probe = fman_probe,
};
-builtin_platform_driver(fman_driver);
+static int __init fman_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA FMan driver\n");
+
+ err = platform_driver_register(&fman_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(fman_load);
+
+static void __exit fman_unload(void)
+{
+ platform_driver_unregister(&fman_driver);
+}
+module_exit(fman_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager driver");
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index ddf0260176c9..dd6d0526f6c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -191,10 +191,6 @@ struct fman_mac_params {
u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
- /* MDIO exceptions interrupt source - not valid for all
- * MACs; MUST be set to 0 for MACs that don't have
- * mdio-irq, or for polling
- */
void *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 45e98fd8b79e..53ef51e3bd9e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -507,6 +507,9 @@ static void setup_sgmii_internal_phy(struct fman_mac *memac,
{
u16 tmp_reg16;
+ if (WARN_ON(!memac->pcsphy))
+ return;
+
/* SGMII mode */
tmp_reg16 = IF_MODE_SGMII_EN;
if (!fixed_link)
@@ -1151,7 +1154,8 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
- if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
+ memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
if (!params->internal_phy_node) {
pr_err("PCS PHY node is not available\n");
memac_free(memac);
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 47394c45b6e8..5ec94d243da0 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -150,7 +150,8 @@ unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
*
* Free an allocated memory from FM-MURAM partition.
*/
-void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+ size_t size)
{
unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 889649ad8931..453bf849eee1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -46,6 +46,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
-void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset,
+ size_t size);
#endif /* __FM_MURAM_EXT */
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 70c198d072dc..9f3bb50a2365 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1477,7 +1477,8 @@ EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
*/
int fman_port_disable(struct fman_port *port)
{
- u32 __iomem *bmi_cfg_reg, *bmi_status_reg, tmp;
+ u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
+ u32 tmp;
bool rx_port, failure = false;
int count;
@@ -1553,7 +1554,8 @@ EXPORT_SYMBOL(fman_port_disable);
*/
int fman_port_enable(struct fman_port *port)
{
- u32 __iomem *bmi_cfg_reg, tmp;
+ u32 __iomem *bmi_cfg_reg;
+ u32 tmp;
bool rx_port;
if (!is_init_done(port->cfg))
@@ -1623,7 +1625,7 @@ static int fman_port_probe(struct platform_device *of_dev)
struct device_node *fm_node, *port_node;
struct resource res;
struct resource *dev_res;
- const u32 *u32_prop;
+ u32 val;
int err = 0, lenp;
enum fman_port_type port_type;
u16 port_speed;
@@ -1652,28 +1654,20 @@ static int fman_port_probe(struct platform_device *of_dev)
goto return_err;
}
- u32_prop = (const u32 *)of_get_property(port_node, "cell-index", &lenp);
- if (!u32_prop) {
- dev_err(port->dev, "%s: of_get_property(%s, cell-index) failed\n",
+ err = of_property_read_u32(port_node, "cell-index", &val);
+ if (err) {
+ dev_err(port->dev, "%s: reading cell-index for %s failed\n",
__func__, port_node->full_name);
err = -EINVAL;
goto return_err;
}
- if (WARN_ON(lenp != sizeof(u32))) {
- err = -EINVAL;
- goto return_err;
- }
- port_id = (u8)fdt32_to_cpu(u32_prop[0]);
-
+ port_id = (u8)val;
port->dts_params.id = port_id;
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
port_type = FMAN_PORT_TYPE_TX;
port_speed = 1000;
- u32_prop = (const u32 *)of_get_property(port_node,
- "fsl,fman-10g-port",
- &lenp);
- if (u32_prop)
+ if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1686,9 +1680,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
port_type = FMAN_PORT_TYPE_RX;
port_speed = 1000;
- u32_prop = (const u32 *)of_get_property(port_node,
- "fsl,fman-10g-port", &lenp);
- if (u32_prop)
+ if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
@@ -1743,7 +1735,7 @@ static int fman_port_probe(struct platform_device *of_dev)
port->dts_params.base_addr = devm_ioremap(port->dev, res.start,
resource_size(&res));
- if (port->dts_params.base_addr == 0)
+ if (!port->dts_params.base_addr)
dev_err(port->dev, "%s: devm_ioremap() failed\n", __func__);
dev_set_drvdata(&of_dev->dev, port);
@@ -1775,4 +1767,25 @@ static struct platform_driver fman_port_driver = {
.probe = fman_port_probe,
};
-builtin_platform_driver(fman_port_driver);
+static int __init fman_port_load(void)
+{
+ int err;
+
+ pr_debug("FSL DPAA FMan driver\n");
+
+ err = platform_driver_register(&fman_port_driver);
+ if (err < 0)
+ pr_err("Error, platform_driver_register() = %d\n", err);
+
+ return err;
+}
+module_init(fman_port_load);
+
+static void __exit fman_port_unload(void)
+{
+ platform_driver_unregister(&fman_port_driver);
+}
+module_exit(fman_port_unload);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Freescale DPAA Frame Manager Port driver");
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index f9e7aa385cba..248f5bcca468 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -80,6 +80,7 @@ void fman_sp_set_buf_pools_in_asc_order_of_buf_sizes(struct fman_ext_pools
}
}
}
+EXPORT_SYMBOL(fman_sp_set_buf_pools_in_asc_order_of_buf_sizes);
int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
int_context_data_copy,
@@ -164,3 +165,5 @@ int fman_sp_build_buffer_struct(struct fman_sp_int_context_data_copy *
return 0;
}
+EXPORT_SYMBOL(fman_sp_build_buffer_struct);
+
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index e33d9d24c1db..8fe6b3e253fa 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -469,9 +469,9 @@ static void adjust_link_memac(struct net_device *net_dev)
/* Initializes driver's PHY state, and attaches to the PHY.
* Returns 0 on success.
*/
-static int init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev,
- void (*adj_lnk)(struct net_device *))
+static struct phy_device *init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev,
+ void (*adj_lnk)(struct net_device *))
{
struct phy_device *phy_dev;
struct mac_priv_s *priv = mac_dev->priv;
@@ -480,7 +480,7 @@ static int init_phy(struct net_device *net_dev,
priv->phy_if);
if (!phy_dev) {
netdev_err(net_dev, "Could not connect to PHY\n");
- return -ENODEV;
+ return NULL;
}
/* Remove any features not supported by the controller */
@@ -493,23 +493,23 @@ static int init_phy(struct net_device *net_dev,
mac_dev->phy_dev = phy_dev;
- return 0;
+ return phy_dev;
}
-static int dtsec_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
+static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
{
return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
}
-static int tgec_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
+static struct phy_device *tgec_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
{
return init_phy(net_dev, mac_dev, adjust_link_void);
}
-static int memac_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
+static struct phy_device *memac_init_phy(struct net_device *net_dev,
+ struct mac_device *mac_dev)
{
return init_phy(net_dev, mac_dev, &adjust_link_memac);
}
@@ -583,31 +583,6 @@ static void setup_memac(struct mac_device *mac_dev)
static DEFINE_MUTEX(eth_lock);
-static const char phy_str[][11] = {
- [PHY_INTERFACE_MODE_MII] = "mii",
- [PHY_INTERFACE_MODE_GMII] = "gmii",
- [PHY_INTERFACE_MODE_SGMII] = "sgmii",
- [PHY_INTERFACE_MODE_TBI] = "tbi",
- [PHY_INTERFACE_MODE_RMII] = "rmii",
- [PHY_INTERFACE_MODE_RGMII] = "rgmii",
- [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
- [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
- [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
- [PHY_INTERFACE_MODE_RTBI] = "rtbi",
- [PHY_INTERFACE_MODE_XGMII] = "xgmii"
-};
-
-static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(phy_str); i++)
- if (strcmp(str, phy_str[i]) == 0)
- return (phy_interface_t)i;
-
- return PHY_INTERFACE_MODE_MII;
-}
-
static const u16 phy2speed[] = {
[PHY_INTERFACE_MODE_MII] = SPEED_100,
[PHY_INTERFACE_MODE_GMII] = SPEED_1000,
@@ -678,7 +653,7 @@ MODULE_DEVICE_TABLE(of, mac_match);
static int mac_probe(struct platform_device *_of_dev)
{
- int err, i, lenp, nph;
+ int err, i, nph;
struct device *dev;
struct device_node *mac_node, *dev_node;
struct mac_device *mac_dev;
@@ -686,9 +661,9 @@ static int mac_probe(struct platform_device *_of_dev)
struct resource res;
struct mac_priv_s *priv;
const u8 *mac_addr;
- const char *char_prop;
- const u32 *u32_prop;
+ u32 val;
u8 fman_id;
+ int phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
@@ -749,16 +724,15 @@ static int mac_probe(struct platform_device *_of_dev)
}
/* Get the FMan cell-index */
- u32_prop = of_get_property(dev_node, "cell-index", &lenp);
- if (!u32_prop) {
- dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ err = of_property_read_u32(dev_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %s\n",
dev_node->full_name);
err = -EINVAL;
goto _return_of_node_put;
}
- WARN_ON(lenp != sizeof(u32));
/* cell-index 0 => FMan id 1 */
- fman_id = (u8)(fdt32_to_cpu(u32_prop[0]) + 1);
+ fman_id = (u8)(val + 1);
priv->fman = fman_bind(&of_dev->dev);
if (!priv->fman) {
@@ -805,15 +779,14 @@ static int mac_probe(struct platform_device *_of_dev)
}
/* Get the cell-index */
- u32_prop = of_get_property(mac_node, "cell-index", &lenp);
- if (!u32_prop) {
- dev_err(dev, "of_get_property(%s, cell-index) failed\n",
+ err = of_property_read_u32(mac_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %s\n",
mac_node->full_name);
err = -EINVAL;
goto _return_dev_set_drvdata;
}
- WARN_ON(lenp != sizeof(u32));
- priv->cell_index = (u8)fdt32_to_cpu(u32_prop[0]);
+ priv->cell_index = (u8)val;
/* Get the MAC address */
mac_addr = of_get_mac_address(mac_node);
@@ -870,16 +843,14 @@ static int mac_probe(struct platform_device *_of_dev)
}
/* Get the PHY connection type */
- char_prop = (const char *)of_get_property(mac_node,
- "phy-connection-type", NULL);
- if (!char_prop) {
+ phy_if = of_get_phy_mode(mac_node);
+ if (phy_if < 0) {
dev_warn(dev,
- "of_get_property(%s, phy-connection-type) failed. Defaulting to MII\n",
+ "of_get_phy_mode() for %s failed. Defaulting to SGMII\n",
mac_node->full_name);
- priv->phy_if = PHY_INTERFACE_MODE_MII;
- } else {
- priv->phy_if = str2phy(char_prop);
+ phy_if = PHY_INTERFACE_MODE_SGMII;
}
+ priv->phy_if = phy_if;
priv->speed = phy2speed[priv->phy_if];
priv->max_speed = priv->speed;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 0211cc9a46d6..d7313f0c5135 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -58,7 +58,8 @@ struct mac_device {
bool tx_pause_active;
bool promisc;
- int (*init_phy)(struct net_device *net_dev, struct mac_device *mac_dev);
+ struct phy_device *(*init_phy)(struct net_device *net_dev,
+ struct mac_device *mac_dev);
int (*init)(struct mac_device *mac_dev);
int (*start)(struct mac_device *mac_dev);
int (*stop)(struct mac_device *mac_dev);
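mac_probe() above swaps the driver-local phy_str[]/str2phy() table for the generic of_get_phy_mode() helper, which parses the phy-connection-type/phy-mode property itself. A small sketch of the fallback pattern, assuming the SGMII default the patch picks; in this kernel generation the helper returns the phy_interface_t value or a negative errno.

#include <linux/of_net.h>
#include <linux/phy.h>

static phy_interface_t example_phy_mode(struct device_node *np)
{
	int phy_if = of_get_phy_mode(np);

	if (phy_if < 0)		/* property absent or value unknown */
		phy_if = PHY_INTERFACE_MODE_SGMII;	/* driver default */

	return phy_if;
}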
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index c54c6fac0d1d..b6ed818f78ff 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
return ERR_PTR(-ENODEV);
handle = dev->ops->get_handle(dev, port_id);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ put_device(&dev->cls_dev);
return handle;
+ }
handle->dev = dev;
handle->owner_dev = owner_dev;
@@ -356,6 +358,8 @@ out_when_init_queue:
for (j = i - 1; j >= 0; j--)
hnae_fini_queue(handle->qs[j]);
+ put_device(&dev->cls_dev);
+
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);
@@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h)
dev->ops->put_handle(h);
module_put(dev->owner);
+
+ put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);
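The hnae fix above is a plain reference-count balance: the handle lookup takes a device reference, so every early-exit path must drop it, not only the normal put path. An illustrative sketch with invented names:

#include <linux/device.h>

static int example_setup_queues(struct device *dev)
{
	return 0;	/* placeholder for the queue init that can fail */
}

static int example_acquire(struct device *dev)
{
	int err;

	get_device(dev);	/* reference held for the handle's lifetime */

	err = example_setup_queues(dev);
	if (err) {
		put_device(dev);	/* balance the reference on failure */
		return err;
	}
	return 0;	/* the matching put_device() runs in the release path */
}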
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index e28d960997af..2d0cb609adc3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -207,6 +207,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
int ret;
char *mac_addr = (char *)addr;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ u8 port_num;
assert(mac_cb);
@@ -221,8 +222,11 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
return ret;
}
- ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
- mac_addr, true);
+ ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num);
+ if (ret)
+ return ret;
+
+ ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true);
if (ret)
dev_err(handle->owner_dev,
"mac add mul_mac:%pM port%d fail, ret = %#x!\n",
@@ -678,9 +682,6 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
ret = -EINVAL;
}
- if (!ret)
- hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
-
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a834774fdb02..ec8c738af726 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -126,7 +126,7 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
(enum mac_speed)speed, duplex);
if (ret) {
dev_err(mac_cb->dev,
- "adjust_link failed,%s mac%d ret = %#x!\n",
+ "adjust_link failed, %s mac%d ret = %#x!\n",
mac_cb->dsaf_dev->ae_dev.name,
mac_cb->mac_id, ret);
return;
@@ -141,15 +141,16 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
* @port_num: port number
*
*/
-static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
- u8 vmid, u8 *port_num)
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
{
+ int q_num_per_vf, vf_num_per_port;
+ int vm_queue_id;
u8 tmp_port;
if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
dev_err(mac_cb->dev,
- "input invalid,%s mac%d vmid%d !\n",
+ "input invalid, %s mac%d vmid%d !\n",
mac_cb->dsaf_dev->ae_dev.name,
mac_cb->mac_id, vmid);
return -EINVAL;
@@ -157,23 +158,29 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
} else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
dev_err(mac_cb->dev,
- "input invalid,%s mac%d vmid%d!\n",
+ "input invalid, %s mac%d vmid%d!\n",
mac_cb->dsaf_dev->ae_dev.name,
mac_cb->mac_id, vmid);
return -EINVAL;
}
} else {
- dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+ dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
return -EINVAL;
}
if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
- dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
+ dev_err(mac_cb->dev, "input invalid, %s mac%d vmid%d !\n",
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
return -EINVAL;
}
+ q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf;
+ vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn;
+
+ vm_queue_id = vmid * q_num_per_vf +
+ vf_num_per_port * q_num_per_vf * mac_cb->mac_id;
+
switch (mac_cb->dsaf_dev->dsaf_mode) {
case DSAF_MODE_ENABLE_FIX:
tmp_port = 0;
@@ -193,10 +200,10 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
case DSAF_MODE_DISABLE_6PORT_2VM:
case DSAF_MODE_DISABLE_6PORT_4VM:
case DSAF_MODE_DISABLE_6PORT_16VM:
- tmp_port = vmid;
+ tmp_port = vm_queue_id;
break;
default:
- dev_err(mac_cb->dev, "dsaf mode invalid,%s mac%d!\n",
+ dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
return -EINVAL;
}
@@ -275,7 +282,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
if (ret) {
dev_err(dsaf_dev->dev,
- "set mac mc port failed,%s mac%d ret = %#x!\n",
+ "set mac mc port failed, %s mac%d ret = %#x!\n",
mac_cb->dsaf_dev->ae_dev.name,
mac_cb->mac_id, ret);
return ret;
@@ -305,7 +312,7 @@ int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac)
old_mac = &mac_cb->addr_entry_idx[vfn];
} else {
dev_err(mac_cb->dev,
- "vf queue is too large,%s mac%d queue = %#x!\n",
+ "vf queue is too large, %s mac%d queue = %#x!\n",
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vfn);
return -EINVAL;
}
@@ -547,7 +554,7 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) {
- dev_err(mac_cb->dev, "enable autoneg is not allowed!");
+ dev_err(mac_cb->dev, "enabling autoneg is not allowed!\n");
return -ENOTSUPP;
}
@@ -571,7 +578,7 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
if (is_ver1 && (tx_en || rx_en)) {
- dev_err(mac_cb->dev, "macv1 cann't enable tx/rx_pause!");
+ dev_err(mac_cb->dev, "macv1 can't enable tx/rx_pause!\n");
return -EINVAL;
}
}
@@ -644,21 +651,6 @@ hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode)
return addr;
}
-static int hns_mac_phydev_match(struct device *dev, void *fwnode)
-{
- return dev->fwnode == fwnode;
-}
-
-static struct
-platform_device *hns_mac_find_platform_device(struct fwnode_handle *fwnode)
-{
- struct device *dev;
-
- dev = bus_find_device(&platform_bus_type, NULL,
- fwnode, hns_mac_phydev_match);
- return dev ? to_platform_device(dev) : NULL;
-}
-
static int
hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
u32 addr)
@@ -724,7 +716,7 @@ static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
return;
/* dev address in adev */
- pdev = hns_mac_find_platform_device(acpi_fwnode_handle(args.adev));
+ pdev = hns_dsaf_find_platform_device(acpi_fwnode_handle(args.adev));
mii_bus = platform_get_drvdata(pdev);
rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
if (!rc)
@@ -941,7 +933,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
ret = hns_mac_get_mode(mac_cb->phy_if);
if (ret < 0) {
dev_err(dsaf_dev->dev,
- "hns_mac_get_mode failed,mac%d ret = %#x!\n",
+ "hns_mac_get_mode failed, mac%d ret = %#x!\n",
mac_cb->mac_id, ret);
return ret;
}
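A worked example, with made-up sizes, of the inner-port formula introduced above: queues are laid out per MAC first, then per VF inside each MAC's block.

#include <stdio.h>

int main(void)
{
	int q_num_per_vf = 1;		/* queues per VF (invented) */
	int vf_num_per_port = 16;	/* VFs per port (invented) */
	int mac_id = 2, vmid = 3;

	int vm_queue_id = vmid * q_num_per_vf +
			  vf_num_per_port * q_num_per_vf * mac_id;

	/* 3 * 1 + 16 * 1 * 2 = 35: VF 3's first queue inside MAC 2's block */
	printf("vm_queue_id = %d\n", vm_queue_id);
	return 0;
}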
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 4cbdf14f5c16..d3a1f72ece0e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -461,5 +461,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb);
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status);
void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+ u8 vmid, u8 *port_num);
#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index eb448dff7564..8ea3d95fa483 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -760,16 +760,6 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
DSAF_CFG_MIX_MODE_S, !!en);
}
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
-{
- if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
- dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
- return;
-
- dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
- DSAFV2_SERDES_LBK_EN_B, !!en);
-}
-
/**
* hns_dsaf_tbl_stat_en - tbl
* @dsaf_id: dsa fabric id
@@ -2761,6 +2751,7 @@ static const struct of_device_id g_dsaf_match[] = {
{.compatible = "hisilicon,hns-dsaf-v2"},
{}
};
+MODULE_DEVICE_TABLE(of, g_dsaf_match);
static struct platform_driver g_dsaf_driver = {
.probe = hns_dsaf_probe,
@@ -2780,7 +2771,7 @@ module_platform_driver(g_dsaf_driver);
* @dereset: false - request reset, true - drop reset
* return 0 - success, negative - fail
*/
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable)
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
{
struct dsaf_device *dsaf_dev;
struct platform_device *pdev;
@@ -2809,24 +2800,44 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable)
{DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
};
- if (!is_of_node(dsaf_fwnode)) {
- pr_err("hisi_dsaf: Only support DT node!\n");
+ /* find the platform device corresponding to fwnode */
+ if (is_of_node(dsaf_fwnode)) {
+ pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
+ } else if (is_acpi_device_node(dsaf_fwnode)) {
+ pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
+ } else {
+ pr_err("fwnode is neither OF or ACPI type\n");
return -EINVAL;
}
- pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
+
+ /* check whether we succeeded in fetching pdev */
+ if (!pdev) {
+ pr_err("couldn't find platform device for node\n");
+ return -ENODEV;
+ }
+
+ /* retrieve the dsaf_device from the driver data */
dsaf_dev = dev_get_drvdata(&pdev->dev);
+ if (!dsaf_dev) {
+ dev_err(&pdev->dev, "dsaf_dev is NULL\n");
+ return -ENODEV;
+ }
+
+ /* now, make sure we are running on a compatible SoC */
if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
dsaf_dev->ae_dev.name);
return -ENODEV;
}
- if (!enable) {
- /* Reset rocee-channels in dsaf and rocee */
- hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false);
- hns_dsaf_roce_srst(dsaf_dev, false);
+ /* do reset or de-reset according to the flag */
+ if (!dereset) {
+ /* reset rocee-channels in dsaf and rocee */
+ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
+ false);
+ dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
} else {
- /* Configure dsaf tx roce correspond to port map and sl map */
+ /* configure dsaf tx roce corresponding to port map and sl map */
mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
dsaf_set_field(mp, 7 << i * 3, i * 3,
@@ -2840,12 +2851,13 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable)
sl_map[i][DSAF_ROCE_6PORT_MODE]);
dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
- /* De-reset rocee-channels in dsaf and rocee */
- hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true);
+ /* de-reset rocee-channels in dsaf and rocee */
+ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
+ true);
msleep(SRST_TIME_INTERVAL);
- hns_dsaf_roce_srst(dsaf_dev, true);
+ dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
- /* Eanble dsaf channel rocee credit */
+ /* enable dsaf channel rocee credit */
credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index f3681d566ae6..c494fc52be74 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -305,7 +305,7 @@ struct dsaf_misc_op {
void (*cpld_reset_led)(struct hns_mac_cb *mac_cb);
int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb,
enum hnae_led_state status);
- /* reset seris function, it will be reset if the dereseet is 0 */
+ /* reset series function, it will be reset if the dereset is 0 */
void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port,
@@ -313,6 +313,9 @@ struct dsaf_misc_op {
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+ void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
+ bool dereset);
+ void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
@@ -445,10 +448,6 @@ int hns_dsaf_get_mac_entry_by_index(
void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
-void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable);
-
-void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable);
-
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
@@ -467,6 +466,5 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 *en);
int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 en);
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 36b9f791cf2f..67accce1d33d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -26,6 +26,8 @@ enum _dsm_rst_type {
HNS_XGE_CORE_RESET_FUNC = 0x3,
HNS_XGE_RESET_FUNC = 0x4,
HNS_GE_RESET_FUNC = 0x5,
+ HNS_DSAF_CHN_RESET_FUNC = 0x6,
+ HNS_ROCE_RESET_FUNC = 0x7,
};
const u8 hns_dsaf_acpi_dsm_uuid[] = {
@@ -241,11 +243,11 @@ static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
* bit18-19 for com/dfx
* @dereset: false - request reset, true - drop reset
*/
-void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable)
+void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
u32 reg_addr;
- if (!enable)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
@@ -253,9 +255,27 @@ void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable)
dsaf_write_sub(dsaf_dev, reg_addr, msk);
}
-void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable)
+/**
+ * hns_dsaf_srst_chns_acpi - reset dsaf channels via ACPI
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channels mask value:
+ * bit0-5 for xge0-5
+ * bit6-11 for ppe0-5
+ * bit12-17 for roce0-5
+ * bit18-19 for com/dfx
+ * @dereset: false - request reset, true - drop reset
+ */
+void
+hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
{
- if (!enable) {
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_DSAF_CHN_RESET_FUNC,
+ msk, dereset);
+}
+
+void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
} else {
dsaf_write_sub(dsaf_dev,
@@ -267,6 +287,12 @@ void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable)
}
}
+void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_ROCE_RESET_FUNC, 0, dereset);
+}
+
static void
hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
u32 port, bool dereset)
@@ -575,6 +601,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
+ misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
+ misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
misc_op->get_phy_if = hns_mac_get_phy_if;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -591,6 +619,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
+ misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
+ misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -603,3 +633,18 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
return (void *)misc_op;
}
+
+static int hns_dsaf_dev_match(struct device *dev, void *fwnode)
+{
+ return dev->fwnode == fwnode;
+}
+
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, hns_dsaf_dev_match);
+ return dev ? to_platform_device(dev) : NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
index f06bb03d47a6..310e80261366 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -34,5 +34,6 @@
#define DSAF_LED_ANCHOR_B 5
struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev);
-
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index ef1107777c08..f0ed80d6ef9c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -543,6 +543,22 @@ int hns_rcb_set_coalesce_usecs(
"error: coalesce_usecs setting supports 0~1023us\n");
return -EINVAL;
}
+
+ if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ if (timeout == 0)
+ /* set timeout to 0, disable gap time */
+ dsaf_set_reg_field(rcb_common->io_base,
+ RCB_INT_GAP_TIME_REG + port_idx * 4,
+ PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+ 0);
+ else
+ /* set timeout non-zero, restore gap time to 1 */
+ dsaf_set_reg_field(rcb_common->io_base,
+ RCB_INT_GAP_TIME_REG + port_idx * 4,
+ PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+ 1);
+ }
+
hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 13c16ab7be48..878950a42e6c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -78,10 +78,10 @@
#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88
#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C
#define DSAF_SUB_SC_DSAF_RESET_REQ_REG 0xAA8
-#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG 0xA50
#define DSAF_SUB_SC_DSAF_RESET_DREQ_REG 0xAAC
-#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG 0x32C
+#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG 0xA50
#define DSAF_SUB_SC_ROCEE_RESET_DREQ_REG 0xA54
+#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG 0x32C
#define DSAF_SUB_SC_ROCEE_CLK_EN_REG 0x328
#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060
#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300
@@ -417,6 +417,7 @@
#define RCB_CFG_OVERTIME_REG 0x9300
#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
+#define RCB_INT_GAP_TIME_REG 0x9400
#define RCB_PORT_CFG_OVERTIME_REG 0x9430
#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
@@ -898,6 +899,9 @@
#define PPE_CNT_CLR_CE_B 0
#define PPE_CNT_CLR_SNAP_EN_B 1
+#define PPE_INT_GAPTIME_B 0
+#define PPE_INT_GAPTIME_M 0x3ff
+
#define PPE_COMMON_CNT_CLR_CE_B 0
#define PPE_COMMON_CNT_CLR_SNAP_EN_B 1
#define RCB_COM_TSO_MODE_B 0
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 059aaeda46b1..dff7b60345d8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -574,7 +574,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb;
struct hnae_desc *desc;
struct hnae_desc_cb *desc_cb;
- struct ethhdr *eh;
unsigned char *va;
int bnum, length, i;
int pull_len;
@@ -600,7 +599,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
- skb_reset_mac_header(skb);
prefetchw(skb->data);
length = le16_to_cpu(desc->rx.pkt_len);
@@ -682,14 +680,6 @@ out_bnum_err:
return -EFAULT;
}
- /* filter out multicast pkt with the same src mac as this port */
- eh = eth_hdr(skb);
- if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
- ether_addr_equal(ndev->dev_addr, eh->h_source))) {
- dev_kfree_skb_any(skb);
- return -EFAULT;
- }
-
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
@@ -747,25 +737,37 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
ndev->last_rx = jiffies;
}
+static int hns_desc_unused(struct hnae_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
+
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
int budget, void *v)
{
struct hnae_ring *ring = ring_data->ring;
struct sk_buff *skb;
- int num, bnum, ex_num;
+ int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int recv_pkts, recv_bds, clean_count, err;
+ int unused_count = hns_desc_unused(ring);
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
rmb(); /* make sure num takes effect before the other data is touched */
recv_pkts = 0, recv_bds = 0, clean_count = 0;
-recv:
+ num -= unused_count;
+
while (recv_pkts < budget && recv_bds < num) {
/* reuse or realloc buffers */
- if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
clean_count = 0;
+ unused_count = hns_desc_unused(ring);
}
/* poll one pkt */
@@ -786,21 +788,11 @@ recv:
recv_pkts++;
}
- /* make all data has been write before submit */
- if (recv_pkts < budget) {
- ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
-
- if (ex_num > clean_count) {
- num += ex_num - clean_count;
- rmb(); /*complete read rx ring bd number*/
- goto recv;
- }
- }
-
out:
/* make sure all data has been written before submitting */
- if (clean_count > 0)
- hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ if (clean_count + unused_count > 0)
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
return recv_pkts;
}
@@ -810,6 +802,8 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
struct hnae_ring *ring = ring_data->ring;
int num = 0;
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
/* for hardware bug fixed */
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
@@ -821,6 +815,20 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
}
}
+static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int num = 0;
+
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+ if (num == 0)
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring, 0);
+ else
+ napi_schedule(&ring_data->napi);
+}
+
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
int *bytes, int *pkts)
{
@@ -922,7 +930,11 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
- int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ int head;
+
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head != ring->next_to_clean) {
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -932,6 +944,18 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
}
}
+static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+ if (head == ring->next_to_clean)
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring, 0);
+ else
+ napi_schedule(&ring_data->napi);
+}
+
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
@@ -963,10 +987,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
if (clean_complete >= 0 && clean_complete < budget) {
napi_complete(napi);
- ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
- ring_data->ring, 0);
- if (ring_data->fini_process)
- ring_data->fini_process(ring_data);
+ ring_data->fini_process(ring_data);
return 0;
}
@@ -1562,6 +1583,21 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
return stats;
}
+static u16
+hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ /* fix hardware broadcast/multicast packet queue loopback */
+ if (!AE_IS_VER1(priv->enet_ver) &&
+ is_multicast_ether_addr(eth_hdr->h_dest))
+ return 0;
+ else
+ return fallback(ndev, skb);
+}
+
static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_open = hns_nic_net_open,
.ndo_stop = hns_nic_net_stop,
@@ -1577,6 +1613,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_poll_controller = hns_nic_poll_controller,
#endif
.ndo_set_rx_mode = hns_nic_set_rx_mode,
+ .ndo_select_queue = hns_nic_select_queue,
};
static void hns_nic_update_link_status(struct net_device *netdev)
@@ -1738,7 +1775,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->queue_index = i;
rd->ring = &h->qs[i]->tx_ring;
rd->poll_one = hns_nic_tx_poll_one;
- rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
+ rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
+ hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1750,7 +1788,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->ring = &h->qs[i - h->q_num]->rx_ring;
rd->poll_one = hns_nic_rx_poll_one;
rd->ex_process = hns_nic_rx_up_pro;
- rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
+ rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
+ hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
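hns_desc_unused() added above is standard circular-ring arithmetic: the refill budget is the distance from next_to_use forward to next_to_clean, wrapped at desc_num. A standalone userspace check with arbitrary values:

#include <assert.h>

static int desc_unused(int ntc, int ntu, int desc_num)
{
	return ((ntc >= ntu) ? 0 : desc_num) + ntc - ntu;
}

int main(void)
{
	assert(desc_unused(0, 0, 1024) == 0);		/* ntc == ntu: nothing to refill */
	assert(desc_unused(10, 4, 1024) == 6);		/* ntc ahead of ntu: 6 slots free */
	assert(desc_unused(4, 10, 1024) == 1018);	/* wrapped case */
	return 0;
}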
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 47e59bbfd061..87d5c94b2810 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -352,6 +352,13 @@ static int __lb_setup(struct net_device *ndev,
break;
}
+ if (!ret) {
+ if (loop == MAC_LOOP_NONE)
+ h->dev->ops->set_promisc_mode(
+ h, ndev->flags & IFF_PROMISC);
+ else
+ h->dev->ops->set_promisc_mode(h, 1);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 33f4c483af0f..501eb2090ca6 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -563,6 +563,7 @@ static const struct of_device_id hns_mdio_match[] = {
{.compatible = "hisilicon,hns-mdio"},
{}
};
+MODULE_DEVICE_TABLE(of, hns_mdio_match);
static const struct acpi_device_id hns_mdio_acpi_match[] = {
{ "HISI0141", 0 },
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 54efa9a5167b..bd719e25dd76 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2446,6 +2446,8 @@ static int ehea_open(struct net_device *dev)
netif_info(port, ifup, dev, "enabling port\n");
+ netif_carrier_off(dev);
+
ret = ehea_up(dev);
if (!ret) {
port_napi_enable(port);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index bfe17d9c022d..4f3281a03e7e 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1190,7 +1190,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
if (!scrq)
return NULL;
- scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
+ scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
if (!scrq->msgs) {
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
@@ -1461,14 +1461,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
return rc;
req_rx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
i = adapter->req_tx_queues;
req_tx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
release_sub_crqs_no_irqs(adapter);
return rc;
}
@@ -1503,9 +1505,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
adapter->max_rx_add_entries_per_subcrq > entries_page ?
entries_page : adapter->max_rx_add_entries_per_subcrq;
- /* Choosing the maximum number of queues supported by firmware*/
- adapter->req_tx_queues = adapter->max_tx_queues;
- adapter->req_rx_queues = adapter->max_rx_queues;
+ adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
+ adapter->req_rx_queues = adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
adapter->req_mtu = adapter->max_mtu;
@@ -3232,6 +3233,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
+static void ibmvnic_xport_event(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ ibmvnic_xport);
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ ibmvnic_free_inflight(adapter);
+ release_sub_crqs(adapter);
+ if (adapter->migrated) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ if (rc)
+ dev_err(dev, "Error after enable rc=%ld\n", rc);
+ adapter->migrated = false;
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc)
+ dev_err(dev, "Error sending init rc=%ld\n", rc);
+ }
+}
+
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -3267,15 +3289,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
dev_info(dev, "Re-enabling adapter\n");
adapter->migrated = true;
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- dev_err(dev, "Error after enable rc=%ld\n", rc);
- adapter->migrated = false;
- rc = ibmvnic_send_crq_init(adapter);
- if (rc)
- dev_err(dev, "Error sending init rc=%ld\n", rc);
+ schedule_work(&adapter->ibmvnic_xport);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
netif_carrier_off(netdev);
@@ -3284,8 +3298,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
gen_crq->cmd);
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
+ schedule_work(&adapter->ibmvnic_xport);
}
return;
case IBMVNIC_CRQ_CMD_RSP:
@@ -3654,6 +3667,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
goto task_failed;
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
if (adapter->failover) {
adapter->failover = false;
@@ -3691,7 +3705,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct net_device *netdev;
unsigned char *mac_addr_p;
struct dentry *ent;
- char buf[16]; /* debugfs name buf */
+ char buf[17]; /* debugfs name buf */
int rc;
dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3725,6 +3739,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
SET_NETDEV_DEV(netdev, &dev->dev);
INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+ INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
spin_lock_init(&adapter->stats_lock);
@@ -3792,6 +3807,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
rc = register_netdev(netdev);
if (rc) {
@@ -3828,6 +3844,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
debugfs_remove_recursive(adapter->debugfs_dir);
+ dma_unmap_single(&dev->dev, adapter->stats_token,
+ sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
+
if (adapter->ras_comps)
dma_free_coherent(&dev->dev,
adapter->ras_comp_num *
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index bfc84c7d0e11..dd775d951b73 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -27,7 +27,7 @@
/**************************************************************************/
#define IBMVNIC_NAME "ibmvnic"
-#define IBMVNIC_DRIVER_VERSION "1.0"
+#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
#define IBMVNIC_STATS_TIMEOUT 1
/* basic structures plus 100 2k buffers */
@@ -1048,5 +1048,6 @@ struct ibmvnic_adapter {
u8 map_id;
struct work_struct vnic_crq_init;
+ struct work_struct ibmvnic_xport;
bool failover;
};
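The ibmvnic change moves sleeping teardown/re-init out of the CRQ event handler into a work item the handler merely schedules. A minimal sketch of the pattern, with invented names:

#include <linux/workqueue.h>

struct example_adapter {
	struct work_struct xport_work;
};

static void example_xport_event(struct work_struct *work)
{
	struct example_adapter *adapter =
		container_of(work, struct example_adapter, xport_work);

	/* sleeping cleanup/re-init goes here, safely outside IRQ context */
	(void)adapter;
}

static void example_setup(struct example_adapter *adapter)
{
	INIT_WORK(&adapter->xport_work, example_xport_event);
}

static void example_irq_path(struct example_adapter *adapter)
{
	schedule_work(&adapter->xport_work);	/* cheap and IRQ-safe */
}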
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2030d7c1dc94..6d61e443bdf8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -92,6 +92,7 @@
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ac1faee2a5b8..31c97e3937a4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4641,29 +4641,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
}
/**
- * i40e_pf_get_default_tc - Get bitmap for first enabled TC
- * @pf: PF being queried
- *
- * Return a bitmap for first enabled traffic class for this PF.
- **/
-static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
-{
- u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
- u8 i = 0;
-
- if (!enabled_tc)
- return 0x1; /* TC0 */
-
- /* Find the first enabled TC */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT(i))
- break;
- }
-
- return BIT(i);
-}
-
-/**
* i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
* @pf: PF being queried
*
@@ -4673,7 +4650,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
/* If DCB is not enabled for this PF then just return default TC */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -4683,7 +4660,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
@@ -5029,7 +5006,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
- tc_map = i40e_pf_get_default_tc(pf);
+ tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
if (pf->vsi[v]->type == I40E_VSI_FCOE)
tc_map = i40e_get_fcoe_tc_map(pf);
@@ -5717,7 +5694,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -7707,6 +7684,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
pf->msix_entries = NULL;
+ pci_disable_msix(pf->pdev);
return -ENODEV;
} else if (v_actual == I40E_MIN_MSIX) {
@@ -9056,7 +9034,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
- nlflags, 0, 0, filter_mask, NULL);
+ 0, 0, nlflags, filter_mask, NULL);
}
/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a244d9a67264..bd93d823cc25 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9135,10 +9135,14 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
goto fwd_add_err;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
- err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
- if (err)
- goto fwd_add_err;
- netif_tx_start_all_queues(vdev);
+
+ if (netif_running(pdev)) {
+ err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ if (err)
+ goto fwd_add_err;
+ netif_tx_start_all_queues(vdev);
+ }
+
return fwd_adapter;
fwd_add_err:
/* unwind counter and free adapter struct */
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 55831188bc32..5b12022adf1f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1381,6 +1381,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
temp = (val & 0x003fff00) >> 8;
temp *= 64000000;
+ temp += mp->t_clk / 2;
do_div(temp, mp->t_clk);
return (unsigned int)temp;
@@ -1417,6 +1418,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
temp *= 64000000;
+ temp += mp->t_clk / 2;
do_div(temp, mp->t_clk);
return (unsigned int)temp;
@@ -2968,6 +2970,22 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->txq_count = pd->tx_queue_count ? : 1;
}
+static int get_phy_mode(struct mv643xx_eth_private *mp)
+{
+ struct device *dev = mp->dev->dev.parent;
+ int iface = -1;
+
+ if (dev->of_node)
+ iface = of_get_phy_mode(dev->of_node);
+
+ /* Historical default if unspecified. We could also read/write
+ * the interface state in the PSC1.
+ */
+ if (iface < 0)
+ iface = PHY_INTERFACE_MODE_GMII;
+ return iface;
+}
+
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
@@ -2994,7 +3012,7 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
"orion-mdio-mii", addr);
phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!IS_ERR(phydev)) {
phy_addr_set(mp, addr);
break;
@@ -3090,6 +3108,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
mp = netdev_priv(dev);
platform_set_drvdata(pdev, mp);
@@ -3129,7 +3148,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (pd->phy_node) {
mp->phy = of_phy_connect(mp->dev, pd->phy_node,
mv643xx_eth_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!mp->phy)
err = -ENODEV;
else
@@ -3187,8 +3206,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->priv_flags |= IFF_UNICAST_FLT;
dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
- SET_NETDEV_DEV(dev, &pdev->dev);
-
if (mp->shared->win_protect)
wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
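The two coalescing hunks above turn truncating division into round-to-nearest by adding half the divisor before dividing. A trivial userspace illustration with arbitrary numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long temp = 999;	/* dividend */
	unsigned long long t_clk = 250;	/* divisor */

	unsigned long long truncated = temp / t_clk;			/* 3 */
	unsigned long long rounded = (temp + t_clk / 2) / t_clk;	/* 1124 / 250 = 4 */

	printf("truncated %llu, rounded %llu\n", truncated, rounded);
	return 0;
}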
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f05ea56dcff2..941c8e2c944e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
static void sky2_shutdown(struct pci_dev *pdev)
{
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int port;
+
+ for (port = 0; port < hw->ports; port++) {
+ struct net_device *ndev = hw->dev[port];
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ dev_close(ndev);
+ netif_device_detach(ndev);
+ }
+ rtnl_unlock();
+ }
sky2_suspend(&pdev->dev);
pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ad4ab979507b..4a62ffd7729d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2323,6 +2323,41 @@ free_netdev:
return err;
}
+static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
+{
+ u32 val[2], id[4];
+
+ regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
+ regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);
+
+ id[3] = ((val[0] >> 16) & 0xff) - '0';
+ id[2] = ((val[0] >> 24) & 0xff) - '0';
+ id[1] = (val[1] & 0xff) - '0';
+ id[0] = ((val[1] >> 8) & 0xff) - '0';
+
+ *chip_id = (id[3] * 1000) + (id[2] * 100) +
+ (id[1] * 10) + id[0];
+
+ if (!(*chip_id)) {
+ dev_err(eth->dev, "failed to get chip id\n");
+ return -ENODEV;
+ }
+
+ dev_info(eth->dev, "chip id = %d\n", *chip_id);
+
+ return 0;
+}
+
+static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
+{
+ switch (eth->chip_id) {
+ case MT7623_ETH:
+ return true;
+ }
+
+ return false;
+}
+
static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2362,8 +2397,6 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->pctl);
}
- eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");
-
for (i = 0; i < 3; i++) {
eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
@@ -2388,6 +2421,12 @@ static int mtk_probe(struct platform_device *pdev)
if (err)
return err;
+ err = mtk_get_chip_id(eth, &eth->chip_id);
+ if (err)
+ return err;
+
+ eth->hwlro = mtk_is_hwlro_supported(eth);
+
for_each_child_of_node(pdev->dev.of_node, mac_np) {
if (!of_device_is_compatible(mac_np,
"mediatek,eth-mac"))
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 30031959d6de..99b1c8e9f16f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -342,6 +342,11 @@
#define GPIO_BIAS_CTRL 0xed0
#define GPIO_DRV_SEL10 0xf00
+/* ethernet subsystem chip id register */
+#define ETHSYS_CHIPID0_3 0x0
+#define ETHSYS_CHIPID4_7 0x4
+#define MT7623_ETH 7623
+
/* ethernet subsystem config register */
#define ETHSYS_SYSCFG0 0x14
#define SYSCFG0_GE_MASK 0x3
@@ -534,6 +539,7 @@ struct mtk_eth {
unsigned long sysclk;
struct regmap *ethsys;
struct regmap *pctl;
+ u32 chip_id;
bool hwlro;
atomic_t dma_refcnt;
struct mtk_tx_ring tx_ring;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b1cef7a0f7ca..e36bebcab3f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2469,6 +2469,7 @@ err_comm_admin:
kfree(priv->mfunc.master.slave_state);
err_comm:
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
err_vhcr:
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
priv->mfunc.vhcr,
@@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
int slave;
u32 slave_read;
+ /* If the comm channel has not yet been initialized,
+ * skip reporting the internal error event to all
+ * the communication channels.
+ */
+ if (!priv->mfunc.comm)
+ return;
+
/* Report an internal error event to all
* communication channels.
*/
@@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
}
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 08fc5fc56d43..a5fc46bbcbe2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 tmp_rounded =
+ roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+ roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
- max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+ max_val_cycles : tmp_rounded;
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
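The tmp_rounded guard matters because roundup_pow_of_two() is not safe once max_val_cycles already exceeds 2^63: the rounding shift overflows (undefined in C, in practice wrapping toward 0), and the old "roundup - 1" then underflowed to ~0ULL, yielding a bogus maximum multiplier below. The fixed expression only subtracts 1 when the rounding really produced a larger power of two, and clamps the overflow case to UINT_MAX. The same check in miniature, as an illustration rather than the driver's code:

	/* Illustration only: for max_val_cycles = 0x9000000000000000 the
	 * roundup wraps past 2^63, so r > max_val_cycles is false and
	 * UINT_MAX is used instead of the underflowed r - 1.
	 */
	static u64 safe_wraparound_cycles(u64 max_val_cycles)
	{
		u64 r = roundup_pow_of_two(max_val_cycles);

		return r > max_val_cycles ? r - 1 : UINT_MAX;
	}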
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 132cea655920..e3be7e44ff51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
/* For TX we use the same irq per
ring we assigned for the RX */
struct mlx4_en_cq *rx_cq;
-
+ int xdp_index;
+
+ /* The xdp tx irq must align with the rx ring that forwards to
+ * it, so reindex these from 0. This should only happen when
+ * tx_ring_num is not a multiple of rx_ring_num.
+ */
+ xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
+ if (xdp_index >= 0)
+ cq_idx = xdp_index;
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
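The reindexing above is easiest to see with numbers; the ring counts below are hypothetical:

	/* Worked example: tx_ring_num = 24, of which the last 8 are XDP
	 * rings, rx_ring_num = 8, xdp_ring_num = 8.
	 *
	 *   cq_idx 16..23 (XDP)   -> xdp_index = (8 - 24) + cq_idx = 0..7
	 *   cq_idx  0..15 (plain) -> xdp_index < 0, cq_idx kept as-is
	 *
	 * After the modulo, each XDP TX ring shares the IRQ of the RX ring
	 * that forwards to it.
	 */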
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e703bed7b82..3a47e83d3e07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1733,6 +1733,13 @@ int mlx4_en_start_port(struct net_device *dev)
udp_tunnel_get_rx_info(dev);
priv->port_up = true;
+
+ /* Process any pending completions to prevent
+ * the queues from freezing if they are full
+ */
+ for (i = 0; i < priv->rx_ring_num; i++)
+ napi_schedule(&priv->rx_cq[i]->napi);
+
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -1910,8 +1917,9 @@ static void mlx4_en_clear_stats(struct net_device *dev)
struct mlx4_en_dev *mdev = priv->mdev;
int i;
- if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
- en_dbg(HW, priv, "Failed dumping statistics\n");
+ if (!mlx4_is_slave(mdev->dev))
+ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+ en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5aa8b751f417..59473a0ebcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -166,7 +166,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -322,7 +322,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
0, MLX4_CMD_DUMP_ETH_STATS,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index b66e03d9711f..c06346a82496 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -118,6 +118,29 @@ mlx4_en_test_loopback_exit:
return !loopback_ok;
}
+static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+ int i = 0;
+
+ err = mlx4_test_async(mdev->dev);
+ /* When not in MSI_X or slave, test only async */
+ if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
+ return err;
+
+ /* Loop over all completion vectors of the current port;
+ * for each vector, check whether it works by mapping command
+ * completions to that vector and performing a NOP command
+ */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
+ if (err)
+ break;
+ }
+
+ return err;
+}
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
{
@@ -151,7 +174,6 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
int i, carrier_ok;
memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -177,7 +199,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
netif_carrier_on(dev);
}
- buf[0] = mlx4_test_interrupts(mdev->dev);
+ buf[0] = mlx4_en_test_interrupts(priv);
buf[1] = mlx4_en_test_link(priv);
buf[2] = mlx4_en_test_speed(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cf8f8a72a801..cd3638e6fe25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
kfree(priv->eq_table.uar_map);
}
-/* A test that verifies that we can accept interrupts on all
- * the irq vectors of the device.
+/* A test that verifies that we can accept interrupts
+ * on the vector allocated for asynchronous events
+ */
+int mlx4_test_async(struct mlx4_dev *dev)
+{
+ return mlx4_NOP(dev);
+}
+EXPORT_SYMBOL(mlx4_test_async);
+
+/* A test that verifies that we can accept interrupts
+ * on the given irq vector of the tested port.
* Interrupts are checked using the NOP command.
*/
-int mlx4_test_interrupts(struct mlx4_dev *dev)
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int i;
int err;
- err = mlx4_NOP(dev);
- /* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
- return err;
-
- /* A loop over all completion vectors, for each vector we will check
- * whether it works by mapping command completions to that vector
- * and performing a NOP command
- */
- for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
- /* Make sure request_irq was called */
- if (!priv->eq_table.eq[i].have_irq)
- continue;
-
- /* Temporary use polling for command completions */
- mlx4_cmd_use_polling(dev);
-
- /* Map the new eq to handle all asynchronous events */
- err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[i].eqn);
- if (err) {
- mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
- mlx4_cmd_use_events(dev);
- break;
- }
+ /* Temporary use polling for command completions */
+ mlx4_cmd_use_polling(dev);
- /* Go back to using events */
- mlx4_cmd_use_events(dev);
- err = mlx4_NOP(dev);
+ /* Map the new eq to handle all asynchronous events */
+ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
+ priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
+ if (err) {
+ mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+ goto out;
}
+ /* Go back to using events */
+ mlx4_cmd_use_events(dev);
+ err = mlx4_NOP(dev);
+
/* Return to default */
+ mlx4_cmd_use_polling(dev);
+out:
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+ mlx4_cmd_use_events(dev);
+
return err;
}
-EXPORT_SYMBOL(mlx4_test_interrupts);
+EXPORT_SYMBOL(mlx4_test_interrupt);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f9cbc67f1694..84bab9f0732e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -49,9 +49,9 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static bool enable_qos = true;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
#define MLX4_GET(dest, source, offset) \
do { \
@@ -160,6 +160,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[33] = "RoCEv2 support",
[34] = "DMFS Sniffer support (UC & MC)",
[35] = "QinQ VST mode support",
+ [36] = "sl to vl mapping table change event support"
};
int i;
@@ -789,6 +790,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
+#define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET 0x78
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
@@ -904,6 +906,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
dev_cap->fs_max_num_qp_per_entry = field;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
+ if (field & (1 << 5))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
if (field & 0x1)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
@@ -2783,7 +2788,6 @@ static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
int mlx4_config_mad_demux(struct mlx4_dev *dev)
{
struct mlx4_cmd_mailbox *mailbox;
- int secure_host_active;
int err;
/* Check if mad_demux is supported */
@@ -2806,7 +2810,8 @@ int mlx4_config_mad_demux(struct mlx4_dev *dev)
goto out;
}
- secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
+ if (mlx4_check_smp_firewall_active(dev, mailbox))
+ dev->flags |= MLX4_FLAG_SECURE_HOST;
/* Config mad_demux to handle all MADs returned by the query above */
err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
@@ -2817,7 +2822,7 @@ int mlx4_config_mad_demux(struct mlx4_dev *dev)
goto out;
}
- if (secure_host_active)
+ if (dev->flags & MLX4_FLAG_SECURE_HOST)
mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
out:
mlx4_free_cmd_mailbox(dev, mailbox);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7183ac4135d2..6f4e67bc3538 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info,
int i;
int err = 0;
+ if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
+ mlx4_err(mdev,
+ "Requested port type for port %d is not supported on this HCA\n",
+ info->port);
+ err = -EINVAL;
+ goto err_sup;
+ }
+
mlx4_stop_sense(mdev);
mutex_lock(&priv->port_mutex);
info->tmp_type = port_type;
@@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
-
+err_sup:
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e4878f31e45d..88ee7d8a5923 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -145,9 +145,10 @@ enum mlx4_resource {
RES_MTT,
RES_MAC,
RES_VLAN,
- RES_EQ,
+ RES_NPORT_ID,
RES_COUNTER,
RES_FS_RULE,
+ RES_EQ,
MLX4_NUM_OF_RESOURCE_TYPE
};
@@ -1329,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd);
int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
int port, void *buf);
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
- struct mlx4_cmd_mailbox *outbox);
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index c5b2064297a1..b656dd5772e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
- u32 in_mod, struct mlx4_cmd_mailbox *outbox)
-{
- return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_NATIVE);
-}
-
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
- if (slave != dev->caps.function)
- return 0;
- return mlx4_common_dump_eth_stats(dev, slave,
- vhcr->in_modifier, outbox);
+ return 0;
}
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 84d7857ccc27..c548beaaf910 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;
- if (eq)
- *eq = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
+ if (!err && eq)
+ *eq = r;
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 6cb38304669f..2c6e3c7b7417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -41,6 +41,13 @@
#include "mlx5_core.h"
+struct mlx5_db_pgdir {
+ struct list_head list;
+ unsigned long *bitmap;
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
/* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0.
*/
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
- bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+ pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+
+ if (!pgdir->bitmap) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ bitmap_fill(pgdir->bitmap, db_per_page);
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
&pgdir->db_dma, node);
if (!pgdir->db_page) {
+ kfree(pgdir->bitmap);
kfree(pgdir);
return NULL;
}
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
int offset;
int i;
- i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
- if (i >= MLX5_DB_PER_PAGE)
+ i = find_first_bit(pgdir->bitmap, db_per_page);
+ if (i >= db_per_page)
return -ENOMEM;
__clear_bit(i, pgdir->bitmap);
db->u.pgdir = pgdir;
db->index = i;
- offset = db->index * L1_CACHE_BYTES;
+ offset = db->index * cache_line_size();
db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
db->dma = pgdir->db_dma + offset;
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
mutex_lock(&dev->priv.pgdir_mutex);
__set_bit(db->index, db->u.pgdir->bitmap);
- if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+ if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir->bitmap);
kfree(db->u.pgdir);
}
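Replacing the fixed MLX5_DB_PER_PAGE constant with PAGE_SIZE / cache_line_size() lets the doorbell layout follow the runtime cache-line size, which is why the bitmap must now be allocated dynamically. The sizing, with illustrative values:

	/* Illustration of the bitmap sizing (64-bit host, 4 KiB pages):
	 *   cache_line_size() =  64 -> db_per_page = 64,
	 *                            BITS_TO_LONGS(64) = 1 unsigned long
	 *   cache_line_size() = 128 -> db_per_page = 32,
	 *                            BITS_TO_LONGS(32) = 1 unsigned long
	 */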
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 460363b66cb1..7a43502a89cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -85,6 +85,9 @@
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+#define MLX5E_DEFAULT_LRO_TIMEOUT 32
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
@@ -221,6 +224,7 @@ struct mlx5e_params {
struct ieee_ets ets;
#endif
bool rx_am_enabled;
+ u32 lro_timeout;
};
struct mlx5e_tstamp {
@@ -888,5 +892,6 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7eaf38020a8f..84e8b250e2af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = priv->params.num_tc;
+ c->xdp = !!priv->xdp_prog;
if (priv->params.rx_am_enabled)
rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
@@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_tx_cqs;
+ /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
+ err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
+ priv->params.tx_cq_moderation) : 0;
+ if (err)
+ goto err_close_rx_cq;
+
napi_enable(&c->napi);
err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
@@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
}
}
- if (priv->xdp_prog) {
- /* XDP SQ CQ params are same as normal TXQ sq CQ params */
- err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
- priv->params.tx_cq_moderation);
- if (err)
- goto err_close_sqs;
-
- err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
- if (err) {
- mlx5e_close_cq(&c->xdp_sq.cq);
- goto err_close_sqs;
- }
- }
+ err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
+ if (err)
+ goto err_close_sqs;
- c->xdp = !!priv->xdp_prog;
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
goto err_close_xdp_sq;
@@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return 0;
err_close_xdp_sq:
- mlx5e_close_sq(&c->xdp_sq);
+ if (c->xdp)
+ mlx5e_close_sq(&c->xdp_sq);
err_close_sqs:
mlx5e_close_sqs(c);
@@ -1522,6 +1519,10 @@ err_close_icosq:
err_disable_napi:
napi_disable(&c->napi);
+ if (c->xdp)
+ mlx5e_close_cq(&c->xdp_sq.cq);
+
+err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
err_close_tx_cqs:
@@ -1971,9 +1972,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
(priv->params.lro_wqe_sz -
ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[2]));
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
@@ -3401,6 +3400,18 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
}
}
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile,
@@ -3419,6 +3430,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->params.lro_timeout =
+ mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
@@ -4035,7 +4049,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- unregister_netdev(netdev);
destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
@@ -4052,6 +4065,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
+ unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(mdev, priv);
}
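mlx5e_choose_lro_timeout() walks the firmware-advertised periods, which are in ascending order, and returns the first one not smaller than the request, falling back to the largest. With hypothetical capability values:

	/* Usage illustration:
	 * lro_timer_supported_periods[] = { 8, 16, 32, 64 }
	 *   mlx5e_choose_lro_timeout(mdev, 32)  -> 32  (first period >= 32)
	 *   mlx5e_choose_lro_timeout(mdev, 100) -> 64  (loop stops at the
	 *                                               last index)
	 */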
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3c97da103d30..bf1c09ca73c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -308,7 +308,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
#endif
- netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC;
+ netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
eth_hw_addr_random(netdev);
@@ -457,6 +457,7 @@ void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5e_priv *priv = rep->priv_data;
struct net_device *netdev = priv->netdev;
+ unregister_netdev(netdev);
mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ce8c54d18906..6bb21b31cfeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -237,12 +237,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
- if (mask->vlan_id) {
+ if (mask->vlan_id || mask->vlan_priority) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index abbf2c369923..be1f7333ab7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -931,8 +931,8 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
-static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *vlan_grp = NULL;
@@ -949,9 +949,11 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
int table_size = 2;
int err = 0;
- if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->egress.acl))
- return;
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->egress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
@@ -959,12 +961,12 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1009,6 +1011,7 @@ out:
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
mlx5_destroy_flow_table(acl);
+ return err;
}
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
@@ -1041,8 +1044,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -1063,9 +1066,11 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
int table_size = 4;
int err = 0;
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->ingress.acl))
- return;
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
@@ -1073,12 +1078,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1167,6 +1172,7 @@ out:
}
kvfree(flow_group_in);
+ return err;
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1225,7 +1231,13 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_enable_ingress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable ingress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
@@ -1299,7 +1311,13 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_egress_acl(esw, vport);
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable egress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c55ad8d00c05..d239f5d0ea36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -57,7 +57,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
- action = attr->action;
+ /* Per-flow vlan pop/push is emulated; don't pass it to the firmware */
+ action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5da2cc878582..914e5466f729 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -436,6 +436,9 @@ static void del_flow_group(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
+
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
@@ -879,7 +882,7 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *
tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
tree_add_node(&fg->node, &ft->node);
/* Add node to group list */
- list_add(&fg->node.list, ft->node.children.prev);
+ list_add(&fg->node.list, prev_fg);
return fg;
}
@@ -893,7 +896,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
return ERR_PTR(-EPERM);
lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, &ft->node.children, false);
+ fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
unlock_ref_node(&ft->node);
return fg;
@@ -1012,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct list_head *prev = &ft->node.children;
+ struct list_head *prev = ft->node.children.prev;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
@@ -1687,7 +1690,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering)
{
steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
- if (IS_ERR_OR_NULL(steering->root_ns))
+ if (!steering->root_ns)
goto cleanup;
if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 3a9195b4169d..3b026c151cf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -218,6 +218,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
goto err_out;
if (aging) {
+ counter->cache.lastuse = jiffies;
counter->aging = true;
spin_lock(&fc_stats->addlist_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a05fb965c8d..5bcf93422ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -61,10 +61,15 @@ enum {
enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+ MLX5_NIC_IFC_INVALID = 3
};
-static u8 get_nic_interface(struct mlx5_core_dev *dev)
+enum {
+ MLX5_DROP_NEW_HEALTH_WORK,
+};
+
+static u8 get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}
@@ -97,7 +102,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+ if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
return 1;
if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -127,7 +132,7 @@ unlock:
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
- u8 nic_interface = get_nic_interface(dev);
+ u8 nic_interface = get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
@@ -149,8 +154,34 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
+static void health_recover(struct work_struct *work)
+{
+ struct mlx5_core_health *health;
+ struct delayed_work *dwork;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ u8 nic_state;
+
+ dwork = container_of(work, struct delayed_work, work);
+ health = container_of(dwork, struct mlx5_core_health, recover_work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ nic_state = get_nic_state(dev);
+ if (nic_state == MLX5_NIC_IFC_INVALID) {
+ dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+ return;
+ }
+
+ dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+ mlx5_recover_device(dev);
+}
+
+/* How long to wait before the health recovery flow resets the driver (in msecs) */
+#define MLX5_RECOVERY_DELAY_MSECS 60000
static void health_care(struct work_struct *work)
{
+ unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
@@ -160,6 +191,14 @@ static void health_care(struct work_struct *work)
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
+
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ schedule_delayed_work(&health->recover_work, recover_delay);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
static const char *hsynd_str(u8 synd)
@@ -272,7 +311,13 @@ static void poll_health(unsigned long data)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- schedule_work(&health->work);
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ queue_work(health->wq, &health->work);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
}
@@ -281,6 +326,8 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
init_timer(&health->timer);
+ health->sick = 0;
+ clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -297,11 +344,22 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
del_timer_sync(&health->timer);
}
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ spin_lock(&health->wq_lock);
+ set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ spin_unlock(&health->wq_lock);
+ cancel_delayed_work_sync(&health->recover_work);
+ cancel_work_sync(&health->work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- flush_work(&health->work);
+ destroy_workqueue(health->wq);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -316,9 +374,13 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
strcpy(name, "mlx5_health");
strcat(name, dev_name(&dev->pdev->dev));
+ health->wq = create_singlethread_workqueue(name);
kfree(name);
-
+ if (!health->wq)
+ return -ENOMEM;
+ spin_lock_init(&health->wq_lock);
INIT_WORK(&health->work, health_care);
+ INIT_DELAYED_WORK(&health->recover_work, health_recover);
return 0;
}
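The new MLX5_DROP_NEW_HEALTH_WORK bit closes a race between teardown and the poll timer: producers test the bit under wq_lock before queuing, and mlx5_drain_health_wq() sets it under the same lock before cancelling, so no work can be requeued after the cancel. The same pattern in miniature, assuming nothing beyond the locking shown above:

	/* Producer (poll timer):              Consumer (teardown):
	 *
	 * spin_lock(&health->wq_lock);        spin_lock(&health->wq_lock);
	 * if (!test_bit(DROP, &flags))        set_bit(DROP, &flags);
	 *         queue_work(wq, &work);      spin_unlock(&health->wq_lock);
	 * spin_unlock(&health->wq_lock);      cancel_work_sync(&work);
	 */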
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d9c3c70b29e4..3eb931585b3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -844,12 +844,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
struct pci_dev *pdev = dev->pdev;
int err;
- err = mlx5_query_hca_caps(dev);
- if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto out;
- }
-
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query board id failed\n");
@@ -1023,6 +1017,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_start_health_poll(dev);
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query hca failed\n");
+ goto err_stop_poll;
+ }
+
if (boot && mlx5_init_once(dev, priv)) {
dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
@@ -1226,6 +1226,9 @@ static int init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+ dev->pdev = pdev;
+ dev->event = mlx5_core_event;
+
if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
mlx5_core_warn(dev,
"selected profile out of range, selecting default (%d)\n",
@@ -1233,8 +1236,6 @@ static int init_one(struct pci_dev *pdev,
prof_sel = MLX5_DEFAULT_PROF;
}
dev->profile = &profile[prof_sel];
- dev->pdev = pdev;
- dev->event = mlx5_core_event;
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
@@ -1313,10 +1314,16 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv;
dev_info(&pdev->dev, "%s was called\n", __func__);
+
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- pci_save_state(pdev);
- mlx5_pci_disable_device(dev);
+ /* In case of a kernel call, save the PCI state and drain the health wq */
+ if (state) {
+ pci_save_state(pdev);
+ mlx5_drain_health_wq(dev);
+ mlx5_pci_disable_device(dev);
+ }
+
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
@@ -1373,11 +1380,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
- mlx5_pci_err_detected(dev->pdev, 0);
-}
-
static void mlx5_pci_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@@ -1427,6 +1429,18 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+void mlx5_recover_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_disable_device(dev);
+ if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
+ mlx5_pci_resume(dev->pdev);
+}
+
static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 3d0cfb9f18f9..187662c8ea96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -83,6 +83,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
+void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d4585154151d..a57d5a81eb05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -209,6 +209,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
struct page *page;
+ u64 zero_addr = 1;
u64 addr;
int err;
int nid = dev_to_node(&dev->pdev->dev);
@@ -218,26 +219,35 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
mlx5_core_warn(dev, "failed to allocate page\n");
return -ENOMEM;
}
+map:
addr = dma_map_page(&dev->pdev->dev, page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&dev->pdev->dev, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
- goto out_alloc;
+ goto err_mapping;
}
+
+ /* Firmware doesn't support page with physical address 0 */
+ if (addr == 0) {
+ zero_addr = addr;
+ goto map;
+ }
+
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
- goto out_mapping;
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
}
- return 0;
-
-out_mapping:
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+err_mapping:
+ if (err)
+ __free_page(page);
-out_alloc:
- __free_page(page);
+ if (zero_addr == 0)
+ dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
return err;
}
@@ -287,7 +297,7 @@ retry:
goto retry;
}
- MLX5_SET64(manage_pages_in, in, pas[i], addr);
+ MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
}
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
@@ -344,7 +354,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
if (fwp->func_id != func_id)
continue;
- MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
+ MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
i++;
}
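The map/remap dance in alloc_system_page() handles a corner case: firmware cannot use a page whose DMA address is 0, so that mapping is deliberately kept (recorded in zero_addr, which starts at the sentinel value 1) while a second mapping is requested, and it is only unmapped at the end. Holding the zero mapping guarantees the retry cannot be handed address 0 again. The control flow, as an illustration:

	/* zero_addr = 1              sentinel: no zero mapping held
	 * map:  addr = dma_map_page(...)
	 *       if (addr == 0) { zero_addr = 0; goto map; }  keep it, remap
	 *       ... insert_page(addr) ...
	 *       if (zero_addr == 0)
	 *               dma_unmap_page(zero_addr)             released last
	 */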
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index e742bd4e8894..912f71f84209 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1838,11 +1838,17 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
};
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id)
{
unsigned long end;
mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+ if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+ }
+
wmb(); /* reset needs to be written before we read control register */
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
@@ -1909,7 +1915,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
- err = mlxsw_pci_sw_reset(mlxsw_pci);
+ err = mlxsw_pci_sw_reset(mlxsw_pci, id);
if (err) {
dev_err(&pdev->dev, "Software reset failed\n");
goto err_sw_reset;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1ec0a4ce3c46..dda5761e91bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
span_entry->used = true;
span_entry->id = index;
- span_entry->ref_count = 0;
+ span_entry->ref_count = 1;
span_entry->local_port = local_port;
return span_entry;
}
@@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry
span_entry = mlxsw_sp_span_entry_find(port);
if (span_entry) {
+ /* Already exists, just take a reference */
span_entry->ref_count++;
return span_entry;
}
@@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_span_entry *span_entry)
{
+ WARN_ON(!span_entry->ref_count);
if (--span_entry->ref_count == 0)
mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 9b22863a924b..97bbc1d21df8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -115,7 +115,7 @@ struct mlxsw_sp_rif {
struct mlxsw_sp_mid {
struct list_head list;
unsigned char addr[ETH_ALEN];
- u16 vid;
+ u16 fid;
u16 mid;
unsigned int ref_count;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 78fc557d6dd7..e83072da6272 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -320,6 +320,8 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
lpm_tree);
if (err)
goto err_left_struct_set;
+ memcpy(&lpm_tree->prefix_usage, prefix_usage,
+ sizeof(lpm_tree->prefix_usage));
return lpm_tree;
err_left_struct_set:
@@ -343,7 +345,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->proto == proto &&
+ if (lpm_tree->ref_count != 0 &&
+ lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage))
goto inc_ref_count;
@@ -591,21 +594,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
+
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
+ mlxsw_sp_router_fib_flush(mlxsw_sp);
kfree(mlxsw_sp->router.vrs);
}
struct mlxsw_sp_neigh_key {
- unsigned char addr[sizeof(struct in6_addr)];
- struct net_device *dev;
+ struct neighbour *n;
};
struct mlxsw_sp_neigh_entry {
struct rhash_head ht_node;
struct mlxsw_sp_neigh_key key;
u16 rif;
- struct neighbour *n;
bool offloaded;
struct delayed_work dw;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -643,19 +647,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
- struct net_device *dev, u16 rif,
- struct neighbour *n)
+mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
if (!neigh_entry)
return NULL;
- memcpy(neigh_entry->key.addr, addr, addr_len);
- neigh_entry->key.dev = dev;
+ neigh_entry->key.n = n;
neigh_entry->rif = rif;
- neigh_entry->n = n;
INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
INIT_LIST_HEAD(&neigh_entry->nexthop_list);
return neigh_entry;
@@ -668,13 +668,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
}
static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
- size_t addr_len, struct net_device *dev)
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
- struct mlxsw_sp_neigh_key key = {{ 0 } };
+ struct mlxsw_sp_neigh_key key;
- memcpy(key.addr, addr, addr_len);
- key.dev = dev;
+ key.n = n;
return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
&key, mlxsw_sp_neigh_ht_params);
}
@@ -686,26 +684,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_rif *r;
- u32 dip;
int err;
if (n->tbl != &arp_tbl)
return 0;
- dip = ntohl(*((__be32 *) n->primary_key));
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
- n->dev);
- if (neigh_entry) {
- WARN_ON(neigh_entry->n != n);
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+ if (neigh_entry)
return 0;
- }
r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
if (WARN_ON(!r))
return -EINVAL;
- neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
- r->rif, n);
+ neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
if (!neigh_entry)
return -ENOMEM;
err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
@@ -724,14 +716,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_neigh_entry *neigh_entry;
- u32 dip;
if (n->tbl != &arp_tbl)
return;
- dip = ntohl(*((__be32 *) n->primary_key));
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
- n->dev);
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
if (!neigh_entry)
return;
mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
@@ -814,6 +803,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
}
}
+static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
+{
+ u8 num_rec, last_rec_index, num_entries;
+
+ num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+ last_rec_index = num_rec - 1;
+
+ if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
+ return false;
+ if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
+ MLXSW_REG_RAUHTD_TYPE_IPV6)
+ return true;
+
+ num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
+ last_rec_index);
+ if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
+ return true;
+ return false;
+}
+
static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
char *rauhtd_pl;
@@ -840,7 +849,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < num_rec; i++)
mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
i);
- } while (num_rec);
+ } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
rtnl_unlock();
kfree(rauhtd_pl);
@@ -859,7 +868,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
* is active regardless of the traffic.
*/
if (!list_empty(&neigh_entry->nexthop_list))
- neigh_event_send(neigh_entry->n, NULL);
+ neigh_event_send(neigh_entry->key.n, NULL);
}
rtnl_unlock();
}
@@ -905,9 +914,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
rtnl_lock();
list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
nexthop_neighs_list_node) {
- if (!(neigh_entry->n->nud_state & NUD_VALID) &&
+ if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
!list_empty(&neigh_entry->nexthop_list))
- neigh_event_send(neigh_entry->n, NULL);
+ neigh_event_send(neigh_entry->key.n, NULL);
}
rtnl_unlock();
@@ -924,7 +933,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
{
struct mlxsw_sp_neigh_entry *neigh_entry =
container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
- struct neighbour *n = neigh_entry->n;
+ struct neighbour *n = neigh_entry->key.n;
struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char rauht_pl[MLXSW_REG_RAUHT_LEN];
@@ -1027,11 +1036,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
dip = ntohl(*((__be32 *) n->primary_key));
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
- &dip,
- sizeof(__be32),
- dev);
- if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+ if (WARN_ON(!neigh_entry)) {
mlxsw_sp_port_dev_put(mlxsw_sp_port);
return NOTIFY_DONE;
}
@@ -1340,33 +1346,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
struct fib_nh *fib_nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
- u32 gwip = ntohl(fib_nh->nh_gw);
struct net_device *dev = fib_nh->nh_dev;
struct neighbour *n;
u8 nud_state;
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
- sizeof(gwip), dev);
- if (!neigh_entry) {
- __be32 gwipn = htonl(gwip);
-
- n = neigh_create(&arp_tbl, &gwipn, dev);
+ /* Take a reference on the neigh here, ensuring that it will
+ * not be destroyed before the nexthop entry is finished.
+ * The reference is taken either in neigh_lookup() or
+ * in neigh_create() in case n is not found.
+ */
+ n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
+ if (!n) {
+ n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
- neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
- sizeof(gwip), dev);
- if (!neigh_entry) {
- neigh_release(n);
- return -EINVAL;
- }
- } else {
- /* Take a reference of neigh here ensuring that neigh would
- * not be detructed before the nexthop entry is finished.
- * The second branch takes the reference in neith_create()
- */
- n = neigh_entry->n;
- neigh_clone(n);
+ }
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+ if (!neigh_entry) {
+ neigh_release(n);
+ return -EINVAL;
}
/* If that is the first nexthop connected to that neigh, add to
@@ -1400,7 +1399,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
if (list_empty(&nh->neigh_entry->nexthop_list))
list_del(&nh->neigh_entry->nexthop_neighs_list_node);
- neigh_release(neigh_entry->n);
+ neigh_release(neigh_entry->key.n);
}
static struct mlxsw_sp_nexthop_group *
@@ -1460,11 +1459,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
for (i = 0; i < fi->fib_nhs; i++) {
struct fib_nh *fib_nh = &fi->fib_nh[i];
- u32 gwip = ntohl(fib_nh->nh_gw);
+ struct neighbour *n = nh->neigh_entry->key.n;
- if (memcmp(nh->neigh_entry->key.addr,
- &gwip, sizeof(u32)) == 0 &&
- nh->neigh_entry->key.dev == fib_nh->nh_dev)
+ if (memcmp(n->primary_key, &fib_nh->nh_gw,
+ sizeof(fib_nh->nh_gw)) == 0 &&
+ n->dev == fib_nh->nh_dev)
return true;
}
return false;
@@ -1820,19 +1819,17 @@ err_fib_entry_insert:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
if (mlxsw_sp->router.aborted)
- return 0;
+ return;
fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
- if (!fib_entry) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
- return -ENOENT;
- }
+ if (!fib_entry)
+ return;
if (fib_entry->ref_count == 1) {
mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
@@ -1840,7 +1837,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return 0;
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -1862,7 +1858,8 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
if (err)
return err;
@@ -1873,18 +1870,18 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
-static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_resources *resources;
struct mlxsw_sp_fib_entry *fib_entry;
struct mlxsw_sp_fib_entry *tmp;
struct mlxsw_sp_vr *vr;
int i;
- int err;
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
+
if (!vr->used)
continue;
@@ -1900,6 +1897,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
break;
}
}
+}
+
+static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ mlxsw_sp_router_fib_flush(mlxsw_sp);
mlxsw_sp->router.aborted = true;
err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
if (err)
@@ -1957,6 +1961,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
struct fib_entry_notifier_info *fen_info = ptr;
int err;
+ if (!net_eq(fen_info->info.net, &init_net))
+ return NOTIFY_DONE;
+
switch (event) {
case FIB_EVENT_ENTRY_ADD:
err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5e00c79e8133..1e2c8eca3af1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
const unsigned char *addr,
- u16 vid)
+ u16 fid)
{
struct mlxsw_sp_mid *mid;
list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
- if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
+ if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
return mid;
}
return NULL;
@@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
const unsigned char *addr,
- u16 vid)
+ u16 fid)
{
struct mlxsw_sp_mid *mid;
u16 mid_idx;
@@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
ether_addr_copy(mid->addr, addr);
- mid->vid = vid;
+ mid->fid = fid;
mid->mid = mid_idx;
mid->ref_count = 0;
list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
@@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (switchdev_trans_ph_prepare(trans))
return 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
+ mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
if (!mid) {
netdev_err(dev, "Unable to allocate MC group\n");
return -ENOMEM;
@@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
u16 mid_idx;
int err = 0;
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+ mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
if (!mid) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index c0c23e2f3275..92bda8703f87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1088,6 +1088,7 @@ err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
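The one-line switchx2 fix above adds the missing SWID teardown to the error ladder, so the unwind now reverses, in order, exactly the init steps that completed. The idiom in condensed form; every name and constant below is illustrative:

    #include <linux/types.h>

    struct demo_port;
    int demo_swid_set(struct demo_port *p, u8 swid);
    int demo_speed_set(struct demo_port *p);

    #define DEMO_SWID            0
    #define DEMO_SWID_DISABLED   255

    static int demo_port_init(struct demo_port *p)
    {
            int err;

            err = demo_swid_set(p, DEMO_SWID);
            if (err)
                    goto err_swid_set;

            err = demo_speed_set(p);
            if (err)
                    goto err_speed_set;     /* swid must be undone below */

            return 0;

    err_speed_set:
            demo_swid_set(p, DEMO_SWID_DISABLED);   /* reverse step 1 */
    err_swid_set:
            return err;
    }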
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 42e34076d2de..b14f0305aa31 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -821,7 +821,7 @@ static void encx24j600_set_multicast_list(struct net_device *dev)
}
if (oldfilter != priv->rxfilter)
- queue_kthread_work(&priv->kworker, &priv->setrx_work);
+ kthread_queue_work(&priv->kworker, &priv->setrx_work);
}
static void encx24j600_hw_tx(struct encx24j600_priv *priv)
@@ -879,7 +879,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
- queue_kthread_work(&priv->kworker, &priv->tx_work);
+ kthread_queue_work(&priv->kworker, &priv->tx_work);
return NETDEV_TX_OK;
}
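These encx24j600 hunks (and the init hunk just below) are mechanical fallout of the kernel-wide kthread-worker API rename: init_kthread_worker() became kthread_init_worker(), queue_kthread_work() became kthread_queue_work(), and so on. A minimal usage sketch of the renamed API; the context structure and names are illustrative:

    #include <linux/kthread.h>
    #include <linux/err.h>

    struct demo_ctx {
            struct kthread_worker kworker;
            struct kthread_work work;
            struct task_struct *task;
    };

    static void demo_work_fn(struct kthread_work *work)
    {
            /* Runs in the dedicated kworker thread. */
    }

    static int demo_start(struct demo_ctx *ctx)
    {
            kthread_init_worker(&ctx->kworker);
            kthread_init_work(&ctx->work, demo_work_fn);

            ctx->task = kthread_run(kthread_worker_fn, &ctx->kworker, "demo");
            if (IS_ERR(ctx->task))
                    return PTR_ERR(ctx->task);

            kthread_queue_work(&ctx->kworker, &ctx->work);
            return 0;
    }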
@@ -1037,9 +1037,9 @@ static int encx24j600_spi_probe(struct spi_device *spi)
goto out_free;
}
- init_kthread_worker(&priv->kworker);
- init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
- init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+ kthread_init_worker(&priv->kworker);
+ kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
+ kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);
priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
"encx24j600");
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0df1391f9663..32f2a45f4ab2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -107,15 +107,7 @@ config QEDE
---help---
This enables the support for ...
-config INFINIBAND_QEDR
- tristate "QLogic qede RoCE sources [debug]"
- depends on QEDE && 64BIT
- select QED_LL2
- default n
- ---help---
- This provides a temporary node that allows the compilation
- and logical testing of the InfiniBand over Ethernet support
- for QLogic QED. This would be replaced by the 'real' option
- once the QEDR driver is added [+relocated].
+config QED_RDMA
+ bool
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index cda0af7fbc20..967acf322c09 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 82370a1a59ad..0c42c240b5cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,13 +47,8 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
-/* ILT constants */
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE 4
-#else
-#define ILT_DEFAULT_HW_P_SIZE 3
-#endif
+#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL;
}
-void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
}
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
return 0;
}
-void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
- struct qed_rdma_pf_params *p_params)
+static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_pf_params *p_params)
{
u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
enum protocol_type proto;
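The ILT_DEFAULT_HW_P_SIZE hunk above trades an #if/#else block for IS_ENABLED(), which expands to a compile-time 0 or 1 (for =y or =m) and so can sit inside an ordinary C expression while keeping both arms visible to the compiler. A standalone illustration; CONFIG_DEMO_FEATURE is a placeholder symbol:

    #include <linux/kconfig.h>

    /* Both branches are parsed and type-checked; the dead one is
     * discarded as an ordinary constant-folded conditional.
     */
    #define DEMO_HW_P_SIZE  (IS_ENABLED(CONFIG_DEMO_FEATURE) ? 4 : 3)

    static inline unsigned int demo_page_bytes(void)
    {
            return 1U << (DEMO_HW_P_SIZE + 12);
    }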
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 130da1c0490b..a4789a93b692 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
if (!dcbx_info)
return -ENOMEM;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
if (rc) {
kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
if (!dcbx_info)
return NULL;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
return NULL;
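Both qed_dcbx hunks zero dcbx_info immediately after allocation so stale heap contents cannot leak into the queried parameters. When the allocation itself is in the caller's hands, requesting zeroed memory in one call is the usual idiom — a sketch with an illustrative structure:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_info {
            u32 flags;
            u8 data[64];
    };

    static struct demo_info *demo_alloc_info(void)
    {
            /* kzalloc() == kmalloc() + memset(0): no window in which
             * uninitialized fields are visible to a reader.
             */
            return kzalloc(sizeof(struct demo_info), GFP_KERNEL);
    }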
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 88e7d5bef909..68f19ca57f96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -405,7 +405,7 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
}
/* Dump MCP Trace */
-enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
}
/* Dump GRC FIFO */
-enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Dump IGU FIFO */
-enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Protection Override dump */
-enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 *num_dumped_dwords)
{
u32 offset = 0, size_param_offset, override_window_dwords;
@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
}
/* Wrapper for unifying the idle_chk and mcp_trace api */
-enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
+static enum dbg_status
+qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
{
u32 num_errors, num_warnnings;
@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
#define QED_RESULTS_BUF_MIN_SIZE 16
/* Generic function for decoding debug feature info */
-enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
- enum qed_dbg_features feature_idx)
+static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
}
/* Generic function for performing the dump of a debug feature. */
-enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_dbg_features feature_idx)
+static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 754f6a908858..edae5fc5fccd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ROCE,
- 0) * 2;
+ NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI, 0);
+ PROTOCOLID_ISCSI,
+ NULL);
n_eqes += 2 * num_cons;
}
@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
- * status blocks equally between L2 / RoCE but with consideration as
- * to how many l2 queues / cnqs we have
- */
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ if (IS_ENABLED(CONFIG_QED_RDMA) &&
+ p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
+ * the status blocks equally between L2 / RoCE but with
+ * consideration as to how many l2 queues / cnqs we have.
+ */
num_features++;
feat_num[QED_RDMA_CNQ] =
min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
}
-#endif
+
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 72eee29c677f..2777d5bb4380 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -727,9 +727,6 @@ struct core_tx_bd_flags {
#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
-#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
-
};
struct core_tx_bd {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index a6db10717d5c..f95385cbbd40 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,6 +38,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_roce.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- struct qed_ll2_rx_packet *p_pkt,
- struct core_rx_fast_path_cqe *p_cqe,
- bool b_last_packet)
+static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ struct qed_ll2_rx_packet *p_pkt,
+ struct core_rx_fast_path_cqe *p_cqe,
+ bool b_last_packet)
{
u16 packet_length = le16_to_cpu(p_cqe->packet_length);
struct qed_ll2_buffer *buffer = p_pkt->cookie;
@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
return rc;
}
-void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL;
@@ -537,8 +538,7 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
rx_buf_addr = p_pkt->rx_buf_addr;
cookie = p_pkt->cookie;
@@ -992,9 +992,8 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
p_posting_packet = list_first_entry(&p_rx->posting_descq,
struct qed_ll2_rx_packet,
list_entry);
- list_del(&p_posting_packet->list_entry);
- list_add_tail(&p_posting_packet->list_entry,
- &p_rx->active_descq);
+ list_move_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
b_notify_fw = true;
}
@@ -1120,12 +1119,10 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
CORE_TX_BD_FLAGS_START_BD_SHIFT;
SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
+ SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
- SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
- type);
-
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
@@ -1188,8 +1185,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
}
SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
@@ -1517,7 +1513,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
struct qed_ll2_info ll2_info;
- struct qed_ll2_buffer *buffer;
+ struct qed_ll2_buffer *buffer, *tmp_buffer;
enum qed_ll2_conn_type conn_type;
struct qed_ptt *p_ptt;
int rc, i;
@@ -1587,7 +1583,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
/* Post all Rx buffers to FW */
spin_lock_bh(&cdev->ll2->lock);
- list_for_each_entry(buffer, &cdev->ll2->list, list) {
+ list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
cdev->ll2->handle,
buffer->phys_addr, 0, buffer, 1);
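Several qed_ll2 hunks above collapse list_del() + list_add_tail() pairs into list_move_tail(), and the Rx posting walk switches to the _safe iterator because entries are re-linked onto another list during the walk. Both patterns in a small sketch; structure names are illustrative:

    #include <linux/list.h>

    struct demo_pkt {
            struct list_head list_entry;
            /* payload elided */
    };

    static void demo_retire(struct demo_pkt *pkt, struct list_head *free_list)
    {
            /* One call instead of list_del() + list_add_tail(). */
            list_move_tail(&pkt->list_entry, free_list);
    }

    static void demo_flush(struct list_head *active, struct list_head *free_list)
    {
            struct demo_pkt *pkt, *tmp;

            /* The _safe variant caches the next node, so the current
             * entry may be moved off the list mid-iteration.
             */
            list_for_each_entry_safe(pkt, tmp, active, list_entry)
                    demo_retire(pkt, free_list);
    }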
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 80a5dc2d652d..4e3d62a16cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_connections);
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo, bool b_last_packet);
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 4ee3151e80c2..333c7442e48a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -33,10 +33,8 @@
#include "qed_hw.h"
#include "qed_selftest.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
-#endif
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- int num_l2_queues;
-#endif
+ int num_l2_queues = 0;
int rc;
int i;
@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- num_l2_queues = 0;
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ return 0;
+
for_each_hwfn(cdev, i)
num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
cdev->int_params.rdma_msix_cnt,
cdev->int_params.rdma_msix_base);
-#endif
return 0;
}
@@ -843,13 +839,14 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- params->rdma_pf_params.num_qps = QED_ROCE_QPS;
- params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
- /* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
- params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
-#endif
+ if (IS_ENABLED(CONFIG_QED_RDMA)) {
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ /* divide by 3 the MRs to avoid MF ILT overflow */
+ params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+ }
+
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -880,6 +877,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
}
}
+ cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
rc = qed_nic_setup(cdev);
if (rc)
goto err;
@@ -1432,7 +1430,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
-struct qed_selftest_ops qed_selftest_ops_pass = {
+static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 23430059471c..f3a825a8f8d5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
}
}
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}
-u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
-{
- return QED_CAU_DEF_RX_TIMER_RES;
-}
-
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params)
@@ -162,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
p_hwfn->p_rdma_info = p_rdma_info;
p_rdma_info->proto = PROTOCOLID_ROCE;
- num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+ NULL);
p_rdma_info->num_qps = num_cons / 2;
@@ -275,7 +271,7 @@ free_rdma_info:
return rc;
}
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -527,6 +523,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
@@ -573,7 +589,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
@@ -629,8 +645,8 @@ out:
return rc;
}
-int qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params)
+static int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 dpi_start_offset;
@@ -664,7 +680,7 @@ int qed_rdma_add_user(void *rdma_cxt,
return rc;
}
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
@@ -680,7 +696,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
return p_port;
}
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -690,7 +706,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
-void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -701,27 +717,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- if (rc)
- goto out;
-
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
-out:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
- return rc;
-}
-
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;
u16 qz_num;
@@ -816,7 +812,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
return 0;
}
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 returned_id;
@@ -836,7 +832,7 @@ int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
return rc;
}
-void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -873,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
return toggle_bit;
}
-int qed_rdma_create_cq(void *rdma_cxt,
- struct qed_rdma_create_cq_in_params *params, u16 *icid)
+static int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
@@ -957,98 +954,10 @@ err:
return rc;
}
-int qed_rdma_resize_cq(void *rdma_cxt,
- struct qed_rdma_resize_cq_in_params *in_params,
- struct qed_rdma_resize_cq_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_resize_cq_output_params *p_ramrod_res;
- struct rdma_resize_cq_ramrod_data *p_ramrod;
- enum qed_rdma_toggle_bit toggle_bit;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- dma_addr_t ramrod_res_phys;
- u8 fw_return_code;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
-
- p_ramrod_res =
- (struct rdma_resize_cq_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- &ramrod_res_phys, GFP_KERNEL);
- if (!p_ramrod_res) {
- DP_NOTICE(p_hwfn,
- "qed resize cq failed: cannot allocate memory (ramrod)\n");
- return rc;
- }
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = in_params->icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_RESIZE_CQ,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_resize_cq;
-
- p_ramrod->flags = 0;
-
- /* toggle the bit for every resize or create cq for a given icid */
- toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
- in_params->icid);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
- in_params->pbl_two_level);
-
- p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
- p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
- p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
- DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
- DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc)
- goto err;
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- rc = -EINVAL;
- goto err;
- }
-
- out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
- out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
-
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
-
- return rc;
-
-err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
- DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
-
- return rc;
-}
-
-int qed_rdma_destroy_cq(void *rdma_cxt,
- struct qed_rdma_destroy_cq_in_params *in_params,
- struct qed_rdma_destroy_cq_out_params *out_params)
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_destroy_cq_output_params *p_ramrod_res;
@@ -1169,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
-int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
@@ -1793,9 +1702,9 @@ err:
return rc;
}
-int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -1936,7 +1845,7 @@ err_resp:
return rc;
}
-int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
@@ -1985,9 +1894,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return 0;
}
-int qed_rdma_query_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc;
@@ -2022,7 +1931,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc = 0;
@@ -2038,7 +1947,7 @@ int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
return rc;
}
-struct qed_rdma_qp *
+static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
struct qed_rdma_create_qp_in_params *in_params,
struct qed_rdma_create_qp_out_params *out_params)
@@ -2215,9 +2124,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_rdma_modify_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params)
+static int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
enum qed_roce_qp_state prev_state;
@@ -2312,8 +2221,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params)
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_register_tid_ramrod_data *p_ramrod;
@@ -2450,7 +2360,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
return rc;
}
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_deregister_tid_ramrod_data *p_ramrod;
@@ -2561,7 +2471,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
-int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+static int qed_rdma_start(void *rdma_cxt,
+ struct qed_rdma_start_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_ptt *p_ptt;
@@ -2601,7 +2512,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -2809,11 +2720,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev)
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
int rc;
- if (!cdev) {
- DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
- return -EINVAL;
- }
-
if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
return -EINVAL;
@@ -2850,7 +2756,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
int rc;
int i;
- if (!cdev || !pkt || !params) {
+ if (!pkt || !params) {
DP_ERR(cdev,
"roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
cdev, pkt, params);
@@ -2947,7 +2853,7 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.roce_ll2_stats = &qed_roce_ll2_stats,
};
-const struct qed_rdma_ops *qed_get_rdma_ops()
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
return &qed_rdma_ops_pass;
}
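The final qed_roce.c hunk adds void to an empty parameter list. In C — unlike C++ — `f()` declares a function with unspecified parameters, whereas `f(void)` promises there are none, so the compiler can reject stray arguments (and -Wstrict-prototypes stays quiet). A standalone illustration:

    const char *get_name();        /* old-style: parameters unspecified */
    const char *get_name_v(void);  /* prototype: takes no arguments */

    const char *get_name_v(void)
    {
            return "qed";
    }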
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 2f091e8a0f40..279f342af8db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -95,26 +95,6 @@ struct qed_rdma_info {
enum protocol_type proto;
};
-struct qed_rdma_resize_cq_in_params {
- u16 icid;
- u32 cq_size;
- bool pbl_two_level;
- u64 pbl_ptr;
- u16 pbl_num_pages;
- u8 pbl_page_size_log;
-};
-
-struct qed_rdma_resize_cq_out_params {
- u32 prod;
- u32 cons;
-};
-
-struct qed_rdma_resize_cnq_in_params {
- u32 cnq_id;
- u32 pbl_page_size_log;
- u64 pbl_ptr;
-};
-
struct qed_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
@@ -181,36 +161,55 @@ struct qed_rdma_qp {
dma_addr_t shared_queue_phys_addr;
};
-int
-qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params);
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
-void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
-int
-qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params);
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
-int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
-int qed_rdma_stop(void *rdma_cxt);
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
-u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe);
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
-int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params);
-int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params);
-
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet);
#else
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo,
+ bool b_last_packet) {}
#endif
#endif
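This qed_roce.h rework gathers the RoCE entry points under IS_ENABLED(CONFIG_QED_RDMA) and supplies empty static inline stubs for the disabled case, which is why the qed_spq.c hunk further below can drop its own #ifdef around the PROTOCOLID_ROCE handling: callers compile unchanged either way, and the optimizer deletes the no-op calls. The idiom in condensed form, with placeholder names:

    /* demo_feature.h */
    #include <linux/kconfig.h>

    struct demo_dev;

    #if IS_ENABLED(CONFIG_DEMO_FEATURE)
    void demo_handle_event(struct demo_dev *dev, int event);
    #else
    static inline void demo_handle_event(struct demo_dev *dev, int event) {}
    #endif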
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 652c90819758..b2c08e4d2a9b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -80,7 +80,6 @@ union ramrod_data {
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
struct rdma_create_cq_ramrod_data rdma_create_cq;
- struct rdma_resize_cq_ramrod_data rdma_resize_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index caff41544898..9fbaf9429fd0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,9 +28,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
-#endif
/***************************************************************************
* Structures & Definitions
@@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
case PROTOCOLID_ROCE:
qed_async_roce_event(p_hwfn, p_eqe);
return 0;
-#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 28dc58919c85..048a230c3ce0 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 28c0e9f42c9e..974689a13337 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -348,12 +348,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
-#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 25a9b293ee8f..7567cc464b88 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -175,16 +175,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
int tc;
- for (j = 0; j < QEDE_NUM_RQSTATS; j++)
- sprintf(buf + (k + j) * ETH_GSTRING_LEN,
- "%d: %s", i, qede_rqstats_arr[j].string);
- k += QEDE_NUM_RQSTATS;
- for (tc = 0; tc < edev->num_tc; tc++) {
- for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ for (j = 0; j < QEDE_NUM_RQSTATS; j++)
sprintf(buf + (k + j) * ETH_GSTRING_LEN,
- "%d.%d: %s", i, tc,
- qede_tqstats_arr[j].string);
- k += QEDE_NUM_TQSTATS;
+ "%d: %s", i,
+ qede_rqstats_arr[j].string);
+ k += QEDE_NUM_RQSTATS;
+ }
+
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < edev->num_tc; tc++) {
+ for (j = 0; j < QEDE_NUM_TQSTATS; j++)
+ sprintf(buf + (k + j) *
+ ETH_GSTRING_LEN,
+ "%d.%d: %s", i, tc,
+ qede_tqstats_arr[j].string);
+ k += QEDE_NUM_TQSTATS;
+ }
}
}
@@ -756,6 +763,8 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+ channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+ channels->max_tx = QEDE_MAX_RSS_CNT(edev);
channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
edev->fp_num_rx;
channels->tx_count = edev->fp_num_tx;
@@ -820,6 +829,13 @@ static int qede_set_channels(struct net_device *dev,
edev->req_queues = count;
edev->req_num_tx = channels->tx_count;
edev->req_num_rx = channels->rx_count;
+ /* Reset the indirection table if rx queue count is updated */
+ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+ edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+ memset(&edev->rss_params.rss_ind_table, 0,
+ sizeof(edev->rss_params.rss_ind_table));
+ }
+
if (netif_running(dev))
qede_reload(edev, NULL, NULL);
@@ -1053,6 +1069,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
struct qede_dev *edev = netdev_priv(dev);
int i;
+ if (edev->dev_info.common.num_hwfns > 1) {
+ DP_INFO(edev,
+ "RSS configuration is not supported for 100G devices\n");
+ return -EOPNOTSUPP;
+ }
+
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -1184,8 +1206,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
}
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
txq->sw_tx_cons++;
txq->sw_tx_ring[idx].skb = NULL;
@@ -1199,8 +1221,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
+ int i, rc = 0;
u8 *data_ptr;
- int i;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1219,46 +1241,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
* queue and that the loopback traffic is not IP.
*/
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
- if (qede_has_rx_work(rxq))
+ if (!qede_has_rx_work(rxq)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative
+ * reads of CQE/BD before reading hw_comp_cons. If the CQE is
+ * read before it is written by FW, then FW writes CQE and SB,
+ * and then the CPU reads the hw_comp_cons, it will use an old
+ * CQE.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset +
+ sw_rx_data->page_offset);
+ if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+ ether_addr_equal(data_ptr + ETH_ALEN,
+ edev->ndev->dev_addr)) {
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ rc = -1;
+ break;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
break;
- usleep_range(100, 200);
+ }
+
+ DP_INFO(edev, "Not the transmitted packet\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
- if (!qede_has_rx_work(rxq)) {
+ if (i == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ qede_update_rx_prod(edev, rxq);
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD before reading hw_comp_cons. If the CQE is read before it is
- * written by FW, then FW writes CQE and SB, and then the CPU reads the
- * hw_comp_cons, it will use an old CQE.
- */
- rmb();
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
-
- /* Get the data from the SW ring */
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- data_ptr = (u8 *)(page_address(sw_rx_data->data) +
- fp_cqe->placement_offset + sw_rx_data->page_offset);
- for (i = ETH_HLEN; i < len; i++)
- if (data_ptr[i] != (unsigned char)(i & 0xff)) {
- DP_NOTICE(edev, "Loopback test failed\n");
- qede_recycle_rx_bd_ring(rxq, edev, 1);
- return -1;
- }
-
- qede_recycle_rx_bd_ring(rxq, edev, 1);
-
- return 0;
+ return rc;
}
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
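The rewritten selftest receive path above inverts the old "wait once, then process a single CQE" flow into "poll; on each completion check whether it is the looped-back frame, otherwise recycle it and keep polling", so unrelated traffic arriving on the queue no longer fails the test. The control-flow skeleton, with placeholder helpers standing in for the qede ring operations:

    #include <linux/delay.h>
    #include <linux/types.h>

    #define DEMO_POLL_COUNT 64

    bool demo_has_work(void);               /* cf. qede_has_rx_work() */
    bool demo_consume_and_match(void);      /* true if it was our frame */

    static int demo_poll_for_frame(void)
    {
            int i;

            for (i = 0; i < DEMO_POLL_COUNT; i++) {
                    if (!demo_has_work()) {
                            usleep_range(100, 200);
                            continue;       /* nothing yet; keep waiting */
                    }

                    if (demo_consume_and_match())
                            return 0;       /* got the transmitted packet */

                    /* Foreign frame: it was recycled, poll again. */
            }

            return -1;                      /* timed out */
    }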
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 343038ca047d..85f46dbecd5b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -313,8 +313,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
split_bd_len = BD_UNMAP_LEN(split);
bds_consumed++;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
@@ -359,8 +359,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
nbd--;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < nbd; i++) {
@@ -943,8 +943,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
return 0;
}
-static inline void qede_update_rx_prod(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -2840,7 +2839,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
}
mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
- rxq->rx_buf_size, DMA_FROM_DEVICE);
+ PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev,
"Failed to map TPA replacement buffer\n");
@@ -2941,7 +2940,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
txq->num_tx_buffers = edev->q_num_tx_buffers;
/* Allocate the parallel driver ring for Tx buffers */
- size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+ size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring) {
DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
@@ -2952,7 +2951,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- NUM_TX_BDS_MAX,
+ TX_RING_SIZE,
sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
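The qede_main hunks above replace dma_unmap_page() with dma_unmap_single() for buffers that were mapped with dma_map_single(): the DMA API requires the unmap call to come from the same family as the map, and CONFIG_DMA_API_DEBUG flags mismatched pairs. A sketch of the pairing rule; device pointer and lengths are illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int demo_map(struct device *dev, void *buf, size_t len,
                        dma_addr_t *out)
    {
            dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;

            *out = addr;
            return 0;
    }

    static void demo_unmap(struct device *dev, dma_addr_t addr, size_t len)
    {
            /* Must mirror dma_map_single(); dma_unmap_page() here would
             * be an API violation even if it happens to work.
             */
            dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
    }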
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index 9ba568db576f..d7720bf92d49 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -26,6 +26,7 @@ config QCA7000
config QCOM_EMAC
tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support"
+ depends on HAS_DMA && HAS_IOMEM
select CRC32
select PHYLIB
---help---
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index e97968ed4b8f..0b4deb31e742 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt)
mac |= TXEN | RXEN; /* enable RX/TX */
- /* We don't have ethtool support yet, so force flow-control mode
- * to 'full' always.
- */
- mac |= TXFC | RXFC;
+ /* Configure MAC flow control to match the PHY's settings. */
+ if (phydev->pause)
+ mac |= RXFC;
+ if (phydev->pause != phydev->asym_pause)
+ mac |= TXFC;
/* setup link speed */
mac &= ~SPEED_MASK;
@@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt)
writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
+ /* Enable pause frames. Without this feature, the EMAC has been shown
+ * to receive (and drop) frames with FCS errors at gigabit connections.
+ */
+ adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
phy_start(adpt->phydev);
@@ -1021,14 +1028,18 @@ void emac_mac_down(struct emac_adapter *adpt)
napi_disable(&adpt->rx_q.napi);
phy_stop(adpt->phydev);
- phy_disconnect(adpt->phydev);
- /* disable mac irq */
+ /* Interrupts must be disabled before the PHY is disconnected, to
+ * avoid a race condition where adjust_link is null when we get
+ * an interrupt.
+ */
writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(0, adpt->base + EMAC_INT_MASK);
synchronize_irq(adpt->irq.irq);
free_irq(adpt->irq.irq, &adpt->irq);
+ phy_disconnect(adpt->phydev);
+
emac_mac_reset(adpt);
emac_tx_q_descs_free(adpt);
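After the emac-mac change above, flow control tracks the PHY's resolved pause bits instead of being forced on: the resolved pause bit enables receive flow control, and transmit flow control is enabled when pause and asym_pause differ. The mapping restated compactly; txfc/rxfc stand for the EMAC register bits used in the hunk:

    #include <linux/types.h>

    static u32 demo_fc_bits(bool pause, bool asym_pause, u32 txfc, u32 rxfc)
    {
            u32 mac = 0;

            if (pause)
                    mac |= rxfc;    /* honour pause frames we receive */
            if (pause != asym_pause)
                    mac |= txfc;    /* we may transmit pause frames */

            return mac;
    }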
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 75c1b530e39e..72fe343c7a36 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = {
/* CDR Settings */
{EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
- {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)},
+ {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
{EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
/* TX/RX Settings */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 9bf3b2b82e95..4fede4b86538 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -575,6 +575,7 @@ static const struct of_device_id emac_dt_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, emac_dt_match);
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e55638c7505a..bf000d819a21 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8273,7 +8273,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((sizeof(dma_addr_t) > 4) &&
(use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
if (!pci_is_pcie(pdev))
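The r8169 hunk makes 64-bit DMA conditional on the coherent (consistent) mask being accepted as well, not just the streaming mask; descriptor rings use coherent allocations, so the device must honour both. A sketch using the combined helper that sets the pair in one call (assuming a struct pci_dev *pdev):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int demo_set_64bit_dma(struct pci_dev *pdev)
    {
            /* Sets both the streaming and coherent masks; equivalent
             * to the pci_set_dma_mask() + pci_set_consistent_dma_mask()
             * pair in the hunk above.
             */
            return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    }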
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 5424fb341613..24b746406bc7 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1471,7 +1471,7 @@ static int rocker_world_check_init(struct rocker_port *rocker_port)
if (rocker->wops) {
if (rocker->wops->mode != mode) {
dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
- return err;
+ return -EINVAL;
}
return 0;
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 431a60804272..4ca461322d60 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1493,8 +1493,6 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
- if (found)
- *index = found->index;
updating = found && adding;
removing = found && !adding;
@@ -1508,9 +1506,11 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
resolved = false;
} else if (removing) {
ofdpa_neigh_del(trans, found);
+ *index = found->index;
} else if (updating) {
ofdpa_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
+ *index = found->index;
} else {
err = -ENOENT;
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3cf3557106c2..6b89e4a7b164 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel)
*channel = *old_channel;
channel->napi_dev = NULL;
+ INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+ channel->napi_str.napi_id = 0;
+ channel->napi_str.state = 0;
memset(&channel->eventq, 0, sizeof(channel->eventq));
for (j = 0; j < EFX_TXQ_TYPES; j++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index c732b8ce2528..4b78168a5f3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -61,13 +61,13 @@ config DWMAC_LPC18XX
config DWMAC_MESON
tristate "Amlogic Meson dwmac support"
default ARCH_MESON
- depends on OF && (ARCH_MESON || COMPILE_TEST)
+ depends on OF && COMMON_CLK && (ARCH_MESON || COMPILE_TEST)
help
Support for Ethernet controller on Amlogic Meson SoCs.
This selects the Amlogic Meson SoC glue layer support for
- the stmmac device driver. This driver is used for Meson6 and
- Meson8 SoCs.
+ the stmmac device driver. This driver is used for Meson6,
+ Meson8, Meson8b and GXBB SoCs.
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
@@ -107,7 +107,7 @@ config DWMAC_STI
config DWMAC_STM32
tristate "STM32 DWMAC support"
default ARCH_STM32
- depends on OF && HAS_IOMEM
+ depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for ethernet controller on STM32 SOCs.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index f0c9396fa28e..5d6ece5919b3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -9,7 +9,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
-obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
+obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 2920e2ee3864..489ef146201e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -63,8 +63,8 @@
#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40
#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003
#define TSE_PCS_SW_RESET_TIMEOUT 100
-#define TSE_PCS_USE_SGMII_AN_MASK BIT(2)
-#define TSE_PCS_USE_SGMII_ENA BIT(1)
+#define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
+#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_DISABLE 0x0001
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index d3292c4a6eda..6d2de4e01f6d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -120,14 +120,17 @@ struct stmmac_extra_stats {
unsigned long ip_csum_bypassed;
unsigned long ipv4_pkt_rcvd;
unsigned long ipv6_pkt_rcvd;
- unsigned long rx_msg_type_ext_no_ptp;
- unsigned long rx_msg_type_sync;
- unsigned long rx_msg_type_follow_up;
- unsigned long rx_msg_type_delay_req;
- unsigned long rx_msg_type_delay_resp;
- unsigned long rx_msg_type_pdelay_req;
- unsigned long rx_msg_type_pdelay_resp;
- unsigned long rx_msg_type_pdelay_follow_up;
+ unsigned long no_ptp_rx_msg_type_ext;
+ unsigned long ptp_rx_msg_type_sync;
+ unsigned long ptp_rx_msg_type_follow_up;
+ unsigned long ptp_rx_msg_type_delay_req;
+ unsigned long ptp_rx_msg_type_delay_resp;
+ unsigned long ptp_rx_msg_type_pdelay_req;
+ unsigned long ptp_rx_msg_type_pdelay_resp;
+ unsigned long ptp_rx_msg_type_pdelay_follow_up;
+ unsigned long ptp_rx_msg_type_announce;
+ unsigned long ptp_rx_msg_type_management;
+ unsigned long ptp_rx_msg_pkt_reserved_type;
unsigned long ptp_frame_type;
unsigned long ptp_ver;
unsigned long timestamp_dropped;
@@ -482,11 +485,12 @@ struct stmmac_ops {
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
- u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate);
+ u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+ int gmac4);
int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
int (*config_addend) (void __iomem *ioaddr, u32 addend);
int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
- int add_sub);
+ int add_sub, int gmac4);
u64(*get_systime) (void __iomem *ioaddr);
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 2e4c171a2b41..e3c86d422109 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -155,14 +155,18 @@
#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26)
/* Extended RDES4 message type definitions */
-#define RDES_EXT_NO_PTP 0
-#define RDES_EXT_SYNC 1
-#define RDES_EXT_FOLLOW_UP 2
-#define RDES_EXT_DELAY_REQ 3
-#define RDES_EXT_DELAY_RESP 4
-#define RDES_EXT_PDELAY_REQ 5
-#define RDES_EXT_PDELAY_RESP 6
-#define RDES_EXT_PDELAY_FOLLOW_UP 7
+#define RDES_EXT_NO_PTP 0x0
+#define RDES_EXT_SYNC 0x1
+#define RDES_EXT_FOLLOW_UP 0x2
+#define RDES_EXT_DELAY_REQ 0x3
+#define RDES_EXT_DELAY_RESP 0x4
+#define RDES_EXT_PDELAY_REQ 0x5
+#define RDES_EXT_PDELAY_RESP 0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
+#define RDES_PTP_ANNOUNCE 0x8
+#define RDES_PTP_MANAGEMENT 0x9
+#define RDES_PTP_SIGNALING 0xa
+#define RDES_PTP_PKT_RESERVED_TYPE 0xf
/* Basic descriptor structure for normal and alternate descriptors */
struct dma_desc {
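
The widened define list tracks the full 4-bit IEEE 1588-2008 messageType field, so Announce (0x8), Management (0x9), Signaling (0xa) and the reserved pattern (0xf) become distinguishable instead of all falling through to the "no PTP" bucket. A sketch of the decode that the per-type counters imply (the string table is illustrative only):

static const char *rdes_msg_type_name(unsigned int type)
{
    switch (type) {
    case 0x0: return "no_ptp";
    case 0x1: return "sync";
    case 0x2: return "follow_up";
    case 0x3: return "delay_req";
    case 0x4: return "delay_resp";
    case 0x5: return "pdelay_req";
    case 0x6: return "pdelay_resp";
    case 0x7: return "pdelay_follow_up";
    case 0x8: return "announce";
    case 0x9: return "management";
    case 0xa: return "signaling";
    case 0xf: return "reserved";
    default:  return "unknown";
    }
}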
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index c1bac1912b37..309d99536a2c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -1,5 +1,5 @@
/*
- * Amlogic Meson DWMAC glue layer
+ * Amlogic Meson6 and Meson8 DWMAC glue layer
*
* Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
*
@@ -96,5 +96,5 @@ static struct platform_driver meson6_dwmac_driver = {
module_platform_driver(meson6_dwmac_driver);
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
-MODULE_DESCRIPTION("Amlogic Meson DWMAC glue layer");
+MODULE_DESCRIPTION("Amlogic Meson6 and Meson8 DWMAC glue layer");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
new file mode 100644
index 000000000000..250e4ceafc8d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -0,0 +1,324 @@
+/*
+ * Amlogic Meson8b and GXBB DWMAC glue layer
+ *
+ * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define PRG_ETH0 0x0
+
+#define PRG_ETH0_RGMII_MODE BIT(0)
+
+/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
+#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
+#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
+
+#define PRG_ETH0_TXDLY_SHIFT 5
+#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5)
+#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT)
+#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT)
+#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT)
+#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT)
+
+/* divider for the result of m250_sel */
+#define PRG_ETH0_CLK_M250_DIV_SHIFT 7
+#define PRG_ETH0_CLK_M250_DIV_WIDTH 3
+
+/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */
+#define PRG_ETH0_CLK_M25_DIV_SHIFT 10
+#define PRG_ETH0_CLK_M25_DIV_WIDTH 1
+
+#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
+#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
+
+#define MUX_CLK_NUM_PARENTS 2
+
+struct meson8b_dwmac {
+ struct platform_device *pdev;
+
+ void __iomem *regs;
+
+ phy_interface_t phy_mode;
+
+ struct clk_mux m250_mux;
+ struct clk *m250_mux_clk;
+ struct clk *m250_mux_parent[MUX_CLK_NUM_PARENTS];
+
+ struct clk_divider m250_div;
+ struct clk *m250_div_clk;
+
+ struct clk_divider m25_div;
+ struct clk *m25_div_clk;
+};
+
+static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
+ u32 mask, u32 value)
+{
+ u32 data;
+
+ data = readl(dwmac->regs + reg);
+ data &= ~mask;
+ data |= (value & mask);
+
+ writel(data, dwmac->regs + reg);
+}
+
+static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
+{
+ struct clk_init_data init;
+ int i, ret;
+ struct device *dev = &dwmac->pdev->dev;
+ char clk_name[32];
+ const char *clk_div_parents[1];
+ const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
+ static struct clk_div_table clk_25m_div_table[] = {
+ { .val = 0, .div = 5 },
+ { .val = 1, .div = 10 },
+ { /* sentinel */ },
+ };
+
+ /* get the mux parents from DT */
+ for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
+ char name[16];
+
+ snprintf(name, sizeof(name), "clkin%d", i);
+ dwmac->m250_mux_parent[i] = devm_clk_get(dev, name);
+ if (IS_ERR(dwmac->m250_mux_parent[i])) {
+ ret = PTR_ERR(dwmac->m250_mux_parent[i]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Missing clock %s\n", name);
+ return ret;
+ }
+
+ mux_parent_names[i] =
+ __clk_get_name(dwmac->m250_mux_parent[i]);
+ }
+
+ /* create the m250_mux */
+ snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
+ init.name = clk_name;
+ init.ops = &clk_mux_ops;
+ init.flags = 0;
+ init.parent_names = mux_parent_names;
+ init.num_parents = MUX_CLK_NUM_PARENTS;
+
+ dwmac->m250_mux.reg = dwmac->regs + PRG_ETH0;
+ dwmac->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
+ dwmac->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
+ dwmac->m250_mux.flags = 0;
+ dwmac->m250_mux.table = NULL;
+ dwmac->m250_mux.hw.init = &init;
+
+ dwmac->m250_mux_clk = devm_clk_register(dev, &dwmac->m250_mux.hw);
+ if (WARN_ON(IS_ERR(dwmac->m250_mux_clk)))
+ return PTR_ERR(dwmac->m250_mux_clk);
+
+ /* create the m250_div */
+ snprintf(clk_name, sizeof(clk_name), "%s#m250_div", dev_name(dev));
+ init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(dwmac->m250_mux_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ dwmac->m250_div.reg = dwmac->regs + PRG_ETH0;
+ dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
+ dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
+ dwmac->m250_div.hw.init = &init;
+ dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+
+ dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
+ if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
+ return PTR_ERR(dwmac->m250_div_clk);
+
+ /* create the m25_div */
+ snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev));
+ init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk);
+ init.parent_names = clk_div_parents;
+ init.num_parents = ARRAY_SIZE(clk_div_parents);
+
+ dwmac->m25_div.reg = dwmac->regs + PRG_ETH0;
+ dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT;
+ dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH;
+ dwmac->m25_div.table = clk_25m_div_table;
+ dwmac->m25_div.hw.init = &init;
+ dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+
+ dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw);
+ if (WARN_ON(IS_ERR(dwmac->m25_div_clk)))
+ return PTR_ERR(dwmac->m25_div_clk);
+
+ return 0;
+}
+
+static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+{
+ int ret;
+ unsigned long clk_rate;
+
+ switch (dwmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* Generate a 25MHz clock for the PHY */
+ clk_rate = 25 * 1000 * 1000;
+
+ /* enable RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
+ PRG_ETH0_RGMII_MODE);
+
+ /* only relevant for RMII mode -> disable in RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_INVERTED_RMII_CLK, 0);
+
+ /* TX clock delay - all known boards use a 1/4 cycle delay */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
+ PRG_ETH0_TXDLY_QUARTER);
+ break;
+
+ case PHY_INTERFACE_MODE_RMII:
+ /* Use the rate of the mux clock for the internal RMII PHY */
+ clk_rate = clk_get_rate(dwmac->m250_mux_clk);
+
+ /* disable RGMII mode -> enables RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE,
+ 0);
+
+ /* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_INVERTED_RMII_CLK,
+ PRG_ETH0_INVERTED_RMII_CLK);
+
+ /* TX clock delay cannot be configured in RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
+ 0);
+
+ break;
+
+ default:
+ dev_err(&dwmac->pdev->dev, "unsupported phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(dwmac->m25_div_clk);
+ if (ret) {
+ dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n");
+ return ret;
+ }
+
+ ret = clk_set_rate(dwmac->m25_div_clk, clk_rate);
+ if (ret) {
+ clk_disable_unprepare(dwmac->m25_div_clk);
+
+ dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n");
+ return ret;
+ }
+
+ /* enable TX_CLK and PHY_REF_CLK generator */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK,
+ PRG_ETH0_TX_AND_PHY_REF_CLK);
+
+ return 0;
+}
+
+static int meson8b_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct resource *res;
+ struct meson8b_dwmac *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dwmac->regs))
+ return PTR_ERR(dwmac->regs);
+
+ dwmac->pdev = pdev;
+ dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ if (dwmac->phy_mode < 0) {
+ dev_err(&pdev->dev, "missing phy-mode property\n");
+ return -EINVAL;
+ }
+
+ ret = meson8b_init_clk(dwmac);
+ if (ret)
+ return ret;
+
+ ret = meson8b_init_prg_eth(dwmac);
+ if (ret)
+ return ret;
+
+ plat_dat->bsp_priv = dwmac;
+
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
+static int meson8b_dwmac_remove(struct platform_device *pdev)
+{
+ struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+
+ clk_disable_unprepare(dwmac->m25_div_clk);
+
+ return stmmac_pltfr_remove(pdev);
+}
+
+static const struct of_device_id meson8b_dwmac_match[] = {
+ { .compatible = "amlogic,meson8b-dwmac" },
+ { .compatible = "amlogic,meson-gxbb-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
+
+static struct platform_driver meson8b_dwmac_driver = {
+ .probe = meson8b_dwmac_probe,
+ .remove = meson8b_dwmac_remove,
+ .driver = {
+ .name = "meson8b-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = meson8b_dwmac_match,
+ },
+};
+module_platform_driver(meson8b_dwmac_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Amlogic Meson8b and GXBB DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
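
The new glue driver assembles a three-stage clock chain in PRG_ETH0 — input mux (bit 4), a 3-bit one-based divider (bits 7-9), then a /5-or-/10 divider (bit 10) — and requests 25 MHz from the last stage in RGMII mode. A runnable back-of-the-envelope check; the 1 GHz mux input is an assumption for illustration, the real parent rates come from the "clkin" clocks in DT:

#include <stdio.h>

int main(void)
{
    unsigned long clkin = 1000000000UL;  /* assumed mux output */
    unsigned long m250 = clkin / 4;      /* m250_div: one-based value 4 */
    unsigned long m25 = m250 / 10;       /* m25_div table: val 1 -> div 10 */

    printf("m250 = %lu Hz, m25 = %lu Hz\n", m250, m25);  /* 25 MHz */
    return 0;
}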
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4ec7397e7fb3..a601f8d43b75 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -123,22 +123,29 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
x->ipv4_pkt_rcvd++;
if (rdes1 & RDES1_IPV6_HEADER)
x->ipv6_pkt_rcvd++;
- if (message_type == RDES_EXT_SYNC)
- x->rx_msg_type_sync++;
+
+ if (message_type == RDES_EXT_NO_PTP)
+ x->no_ptp_rx_msg_type_ext++;
+ else if (message_type == RDES_EXT_SYNC)
+ x->ptp_rx_msg_type_sync++;
else if (message_type == RDES_EXT_FOLLOW_UP)
- x->rx_msg_type_follow_up++;
+ x->ptp_rx_msg_type_follow_up++;
else if (message_type == RDES_EXT_DELAY_REQ)
- x->rx_msg_type_delay_req++;
+ x->ptp_rx_msg_type_delay_req++;
else if (message_type == RDES_EXT_DELAY_RESP)
- x->rx_msg_type_delay_resp++;
+ x->ptp_rx_msg_type_delay_resp++;
else if (message_type == RDES_EXT_PDELAY_REQ)
- x->rx_msg_type_pdelay_req++;
+ x->ptp_rx_msg_type_pdelay_req++;
else if (message_type == RDES_EXT_PDELAY_RESP)
- x->rx_msg_type_pdelay_resp++;
+ x->ptp_rx_msg_type_pdelay_resp++;
else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
- x->rx_msg_type_pdelay_follow_up++;
- else
- x->rx_msg_type_ext_no_ptp++;
+ x->ptp_rx_msg_type_pdelay_follow_up++;
+ else if (message_type == RDES_PTP_ANNOUNCE)
+ x->ptp_rx_msg_type_announce++;
+ else if (message_type == RDES_PTP_MANAGEMENT)
+ x->ptp_rx_msg_type_management++;
+ else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+ x->ptp_rx_msg_pkt_reserved_type++;
if (rdes1 & RDES1_PTP_PACKET_TYPE)
x->ptp_frame_type++;
@@ -204,14 +211,18 @@ static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{
- return (p->des3 & TDES3_TIMESTAMP_STATUS)
- >> TDES3_TIMESTAMP_STATUS_SHIFT;
+ /* Context type from W/B descriptor must be zero */
+ if (p->des3 & TDES3_CONTEXT_TYPE)
+ return -EINVAL;
+
+ /* Tx Timestamp Status is 1 so des0 and des1 will have valid values */
+ if (p->des3 & TDES3_TIMESTAMP_STATUS)
+ return 0;
+
+ return 1;
}
-/* NOTE: For RX CTX bit has to be checked before
- * HAVE a specific function for TX and another one for RX
- */
-static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
u64 ns;
@@ -223,12 +234,54 @@ static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
return ns;
}
-static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_rx_check_timestamp(void *desc)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u32 own, ctxt;
+ int ret = 1;
+
+ own = p->des3 & RDES3_OWN;
+ ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+ >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+
+ if (likely(!own && ctxt)) {
+ if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ /* Corrupted value */
+ ret = -EINVAL;
+ else
+ /* A valid Timestamp is ready to be read */
+ ret = 0;
+ }
+
+ /* Timestamp not ready */
+ return ret;
+}
+
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
+ int ret = -EINVAL;
+
+ /* Get the status from normal w/b descriptor */
+ if (likely(p->des3 & TDES3_RS1V)) {
+ if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
+ int i = 0;
+
+ /* Check if timestamp is OK from context descriptor */
+ do {
+ ret = dwmac4_rx_check_timestamp(desc);
+ if (ret < 0)
+ goto exit;
+ i++;
- return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
- >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+ } while ((ret == 1) || (i < 10));
+
+ if (i == 10)
+ ret = -EBUSY;
+ }
+ }
+exit:
+ return ret;
}
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -347,10 +400,9 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
for (i = 0; i < size; i++) {
- if (p->des0)
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(p),
- p->des0, p->des1, p->des2, p->des3);
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
}
@@ -374,8 +426,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
- .get_timestamp = dwmac4_wrback_get_timestamp,
- .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+ .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
+ .get_timestamp = dwmac4_get_timestamp,
.set_tx_ic = dwmac4_rd_set_tx_ic,
.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
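
dwmac4_wrback_get_rx_timestamp_status() now re-reads the context descriptor via dwmac4_rx_check_timestamp() until a valid snapshot appears, giving up after ten attempts. A generic bounded-poll sketch of the same shape, with check() standing in for the descriptor probe (<0 error, 0 ready, 1 not ready):

static int poll_until_ready(int (*check)(void *), void *arg, int max_tries)
{
    int i, ret = 1;

    for (i = 0; i < max_tries && ret == 1; i++) {
        ret = check(arg);
        if (ret < 0)
            return ret;          /* hard error, stop immediately */
    }
    return ret == 0 ? 0 : -1;    /* -1: still not ready after max_tries */
}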
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 0902a2edeaa9..9736c505211a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -59,10 +59,13 @@
#define TDES3_CTXT_TCMSSV BIT(26)
/* TDES3 Common */
+#define TDES3_RS1V BIT(26)
+#define TDES3_RS1V_SHIFT 26
#define TDES3_LAST_DESCRIPTOR BIT(28)
#define TDES3_LAST_DESCRIPTOR_SHIFT 28
#define TDES3_FIRST_DESCRIPTOR BIT(29)
#define TDES3_CONTEXT_TYPE BIT(30)
+#define TDES3_CONTEXT_TYPE_SHIFT 30
/* TDS3 use for both format (read and write back) */
#define TDES3_OWN BIT(31)
@@ -117,6 +120,7 @@
#define RDES3_LAST_DESCRIPTOR BIT(28)
#define RDES3_FIRST_DESCRIPTOR BIT(29)
#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
+#define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30
/* RDES3 (read format) */
#define RDES3_BUFFER1_VALID_ADDR BIT(24)
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 38f19c99cf59..e75549327c34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,22 +150,30 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
x->ipv4_pkt_rcvd++;
if (rdes4 & ERDES4_IPV6_PKT_RCVD)
x->ipv6_pkt_rcvd++;
- if (message_type == RDES_EXT_SYNC)
- x->rx_msg_type_sync++;
+
+ if (message_type == RDES_EXT_NO_PTP)
+ x->no_ptp_rx_msg_type_ext++;
+ else if (message_type == RDES_EXT_SYNC)
+ x->ptp_rx_msg_type_sync++;
else if (message_type == RDES_EXT_FOLLOW_UP)
- x->rx_msg_type_follow_up++;
+ x->ptp_rx_msg_type_follow_up++;
else if (message_type == RDES_EXT_DELAY_REQ)
- x->rx_msg_type_delay_req++;
+ x->ptp_rx_msg_type_delay_req++;
else if (message_type == RDES_EXT_DELAY_RESP)
- x->rx_msg_type_delay_resp++;
+ x->ptp_rx_msg_type_delay_resp++;
else if (message_type == RDES_EXT_PDELAY_REQ)
- x->rx_msg_type_pdelay_req++;
+ x->ptp_rx_msg_type_pdelay_req++;
else if (message_type == RDES_EXT_PDELAY_RESP)
- x->rx_msg_type_pdelay_resp++;
+ x->ptp_rx_msg_type_pdelay_resp++;
else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
- x->rx_msg_type_pdelay_follow_up++;
- else
- x->rx_msg_type_ext_no_ptp++;
+ x->ptp_rx_msg_type_pdelay_follow_up++;
+ else if (message_type == RDES_PTP_ANNOUNCE)
+ x->ptp_rx_msg_type_announce++;
+ else if (message_type == RDES_PTP_MANAGEMENT)
+ x->ptp_rx_msg_type_management++;
+ else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+ x->ptp_rx_msg_pkt_reserved_type++;
+
if (rdes4 & ERDES4_PTP_FRAME_TYPE)
x->ptp_frame_type++;
if (rdes4 & ERDES4_PTP_VER)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8dc9056c1001..4d2a759b8465 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -129,6 +129,7 @@ struct stmmac_priv {
int irq_wake;
spinlock_t ptp_lock;
void __iomem *mmcaddr;
+ void __iomem *ptpaddr;
u32 rx_tail_addr;
u32 tx_tail_addr;
u32 mss;
@@ -145,7 +146,7 @@ int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1e06173fc9d7..c5d0142adda2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -115,14 +115,17 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(ip_csum_bypassed),
STMMAC_STAT(ipv4_pkt_rcvd),
STMMAC_STAT(ipv6_pkt_rcvd),
- STMMAC_STAT(rx_msg_type_ext_no_ptp),
- STMMAC_STAT(rx_msg_type_sync),
- STMMAC_STAT(rx_msg_type_follow_up),
- STMMAC_STAT(rx_msg_type_delay_req),
- STMMAC_STAT(rx_msg_type_delay_resp),
- STMMAC_STAT(rx_msg_type_pdelay_req),
- STMMAC_STAT(rx_msg_type_pdelay_resp),
- STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(no_ptp_rx_msg_type_ext),
+ STMMAC_STAT(ptp_rx_msg_type_sync),
+ STMMAC_STAT(ptp_rx_msg_type_follow_up),
+ STMMAC_STAT(ptp_rx_msg_type_delay_req),
+ STMMAC_STAT(ptp_rx_msg_type_delay_resp),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(ptp_rx_msg_type_announce),
+ STMMAC_STAT(ptp_rx_msg_type_management),
+ STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
STMMAC_STAT(ptp_frame_type),
STMMAC_STAT(ptp_ver),
STMMAC_STAT(timestamp_dropped),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index a77f68918010..10d6059b2f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -34,21 +34,29 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
}
static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
- u32 ptp_clock)
+ u32 ptp_clock, int gmac4)
{
u32 value = readl(ioaddr + PTP_TCR);
unsigned long data;
- /* Convert the ptp_clock to nano second
- * formula = (2/ptp_clock) * 1000000000
- * where, ptp_clock = 50MHz.
+ /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
+ * formula = (1/ptp_clock) * 1000000000
+ * where ptp_clock is 50MHz if the fine method is used to update the system time
*/
- data = (2000000000ULL / ptp_clock);
+ if (value & PTP_TCR_TSCFUPDT)
+ data = (1000000000ULL / 50000000);
+ else
+ data = (1000000000ULL / ptp_clock);
/* 0.465ns accuracy */
if (!(value & PTP_TCR_TSCTRLSSR))
data = (data * 1000) / 465;
+ data &= PTP_SSIR_SSINC_MASK;
+
+ if (gmac4)
+ data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+
writel(data, ioaddr + PTP_SSIR);
return data;
@@ -104,14 +112,30 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
}
static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
- int add_sub)
+ int add_sub, int gmac4)
{
u32 value;
int limit;
+ if (add_sub) {
+ /* If the new sec value needs to be subtracted with
+ * the system time, then MAC_STSUR reg should be
+ * programmed with (2^32 – <new_sec_value>)
+ */
+ if (gmac4)
+ sec = (100000000ULL - sec);
+
+ value = readl(ioaddr + PTP_TCR);
+ if (value & PTP_TCR_TSCTRLSSR)
+ nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec);
+ else
+ nsec = (PTP_BINARY_ROLLOVER_MODE - nsec);
+ }
+
writel(sec, ioaddr + PTP_STSUR);
- writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
- ioaddr + PTP_STNSUR);
+ value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec;
+ writel(value, ioaddr + PTP_STNSUR);
+
/* issue command to initialize the system time value */
value = readl(ioaddr + PTP_TCR);
value |= PTP_TCR_TSUPDT;
@@ -134,8 +158,9 @@ static u64 stmmac_get_systime(void __iomem *ioaddr)
{
u64 ns;
+ /* Get the TSSS value */
ns = readl(ioaddr + PTP_STNSR);
- /* convert sec time value to nanosecond */
+ /* Get the TSS and convert sec time value to nanosecond */
ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
return ns;
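
With fine correction (PTP_TCR_TSCFUPDT) the sub-second increment is pinned to 1/50 MHz = 20 ns; if digital rollover (PTP_TCR_TSCTRLSSR) is off the value is rescaled into 0.465 ns units (20 * 1000 / 465 = 43), and on GMAC4 the result lands in bits 16-23 of PTP_SSIR. A runnable check of that arithmetic, mirroring the code above:

#include <stdio.h>

int main(void)
{
    unsigned long data = 1000000000UL / 50000000UL;  /* fine method: 20 ns */
    int tsctrlssr = 1;                               /* digital rollover on */
    int gmac4 = 1;

    if (!tsctrlssr)
        data = (data * 1000) / 465;   /* 0.465 ns units -> 43 */
    data &= 0xff;                     /* PTP_SSIR_SSINC_MASK */
    if (gmac4)
        data <<= 16;                  /* GMAC4_PTP_SSIR_SSINC_SHIFT */
    printf("SSIR = 0x%lx\n", data);   /* 0x140000 */
    return 0;
}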
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4c8c60af7985..1f9ec02fa7f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -340,18 +340,17 @@ out:
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
* @skb : the socket buffer
* Description :
* This function will read the timestamp from the descriptor and pass it to
* the stack, and also perform some sanity checks.
*/
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
- unsigned int entry, struct sk_buff *skb)
+ struct dma_desc *p, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamp;
u64 ns;
- void *desc = NULL;
if (!priv->hwts_tx_en)
return;
@@ -360,58 +359,55 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
return;
- if (priv->adv_ts)
- desc = (priv->dma_etx + entry);
- else
- desc = (priv->dma_tx + entry);
-
/* check tx tstamp status */
- if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
- return;
+ if (!priv->hw->desc->get_tx_timestamp_status(p)) {
+ /* get the valid tstamp */
+ ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- /* get the valid tstamp */
- ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
- memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamp.hwtstamp = ns_to_ktime(ns);
- /* pass tstamp to stack */
- skb_tstamp_tx(skb, &shhwtstamp);
+ netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+ }
return;
}
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
* @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
+ * @np : next descriptor pointer
* @skb : the socket buffer
* Description :
* This function will read the received packet's timestamp from the descriptor
* and pass it to the stack. It also performs some sanity checks.
*/
-static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
- unsigned int entry, struct sk_buff *skb)
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
+ struct dma_desc *np, struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shhwtstamp = NULL;
u64 ns;
- void *desc = NULL;
if (!priv->hwts_rx_en)
return;
- if (priv->adv_ts)
- desc = (priv->dma_erx + entry);
- else
- desc = (priv->dma_rx + entry);
-
- /* exit if rx tstamp is not valid */
- if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
- return;
+ /* Check if timestamp is available */
+ if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+ /* For GMAC4, the valid timestamp is from CTX next desc. */
+ if (priv->plat->has_gmac4)
+ ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
+ else
+ ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- /* get valid tstamp */
- ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
- shhwtstamp = skb_hwtstamps(skb);
- memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ } else {
+ netdev_err(priv->dev, "cannot get RX hw timestamp\n");
+ }
}
/**
@@ -598,17 +594,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
if (!priv->hwts_tx_en && !priv->hwts_rx_en)
- priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
+ priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
else {
value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
tstamp_all | ptp_v2 | ptp_over_ethernet |
ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
ts_master_en | snap_type_sel);
- priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
+ priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
/* program Sub Second Increment reg */
sec_inc = priv->hw->ptp->config_sub_second_increment(
- priv->ioaddr, priv->clk_ptp_rate);
+ priv->ptpaddr, priv->clk_ptp_rate,
+ priv->plat->has_gmac4);
temp = div_u64(1000000000ULL, sec_inc);
/* calculate default added value:
@@ -618,14 +615,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
*/
temp = (u64)(temp << 32);
priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
- priv->hw->ptp->config_addend(priv->ioaddr,
+ priv->hw->ptp->config_addend(priv->ptpaddr,
priv->default_addend);
/* initialize system time */
ktime_get_real_ts64(&now);
/* lower 32 bits of tv_sec are safe until y2106 */
- priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
+ priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
now.tv_nsec);
}
@@ -650,26 +647,35 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (IS_ERR(priv->clk_ptp_ref)) {
priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
priv->clk_ptp_ref = NULL;
+ netdev_dbg(priv->dev, "PTP uses main clock\n");
} else {
clk_prepare_enable(priv->clk_ptp_ref);
priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+ netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate);
}
priv->adv_ts = 0;
- if (priv->dma_cap.atime_stamp && priv->extend_desc)
+ /* Check if adv_ts can be enabled for dwmac 4.x core */
+ if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
+ priv->adv_ts = 1;
+ /* Dwmac 3.x core with extend_desc can support adv_ts */
+ else if (priv->extend_desc && priv->dma_cap.atime_stamp)
priv->adv_ts = 1;
- if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
- pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+ if (priv->dma_cap.time_stamp)
+ netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
- if (netif_msg_hw(priv) && priv->adv_ts)
- pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
+ if (priv->adv_ts)
+ netdev_info(priv->dev,
+ "IEEE 1588-2008 Advanced Timestamp supported\n");
priv->hw->ptp = &stmmac_ptp;
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- return stmmac_ptp_register(priv);
+ stmmac_ptp_register(priv);
+
+ return 0;
}
static void stmmac_release_ptp(struct stmmac_priv *priv)
@@ -871,6 +877,13 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
+ /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
+ * subsequent PHY polling; make sure we force a link transition if
+ * we have a UP/DOWN/UP transition
+ */
+ if (phydev->is_pseudo_fixed_link)
+ phydev->irq = PHY_POLL;
+
pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
" Link = %d\n", dev->name, phydev->phy_id, phydev->link);
@@ -1324,7 +1337,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
}
- stmmac_get_tx_hwtstamp(priv, entry, skb);
+ stmmac_get_tx_hwtstamp(priv, p, skb);
}
if (likely(priv->tx_skbuff_dma[entry].buf)) {
@@ -1470,10 +1483,13 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
- else
+ } else {
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+ }
dwmac_mmc_intr_all_mask(priv->mmcaddr);
@@ -1702,8 +1718,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (init_ptp) {
ret = stmmac_init_ptp(priv);
- if (ret && ret != -EOPNOTSUPP)
- pr_warn("%s: failed PTP initialisation\n", __func__);
+ if (ret)
+ netdev_warn(priv->dev, "failed to init PTP\n");
}
#ifdef CONFIG_DEBUG_FS
@@ -2468,7 +2484,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (netif_msg_rx_status(priv)) {
void *rx_head;
- pr_debug("%s: descriptor ring:\n", __func__);
+ pr_info(">>>>>> %s: descriptor ring:\n", __func__);
if (priv->extend_desc)
rx_head = (void *)priv->dma_erx;
else
@@ -2479,6 +2495,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
while (count < limit) {
int status;
struct dma_desc *p;
+ struct dma_desc *np;
if (priv->extend_desc)
p = (struct dma_desc *)(priv->dma_erx + entry);
@@ -2498,9 +2515,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
next_entry = priv->cur_rx;
if (priv->extend_desc)
- prefetch(priv->dma_erx + next_entry);
+ np = (struct dma_desc *)(priv->dma_erx + next_entry);
else
- prefetch(priv->dma_rx + next_entry);
+ np = priv->dma_rx + next_entry;
+
+ prefetch(np);
if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
priv->hw->desc->rx_extended_status(&priv->dev->stats,
@@ -2552,7 +2571,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) {
- pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+ pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
p, entry, des);
if (frame_len > ETH_FRAME_LEN)
pr_debug("\tframe size %d, COE: %d\n",
@@ -2609,13 +2628,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
DMA_FROM_DEVICE);
}
- stmmac_get_rx_hwtstamp(priv, entry, skb);
-
if (netif_msg_pktdata(priv)) {
pr_debug("frame received (%dbytes)", frame_len);
print_pkt(skb->data, frame_len);
}
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+
stmmac_rx_vlan(priv->dev, skb);
skb->protocol = eth_type_trans(skb, priv->dev);
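
stmmac_hwtstamp_ioctl() above derives the default addend from the sub-second increment: a 20 ns increment means a 10^9/20 = 50 MHz target, and addend = (target << 32) / clk_ptp_rate. A runnable example; the 62.5 MHz ptp clock rate is an assumed, board-specific value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t sec_inc = 20;                    /* ns, from PTP_SSIR */
    uint64_t clk_ptp_rate = 62500000;         /* assumed board rate */
    uint64_t temp = 1000000000ULL / sec_inc;  /* 50 MHz target */
    uint32_t addend = (uint32_t)((temp << 32) / clk_ptp_rate);

    printf("default_addend = 0x%x\n", (unsigned)addend);  /* 0xcccccccc */
    return 0;
}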
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index ffeb8d9e2b2e..64e147f53a9c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -30,4 +30,12 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
int stmmac_pltfr_remove(struct platform_device *pdev);
extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
+static inline void *get_stmmac_bsp_priv(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ return priv->plat->bsp_priv;
+}
+
#endif /* __STMMAC_PLATFORM_H__ */
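
get_stmmac_bsp_priv() lets glue drivers recover their private state from a bare struct device by walking drvdata -> netdev private area -> plat->bsp_priv. A userspace analogue of that pointer chain (all types here are simplified stand-ins; in the kernel, netdev_priv() is an offset, not a stored pointer):

#include <stdio.h>

struct plat_data { void *bsp_priv; };
struct priv { struct plat_data *plat; };
struct net_device { void *priv; };
struct device { void *drvdata; };

static void *get_bsp_priv(struct device *dev)
{
    struct net_device *ndev = dev->drvdata;
    struct priv *priv = ndev->priv;

    return priv->plat->bsp_priv;
}

int main(void)
{
    int token = 42;
    struct plat_data plat = { .bsp_priv = &token };
    struct priv p = { .plat = &plat };
    struct net_device ndev = { .priv = &p };
    struct device dev = { .drvdata = &ndev };

    printf("%d\n", *(int *)get_bsp_priv(&dev));  /* 42 */
    return 0;
}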
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 6e3b82972ce8..3eb281d1db08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -54,7 +54,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
spin_lock_irqsave(&priv->ptp_lock, flags);
- priv->hw->ptp->config_addend(priv->ioaddr, addend);
+ priv->hw->ptp->config_addend(priv->ptpaddr, addend);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
@@ -89,7 +89,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
spin_lock_irqsave(&priv->ptp_lock, flags);
- priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+ priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj,
+ priv->plat->has_gmac4);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
@@ -114,7 +115,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
spin_lock_irqsave(&priv->ptp_lock, flags);
- ns = priv->hw->ptp->get_systime(priv->ioaddr);
+ ns = priv->hw->ptp->get_systime(priv->ptpaddr);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
@@ -141,7 +142,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
spin_lock_irqsave(&priv->ptp_lock, flags);
- priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+ priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
@@ -177,7 +178,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
* Description: this function will register the ptp clock driver
* to kernel. It also does some house keeping work.
*/
-int stmmac_ptp_register(struct stmmac_priv *priv)
+void stmmac_ptp_register(struct stmmac_priv *priv)
{
spin_lock_init(&priv->ptp_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
@@ -185,13 +186,10 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
priv->device);
if (IS_ERR(priv->ptp_clock)) {
+ netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
} else if (priv->ptp_clock)
- pr_debug("Added PTP HW clock successfully on %s\n",
- priv->dev->name);
-
- return 0;
+ netdev_info(priv->dev, "registered PTP clock\n");
}
/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 4535df37c227..c06938c47af5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -22,51 +22,53 @@
Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
******************************************************************************/
-#ifndef __STMMAC_PTP_H__
-#define __STMMAC_PTP_H__
+#ifndef __STMMAC_PTP_H__
+#define __STMMAC_PTP_H__
-/* IEEE 1588 PTP register offsets */
-#define PTP_TCR 0x0700 /* Timestamp Control Reg */
-#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
-#define PTP_STSR 0x0708 /* System Time – Seconds Regr */
-#define PTP_STNSR 0x070C /* System Time – Nanoseconds Reg */
-#define PTP_STSUR 0x0710 /* System Time – Seconds Update Reg */
-#define PTP_STNSUR 0x0714 /* System Time – Nanoseconds Update Reg */
-#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
-#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
-#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
-#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
-#define PTP_TSR 0x0728 /* Timestamp Status */
+#define PTP_GMAC4_OFFSET 0xb00
+#define PTP_GMAC3_X_OFFSET 0x700
-#define PTP_STNSUR_ADDSUB_SHIFT 31
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x00 /* Timestamp Control Reg */
+#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */
+#define PTP_STSR 0x08 /* System Time – Seconds Reg */
+#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */
+#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */
+#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */
+#define PTP_TAR 0x18 /* Timestamp Addend Reg */
-/* PTP TCR defines */
-#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */
-#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */
-#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
-#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
-/* Timestamp Interrupt Trigger Enable */
-#define PTP_TCR_TSTRIG 0x00000010
-#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
-#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
-/* Timestamp Digital or Binary Rollover Control */
-#define PTP_TCR_TSCTRLSSR 0x00000200
+#define PTP_STNSUR_ADDSUB_SHIFT 31
+#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */
+#define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */
+/* PTP Timestamp control register defines */
+#define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */
+#define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */
+#define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */
+#define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */
+#define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */
+#define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */
+#define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */
+#define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */
/* Enable PTP packet Processing for Version 2 Format */
-#define PTP_TCR_TSVER2ENA 0x00000400
+#define PTP_TCR_TSVER2ENA BIT(10)
/* Enable Processing of PTP over Ethernet Frames */
-#define PTP_TCR_TSIPENA 0x00000800
+#define PTP_TCR_TSIPENA BIT(11)
/* Enable Processing of PTP Frames Sent over IPv6-UDP */
-#define PTP_TCR_TSIPV6ENA 0x00001000
+#define PTP_TCR_TSIPV6ENA BIT(12)
/* Enable Processing of PTP Frames Sent over IPv4-UDP */
-#define PTP_TCR_TSIPV4ENA 0x00002000
+#define PTP_TCR_TSIPV4ENA BIT(13)
/* Enable Timestamp Snapshot for Event Messages */
-#define PTP_TCR_TSEVNTENA 0x00004000
+#define PTP_TCR_TSEVNTENA BIT(14)
/* Enable Snapshot for Messages Relevant to Master */
-#define PTP_TCR_TSMSTRENA 0x00008000
+#define PTP_TCR_TSMSTRENA BIT(15)
/* Select PTP packets for Taking Snapshots */
-#define PTP_TCR_SNAPTYPSEL_1 0x00010000
+#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
/* Enable MAC address for PTP Frame Filtering */
-#define PTP_TCR_TSENMACADDR 0x00040000
+#define PTP_TCR_TSENMACADDR BIT(18)
+
+/* SSIR defines */
+#define PTP_SSIR_SSINC_MASK 0xff
+#define GMAC4_PTP_SSIR_SSINC_SHIFT 16
-#endif /* __STMMAC_PTP_H__ */
+#endif /* __STMMAC_PTP_H__ */
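
The PTP register offsets are now relative to a per-core base — 0xb00 on GMAC4, 0x700 on GMAC3.x — which is exactly what the new priv->ptpaddr caches. A small runnable check of the address arithmetic; the MMIO base value is made up for illustration:

#include <stdio.h>

#define PTP_GMAC4_OFFSET   0xb00
#define PTP_GMAC3_X_OFFSET 0x700
#define PTP_STSR           0x08

int main(void)
{
    unsigned long ioaddr = 0x10000000UL;  /* assumed MMIO base */

    printf("GMAC4   PTP_STSR at 0x%lx\n",
           ioaddr + PTP_GMAC4_OFFSET + PTP_STSR);
    printf("GMAC3.x PTP_STSR at 0x%lx\n",
           ioaddr + PTP_GMAC3_X_OFFSET + PTP_STSR);
    return 0;
}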
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index e15bf84fc6b2..0ac449acaf5b 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -11,7 +11,6 @@
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index aa4f9d2d8fa9..02f452730d52 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -623,6 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
void __iomem *gregs = bp->gregs;
void __iomem *cregs = bp->creg;
void __iomem *bregs = bp->bregs;
+ __u32 bblk_dvma = (__u32)bp->bblock_dvma;
unsigned char *e = &bp->dev->dev_addr[0];
/* Latch current counters into statistics. */
@@ -671,9 +672,9 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
bregs + BMAC_XIFCFG);
/* Tell the QEC where the ring descriptors are. */
- sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
+ sbus_writel(bblk_dvma + bib_offset(be_rxd, 0),
cregs + CREG_RXDS);
- sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
+ sbus_writel(bblk_dvma + bib_offset(be_txd, 0),
cregs + CREG_TXDS);
/* Setup the FIFO pointers into QEC local memory. */
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
index 06dd21707353..532fc56830cf 100644
--- a/drivers/net/ethernet/sun/sunbmac.h
+++ b/drivers/net/ethernet/sun/sunbmac.h
@@ -291,7 +291,7 @@ struct bigmac {
void __iomem *bregs; /* BigMAC Registers */
void __iomem *tregs; /* BigMAC Transceiver */
struct bmac_init_block *bmac_block; /* RX and TX descriptors */
- __u32 bblock_dvma; /* RX and TX descriptors */
+ dma_addr_t bblock_dvma; /* RX and TX descriptors */
spinlock_t lock;
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 9b825780b3be..9582948145c1 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -124,7 +124,7 @@ static void qe_init_rings(struct sunqe *qep)
{
struct qe_init_block *qb = qep->qe_block;
struct sunqe_buffers *qbufs = qep->buffers;
- __u32 qbufs_dvma = qep->buffers_dvma;
+ __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
int i;
qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
@@ -144,6 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
void __iomem *mregs = qep->mregs;
void __iomem *gregs = qecp->gregs;
unsigned char *e = &qep->dev->dev_addr[0];
+ __u32 qblk_dvma = (__u32)qep->qblock_dvma;
u32 tmp;
int i;
@@ -152,8 +153,8 @@ static int qe_init(struct sunqe *qep, int from_irq)
return -EAGAIN;
/* Setup initial rx/tx init block pointers. */
- sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
- sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+ sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+ sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
/* Enable/mask the various irq's. */
sbus_writel(0, cregs + CREG_RIMASK);
@@ -413,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
struct net_device *dev = qep->dev;
struct qe_rxd *this;
struct sunqe_buffers *qbufs = qep->buffers;
- __u32 qbufs_dvma = qep->buffers_dvma;
+ __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
int elem = qep->rx_new;
u32 flags;
@@ -572,7 +573,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
struct sunqe_buffers *qbufs = qep->buffers;
- __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+ __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
unsigned char *txbuf;
int len, entry;
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
index 581781b6b2fa..ae190b77431b 100644
--- a/drivers/net/ethernet/sun/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -334,12 +334,12 @@ struct sunqe {
void __iomem *qcregs; /* QEC per-channel Registers */
void __iomem *mregs; /* Per-channel MACE Registers */
struct qe_init_block *qe_block; /* RX and TX descriptors */
- __u32 qblock_dvma; /* RX and TX descriptors */
+ dma_addr_t qblock_dvma; /* RX and TX descriptors */
spinlock_t lock; /* Protects txfull state */
int rx_new, rx_old; /* RX ring extents */
int tx_new, tx_old; /* TX ring extents */
struct sunqe_buffers *buffers; /* CPU visible address. */
- __u32 buffers_dvma; /* DVMA visible address. */
+ dma_addr_t buffers_dvma; /* DVMA visible address. */
struct sunqec *parent;
u8 mconfig; /* Base MACE mconfig value */
struct platform_device *op; /* QE's OF device struct */
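
The sunqe/sunbmac changes keep the DMA handles at full dma_addr_t width in the driver structs and narrow them to __u32 only at the sbus_writel() call sites, since the QEC registers are 32 bits wide. A short illustration of the explicit narrowing; dma_addr_t is modelled as 64-bit here:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;  /* stand-in for a 64-bit configuration */

int main(void)
{
    dma_addr_t handle = 0x00000000fe000000ULL;  /* assumed DVMA handle */
    uint32_t reg = (uint32_t)handle;            /* what sbus_writel() takes */

    printf("stored 0x%llx, written 0x%x\n",
           (unsigned long long)handle, reg);
    return 0;
}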
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 0d0053128542..5eedac495077 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -982,11 +982,13 @@ static int dwceqos_mii_probe(struct net_device *ndev)
if (netif_msg_probe(lp))
phy_attached_info(phydev);
- phydev->supported &= PHY_GBIT_FEATURES;
+ phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
lp->link = 0;
lp->speed = 0;
lp->duplex = DUPLEX_UNKNOWN;
+ lp->flowcontrol.autoneg = AUTONEG_ENABLE;
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index c3e85acfdc70..ba1e45ff6aae 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -30,6 +30,8 @@
#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+#define AM33XX_GMII_SEL_RGMII2_IDMODE BIT(5)
+#define AM33XX_GMII_SEL_RGMII1_IDMODE BIT(4)
#define GMII_SEL_MODE_MASK 0x3
@@ -48,6 +50,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
u32 reg;
u32 mask;
u32 mode = 0;
+ bool rgmii_id = false;
reg = readl(priv->gmii_sel);
@@ -57,10 +60,14 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
break;
case PHY_INTERFACE_MODE_RGMII:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
mode = AM33XX_GMII_SEL_MODE_RGMII;
+ rgmii_id = true;
break;
default:
@@ -83,6 +90,13 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
}
+ if (rgmii_id) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
+ else
+ mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
+ }
+
reg &= ~mask;
reg |= mode;
@@ -162,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
}
dev = bus_find_device(&platform_bus_type, NULL, node, match);
+ of_node_put(node);
priv = dev_get_drvdata(dev);
priv->cpsw_phy_sel(priv, phy_mode, slave);
+
+ put_device(dev);
}
EXPORT_SYMBOL_GPL(cpsw_phy_sel);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c6cff3d2ff05..58947aae31c7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2375,8 +2375,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
* to the PHY is the Ethernet MAC DT node.
*/
ret = of_phy_register_fixed_link(slave_node);
- if (ret)
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
return ret;
+ }
slave_data->phy_node = of_node_get(slave_node);
} else if (parp) {
u32 phyid;
@@ -2397,6 +2400,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
+ put_device(&mdio->dev);
} else {
dev_err(&pdev->dev,
"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
@@ -2440,6 +2444,46 @@ no_phy_slave:
return 0;
}
+static void cpsw_remove_dt(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *slave_node;
+ int i = 0;
+
+ for_each_available_child_of_node(node, slave_node) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+ if (strcmp(slave_node->name, "slave"))
+ continue;
+
+ if (of_phy_is_fixed_link(slave_node)) {
+ struct phy_device *phydev;
+
+ phydev = of_phy_find_device(slave_node);
+ if (phydev) {
+ fixed_phy_unregister(phydev);
+ /* Put references taken by
+ * of_phy_find_device() and
+ * of_phy_register_fixed_link().
+ */
+ phy_device_free(phydev);
+ phy_device_free(phydev);
+ }
+ }
+
+ of_node_put(slave_data->phy_node);
+
+ i++;
+ if (i == data->slaves)
+ break;
+ }
+
+ of_platform_depopulate(&pdev->dev);
+}
+
static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
struct cpsw_common *cpsw = priv->cpsw;
@@ -2547,6 +2591,9 @@ static int cpsw_probe(struct platform_device *pdev)
int irq;
cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ if (!cpsw)
+ return -ENOMEM;
+
cpsw->dev = &pdev->dev;
ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
@@ -2584,11 +2631,19 @@ static int cpsw_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(&cpsw->data, pdev)) {
- dev_err(&pdev->dev, "cpsw: platform data missing\n");
- ret = -ENODEV;
+ /* Need to enable clocks with runtime PM api to access module
+ * registers
+ */
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto clean_runtime_disable_ret;
}
+
+ ret = cpsw_probe_dt(&cpsw->data, pdev);
+ if (ret)
+ goto clean_dt_ret;
+
data = &cpsw->data;
cpsw->rx_ch_num = 1;
cpsw->tx_ch_num = 1;
@@ -2608,7 +2663,7 @@ static int cpsw_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!cpsw->slaves) {
ret = -ENOMEM;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
for (i = 0; i < data->slaves; i++)
cpsw->slaves[i].slave_num = i;
@@ -2620,7 +2675,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (IS_ERR(clk)) {
dev_err(priv->dev, "fck is not found\n");
ret = -ENODEV;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
@@ -2628,26 +2683,17 @@ static int cpsw_probe(struct platform_device *pdev)
ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
if (IS_ERR(ss_regs)) {
ret = PTR_ERR(ss_regs);
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
cpsw->regs = ss_regs;
- /* Need to enable clocks with runtime PM api to access module
- * registers
- */
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
- goto clean_runtime_disable_ret;
- }
cpsw->version = readl(&cpsw->regs->id_ver);
- pm_runtime_put_sync(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(cpsw->wr_regs)) {
ret = PTR_ERR(cpsw->wr_regs);
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
memset(&dma_params, 0, sizeof(dma_params));
@@ -2684,7 +2730,7 @@ static int cpsw_probe(struct platform_device *pdev)
default:
dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
ret = -ENODEV;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
for (i = 0; i < cpsw->data.slaves; i++) {
struct cpsw_slave *slave = &cpsw->slaves[i];
@@ -2713,7 +2759,7 @@ static int cpsw_probe(struct platform_device *pdev)
if (!cpsw->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
@@ -2811,16 +2857,23 @@ static int cpsw_probe(struct platform_device *pdev)
ret = cpsw_probe_dual_emac(priv);
if (ret) {
cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
- goto clean_ale_ret;
+ goto clean_unregister_netdev_ret;
}
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
+clean_unregister_netdev_ret:
+ unregister_netdev(ndev);
clean_ale_ret:
cpsw_ale_destroy(cpsw->ale);
clean_dma_ret:
cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+ cpsw_remove_dt(pdev);
+ pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
clean_ndev_ret:
@@ -2846,7 +2899,7 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_ale_destroy(cpsw->ale);
cpdma_ctlr_destroy(cpsw->dma);
- of_platform_depopulate(&pdev->dev);
+ cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (cpsw->data.dual_emac)
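
The cpsw fixes pair every reference-taking lookup with a matching release: of_node_put() after bus_find_device(), put_device() on the mdio device, and two phy_device_free() calls to drop the references taken by of_phy_find_device() and of_phy_register_fixed_link(). A userspace analogue of that get/put discipline:

#include <assert.h>
#include <stdio.h>

struct obj { int refs; };

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }
static void obj_put(struct obj *o) { o->refs--; }

int main(void)
{
    struct obj dev = { .refs = 1 };      /* creator's reference */
    struct obj *found = obj_get(&dev);   /* bus_find_device()-like lookup */

    /* ... use found ... */
    obj_put(found);                      /* the added put_device() */
    assert(dev.refs == 1);               /* balanced again */
    printf("balanced\n");
    return 0;
}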
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2fd94a5bc1f3..84fbe5714f8b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev)
int i = 0;
struct emac_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = NULL;
+ struct device *phy = NULL;
ret = pm_runtime_get_sync(&priv->pdev->dev);
if (ret < 0) {
@@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev)
/* use the first phy on the bus if pdata did not give us a phy id */
if (!phydev && !priv->phy_id) {
- struct device *phy;
-
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
match_first_device);
- if (phy)
+ if (phy) {
priv->phy_id = dev_name(phy);
+ if (!priv->phy_id || !*priv->phy_id)
+ put_device(phy);
+ }
}
if (!phydev && priv->phy_id && *priv->phy_id) {
phydev = phy_connect(ndev, priv->phy_id,
&emac_adjust_link,
PHY_INTERFACE_MODE_MII);
-
+ put_device(phy); /* reference taken by bus_find_device */
if (IS_ERR(phydev)) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_id);
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index ece0ea0f6b38..6c7ec1ddd475 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -610,8 +610,8 @@ err_out_regions:
#ifdef CONFIG_PCI
if (pdev)
pci_release_regions(pdev);
-#endif
err_out:
+#endif
if (pdev)
pci_disable_device(pdev);
return rc;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index bc258d7e41df..272f2b1cb7ad 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1769,7 +1769,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
gelic_ether_setup_netdev_ops(netdev, &card->napi);
result = gelic_net_setup_netdev(netdev, card);
if (result) {
- dev_dbg(&dev->core, "%s: setup_netdev failed %d",
+ dev_dbg(&dev->core, "%s: setup_netdev failed %d\n",
__func__, result);
goto fail_setup_netdev;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 446ea580ad42..928c1dca2673 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
pr_debug("%s: bssid matched\n", __func__);
break;
} else {
- pr_debug("%s: bssid unmached\n", __func__);
+ pr_debug("%s: bssid unmatched\n", __func__);
continue;
}
}
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 37ab46cdbec4..d2349a1bc6ba 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/kconfig.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 0b37ce9f28f1..ca31a57dbc86 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/kconfig.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 69e2a833a84f..c688d68c39aa 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -431,8 +431,7 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp,
- struct device *dev, off_t offset)
+static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
{
u32 timeout;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
@@ -468,8 +467,8 @@ static void axienet_device_reset(struct net_device *ndev)
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
- __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
+ __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
+ __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@@ -818,7 +817,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Tx path");
+ dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -867,7 +866,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Rx path");
+ dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -1338,8 +1337,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
~XAE_MDIO_MC_MDIOEN_MASK));
- __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
+ __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
+ __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
axienet_mdio_wait_until_ready(lp);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 7f127dc1b7ba..fa32391720fe 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
if (!qmgr_stat_below_low_watermark(rxq) &&
napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
- printk(KERN_DEBUG "%s: eth_poll"
- " napi_reschedule successed\n",
+ printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
dev->name);
#endif
qmgr_disable_irq(rxq);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3c20e87bb761..42edd7b7902f 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -58,9 +58,9 @@ struct geneve_dev {
struct hlist_node hlist; /* vni hash table */
struct net *net; /* netns for packet i/o */
struct net_device *dev; /* netdev for geneve tunnel */
- struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
- struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */
#endif
u8 vni[3]; /* virtual network ID for tunnel */
u8 ttl; /* TTL override */
@@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
@@ -543,9 +543,19 @@ static void __geneve_sock_release(struct geneve_sock *gs)
static void geneve_sock_release(struct geneve_dev *geneve)
{
- __geneve_sock_release(geneve->sock4);
+ struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
- __geneve_sock_release(geneve->sock6);
+ struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);
+
+ rcu_assign_pointer(geneve->sock6, NULL);
+#endif
+
+ rcu_assign_pointer(geneve->sock4, NULL);
+ synchronize_net();
+
+ __geneve_sock_release(gs4);
+#if IS_ENABLED(CONFIG_IPV6)
+ __geneve_sock_release(gs6);
#endif
}
@@ -586,10 +596,10 @@ out:
gs->flags = geneve->flags;
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- geneve->sock6 = gs;
+ rcu_assign_pointer(geneve->sock6, gs);
else
#endif
- geneve->sock4 = gs;
+ rcu_assign_pointer(geneve->sock4, gs);
hash = geneve_net_vni_hash(geneve->vni);
hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
@@ -603,9 +613,7 @@ static int geneve_open(struct net_device *dev)
bool metadata = geneve->collect_md;
int ret = 0;
- geneve->sock4 = NULL;
#if IS_ENABLED(CONFIG_IPV6)
- geneve->sock6 = NULL;
if (ipv6 || metadata)
ret = geneve_sock_add(geneve, true);
#endif
@@ -720,6 +728,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct rtable *rt = NULL;
__u8 tos;
+ if (!rcu_dereference(geneve->sock4))
+ return ERR_PTR(-EIO);
+
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_mark = skb->mark;
fl4->flowi4_proto = IPPROTO_UDP;
@@ -772,11 +783,15 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
struct dst_cache *dst_cache;
+ struct geneve_sock *gs6;
__u8 prio;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ return ERR_PTR(-EIO);
+
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_mark = skb->mark;
fl6->flowi6_proto = IPPROTO_UDP;
@@ -842,7 +857,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs4 = geneve->sock4;
+ struct geneve_sock *gs4;
struct rtable *rt = NULL;
const struct iphdr *iip; /* interior IP header */
int err = -EINVAL;
@@ -853,6 +868,10 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs4 = rcu_dereference(geneve->sock4);
+ if (!gs4)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
@@ -932,9 +951,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
const struct iphdr *iip; /* interior IP header */
+ struct geneve_sock *gs6;
int err = -EINVAL;
struct flowi6 fl6;
__u8 prio, ttl;
@@ -943,6 +962,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 52eeb2f67276..f6382150b16a 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -442,14 +442,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
}
net_trans_info = get_net_transport_info(skb, &hdr_offset);
- if (net_trans_info == TRANSPORT_INFO_NOT_IP)
- goto do_send;
/*
* Setup the sendside checksum offload only if this is not a
* GSO packet.
*/
- if (skb_is_gso(skb)) {
+ if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
@@ -478,56 +476,29 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
}
lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
- goto do_send;
- }
-
- if ((skb->ip_summed == CHECKSUM_NONE) ||
- (skb->ip_summed == CHECKSUM_UNNECESSARY))
- goto do_send;
-
- rndis_msg_size += NDIS_CSUM_PPI_SIZE;
- ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
- TCPIP_CHKSUM_PKTINFO);
-
- csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
- ppi->ppi_offset);
-
- if (net_trans_info & (INFO_IPV4 << 16))
- csum_info->transmit.is_ipv4 = 1;
- else
- csum_info->transmit.is_ipv6 = 1;
-
- if (net_trans_info & INFO_TCP) {
- csum_info->transmit.tcp_checksum = 1;
- csum_info->transmit.tcp_header_offset = hdr_offset;
- } else if (net_trans_info & INFO_UDP) {
- /* UDP checksum offload is not supported on ws2008r2.
- * Furthermore, on ws2012 and ws2012r2, there are some
- * issues with udp checksum offload from Linux guests.
- * (these are host issues).
- * For now compute the checksum here.
- */
- struct udphdr *uh;
- u16 udp_len;
-
- ret = skb_cow_head(skb, 0);
- if (ret)
- goto no_memory;
-
- uh = udp_hdr(skb);
- udp_len = ntohs(uh->len);
- uh->check = 0;
- uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- udp_len, IPPROTO_UDP,
- csum_partial(uh, udp_len, 0));
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
-
- csum_info->transmit.udp_checksum = 0;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (net_trans_info & INFO_TCP) {
+ rndis_msg_size += NDIS_CSUM_PPI_SIZE;
+ ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+ TCPIP_CHKSUM_PKTINFO);
+
+ csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
+ ppi->ppi_offset);
+
+ if (net_trans_info & (INFO_IPV4 << 16))
+ csum_info->transmit.is_ipv4 = 1;
+ else
+ csum_info->transmit.is_ipv6 = 1;
+
+ csum_info->transmit.tcp_checksum = 1;
+ csum_info->transmit.tcp_header_offset = hdr_offset;
+ } else {
+ /* UDP checksum (and other) offload is not supported. */
+ if (skb_checksum_help(skb))
+ goto drop;
+ }
}
-do_send:
/* Start filling in the page buffers with the rndis hdr */
rndis_msg->msg_len += rndis_msg_size;
packet->total_data_buflen = rndis_msg->msg_len;
@@ -636,15 +607,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
- if (csum_info) {
- /* We only look at the IP checksum here.
- * Should we be dropping the packet if checksum
- * failed? How do we deal with other checksums - TCP/UDP?
- */
- if (csum_info->receive.ip_checksum_succeeded)
+
+ /* skb is already created with CHECKSUM_NONE */
+ skb_checksum_none_assert(skb);
+
+ /*
+ * In Linux, the IP checksum is always checked.
+ * Do L4 checksum offload if enabled and present.
+ */
+ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if (csum_info->receive.tcp_checksum_succeeded ||
+ csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
}
if (vlan_tci & VLAN_TAG_PRESENT)
@@ -725,12 +699,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *dev = net_device_ctx->device_ctx;
-
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}
static void netvsc_get_channels(struct net_device *net,
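
The netvsc rewrite stops open-coding a UDP checksum for hosts that cannot offload it: only TCP on CHECKSUM_PARTIAL skbs keeps the offload PPI, and anything else falls back to skb_checksum_help(), dropping the packet if that fails. A minimal sketch of the fallback, IPv4-only for brevity and with a hypothetical can_offload_tcp flag; a negative return maps to the "goto drop" above:

	#include <linux/skbuff.h>
	#include <linux/ip.h>

	static int prepare_tx_csum(struct sk_buff *skb, bool can_offload_tcp)
	{
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return 0;	/* no checksum pending */

		if (can_offload_tcp && ip_hdr(skb)->protocol == IPPROTO_TCP)
			return 0;	/* leave the checksum to the device/host */

		/* resolve the checksum in software; returns 0 or -errno */
		return skb_checksum_help(skb);
	}
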
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3ea47f28e143..d2e61e002926 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
+static bool send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
+
static sci_t make_sci(u8 *addr, __be16 port)
{
sci_t sci;
@@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present)
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
- const struct macsec_secy *secy, u32 pn)
+ const struct macsec_secy *secy, u32 pn,
+ bool sci_present)
{
const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
+ memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
h->eth.h_proto = htons(ETH_P_MACSEC);
- if (tx_sc->send_sci ||
- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
+ if (sci_present) {
h->tci_an |= MACSEC_TCI_SC;
memcpy(&h->secure_channel_id, &secy->sci,
sizeof(h->secure_channel_id));
@@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
+ bool sci_present;
u32 pn;
secy = &macsec->secy;
@@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
unprotected_len = skb->len;
eth = eth_hdr(skb);
- hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
+ sci_present = send_sci(secy);
+ hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
@@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
- macsec_fill_sectag(hh, secy, pn);
+ macsec_fill_sectag(hh, secy, pn, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
@@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
skb_to_sgvec(skb, sg, 0, skb->len);
if (tx_sc->encrypt) {
- int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
+ int len = skb->len - macsec_hdr_len(sci_present) -
secy->icv_len;
aead_request_set_crypt(req, sg, sg, len, iv);
- aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
+ aead_request_set_ad(req, macsec_hdr_len(sci_present));
} else {
aead_request_set_crypt(req, sg, sg, 0, iv);
aead_request_set_ad(req, skb->len - secy->icv_len);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 3234fcdea317..d2d6f12a112f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1278,6 +1278,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
struct net_device *lowerdev;
int err;
int macmode;
+ bool create = false;
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -1304,12 +1305,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
err = macvlan_port_create(lowerdev);
if (err < 0)
return err;
+ create = true;
}
port = macvlan_port_get_rtnl(lowerdev);
/* Only 1 macvlan device can be created in passthru mode */
- if (port->passthru)
- return -EINVAL;
+ if (port->passthru) {
+		/* The macvlan port cannot have been created this time;
+		 * still go through destroy_macvlan_port for readability.
+		 */
+ err = -EINVAL;
+ goto destroy_macvlan_port;
+ }
vlan->lowerdev = lowerdev;
vlan->dev = dev;
@@ -1325,24 +1332,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
- if (port->count)
- return -EINVAL;
+ if (port->count) {
+ err = -EINVAL;
+ goto destroy_macvlan_port;
+ }
port->passthru = true;
eth_hw_addr_inherit(dev, lowerdev);
}
if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
- if (vlan->mode != MACVLAN_MODE_SOURCE)
- return -EINVAL;
+ if (vlan->mode != MACVLAN_MODE_SOURCE) {
+ err = -EINVAL;
+ goto destroy_macvlan_port;
+ }
macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
err = macvlan_changelink_sources(vlan, macmode, data);
if (err)
- return err;
+ goto destroy_macvlan_port;
}
err = register_netdevice(dev);
if (err < 0)
- return err;
+ goto destroy_macvlan_port;
dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
@@ -1357,7 +1368,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
unregister_netdev:
unregister_netdevice(dev);
-
+destroy_macvlan_port:
+ if (create)
+ macvlan_port_destroy(port->dev);
return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
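
macvlan_common_newlink() now records whether this call created the port and destroys it on every failure path instead of returning early and leaking it. A sketch of that conditional-unwind idiom, with hypothetical bar_*() types and helpers:

	#include <linux/types.h>
	#include <linux/errno.h>

	struct bar_port;
	struct bar_port *bar_port_create(void);
	void bar_port_destroy(struct bar_port *port);
	int bar_register(struct bar_port *port);

	static int bar_newlink(struct bar_port **pport)
	{
		struct bar_port *port = *pport;
		bool created = false;
		int err;

		if (!port) {
			port = bar_port_create();
			if (!port)
				return -ENOMEM;
			created = true;
		}

		err = bar_register(port);
		if (err)
			goto destroy;

		*pport = port;
		return 0;

	destroy:
		if (created)		/* never free a pre-existing port */
			bar_port_destroy(port);
		return err;
	}
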
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 5078a0d0db64..2651c8d8de2f 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -142,6 +142,7 @@ config MDIO_THUNDER
config MDIO_XGENE
tristate "APM X-Gene SoC MDIO bus controller"
+ depends on ARCH_XGENE || COMPILE_TEST
help
This module provides a driver for the MDIO busses found in the
 APM X-Gene SoCs.
@@ -320,13 +321,6 @@ config XILINX_GMII2RGMII
the Reduced Gigabit Media Independent Interface(RGMII) between
Ethernet physical media devices and the Gigabit Ethernet controller.
-config MDIO_XGENE
- tristate "APM X-Gene SoC MDIO bus controller"
- depends on ARCH_XGENE || COMPILE_TEST
- help
- This module provides a driver for the MDIO busses found in the
- APM X-Gene SoC's.
-
endif # PHYLIB
config MICREL_KS8995MA
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f279a897a5c7..a52b560e428b 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -42,19 +42,24 @@
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_SGMII 0x01
+
+#define AT803X_PSSR 0x11 /* PHY-Specific Status Register */
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
#define AT803X_DEBUG_REG_0 0x00
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
@@ -209,7 +214,6 @@ static int at803x_suspend(struct phy_device *phydev)
{
int value;
int wol_enabled;
- int ccr;
mutex_lock(&phydev->lock);
@@ -225,16 +229,6 @@ static int at803x_suspend(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-down SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -243,7 +237,6 @@ done:
static int at803x_resume(struct phy_device *phydev)
{
int value;
- int ccr;
mutex_lock(&phydev->lock);
@@ -251,17 +244,6 @@ static int at803x_resume(struct phy_device *phydev)
value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-up SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE);
- phy_write(phydev, MII_BMCR, value);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -381,6 +363,36 @@ static void at803x_link_change_notify(struct phy_device *phydev)
}
}
+static int at803x_aneg_done(struct phy_device *phydev)
+{
+ int ccr;
+
+ int aneg_done = genphy_aneg_done(phydev);
+ if (aneg_done != BMSR_ANEGCOMPLETE)
+ return aneg_done;
+
+ /*
+	 * In SGMII mode, if the copper-side autoneg completed successfully,
+	 * also check the SGMII-side autoneg result
+ */
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII)
+ return aneg_done;
+
+ /* switch to SGMII/fiber page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
+
+ /* check if the SGMII link is OK. */
+ if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
+ pr_warn("803x_aneg_done: SGMII link is not ok\n");
+ aneg_done = 0;
+ }
+ /* switch back to copper page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
+
+ return aneg_done;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
@@ -432,6 +444,7 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
} };
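
at803x_aneg_done() has to hop to the SGMII/fiber register page to read AT803X_PSSR, and the copper page must be restored on every return path. The paged-access helper this implies, as a hedged sketch reusing the defines added above (at803x_read_fiber_reg() itself is hypothetical, not part of the patch):

	#include <linux/phy.h>

	static int at803x_read_fiber_reg(struct phy_device *phydev, u32 reg)
	{
		int ccr, val;

		ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
		if (ccr < 0)
			return ccr;

		/* clear the selector bit: switch to the SGMII/fiber page */
		phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
		val = phy_read(phydev, reg);
		/* always switch back to the copper page before returning */
		phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);

		return val;
	}
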
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 03d54c4adc88..800b39f06279 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -19,6 +19,7 @@
#define TI_DP83848C_PHY_ID 0x20005ca0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
+#define TI_DP83822_PHY_ID 0x2000a240
/* Registers */
#define DP83848_MICR 0x11 /* MII Interrupt Control Register */
@@ -77,6 +78,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
+ { TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
@@ -105,6 +107,7 @@ static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
module_phy_driver(dp83848_driver);
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index c649c101bbab..eb5167210681 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -279,7 +279,7 @@ EXPORT_SYMBOL_GPL(fixed_phy_register);
void fixed_phy_unregister(struct phy_device *phy)
{
phy_device_remove(phy);
-
+ of_node_put(phy->mdio.dev.of_node);
fixed_phy_del(phy->mdio.addr);
}
EXPORT_SYMBOL_GPL(fixed_phy_unregister);
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index a17573e3bd8a..77a6671d572e 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
+#include <linux/netdevice.h>
enum rgmii_rx_clock_delay {
RGMII_RX_CLK_DELAY_0_2_NS = 0,
@@ -37,6 +38,7 @@ enum rgmii_rx_clock_delay {
#define MII_VSC85XX_INT_MASK 25
#define MII_VSC85XX_INT_MASK_MASK 0xa000
+#define MII_VSC85XX_INT_MASK_WOL 0x0040
#define MII_VSC85XX_INT_STATUS 26
#define MSCC_PHY_WOL_MAC_CONTROL 27
@@ -52,6 +54,17 @@ enum rgmii_rx_clock_delay {
#define RGMII_RX_CLK_DELAY_MASK 0x0070
#define RGMII_RX_CLK_DELAY_POS 4
+#define MSCC_PHY_WOL_LOWER_MAC_ADDR 21
+#define MSCC_PHY_WOL_MID_MAC_ADDR 22
+#define MSCC_PHY_WOL_UPPER_MAC_ADDR 23
+#define MSCC_PHY_WOL_LOWER_PASSWD 24
+#define MSCC_PHY_WOL_MID_PASSWD 25
+#define MSCC_PHY_WOL_UPPER_PASSWD 26
+
+#define MSCC_PHY_WOL_MAC_CONTROL 27
+#define SECURE_ON_ENABLE 0x8000
+#define SECURE_ON_PASSWD_LEN_4 0x4000
+
/* Microsemi PHY ID's */
#define PHY_ID_VSC8531 0x00070570
#define PHY_ID_VSC8541 0x00070770
@@ -81,6 +94,117 @@ static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
return rc;
}
+static int vsc85xx_wol_set(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int rc;
+ u16 reg_val;
+ u8 i;
+ u16 pwd[3] = {0, 0, 0};
+ struct ethtool_wolinfo *wol_conf = wol;
+ u8 *mac_addr = phydev->attached_dev->dev_addr;
+
+ mutex_lock(&phydev->lock);
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc != 0)
+ goto out_unlock;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* Store the device address for the magic packet */
+ for (i = 0; i < ARRAY_SIZE(pwd); i++)
+ pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 |
+ mac_addr[5 - i * 2];
+ phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]);
+ phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]);
+ phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]);
+ } else {
+ phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0);
+ phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0);
+ phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0);
+ }
+
+ if (wol_conf->wolopts & WAKE_MAGICSECURE) {
+ for (i = 0; i < ARRAY_SIZE(pwd); i++)
+ pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 |
+ wol_conf->sopass[5 - i * 2];
+ phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]);
+ phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]);
+ phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]);
+ } else {
+ phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0);
+ phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0);
+ phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0);
+ }
+
+ reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+ if (wol_conf->wolopts & WAKE_MAGICSECURE)
+ reg_val |= SECURE_ON_ENABLE;
+ else
+ reg_val &= ~SECURE_ON_ENABLE;
+ phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val);
+
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+ if (rc != 0)
+ goto out_unlock;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* Enable the WOL interrupt */
+ reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
+ reg_val |= MII_VSC85XX_INT_MASK_WOL;
+ rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
+ if (rc != 0)
+ goto out_unlock;
+ } else {
+ /* Disable the WOL interrupt */
+ reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK);
+ reg_val &= (~MII_VSC85XX_INT_MASK_WOL);
+ rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val);
+ if (rc != 0)
+ goto out_unlock;
+ }
+	/* Clear WOL interrupt status */
+ reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
+}
+
+static void vsc85xx_wol_get(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int rc;
+ u16 reg_val;
+ u8 i;
+ u16 pwd[3] = {0, 0, 0};
+ struct ethtool_wolinfo *wol_conf = wol;
+
+ mutex_lock(&phydev->lock);
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc != 0)
+ goto out_unlock;
+
+ reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL);
+ if (reg_val & SECURE_ON_ENABLE)
+ wol_conf->wolopts |= WAKE_MAGICSECURE;
+ if (wol_conf->wolopts & WAKE_MAGICSECURE) {
+ pwd[0] = phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD);
+ pwd[1] = phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD);
+ pwd[2] = phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD);
+ for (i = 0; i < ARRAY_SIZE(pwd); i++) {
+ wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff;
+ wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00)
+ >> 8;
+ }
+ }
+
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+}
+
static u8 edge_rate_magic_get(u16 vddmac,
int slowdown)
{
@@ -301,6 +425,8 @@ static struct phy_driver vsc85xx_driver[] = {
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
},
{
.phy_id = PHY_ID_VSC8541,
@@ -318,6 +444,8 @@ static struct phy_driver vsc85xx_driver[] = {
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
}
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c6f66832a1a6..f424b867f73e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -608,6 +608,21 @@ void phy_start_machine(struct phy_device *phydev)
}
/**
+ * phy_trigger_machine - trigger the state machine to run
+ * @phydev: the phy_device struct
+ *
+ * Description: There has been a change in state which requires that the
+ * state machine run.
+ */
+
+static void phy_trigger_machine(struct phy_device *phydev)
+{
+ cancel_delayed_work_sync(&phydev->state_queue);
+ queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+}
+
+/**
* phy_stop_machine - stop the PHY state machine tracking
* @phydev: target phy_device struct
*
@@ -639,6 +654,8 @@ static void phy_error(struct phy_device *phydev)
mutex_lock(&phydev->lock);
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
+
+ phy_trigger_machine(phydev);
}
/**
@@ -800,8 +817,7 @@ void phy_change(struct work_struct *work)
}
/* reschedule state queue work to run as soon as possible */
- cancel_delayed_work_sync(&phydev->state_queue);
- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+ phy_trigger_machine(phydev);
return;
ignore:
@@ -890,6 +906,8 @@ void phy_start(struct phy_device *phydev)
/* if phy was suspended, bring the physical link up again */
if (do_resume)
phy_resume(phydev);
+
+ phy_trigger_machine(phydev);
}
EXPORT_SYMBOL(phy_start);
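
phy_trigger_machine() gives phy_error() and phy_start() a way to run the state machine immediately rather than waiting for the next polling tick. The underlying delayed-work idiom in isolation:

	#include <linux/workqueue.h>

	static void trigger_now(struct delayed_work *dw)
	{
		cancel_delayed_work_sync(dw);	/* flush any pending run */
		queue_delayed_work(system_power_efficient_wq, dw, 0);	/* run ASAP */
	}
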
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e977ba931878..1a4bf8acad78 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -723,6 +723,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
phydev = to_phy_device(d);
rc = phy_connect_direct(dev, phydev, handler, interface);
+ put_device(d);
if (rc)
return ERR_PTR(rc);
@@ -953,6 +954,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phydev = to_phy_device(d);
rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
+ put_device(d);
if (rc)
return ERR_PTR(rc);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2e37eb337d48..24b4a09468dd 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -62,6 +62,10 @@
/* Vitesse Extended Page Access Register */
#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
+/* Vitesse VSC8601 Extended PHY Control Register 1 */
+#define MII_VSC8601_EPHY_CTL 0x17
+#define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8)
+
#define PHY_ID_VSC8234 0x000fc620
#define PHY_ID_VSC8244 0x000fc6c0
#define PHY_ID_VSC8514 0x00070670
@@ -111,6 +115,34 @@ static int vsc824x_config_init(struct phy_device *phydev)
return err;
}
+/* This adds a skew for both TX and RX clocks, so the skew should only be
+ * applied to "rgmii-id" interfaces. It may not work as expected
+ * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
+static int vsc8601_add_skew(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_VSC8601_EPHY_CTL);
+ if (ret < 0)
+ return ret;
+
+ ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW;
+ return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret);
+}
+
+static int vsc8601_config_init(struct phy_device *phydev)
+{
+ int ret = 0;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ ret = vsc8601_add_skew(phydev);
+
+ if (ret < 0)
+ return ret;
+
+ return genphy_config_init(phydev);
+}
+
static int vsc824x_ack_interrupt(struct phy_device *phydev)
{
int err = 0;
@@ -275,7 +307,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_init = &genphy_config_init,
+ .config_init = &vsc8601_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f79eb12c326a..125cff57c759 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -433,13 +433,13 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 0);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 0);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
@@ -497,13 +497,13 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 1);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 1);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index e6338c16081a..8a6675d92b98 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1656,6 +1656,19 @@ static const struct driver_info ax88178a_info = {
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info cypress_GX3_info = {
+ .description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct driver_info dlink_dub1312_info = {
.description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
.bind = ax88179_bind,
@@ -1718,6 +1731,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0b95, 0x178a),
.driver_info = (unsigned long)&ax88178a_info,
}, {
+ /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */
+ USB_DEVICE(0x04b4, 0x3610),
+ .driver_info = (unsigned long)&cypress_GX3_info,
+}, {
/* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
USB_DEVICE(0x2001, 0x4a00),
.driver_info = (unsigned long)&dlink_dub1312_info,
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 5662babf0583..3e37724d30ae 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -151,7 +151,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
- if (status < 0) {
+ if (status) {
usb_set_intfdata(intf, NULL);
usb_driver_release_interface(driver_of(intf), intf);
return status;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9d1fce8a6e84..3ff76c6db4f6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -59,6 +59,10 @@ enum qmi_wwan_flags {
QMI_WWAN_FLAG_RAWIP = 1 << 0,
};
+enum qmi_wwan_quirks {
+ QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
+};
+
static void qmi_wwan_netdev_setup(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@ -411,9 +415,14 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
* clearing out state the clients might need.
*
* MDM9x30 is the first QMI chipset with USB3 support. Abuse
- * this fact to enable the quirk.
+ * this fact to enable the quirk for all USB3 devices.
+ *
+ * There are also chipsets with the same "set DTR" requirement
+ * but without USB3 support. Devices based on these chips
+ * need a quirk flag in the device ID table.
*/
- if (le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) {
+ if (dev->driver_info->data & QMI_WWAN_QUIRK_DTR ||
+ le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) {
qmi_wwan_manage_power(dev, 1);
qmi_wwan_change_dtr(dev, true);
}
@@ -526,6 +535,16 @@ static const struct driver_info qmi_wwan_info = {
.rx_fixup = qmi_wwan_rx_fixup,
};
+static const struct driver_info qmi_wwan_info_quirk_dtr = {
+ .description = "WWAN/QMI device",
+ .flags = FLAG_WWAN,
+ .bind = qmi_wwan_bind,
+ .unbind = qmi_wwan_unbind,
+ .manage_power = qmi_wwan_manage_power,
+ .rx_fixup = qmi_wwan_rx_fixup,
+ .data = QMI_WWAN_QUIRK_DTR,
+};
+
#define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */
@@ -533,6 +552,11 @@ static const struct driver_info qmi_wwan_info = {
USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
.driver_info = (unsigned long)&qmi_wwan_info
+/* devices requiring "set DTR" quirk */
+#define QMI_QUIRK_SET_DTR(vend, prod, num) \
+ USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
+
/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
#define QMI_GOBI1K_DEVICE(vend, prod) \
QMI_FIXED_INTF(vend, prod, 3)
@@ -895,6 +919,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
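
The DTR quirk rides in the driver_info .data word: the Quectel entries get qmi_wwan_info_quirk_dtr via QMI_QUIRK_SET_DTR, and bind tests the flag alongside the existing bcdUSB >= 2.01 heuristic. The flag test in isolation (MY_QUIRK_DTR is an illustrative name):

	#include <linux/bitops.h>
	#include <linux/usb/usbnet.h>

	#define MY_QUIRK_DTR	BIT(0)

	static bool needs_set_dtr(struct usbnet *dev)
	{
		/* explicit per-device flag, or the USB3 heuristic noted above */
		return (dev->driver_info->data & MY_QUIRK_DTR) ||
		       le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201;
	}
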
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 44d439f50961..efb84f092492 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
u8 checksum = CHECKSUM_NONE;
u32 opts2, opts3;
- if (tp->version == RTL_VER_01)
+ if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
goto return_result;
opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
checksum = CHECKSUM_NONE;
else
checksum = CHECKSUM_UNNECESSARY;
- } else if (RD_IPV6_CS) {
+ } else if (opts2 & RD_IPV6_CS) {
if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
checksum = CHECKSUM_UNNECESSARY;
else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
@@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev)
goto out;
res = usb_autopm_get_interface(tp->intf);
- if (res < 0) {
- free_all_mem(tp);
- goto out;
- }
+ if (res < 0)
+ goto out_free;
mutex_lock(&tp->control);
@@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev)
netif_device_detach(tp->netdev);
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
- free_all_mem(tp);
- } else {
- napi_enable(&tp->napi);
+ goto out_unlock;
}
+ napi_enable(&tp->napi);
mutex_unlock(&tp->control);
@@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev)
tp->pm_notifier.notifier_call = rtl_notifier;
register_pm_notifier(&tp->pm_notifier);
#endif
+ return 0;
+out_unlock:
+ mutex_unlock(&tp->control);
+ usb_autopm_put_interface(tp->intf);
+out_free:
+ free_all_mem(tp);
out:
return res;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fad84f3f4109..7276d5a95bd0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1497,6 +1497,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
netif_napi_del(&vi->rq[i].napi);
}
+ /* We called napi_hash_del() before netif_napi_del(),
+	 * so we need to respect an RCU grace period before freeing vi->rq
+ */
+ synchronize_net();
+
kfree(vi->rq);
kfree(vi->sq);
}
@@ -2038,23 +2043,33 @@ static struct virtio_device_id id_table[] = {
{ 0 },
};
+#define VIRTNET_FEATURES \
+ VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
+ VIRTIO_NET_F_MAC, \
+ VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
+ VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
+ VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
+ VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
+ VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
+ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
+ VIRTIO_NET_F_CTRL_MAC_ADDR, \
+ VIRTIO_NET_F_MTU
+
static unsigned int features[] = {
- VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
- VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
- VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
- VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
- VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
- VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
- VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
- VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
- VIRTIO_NET_F_CTRL_MAC_ADDR,
+ VIRTNET_FEATURES,
+};
+
+static unsigned int features_legacy[] = {
+ VIRTNET_FEATURES,
+ VIRTIO_NET_F_GSO,
VIRTIO_F_ANY_LAYOUT,
- VIRTIO_NET_F_MTU,
};
static struct virtio_driver virtio_net_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
+ .feature_table_legacy = features_legacy,
+ .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
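
The VIRTNET_FEATURES macro keeps the modern and legacy feature tables from drifting apart: the shared bits live in one place and only the legacy-only bits (VIRTIO_NET_F_GSO, VIRTIO_F_ANY_LAYOUT) are appended to features_legacy. The idiom in isolation, with illustrative FEAT_* values:

	enum { FEAT_A, FEAT_B, FEAT_LEGACY_ONLY };

	#define COMMON_FEATURES	FEAT_A, FEAT_B

	static unsigned int features[] = { COMMON_FEATURES };
	static unsigned int features_legacy[] = { COMMON_FEATURES, FEAT_LEGACY_ONLY };
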
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b5554f2ebee4..ef83ae3b0a44 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2279,6 +2279,7 @@ vmxnet3_set_mc(struct net_device *netdev)
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
dma_addr_t new_table_pa = 0;
+ bool new_table_pa_valid = false;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) {
@@ -2307,13 +2308,15 @@ vmxnet3_set_mc(struct net_device *netdev)
new_table,
sz,
PCI_DMA_TODEVICE);
+ if (!dma_mapping_error(&adapter->pdev->dev,
+ new_table_pa)) {
+ new_mode |= VMXNET3_RXM_MCAST;
+ new_table_pa_valid = true;
+ rxConf->mfTablePA = cpu_to_le64(
+ new_table_pa);
+ }
}
-
- if (!dma_mapping_error(&adapter->pdev->dev,
- new_table_pa)) {
- new_mode |= VMXNET3_RXM_MCAST;
- rxConf->mfTablePA = cpu_to_le64(new_table_pa);
- } else {
+ if (!new_table_pa_valid) {
netdev_info(netdev,
"failed to copy mcast list, setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
@@ -2338,7 +2341,7 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- if (new_table_pa)
+ if (new_table_pa_valid)
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, PCI_DMA_TODEVICE);
kfree(new_table);
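
The vmxnet3 fix exists because 0 can be a perfectly valid bus address, so mapping success must be tracked in an explicit flag and failure detected with dma_mapping_error(), never by comparing the dma_addr_t against 0. A minimal sketch (DMA_TO_DEVICE is the generic spelling of PCI_DMA_TODEVICE):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int send_mapped(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t pa;

		pa = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, pa))
			return -ENOMEM;	/* 0 is not a reliable failure marker */

		/* ... hand pa to the device and wait for completion ... */

		dma_unmap_single(dev, pa, len, DMA_TO_DEVICE);
		return 0;
	}
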
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 85c271c70d42..820de6a9ddde 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -956,6 +956,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (skb->pkt_type == PACKET_LOOPBACK) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
skb->pkt_type = PACKET_HOST;
goto out;
}
@@ -996,6 +997,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
{
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IPCB(skb)->flags |= IPSKB_L3SLAVE;
/* loopback traffic; do not push through packet taps again.
* Reset pkt_type for upper layers to process skb
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e7d16687538b..24532cdebb00 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -583,7 +583,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
}
}
- pp = eth_gro_receive(head, skb);
+ pp = call_gro_receive(eth_gro_receive, head, skb);
flush = 0;
out:
@@ -943,17 +943,22 @@ static bool vxlan_snoop(struct net_device *dev,
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
struct vxlan_dev *vxlan;
+ struct vxlan_sock *sock4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct vxlan_sock *sock6;
+#endif
unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
+ sock4 = rtnl_dereference(dev->vn4_sock);
+
/* The vxlan_sock is only used by dev, leaving group has
* no effect on other vxlan devices.
*/
- if (family == AF_INET && dev->vn4_sock &&
- atomic_read(&dev->vn4_sock->refcnt) == 1)
+ if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
return false;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && dev->vn6_sock &&
- atomic_read(&dev->vn6_sock->refcnt) == 1)
+ sock6 = rtnl_dereference(dev->vn6_sock);
+ if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
return false;
#endif
@@ -961,10 +966,12 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
- if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
+ if (family == AF_INET &&
+ rtnl_dereference(vxlan->vn4_sock) != sock4)
continue;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
+ if (family == AF_INET6 &&
+ rtnl_dereference(vxlan->vn6_sock) != sock6)
continue;
#endif
@@ -1005,22 +1012,25 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
- bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
- bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ rcu_assign_pointer(vxlan->vn6_sock, NULL);
#endif
+ rcu_assign_pointer(vxlan->vn4_sock, NULL);
synchronize_net();
- if (ipv4) {
- udp_tunnel_sock_release(vxlan->vn4_sock->sock);
- kfree(vxlan->vn4_sock);
+ if (__vxlan_sock_release_prep(sock4)) {
+ udp_tunnel_sock_release(sock4->sock);
+ kfree(sock4);
}
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6) {
- udp_tunnel_sock_release(vxlan->vn6_sock->sock);
- kfree(vxlan->vn6_sock);
+ if (__vxlan_sock_release_prep(sock6)) {
+ udp_tunnel_sock_release(sock6->sock);
+ kfree(sock6);
}
#endif
}
@@ -1036,18 +1046,21 @@ static int vxlan_igmp_join(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_join_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1067,18 +1080,21 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_leave_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1828,11 +1844,15 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
int err;
+ if (!sock6)
+ return ERR_PTR(-EIO);
+
if (tos && !info)
use_cache = false;
if (use_cache) {
@@ -1850,7 +1870,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_proto = IPPROTO_UDP;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
- vxlan->vn6_sock->sock->sk,
+ sock6->sock->sk,
&ndst, &fl6);
if (err < 0)
return ERR_PTR(err);
@@ -1995,9 +2015,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (dst->sa.sa_family == AF_INET) {
- if (!vxlan->vn4_sock)
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
+
+ if (!sock4)
goto drop;
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
rt = vxlan_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2050,12 +2072,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct dst_entry *ndst;
u32 rt6i_flags;
- if (!vxlan->vn6_sock)
+ if (!sock6)
goto drop;
- sk = vxlan->vn6_sock->sock->sk;
+ sk = sock6->sock->sk;
ndst = vxlan6_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2415,9 +2438,10 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
if (ip_tunnel_info_af(info) == AF_INET) {
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
- if (!vxlan->vn4_sock)
+ if (!sock4)
return -EINVAL;
rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
@@ -2429,8 +2453,6 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
struct dst_entry *ndst;
- if (!vxlan->vn6_sock)
- return -EINVAL;
ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
&info->key.u.ipv6.src, NULL, info);
@@ -2740,10 +2762,10 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- vxlan->vn6_sock = vs;
+ rcu_assign_pointer(vxlan->vn6_sock, vs);
else
#endif
- vxlan->vn4_sock = vs;
+ rcu_assign_pointer(vxlan->vn4_sock, vs);
vxlan_vs_add_dev(vs, vxlan);
return 0;
}
@@ -2754,9 +2776,9 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
int ret = 0;
- vxlan->vn4_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
- vxlan->vn6_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
if (ipv6 || metadata)
ret = __vxlan_sock_add(vxlan, true);
#endif
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 33ab3345d333..4e9fe75d7067 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -294,7 +294,7 @@ config FSL_UCC_HDLC
config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
- depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 5fbf83d5aa57..65647533b401 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -295,11 +295,11 @@ free_ucc_pram:
qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
dma_free_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd),
+ TX_BD_RING_LEN * sizeof(struct qe_bd *),
priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
dma_free_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd),
+ RX_BD_RING_LEN * sizeof(struct qe_bd *),
priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
ucc_fast_free(priv->uccf);
@@ -688,7 +688,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
if (priv->rx_bd_base) {
dma_free_coherent(priv->dev,
- RX_BD_RING_LEN * sizeof(struct qe_bd),
+ RX_BD_RING_LEN * sizeof(struct qe_bd *),
priv->rx_bd_base, priv->dma_rx_bd);
priv->rx_bd_base = NULL;
@@ -697,7 +697,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv)
if (priv->tx_bd_base) {
dma_free_coherent(priv->dev,
- TX_BD_RING_LEN * sizeof(struct qe_bd),
+ TX_BD_RING_LEN * sizeof(struct qe_bd *),
priv->tx_bd_base, priv->dma_tx_bd);
priv->tx_bd_base = NULL;
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index d06a887a2352..b776a0ab106c 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -223,12 +223,19 @@ static int slic_ds26522_probe(struct spi_device *spi)
return ret;
}
+static const struct spi_device_id slic_ds26522_id[] = {
+ { .name = "ds26522" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(spi, slic_ds26522_id);
+
static const struct of_device_id slic_ds26522_match[] = {
{
.compatible = "maxim,ds26522",
},
{},
};
+MODULE_DEVICE_TABLE(of, slic_ds26522_match);
static struct spi_driver slic_ds26522_driver = {
.driver = {
@@ -239,6 +246,7 @@ static struct spi_driver slic_ds26522_driver = {
},
.probe = slic_ds26522_probe,
.remove = slic_ds26522_remove,
+ .id_table = slic_ds26522_id,
};
static int __init slic_ds26522_init(void)
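
The slic_ds26522 additions are about module autoloading: the SPI core emits a MODALIAS built from the spi_device_id table (or the OF table for DT probes), so without MODULE_DEVICE_TABLE entries and a wired-up .id_table udev never learns which module to load. The boilerplate in isolation, with placeholder names; the driver then points .id_table at the SPI table, as the hunk above does:

	#include <linux/module.h>
	#include <linux/mod_devicetable.h>

	static const struct spi_device_id my_spi_id[] = {
		{ .name = "mychip" },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(spi, my_spi_id);

	static const struct of_device_id my_of_match[] = {
		{ .compatible = "vendor,mychip" },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, my_of_match);
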
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index dda49af1eb74..521f1c55c19e 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -450,6 +450,7 @@ struct ath10k_debug {
u32 pktlog_filter;
u32 reg_addr;
u32 nf_cal_period;
+ void *cal_data;
struct ath10k_fw_crash_data *fw_crash_data;
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 832da6ed9f13..82a4c67f3672 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -30,6 +30,8 @@
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
#define ATH10K_FW_CRASH_DUMP_VERSION 1
/**
@@ -1451,56 +1453,51 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
-static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
{
- struct ath10k *ar = inode->i_private;
- void *buf;
u32 hi_addr;
__le32 addr;
int ret;
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH10K_STATE_ON &&
- ar->state != ATH10K_STATE_UTF) {
- ret = -ENETDOWN;
- goto err;
- }
+ lockdep_assert_held(&ar->conf_mutex);
- buf = vmalloc(ar->hw_params.cal_data_len);
- if (!buf) {
- ret = -ENOMEM;
- goto err;
- }
+ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+ return -EINVAL;
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
if (ret) {
- ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
- goto err_vfree;
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+ ret);
+ return ret;
}
- ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
ar->hw_params.cal_data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
- goto err_vfree;
+ return ret;
}
- file->private_data = buf;
+ return 0;
+}
- mutex_unlock(&ar->conf_mutex);
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
- return 0;
+ mutex_lock(&ar->conf_mutex);
-err_vfree:
- vfree(buf);
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_UTF) {
+ ath10k_debug_cal_data_fetch(ar);
+ }
-err:
+ file->private_data = ar;
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static ssize_t ath10k_debug_cal_data_read(struct file *file,
@@ -1508,18 +1505,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- void *buf = file->private_data;
- return simple_read_from_buffer(user_buf, count, ppos,
- buf, ar->hw_params.cal_data_len);
-}
+ mutex_lock(&ar->conf_mutex);
-static int ath10k_debug_cal_data_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
- return 0;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
}
static ssize_t ath10k_write_ani_enable(struct file *file,
@@ -1580,7 +1575,6 @@ static const struct file_operations fops_ani_enable = {
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
- .release = ath10k_debug_cal_data_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
@@ -1932,6 +1926,8 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_cal_data_fetch(ar);
+
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
* warning from del_timer(). */
@@ -2344,6 +2340,10 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.fw_crash_data)
return -ENOMEM;
+ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+ if (!ar->debug.cal_data)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
@@ -2357,6 +2357,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
+ vfree(ar->debug.cal_data);
+ ar->debug.cal_data = NULL;
+
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
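The ath10k refactor above moves the calibration-data snapshot out of a per-open vmalloc() into a buffer allocated once in ath10k_debug_create() and refreshed under conf_mutex, so opening the file no longer fails when the device is down and no per-open release hook is needed. A minimal sketch of that debugfs shape, with hypothetical foo_* names standing in for the driver specifics:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

#define FOO_SNAPSHOT_LEN 4096

struct foo {
	struct mutex lock;
	void *snapshot;		/* vzalloc()'d once, freed at teardown */
};

static int foo_snapshot_open(struct inode *inode, struct file *file)
{
	struct foo *foo = inode->i_private;

	mutex_lock(&foo->lock);
	/* best effort: refresh foo->snapshot from hardware if it is up */
	file->private_data = foo;
	mutex_unlock(&foo->lock);

	return 0;	/* open succeeds even when the refresh is skipped */
}

static ssize_t foo_snapshot_read(struct file *file, char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct foo *foo = file->private_data;
	ssize_t ret;

	mutex_lock(&foo->lock);
	ret = simple_read_from_buffer(ubuf, count, ppos,
				      foo->snapshot, FOO_SNAPSHOT_LEN);
	mutex_unlock(&foo->lock);

	return ret;
}

/* No .release: nothing is allocated per open any more. */
static const struct file_operations foo_snapshot_fops = {
	.open = foo_snapshot_open,
	.read = foo_snapshot_read,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};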
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index eab0ab976af2..76eb33679d4b 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1401,6 +1401,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
{},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index b6f064a8d264..7e27a06e5df1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -33,7 +33,6 @@ struct coeff {
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
- TEMP_COMP_CAL = BIT(1),
};
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
break;
- case TEMP_COMP_CAL:
- ath_dbg(common, CALIBRATE,
- "starting Temperature Compensation Calibration\n");
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
- break;
default:
ath_err(common, "Invalid calibration type\n");
break;
@@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Accumulate cal measures for active chains
*/
- if (cur_caldata->calCollect)
- cur_caldata->calCollect(ah);
+ cur_caldata->calCollect(ah);
ah->cal_samples++;
if (ah->cal_samples >= cur_caldata->calNumSamples) {
@@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Process accumulated data
*/
- if (cur_caldata->calPostProc)
- cur_caldata->calPostProc(ah, numChains);
+ cur_caldata->calPostProc(ah, numChains);
/* Calibration has finished. */
caldata->CalValid |= cur_caldata->calType;
@@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
ar9003_hw_iqcalibrate
};
-static const struct ath9k_percal_data temp_cal_single_sample = {
- TEMP_COMP_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
-};
-
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->temp_caldata.calData = &temp_cal_single_sample;
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
@@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
- ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL;
+ ah->supp_cals = IQ_MISMATCH_CAL;
}
#define OFF_UPPER_LT 24
@@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah)
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- INIT_CAL(&ah->temp_caldata);
- INSERT_CAL(ah, &ah->temp_caldata);
-
/* Initialize current pointer to first element in list */
ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2a5d3ad1169c..9cbca1229bac 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -830,7 +830,6 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
struct ath9k_cal_list iq_caldata;
- struct ath9k_cal_list temp_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
struct ath9k_cal_list *cal_list;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b777e1b2f87a..78d9966a3957 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
/* store current 11d setting */
if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
&ifp->vif->is_11d)) {
- supports_11d = false;
+ is_11d = supports_11d = false;
} else {
country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
settings->beacon.tail_len,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index f6591c83d636..affe760c8c22 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -2422,14 +2422,12 @@ int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
*/
if (priv->mac80211_registered) {
char buf[100];
- struct dentry *mac80211_dir, *dev_dir, *root_dir;
+ struct dentry *mac80211_dir, *dev_dir;
dev_dir = dbgfs_dir->d_parent;
- root_dir = dev_dir->d_parent;
mac80211_dir = priv->hw->wiphy->debugfsdir;
- snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name,
- dev_dir->d_name.name);
+ snprintf(buf, 100, "../../%pd2", dev_dir);
if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
goto err;
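This hunk, like the debugfs-vif.c and debugfs.c hunks further down, replaces hand-built chains of d_name.name dereferences with the %pd printk extension, which prints a dentry's name directly; %pd2 and %pd3 print the last two or three path components. A hypothetical helper showing the format family:

#include <linux/dcache.h>
#include <linux/printk.h>

/* Hypothetical illustration: %pd prints the dentry's own name,
 * %pdN the last N components of its path.
 */
static void foo_log_dentry(struct dentry *d)
{
	pr_info("name: %pd\n", d);	/* e.g. "wlan0"      */
	pr_info("path: %pd2\n", d);	/* e.g. "phy0/wlan0" */
}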
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 4fdc3dad3e85..b88e2048ae0b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
ret = iwl_mvm_switch_to_d3(mvm);
if (ret)
return ret;
+ } else {
+ /* In theory, we wouldn't have to stop a running sched
+ * scan in order to start another one (for
+ * net-detect). But in practice this doesn't seem to
+ * work properly, so stop any running sched_scan now.
+ */
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+ if (ret)
+ return ret;
}
/* rfkill release can be either for wowlan or netdetect */
@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
out:
if (ret < 0) {
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
- ieee80211_restart_hw(mvm->hw);
+ if (mvm->restart_fw > 0) {
+ mvm->restart_fw--;
+ ieee80211_restart_hw(mvm->hw);
+ }
iwl_mvm_free_nd(mvm);
}
out_noreset:
@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_update_changed_regdom(mvm);
if (mvm->net_detect) {
+ /* If this is a non-unified image, we restart the FW,
+ * so no need to stop the netdetect scan. If that
+ * fails, continue and try to get the wake-up reasons,
+ * but trigger a HW restart by keeping a failure code
+ * in ret.
+ */
+ if (unified_image)
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+ false);
+
iwl_mvm_query_netdetect_reasons(mvm, vif);
/* has unlocked the mutex, so skip that */
goto out;
@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
struct iwl_mvm *mvm = inode->i_private;
- int remaining_time = 10;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
mvm->d3_test_active = false;
@@ -2282,17 +2305,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_abort_notification_waits(&mvm->notif_wait);
- ieee80211_restart_hw(mvm->hw);
+ if (!unified_image) {
+ int remaining_time = 10;
- /* wait for restart and disconnect all interfaces */
- while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- remaining_time > 0) {
- remaining_time--;
- msleep(1000);
- }
+ ieee80211_restart_hw(mvm->hw);
+
+ /* wait for restart and disconnect all interfaces */
+ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ remaining_time > 0) {
+ remaining_time--;
+ msleep(1000);
+ }
- if (remaining_time == 0)
- IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
+ if (remaining_time == 0)
+ IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
+ }
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 2d6f44fbaf62..f4d75ffe3d8a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1571,8 +1571,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
if (!mvmvif->dbgfs_dir) {
- IWL_ERR(mvm, "Failed to create debugfs directory under %s\n",
- dbgfs_dir->d_name.name);
+ IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
+ dbgfs_dir);
return;
}
@@ -1627,17 +1627,15 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* find
* netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
*/
- snprintf(buf, 100, "../../../%s/%s/%s/%s",
- dbgfs_dir->d_parent->d_parent->d_name.name,
- dbgfs_dir->d_parent->d_name.name,
- dbgfs_dir->d_name.name,
- mvmvif->dbgfs_dir->d_name.name);
+ snprintf(buf, 100, "../../../%pd3/%pd",
+ dbgfs_dir,
+ mvmvif->dbgfs_dir);
mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
mvm->debugfs_dir, buf);
if (!mvmvif->dbgfs_slink)
- IWL_ERR(mvm, "Can't create debugfs symbolic link under %s\n",
- dbgfs_dir->d_name.name);
+ IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n",
+ dbgfs_dir);
return;
err:
IWL_ERR(mvm, "Can't create debugfs entity\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 539d718df797..7b7d2a146e30 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
.data = { &cmd, },
.len = { sizeof(cmd) },
};
- size_t delta, len;
- ssize_t ret;
+ size_t delta;
+ ssize_t ret, len;
hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
DEBUG_GROUP, 0);
@@ -1748,9 +1748,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
* Create a symlink with mac80211. It will be removed when mac80211
* exits (before the opmode exits which removes the target.)

*/
- snprintf(buf, 100, "../../%s/%s",
- dbgfs_dir->d_parent->d_parent->d_name.name,
- dbgfs_dir->d_parent->d_name.name);
+ snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
goto err;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 318efd814037..1db1dc13e988 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
struct iwl_mvm_internal_rxq_notif *notif,
u32 size)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
int ret;
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
}
if (notif->sync)
- ret = wait_event_timeout(notif_waitq,
+ ret = wait_event_timeout(mvm->rx_sync_waitq,
atomic_read(&mvm->queue_sync_counter) == 0,
HZ);
WARN_ON_ONCE(!ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d17cbf603f7c..c60703e0c246 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -937,6 +937,7 @@ struct iwl_mvm {
/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
spinlock_t d0i3_tx_lock;
wait_queue_head_t d0i3_exit_waitq;
+ wait_queue_head_t rx_sync_waitq;
/* BT-Coex */
struct iwl_bt_coex_profile_notif last_bt_notif;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 05fe6dd1a2c8..4d35deb628bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
spin_lock_init(&mvm->refs_lock);
skb_queue_head_init(&mvm->d0i3_tx);
init_waitqueue_head(&mvm->d0i3_exit_waitq);
+ init_waitqueue_head(&mvm->rx_sync_waitq);
atomic_set(&mvm->queue_sync_counter, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a57c6ef5bc14..6c802cee900c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
"Received expired RX queue sync message\n");
return;
}
- atomic_dec(&mvm->queue_sync_counter);
+ if (!atomic_dec_return(&mvm->queue_sync_counter))
+ wake_up(&mvm->rx_sync_waitq);
}
switch (internal_notif->type) {
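Together with the mvm.h and ops.c hunks above, this pairs an atomic in-flight counter with a dedicated wait queue: the notifier sleeps until every RX queue has processed the sync message, and the acknowledgement that drops the counter to zero issues the wake-up (the previous on-stack wait queue was never the one being woken). A generic sketch of the pattern, with hypothetical names:

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct foo {
	atomic_t pending;		/* acks still outstanding */
	wait_queue_head_t waitq;	/* init_waitqueue_head() at start-up */
};

/* Sender: announce n outstanding acks, then sleep for up to a second. */
static long foo_sync(struct foo *foo, int n)
{
	atomic_set(&foo->pending, n);
	/* ... broadcast the sync message to all n receivers here ... */
	return wait_event_timeout(foo->waitq,
				  atomic_read(&foo->pending) == 0, HZ);
}

/* Receiver: the ack that takes the counter to zero wakes the sender. */
static void foo_ack(struct foo *foo)
{
	if (!atomic_dec_return(&foo->pending))
		wake_up(&foo->waitq);
}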
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index f279fdd6eb44..fa9743205491 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
{
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
/* This looks a bit arbitrary, but the idea is that if we run
* out of possible simultaneous scans and the userspace is
* trying to run a scan type that is already running, we
@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EBUSY;
return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
case IWL_MVM_SCAN_NETDETECT:
- /* No need to stop anything for net-detect since the
- * firmware is restarted anyway. This way, any sched
- * scans that were running will be restarted when we
- * resume.
- */
- return 0;
+ /* For non-unified images, there's no need to stop
+ * anything for net-detect since the firmware is
+ * restarted anyway. This way, any sched scans that
+ * were running will be restarted when we resume.
+ */
+ if (!unified_image)
+ return 0;
+
+ /* If this is a unified image and we ran out of scans,
+ * we need to stop something. Prefer stopping regular
+ * scans, because the results are useless at this
+ * point, and we should be able to keep running
+ * another scheduled scan while suspended.
+ */
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
+ true);
+ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
+ true);
+
+ /* fall through, something is wrong if no scan was
+ * running but we ran out of scans.
+ */
default:
WARN_ON(1);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 001be406a3d3..2f8134b2a504 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
#ifdef CONFIG_ACPI
-#define SPL_METHOD "SPLC"
-#define SPL_DOMAINTYPE_MODULE BIT(0)
-#define SPL_DOMAINTYPE_WIFI BIT(1)
-#define SPL_DOMAINTYPE_WIGIG BIT(2)
-#define SPL_DOMAINTYPE_RFEM BIT(3)
+#define ACPI_SPLC_METHOD "SPLC"
+#define ACPI_SPLC_DOMAIN_WIFI (0x07)
-static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
{
- union acpi_object *limits, *domain_type, *power_limit;
-
- if (splx->type != ACPI_TYPE_PACKAGE ||
- splx->package.count != 2 ||
- splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
- splx->package.elements[0].integer.value != 0) {
- IWL_ERR(trans, "Unsupported splx structure\n");
+ union acpi_object *data_pkg, *dflt_pwr_limit;
+ int i;
+
+ /* We need at least two elements, one for the revision and one
+ * for the data itself. Also check that the revision is
+ * supported (currently only revision 0).
+ */
+ if (splc->type != ACPI_TYPE_PACKAGE ||
+ splc->package.count < 2 ||
+ splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splc->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_INFO(trans,
+ "Unsupported structure returned by the SPLC method. Ignoring.\n");
return 0;
}
- limits = &splx->package.elements[1];
- if (limits->type != ACPI_TYPE_PACKAGE ||
- limits->package.count < 2 ||
- limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
- limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
- IWL_ERR(trans, "Invalid limits element\n");
- return 0;
+ /* loop through all the packages to find the one for WiFi */
+ for (i = 1; i < splc->package.count; i++) {
+ union acpi_object *domain;
+
+ data_pkg = &splc->package.elements[i];
+
+ /* Skip anything that is not a package with the right
+ * number of elements (i.e. at least 2 integers).
+ */
+ if (data_pkg->type != ACPI_TYPE_PACKAGE ||
+ data_pkg->package.count < 2 ||
+ data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ continue;
+
+ domain = &data_pkg->package.elements[0];
+ if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
+ break;
+
+ data_pkg = NULL;
}
- domain_type = &limits->package.elements[0];
- power_limit = &limits->package.elements[1];
- if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
- IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
+ if (!data_pkg) {
+ IWL_DEBUG_INFO(trans,
+ "No element for the WiFi domain returned by the SPLC method.\n");
return 0;
}
- return power_limit->integer.value;
+ dflt_pwr_limit = &data_pkg->package.elements[1];
+ return dflt_pwr_limit->integer.value;
}
static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
{
acpi_handle pxsx_handle;
acpi_handle handle;
- struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
pxsx_handle = ACPI_HANDLE(&pdev->dev);
@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
}
/* Get the method's handle */
- status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
+ &handle);
if (ACPI_FAILURE(status)) {
- IWL_DEBUG_INFO(trans, "SPL method not found\n");
+ IWL_DEBUG_INFO(trans, "SPLC method not found\n");
return;
}
/* Call SPLC with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ status = acpi_evaluate_object(handle, NULL, NULL, &splc);
if (ACPI_FAILURE(status)) {
IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
return;
}
- trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
trans->dflt_pwr_limit);
- kfree(splx.pointer);
+ kfree(splc.pointer);
}
#else /* CONFIG_ACPI */
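The rewritten parser treats the SPLC return value as a package whose first element is an integer revision and whose remaining elements are per-domain sub-packages of at least two integers (domain id, then power limit). A condensed sketch of that walk, assuming the same layout and a hypothetical domain constant:

#include <linux/acpi.h>

#define FOO_DOMAIN 0x07		/* hypothetical domain id to match */

static u64 foo_find_limit(union acpi_object *pkg)
{
	int i;

	/* element 0 must be the revision, and only revision 0 is known */
	if (pkg->type != ACPI_TYPE_PACKAGE || pkg->package.count < 2 ||
	    pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    pkg->package.elements[0].integer.value != 0)
		return 0;

	/* remaining elements are (domain, limit, ...) sub-packages */
	for (i = 1; i < pkg->package.count; i++) {
		union acpi_object *sub = &pkg->package.elements[i];

		if (sub->type != ACPI_TYPE_PACKAGE ||
		    sub->package.count < 2 ||
		    sub->package.elements[0].type != ACPI_TYPE_INTEGER ||
		    sub->package.elements[1].type != ACPI_TYPE_INTEGER)
			continue;	/* malformed entry, skip it */

		if (sub->package.elements[0].integer.value == FOO_DOMAIN)
			return sub->package.elements[1].integer.value;
	}

	return 0;	/* no entry for the wanted domain */
}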
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index e9a278b60dfd..5f840f16f40b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -592,6 +592,7 @@ error:
static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, u32 txq_id)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
txq->need_update = false;
@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
return ret;
spin_lock_init(&txq->lock);
+
+ if (txq_id == trans_pcie->cmd_queue) {
+ static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
+
+ lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
+ }
+
__skb_queue_head_init(&txq->overflow_q);
/*
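Every TX queue is initialised through the same function, so lockdep would put all txq->lock instances in one class and report the command queue's nesting against a data queue's lock as a possible self-deadlock; a static lock_class_key gives that one lock its own class. A minimal sketch of the idiom, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_queue {
	spinlock_t lock;
};

static void foo_queue_init(struct foo_queue *q, bool is_cmd_queue)
{
	spin_lock_init(&q->lock);

	if (is_cmd_queue) {
		/* one static key = one distinct lockdep class */
		static struct lock_class_key foo_cmd_queue_lock_class;

		lockdep_set_class(&q->lock, &foo_cmd_queue_lock_class);
	}
}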
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 431f13b4faf6..d3bad5779376 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -826,7 +826,7 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
data->bcn_delta = do_div(delta, bcn_int);
} else {
data->tsf_offset -= delta;
- data->bcn_delta = -do_div(delta, bcn_int);
+ data->bcn_delta = -(s64)do_div(delta, bcn_int);
}
}
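The cast matters because do_div() hands back the remainder as an unsigned 32-bit value: negating it without widening first is evaluated in unsigned arithmetic and yields 2^32 - rem rather than -rem once stored in a 64-bit field. A small illustration of the pitfall and the fix:

#include <asm/div64.h>
#include <linux/types.h>

static s64 foo_neg_remainder(u64 delta, u32 base)
{
	/*
	 * do_div() divides delta in place and returns the remainder as
	 * a u32; plain "-do_div(...)" would wrap to 2^32 - rem, so
	 * widen to s64 before negating.
	 */
	return -(s64)do_div(delta, base);
}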
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 94480123efa3..274dd5a1574a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev.iftype, 0, false);
+ priv->wdev.iftype, 0, NULL, NULL);
while (!skb_queue_empty(&list)) {
struct rx_packet_hdr *rx_hdr;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 1016628926d2..08d587a342d3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -238,7 +238,7 @@ struct rtl8xxxu_rxdesc16 {
u32 pattern1match:1;
u32 pattern0match:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
#if 0
u32 bassn:12;
u32 bavld:1;
@@ -368,7 +368,7 @@ struct rtl8xxxu_rxdesc24 {
u32 ldcp:1;
u32 splcp:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
};
struct rtl8xxxu_txdesc32 {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index df54d27e7851..a793fedc3654 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1461,7 +1461,9 @@ static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv)
int count, ret = 0;
/* Turn off RF */
- rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+ val8 = rtl8xxxu_read8(priv, REG_RF_CTRL);
+ val8 &= ~RF_ENABLE;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
/* Switch DPDT_SEL_P output from register 0x65[2] */
val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
@@ -1593,6 +1595,10 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
val8 |= BIT(5);
rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 6c086b5657e9..02b8ddd98a95 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
/*
* No indication anywhere as to what 0x0790 does. The 2 antenna
* vendor code preserves bits 6-7 here.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index b2d7f6e69667..a5e6ec2152bf 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5197,7 +5197,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
sizeof(struct rtl8xxxu_rxdesc16), 128);
- if (pkt_cnt > 1)
+ /*
+ * Only clone the skb if there's enough data at the end to
+ * at least cover the rx descriptor
+ */
+ if (pkt_cnt > 1 &&
+ urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
next_skb = skb_clone(skb, GFP_ATOMIC);
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5215,7 +5220,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
@@ -5285,7 +5290,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f95760c13c56..8e7f23c11680 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -111,7 +111,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
if (!err)
goto found_alt;
}
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index e7b11b40e68d..f361808def47 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -86,6 +86,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 tid;
+ char *fw_name;
rtl8188ee_bt_reg_init(hw);
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -169,10 +170,10 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin";
+ fw_name = "rtlwifi/rtl8188efw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -284,7 +285,6 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl88e_pci",
- .fw_name = "rtlwifi/rtl8188efw.bin",
.ops = &rtl8188ee_hal_ops,
.mod_params = &rtl88ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 87aa209ae325..8b6e37ce3f66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -96,6 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8192cfwU.bin";
rtl8192ce_bt_reg_init(hw);
@@ -167,15 +168,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
- !IS_92C_SERIAL(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
- else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+ if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8192cfwU_B.bin";
rtlpriv->max_fw_size = 0x4000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -262,7 +260,6 @@ static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92c_pci",
- .fw_name = "rtlwifi/rtl8192cfw.bin",
.ops = &rtl8192ce_hal_ops,
.mod_params = &rtl92ce_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 7c6f7f0d18c6..f953320f0e23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -59,6 +59,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
+ char *fw_name;
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -77,18 +78,18 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
}
if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
!IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
+ fw_name = "rtlwifi/rtl8192cufw_A.bin";
} else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
+ fw_name = "rtlwifi/rtl8192cufw_B.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+ fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
}
/* provide name of alternative file */
rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
- pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware %s\n", fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->fw_name, rtlpriv->io.dev,
+ fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
return err;
}
@@ -187,7 +188,6 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.name = "rtl92c_usb",
- .fw_name = "rtlwifi/rtl8192cufw.bin",
.ops = &rtl8192cu_hal_ops,
.mod_params = &rtl92cu_mod_params,
.usb_interface_cfg = &rtl92cu_interface_cfg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 0538a4d09568..1ebfee18882f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -92,6 +92,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ char *fw_name = "rtlwifi/rtl8192defw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -181,10 +182,10 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = 0x8000;
pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware file %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8192de",
- .fw_name = "rtlwifi/rtl8192defw.bin",
.ops = &rtl8192de_hal_ops,
.mod_params = &rtl92de_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index ac299cbe59b0..46b605de36e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -91,6 +91,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
+ char *fw_name;
rtl92ee_bt_reg_init(hw);
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
@@ -170,11 +171,11 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin";
+ fw_name = "rtlwifi/rtl8192eefw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92ee_pci",
- .fw_name = "rtlwifi/rtl8192eefw.bin",
.ops = &rtl8192ee_hal_ops,
.mod_params = &rtl92ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 5e8e02d5de8a..3e1eaeac4fdc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -89,12 +89,13 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
struct ieee80211_hw *hw = context;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rt_firmware *pfirmware = NULL;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Firmware %s not available\n", fw_name);
rtlpriv->max_fw_size = 0;
return;
}
@@ -117,6 +118,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
u16 earlyrxthreshold = 7;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -214,9 +216,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
sizeof(struct fw_hdr);
pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
- "Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ "Loading firmware %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl92se_fw_cb);
if (err) {
@@ -310,7 +312,6 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = {
.bar_id = 1,
.write_readback = false,
.name = "rtl92s_pci",
- .fw_name = "rtlwifi/rtl8192sefw.bin",
.ops = &rtl8192se_hal_ops,
.mod_params = &rtl92se_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 89c828ad89f4..c51a9e8234e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -94,6 +94,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
int err = 0;
+ char *fw_name = "rtlwifi/rtl8723fw.bin";
rtl8723e_bt_reg_init(hw);
@@ -176,14 +177,12 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- if (IS_VENDOR_8723_A_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin";
- else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin";
+ if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8723fw_B.bin";
rtlpriv->max_fw_size = 0x6000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +279,6 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723e_pci",
- .fw_name = "rtlwifi/rtl8723efw.bin",
.ops = &rtl8723e_hal_ops,
.mod_params = &rtl8723e_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 20b53f035483..847644d1f5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -91,6 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8723befw.bin";
rtl8723be_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -184,8 +185,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
}
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +281,6 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723be_pci",
- .fw_name = "rtlwifi/rtl8723befw.bin",
.ops = &rtl8723be_hal_ops,
.mod_params = &rtl8723be_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 22f687b1f133..297938e0effd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -93,6 +93,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name, *wowlan_fw_name;
rtl8821ae_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -203,17 +204,17 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8812aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8821aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
}
rtlpriv->max_fw_size = 0x8000;
/*load normal firmware*/
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -222,9 +223,9 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
/*load wowlan firmware*/
- pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name);
+ pr_info("Using firmware %s\n", wowlan_fw_name);
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->wowlan_fw_name,
+ wowlan_fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_wowlan_fw_cb);
if (err) {
@@ -320,7 +321,6 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8821ae_pci",
- .fw_name = "rtlwifi/rtl8821aefw.bin",
.ops = &rtl8821ae_hal_ops,
.mod_params = &rtl8821ae_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 595f7d5d091a..dafe486f8448 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2278,9 +2278,7 @@ struct rtl_hal_cfg {
u8 bar_id;
bool write_readback;
char *name;
- char *fw_name;
char *alt_fw_name;
- char *wowlan_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index a6e94b1a12cb..47fe7f96a242 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -391,7 +391,6 @@ static void wl1271_remove(struct sdio_func *func)
pm_runtime_get_noresume(&func->dev);
platform_device_unregister(glue->core);
- kfree(glue);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
index 11e02be9db1a..d49798a46b51 100644
--- a/drivers/net/xen-netback/Makefile
+++ b/drivers/net/xen-netback/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
-xen-netback-y := netback.o xenbus.o interface.o hash.o
+xen-netback-y := netback.o xenbus.o interface.o hash.o rx.o
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index b38fb2cf3364..3ce1f7da8647 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -91,13 +91,6 @@ struct xenvif_rx_meta {
*/
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
-/* It's possible for an skb to have a maximal number of frags
- * but still be less than MAX_BUFFER_OFFSET in size. Thus the
- * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
- * ring slot.
- */
-#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
-
#define NETBACK_INVALID_HANDLE -1
/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
@@ -133,6 +126,15 @@ struct xenvif_stats {
unsigned long tx_frag_overflow;
};
+#define COPY_BATCH_SIZE 64
+
+struct xenvif_copy_state {
+ struct gnttab_copy op[COPY_BATCH_SIZE];
+ RING_IDX idx[COPY_BATCH_SIZE];
+ unsigned int num;
+ struct sk_buff_head *completed;
+};
+
struct xenvif_queue { /* Per-queue data for xenvif */
unsigned int id; /* Queue ID, 0-based */
char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
@@ -189,12 +191,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
unsigned long last_rx_time;
bool stalled;
- struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
-
- /* We create one meta structure per ring request we consume, so
- * the maximum number is the same as the ring size.
- */
- struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
+ struct xenvif_copy_state rx_copy;
/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
unsigned long credit_bytes;
@@ -260,7 +257,6 @@ struct xenvif {
/* Frontend feature information. */
int gso_mask;
- int gso_prefix_mask;
u8 can_sg:1;
u8 ip_csum:1;
@@ -359,6 +355,7 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
+void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
@@ -410,4 +407,8 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
+#ifdef CONFIG_DEBUG_FS
+void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
+#endif
+
#endif /* __XEN_NETBACK__COMMON_H__ */
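The new xenvif_copy_state above gathers up to COPY_BATCH_SIZE grant-copy operations (plus their ring indices) so they can be submitted together instead of one hypercall per fragment, replacing the large worst-case MAX_GRANT_COPY_OPS array. A generic batch-and-flush sketch of that idea, with hypothetical names and a stand-in op type:

#include <linux/types.h>

#define FOO_BATCH_SIZE 64

struct foo_batch {
	int op[FOO_BATCH_SIZE];	/* queued ops; a stand-in element type */
	unsigned int num;	/* slots currently in use */
};

/* Submit everything queued so far in a single expensive call. */
static void foo_batch_flush(struct foo_batch *b)
{
	if (!b->num)
		return;
	/* e.g. one hypercall covering op[0] .. op[num - 1] */
	b->num = 0;
}

/* Queue one op, flushing first if the batch is already full. */
static void foo_batch_add(struct foo_batch *b, int op)
{
	if (b->num == FOO_BATCH_SIZE)
		foo_batch_flush(b);
	b->op[b->num++] = op;
}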
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 613bac057650..e8c5dddc54ba 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -360,6 +360,74 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
+#ifdef CONFIG_DEBUG_FS
+void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
+{
+ unsigned int i;
+
+ switch (vif->hash.alg) {
+ case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
+ seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
+ break;
+
+ case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
+ seq_puts(m, "Hash Algorithm: NONE\n");
+ /* FALLTHRU */
+ default:
+ return;
+ }
+
+ if (vif->hash.flags) {
+ seq_puts(m, "\nHash Flags:\n");
+
+ if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
+ seq_puts(m, "- IPv4\n");
+ if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
+ seq_puts(m, "- IPv4 + TCP\n");
+ if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
+ seq_puts(m, "- IPv6\n");
+ if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
+ seq_puts(m, "- IPv6 + TCP\n");
+ }
+
+ seq_puts(m, "\nHash Key:\n");
+
+ for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
+ unsigned int j, n;
+
+ n = 8;
+ if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
+ n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;
+
+ seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);
+
+ for (j = 0; j < n; j++, i++)
+ seq_printf(m, "%02x ", vif->hash.key[i]);
+
+ seq_puts(m, "\n");
+ }
+
+ if (vif->hash.size != 0) {
+ seq_puts(m, "\nHash Mapping:\n");
+
+ for (i = 0; i < vif->hash.size; ) {
+ unsigned int j, n;
+
+ n = 8;
+ if (i + n >= vif->hash.size)
+ n = vif->hash.size - i;
+
+ seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
+
+ for (j = 0; j < n; j++, i++)
+ seq_printf(m, "%4u ", vif->hash.mapping[i]);
+
+ seq_puts(m, "\n");
+ }
+ }
+}
+#endif /* CONFIG_DEBUG_FS */
+
void xenvif_init_hash(struct xenvif *vif)
{
if (xenvif_hash_cache_size == 0)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fb50c6d5f6c3..74dc2bf71428 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -149,17 +149,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
- if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
- u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
-
- /* Make sure there is no hash information in the socket
- * buffer otherwise it would be incorrectly forwarded
- * to the frontend.
- */
- skb_clear_hash(skb);
-
- return index;
- }
+ if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+ return fallback(dev, skb) % dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);
@@ -208,6 +199,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
cb = XENVIF_RX_CB(skb);
cb->expires = jiffies + vif->drain_timeout;
+ /* If there is no hash algorithm configured then make sure there
+ * is no hash information in the socket buffer otherwise it
+ * would be incorrectly forwarded to the frontend.
+ */
+ if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+ skb_clear_hash(skb);
+
xenvif_rx_queue_tail(queue, skb);
xenvif_kick_thread(queue);
@@ -319,9 +317,9 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
if (!vif->can_sg)
features &= ~NETIF_F_SG;
- if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
+ if (~(vif->gso_mask) & GSO_BIT(TCPV4))
features &= ~NETIF_F_TSO;
- if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+ if (~(vif->gso_mask) & GSO_BIT(TCPV6))
features &= ~NETIF_F_TSO6;
if (!vif->ip_csum)
features &= ~NETIF_F_IP_CSUM;
@@ -467,7 +465,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
dev->features = dev->hw_features | NETIF_F_RXCSUM;
dev->ethtool_ops = &xenvif_ethtool_ops;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3d0c989384b5..47b481095d77 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -106,13 +106,6 @@ static void push_tx_responses(struct xenvif_queue *queue);
static inline int tx_work_todo(struct xenvif_queue *queue);
-static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
- u16 id,
- s8 st,
- u16 offset,
- u16 size,
- u16 flags);
-
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
u16 idx)
{
@@ -155,571 +148,11 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
-static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
-{
- RING_IDX prod, cons;
- struct sk_buff *skb;
- int needed;
-
- skb = skb_peek(&queue->rx_queue);
- if (!skb)
- return false;
-
- needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
- if (skb_is_gso(skb))
- needed++;
- if (skb->sw_hash)
- needed++;
-
- do {
- prod = queue->rx.sring->req_prod;
- cons = queue->rx.req_cons;
-
- if (prod - cons >= needed)
- return true;
-
- queue->rx.sring->req_event = prod + 1;
-
- /* Make sure event is visible before we check prod
- * again.
- */
- mb();
- } while (queue->rx.sring->req_prod != prod);
-
- return false;
-}
-
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&queue->rx_queue.lock, flags);
-
- __skb_queue_tail(&queue->rx_queue, skb);
-
- queue->rx_queue_len += skb->len;
- if (queue->rx_queue_len > queue->rx_queue_max)
- netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
-
- spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
-}
-
-static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
-{
- struct sk_buff *skb;
-
- spin_lock_irq(&queue->rx_queue.lock);
-
- skb = __skb_dequeue(&queue->rx_queue);
- if (skb)
- queue->rx_queue_len -= skb->len;
-
- spin_unlock_irq(&queue->rx_queue.lock);
-
- return skb;
-}
-
-static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
-{
- spin_lock_irq(&queue->rx_queue.lock);
-
- if (queue->rx_queue_len < queue->rx_queue_max)
- netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
-
- spin_unlock_irq(&queue->rx_queue.lock);
-}
-
-
-static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
-{
- struct sk_buff *skb;
- while ((skb = xenvif_rx_dequeue(queue)) != NULL)
- kfree_skb(skb);
-}
-
-static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
-{
- struct sk_buff *skb;
-
- for(;;) {
- skb = skb_peek(&queue->rx_queue);
- if (!skb)
- break;
- if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
- break;
- xenvif_rx_dequeue(queue);
- kfree_skb(skb);
- }
-}
-
-struct netrx_pending_operations {
- unsigned copy_prod, copy_cons;
- unsigned meta_prod, meta_cons;
- struct gnttab_copy *copy;
- struct xenvif_rx_meta *meta;
- int copy_off;
- grant_ref_t copy_gref;
-};
-
-static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
- struct netrx_pending_operations *npo)
-{
- struct xenvif_rx_meta *meta;
- struct xen_netif_rx_request req;
-
- RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
-
- meta = npo->meta + npo->meta_prod++;
- meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
- meta->gso_size = 0;
- meta->size = 0;
- meta->id = req.id;
-
- npo->copy_off = 0;
- npo->copy_gref = req.gref;
-
- return meta;
-}
-
-struct gop_frag_copy {
- struct xenvif_queue *queue;
- struct netrx_pending_operations *npo;
- struct xenvif_rx_meta *meta;
- int head;
- int gso_type;
- int protocol;
- int hash_present;
-
- struct page *page;
-};
-
-static void xenvif_setup_copy_gop(unsigned long gfn,
- unsigned int offset,
- unsigned int *len,
- struct gop_frag_copy *info)
-{
- struct gnttab_copy *copy_gop;
- struct xen_page_foreign *foreign;
- /* Convenient aliases */
- struct xenvif_queue *queue = info->queue;
- struct netrx_pending_operations *npo = info->npo;
- struct page *page = info->page;
-
- BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
-
- if (npo->copy_off == MAX_BUFFER_OFFSET)
- info->meta = get_next_rx_buffer(queue, npo);
-
- if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
- *len = MAX_BUFFER_OFFSET - npo->copy_off;
-
- copy_gop = npo->copy + npo->copy_prod++;
- copy_gop->flags = GNTCOPY_dest_gref;
- copy_gop->len = *len;
-
- foreign = xen_page_foreign(page);
- if (foreign) {
- copy_gop->source.domid = foreign->domid;
- copy_gop->source.u.ref = foreign->gref;
- copy_gop->flags |= GNTCOPY_source_gref;
- } else {
- copy_gop->source.domid = DOMID_SELF;
- copy_gop->source.u.gmfn = gfn;
- }
- copy_gop->source.offset = offset;
-
- copy_gop->dest.domid = queue->vif->domid;
- copy_gop->dest.offset = npo->copy_off;
- copy_gop->dest.u.ref = npo->copy_gref;
-
- npo->copy_off += *len;
- info->meta->size += *len;
-
- if (!info->head)
- return;
-
- /* Leave a gap for the GSO descriptor. */
- if ((1 << info->gso_type) & queue->vif->gso_mask)
- queue->rx.req_cons++;
-
- /* Leave a gap for the hash extra segment. */
- if (info->hash_present)
- queue->rx.req_cons++;
-
- info->head = 0; /* There must be something in this buffer now */
-}
-
-static void xenvif_gop_frag_copy_grant(unsigned long gfn,
- unsigned offset,
- unsigned int len,
- void *data)
-{
- unsigned int bytes;
-
- while (len) {
- bytes = len;
- xenvif_setup_copy_gop(gfn, offset, &bytes, data);
- offset += bytes;
- len -= bytes;
- }
-}
-
-/*
- * Set up the grant operations for this fragment. If it's a flipping
- * interface, we also set up the unmap request from here.
- */
-static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
- struct netrx_pending_operations *npo,
- struct page *page, unsigned long size,
- unsigned long offset, int *head)
-{
- struct gop_frag_copy info = {
- .queue = queue,
- .npo = npo,
- .head = *head,
- .gso_type = XEN_NETIF_GSO_TYPE_NONE,
- /* xenvif_set_skb_hash() will have either set a s/w
- * hash or cleared the hash depending on
- * whether the frontend wants a hash for this skb.
- */
- .hash_present = skb->sw_hash,
- };
- unsigned long bytes;
-
- if (skb_is_gso(skb)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- }
-
- /* Data must not cross a page boundary. */
- BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
-
- info.meta = npo->meta + npo->meta_prod - 1;
-
- /* Skip unused frames from start of page */
- page += offset >> PAGE_SHIFT;
- offset &= ~PAGE_MASK;
-
- while (size > 0) {
- BUG_ON(offset >= PAGE_SIZE);
-
- bytes = PAGE_SIZE - offset;
- if (bytes > size)
- bytes = size;
-
- info.page = page;
- gnttab_foreach_grant_in_range(page, offset, bytes,
- xenvif_gop_frag_copy_grant,
- &info);
- size -= bytes;
- offset = 0;
-
- /* Next page */
- if (size) {
- BUG_ON(!PageCompound(page));
- page++;
- }
- }
-
- *head = info.head;
-}
-
-/*
- * Prepare an SKB to be transmitted to the frontend.
- *
- * This function is responsible for allocating grant operations, meta
- * structures, etc.
- *
- * It returns the number of meta structures consumed. The number of
- * ring slots used is always equal to the number of meta slots used
- * plus the number of GSO descriptors used. Currently, we use either
- * zero GSO descriptors (for non-GSO packets) or one descriptor (for
- * frontend-side LRO).
- */
-static int xenvif_gop_skb(struct sk_buff *skb,
- struct netrx_pending_operations *npo,
- struct xenvif_queue *queue)
-{
- struct xenvif *vif = netdev_priv(skb->dev);
- int nr_frags = skb_shinfo(skb)->nr_frags;
- int i;
- struct xen_netif_rx_request req;
- struct xenvif_rx_meta *meta;
- unsigned char *data;
- int head = 1;
- int old_meta_prod;
- int gso_type;
-
- old_meta_prod = npo->meta_prod;
-
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
- if (skb_is_gso(skb)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- }
-
- /* Set up a GSO prefix descriptor, if necessary */
- if ((1 << gso_type) & vif->gso_prefix_mask) {
- RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
- meta = npo->meta + npo->meta_prod++;
- meta->gso_type = gso_type;
- meta->gso_size = skb_shinfo(skb)->gso_size;
- meta->size = 0;
- meta->id = req.id;
- }
-
- RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
- meta = npo->meta + npo->meta_prod++;
-
- if ((1 << gso_type) & vif->gso_mask) {
- meta->gso_type = gso_type;
- meta->gso_size = skb_shinfo(skb)->gso_size;
- } else {
- meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
- meta->gso_size = 0;
- }
-
- meta->size = 0;
- meta->id = req.id;
- npo->copy_off = 0;
- npo->copy_gref = req.gref;
-
- data = skb->data;
- while (data < skb_tail_pointer(skb)) {
- unsigned int offset = offset_in_page(data);
- unsigned int len = PAGE_SIZE - offset;
-
- if (data + len > skb_tail_pointer(skb))
- len = skb_tail_pointer(skb) - data;
-
- xenvif_gop_frag_copy(queue, skb, npo,
- virt_to_page(data), len, offset, &head);
- data += len;
- }
-
- for (i = 0; i < nr_frags; i++) {
- xenvif_gop_frag_copy(queue, skb, npo,
- skb_frag_page(&skb_shinfo(skb)->frags[i]),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].page_offset,
- &head);
- }
-
- return npo->meta_prod - old_meta_prod;
-}
-
-/*
- * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
- * used to set up the operations on the top of
- * netrx_pending_operations, which have since been done. Check that
- * they didn't give any errors and advance over them.
- */
-static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
- struct netrx_pending_operations *npo)
-{
- struct gnttab_copy *copy_op;
- int status = XEN_NETIF_RSP_OKAY;
- int i;
-
- for (i = 0; i < nr_meta_slots; i++) {
- copy_op = npo->copy + npo->copy_cons++;
- if (copy_op->status != GNTST_okay) {
- netdev_dbg(vif->dev,
- "Bad status %d from copy to DOM%d.\n",
- copy_op->status, vif->domid);
- status = XEN_NETIF_RSP_ERROR;
- }
- }
-
- return status;
-}
-
-static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
- struct xenvif_rx_meta *meta,
- int nr_meta_slots)
-{
- int i;
- unsigned long offset;
-
- /* No fragments used */
- if (nr_meta_slots <= 1)
- return;
-
- nr_meta_slots--;
-
- for (i = 0; i < nr_meta_slots; i++) {
- int flags;
- if (i == nr_meta_slots - 1)
- flags = 0;
- else
- flags = XEN_NETRXF_more_data;
-
- offset = 0;
- make_rx_response(queue, meta[i].id, status, offset,
- meta[i].size, flags);
- }
-}
-
void xenvif_kick_thread(struct xenvif_queue *queue)
{
wake_up(&queue->wq);
}
-static void xenvif_rx_action(struct xenvif_queue *queue)
-{
- struct xenvif *vif = queue->vif;
- s8 status;
- u16 flags;
- struct xen_netif_rx_response *resp;
- struct sk_buff_head rxq;
- struct sk_buff *skb;
- LIST_HEAD(notify);
- int ret;
- unsigned long offset;
- bool need_to_notify = false;
-
- struct netrx_pending_operations npo = {
- .copy = queue->grant_copy_op,
- .meta = queue->meta,
- };
-
- skb_queue_head_init(&rxq);
-
- while (xenvif_rx_ring_slots_available(queue)
- && (skb = xenvif_rx_dequeue(queue)) != NULL) {
- queue->last_rx_time = jiffies;
-
- XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
-
- __skb_queue_tail(&rxq, skb);
- }
-
- BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
-
- if (!npo.copy_prod)
- goto done;
-
- BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
- gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
-
- while ((skb = __skb_dequeue(&rxq)) != NULL) {
- struct xen_netif_extra_info *extra = NULL;
-
- if ((1 << queue->meta[npo.meta_cons].gso_type) &
- vif->gso_prefix_mask) {
- resp = RING_GET_RESPONSE(&queue->rx,
- queue->rx.rsp_prod_pvt++);
-
- resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
-
- resp->offset = queue->meta[npo.meta_cons].gso_size;
- resp->id = queue->meta[npo.meta_cons].id;
- resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
-
- npo.meta_cons++;
- XENVIF_RX_CB(skb)->meta_slots_used--;
- }
-
-
- queue->stats.tx_bytes += skb->len;
- queue->stats.tx_packets++;
-
- status = xenvif_check_gop(vif,
- XENVIF_RX_CB(skb)->meta_slots_used,
- &npo);
-
- if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
- flags = 0;
- else
- flags = XEN_NETRXF_more_data;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
- flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
- else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- /* remote but checksummed. */
- flags |= XEN_NETRXF_data_validated;
-
- offset = 0;
- resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
- status, offset,
- queue->meta[npo.meta_cons].size,
- flags);
-
- if ((1 << queue->meta[npo.meta_cons].gso_type) &
- vif->gso_mask) {
- extra = (struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&queue->rx,
- queue->rx.rsp_prod_pvt++);
-
- resp->flags |= XEN_NETRXF_extra_info;
-
- extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
- extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
- extra->u.gso.pad = 0;
- extra->u.gso.features = 0;
-
- extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
- extra->flags = 0;
- }
-
- if (skb->sw_hash) {
- /* Since the skb got here via xenvif_select_queue()
- * we know that the hash has been re-calculated
- * according to a configuration set by the frontend
- * and therefore we know that it is legitimate to
- * pass it to the frontend.
- */
- if (resp->flags & XEN_NETRXF_extra_info)
- extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
- else
- resp->flags |= XEN_NETRXF_extra_info;
-
- extra = (struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&queue->rx,
- queue->rx.rsp_prod_pvt++);
-
- extra->u.hash.algorithm =
- XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
-
- if (skb->l4_hash)
- extra->u.hash.type =
- skb->protocol == htons(ETH_P_IP) ?
- _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
- _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
- else
- extra->u.hash.type =
- skb->protocol == htons(ETH_P_IP) ?
- _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
- _XEN_NETIF_CTRL_HASH_TYPE_IPV6;
-
- *(uint32_t *)extra->u.hash.value =
- skb_get_hash_raw(skb);
-
- extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
- extra->flags = 0;
- }
-
- xenvif_add_frag_responses(queue, status,
- queue->meta + npo.meta_cons + 1,
- XENVIF_RX_CB(skb)->meta_slots_used);
-
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
-
- need_to_notify |= !!ret;
-
- npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
- dev_kfree_skb(skb);
- }
-
-done:
- if (need_to_notify)
- notify_remote_via_irq(queue->rx_irq);
-}
-
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
int more_to_do;
@@ -1951,29 +1384,6 @@ static void push_tx_responses(struct xenvif_queue *queue)
notify_remote_via_irq(queue->tx_irq);
}
-static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
- u16 id,
- s8 st,
- u16 offset,
- u16 size,
- u16 flags)
-{
- RING_IDX i = queue->rx.rsp_prod_pvt;
- struct xen_netif_rx_response *resp;
-
- resp = RING_GET_RESPONSE(&queue->rx, i);
- resp->offset = offset;
- resp->flags = flags;
- resp->id = id;
- resp->status = (s16)size;
- if (st < 0)
- resp->status = (s16)st;
-
- queue->rx.rsp_prod_pvt = ++i;
-
- return resp;
-}
-
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
@@ -2055,170 +1465,6 @@ err:
return err;
}
-static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
-{
- struct xenvif *vif = queue->vif;
-
- queue->stalled = true;
-
- /* At least one queue has stalled? Disable the carrier. */
- spin_lock(&vif->lock);
- if (vif->stalled_queues++ == 0) {
- netdev_info(vif->dev, "Guest Rx stalled");
- netif_carrier_off(vif->dev);
- }
- spin_unlock(&vif->lock);
-}
-
-static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
-{
- struct xenvif *vif = queue->vif;
-
- queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
- queue->stalled = false;
-
- /* All queues are ready? Enable the carrier. */
- spin_lock(&vif->lock);
- if (--vif->stalled_queues == 0) {
- netdev_info(vif->dev, "Guest Rx ready");
- netif_carrier_on(vif->dev);
- }
- spin_unlock(&vif->lock);
-}
-
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
-{
- RING_IDX prod, cons;
-
- prod = queue->rx.sring->req_prod;
- cons = queue->rx.req_cons;
-
- return !queue->stalled && prod - cons < 1
- && time_after(jiffies,
- queue->last_rx_time + queue->vif->stall_timeout);
-}
-
-static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
-{
- RING_IDX prod, cons;
-
- prod = queue->rx.sring->req_prod;
- cons = queue->rx.req_cons;
-
- return queue->stalled && prod - cons >= 1;
-}
-
-static bool xenvif_have_rx_work(struct xenvif_queue *queue)
-{
- return xenvif_rx_ring_slots_available(queue)
- || (queue->vif->stall_timeout &&
- (xenvif_rx_queue_stalled(queue)
- || xenvif_rx_queue_ready(queue)))
- || kthread_should_stop()
- || queue->vif->disabled;
-}
-
-static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
-{
- struct sk_buff *skb;
- long timeout;
-
- skb = skb_peek(&queue->rx_queue);
- if (!skb)
- return MAX_SCHEDULE_TIMEOUT;
-
- timeout = XENVIF_RX_CB(skb)->expires - jiffies;
- return timeout < 0 ? 0 : timeout;
-}
-
-/* Wait until the guest Rx thread has work.
- *
- * The timeout needs to be adjusted based on the current head of the
- * queue (and not just the head at the beginning). In particular, if
- * the queue is initially empty an infinite timeout is used and this
- * needs to be reduced when a skb is queued.
- *
- * This cannot be done with wait_event_timeout() because it only
- * calculates the timeout once.
- */
-static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
-{
- DEFINE_WAIT(wait);
-
- if (xenvif_have_rx_work(queue))
- return;
-
- for (;;) {
- long ret;
-
- prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
- if (xenvif_have_rx_work(queue))
- break;
- ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
- if (!ret)
- break;
- }
- finish_wait(&queue->wq, &wait);
-}
-
-int xenvif_kthread_guest_rx(void *data)
-{
- struct xenvif_queue *queue = data;
- struct xenvif *vif = queue->vif;
-
- if (!vif->stall_timeout)
- xenvif_queue_carrier_on(queue);
-
- for (;;) {
- xenvif_wait_for_rx_work(queue);
-
- if (kthread_should_stop())
- break;
-
- /* This frontend is found to be rogue, disable it in
- * kthread context. Currently this is only set when
- * netback finds out frontend sends malformed packet,
- * but we cannot disable the interface in softirq
- * context so we defer it here, if this thread is
- * associated with queue 0.
- */
- if (unlikely(vif->disabled && queue->id == 0)) {
- xenvif_carrier_off(vif);
- break;
- }
-
- if (!skb_queue_empty(&queue->rx_queue))
- xenvif_rx_action(queue);
-
- /* If the guest hasn't provided any Rx slots for a
- * while it's probably not responsive, drop the
- * carrier so packets are dropped earlier.
- */
- if (vif->stall_timeout) {
- if (xenvif_rx_queue_stalled(queue))
- xenvif_queue_carrier_off(queue);
- else if (xenvif_rx_queue_ready(queue))
- xenvif_queue_carrier_on(queue);
- }
-
- /* Queued packets may have foreign pages from other
- * domains. These cannot be queued indefinitely as
- * this would starve guests of grant refs and transmit
- * slots.
- */
- xenvif_rx_queue_drop_expired(queue);
-
- xenvif_rx_queue_maybe_wake(queue);
-
- cond_resched();
- }
-
- /* Bin any remaining skbs */
- xenvif_rx_queue_purge(queue);
-
- return 0;
-}
-
static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
/* Dealloc thread must remain running until all inflight
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
new file mode 100644
index 000000000000..b1cf7c6f407a
--- /dev/null
+++ b/drivers/net/xen-netback/rx.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2016 Citrix Systems Inc.
+ * Copyright (c) 2002-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include "common.h"
+
+#include <linux/kthread.h>
+
+#include <xen/xen.h>
+#include <xen/events.h>
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+{
+ RING_IDX prod, cons;
+ struct sk_buff *skb;
+ int needed;
+
+ skb = skb_peek(&queue->rx_queue);
+ if (!skb)
+ return false;
+
+ needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+ if (skb_is_gso(skb))
+ needed++;
+ if (skb->sw_hash)
+ needed++;
+
+ do {
+ prod = queue->rx.sring->req_prod;
+ cons = queue->rx.req_cons;
+
+ if (prod - cons >= needed)
+ return true;
+
+ queue->rx.sring->req_event = prod + 1;
+
+ /* Make sure event is visible before we check prod
+ * again.
+ */
+ mb();
+ } while (queue->rx.sring->req_prod != prod);
+
+ return false;
+}
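
The slot estimate above is one data slot per XEN_PAGE_SIZE of payload plus one extra-info slot each for GSO and the hash; the loop then re-arms req_event and re-checks req_prod so a frontend posting slots between the check and the event write is not missed. A standalone userspace sketch of the slot arithmetic (XEN_PAGE_SIZE, DIV_ROUND_UP and the skb fields here are stand-ins, not the kernel definitions):

    #include <stdbool.h>
    #include <stdio.h>

    #define XEN_PAGE_SIZE 4096u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct fake_skb {
            unsigned int len;       /* total packet bytes */
            bool gso;               /* needs a GSO extra-info slot */
            bool sw_hash;           /* needs a hash extra-info slot */
    };

    static unsigned int rx_slots_needed(const struct fake_skb *skb)
    {
            unsigned int needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);

            if (skb->gso)
                    needed++;       /* one slot for the GSO descriptor */
            if (skb->sw_hash)
                    needed++;       /* one slot for the hash extra */
            return needed;
    }

    int main(void)
    {
            struct fake_skb skb = { .len = 9000, .gso = true, .sw_hash = true };

            /* 9000 bytes -> 3 data slots, +1 GSO extra, +1 hash extra = 5 */
            printf("slots needed: %u\n", rx_slots_needed(&skb));
            return 0;
    }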
+
+void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rx_queue.lock, flags);
+
+ __skb_queue_tail(&queue->rx_queue, skb);
+
+ queue->rx_queue_len += skb->len;
+ if (queue->rx_queue_len > queue->rx_queue_max) {
+ struct net_device *dev = queue->vif->dev;
+
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+ }
+
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+}
+
+static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
+{
+ struct sk_buff *skb;
+
+ spin_lock_irq(&queue->rx_queue.lock);
+
+ skb = __skb_dequeue(&queue->rx_queue);
+ if (skb) {
+ queue->rx_queue_len -= skb->len;
+ if (queue->rx_queue_len < queue->rx_queue_max) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
+ netif_tx_wake_queue(txq);
+ }
+ }
+
+ spin_unlock_irq(&queue->rx_queue.lock);
+
+ return skb;
+}
+
+static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
+{
+ struct sk_buff *skb;
+
+ while ((skb = xenvif_rx_dequeue(queue)) != NULL)
+ kfree_skb(skb);
+}
+
+static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_peek(&queue->rx_queue);
+ if (!skb)
+ break;
+ if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
+ break;
+ xenvif_rx_dequeue(queue);
+ kfree_skb(skb);
+ }
+}
+
+static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
+{
+ unsigned int i;
+ int notify;
+
+ gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
+
+ for (i = 0; i < queue->rx_copy.num; i++) {
+ struct gnttab_copy *op;
+
+ op = &queue->rx_copy.op[i];
+
+ /* If the copy failed, overwrite the status field in
+ * the corresponding response.
+ */
+ if (unlikely(op->status != GNTST_okay)) {
+ struct xen_netif_rx_response *rsp;
+
+ rsp = RING_GET_RESPONSE(&queue->rx,
+ queue->rx_copy.idx[i]);
+ rsp->status = op->status;
+ }
+ }
+
+ queue->rx_copy.num = 0;
+
+ /* Push responses for all completed packets. */
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
+ if (notify)
+ notify_remote_via_irq(queue->rx_irq);
+
+ __skb_queue_purge(queue->rx_copy.completed);
+}
+
+static void xenvif_rx_copy_add(struct xenvif_queue *queue,
+ struct xen_netif_rx_request *req,
+ unsigned int offset, void *data, size_t len)
+{
+ struct gnttab_copy *op;
+ struct page *page;
+ struct xen_page_foreign *foreign;
+
+ if (queue->rx_copy.num == COPY_BATCH_SIZE)
+ xenvif_rx_copy_flush(queue);
+
+ op = &queue->rx_copy.op[queue->rx_copy.num];
+
+ page = virt_to_page(data);
+
+ op->flags = GNTCOPY_dest_gref;
+
+ foreign = xen_page_foreign(page);
+ if (foreign) {
+ op->source.domid = foreign->domid;
+ op->source.u.ref = foreign->gref;
+ op->flags |= GNTCOPY_source_gref;
+ } else {
+ op->source.u.gmfn = virt_to_gfn(data);
+ op->source.domid = DOMID_SELF;
+ }
+
+ op->source.offset = xen_offset_in_page(data);
+ op->dest.u.ref = req->gref;
+ op->dest.domid = queue->vif->domid;
+ op->dest.offset = offset;
+ op->len = len;
+
+ queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
+ queue->rx_copy.num++;
+}
+
+static unsigned int xenvif_gso_type(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ return XEN_NETIF_GSO_TYPE_TCPV4;
+ else
+ return XEN_NETIF_GSO_TYPE_TCPV6;
+ }
+ return XEN_NETIF_GSO_TYPE_NONE;
+}
+
+struct xenvif_pkt_state {
+ struct sk_buff *skb;
+ size_t remaining_len;
+ struct sk_buff *frag_iter;
+ int frag; /* frag == -1 => frag_iter->head */
+ unsigned int frag_offset;
+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+ unsigned int extra_count;
+ unsigned int slot;
+};
+
+static void xenvif_rx_next_skb(struct xenvif_queue *queue,
+ struct xenvif_pkt_state *pkt)
+{
+ struct sk_buff *skb;
+ unsigned int gso_type;
+
+ skb = xenvif_rx_dequeue(queue);
+
+ queue->stats.tx_bytes += skb->len;
+ queue->stats.tx_packets++;
+
+ /* Reset packet state. */
+ memset(pkt, 0, sizeof(struct xenvif_pkt_state));
+
+ pkt->skb = skb;
+ pkt->frag_iter = skb;
+ pkt->remaining_len = skb->len;
+ pkt->frag = -1;
+
+ gso_type = xenvif_gso_type(skb);
+ if ((1 << gso_type) & queue->vif->gso_mask) {
+ struct xen_netif_extra_info *extra;
+
+ extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ extra->u.gso.type = gso_type;
+ extra->u.gso.size = skb_shinfo(skb)->gso_size;
+ extra->u.gso.pad = 0;
+ extra->u.gso.features = 0;
+ extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ extra->flags = 0;
+
+ pkt->extra_count++;
+ }
+
+ if (skb->sw_hash) {
+ struct xen_netif_extra_info *extra;
+
+ extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
+
+ extra->u.hash.algorithm =
+ XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+ if (skb->l4_hash)
+ extra->u.hash.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+ else
+ extra->u.hash.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+ *(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);
+
+ extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+ extra->flags = 0;
+
+ pkt->extra_count++;
+ }
+}
+
+static void xenvif_rx_complete(struct xenvif_queue *queue,
+ struct xenvif_pkt_state *pkt)
+{
+ /* All responses are ready to be pushed. */
+ queue->rx.rsp_prod_pvt = queue->rx.req_cons;
+
+ __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
+}
+
+static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
+{
+ struct sk_buff *frag_iter = pkt->frag_iter;
+ unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;
+
+ pkt->frag++;
+ pkt->frag_offset = 0;
+
+ if (pkt->frag >= nr_frags) {
+ if (frag_iter == pkt->skb)
+ pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
+ else
+ pkt->frag_iter = frag_iter->next;
+
+ pkt->frag = -1;
+ }
+}
+
+static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
+ struct xenvif_pkt_state *pkt,
+ unsigned int offset, void **data,
+ size_t *len)
+{
+ struct sk_buff *frag_iter = pkt->frag_iter;
+ void *frag_data;
+ size_t frag_len, chunk_len;
+
+ BUG_ON(!frag_iter);
+
+ if (pkt->frag == -1) {
+ frag_data = frag_iter->data;
+ frag_len = skb_headlen(frag_iter);
+ } else {
+ skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];
+
+ frag_data = skb_frag_address(frag);
+ frag_len = skb_frag_size(frag);
+ }
+
+ frag_data += pkt->frag_offset;
+ frag_len -= pkt->frag_offset;
+
+ chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
+ chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
+ xen_offset_in_page(frag_data));
+
+ pkt->frag_offset += chunk_len;
+
+ /* Advance to next frag? */
+ if (frag_len == chunk_len)
+ xenvif_rx_next_frag(pkt);
+
+ *data = frag_data;
+ *len = chunk_len;
+}
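
The chunk length above is clamped twice: once to the space left in the destination grant page, and once to the end of the source fragment's page, since a single grant copy may cross neither boundary. A hypothetical userspace model of that clamping:

    #include <stddef.h>
    #include <stdio.h>

    #define XEN_PAGE_SIZE 4096u

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    static size_t chunk_len(size_t frag_len, size_t dst_offset,
                            unsigned long src_addr)
    {
            /* clamp to the space left in the destination slot */
            size_t len = min_sz(frag_len, XEN_PAGE_SIZE - dst_offset);

            /* also clamp to the end of the source page */
            return min_sz(len, XEN_PAGE_SIZE -
                          (src_addr & (XEN_PAGE_SIZE - 1)));
    }

    int main(void)
    {
            /* 6000-byte fragment starting 100 bytes into a source page,
             * copied into a destination slot already 500 bytes full:
             * limited to 4096 - 500 = 3596 by the destination, then to
             * 4096 - 100 = 3996 by the source; result is 3596. */
            printf("%zu\n", chunk_len(6000, 500, 0x1000 + 100));
            return 0;
    }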
+
+static void xenvif_rx_data_slot(struct xenvif_queue *queue,
+ struct xenvif_pkt_state *pkt,
+ struct xen_netif_rx_request *req,
+ struct xen_netif_rx_response *rsp)
+{
+ unsigned int offset = 0;
+ unsigned int flags;
+
+ do {
+ size_t len;
+ void *data;
+
+ xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
+ xenvif_rx_copy_add(queue, req, offset, data, len);
+
+ offset += len;
+ pkt->remaining_len -= len;
+
+ } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);
+
+ if (pkt->remaining_len > 0)
+ flags = XEN_NETRXF_more_data;
+ else
+ flags = 0;
+
+ if (pkt->slot == 0) {
+ struct sk_buff *skb = pkt->skb;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ flags |= XEN_NETRXF_csum_blank |
+ XEN_NETRXF_data_validated;
+ else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ flags |= XEN_NETRXF_data_validated;
+
+ if (pkt->extra_count != 0)
+ flags |= XEN_NETRXF_extra_info;
+ }
+
+ rsp->offset = 0;
+ rsp->flags = flags;
+ rsp->id = req->id;
+ rsp->status = (s16)offset;
+}
+
+static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
+ struct xenvif_pkt_state *pkt,
+ struct xen_netif_rx_request *req,
+ struct xen_netif_rx_response *rsp)
+{
+ struct xen_netif_extra_info *extra = (void *)rsp;
+ unsigned int i;
+
+ pkt->extra_count--;
+
+ for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
+ if (pkt->extras[i].type) {
+ *extra = pkt->extras[i];
+
+ if (pkt->extra_count != 0)
+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+
+ pkt->extras[i].type = 0;
+ return;
+ }
+ }
+ BUG();
+}
+
+void xenvif_rx_skb(struct xenvif_queue *queue)
+{
+ struct xenvif_pkt_state pkt;
+
+ xenvif_rx_next_skb(queue, &pkt);
+
+ queue->last_rx_time = jiffies;
+
+ do {
+ struct xen_netif_rx_request *req;
+ struct xen_netif_rx_response *rsp;
+
+ req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
+ rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);
+
+ /* Extras must go after the first data slot */
+ if (pkt.slot != 0 && pkt.extra_count != 0)
+ xenvif_rx_extra_slot(queue, &pkt, req, rsp);
+ else
+ xenvif_rx_data_slot(queue, &pkt, req, rsp);
+
+ queue->rx.req_cons++;
+ pkt.slot++;
+ } while (pkt.remaining_len > 0 || pkt.extra_count != 0);
+
+ xenvif_rx_complete(queue, &pkt);
+}
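
The loop above emits the first data slot, then any queued extra-info slots, then the remaining data slots; the XEN_NETRXF_extra_info flag set on slot 0 tells the frontend that extras follow. A small standalone sketch of the resulting slot order (the slot counts are illustrative, not taken from the patch):

    #include <stdio.h>

    /* Model: slot 0 is always a data slot; extras follow immediately;
     * remaining data slots come after all extras. */
    int main(void)
    {
            unsigned int remaining_slots = 2;       /* data slots still needed */
            unsigned int extra_count = 1;           /* queued extra-info slots */
            unsigned int slot = 0;

            do {
                    if (slot != 0 && extra_count != 0) {
                            printf("slot %u: extra\n", slot);
                            extra_count--;
                    } else {
                            printf("slot %u: data\n", slot);
                            remaining_slots--;
                    }
                    slot++;
            } while (remaining_slots > 0 || extra_count != 0);
            return 0;
    }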
+
+#define RX_BATCH_SIZE 64
+
+void xenvif_rx_action(struct xenvif_queue *queue)
+{
+ struct sk_buff_head completed_skbs;
+ unsigned int work_done = 0;
+
+ __skb_queue_head_init(&completed_skbs);
+ queue->rx_copy.completed = &completed_skbs;
+
+ while (xenvif_rx_ring_slots_available(queue) &&
+ work_done < RX_BATCH_SIZE) {
+ xenvif_rx_skb(queue);
+ work_done++;
+ }
+
+ /* Flush any pending copies and complete all skbs. */
+ xenvif_rx_copy_flush(queue);
+}
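
Work is consumed in batches of at most RX_BATCH_SIZE packets, with a single gnttab_batch_copy() flush covering the whole batch, so the per-packet hypercall cost is amortised. A minimal userspace model of this batch-and-flush shape (the queue operations are stand-ins for the real ring handling):

    #include <stdbool.h>
    #include <stdio.h>

    #define RX_BATCH_SIZE 64

    /* stand-ins for the real queue operations */
    static bool slots_available(int *work_left) { return *work_left > 0; }
    static void process_one(int *work_left) { (*work_left)--; }
    static void flush_batch(unsigned int done) { printf("flushed %u\n", done); }

    static void rx_action(int *work_left)
    {
            unsigned int work_done = 0;

            while (slots_available(work_left) && work_done < RX_BATCH_SIZE) {
                    process_one(work_left);
                    work_done++;
            }
            /* one flush covers every packet in the batch */
            flush_batch(work_done);
    }

    int main(void)
    {
            int backlog = 150;

            while (backlog > 0)
                    rx_action(&backlog);    /* 64 + 64 + 22 */
            return 0;
    }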
+
+static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+{
+ RING_IDX prod, cons;
+
+ prod = queue->rx.sring->req_prod;
+ cons = queue->rx.req_cons;
+
+ return !queue->stalled &&
+ prod - cons < 1 &&
+ time_after(jiffies,
+ queue->last_rx_time + queue->vif->stall_timeout);
+}
+
+static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
+{
+ RING_IDX prod, cons;
+
+ prod = queue->rx.sring->req_prod;
+ cons = queue->rx.req_cons;
+
+ return queue->stalled && prod - cons >= 1;
+}
+
+static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+{
+ return xenvif_rx_ring_slots_available(queue) ||
+ (queue->vif->stall_timeout &&
+ (xenvif_rx_queue_stalled(queue) ||
+ xenvif_rx_queue_ready(queue))) ||
+ kthread_should_stop() ||
+ queue->vif->disabled;
+}
+
+static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
+{
+ struct sk_buff *skb;
+ long timeout;
+
+ skb = skb_peek(&queue->rx_queue);
+ if (!skb)
+ return MAX_SCHEDULE_TIMEOUT;
+
+ timeout = XENVIF_RX_CB(skb)->expires - jiffies;
+ return timeout < 0 ? 0 : timeout;
+}
+
+/* Wait until the guest Rx thread has work.
+ *
+ * The timeout needs to be adjusted based on the current head of the
+ * queue (and not just the head at the beginning). In particular, if
+ * the queue is initially empty an infinite timeout is used and this
+ * needs to be reduced when a skb is queued.
+ *
+ * This cannot be done with wait_event_timeout() because it only
+ * calculates the timeout once.
+ */
+static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
+{
+ DEFINE_WAIT(wait);
+
+ if (xenvif_have_rx_work(queue))
+ return;
+
+ for (;;) {
+ long ret;
+
+ prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
+ if (xenvif_have_rx_work(queue))
+ break;
+ ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
+ if (!ret)
+ break;
+ }
+ finish_wait(&queue->wq, &wait);
+}
+
+static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
+{
+ struct xenvif *vif = queue->vif;
+
+ queue->stalled = true;
+
+ /* At least one queue has stalled? Disable the carrier. */
+ spin_lock(&vif->lock);
+ if (vif->stalled_queues++ == 0) {
+ netdev_info(vif->dev, "Guest Rx stalled");
+ netif_carrier_off(vif->dev);
+ }
+ spin_unlock(&vif->lock);
+}
+
+static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
+{
+ struct xenvif *vif = queue->vif;
+
+ queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
+ queue->stalled = false;
+
+ /* All queues are ready? Enable the carrier. */
+ spin_lock(&vif->lock);
+ if (--vif->stalled_queues == 0) {
+ netdev_info(vif->dev, "Guest Rx ready");
+ netif_carrier_on(vif->dev);
+ }
+ spin_unlock(&vif->lock);
+}
+
+int xenvif_kthread_guest_rx(void *data)
+{
+ struct xenvif_queue *queue = data;
+ struct xenvif *vif = queue->vif;
+
+ if (!vif->stall_timeout)
+ xenvif_queue_carrier_on(queue);
+
+ for (;;) {
+ xenvif_wait_for_rx_work(queue);
+
+ if (kthread_should_stop())
+ break;
+
+ /* This frontend is found to be rogue; disable it in
+ * kthread context. Currently this is only set when
+ * netback finds that the frontend has sent a malformed
+ * packet, but we cannot disable the interface in softirq
+ * context, so we defer it here, provided this thread is
+ * associated with queue 0.
+ */
+ if (unlikely(vif->disabled && queue->id == 0)) {
+ xenvif_carrier_off(vif);
+ break;
+ }
+
+ if (!skb_queue_empty(&queue->rx_queue))
+ xenvif_rx_action(queue);
+
+ /* If the guest hasn't provided any Rx slots for a
+ * while it's probably not responsive, drop the
+ * carrier so packets are dropped earlier.
+ */
+ if (vif->stall_timeout) {
+ if (xenvif_rx_queue_stalled(queue))
+ xenvif_queue_carrier_off(queue);
+ else if (xenvif_rx_queue_ready(queue))
+ xenvif_queue_carrier_on(queue);
+ }
+
+ /* Queued packets may have foreign pages from other
+ * domains. These cannot be queued indefinitely as
+ * this would starve guests of grant refs and transmit
+ * slots.
+ */
+ xenvif_rx_queue_drop_expired(queue);
+
+ cond_resched();
+ }
+
+ /* Bin any remaining skbs */
+ xenvif_rx_queue_purge(queue);
+
+ return 0;
+}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index daf4c7867102..8674e188b697 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -165,7 +165,7 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
return count;
}
-static int xenvif_dump_open(struct inode *inode, struct file *filp)
+static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
{
int ret;
void *queue = NULL;
@@ -179,13 +179,35 @@ static int xenvif_dump_open(struct inode *inode, struct file *filp)
static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
.owner = THIS_MODULE,
- .open = xenvif_dump_open,
+ .open = xenvif_io_ring_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = xenvif_write_io_ring,
};
+static int xenvif_read_ctrl(struct seq_file *m, void *v)
+{
+ struct xenvif *vif = m->private;
+
+ xenvif_dump_hash_info(vif, m);
+
+ return 0;
+}
+
+static int xenvif_ctrl_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, xenvif_read_ctrl, inode->i_private);
+}
+
+static const struct file_operations xenvif_dbg_ctrl_ops_fops = {
+ .owner = THIS_MODULE,
+ .open = xenvif_ctrl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void xenvif_debugfs_addif(struct xenvif *vif)
{
struct dentry *pfile;
@@ -210,6 +232,17 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
pr_warn("Creation of io_ring file returned %ld!\n",
PTR_ERR(pfile));
}
+
+ if (vif->ctrl_irq) {
+ pfile = debugfs_create_file("ctrl",
+ S_IRUSR,
+ vif->xenvif_dbg_root,
+ vif,
+ &xenvif_dbg_ctrl_ops_fops);
+ if (IS_ERR_OR_NULL(pfile))
+ pr_warn("Creation of ctrl file returned %ld!\n",
+ PTR_ERR(pfile));
+ }
} else
netdev_warn(vif->dev,
"Creation of vif debugfs dir returned %ld!\n",
@@ -1135,7 +1168,6 @@ static int read_xenbus_vif_flags(struct backend_info *be)
vif->can_sg = !!val;
vif->gso_mask = 0;
- vif->gso_prefix_mask = 0;
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
"%d", &val) < 0)
@@ -1143,32 +1175,12 @@ static int read_xenbus_vif_flags(struct backend_info *be)
if (val)
vif->gso_mask |= GSO_BIT(TCPV4);
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
- "%d", &val) < 0)
- val = 0;
- if (val)
- vif->gso_prefix_mask |= GSO_BIT(TCPV4);
-
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
"%d", &val) < 0)
val = 0;
if (val)
vif->gso_mask |= GSO_BIT(TCPV6);
- if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
- "%d", &val) < 0)
- val = 0;
- if (val)
- vif->gso_prefix_mask |= GSO_BIT(TCPV6);
-
- if (vif->gso_mask & vif->gso_prefix_mask) {
- xenbus_dev_fatal(dev, err,
- "%s: gso and gso prefix flags are not "
- "mutually exclusive",
- dev->otherend);
- return -EOPNOTSUPP;
- }
-
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
"%d", &val) < 0)
val = 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e17879dd5d5a..bf2744e1e3db 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx_skbs[id] = skb;
ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
- BUG_ON((signed short)ref < 0);
+ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
queue->grant_rx_ref[id] = ref;
page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
+ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
gfn, GNTMAP_readonly);
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 83deda4bb4d6..6f9563a96488 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -133,7 +133,7 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
return -ENOMEM;
bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length);
- if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+ if (bytes_recv < 0 || bytes_recv < if_version_length) {
pr_err("Could not read IF version\n");
r = -EIO;
goto err;
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 0d5c29ae51de..7310a261c858 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -112,17 +112,17 @@ MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
module_param_named(xeon_b2b_usd_bar4_addr64,
xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
"XEON B2B USD BAR 4 64-bit address");
module_param_named(xeon_b2b_usd_bar4_addr32,
xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
"XEON B2B USD split-BAR 4 32-bit address");
module_param_named(xeon_b2b_usd_bar5_addr32,
xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
"XEON B2B USD split-BAR 5 32-bit address");
module_param_named(xeon_b2b_dsd_bar2_addr64,
@@ -132,17 +132,17 @@ MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
module_param_named(xeon_b2b_dsd_bar4_addr64,
xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
"XEON B2B DSD BAR 4 64-bit address");
module_param_named(xeon_b2b_dsd_bar4_addr32,
xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
"XEON B2B DSD split-BAR 4 32-bit address");
module_param_named(xeon_b2b_dsd_bar5_addr32,
xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
-MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
+MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
"XEON B2B DSD split-BAR 5 32-bit address");
#ifndef ioread64
@@ -1755,6 +1755,8 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
XEON_B2B_MIN_SIZE);
if (!ndev->peer_mmio)
return -EIO;
+
+ ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
}
return 0;
@@ -2019,6 +2021,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
goto err_mmio;
}
ndev->peer_mmio = ndev->self_mmio;
+ ndev->peer_addr = pci_resource_start(pdev, 0);
return 0;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 8601c10acf74..4eb8adb34508 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -257,7 +257,7 @@ enum {
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
#define DMA_RETRIES 20
-#define DMA_OUT_RESOURCE_TO 50
+#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
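
The timeout macro is passed to jiffies-based waits, so the raw 50 previously meant 50 ticks (an HZ-dependent duration) rather than the intended 50 ms. A standalone sketch of the conversion, assuming HZ=250 for illustration (msecs_to_jiffies here is a simplified model, not the kernel implementation):

    #include <stdio.h>

    #define HZ 250
    #define MSEC_PER_SEC 1000

    /* simplified model of the kernel's msecs_to_jiffies() */
    static unsigned long msecs_to_jiffies(unsigned int m)
    {
            return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
    }

    int main(void)
    {
            /* a raw "50" would mean 50 ticks = 200 ms at HZ=250;
             * converting first yields the intended 50 ms (13 ticks,
             * rounded up) */
            printf("50 ms -> %lu jiffies\n", msecs_to_jiffies(50));
            return 0;
    }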
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 6a50f20bf1cd..e75d4fdc0866 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -72,7 +72,7 @@
#define MAX_THREADS 32
#define MAX_TEST_SIZE SZ_1M
#define MAX_SRCS 32
-#define DMA_OUT_RESOURCE_TO 50
+#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
#define DMA_RETRIES 20
#define SZ_4G (1ULL << 32)
#define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */
@@ -589,7 +589,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
return -ENOMEM;
if (mutex_is_locked(&perf->run_mutex)) {
- out_off = snprintf(buf, 64, "running\n");
+ out_off = scnprintf(buf, 64, "running\n");
goto read_from_buf;
}
@@ -600,14 +600,14 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
break;
if (pctx->status) {
- out_off += snprintf(buf + out_off, 1024 - out_off,
+ out_off += scnprintf(buf + out_off, 1024 - out_off,
"%d: error %d\n", i,
pctx->status);
continue;
}
rate = div64_u64(pctx->copied, pctx->diff_us);
- out_off += snprintf(buf + out_off, 1024 - out_off,
+ out_off += scnprintf(buf + out_off, 1024 - out_off,
"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
i, pctx->copied, pctx->diff_us, rate);
}
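
scnprintf() returns the number of bytes actually stored, while snprintf() returns the length that would have been written had the buffer been large enough, so accumulating snprintf() return values into out_off can push the offset past the end of buf. A userspace illustration (my_scnprintf is a simplified stand-in for the kernel's scnprintf):

    #include <stdarg.h>
    #include <stdio.h>

    /* minimal userspace model of the kernel's scnprintf(): returns
     * bytes actually written, never more than size - 1 */
    static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
            va_list args;
            int i;

            va_start(args, fmt);
            i = vsnprintf(buf, size, fmt, args);
            va_end(args);

            if (i >= (int)size)
                    i = size ? (int)size - 1 : 0;
            return i < 0 ? 0 : i;
    }

    int main(void)
    {
            char buf[8];

            /* snprintf reports 26 even though only 7 bytes fit ... */
            int would_be = snprintf(buf, sizeof(buf),
                                    "abcdefghijklmnopqrstuvwxyz");
            /* ... scnprintf reports the 7 bytes actually stored */
            int written = my_scnprintf(buf, sizeof(buf),
                                       "abcdefghijklmnopqrstuvwxyz");

            printf("snprintf=%d scnprintf=%d\n", would_be, written);
            return 0;
    }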
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 7d311799fca1..435861189d97 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -88,7 +88,7 @@ MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
static unsigned long db_init = 0x7;
module_param(db_init, ulong, 0644);
-MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer");
+MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
struct pp_ctx {
struct ntb_dev *ntb;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 935866fe5ec2..a8b6949a8778 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -217,6 +217,8 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
return rc;
if (cmd_rc < 0)
return cmd_rc;
+
+ nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 4d7bbd2df5c0..7ceba08774b6 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -547,11 +547,12 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
-static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
+ gfp_t flags)
{
struct nd_poison *pl;
- pl = kzalloc(sizeof(*pl), GFP_KERNEL);
+ pl = kzalloc(sizeof(*pl), flags);
if (!pl)
return -ENOMEM;
@@ -567,7 +568,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
struct nd_poison *pl;
if (list_empty(&nvdimm_bus->poison_list))
- return add_poison(nvdimm_bus, addr, length);
+ return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
/*
* There is a chance this is a duplicate, check for those first.
@@ -587,7 +588,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
* as any overlapping ranges will get resolved when the list is consumed
* and converted to badblocks
*/
- return add_poison(nvdimm_bus, addr, length);
+ return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
@@ -602,6 +603,70 @@ int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
+void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+ phys_addr_t start, unsigned int len)
+{
+ struct list_head *poison_list = &nvdimm_bus->poison_list;
+ u64 clr_end = start + len - 1;
+ struct nd_poison *pl, *next;
+
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ WARN_ON_ONCE(list_empty(poison_list));
+
+ /*
+ * [start, clr_end] is the poison interval being cleared.
+ * [pl->start, pl_end] is the poison_list entry we're comparing
+ * the above interval against. The poison list entry may need
+ * to be modified (update either start or length), deleted, or
+ * split into two based on the overlap characteristics
+ */
+
+ list_for_each_entry_safe(pl, next, poison_list, list) {
+ u64 pl_end = pl->start + pl->length - 1;
+
+ /* Skip intervals with no intersection */
+ if (pl_end < start)
+ continue;
+ if (pl->start > clr_end)
+ continue;
+ /* Delete completely overlapped poison entries */
+ if ((pl->start >= start) && (pl_end <= clr_end)) {
+ list_del(&pl->list);
+ kfree(pl);
+ continue;
+ }
+ /* Adjust start point of partially cleared entries */
+ if ((start <= pl->start) && (clr_end > pl->start)) {
+ pl->length -= clr_end - pl->start + 1;
+ pl->start = clr_end + 1;
+ continue;
+ }
+ /* Adjust pl->length for partial clearing at the tail end */
+ if ((pl->start < start) && (pl_end <= clr_end)) {
+ /* pl->start remains the same */
+ pl->length = start - pl->start;
+ continue;
+ }
+ /*
+ * If clearing in the middle of an entry, we split it into
+ * two by modifying the current entry to represent one half of
+ * the split, and adding a new entry for the second half.
+ */
+ if ((pl->start < start) && (pl_end > clr_end)) {
+ u64 new_start = clr_end + 1;
+ u64 new_len = pl_end - new_start + 1;
+
+ /* Add new entry covering the right half */
+ add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
+ /* Adjust this entry to cover the left half */
+ pl->length = start - pl->start;
+ continue;
+ }
+ }
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);
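
The list walk above handles four overlap cases between the cleared range and each poison entry: full coverage deletes the entry, overlap at the head advances its start, overlap at the tail shortens its length, and a clear strictly inside an entry splits it in two. A hypothetical standalone model of those cases (the start/length values are illustrative):

    #include <stdio.h>

    struct span { unsigned long long start, length; };

    /* Clear [clr_start, clr_end] from one poison span; returns the
     * number of resulting spans (0 = deleted, 1 = trimmed, 2 = split). */
    static int clear_span(struct span *pl, struct span *right,
                          unsigned long long clr_start,
                          unsigned long long clr_end)
    {
            unsigned long long pl_end = pl->start + pl->length - 1;

            if (pl_end < clr_start || pl->start > clr_end)
                    return 1;                       /* no intersection */
            if (pl->start >= clr_start && pl_end <= clr_end)
                    return 0;                       /* fully covered: delete */
            if (clr_start <= pl->start) {           /* trim from the left */
                    pl->length -= clr_end - pl->start + 1;
                    pl->start = clr_end + 1;
                    return 1;
            }
            if (pl_end <= clr_end) {                /* trim from the right */
                    pl->length = clr_start - pl->start;
                    return 1;
            }
            /* clearing the middle: split into left and right halves */
            right->start = clr_end + 1;
            right->length = pl_end - right->start + 1;
            pl->length = clr_start - pl->start;
            return 2;
    }

    int main(void)
    {
            struct span pl = { .start = 100, .length = 100 }, right;

            /* clear [120, 149] out of [100, 199] -> [100,119] + [150,199] */
            int n = clear_span(&pl, &right, 120, 149);

            printf("%d spans: [%llu,+%llu) [%llu,+%llu)\n", n,
                   pl.start, pl.length, right.start, right.length);
            return 0;
    }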
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 71d12bb67339..619834e144d1 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -26,6 +26,14 @@ static int nvdimm_probe(struct device *dev)
struct nvdimm_drvdata *ndd;
int rc;
+ rc = nvdimm_check_config_data(dev);
+ if (rc) {
+ /* not required for a non-aliased nvdimm, e.g. NVDIMM-N */
+ if (rc == -ENOTTY)
+ rc = 0;
+ return rc;
+ }
+
ndd = kzalloc(sizeof(*ndd), GFP_KERNEL);
if (!ndd)
return -ENOMEM;
@@ -72,6 +80,9 @@ static int nvdimm_remove(struct device *dev)
{
struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
+ if (!ndd)
+ return 0;
+
nvdimm_bus_lock(dev);
dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index d9bba5edd8dc..d614493ad5ac 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -28,28 +28,30 @@ static DEFINE_IDA(dimm_ida);
* Retrieve bus and dimm handle and return if this bus supports
* get_config_data commands
*/
-static int __validate_dimm(struct nvdimm_drvdata *ndd)
+int nvdimm_check_config_data(struct device *dev)
{
- struct nvdimm *nvdimm;
-
- if (!ndd)
- return -EINVAL;
-
- nvdimm = to_nvdimm(ndd->dev);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
- if (!nvdimm->cmd_mask)
- return -ENXIO;
- if (!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask))
- return -ENXIO;
+ if (!nvdimm->cmd_mask ||
+ !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
+ if (nvdimm->flags & NDD_ALIASING)
+ return -ENXIO;
+ else
+ return -ENOTTY;
+ }
return 0;
}
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
- int rc = __validate_dimm(ndd);
+ int rc;
- if (rc && ndd)
+ if (!ndd)
+ return -EINVAL;
+
+ rc = nvdimm_check_config_data(ndd->dev);
+ if (rc)
dev_dbg(ndd->dev, "%pf: %s error: %d\n",
__builtin_return_address(0), __func__, rc);
return rc;
@@ -263,6 +265,12 @@ const char *nvdimm_name(struct nvdimm *nvdimm)
}
EXPORT_SYMBOL_GPL(nvdimm_name);
+struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
+{
+ return &nvdimm->dev.kobj;
+}
+EXPORT_SYMBOL_GPL(nvdimm_kobj);
+
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
return nvdimm->cmd_mask;
@@ -378,40 +386,166 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
}
EXPORT_SYMBOL_GPL(nvdimm_create);
+int alias_dpa_busy(struct device *dev, void *data)
+{
+ resource_size_t map_end, blk_start, new, busy;
+ struct blk_alloc_info *info = data;
+ struct nd_mapping *nd_mapping;
+ struct nd_region *nd_region;
+ struct nvdimm_drvdata *ndd;
+ struct resource *res;
+ int i;
+
+ if (!is_nd_pmem(dev))
+ return 0;
+
+ nd_region = to_nd_region(dev);
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ nd_mapping = &nd_region->mapping[i];
+ if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
+ break;
+ }
+
+ if (i >= nd_region->ndr_mappings)
+ return 0;
+
+ ndd = to_ndd(nd_mapping);
+ map_end = nd_mapping->start + nd_mapping->size - 1;
+ blk_start = nd_mapping->start;
+
+ /*
+ * In the allocation case ->res is set to free space that we are
+ * looking to validate against PMEM aliasing collision rules
+ * (i.e. BLK is allocated after all aliased PMEM).
+ */
+ if (info->res) {
+ if (info->res->start >= nd_mapping->start
+ && info->res->start < map_end)
+ /* pass */;
+ else
+ return 0;
+ }
+
+ retry:
+ /*
+ * Find the free dpa from the end of the last pmem allocation to
+ * the end of the interleave-set mapping that is not already
+ * covered by a blk allocation.
+ */
+ busy = 0;
+ for_each_dpa_resource(ndd, res) {
+ if ((res->start >= blk_start && res->start < map_end)
+ || (res->end >= blk_start
+ && res->end <= map_end)) {
+ if (strncmp(res->name, "pmem", 4) == 0) {
+ new = max(blk_start, min(map_end + 1,
+ res->end + 1));
+ if (new != blk_start) {
+ blk_start = new;
+ goto retry;
+ }
+ } else
+ busy += min(map_end, res->end)
+ - max(nd_mapping->start, res->start) + 1;
+ } else if (nd_mapping->start > res->start
+ && map_end < res->end) {
+ /* total eclipse of the PMEM region mapping */
+ busy += nd_mapping->size;
+ break;
+ }
+ }
+
+ /* update the free space range with the probed blk_start */
+ if (info->res && blk_start > info->res->start) {
+ info->res->start = max(info->res->start, blk_start);
+ if (info->res->start > info->res->end)
+ info->res->end = info->res->start - 1;
+ return 1;
+ }
+
+ info->available -= blk_start - nd_mapping->start + busy;
+
+ return 0;
+}
+
+static int blk_dpa_busy(struct device *dev, void *data)
+{
+ struct blk_alloc_info *info = data;
+ struct nd_mapping *nd_mapping;
+ struct nd_region *nd_region;
+ resource_size_t map_end;
+ int i;
+
+ if (!is_nd_pmem(dev))
+ return 0;
+
+ nd_region = to_nd_region(dev);
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ nd_mapping = &nd_region->mapping[i];
+ if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
+ break;
+ }
+
+ if (i >= nd_region->ndr_mappings)
+ return 0;
+
+ map_end = nd_mapping->start + nd_mapping->size - 1;
+ if (info->res->start >= nd_mapping->start
+ && info->res->start < map_end) {
+ if (info->res->end <= map_end) {
+ info->busy = 0;
+ return 1;
+ } else {
+ info->busy -= info->res->end - map_end;
+ return 0;
+ }
+ } else if (info->res->end >= nd_mapping->start
+ && info->res->end <= map_end) {
+ info->busy -= nd_mapping->start - info->res->start;
+ return 0;
+ } else {
+ info->busy -= nd_mapping->size;
+ return 0;
+ }
+}
+
/**
* nd_blk_available_dpa - account the unused dpa of BLK region
* @nd_mapping: container of dpa-resource-root + labels
*
- * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges.
+ * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
+ * we arrange for them never to start at a lower dpa than the last
+ * PMEM allocation in an aliased region.
*/
-resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping)
+resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+ struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- resource_size_t map_end, busy = 0, available;
+ struct blk_alloc_info info = {
+ .nd_mapping = nd_mapping,
+ .available = nd_mapping->size,
+ .res = NULL,
+ };
struct resource *res;
if (!ndd)
return 0;
- map_end = nd_mapping->start + nd_mapping->size - 1;
- for_each_dpa_resource(ndd, res)
- if (res->start >= nd_mapping->start && res->start < map_end) {
- resource_size_t end = min(map_end, res->end);
+ device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
- busy += end - res->start + 1;
- } else if (res->end >= nd_mapping->start
- && res->end <= map_end) {
- busy += res->end - nd_mapping->start;
- } else if (nd_mapping->start > res->start
- && nd_mapping->start < res->end) {
- /* total eclipse of the BLK region mapping */
- busy += nd_mapping->size;
- }
+ /* now account for busy blk allocations in unaliased dpa */
+ for_each_dpa_resource(ndd, res) {
+ if (strncmp(res->name, "blk", 3) != 0)
+ continue;
- available = map_end - nd_mapping->start + 1;
- if (busy < available)
- return available - busy;
- return 0;
+ info.res = res;
+ info.busy = resource_size(res);
+ device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
+ info.available -= info.busy;
+ }
+
+ return info.available;
}
/**
@@ -443,21 +577,16 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
map_start = nd_mapping->start;
map_end = map_start + nd_mapping->size - 1;
blk_start = max(map_start, map_end + 1 - *overlap);
- for_each_dpa_resource(ndd, res)
+ for_each_dpa_resource(ndd, res) {
if (res->start >= map_start && res->start < map_end) {
if (strncmp(res->name, "blk", 3) == 0)
- blk_start = min(blk_start, res->start);
- else if (res->start != map_start) {
+ blk_start = min(blk_start,
+ max(map_start, res->start));
+ else if (res->end > map_end) {
reason = "misaligned to iset";
goto err;
- } else {
- if (busy) {
- reason = "duplicate overlapping PMEM reservations?";
- goto err;
- }
+ } else
busy += resource_size(res);
- continue;
- }
} else if (res->end >= map_start && res->end <= map_end) {
if (strncmp(res->name, "blk", 3) == 0) {
/*
@@ -466,15 +595,14 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
* be used for BLK.
*/
blk_start = map_start;
- } else {
- reason = "misaligned to iset";
- goto err;
- }
+ } else
+ busy += resource_size(res);
} else if (map_start > res->start && map_start < res->end) {
/* total eclipse of the mapping */
busy += nd_mapping->size;
blk_start = map_start;
}
+ }
*overlap = map_end + 1 - blk_start;
available = blk_start - map_start;
@@ -483,10 +611,6 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
return 0;
err:
- /*
- * Something is wrong, PMEM must align with the start of the
- * interleave set, and there can only be one allocation per set.
- */
nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
return 0;
}
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 96526dcfdd37..fac7cabe8f56 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -494,11 +494,13 @@ static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos)
{
- u64 cookie = nd_region_interleave_set_cookie(nd_region), rawsize;
+ u64 cookie = nd_region_interleave_set_cookie(nd_region);
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- struct nd_namespace_label *victim_label;
+ struct nd_label_ent *label_ent, *victim = NULL;
struct nd_namespace_label *nd_label;
struct nd_namespace_index *nsindex;
+ struct nd_label_id label_id;
+ struct resource *res;
unsigned long *free;
u32 nslot, slot;
size_t offset;
@@ -507,6 +509,16 @@ static int __pmem_label_update(struct nd_region *nd_region,
if (!preamble_next(ndd, &nsindex, &free, &nslot))
return -ENXIO;
+ nd_label_gen_id(&label_id, nspm->uuid, 0);
+ for_each_dpa_resource(ndd, res)
+ if (strcmp(res->name, label_id.id) == 0)
+ break;
+
+ if (!res) {
+ WARN_ON_ONCE(1);
+ return -ENXIO;
+ }
+
/* allocate and write the label to the staging (next) index */
slot = nd_label_alloc_slot(ndd);
if (slot == UINT_MAX)
@@ -522,11 +534,10 @@ static int __pmem_label_update(struct nd_region *nd_region,
nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
nd_label->position = __cpu_to_le16(pos);
nd_label->isetcookie = __cpu_to_le64(cookie);
- rawsize = div_u64(resource_size(&nspm->nsio.res),
- nd_region->ndr_mappings);
- nd_label->rawsize = __cpu_to_le64(rawsize);
- nd_label->dpa = __cpu_to_le64(nd_mapping->start);
+ nd_label->rawsize = __cpu_to_le64(resource_size(res));
+ nd_label->dpa = __cpu_to_le64(res->start);
nd_label->slot = __cpu_to_le32(slot);
+ nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__);
/* update label */
offset = nd_label_offset(ndd, nd_label);
@@ -536,38 +547,43 @@ static int __pmem_label_update(struct nd_region *nd_region,
return rc;
/* Garbage collect the previous label */
- victim_label = nd_mapping->labels[0];
- if (victim_label) {
- slot = to_slot(ndd, victim_label);
- nd_label_free_slot(ndd, slot);
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+ if (!label_ent->label)
+ continue;
+ if (memcmp(nspm->uuid, label_ent->label->uuid,
+ NSLABEL_UUID_LEN) != 0)
+ continue;
+ victim = label_ent;
+ list_move_tail(&victim->list, &nd_mapping->labels);
+ break;
+ }
+ if (victim) {
+ slot = to_slot(ndd, victim->label);
 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
+ nd_label_free_slot(ndd, slot);
+ victim->label = NULL;
}
/* update index */
rc = nd_label_write_index(ndd, ndd->ns_next,
nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
- if (rc < 0)
- return rc;
-
- nd_mapping->labels[0] = nd_label;
-
- return 0;
-}
-
-static void del_label(struct nd_mapping *nd_mapping, int l)
-{
- struct nd_namespace_label *next_label, *nd_label;
- struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- unsigned int slot;
- int j;
-
- nd_label = nd_mapping->labels[l];
- slot = to_slot(ndd, nd_label);
- dev_vdbg(ndd->dev, "%s: clear: %d\n", __func__, slot);
+ if (rc == 0) {
+ list_for_each_entry(label_ent, &nd_mapping->labels, list)
+ if (!label_ent->label) {
+ label_ent->label = nd_label;
+ nd_label = NULL;
+ break;
+ }
+ dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
+ "failed to track label: %d\n",
+ to_slot(ndd, nd_label));
+ if (nd_label)
+ rc = -ENXIO;
+ }
+ mutex_unlock(&nd_mapping->lock);
- for (j = l; (next_label = nd_mapping->labels[j + 1]); j++)
- nd_mapping->labels[j] = next_label;
- nd_mapping->labels[j] = NULL;
+ return rc;
}
static bool is_old_resource(struct resource *res, struct resource **list, int n)
@@ -607,14 +623,16 @@ static int __blk_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
int num_labels)
{
- int i, l, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
+ int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_label *nd_label;
+ struct nd_label_ent *label_ent, *e;
struct nd_namespace_index *nsindex;
unsigned long *free, *victim_map = NULL;
struct resource *res, **old_res_list;
struct nd_label_id label_id;
u8 uuid[NSLABEL_UUID_LEN];
+ LIST_HEAD(list);
u32 nslot, slot;
if (!preamble_next(ndd, &nsindex, &free, &nslot))
@@ -736,15 +754,22 @@ static int __blk_label_update(struct nd_region *nd_region,
* entries in nd_mapping->labels
*/
nlabel = 0;
- for_each_label(l, nd_label, nd_mapping->labels) {
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+ nd_label = label_ent->label;
+ if (!nd_label)
+ continue;
nlabel++;
memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
continue;
nlabel--;
- del_label(nd_mapping, l);
- l--; /* retry with the new label at this index */
+ list_move(&label_ent->list, &list);
+ label_ent->label = NULL;
}
+ list_splice_tail_init(&list, &nd_mapping->labels);
+ mutex_unlock(&nd_mapping->lock);
+
if (nlabel + nsblk->num_resources > num_labels) {
/*
* Bug, we can't end up with more resources than
@@ -755,6 +780,15 @@ static int __blk_label_update(struct nd_region *nd_region,
goto out;
}
+ mutex_lock(&nd_mapping->lock);
+ label_ent = list_first_entry_or_null(&nd_mapping->labels,
+ typeof(*label_ent), list);
+ if (!label_ent) {
+ WARN_ON(1);
+ mutex_unlock(&nd_mapping->lock);
+ rc = -ENXIO;
+ goto out;
+ }
for_each_clear_bit_le(slot, free, nslot) {
nd_label = nd_label_base(ndd) + slot;
memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
@@ -762,11 +796,19 @@ static int __blk_label_update(struct nd_region *nd_region,
continue;
res = to_resource(ndd, nd_label);
res->flags &= ~DPA_RESOURCE_ADJUSTED;
- dev_vdbg(&nsblk->common.dev, "assign label[%d] slot: %d\n",
- l, slot);
- nd_mapping->labels[l++] = nd_label;
+ dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
+ list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
+ if (label_ent->label)
+ continue;
+ label_ent->label = nd_label;
+ nd_label = NULL;
+ break;
+ }
+ if (nd_label)
+ dev_WARN(&nsblk->common.dev,
+ "failed to track label slot%d\n", slot);
}
- nd_mapping->labels[l] = NULL;
+ mutex_unlock(&nd_mapping->lock);
out:
kfree(old_res_list);
@@ -788,32 +830,28 @@ static int __blk_label_update(struct nd_region *nd_region,
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
- int i, l, old_num_labels = 0;
+ int i, old_num_labels = 0;
+ struct nd_label_ent *label_ent;
struct nd_namespace_index *nsindex;
- struct nd_namespace_label *nd_label;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- size_t size = (num_labels + 1) * sizeof(struct nd_namespace_label *);
- for_each_label(l, nd_label, nd_mapping->labels)
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry(label_ent, &nd_mapping->labels, list)
old_num_labels++;
+ mutex_unlock(&nd_mapping->lock);
/*
* We need to preserve all the old labels for the mapping so
* they can be garbage collected after writing the new labels.
*/
- if (num_labels > old_num_labels) {
- struct nd_namespace_label **labels;
-
- labels = krealloc(nd_mapping->labels, size, GFP_KERNEL);
- if (!labels)
+ for (i = old_num_labels; i < num_labels; i++) {
+ label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
+ if (!label_ent)
return -ENOMEM;
- nd_mapping->labels = labels;
+ mutex_lock(&nd_mapping->lock);
+ list_add_tail(&label_ent->list, &nd_mapping->labels);
+ mutex_unlock(&nd_mapping->lock);
}
- if (!nd_mapping->labels)
- return -ENOMEM;
-
- for (i = old_num_labels; i <= num_labels; i++)
- nd_mapping->labels[i] = NULL;
if (ndd->ns_current == -1 || ndd->ns_next == -1)
/* pass */;
@@ -837,42 +875,45 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- struct nd_namespace_label *nd_label;
+ struct nd_label_ent *label_ent, *e;
struct nd_namespace_index *nsindex;
u8 label_uuid[NSLABEL_UUID_LEN];
- int l, num_freed = 0;
unsigned long *free;
+ LIST_HEAD(list);
u32 nslot, slot;
+ int active = 0;
if (!uuid)
return 0;
/* no index || no labels == nothing to delete */
- if (!preamble_next(ndd, &nsindex, &free, &nslot)
- || !nd_mapping->labels)
+ if (!preamble_next(ndd, &nsindex, &free, &nslot))
return 0;
- for_each_label(l, nd_label, nd_mapping->labels) {
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+ struct nd_namespace_label *nd_label = label_ent->label;
+
+ if (!nd_label)
+ continue;
+ active++;
memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
continue;
+ active--;
slot = to_slot(ndd, nd_label);
nd_label_free_slot(ndd, slot);
dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
- del_label(nd_mapping, l);
- num_freed++;
- l--; /* retry with new label at this index */
+ list_move_tail(&label_ent->list, &list);
+ label_ent->label = NULL;
}
+ list_splice_tail_init(&list, &nd_mapping->labels);
- if (num_freed > l) {
- /*
- * num_freed will only ever be > l when we delete the last
- * label
- */
- kfree(nd_mapping->labels);
- nd_mapping->labels = NULL;
- dev_dbg(ndd->dev, "%s: no more labels\n", __func__);
+ if (active == 0) {
+ nd_mapping_free_labels(nd_mapping);
+ dev_dbg(ndd->dev, "%s: no more active labels\n", __func__);
}
+ mutex_unlock(&nd_mapping->lock);
return nd_label_write_index(ndd, ndd->ns_next,
nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
@@ -885,7 +926,9 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
- int rc;
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct resource *res;
+ int rc, count = 0;
if (size == 0) {
rc = del_labels(nd_mapping, nspm->uuid);
@@ -894,7 +937,12 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
continue;
}
- rc = init_labels(nd_mapping, 1);
+ for_each_dpa_resource(ndd, res)
+ if (strncmp(res->name, "pmem", 3) == 0)
+ count++;
+ WARN_ON_ONCE(!count);
+
+ rc = init_labels(nd_mapping, count);
if (rc < 0)
return rc;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index c5e3196c45b0..abe5c6bc756c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -12,8 +12,10 @@
*/
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/pmem.h>
+#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
@@ -28,7 +30,10 @@ static void namespace_io_release(struct device *dev)
static void namespace_pmem_release(struct device *dev)
{
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+ if (nspm->id >= 0)
+ ida_simple_remove(&nd_region->ns_ida, nspm->id);
kfree(nspm->alt_name);
kfree(nspm->uuid);
kfree(nspm);
@@ -62,17 +67,17 @@ static struct device_type namespace_blk_device_type = {
.release = namespace_blk_release,
};
-static bool is_namespace_pmem(struct device *dev)
+static bool is_namespace_pmem(const struct device *dev)
{
return dev ? dev->type == &namespace_pmem_device_type : false;
}
-static bool is_namespace_blk(struct device *dev)
+static bool is_namespace_blk(const struct device *dev)
{
return dev ? dev->type == &namespace_blk_device_type : false;
}
-static bool is_namespace_io(struct device *dev)
+static bool is_namespace_io(const struct device *dev)
{
return dev ? dev->type == &namespace_io_device_type : false;
}
@@ -168,7 +173,21 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
suffix = "s";
if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
- sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
+ int nsidx = 0;
+
+ if (is_namespace_pmem(&ndns->dev)) {
+ struct nd_namespace_pmem *nspm;
+
+ nspm = to_nd_namespace_pmem(&ndns->dev);
+ nsidx = nspm->id;
+ }
+
+ if (nsidx)
+ sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
+ suffix ? suffix : "");
+ else
+ sprintf(name, "pmem%d%s", nd_region->id,
+ suffix ? suffix : "");
} else if (is_namespace_blk(&ndns->dev)) {
struct nd_namespace_blk *nsblk;
@@ -294,7 +313,7 @@ static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
if (strcmp(res->name, label_id.id) != 0)
continue;
/*
- * Resources with unacknoweldged adjustments indicate a
+ * Resources with unacknowledged adjustments indicate a
* failure to update labels
*/
if (res->flags & DPA_RESOURCE_ADJUSTED)
@@ -510,19 +529,68 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
return rc ? n : 0;
}
-static bool space_valid(bool is_pmem, bool is_reserve,
- struct nd_label_id *label_id, struct resource *res)
+
+/**
+ * space_valid() - validate free dpa space against constraints
+ * @nd_region: hosting region of the free space
+ * @ndd: dimm device data for debug
+ * @label_id: namespace id to allocate space
+ * @prev: potential allocation that precedes free space
+ * @next: allocation that follows the given free space range
+ * @exist: first allocation with same id in the mapping
+ * @n: range that must be satisfied for pmem allocations
+ * @valid: free space range to validate
+ *
+ * BLK-space is valid as long as it does not precede a PMEM
+ * allocation in a given region. PMEM-space must be contiguous
+ * and adjacent to an existing allocation (if one
+ * exists). If reserving PMEM, any space is valid.
+ */
+static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
+ struct nd_label_id *label_id, struct resource *prev,
+ struct resource *next, struct resource *exist,
+ resource_size_t n, struct resource *valid)
{
- /*
- * For BLK-space any space is valid, for PMEM-space, it must be
- * contiguous with an existing allocation unless we are
- * reserving pmem.
- */
- if (is_reserve || !is_pmem)
- return true;
- if (!res || strcmp(res->name, label_id->id) == 0)
- return true;
- return false;
+ bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
+ bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
+
+ if (valid->start >= valid->end)
+ goto invalid;
+
+ if (is_reserve)
+ return;
+
+ if (!is_pmem) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct nvdimm_bus *nvdimm_bus;
+ struct blk_alloc_info info = {
+ .nd_mapping = nd_mapping,
+ .available = nd_mapping->size,
+ .res = valid,
+ };
+
+ WARN_ON(!is_nd_blk(&nd_region->dev));
+ nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+ device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
+ return;
+ }
+
+ /* allocation needs to be contiguous, so this is all or nothing */
+ if (resource_size(valid) < n)
+ goto invalid;
+
+ /* we've got all the space we need and no existing allocation */
+ if (!exist)
+ return;
+
+ /* allocation needs to be contiguous with the existing namespace */
+ if (valid->start == exist->end + 1
+ || valid->end == exist->start - 1)
+ return;
+
+ invalid:
+ /* truncate @valid size to 0 */
+ valid->end = valid->start - 1;
}
enum alloc_loc {
@@ -534,18 +602,24 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
resource_size_t n)
{
resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
- bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct resource *res, *exist = NULL, valid;
const resource_size_t to_allocate = n;
- struct resource *res;
int first;
+ for_each_dpa_resource(ndd, res)
+ if (strcmp(label_id->id, res->name) == 0)
+ exist = res;
+
+ valid.start = nd_mapping->start;
+ valid.end = mapping_end;
+ valid.name = "free space";
retry:
first = 0;
for_each_dpa_resource(ndd, res) {
- resource_size_t allocate, available = 0, free_start, free_end;
struct resource *next = res->sibling, *new_res = NULL;
+ resource_size_t allocate, available = 0;
enum alloc_loc loc = ALLOC_ERR;
const char *action;
int rc = 0;
@@ -558,32 +632,35 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
/* space at the beginning of the mapping */
if (!first++ && res->start > nd_mapping->start) {
- free_start = nd_mapping->start;
- available = res->start - free_start;
- if (space_valid(is_pmem, is_reserve, label_id, NULL))
+ valid.start = nd_mapping->start;
+ valid.end = res->start - 1;
+ space_valid(nd_region, ndd, label_id, NULL, next, exist,
+ to_allocate, &valid);
+ available = resource_size(&valid);
+ if (available)
loc = ALLOC_BEFORE;
}
/* space between allocations */
if (!loc && next) {
- free_start = res->start + resource_size(res);
- free_end = min(mapping_end, next->start - 1);
- if (space_valid(is_pmem, is_reserve, label_id, res)
- && free_start < free_end) {
- available = free_end + 1 - free_start;
+ valid.start = res->start + resource_size(res);
+ valid.end = min(mapping_end, next->start - 1);
+ space_valid(nd_region, ndd, label_id, res, next, exist,
+ to_allocate, &valid);
+ available = resource_size(&valid);
+ if (available)
loc = ALLOC_MID;
- }
}
/* space at the end of the mapping */
if (!loc && !next) {
- free_start = res->start + resource_size(res);
- free_end = mapping_end;
- if (space_valid(is_pmem, is_reserve, label_id, res)
- && free_start < free_end) {
- available = free_end + 1 - free_start;
+ valid.start = res->start + resource_size(res);
+ valid.end = mapping_end;
+ space_valid(nd_region, ndd, label_id, res, next, exist,
+ to_allocate, &valid);
+ available = resource_size(&valid);
+ if (available)
loc = ALLOC_AFTER;
- }
}
if (!loc || !available)
@@ -593,8 +670,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
case ALLOC_BEFORE:
if (strcmp(res->name, label_id->id) == 0) {
/* adjust current resource up */
- if (is_pmem && !is_reserve)
- return n;
rc = adjust_resource(res, res->start - allocate,
resource_size(res) + allocate);
action = "cur grow up";
@@ -604,8 +679,6 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
case ALLOC_MID:
if (strcmp(next->name, label_id->id) == 0) {
/* adjust next resource up */
- if (is_pmem && !is_reserve)
- return n;
rc = adjust_resource(next, next->start
- allocate, resource_size(next)
+ allocate);
@@ -629,12 +702,10 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
if (strcmp(action, "allocate") == 0) {
/* BLK allocate bottom up */
if (!is_pmem)
- free_start += available - allocate;
- else if (!is_reserve && free_start != nd_mapping->start)
- return n;
+ valid.start += available - allocate;
new_res = nvdimm_allocate_dpa(ndd, label_id,
- free_start, allocate);
+ valid.start, allocate);
if (!new_res)
rc = -EBUSY;
} else if (strcmp(action, "grow down") == 0) {
@@ -832,13 +903,45 @@ static int grow_dpa_allocation(struct nd_region *nd_region,
return 0;
}
-static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
+static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
struct nd_namespace_pmem *nspm, resource_size_t size)
{
struct resource *res = &nspm->nsio.res;
+ resource_size_t offset = 0;
- res->start = nd_region->ndr_start;
- res->end = nd_region->ndr_start + size - 1;
+ if (size && !nspm->uuid) {
+ WARN_ON_ONCE(1);
+ size = 0;
+ }
+
+ if (size && nspm->uuid) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_label_id label_id;
+ struct resource *res;
+
+ if (!ndd) {
+ size = 0;
+ goto out;
+ }
+
+ nd_label_gen_id(&label_id, nspm->uuid, 0);
+
+ /* calculate a spa offset from the dpa allocation offset */
+ for_each_dpa_resource(ndd, res)
+ if (strcmp(res->name, label_id.id) == 0) {
+ offset = (res->start - nd_mapping->start)
+ * nd_region->ndr_mappings;
+ goto out;
+ }
+
+ WARN_ON_ONCE(1);
+ size = 0;
+ }
+
+ out:
+ res->start = nd_region->ndr_start + offset;
+ res->end = res->start + size - 1;
}
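The scaling above follows from interleaving: in an N-way interleaved pmem region every byte of per-DIMM DPA advance corresponds to N bytes of system physical address, so the namespace's SPA offset is the DPA offset times ndr_mappings. A userspace sketch of the arithmetic with hypothetical values (4-way interleave, allocation starting 1 GiB into each mapping):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ndr_mappings = 4;		/* 4-way interleave set */
	uint64_t mapping_start = 0;		/* DPA base of the mapping */
	uint64_t res_start = 1ULL << 30;	/* allocation DPA: 1 GiB in */

	/* one DPA byte maps to ndr_mappings SPA bytes */
	uint64_t offset = (res_start - mapping_start) * ndr_mappings;

	printf("spa offset = %llu GiB\n",
			(unsigned long long)(offset >> 30));	/* 4 */
	return 0;
}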
static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
@@ -929,7 +1032,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
- nd_namespace_pmem_set_size(nd_region, nspm,
+ nd_namespace_pmem_set_resource(nd_region, nspm,
val * nd_region->ndr_mappings);
} else if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
@@ -1031,22 +1134,27 @@ static ssize_t size_show(struct device *dev,
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
-static ssize_t uuid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static u8 *namespace_to_uuid(struct device *dev)
{
- u8 *uuid;
-
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
- uuid = nspm->uuid;
+ return nspm->uuid;
} else if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
- uuid = nsblk->uuid;
+ return nsblk->uuid;
} else
- return -ENXIO;
+ return ERR_PTR(-ENXIO);
+}
+static ssize_t uuid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 *uuid = namespace_to_uuid(dev);
+
+ if (IS_ERR(uuid))
+ return PTR_ERR(uuid);
if (uuid)
return sprintf(buf, "%pUb\n", uuid);
return sprintf(buf, "\n");
@@ -1089,7 +1197,7 @@ static int namespace_update_uuid(struct nd_region *nd_region,
*
* FIXME: can we delete uuid with zero dpa allocated?
*/
- if (nd_mapping->labels)
+ if (list_empty(&nd_mapping->labels))
return -EBUSY;
}
@@ -1491,14 +1599,19 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
- struct nd_namespace_label *nd_label;
+ struct nd_label_ent *label_ent;
bool found_uuid = false;
- int l;
- for_each_label(l, nd_label, nd_mapping->labels) {
- u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
- u16 position = __le16_to_cpu(nd_label->position);
- u16 nlabel = __le16_to_cpu(nd_label->nlabel);
+ list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+ struct nd_namespace_label *nd_label = label_ent->label;
+ u16 position, nlabel;
+ u64 isetcookie;
+
+ if (!nd_label)
+ continue;
+ isetcookie = __le64_to_cpu(nd_label->isetcookie);
+ position = __le16_to_cpu(nd_label->position);
+ nlabel = __le16_to_cpu(nd_label->nlabel);
if (isetcookie != cookie)
continue;
@@ -1528,7 +1641,6 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
- struct nd_namespace_label *select = NULL;
int i;
if (!pmem_id)
@@ -1536,90 +1648,106 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
- struct nd_namespace_label *nd_label;
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_namespace_label *nd_label = NULL;
u64 hw_start, hw_end, pmem_start, pmem_end;
- int l;
+ struct nd_label_ent *label_ent;
- for_each_label(l, nd_label, nd_mapping->labels)
+ WARN_ON(!mutex_is_locked(&nd_mapping->lock));
+ list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+ nd_label = label_ent->label;
+ if (!nd_label)
+ continue;
if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
break;
+ nd_label = NULL;
+ }
if (!nd_label) {
WARN_ON(1);
return -EINVAL;
}
- select = nd_label;
/*
* Check that this label is compliant with the dpa
* range published in NFIT
*/
hw_start = nd_mapping->start;
hw_end = hw_start + nd_mapping->size;
- pmem_start = __le64_to_cpu(select->dpa);
- pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
- if (pmem_start == hw_start && pmem_end <= hw_end)
+ pmem_start = __le64_to_cpu(nd_label->dpa);
+ pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
+ if (pmem_start >= hw_start && pmem_start < hw_end
+ && pmem_end <= hw_end && pmem_end > hw_start)
/* pass */;
- else
+ else {
+ dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
+ dev_name(ndd->dev), nd_label->uuid);
return -EINVAL;
+ }
- nd_mapping->labels[0] = select;
- nd_mapping->labels[1] = NULL;
+ /* move recently validated label to the front of the list */
+ list_move(&label_ent->list, &nd_mapping->labels);
}
return 0;
}
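Note that this check is deliberately looser than the pmem_start == hw_start test it replaces: with multiple pmem namespaces per region a label may start anywhere inside the DPA range published in the NFIT, it just must not spill outside it. A standalone sketch of the predicate with hypothetical bounds:

#include <assert.h>
#include <stdint.h>

/* the containment check from select_pmem_id(), lifted for illustration */
static int label_in_mapping(uint64_t pmem_start, uint64_t pmem_end,
		uint64_t hw_start, uint64_t hw_end)
{
	return pmem_start >= hw_start && pmem_start < hw_end
		&& pmem_end <= hw_end && pmem_end > hw_start;
}

int main(void)
{
	/* multi-pmem: a namespace may start past the mapping base... */
	assert(label_in_mapping(0x2000, 0x3000, 0x1000, 0x4000));
	/* ...but must stay within the published range */
	assert(!label_in_mapping(0x0800, 0x3000, 0x1000, 0x4000));
	return 0;
}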
/**
- * find_pmem_label_set - validate interleave set labelling, retrieve label0
+ * create_namespace_pmem - validate interleave set labelling, retrieve label0
* @nd_region: region with mappings to validate
+ * @nd_label: target pmem namespace label to evaluate
*/
-static int find_pmem_label_set(struct nd_region *nd_region,
- struct nd_namespace_pmem *nspm)
+struct device *create_namespace_pmem(struct nd_region *nd_region,
+ struct nd_namespace_label *nd_label)
{
u64 cookie = nd_region_interleave_set_cookie(nd_region);
- struct nd_namespace_label *nd_label;
- u8 select_id[NSLABEL_UUID_LEN];
+ struct nd_label_ent *label_ent;
+ struct nd_namespace_pmem *nspm;
+ struct nd_mapping *nd_mapping;
resource_size_t size = 0;
- u8 *pmem_id = NULL;
- int rc = -ENODEV, l;
+ struct resource *res;
+ struct device *dev;
+ int rc = 0;
u16 i;
- if (cookie == 0)
- return -ENXIO;
+ if (cookie == 0) {
+ dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
+ return ERR_PTR(-ENXIO);
+ }
- /*
- * Find a complete set of labels by uuid. By definition we can start
- * with any mapping as the reference label
- */
- for_each_label(l, nd_label, nd_region->mapping[0].labels) {
- u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
+ if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
+ dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
+ nd_label->uuid);
+ return ERR_PTR(-EAGAIN);
+ }
- if (isetcookie != cookie)
- continue;
+ nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
+ if (!nspm)
+ return ERR_PTR(-ENOMEM);
- for (i = 0; nd_region->ndr_mappings; i++)
- if (!has_uuid_at_pos(nd_region, nd_label->uuid,
- cookie, i))
- break;
- if (i < nd_region->ndr_mappings) {
- /*
- * Give up if we don't find an instance of a
- * uuid at each position (from 0 to
- * nd_region->ndr_mappings - 1), or if we find a
- * dimm with two instances of the same uuid.
- */
- rc = -EINVAL;
- goto err;
- } else if (pmem_id) {
- /*
- * If there is more than one valid uuid set, we
- * need userspace to clean this up.
- */
- rc = -EBUSY;
- goto err;
- }
- memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
- pmem_id = select_id;
+ nspm->id = -1;
+ dev = &nspm->nsio.common.dev;
+ dev->type = &namespace_pmem_device_type;
+ dev->parent = &nd_region->dev;
+ res = &nspm->nsio.res;
+ res->name = dev_name(&nd_region->dev);
+ res->flags = IORESOURCE_MEM;
+
+ for (i = 0; i < nd_region->ndr_mappings; i++)
+ if (!has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
+ break;
+ if (i < nd_region->ndr_mappings) {
+ struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
+
+ /*
+ * Give up if we don't find an instance of a uuid at each
+ * position (from 0 to nd_region->ndr_mappings - 1), or if we
+ * find a dimm with two instances of the same uuid.
+ */
+ dev_err(&nd_region->dev, "%s missing label for %pUb\n",
+ dev_name(ndd->dev), nd_label->uuid);
+ rc = -EINVAL;
+ goto err;
}
/*
@@ -1630,14 +1758,23 @@ static int find_pmem_label_set(struct nd_region *nd_region,
* the dimm being enabled (i.e. nd_label_reserve_dpa()
* succeeded).
*/
- rc = select_pmem_id(nd_region, pmem_id);
+ rc = select_pmem_id(nd_region, nd_label->uuid);
if (rc)
goto err;
/* Calculate total size and populate namespace properties from label0 */
for (i = 0; i < nd_region->ndr_mappings; i++) {
- struct nd_mapping *nd_mapping = &nd_region->mapping[i];
- struct nd_namespace_label *label0 = nd_mapping->labels[0];
+ struct nd_namespace_label *label0;
+
+ nd_mapping = &nd_region->mapping[i];
+ label_ent = list_first_entry_or_null(&nd_mapping->labels,
+ typeof(*label_ent), list);
+ label0 = label_ent ? label_ent->label : NULL;
+
+ if (!label0) {
+ WARN_ON(1);
+ continue;
+ }
size += __le64_to_cpu(label0->rawsize);
if (__le16_to_cpu(label0->position) != 0)
@@ -1654,10 +1791,11 @@ static int find_pmem_label_set(struct nd_region *nd_region,
goto err;
}
- nd_namespace_pmem_set_size(nd_region, nspm, size);
+ nd_namespace_pmem_set_resource(nd_region, nspm, size);
- return 0;
+ return dev;
err:
+ namespace_pmem_release(dev);
switch (rc) {
case -EINVAL:
dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
@@ -1670,55 +1808,7 @@ static int find_pmem_label_set(struct nd_region *nd_region,
__func__, rc);
break;
}
- return rc;
-}
-
-static struct device **create_namespace_pmem(struct nd_region *nd_region)
-{
- struct nd_namespace_pmem *nspm;
- struct device *dev, **devs;
- struct resource *res;
- int rc;
-
- nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
- if (!nspm)
- return NULL;
-
- dev = &nspm->nsio.common.dev;
- dev->type = &namespace_pmem_device_type;
- dev->parent = &nd_region->dev;
- res = &nspm->nsio.res;
- res->name = dev_name(&nd_region->dev);
- res->flags = IORESOURCE_MEM;
- rc = find_pmem_label_set(nd_region, nspm);
- if (rc == -ENODEV) {
- int i;
-
- /* Pass, try to permit namespace creation... */
- for (i = 0; i < nd_region->ndr_mappings; i++) {
- struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-
- kfree(nd_mapping->labels);
- nd_mapping->labels = NULL;
- }
-
- /* Publish a zero-sized namespace for userspace to configure. */
- nd_namespace_pmem_set_size(nd_region, nspm, 0);
-
- rc = 0;
- } else if (rc)
- goto err;
-
- devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
- if (!devs)
- goto err;
-
- devs[0] = dev;
- return devs;
-
- err:
- namespace_pmem_release(&nspm->nsio.common.dev);
- return NULL;
+ return ERR_PTR(rc);
}
struct resource *nsblk_add_resource(struct nd_region *nd_region,
@@ -1770,16 +1860,58 @@ static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
return &nsblk->common.dev;
}
-void nd_region_create_blk_seed(struct nd_region *nd_region)
+static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
+{
+ struct nd_namespace_pmem *nspm;
+ struct resource *res;
+ struct device *dev;
+
+ if (!is_nd_pmem(&nd_region->dev))
+ return NULL;
+
+ nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
+ if (!nspm)
+ return NULL;
+
+ dev = &nspm->nsio.common.dev;
+ dev->type = &namespace_pmem_device_type;
+ dev->parent = &nd_region->dev;
+ res = &nspm->nsio.res;
+ res->name = dev_name(&nd_region->dev);
+ res->flags = IORESOURCE_MEM;
+
+ nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
+ if (nspm->id < 0) {
+ kfree(nspm);
+ return NULL;
+ }
+ dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
+ dev->parent = &nd_region->dev;
+ dev->groups = nd_namespace_attribute_groups;
+ nd_namespace_pmem_set_resource(nd_region, nspm, 0);
+
+ return dev;
+}
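Seed devices created here are named namespace<region>.<id>, and nvdimm_namespace_disk_name() earlier in this patch applies the matching block-device scheme, dropping the .<id> suffix for namespace id 0 so legacy single-namespace regions keep their pmem<region> name. A userspace sketch with assumed ids:

#include <stdio.h>

int main(void)
{
	char name[32];
	int region_id = 1, nsid = 2;	/* hypothetical region/namespace ids */
	const char *suffix = "";	/* "s" when a btt claims the device */

	if (nsid)
		snprintf(name, sizeof(name), "pmem%d.%d%s",
				region_id, nsid, suffix);
	else
		snprintf(name, sizeof(name), "pmem%d%s", region_id, suffix);

	printf("%s\n", name);	/* pmem1.2 */
	return 0;
}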
+
+void nd_region_create_ns_seed(struct nd_region *nd_region)
{
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
- nd_region->ns_seed = nd_namespace_blk_create(nd_region);
+
+ if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
+ return;
+
+ if (is_nd_blk(&nd_region->dev))
+ nd_region->ns_seed = nd_namespace_blk_create(nd_region);
+ else
+ nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
+
/*
* Seed creation failures are not fatal, provisioning is simply
* disabled until memory becomes available
*/
if (!nd_region->ns_seed)
- dev_err(&nd_region->dev, "failed to create blk namespace\n");
+ dev_err(&nd_region->dev, "failed to create %s namespace\n",
+ is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
else
nd_device_register(nd_region->ns_seed);
}
@@ -1820,43 +1952,137 @@ void nd_region_create_btt_seed(struct nd_region *nd_region)
dev_err(&nd_region->dev, "failed to create btt namespace\n");
}
-static struct device **create_namespace_blk(struct nd_region *nd_region)
+static int add_namespace_resource(struct nd_region *nd_region,
+ struct nd_namespace_label *nd_label, struct device **devs,
+ int count)
{
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
- struct nd_namespace_label *nd_label;
- struct device *dev, **devs = NULL;
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ int i;
+
+ for (i = 0; i < count; i++) {
+ u8 *uuid = namespace_to_uuid(devs[i]);
+ struct resource *res;
+
+ if (IS_ERR_OR_NULL(uuid)) {
+ WARN_ON(1);
+ continue;
+ }
+
+ if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
+ continue;
+ if (is_namespace_blk(devs[i])) {
+ res = nsblk_add_resource(nd_region, ndd,
+ to_nd_namespace_blk(devs[i]),
+ __le64_to_cpu(nd_label->dpa));
+ if (!res)
+ return -ENXIO;
+ nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
+ } else {
+ dev_err(&nd_region->dev,
+ "error: conflicting extents for uuid: %pUb\n",
+ nd_label->uuid);
+ return -ENXIO;
+ }
+ break;
+ }
+
+ return i;
+}
+
+struct device *create_namespace_blk(struct nd_region *nd_region,
+ struct nd_namespace_label *nd_label, int count)
+{
+ struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_blk *nsblk;
- struct nvdimm_drvdata *ndd;
- int i, l, count = 0;
+ char name[NSLABEL_NAME_LEN];
+ struct device *dev = NULL;
struct resource *res;
- if (nd_region->ndr_mappings == 0)
- return NULL;
+ nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
+ if (!nsblk)
+ return ERR_PTR(-ENOMEM);
+ dev = &nsblk->common.dev;
+ dev->type = &namespace_blk_device_type;
+ dev->parent = &nd_region->dev;
+ nsblk->id = -1;
+ nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
+ nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
+ GFP_KERNEL);
+ if (!nsblk->uuid)
+ goto blk_err;
+ memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
+ if (name[0])
+ nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
+ GFP_KERNEL);
+ res = nsblk_add_resource(nd_region, ndd, nsblk,
+ __le64_to_cpu(nd_label->dpa));
+ if (!res)
+ goto blk_err;
+ nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
+ return dev;
+ blk_err:
+ namespace_blk_release(dev);
+ return ERR_PTR(-ENXIO);
+}
+
+static int cmp_dpa(const void *a, const void *b)
+{
+ const struct device *dev_a = *(const struct device **) a;
+ const struct device *dev_b = *(const struct device **) b;
+ struct nd_namespace_blk *nsblk_a, *nsblk_b;
+ struct nd_namespace_pmem *nspm_a, *nspm_b;
- ndd = to_ndd(nd_mapping);
- for_each_label(l, nd_label, nd_mapping->labels) {
- u32 flags = __le32_to_cpu(nd_label->flags);
- char *name[NSLABEL_NAME_LEN];
+ if (is_namespace_io(dev_a))
+ return 0;
+
+ if (is_namespace_blk(dev_a)) {
+ nsblk_a = to_nd_namespace_blk(dev_a);
+ nsblk_b = to_nd_namespace_blk(dev_b);
+
+ return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
+ sizeof(resource_size_t));
+ }
+
+ nspm_a = to_nd_namespace_pmem(dev_a);
+ nspm_b = to_nd_namespace_pmem(dev_b);
+
+ return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
+ sizeof(resource_size_t));
+}
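One caveat worth flagging: memcmp() compares the raw bytes of the start address, which only coincides with numeric order on big-endian hosts; on little-endian machines it still yields a deterministic, stable ordering, just not an ascending-by-DPA one. An endian-neutral alternative would compare the values directly (a sketch, not what the patch does):

#include <stdint.h>

typedef uint64_t resource_size_t;	/* local stand-in for the kernel type */

static int cmp_start_numeric(resource_size_t a, resource_size_t b)
{
	/* ascending numeric order regardless of host endianness */
	return a < b ? -1 : (a > b ? 1 : 0);
}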
+
+static struct device **scan_labels(struct nd_region *nd_region)
+{
+ int i, count = 0;
+ struct device *dev, **devs = NULL;
+ struct nd_label_ent *label_ent, *e;
+ struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
+
+ /* "safe" because create_namespace_pmem() might list_move() label_ent */
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+ struct nd_namespace_label *nd_label = label_ent->label;
struct device **__devs;
+ u32 flags;
- if (flags & NSLABEL_FLAG_LOCAL)
- /* pass */;
+ if (!nd_label)
+ continue;
+ flags = __le32_to_cpu(nd_label->flags);
+ if (is_nd_blk(&nd_region->dev)
+ == !!(flags & NSLABEL_FLAG_LOCAL))
+ /* pass, region matches label type */;
else
continue;
- for (i = 0; i < count; i++) {
- nsblk = to_nd_namespace_blk(devs[i]);
- if (memcmp(nsblk->uuid, nd_label->uuid,
- NSLABEL_UUID_LEN) == 0) {
- res = nsblk_add_resource(nd_region, ndd, nsblk,
- __le64_to_cpu(nd_label->dpa));
- if (!res)
- goto err;
- nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
- dev_name(&nsblk->common.dev));
- break;
- }
- }
+ /* skip labels that describe extents outside of the region */
+ if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start
+ || __le64_to_cpu(nd_label->dpa) > map_end)
+ continue;
+
+ i = add_namespace_resource(nd_region, nd_label, devs, count);
+ if (i < 0)
+ goto err;
if (i < count)
continue;
__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
@@ -1866,67 +2092,128 @@ static struct device **create_namespace_blk(struct nd_region *nd_region)
kfree(devs);
devs = __devs;
- nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
- if (!nsblk)
- goto err;
- dev = &nsblk->common.dev;
- dev->type = &namespace_blk_device_type;
- dev->parent = &nd_region->dev;
- dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
- devs[count++] = dev;
- nsblk->id = -1;
- nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
- nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
- GFP_KERNEL);
- if (!nsblk->uuid)
- goto err;
- memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
- if (name[0])
- nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
- GFP_KERNEL);
- res = nsblk_add_resource(nd_region, ndd, nsblk,
- __le64_to_cpu(nd_label->dpa));
- if (!res)
- goto err;
- nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
- dev_name(&nsblk->common.dev));
+ if (is_nd_blk(&nd_region->dev)) {
+ dev = create_namespace_blk(nd_region, nd_label, count);
+ if (IS_ERR(dev))
+ goto err;
+ devs[count++] = dev;
+ } else {
+ dev = create_namespace_pmem(nd_region, nd_label);
+ if (IS_ERR(dev)) {
+ switch (PTR_ERR(dev)) {
+ case -EAGAIN:
+ /* skip invalid labels */
+ continue;
+ case -ENODEV:
+ /* fallthrough to seed creation */
+ break;
+ default:
+ goto err;
+ }
+ } else
+ devs[count++] = dev;
+ }
}
- dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
- __func__, count, count == 1 ? "" : "s");
+ dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n",
+ __func__, count, is_nd_blk(&nd_region->dev)
+ ? "blk" : "pmem", count == 1 ? "" : "s");
if (count == 0) {
/* Publish a zero-sized namespace for userspace to configure. */
- for (i = 0; i < nd_region->ndr_mappings; i++) {
- struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-
- kfree(nd_mapping->labels);
- nd_mapping->labels = NULL;
- }
+ nd_mapping_free_labels(nd_mapping);
devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
if (!devs)
goto err;
- nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
- if (!nsblk)
- goto err;
- dev = &nsblk->common.dev;
- dev->type = &namespace_blk_device_type;
+ if (is_nd_blk(&nd_region->dev)) {
+ struct nd_namespace_blk *nsblk;
+
+ nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
+ if (!nsblk)
+ goto err;
+ dev = &nsblk->common.dev;
+ dev->type = &namespace_blk_device_type;
+ } else {
+ struct nd_namespace_pmem *nspm;
+
+ nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
+ if (!nspm)
+ goto err;
+ dev = &nspm->nsio.common.dev;
+ dev->type = &namespace_pmem_device_type;
+ nd_namespace_pmem_set_resource(nd_region, nspm, 0);
+ }
dev->parent = &nd_region->dev;
devs[count++] = dev;
+ } else if (is_nd_pmem(&nd_region->dev)) {
+ /* clean unselected labels */
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct list_head *l, *e;
+ LIST_HEAD(list);
+ int j;
+
+ nd_mapping = &nd_region->mapping[i];
+ if (list_empty(&nd_mapping->labels)) {
+ WARN_ON(1);
+ continue;
+ }
+
+ j = count;
+ list_for_each_safe(l, e, &nd_mapping->labels) {
+ if (!j--)
+ break;
+ list_move_tail(l, &list);
+ }
+ nd_mapping_free_labels(nd_mapping);
+ list_splice_init(&list, &nd_mapping->labels);
+ }
}
+ if (count > 1)
+ sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);
+
return devs;
-err:
- for (i = 0; i < count; i++) {
- nsblk = to_nd_namespace_blk(devs[i]);
- namespace_blk_release(&nsblk->common.dev);
+ err:
+ if (devs) {
+ for (i = 0; devs[i]; i++)
+ if (is_nd_blk(&nd_region->dev))
+ namespace_blk_release(devs[i]);
+ else
+ namespace_pmem_release(devs[i]);
+ kfree(devs);
}
- kfree(devs);
return NULL;
}
+static struct device **create_namespaces(struct nd_region *nd_region)
+{
+ struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct device **devs;
+ int i;
+
+ if (nd_region->ndr_mappings == 0)
+ return NULL;
+
+ /* lock down all mappings while we scan labels */
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ nd_mapping = &nd_region->mapping[i];
+ mutex_lock_nested(&nd_mapping->lock, i);
+ }
+
+ devs = scan_labels(nd_region);
+
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ int reverse = nd_region->ndr_mappings - 1 - i;
+
+ nd_mapping = &nd_region->mapping[reverse];
+ mutex_unlock(&nd_mapping->lock);
+ }
+
+ return devs;
+}
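The subclass argument to mutex_lock_nested() is what keeps lockdep happy here: all mapping locks share one lock class, and annotating the i-th acquisition with subclass i tells lockdep the nesting is intentional, provided every path takes the locks in the same index order (note the reverse-order unlock above). The shape of the pattern, assuming kernel context and hypothetical helper names:

static void lock_mappings(struct nd_mapping *map, int n)
{
	int i;

	for (i = 0; i < n; i++)
		mutex_lock_nested(&map[i].lock, i);	/* subclass = index */
}

static void unlock_mappings(struct nd_mapping *map, int n)
{
	int i;

	for (i = n - 1; i >= 0; i--)	/* reverse of acquisition order */
		mutex_unlock(&map[i].lock);
}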
+
static int init_active_labels(struct nd_region *nd_region)
{
int i;
@@ -1935,6 +2222,7 @@ static int init_active_labels(struct nd_region *nd_region)
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nvdimm *nvdimm = nd_mapping->nvdimm;
+ struct nd_label_ent *label_ent;
int count, j;
/*
@@ -1956,16 +2244,27 @@ static int init_active_labels(struct nd_region *nd_region)
dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
if (!count)
continue;
- nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
- GFP_KERNEL);
- if (!nd_mapping->labels)
- return -ENOMEM;
for (j = 0; j < count; j++) {
struct nd_namespace_label *label;
+ label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
+ if (!label_ent)
+ break;
label = nd_label_active(ndd, j);
- nd_mapping->labels[j] = label;
+ label_ent->label = label;
+
+ mutex_lock(&nd_mapping->lock);
+ list_add_tail(&label_ent->list, &nd_mapping->labels);
+ mutex_unlock(&nd_mapping->lock);
}
+
+ if (j >= count)
+ continue;
+
+ mutex_lock(&nd_mapping->lock);
+ nd_mapping_free_labels(nd_mapping);
+ mutex_unlock(&nd_mapping->lock);
+ return -ENOMEM;
}
return 0;
@@ -1990,10 +2289,8 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
devs = create_namespace_io(nd_region);
break;
case ND_DEVICE_NAMESPACE_PMEM:
- devs = create_namespace_pmem(nd_region);
- break;
case ND_DEVICE_NAMESPACE_BLK:
- devs = create_namespace_blk(nd_region);
+ devs = create_namespaces(nd_region);
break;
default:
break;
@@ -2014,6 +2311,13 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
id = ida_simple_get(&nd_region->ns_ida, 0, 0,
GFP_KERNEL);
nsblk->id = id;
+ } else if (type == ND_DEVICE_NAMESPACE_PMEM) {
+ struct nd_namespace_pmem *nspm;
+
+ nspm = to_nd_namespace_pmem(dev);
+ id = ida_simple_get(&nd_region->ns_ida, 0, 0,
+ GFP_KERNEL);
+ nspm->id = id;
} else
id = i;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 38ce6bbbc170..8623e57c2ce3 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -44,6 +44,23 @@ struct nvdimm {
struct resource *flush_wpq;
};
+/**
+ * struct blk_alloc_info - tracking info for BLK dpa scanning
+ * @nd_mapping: blk region mapping boundaries
+ * @available: decremented in alias_dpa_busy as aliased PMEM is scanned
+ * @busy: decremented in blk_dpa_busy to account for ranges already
+ * handled by alias_dpa_busy
+ * @res: alias_dpa_busy interprets this as a free space range that needs
+ * to be truncated to the valid BLK allocation starting DPA; blk_dpa_busy
+ * treats it as a busy range that needs the aliased PMEM ranges
+ * truncated.
+ */
+struct blk_alloc_info {
+ struct nd_mapping *nd_mapping;
+ resource_size_t available, busy;
+ struct resource *res;
+};
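A simplified sketch of how a caller drives this structure, assuming kernel context (this only accounts for PMEM aliasing; the full accounting in nd_blk_available_dpa() also subtracts the BLK region's own busy ranges):

static resource_size_t blk_avail_sketch(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		/* .res left NULL: account only, no range truncation */
	};

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
	return info.available;
}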
+
bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev);
bool is_nd_blk(struct device *dev);
@@ -54,7 +71,7 @@ void nvdimm_devs_exit(void);
void nd_region_devs_exit(void);
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev);
struct nd_region;
-void nd_region_create_blk_seed(struct nd_region *nd_region);
+void nd_region_create_ns_seed(struct nd_region *nd_region);
void nd_region_create_btt_seed(struct nd_region *nd_region);
void nd_region_create_pfn_seed(struct nd_region *nd_region);
void nd_region_create_dax_seed(struct nd_region *nd_region);
@@ -73,13 +90,14 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
struct nd_region;
struct nvdimm_drvdata;
struct nd_mapping;
+void nd_mapping_free_labels(struct nd_mapping *nd_mapping);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, resource_size_t *overlap);
-resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping);
+resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id);
-struct nd_mapping;
+int alias_dpa_busy(struct device *dev, void *data);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
resource_size_t start);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 0b78a8211f4a..d3b2fca8deec 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -101,9 +101,6 @@ static inline struct nd_namespace_index *to_next_namespace_index(
(unsigned long long) (res ? resource_size(res) : 0), \
(unsigned long long) (res ? res->start : 0), ##arg)
-#define for_each_label(l, label, labels) \
- for (l = 0; (label = labels ? labels[l] : NULL); l++)
-
#define for_each_dpa_resource(ndd, res) \
for (res = (ndd)->dpa.child; res; res = res->sibling)
@@ -116,6 +113,31 @@ struct nd_percpu_lane {
spinlock_t lock;
};
+struct nd_label_ent {
+ struct list_head list;
+ struct nd_namespace_label *label;
+};
+
+enum nd_mapping_lock_class {
+ ND_MAPPING_CLASS0,
+ ND_MAPPING_UUID_SCAN,
+};
+
+struct nd_mapping {
+ struct nvdimm *nvdimm;
+ u64 start;
+ u64 size;
+ struct list_head labels;
+ struct mutex lock;
+ /*
+ * @ndd is for private use at region enable / disable time for
+ * get_ndd() + put_ndd(), all other nd_mapping to ndd
+ * conversions use to_ndd() which respects enabled state of the
+ * nvdimm.
+ */
+ struct nvdimm_drvdata *ndd;
+};
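The labels list plus mutex replaces the old NULL-terminated label array; each nd_label_ent wraps at most one on-media label, and a NULL ->label entry is a reserved slot. A sketch of the expected access pattern, assuming kernel context (count_active_labels is a hypothetical helper mirroring the locking discipline used throughout this patch):

static int count_active_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent;
	int count = 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		if (label_ent->label)
			count++;
	mutex_unlock(&nd_mapping->lock);

	return count;
}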
+
struct nd_region {
struct device dev;
struct ida ns_ida;
@@ -209,6 +231,7 @@ void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
+int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 571a6c7ee2fc..24618431a14b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -47,7 +47,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}
-static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
+static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
struct device *dev = to_dev(pmem);
@@ -62,8 +62,32 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
__func__, (unsigned long long) sector,
cleared / 512, cleared / 512 > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared / 512);
+ } else {
+ return -EIO;
}
+
invalidate_pmem(pmem->virt_addr + offset, len);
+ return 0;
+}
+
+static void write_pmem(void *pmem_addr, struct page *page,
+ unsigned int off, unsigned int len)
+{
+ void *mem = kmap_atomic(page);
+
+ memcpy_to_pmem(pmem_addr, mem + off, len);
+ kunmap_atomic(mem);
+}
+
+static int read_pmem(struct page *page, unsigned int off,
+ void *pmem_addr, unsigned int len)
+{
+ int rc;
+ void *mem = kmap_atomic(page);
+
+ rc = memcpy_from_pmem(mem + off, pmem_addr, len);
+ kunmap_atomic(mem);
+ return rc;
}
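Confining kmap_atomic()/kunmap_atomic() to these helpers means no atomic mapping is held across the clear-poison path below, and read_pmem() can surface a machine-check-poisoned read as the rc that pmem_do_bvec() ultimately returns. A hypothetical caller, assuming kernel context:

static int copy_bvec_sketch(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len, bool is_write)
{
	if (is_write) {
		write_pmem(pmem_addr, page, off, len);	/* cannot fail */
		return 0;
	}
	/* -EIO here indicates poison consumed during the copy */
	return read_pmem(page, off, pmem_addr, len);
}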
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
@@ -72,7 +96,6 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
{
int rc = 0;
bool bad_pmem = false;
- void *mem = kmap_atomic(page);
phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
void *pmem_addr = pmem->virt_addr + pmem_off;
@@ -83,7 +106,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(bad_pmem))
rc = -EIO;
else {
- rc = memcpy_from_pmem(mem + off, pmem_addr, len);
+ rc = read_pmem(page, off, pmem_addr, len);
flush_dcache_page(page);
}
} else {
@@ -102,14 +125,13 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
* after clear poison.
*/
flush_dcache_page(page);
- memcpy_to_pmem(pmem_addr, mem + off, len);
+ write_pmem(pmem_addr, page, off, len);
if (unlikely(bad_pmem)) {
- pmem_clear_poison(pmem, pmem_off, len);
- memcpy_to_pmem(pmem_addr, mem + off, len);
+ rc = pmem_clear_poison(pmem, pmem_off, len);
+ write_pmem(pmem_addr, page, off, len);
}
}
- kunmap_atomic(mem);
return rc;
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 4c0ac4abb629..6af5e629140c 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -70,7 +70,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
int nd_region_activate(struct nd_region *nd_region)
{
- int i, num_flush = 0;
+ int i, j, num_flush = 0;
struct nd_region_data *ndrd;
struct device *dev = &nd_region->dev;
size_t flush_data_size = sizeof(void *);
@@ -107,6 +107,21 @@ int nd_region_activate(struct nd_region *nd_region)
return rc;
}
+ /*
+ * Clear out duplicate flush-hint entries so each address is written
+ * at most once, avoiding redundant flushes.
+ */
+ for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
+ /* ignore if NULL already */
+ if (!ndrd_get_flush_wpq(ndrd, i, 0))
+ continue;
+
+ for (j = i + 1; j < nd_region->ndr_mappings; j++)
+ if (ndrd_get_flush_wpq(ndrd, i, 0) ==
+ ndrd_get_flush_wpq(ndrd, j, 0))
+ ndrd_set_flush_wpq(ndrd, j, 0, NULL);
+ }
+
return 0;
}
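The quadratic scan is acceptable because ndr_mappings is small (one entry per DIMM in the interleave set). A userspace sketch of the same deduplication with hypothetical hint addresses:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	/* two DIMMs behind one flush hint, a third with its own */
	void *hint[3] = { (void *)0x1000, (void *)0x1000, (void *)0x2000 };
	int i, j, n = 3;

	for (i = 0; i < n - 1; i++) {
		if (!hint[i])
			continue;
		for (j = i + 1; j < n; j++)
			if (hint[i] == hint[j])
				hint[j] = NULL;	/* drop the duplicate */
	}
	assert(hint[0] && !hint[1] && hint[2]);
	return 0;
}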
@@ -298,9 +313,8 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
blk_max_overlap = overlap;
goto retry;
}
- } else if (is_nd_blk(&nd_region->dev)) {
- available += nd_blk_available_dpa(nd_mapping);
- }
+ } else if (is_nd_blk(&nd_region->dev))
+ available += nd_blk_available_dpa(nd_region);
}
return available;
@@ -491,6 +505,17 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
return 0;
}
+void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
+{
+ struct nd_label_ent *label_ent, *e;
+
+ WARN_ON(!mutex_is_locked(&nd_mapping->lock));
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+ list_del(&label_ent->list);
+ kfree(label_ent);
+ }
+}
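The _safe iterator matters because the loop body frees the node it is standing on; list_for_each_entry_safe() caches the next pointer in @e before kfree() can invalidate it. The generic shape, assuming kernel context and a hypothetical entry type:

struct ent { struct list_head list; };	/* hypothetical */

static void free_all(struct list_head *head)
{
	struct ent *cur, *next;

	list_for_each_entry_safe(cur, next, head, list) {
		list_del(&cur->list);
		kfree(cur);	/* safe: next was read before the free */
	}
}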
+
/*
* Upon successful probe/remove, take/release a reference on the
* associated interleave set (if present), and plant new btt + namespace
@@ -511,8 +536,10 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
struct nvdimm_drvdata *ndd = nd_mapping->ndd;
struct nvdimm *nvdimm = nd_mapping->nvdimm;
- kfree(nd_mapping->labels);
- nd_mapping->labels = NULL;
+ mutex_lock(&nd_mapping->lock);
+ nd_mapping_free_labels(nd_mapping);
+ mutex_unlock(&nd_mapping->lock);
+
put_ndd(ndd);
nd_mapping->ndd = NULL;
if (ndd)
@@ -522,11 +549,12 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
if (is_nd_pmem(dev))
return;
}
- if (dev->parent && is_nd_blk(dev->parent) && probe) {
+ if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
+ && probe) {
nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev);
if (nd_region->ns_seed == dev)
- nd_region_create_blk_seed(nd_region);
+ nd_region_create_ns_seed(nd_region);
nvdimm_bus_unlock(dev);
}
if (is_nd_btt(dev) && probe) {
@@ -536,23 +564,30 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
nvdimm_bus_lock(dev);
if (nd_region->btt_seed == dev)
nd_region_create_btt_seed(nd_region);
- if (nd_region->ns_seed == &nd_btt->ndns->dev &&
- is_nd_blk(dev->parent))
- nd_region_create_blk_seed(nd_region);
+ if (nd_region->ns_seed == &nd_btt->ndns->dev)
+ nd_region_create_ns_seed(nd_region);
nvdimm_bus_unlock(dev);
}
if (is_nd_pfn(dev) && probe) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev);
if (nd_region->pfn_seed == dev)
nd_region_create_pfn_seed(nd_region);
+ if (nd_region->ns_seed == &nd_pfn->ndns->dev)
+ nd_region_create_ns_seed(nd_region);
nvdimm_bus_unlock(dev);
}
if (is_nd_dax(dev) && probe) {
+ struct nd_dax *nd_dax = to_nd_dax(dev);
+
nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev);
if (nd_region->dax_seed == dev)
nd_region_create_dax_seed(nd_region);
+ if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
+ nd_region_create_ns_seed(nd_region);
nvdimm_bus_unlock(dev);
}
}
@@ -759,10 +794,10 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
int ro = 0;
for (i = 0; i < ndr_desc->num_mappings; i++) {
- struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
- struct nvdimm *nvdimm = nd_mapping->nvdimm;
+ struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
+ struct nvdimm *nvdimm = mapping->nvdimm;
- if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
+ if ((mapping->start | mapping->size) % SZ_4K) {
dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
caller, dev_name(&nvdimm->dev), i);
@@ -813,11 +848,15 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
ndl->count = 0;
}
- memcpy(nd_region->mapping, ndr_desc->nd_mapping,
- sizeof(struct nd_mapping) * ndr_desc->num_mappings);
for (i = 0; i < ndr_desc->num_mappings; i++) {
- struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
- struct nvdimm *nvdimm = nd_mapping->nvdimm;
+ struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
+ struct nvdimm *nvdimm = mapping->nvdimm;
+
+ nd_region->mapping[i].nvdimm = nvdimm;
+ nd_region->mapping[i].start = mapping->start;
+ nd_region->mapping[i].size = mapping->size;
+ INIT_LIST_HEAD(&nd_region->mapping[i].labels);
+ mutex_init(&nd_region->mapping[i].lock);
get_device(&nvdimm->dev);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2feacc70bf61..79e679d12f3b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -156,12 +156,14 @@ static void nvme_free_ns(struct kref *kref)
{
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
- if (ns->type == NVME_NS_LIGHTNVM)
- nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
+ if (ns->ndev)
+ nvme_nvm_unregister(ns);
- spin_lock(&dev_list_lock);
- ns->disk->private_data = NULL;
- spin_unlock(&dev_list_lock);
+ if (ns->disk) {
+ spin_lock(&dev_list_lock);
+ ns->disk->private_data = NULL;
+ spin_unlock(&dev_list_lock);
+ }
put_disk(ns->disk);
ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
@@ -552,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(1);
+ c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
if (!*id)
@@ -570,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
struct nvme_command c = { };
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(2);
+ c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
c.identify.nsid = cpu_to_le32(nsid);
return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}
@@ -597,7 +599,7 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
}
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
- dma_addr_t dma_addr, u32 *result)
+ void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
struct nvme_completion cqe;
@@ -606,10 +608,9 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
c.features.nsid = cpu_to_le32(nsid);
- c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
@@ -617,7 +618,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
}
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
- dma_addr_t dma_addr, u32 *result)
+ void *buffer, size_t buflen, u32 *result)
{
struct nvme_command c;
struct nvme_completion cqe;
@@ -625,12 +626,11 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
- c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
- NVME_QID_ANY, 0, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+ buffer, buflen, 0, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
@@ -664,7 +664,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
u32 result;
int status, nr_io_queues;
- status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
+ status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
&result);
if (status < 0)
return status;
@@ -888,42 +888,32 @@ static void nvme_config_discard(struct nvme_ns *ns)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
-static int nvme_revalidate_disk(struct gendisk *disk)
+static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
{
- struct nvme_ns *ns = disk->private_data;
- struct nvme_id_ns *id;
- u8 lbaf, pi_type;
- u16 old_ms;
- unsigned short bs;
-
- if (test_bit(NVME_NS_DEAD, &ns->flags)) {
- set_capacity(disk, 0);
- return -ENODEV;
- }
- if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
- dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
- __func__);
+ if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
+ dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
return -ENODEV;
}
- if (id->ncap == 0) {
- kfree(id);
+
+ if ((*id)->ncap == 0) {
+ kfree(*id);
return -ENODEV;
}
- if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
- if (nvme_nvm_register(ns->queue, disk->disk_name)) {
- dev_warn(disk_to_dev(ns->disk),
- "%s: LightNVM init failure\n", __func__);
- kfree(id);
- return -ENODEV;
- }
- ns->type = NVME_NS_LIGHTNVM;
- }
+ if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
+ memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
+ if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
+ memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
+
+ return 0;
+}
- if (ns->ctrl->vs >= NVME_VS(1, 1))
- memcpy(ns->eui, id->eui64, sizeof(ns->eui));
- if (ns->ctrl->vs >= NVME_VS(1, 2))
- memcpy(ns->uuid, id->nguid, sizeof(ns->uuid));
+static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+{
+ struct nvme_ns *ns = disk->private_data;
+ u8 lbaf, pi_type;
+ u16 old_ms;
+ unsigned short bs;
old_ms = ns->ms;
lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
@@ -962,8 +952,26 @@ static int nvme_revalidate_disk(struct gendisk *disk)
if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
nvme_config_discard(ns);
blk_mq_unfreeze_queue(disk->queue);
+}
+static int nvme_revalidate_disk(struct gendisk *disk)
+{
+ struct nvme_ns *ns = disk->private_data;
+ struct nvme_id_ns *id = NULL;
+ int ret;
+
+ if (test_bit(NVME_NS_DEAD, &ns->flags)) {
+ set_capacity(disk, 0);
+ return -ENODEV;
+ }
+
+ ret = nvme_revalidate_ns(ns, &id);
+ if (ret)
+ return ret;
+
+ __nvme_revalidate_disk(disk, id);
kfree(id);
+
return 0;
}
@@ -1078,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
int ret;
while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+ if (csts == ~0)
+ return -ENODEV;
if ((csts & NVME_CSTS_RDY) == bit)
break;
@@ -1232,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
page_shift = NVME_CAP_MPSMIN(cap) + 12;
- if (ctrl->vs >= NVME_VS(1, 1))
+ if (ctrl->vs >= NVME_VS(1, 1, 0))
ctrl->subsystem = NVME_CAP_NSSRC(cap);
ret = nvme_identify_ctrl(ctrl, &id);
@@ -1425,7 +1435,7 @@ static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
struct nvme_ctrl *ctrl = ns->ctrl;
int serial_len = sizeof(ctrl->serial);
int model_len = sizeof(ctrl->model);
@@ -1449,7 +1459,7 @@ static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
return sprintf(buf, "%pU\n", ns->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
@@ -1457,7 +1467,7 @@ static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
return sprintf(buf, "%8phd\n", ns->eui);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
@@ -1465,7 +1475,7 @@ static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
return sprintf(buf, "%d\n", ns->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
@@ -1482,7 +1492,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
if (a == &dev_attr_uuid.attr) {
if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
@@ -1642,6 +1652,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns;
struct gendisk *disk;
+ struct nvme_id_ns *id;
+ char disk_name[DISK_NAME_LEN];
int node = dev_to_node(ctrl->dev);
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
@@ -1659,34 +1671,49 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
- disk = alloc_disk_node(0, node);
- if (!disk)
- goto out_free_queue;
-
kref_init(&ns->kref);
ns->ns_id = nsid;
- ns->disk = disk;
ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
-
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
- disk->fops = &nvme_fops;
- disk->private_data = ns;
- disk->queue = ns->queue;
- disk->flags = GENHD_FL_EXT_DEVT;
- sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
+
+ if (nvme_revalidate_ns(ns, &id))
+ goto out_free_queue;
+
+ if (nvme_nvm_ns_supported(ns, id)) {
+ if (nvme_nvm_register(ns, disk_name, node,
+ &nvme_ns_attr_group)) {
+ dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
+ __func__);
+ goto out_free_id;
+ }
+ } else {
+ disk = alloc_disk_node(0, node);
+ if (!disk)
+ goto out_free_id;
- if (nvme_revalidate_disk(ns->disk))
- goto out_free_disk;
+ disk->fops = &nvme_fops;
+ disk->private_data = ns;
+ disk->queue = ns->queue;
+ disk->flags = GENHD_FL_EXT_DEVT;
+ memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
+ ns->disk = disk;
+
+ __nvme_revalidate_disk(disk, id);
+ }
mutex_lock(&ctrl->namespaces_mutex);
list_add_tail(&ns->list, &ctrl->namespaces);
mutex_unlock(&ctrl->namespaces_mutex);
kref_get(&ctrl->kref);
- if (ns->type == NVME_NS_LIGHTNVM)
+
+ kfree(id);
+
+ if (ns->ndev)
return;
device_add_disk(ctrl->device, ns->disk);
@@ -1695,8 +1722,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
pr_warn("%s: failed to create sysfs group for identification\n",
ns->disk->disk_name);
return;
- out_free_disk:
- kfree(disk);
+ out_free_id:
+ kfree(id);
out_free_queue:
blk_cleanup_queue(ns->queue);
out_release_instance:
@@ -1710,7 +1737,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
- if (ns->disk->flags & GENHD_FL_UP) {
+ if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
@@ -1733,7 +1760,7 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- if (revalidate_disk(ns->disk))
+ if (ns->disk && revalidate_disk(ns->disk))
nvme_ns_remove(ns);
nvme_put_ns(ns);
} else
@@ -1815,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work)
return;
nn = le32_to_cpu(id->nn);
- if (ctrl->vs >= NVME_VS(1, 1) &&
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
if (!nvme_scan_ns_list(ctrl, nn))
goto done;
@@ -1826,9 +1853,6 @@ static void nvme_scan_work(struct work_struct *work)
list_sort(NULL, &ctrl->namespaces, ns_cmp);
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
-
- if (ctrl->ops->post_scan)
- ctrl->ops->post_scan(ctrl);
}
void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -2038,7 +2062,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
* Revalidating a dead namespace sets capacity to 0. This will
* end buffered writers dirtying pages that can't be synced.
*/
- if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ if (ns->disk && !test_and_set_bit(NVME_NS_DEAD, &ns->flags))
revalidate_disk(ns->disk);
blk_set_queue_dying(ns->queue);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 4eff49174466..5a3f008d3480 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -111,8 +111,19 @@ static void nvmf_host_put(struct nvmf_host *host)
*/
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
- return snprintf(buf, size, "traddr=%s,trsvcid=%s\n",
- ctrl->opts->traddr, ctrl->opts->trsvcid);
+ int len = 0;
+
+ if (ctrl->opts->mask & NVMF_OPT_TRADDR)
+ len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
+ if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
+ len += snprintf(buf + len, size - len, "%strsvcid=%s",
+ (len) ? "," : "", ctrl->opts->trsvcid);
+ if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
+ len += snprintf(buf + len, size - len, "%shost_traddr=%s",
+ (len) ? "," : "", ctrl->opts->host_traddr);
+ len += snprintf(buf + len, size - len, "\n");
+
+ return len;
}
EXPORT_SYMBOL_GPL(nvmf_get_address);
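The rewritten nvmf_get_address() prints only the options the user actually supplied, inserting a comma before every field except the first. A standalone sketch of the same chained-snprintf pattern; the option names and sample values here are illustrative, not the driver's:

#include <stdio.h>

#define OPT_TRADDR      (1 << 0)
#define OPT_TRSVCID     (1 << 1)
#define OPT_HOST_TRADDR (1 << 2)

/* Each snprintf() advances len; the (len ? "," : "") prefix is empty
 * for the first field, so commas appear only between fields. */
static int format_address(char *buf, int size, unsigned mask)
{
        int len = 0;

        if (mask & OPT_TRADDR)
                len += snprintf(buf + len, size - len, "traddr=%s",
                                "1.2.3.4");
        if (mask & OPT_TRSVCID)
                len += snprintf(buf + len, size - len, "%strsvcid=%s",
                                len ? "," : "", "4420");
        if (mask & OPT_HOST_TRADDR)
                len += snprintf(buf + len, size - len, "%shost_traddr=%s",
                                len ? "," : "", "1.2.3.5");
        len += snprintf(buf + len, size - len, "\n");
        return len;
}

int main(void)
{
        char buf[128];

        format_address(buf, sizeof(buf), OPT_TRADDR | OPT_HOST_TRADDR);
        fputs(buf, stdout);     /* traddr=1.2.3.4,host_traddr=1.2.3.5 */
        return 0;
}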
@@ -519,6 +530,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_RECONNECT_DELAY, "reconnect_delay=%d" },
{ NVMF_OPT_KATO, "keep_alive_tmo=%d" },
{ NVMF_OPT_HOSTNQN, "hostnqn=%s" },
+ { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
{ NVMF_OPT_ERR, NULL }
};
@@ -675,6 +687,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->reconnect_delay = token;
break;
+ case NVMF_OPT_HOST_TRADDR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->host_traddr = p;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -741,6 +761,7 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->traddr);
kfree(opts->trsvcid);
kfree(opts->subsysnqn);
+ kfree(opts->host_traddr);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46e460aee52d..924145c979f1 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -52,6 +52,7 @@ enum {
NVMF_OPT_KATO = 1 << 7,
NVMF_OPT_HOSTNQN = 1 << 8,
NVMF_OPT_RECONNECT_DELAY = 1 << 9,
+ NVMF_OPT_HOST_TRADDR = 1 << 10,
};
/**
@@ -64,9 +65,12 @@ enum {
* being added.
* @subsysnqn: Hold the fully qualified NQN subsystem name (format defined
* in the NVMe specification, "NVMe Qualified Names").
- * @traddr: network address that will be used by the host to communicate
- * to the added NVMe controller.
- * @trsvcid: network port used for host-controller communication.
+ * @traddr: The transport-specific TRADDR field for a port on the
+ * subsystem to which the controller is being added.
+ * @trsvcid: The transport-specific TRSVCID field for a port on the
+ * subsystem to which the controller is being added.
+ * @host_traddr: A transport-specific field identifying the NVMe host port
+ * to use for the connection to the controller.
* @queue_size: Number of IO queue elements.
* @nr_io_queues: Number of controller IO queues that will be established.
* @reconnect_delay: Time between two consecutive reconnect attempts.
@@ -80,6 +84,7 @@ struct nvmf_ctrl_options {
char *subsysnqn;
char *traddr;
char *trsvcid;
+ char *host_traddr;
size_t queue_size;
unsigned int nr_io_queues;
unsigned int reconnect_delay;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 63f483daf930..5daf2f4be0cd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -475,7 +475,7 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
- rqd->bio->bi_iter.bi_sector));
+ rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, int error)
@@ -592,14 +592,37 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.max_phys_sect = 64,
};
-int nvme_nvm_register(struct request_queue *q, char *disk_name)
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
+ const struct attribute_group *attrs)
{
- return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
+ struct request_queue *q = ns->queue;
+ struct nvm_dev *dev;
+ int ret;
+
+ dev = nvm_alloc_dev(node);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->q = q;
+ memcpy(dev->name, disk_name, DISK_NAME_LEN);
+ dev->ops = &nvme_nvm_dev_ops;
+ dev->parent_dev = ns->ctrl->device;
+ dev->private_data = ns;
+ ns->ndev = dev;
+
+ ret = nvm_register(dev);
+
+ ns->lba_shift = ilog2(dev->sec_size);
+
+ if (sysfs_create_group(&dev->dev.kobj, attrs))
+ pr_warn("%s: failed to create sysfs group for identification\n",
+ disk_name);
+ return ret;
}
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
+void nvme_nvm_unregister(struct nvme_ns *ns)
{
- nvm_unregister(disk_name);
+ nvm_unregister(ns->ndev);
}
/* move to shared place when used in multiple places. */
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ab18b78102bf..d47f5a5d18c7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
+#include <linux/lightnvm.h>
enum {
/*
@@ -154,6 +155,7 @@ struct nvme_ns {
struct nvme_ctrl *ctrl;
struct request_queue *queue;
struct gendisk *disk;
+ struct nvm_dev *ndev;
struct kref kref;
int instance;
@@ -165,7 +167,6 @@ struct nvme_ns {
u16 ms;
bool ext;
u8 pi_type;
- int type;
unsigned long flags;
#define NVME_NS_REMOVING 0
@@ -184,7 +185,6 @@ struct nvme_ctrl_ops {
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int (*reset_ctrl)(struct nvme_ctrl *ctrl);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
- void (*post_scan)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
int (*delete_ctrl)(struct nvme_ctrl *ctrl);
const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
@@ -292,9 +292,9 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
- dma_addr_t dma_addr, u32 *result);
+ void *buffer, size_t buflen, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
- dma_addr_t dma_addr, u32 *result);
+ void *buffer, size_t buflen, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
@@ -307,20 +307,35 @@ int nvme_sg_get_version_num(int __user *ip);
#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
-int nvme_nvm_register(struct request_queue *q, char *disk_name);
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
+ const struct attribute_group *attrs);
+void nvme_nvm_unregister(struct nvme_ns *ns);
+
+static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
+{
+ if (dev->type->devnode)
+ return dev_to_disk(dev)->private_data;
+
+ return (container_of(dev, struct nvm_dev, dev))->private_data;
+}
#else
-static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
+static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
+ int node,
+ const struct attribute_group *attrs)
{
return 0;
}
-static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
+static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
return 0;
}
+static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
+{
+ return dev_to_disk(dev)->private_data;
+}
#endif /* CONFIG_NVM */
int __init nvme_core_init(void);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 60f7eab11865..5e52034ab010 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -16,6 +16,7 @@
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -88,7 +89,6 @@ struct nvme_dev {
unsigned max_qid;
int q_depth;
u32 db_stride;
- struct msix_entry *entry;
void __iomem *bar;
struct work_struct reset_work;
struct work_struct remove_work;
@@ -99,6 +99,7 @@ struct nvme_dev {
dma_addr_t cmb_dma_addr;
u64 cmb_size;
u32 cmbsz;
+ u32 cmbloc;
struct nvme_ctrl ctrl;
struct completion ioq_wait;
};
@@ -201,6 +202,11 @@ static unsigned int nvme_cmd_size(struct nvme_dev *dev)
nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
}
+static int nvmeq_irq(struct nvme_queue *nvmeq)
+{
+ return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector);
+}
+
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -263,6 +269,13 @@ static int nvme_init_request(void *data, struct request *req,
return 0;
}
+static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_dev *dev = set->driver_data;
+
+ return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
+}
+
/**
* __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
* @nvmeq: The queue to use
@@ -503,7 +516,8 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out;
ret = BLK_MQ_RQ_QUEUE_BUSY;
- if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
+ if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
+ DMA_ATTR_NO_WARN))
goto out;
if (!nvme_setup_prps(dev, req, size))
@@ -880,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
/*
* Mark the request as handled, since the inline shutdown
@@ -960,7 +974,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
spin_unlock_irq(&nvmeq->q_lock);
return 1;
}
- vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+ vector = nvmeq_irq(nvmeq);
nvmeq->dev->online_queues--;
nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
@@ -968,7 +982,6 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
- irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
return 0;
@@ -1075,15 +1088,14 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
return NULL;
}
-static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
- const char *name)
+static int queue_request_irq(struct nvme_queue *nvmeq)
{
if (use_threaded_interrupts)
- return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
- nvme_irq_check, nvme_irq, IRQF_SHARED,
- name, nvmeq);
- return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
- IRQF_SHARED, name, nvmeq);
+ return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check,
+ nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq);
+ else
+ return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED,
+ nvmeq->irqname, nvmeq);
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1114,7 +1126,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_cq;
- result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
+ result = queue_request_irq(nvmeq);
if (result < 0)
goto release_sq;
@@ -1131,7 +1143,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
static struct blk_mq_ops nvme_mq_admin_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_hctx = nvme_admin_init_hctx,
.exit_hctx = nvme_admin_exit_hctx,
.init_request = nvme_admin_init_request,
@@ -1141,9 +1152,9 @@ static struct blk_mq_ops nvme_mq_admin_ops = {
static struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_hctx = nvme_init_hctx,
.init_request = nvme_init_request,
+ .map_queues = nvme_pci_map_queues,
.timeout = nvme_timeout,
.poll = nvme_poll,
};
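Dropping the per-request .map_queue hook in favour of a .map_queues callback lets blk_mq_pci_map_queues() derive the CPU-to-hw-queue map from the PCI IRQ affinity masks instead of a generic software spread. A userspace model of that derivation, with a made-up affinity assignment standing in for pci_irq_get_affinity():

#include <stdio.h>

#define NR_CPUS    8
#define NR_QUEUES  4

int main(void)
{
        /* Pretend vector q is affine to CPUs {q, q + NR_QUEUES}; the real
         * masks come from the PCI_IRQ_AFFINITY spread at vector allocation
         * time. Each CPU maps to the queue whose mask contains it. */
        int cpu_to_queue[NR_CPUS];

        for (int q = 0; q < NR_QUEUES; q++) {
                cpu_to_queue[q] = q;
                cpu_to_queue[q + NR_QUEUES] = q;
        }

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu %d -> hw queue %d\n", cpu, cpu_to_queue[cpu]);
        return 0;
}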
@@ -1204,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
struct nvme_queue *nvmeq;
- dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+ dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
NVME_CAP_NSSRC(cap) : 0;
if (dev->subsystem &&
@@ -1231,20 +1242,16 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
result = nvme_enable_ctrl(&dev->ctrl, cap);
if (result)
- goto free_nvmeq;
+ return result;
nvmeq->cq_vector = 0;
- result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
+ result = queue_request_irq(nvmeq);
if (result) {
nvmeq->cq_vector = -1;
- goto free_nvmeq;
+ return result;
}
return result;
-
- free_nvmeq:
- nvme_free_queues(dev, 0);
- return result;
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -1281,7 +1288,7 @@ static void nvme_watchdog_timer(unsigned long data)
/* Skip controllers under certain specific conditions. */
if (nvme_should_reset(dev, csts)) {
- if (queue_work(nvme_workq, &dev->reset_work))
+ if (!nvme_reset(dev))
dev_warn(dev->dev,
"Failed status: 0x%x, reset controller.\n",
csts);
@@ -1306,10 +1313,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
max = min(dev->max_qid, dev->queue_count - 1);
for (i = dev->online_queues; i <= max; i++) {
ret = nvme_create_queue(dev->queues[i], i);
- if (ret) {
- nvme_free_queues(dev, i);
+ if (ret)
break;
- }
}
/*
@@ -1321,28 +1326,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
return ret >= 0 ? 0 : ret;
}
+static ssize_t nvme_cmb_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
+
static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
{
u64 szu, size, offset;
- u32 cmbloc;
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
void __iomem *cmb;
dma_addr_t dma_addr;
- if (!use_cmb_sqes)
- return NULL;
-
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
return NULL;
+ dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
- cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+ if (!use_cmb_sqes)
+ return NULL;
szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
- offset = szu * NVME_CMB_OFST(cmbloc);
- bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+ offset = szu * NVME_CMB_OFST(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
if (offset > bar_size)
return NULL;
@@ -1355,7 +1369,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+ dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
cmb = ioremap_wc(dma_addr, size);
if (!cmb)
return NULL;
@@ -1382,7 +1396,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct nvme_queue *adminq = dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int result, i, vecs, nr_io_queues, size;
+ int result, nr_io_queues, size;
nr_io_queues = num_online_cpus();
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
@@ -1417,29 +1431,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, adminq);
+ free_irq(pci_irq_vector(pdev, 0), adminq);
/*
* If we enabled MSI-X early because INTx is unavailable, disable it
* again before setting up the full range we need.
*/
- if (pdev->msi_enabled)
- pci_disable_msi(pdev);
- else if (pdev->msix_enabled)
- pci_disable_msix(pdev);
-
- for (i = 0; i < nr_io_queues; i++)
- dev->entry[i].entry = i;
- vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
- if (vecs < 0) {
- vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
- if (vecs < 0) {
- vecs = 1;
- } else {
- for (i = 0; i < vecs; i++)
- dev->entry[i].vector = i + pdev->irq;
- }
- }
+ pci_free_irq_vectors(pdev);
+ nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
+ if (nr_io_queues <= 0)
+ return -EIO;
+ dev->max_qid = nr_io_queues;
/*
* Should investigate if there's a performance win from allocating
@@ -1447,36 +1450,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* path to scale better, even if the receive path is limited by the
* number of interrupts.
*/
- nr_io_queues = vecs;
- dev->max_qid = nr_io_queues;
- result = queue_request_irq(dev, adminq, adminq->irqname);
+ result = queue_request_irq(adminq);
if (result) {
adminq->cq_vector = -1;
- goto free_queues;
+ return result;
}
return nvme_create_io_queues(dev);
-
- free_queues:
- nvme_free_queues(dev, 1);
- return result;
-}
-
-static void nvme_pci_post_scan(struct nvme_ctrl *ctrl)
-{
- struct nvme_dev *dev = to_nvme_dev(ctrl);
- struct nvme_queue *nvmeq;
- int i;
-
- for (i = 0; i < dev->online_queues; i++) {
- nvmeq = dev->queues[i];
-
- if (!nvmeq->tags || !(*nvmeq->tags))
- continue;
-
- irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
- blk_mq_tags_cpumask(*nvmeq->tags));
- }
}
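The conversion above replaces the open-coded MSI-X/MSI/INTx setup (and the dev->entry array) with pci_alloc_irq_vectors(). A rough userspace model of the fallback order PCI_IRQ_ALL_TYPES implies — try MSI-X for the full range, then MSI, then a single legacy vector; the availability flags below are hypothetical stand-ins for hardware capability bits, and min_vecs handling is simplified:

#include <stdio.h>

static int alloc_irq_vectors(int min_vecs, int max_vecs,
                             int have_msix, int have_msi)
{
        if (have_msix)
                return max_vecs;                 /* MSI-X: full range */
        if (have_msi)
                return max_vecs < 32 ? max_vecs : 32; /* MSI: up to 32 */
        return min_vecs <= 1 ? 1 : -1;           /* INTx: exactly one */
}

int main(void)
{
        printf("msix: %d vectors\n", alloc_irq_vectors(1, 16, 1, 1));
        printf("msi only: %d vectors\n", alloc_irq_vectors(1, 64, 0, 1));
        printf("intx only: %d vectors\n", alloc_irq_vectors(1, 16, 0, 0));
        return 0;
}

The driver then looks up each queue's Linux IRQ with pci_irq_vector(), which is why the per-queue nvmeq_irq() helper appears earlier in this patch.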
static void nvme_del_queue_end(struct request *req, int error)
@@ -1531,9 +1511,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
{
- int pass, queues = dev->online_queues - 1;
+ int pass;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
@@ -1615,15 +1595,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
* interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
* adjust this later.
*/
- if (pci_enable_msix(pdev, dev->entry, 1)) {
- pci_enable_msi(pdev);
- dev->entry[0].vector = pdev->irq;
- }
-
- if (!dev->entry[0].vector) {
- result = -ENODEV;
- goto disable;
- }
+ result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (result < 0)
+ return result;
cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1642,9 +1616,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->q_depth);
}
- if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+ /*
+ * CMBs can currently only exist on >=1.2 PCIe devices. We only
+ * populate sysfs if a CMB is implemented. Note that we add the CMB
+ * attribute to the nvme_ctrl kobj, so there is no need to remove it
+ * explicitly on exit. Since nvme_dev_attrs_group has no name we can
+ * pass NULL as the final argument to sysfs_add_file_to_group.
+ */
+
+ if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
dev->cmb = nvme_map_cmb(dev);
+ if (dev->cmbsz) {
+ if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL))
+ dev_warn(dev->dev,
+ "failed to add sysfs attribute for CMB\n");
+ }
+ }
+
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
return 0;
@@ -1665,10 +1655,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- if (pdev->msi_enabled)
- pci_disable_msi(pdev);
- else if (pdev->msix_enabled)
- pci_disable_msix(pdev);
+ pci_free_irq_vectors(pdev);
if (pci_is_enabled(pdev)) {
pci_disable_pcie_error_reporting(pdev);
@@ -1678,7 +1665,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i;
+ int i, queues;
u32 csts = -1;
del_timer_sync(&dev->watchdog_timer);
@@ -1689,6 +1676,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
csts = readl(dev->bar + NVME_REG_CSTS);
}
+ queues = dev->online_queues - 1;
for (i = dev->queue_count - 1; i > 0; i--)
nvme_suspend_queue(dev->queues[i]);
@@ -1700,7 +1688,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (dev->queue_count)
nvme_suspend_queue(dev->queues[0]);
} else {
- nvme_disable_io_queues(dev);
+ nvme_disable_io_queues(dev, queues);
nvme_disable_admin_queue(dev, shutdown);
}
nvme_pci_disable(dev);
@@ -1743,7 +1731,6 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
- kfree(dev->entry);
kfree(dev);
}
@@ -1848,11 +1835,10 @@ static int nvme_reset(struct nvme_dev *dev)
{
if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
return -ENODEV;
-
+ if (work_busy(&dev->reset_work))
+ return -ENODEV;
if (!queue_work(nvme_workq, &dev->reset_work))
return -EBUSY;
-
- flush_work(&dev->reset_work);
return 0;
}
@@ -1876,7 +1862,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
- return nvme_reset(to_nvme_dev(ctrl));
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+ int ret = nvme_reset(dev);
+
+ if (!ret)
+ flush_work(&dev->reset_work);
+ return ret;
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -1887,7 +1878,6 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.reg_read64 = nvme_pci_reg_read64,
.reset_ctrl = nvme_pci_reset_ctrl,
.free_ctrl = nvme_pci_free_ctrl,
- .post_scan = nvme_pci_post_scan,
.submit_async_event = nvme_pci_submit_async_event,
};
@@ -1920,10 +1910,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return -ENOMEM;
- dev->entry = kzalloc_node(num_possible_cpus() * sizeof(*dev->entry),
- GFP_KERNEL, node);
- if (!dev->entry)
- goto free;
dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
GFP_KERNEL, node);
if (!dev->queues)
@@ -1964,7 +1950,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nvme_dev_unmap(dev);
free:
kfree(dev->queues);
- kfree(dev->entry);
kfree(dev);
return result;
}
@@ -1976,7 +1961,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
if (prepare)
nvme_dev_disable(dev, false);
else
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
}
static void nvme_shutdown(struct pci_dev *pdev)
@@ -2045,7 +2030,7 @@ static int nvme_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- queue_work(nvme_workq, &ndev->reset_work);
+ nvme_reset(ndev);
return 0;
}
#endif
@@ -2084,7 +2069,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index fbdb2267e460..3d25add36d91 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -54,7 +54,6 @@
struct nvme_rdma_device {
struct ib_device *dev;
struct ib_pd *pd;
- struct ib_mr *mr;
struct kref ref;
struct list_head entry;
};
@@ -84,6 +83,7 @@ enum nvme_rdma_queue_flags {
NVME_RDMA_Q_CONNECTED = (1 << 0),
NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
NVME_RDMA_Q_DELETING = (1 << 2),
+ NVME_RDMA_Q_LIVE = (1 << 3),
};
struct nvme_rdma_queue {
@@ -408,10 +408,7 @@ static void nvme_rdma_free_dev(struct kref *ref)
list_del(&ndev->entry);
mutex_unlock(&device_list_mutex);
- if (!register_always)
- ib_dereg_mr(ndev->mr);
ib_dealloc_pd(ndev->pd);
-
kfree(ndev);
}
@@ -444,24 +441,16 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
ndev->dev = cm_id->device;
kref_init(&ndev->ref);
- ndev->pd = ib_alloc_pd(ndev->dev);
+ ndev->pd = ib_alloc_pd(ndev->dev,
+ register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
if (IS_ERR(ndev->pd))
goto out_free_dev;
- if (!register_always) {
- ndev->mr = ib_get_dma_mr(ndev->pd,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE);
- if (IS_ERR(ndev->mr))
- goto out_free_pd;
- }
-
if (!(ndev->dev->attrs.device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS)) {
dev_err(&ndev->dev->dev,
"Memory registrations not supported.\n");
- goto out_free_mr;
+ goto out_free_pd;
}
list_add(&ndev->entry, &device_list);
@@ -469,9 +458,6 @@ out_unlock:
mutex_unlock(&device_list_mutex);
return ndev;
-out_free_mr:
- if (!register_always)
- ib_dereg_mr(ndev->mr);
out_free_pd:
ib_dealloc_pd(ndev->pd);
out_free_dev:
@@ -639,10 +625,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
for (i = 1; i < ctrl->queue_count; i++) {
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
- if (ret)
- break;
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "failed to connect i/o queue: %d\n", ret);
+ goto out_free_queues;
+ }
+ set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
}
+ return 0;
+
+out_free_queues:
+ nvme_rdma_free_io_queues(ctrl);
return ret;
}
@@ -727,6 +721,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
if (ret)
goto stop_admin_q;
+ set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
if (ret)
goto stop_admin_q;
@@ -776,8 +772,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
nvme_stop_keep_alive(&ctrl->ctrl);
- for (i = 0; i < ctrl->queue_count; i++)
+ for (i = 0; i < ctrl->queue_count; i++) {
clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+ clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
+ }
if (ctrl->queue_count > 1)
nvme_stop_queues(&ctrl->ctrl);
@@ -915,7 +913,7 @@ static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
- put_unaligned_le32(queue->device->mr->rkey, sg->key);
+ put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
return 0;
}
@@ -1000,7 +998,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
nvme_rdma_queue_idx(queue))
return nvme_rdma_map_sg_inline(queue, req, c);
- if (!register_always)
+ if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
return nvme_rdma_map_sg_single(queue, req, c);
}
@@ -1393,6 +1391,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
return BLK_EH_HANDLED;
}
+/*
+ * We cannot accept any other command until the Connect command has completed.
+ */
+static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
+ struct request *rq)
+{
+ if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
+ struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+
+ if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+ cmd->common.opcode != nvme_fabrics_command ||
+ cmd->fabrics.fctype != nvme_fabrics_type_connect)
+ return false;
+ }
+
+ return true;
+}
+
static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1409,6 +1425,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
+ if (!nvme_rdma_queue_is_ready(queue, rq))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
dev = queue->device->dev;
ib_dma_sync_single_for_cpu(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
@@ -1495,7 +1514,6 @@ static void nvme_rdma_complete_rq(struct request *rq)
static struct blk_mq_ops nvme_rdma_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_rdma_init_request,
.exit_request = nvme_rdma_exit_request,
.reinit_request = nvme_rdma_reinit_request,
@@ -1507,7 +1525,6 @@ static struct blk_mq_ops nvme_rdma_mq_ops = {
static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
.queue_rq = nvme_rdma_queue_rq,
.complete = nvme_rdma_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_rdma_init_admin_request,
.exit_request = nvme_rdma_exit_admin_request,
.reinit_request = nvme_rdma_reinit_request,
@@ -1561,6 +1578,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
if (error)
goto out_cleanup_queue;
+ set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
if (error) {
dev_err(ctrl->ctrl.device,
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index e947e298a737..3eaa4d27801e 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -72,15 +72,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#define ALL_LUNS_RETURNED 0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED 0x01
#define RESTRICTED_LUNS_RETURNED 0x00
-#define NVME_POWER_STATE_START_VALID 0x00
-#define NVME_POWER_STATE_ACTIVE 0x01
-#define NVME_POWER_STATE_IDLE 0x02
-#define NVME_POWER_STATE_STANDBY 0x03
-#define NVME_POWER_STATE_LU_CONTROL 0x07
-#define POWER_STATE_0 0
-#define POWER_STATE_1 1
-#define POWER_STATE_2 2
-#define POWER_STATE_3 3
#define DOWNLOAD_SAVE_ACTIVATE 0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E
#define ACTIVATE_DEFERRED_MICROCODE 0x0F
@@ -615,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
eui = id_ns->eui64;
len = sizeof(id_ns->eui64);
- if (ns->ctrl->vs >= NVME_VS(1, 2)) {
+ if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
if (bitmap_empty(eui, len * 8)) {
eui = id_ns->nguid;
len = sizeof(id_ns->nguid);
@@ -688,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res;
- if (ns->ctrl->vs >= NVME_VS(1, 1)) {
+ if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
if (res != -EOPNOTSUPP)
return res;
@@ -915,7 +906,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
kfree(smart_log);
/* Get Features for Temp Threshold */
- res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, 0,
+ res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, NULL, 0,
&feature_resp);
if (res != NVME_SC_SUCCESS)
temp_c_thresh = LOG_TEMP_UNKNOWN;
@@ -1048,7 +1039,7 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
if (len < MODE_PAGE_CACHING_LEN)
return -EINVAL;
- nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, 0,
+ nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, NULL, 0,
&feature_resp);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
@@ -1229,64 +1220,6 @@ static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
/* Start Stop Unit Helper Functions */
-static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
- u8 pc, u8 pcmod, u8 start)
-{
- int res;
- int nvme_sc;
- struct nvme_id_ctrl *id_ctrl;
- int lowest_pow_st; /* max npss = lowest power consumption */
- unsigned ps_desired = 0;
-
- nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
- res = nvme_trans_status_code(hdr, nvme_sc);
- if (res)
- return res;
-
- lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
- kfree(id_ctrl);
-
- switch (pc) {
- case NVME_POWER_STATE_START_VALID:
- /* Action unspecified if POWER CONDITION MODIFIER != 0 */
- if (pcmod == 0 && start == 0x1)
- ps_desired = POWER_STATE_0;
- if (pcmod == 0 && start == 0x0)
- ps_desired = lowest_pow_st;
- break;
- case NVME_POWER_STATE_ACTIVE:
- /* Action unspecified if POWER CONDITION MODIFIER != 0 */
- if (pcmod == 0)
- ps_desired = POWER_STATE_0;
- break;
- case NVME_POWER_STATE_IDLE:
- /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
- if (pcmod == 0x0)
- ps_desired = POWER_STATE_1;
- else if (pcmod == 0x1)
- ps_desired = POWER_STATE_2;
- else if (pcmod == 0x2)
- ps_desired = POWER_STATE_3;
- break;
- case NVME_POWER_STATE_STANDBY:
- /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
- if (pcmod == 0x0)
- ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
- else if (pcmod == 0x1)
- ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
- break;
- case NVME_POWER_STATE_LU_CONTROL:
- default:
- res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
- ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
- SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
- break;
- }
- nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_POWER_MGMT, ps_desired, 0,
- NULL);
- return nvme_trans_status_code(hdr, nvme_sc);
-}
-
static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 buffer_id)
{
@@ -1395,7 +1328,7 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
case MODE_PAGE_CACHING:
dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
- dword11, 0, NULL);
+ dword11, NULL, 0, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
break;
case MODE_PAGE_CONTROL:
@@ -2235,11 +2168,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{
- u8 immed, pcmod, pc, no_flush, start;
+ u8 immed, pcmod, no_flush, start;
immed = cmd[1] & 0x01;
pcmod = cmd[3] & 0x0f;
- pc = (cmd[4] & 0xf0) >> 4;
no_flush = cmd[4] & 0x04;
start = cmd[4] & 0x01;
@@ -2254,8 +2186,8 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (res)
return res;
}
- /* Setup the expected power state transition */
- return nvme_trans_power_state(ns, hdr, pc, pcmod, start);
+
+ return 0;
}
}
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 47c564b5a289..6fe4c48a21e4 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <generated/utsrelease.h>
+#include <asm/unaligned.h>
#include "nvmet.h"
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
@@ -29,8 +30,84 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len;
}
+static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u16 status;
+ struct nvmet_ns *ns;
+ u64 host_reads, host_writes, data_units_read, data_units_written;
+
+ status = NVME_SC_SUCCESS;
+ ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
+ if (!ns) {
+ status = NVME_SC_INVALID_NS;
+ pr_err("nvmet : Counld not find namespace id : %d\n",
+ le32_to_cpu(req->cmd->get_log_page.nsid));
+ goto out;
+ }
+
+ host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
+ data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+ data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+ put_unaligned_le64(host_writes, &slog->host_writes[0]);
+ put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+ nvmet_put_namespace(ns);
+out:
+ return status;
+}
+
+static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u16 status;
+ u64 host_reads = 0, host_writes = 0;
+ u64 data_units_read = 0, data_units_written = 0;
+ struct nvmet_ns *ns;
+ struct nvmet_ctrl *ctrl;
+
+ status = NVME_SC_SUCCESS;
+ ctrl = req->sq->ctrl;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+ data_units_read +=
+ part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+ data_units_written +=
+ part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+
+ }
+ rcu_read_unlock();
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+ put_unaligned_le64(host_writes, &slog->host_writes[0]);
+ put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
+
+ return status;
+}
+
+static u16 nvmet_get_smart_log(struct nvmet_req *req,
+ struct nvme_smart_log *slog)
+{
+ u16 status;
+
+ WARN_ON(req == NULL || slog == NULL);
+ if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
+ status = nvmet_get_smart_log_all(req, slog);
+ else
+ status = nvmet_get_smart_log_nsid(req, slog);
+ return status;
+}
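The SMART log stores its counters as 16-byte little-endian fields, so the target fills the low eight bytes of each with put_unaligned_le64(). A userspace stand-in for that helper, showing the byte layout it produces:

#include <stdio.h>
#include <stdint.h>

/* Userspace model of the kernel helper: write val into the first eight
 * bytes of an arbitrarily aligned buffer, least-significant byte first. */
static void put_unaligned_le64(uint64_t val, void *p)
{
        uint8_t *b = p;

        for (int i = 0; i < 8; i++)
                b[i] = val >> (8 * i);
}

int main(void)
{
        uint8_t host_reads[16] = { 0 };  /* one 128-bit SMART counter */

        put_unaligned_le64(123456789ULL, &host_reads[0]);
        for (int i = 0; i < 16; i++)
                printf("%02x ", host_reads[i]);  /* 15 cd 5b 07 00 ... */
        printf("\n");
        return 0;
}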
+
static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
+ struct nvme_smart_log *smart_log;
size_t data_len = nvmet_get_log_page_len(req->cmd);
void *buf;
u16 status = 0;
@@ -59,6 +136,16 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
* available (e.g. units or commands read/written) those aren't
* persistent over power loss.
*/
+ if (data_len != sizeof(*smart_log)) {
+ status = NVME_SC_INTERNAL;
+ goto err;
+ }
+ smart_log = buf;
+ status = nvmet_get_smart_log(req, smart_log);
+ if (status) {
+ memset(buf, '\0', data_len);
+ goto err;
+ }
break;
case 0x03:
/*
@@ -73,6 +160,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, buf, data_len);
+err:
kfree(buf);
out:
nvmet_req_complete(req, status);
@@ -111,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
*/
/* we support multiple ports and multiple hosts: */
- id->mic = (1 << 0) | (1 << 1);
+ id->cmic = (1 << 0) | (1 << 1);
/* no limit on data transfer sizes for now */
id->mdts = 0;
@@ -423,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
case nvme_admin_identify:
req->data_len = 4096;
switch (le32_to_cpu(cmd->identify.cns)) {
- case 0x00:
+ case NVME_ID_CNS_NS:
req->execute = nvmet_execute_identify_ns;
return 0;
- case 0x01:
+ case NVME_ID_CNS_CTRL:
req->execute = nvmet_execute_identify_ctrl;
return 0;
- case 0x02:
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
req->execute = nvmet_execute_identify_nslist;
return 0;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6559d5afa7bf..a21437a33adb 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -838,9 +838,13 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
- ctrl->csts |= NVME_CSTS_CFS;
- INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
- schedule_work(&ctrl->fatal_err_work);
+ mutex_lock(&ctrl->lock);
+ if (!(ctrl->csts & NVME_CSTS_CFS)) {
+ ctrl->csts |= NVME_CSTS_CFS;
+ INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+ schedule_work(&ctrl->fatal_err_work);
+ }
+ mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
@@ -882,7 +886,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
if (!subsys)
return NULL;
- subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+ subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
switch (type) {
case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 6f65646e89cf..12f39eea569f 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
/* we support only dynamic controllers */
e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
- e->nqntype = type;
+ e->subtype = type;
memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
@@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
case nvme_admin_identify:
req->data_len = 4096;
switch (le32_to_cpu(cmd->identify.cns)) {
- case 0x01:
+ case NVME_ID_CNS_CTRL:
req->execute =
nvmet_execute_identify_disc_ctrl;
return 0;
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 2cd069b691ae..4a96c2049b7b 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -58,6 +58,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
if (req->cmd->rw.opcode == nvme_cmd_write) {
op = REQ_OP_WRITE;
+ op_flags = WRITE_ODIRECT;
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
op_flags |= REQ_FUA;
} else {
@@ -205,7 +206,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
return 0;
case nvme_cmd_dsm:
req->execute = nvmet_execute_dsm;
- req->data_len = le32_to_cpu(cmd->dsm.nr) *
+ req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
sizeof(struct nvme_dsm_range);
return 0;
default:
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 395e60dad835..d5df77d686b2 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -273,7 +273,6 @@ static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static struct blk_mq_ops nvme_loop_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_loop_init_request,
.init_hctx = nvme_loop_init_hctx,
.timeout = nvme_loop_timeout,
@@ -282,7 +281,6 @@ static struct blk_mq_ops nvme_loop_mq_ops = {
static struct blk_mq_ops nvme_loop_admin_mq_ops = {
.queue_rq = nvme_loop_queue_rq,
.complete = nvme_loop_complete_rq,
- .map_queue = blk_mq_map_queue,
.init_request = nvme_loop_init_admin_request,
.init_hctx = nvme_loop_init_admin_hctx,
.timeout = nvme_loop_timeout,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1cbe6e053b5b..005ef5d17a19 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -848,7 +848,7 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
ndev->device = cm_id->device;
kref_init(&ndev->ref);
- ndev->pd = ib_alloc_pd(ndev->device);
+ ndev->pd = ib_alloc_pd(ndev->device, 0);
if (IS_ERR(ndev->pd))
goto out_free_dev;
@@ -951,6 +951,7 @@ err_destroy_cq:
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
+ ib_drain_qp(queue->cm_id->qp);
rdma_destroy_qp(queue->cm_id);
ib_free_cq(queue->cq);
}
@@ -1066,6 +1067,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
spin_lock_init(&queue->rsp_wr_wait_lock);
INIT_LIST_HEAD(&queue->free_rsps);
spin_lock_init(&queue->rsps_lock);
+ INIT_LIST_HEAD(&queue->queue_list);
queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
if (queue->idx < 0) {
@@ -1244,7 +1246,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- ib_drain_qp(queue->cm_id->qp);
schedule_work(&queue->release_work);
}
}
@@ -1269,7 +1270,12 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
{
WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
- pr_err("failed to connect queue\n");
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ if (!list_empty(&queue->queue_list))
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ pr_err("failed to connect queue %d\n", queue->idx);
schedule_work(&queue->release_work);
}
@@ -1352,7 +1358,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
- nvmet_rdma_queue_disconnect(queue);
+ /*
+ * We might end up here when the qp has already been freed,
+ * which means the queue release sequence is in progress;
+ * don't get in its way.
+ */
+ if (queue)
+ nvmet_rdma_queue_disconnect(queue);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
ret = nvmet_rdma_device_removal(cm_id, queue);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index f550c4596a7a..ba140eaee5c8 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -101,4 +101,14 @@ config NVMEM_VF610_OCOTP
This driver can also be built as a module. If so, the module will
be called nvmem-vf610-ocotp.
+config MESON_EFUSE
+ tristate "Amlogic eFuse Support"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 45ab1ae08fa9..8f942a0cdaec 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -22,3 +22,5 @@ obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
nvmem-vf610-ocotp-y := vf610-ocotp.o
+obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o
+nvmem_meson_efuse-y := meson-efuse.o
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
new file mode 100644
index 000000000000..f207c3b10482
--- /dev/null
+++ b/drivers/nvmem/meson-efuse.c
@@ -0,0 +1,93 @@
+/*
+ * Amlogic eFuse Driver
+ *
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+static int meson_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ u8 *buf = val;
+ int ret;
+
+ ret = meson_sm_call_read(buf, SM_EFUSE_READ, offset,
+ bytes, 0, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct nvmem_config econfig = {
+ .name = "meson-efuse",
+ .owner = THIS_MODULE,
+ .stride = 1,
+ .word_size = 1,
+ .read_only = true,
+};
+
+static const struct of_device_id meson_efuse_match[] = {
+ { .compatible = "amlogic,meson-gxbb-efuse", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_efuse_match);
+
+static int meson_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ unsigned int size;
+
+ if (meson_sm_call(SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0)
+ return -EINVAL;
+
+ econfig.dev = &pdev->dev;
+ econfig.reg_read = meson_efuse_read;
+ econfig.size = size;
+
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int meson_efuse_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static struct platform_driver meson_efuse_driver = {
+ .probe = meson_efuse_probe,
+ .remove = meson_efuse_remove,
+ .driver = {
+ .name = "meson-efuse",
+ .of_match_table = meson_efuse_match,
+ },
+};
+
+module_platform_driver(meson_efuse_driver);
+
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_DESCRIPTION("Amlogic Meson NVMEM driver");
+MODULE_LICENSE("GPL v2");
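The nvmem core calls econfig.reg_read with (context, offset, buffer, length) and expects 0 or a negative error; meson_efuse_read() above forwards that to the secure monitor. A userspace model of the same contract, with a fake fuse array standing in for meson_sm_call_read():

#include <stdio.h>
#include <string.h>

static const unsigned char fuses[16] = "MESON-EFUSE-XYZ";

/* Same signature shape as the nvmem reg_read callback: fill val with
 * bytes starting at offset, return 0 or a negative error code. */
static int efuse_read(void *context, unsigned int offset,
                      void *val, size_t bytes)
{
        if (offset + bytes > sizeof(fuses))
                return -1;      /* the real driver returns the SM error */
        memcpy(val, fuses + offset, bytes);
        return 0;
}

int main(void)
{
        char buf[6] = { 0 };

        if (!efuse_read(NULL, 0, buf, 5))
                printf("read: %s\n", buf);      /* MESON */
        return 0;
}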
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index bc07ad30c9bf..ba7b034b2b91 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -90,10 +90,6 @@ config OF_PCI_IRQ
help
OpenFirmware PCI IRQ routing helpers
-config OF_MTD
- depends on MTD
- def_bool y
-
config OF_RESERVED_MEM
depends on OF_EARLY_FLATTREE
bool
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a2e68f740eda..393fea85eb4e 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_pci.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -592,87 +593,16 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
u32 rid_in)
{
struct device *parent_dev;
- struct device_node *msi_controller_node;
- struct device_node *msi_np = *np;
- u32 map_mask, masked_rid, rid_base, msi_base, rid_len, phandle;
- int msi_map_len;
- bool matched;
u32 rid_out = rid_in;
- const __be32 *msi_map = NULL;
/*
* Walk up the device parent links looking for one with a
* "msi-map" property.
*/
- for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) {
- if (!parent_dev->of_node)
- continue;
-
- msi_map = of_get_property(parent_dev->of_node,
- "msi-map", &msi_map_len);
- if (!msi_map)
- continue;
-
- if (msi_map_len % (4 * sizeof(__be32))) {
- dev_err(parent_dev, "Error: Bad msi-map length: %d\n",
- msi_map_len);
- return rid_out;
- }
- /* We have a good parent_dev and msi_map, let's use them. */
- break;
- }
- if (!msi_map)
- return rid_out;
-
- /* The default is to select all bits. */
- map_mask = 0xffffffff;
-
- /*
- * Can be overridden by "msi-map-mask" property. If
- * of_property_read_u32() fails, the default is used.
- */
- of_property_read_u32(parent_dev->of_node, "msi-map-mask", &map_mask);
-
- masked_rid = map_mask & rid_in;
- matched = false;
- while (!matched && msi_map_len >= 4 * sizeof(__be32)) {
- rid_base = be32_to_cpup(msi_map + 0);
- phandle = be32_to_cpup(msi_map + 1);
- msi_base = be32_to_cpup(msi_map + 2);
- rid_len = be32_to_cpup(msi_map + 3);
-
- if (rid_base & ~map_mask) {
- dev_err(parent_dev,
- "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
- map_mask, rid_base);
- return rid_out;
- }
-
- msi_controller_node = of_find_node_by_phandle(phandle);
-
- matched = (masked_rid >= rid_base &&
- masked_rid < rid_base + rid_len);
- if (msi_np)
- matched &= msi_np == msi_controller_node;
-
- if (matched && !msi_np) {
- *np = msi_np = msi_controller_node;
+ for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
+ if (!of_pci_map_rid(parent_dev->of_node, rid_in, "msi-map",
+ "msi-map-mask", np, &rid_out))
break;
- }
-
- of_node_put(msi_controller_node);
- msi_map_len -= 4 * sizeof(__be32);
- msi_map += 4;
- }
- if (!matched)
- return rid_out;
-
- rid_out = masked_rid - rid_base + msi_base;
- dev_dbg(dev,
- "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
- dev_name(parent_dev), map_mask, rid_base, msi_base,
- rid_len, rid_in, rid_out);
-
return rid_out;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index b470f7e3521d..5a3145a02547 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -292,6 +292,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np)
mdiodev = to_mdio_device(d);
if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
return to_phy_device(d);
+ put_device(d);
}
return NULL;
@@ -456,8 +457,11 @@ int of_phy_register_fixed_link(struct device_node *np)
status.link = 1;
status.duplex = of_property_read_bool(fixed_link_node,
"full-duplex");
- if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
+ if (of_property_read_u32(fixed_link_node, "speed",
+ &status.speed)) {
+ of_node_put(fixed_link_node);
return -EINVAL;
+ }
status.pause = of_property_read_bool(fixed_link_node, "pause");
status.asym_pause = of_property_read_bool(fixed_link_node,
"asym-pause");
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 589b30c68e14..b58be12ab277 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -308,3 +308,105 @@ struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node)
EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node);
#endif /* CONFIG_PCI_MSI */
+
+/**
+ * of_pci_map_rid - Translate a requester ID through a downstream mapping.
+ * @np: root complex device node.
+ * @rid: PCI requester ID to map.
+ * @map_name: property name of the map to use.
+ * @map_mask_name: optional property name of the mask to use.
+ * @target: optional pointer to a target device node.
+ * @id_out: optional pointer to receive the translated ID.
+ *
+ * Given a PCI requester ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
+ * @id_out may be NULL if only the other is required. If @target points to
+ * a non-NULL device node pointer, only entries targeting that node will be
+ * matched; if it points to a NULL value, it will receive the device node of
+ * the first matching target phandle, with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_pci_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out)
+{
+ u32 map_mask, masked_rid;
+ int map_len;
+ const __be32 *map = NULL;
+
+ if (!np || !map_name || (!target && !id_out))
+ return -EINVAL;
+
+ map = of_get_property(np, map_name, &map_len);
+ if (!map) {
+ if (target)
+ return -ENODEV;
+ /* Otherwise, no map implies no translation */
+ *id_out = rid;
+ return 0;
+ }
+
+ if (!map_len || map_len % (4 * sizeof(*map))) {
+ pr_err("%s: Error: Bad %s length: %d\n", np->full_name,
+ map_name, map_len);
+ return -EINVAL;
+ }
+
+ /* The default is to select all bits. */
+ map_mask = 0xffffffff;
+
+ /*
+ * Can be overridden by "{iommu,msi}-map-mask" property.
+ * If of_property_read_u32() fails, the default is used.
+ */
+ if (map_mask_name)
+ of_property_read_u32(np, map_mask_name, &map_mask);
+
+ masked_rid = map_mask & rid;
+ for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+ struct device_node *phandle_node;
+ u32 rid_base = be32_to_cpup(map + 0);
+ u32 phandle = be32_to_cpup(map + 1);
+ u32 out_base = be32_to_cpup(map + 2);
+ u32 rid_len = be32_to_cpup(map + 3);
+
+ if (rid_base & ~map_mask) {
+ pr_err("%s: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+ np->full_name, map_name, map_name,
+ map_mask, rid_base);
+ return -EFAULT;
+ }
+
+ if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+ continue;
+
+ phandle_node = of_find_node_by_phandle(phandle);
+ if (!phandle_node)
+ return -ENODEV;
+
+ if (target) {
+ if (*target)
+ of_node_put(phandle_node);
+ else
+ *target = phandle_node;
+
+ if (*target != phandle_node)
+ continue;
+ }
+
+ if (id_out)
+ *id_out = masked_rid - rid_base + out_base;
+
+ pr_debug("%s: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
+ np->full_name, map_name, map_mask, rid_base, out_base,
+ rid_len, rid, masked_rid - rid_base + out_base);
+ return 0;
+ }
+
+ pr_err("%s: Invalid %s translation - no match for rid 0x%x on %s\n",
+ np->full_name, map_name, rid,
+ target && *target ? (*target)->full_name : "any target");
+ return -EFAULT;
+}
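Each msi-map/iommu-map entry is a (rid-base, phandle, out-base, length) quad, and the translated ID is (rid & mask) - rid_base + out_base. A self-contained model of that lookup, with made-up table values:

#include <stdio.h>
#include <stdint.h>

/* One map entry; the phandle (target node) is omitted from this model. */
struct map_entry { uint32_t rid_base, out_base, rid_len; };

static int map_rid(uint32_t rid, uint32_t mask,
                   const struct map_entry *e, uint32_t *out)
{
        uint32_t masked = rid & mask;

        if (masked < e->rid_base || masked >= e->rid_base + e->rid_len)
                return -1;              /* entry does not cover this RID */
        *out = masked - e->rid_base + e->out_base;  /* the core formula */
        return 0;
}

int main(void)
{
        struct map_entry e = { .rid_base = 0x0100, .out_base = 0x2000,
                               .rid_len = 0x100 };
        uint32_t out;

        if (!map_rid(0x0142, 0xffff, &e, &out))
                printf("rid 0x0142 -> id 0x%04x\n", out);   /* 0x2042 */
        return 0;
}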
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f811d2796437..e4bf07d20f9b 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -29,6 +29,7 @@
const struct of_device_id of_default_bus_match_table[] = {
{ .compatible = "simple-bus", },
{ .compatible = "simple-mfd", },
+ { .compatible = "isa", },
#ifdef CONFIG_ARM_AMBA
{ .compatible = "arm,amba-bus", },
#endif /* CONFIG_ARM_AMBA */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index a0e5260bd006..134398e0231b 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -30,7 +30,7 @@ static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
}
return inode;
}
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index e4a5b7ee90cf..4fce494271cc 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -230,20 +230,20 @@ static int advk_pcie_link_up(struct advk_pcie *pcie)
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
+ struct device *dev = &pcie->pdev->dev;
int retries;
/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (advk_pcie_link_up(pcie)) {
- dev_info(&pcie->pdev->dev, "link up\n");
+ dev_info(dev, "link up\n");
return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(&pcie->pdev->dev, "link never came up\n");
-
+ dev_err(dev, "link never came up\n");
return -ETIMEDOUT;
}
@@ -376,6 +376,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
{
+ struct device *dev = &pcie->pdev->dev;
u32 reg;
unsigned int status;
char *strcomp_status, *str_posted;
@@ -407,12 +408,13 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
else
str_posted = "Posted";
- dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+ dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
}
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
+ struct device *dev = &pcie->pdev->dev;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
@@ -426,7 +428,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return 0;
}
- dev_err(&pcie->pdev->dev, "config read/write timed out\n");
+ dev_err(dev, "config read/write timed out\n");
return -ETIMEDOUT;
}
@@ -560,10 +562,11 @@ static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
{
+ struct device *dev = &pcie->pdev->dev;
+
mutex_lock(&pcie->msi_used_lock);
if (!test_bit(hwirq, pcie->msi_irq_in_use))
- dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
- hwirq);
+ dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
else
clear_bit(hwirq, pcie->msi_irq_in_use);
mutex_unlock(&pcie->msi_used_lock);
@@ -910,6 +913,7 @@ out_release_res:
static int advk_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct resource *res;
struct pci_bus *bus, *child;
@@ -917,31 +921,29 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device_node *msi_node;
int ret, irq;
- pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
- GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->pdev = pdev;
- platform_set_drvdata(pdev, pcie);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pcie->base = devm_ioremap_resource(&pdev->dev, res);
+ pcie->base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
+ ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed to register interrupt\n");
+ dev_err(dev, "Failed to register interrupt\n");
return ret;
}
ret = advk_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed to parse resources\n");
+ dev_err(dev, "Failed to parse resources\n");
return ret;
}
@@ -949,24 +951,24 @@ static int advk_pcie_probe(struct platform_device *pdev)
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed to initialize irq\n");
+ dev_err(dev, "Failed to initialize irq\n");
return ret;
}
ret = advk_pcie_init_msi_irq_domain(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed to initialize irq\n");
+ dev_err(dev, "Failed to initialize irq\n");
advk_pcie_remove_irq_domain(pcie);
return ret;
}
- msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
+ msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
if (msi_node)
msi = of_pci_find_msi_chip_by_node(msi_node);
else
msi = NULL;
- bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
+ bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
pcie, &pcie->resources, &pcie->msi);
if (!bus) {
advk_pcie_remove_msi_irq_domain(pcie);
@@ -980,7 +982,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
-
return 0;
}
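The pci-aardvark changes are a pure readability refactor: each function caches struct device *dev = &pcie->pdev->dev once, so the dev_*() and devm_*() calls no longer need multi-line &pcie->pdev->dev dereferences; the probe path also drops a platform_set_drvdata() whose value nothing read back. A minimal sketch of the cached-dev pattern, using a hypothetical foo_pcie driver:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Hypothetical driver-private structure, for illustration only. */
struct foo_pcie {
	struct platform_device *pdev;
};

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;	/* cached once per function */
	struct foo_pcie *foo;

	foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->pdev = pdev;
	dev_info(dev, "probe complete\n");
	return 0;
}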
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 19223ed2e619..9595fad63f6f 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -64,11 +64,10 @@
#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
struct dra7xx_pcie {
- void __iomem *base;
- struct phy **phy;
- int phy_count;
- struct device *dev;
struct pcie_port pp;
+ void __iomem *base; /* DT ti_conf */
+ int phy_count; /* DT phy-names count */
+ struct phy **phy;
};
#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp)
@@ -84,17 +83,6 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
-static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset)
-{
- return readl(pp->dbi_base + offset);
-}
-
-static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset,
- u32 value)
-{
- writel(value, pp->dbi_base + offset);
-}
-
static int dra7xx_pcie_link_up(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
@@ -103,13 +91,14 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp)
return !!(reg & LINK_UP);
}
-static int dra7xx_pcie_establish_link(struct pcie_port *pp)
+static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
{
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ struct pcie_port *pp = &dra7xx->pp;
+ struct device *dev = pp->dev;
u32 reg;
if (dw_pcie_link_up(pp)) {
- dev_err(pp->dev, "link is already up\n");
+ dev_err(dev, "link is already up\n");
return 0;
}
@@ -120,10 +109,8 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp)
return dw_pcie_wait_for_link(pp);
}
-static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
-
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
~INTERRUPTS);
dra7xx_pcie_writel(dra7xx,
@@ -142,6 +129,8 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
@@ -149,10 +138,10 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
- dra7xx_pcie_establish_link(pp);
+ dra7xx_pcie_establish_link(dra7xx);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
- dra7xx_pcie_enable_interrupts(pp);
+ dra7xx_pcie_enable_interrupts(dra7xx);
}
static struct pcie_host_ops dra7xx_pcie_host_ops = {
@@ -196,8 +185,8 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ struct dra7xx_pcie *dra7xx = arg;
+ struct pcie_port *pp = &dra7xx->pp;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
@@ -223,51 +212,51 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
struct dra7xx_pcie *dra7xx = arg;
+ struct device *dev = dra7xx->pp.dev;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
if (reg & ERR_SYS)
- dev_dbg(dra7xx->dev, "System Error\n");
+ dev_dbg(dev, "System Error\n");
if (reg & ERR_FATAL)
- dev_dbg(dra7xx->dev, "Fatal Error\n");
+ dev_dbg(dev, "Fatal Error\n");
if (reg & ERR_NONFATAL)
- dev_dbg(dra7xx->dev, "Non Fatal Error\n");
+ dev_dbg(dev, "Non Fatal Error\n");
if (reg & ERR_COR)
- dev_dbg(dra7xx->dev, "Correctable Error\n");
+ dev_dbg(dev, "Correctable Error\n");
if (reg & ERR_AXI)
- dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
+ dev_dbg(dev, "AXI tag lookup fatal Error\n");
if (reg & ERR_ECRC)
- dev_dbg(dra7xx->dev, "ECRC Error\n");
+ dev_dbg(dev, "ECRC Error\n");
if (reg & PME_TURN_OFF)
- dev_dbg(dra7xx->dev,
+ dev_dbg(dev,
"Power Management Event Turn-Off message received\n");
if (reg & PME_TO_ACK)
- dev_dbg(dra7xx->dev,
+ dev_dbg(dev,
"Power Management Turn-Off Ack message received\n");
if (reg & PM_PME)
- dev_dbg(dra7xx->dev,
- "PM Power Management Event message received\n");
+ dev_dbg(dev, "PM Power Management Event message received\n");
if (reg & LINK_REQ_RST)
- dev_dbg(dra7xx->dev, "Link Request Reset\n");
+ dev_dbg(dev, "Link Request Reset\n");
if (reg & LINK_UP_EVT)
- dev_dbg(dra7xx->dev, "Link-up state change\n");
+ dev_dbg(dev, "Link-up state change\n");
if (reg & CFG_BME_EVT)
- dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
+ dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
if (reg & CFG_MSE_EVT)
- dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
+ dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
@@ -278,13 +267,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
struct platform_device *pdev)
{
int ret;
- struct pcie_port *pp;
+ struct pcie_port *pp = &dra7xx->pp;
+ struct device *dev = pp->dev;
struct resource *res;
- struct device *dev = &pdev->dev;
-
- pp = &dra7xx->pp;
- pp->dev = dev;
- pp->ops = &dra7xx_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 1);
if (pp->irq < 0) {
@@ -292,12 +277,11 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return -EINVAL;
}
- ret = devm_request_irq(&pdev->dev, pp->irq,
- dra7xx_pcie_msi_irq_handler,
+ ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "dra7-pcie-msi", pp);
+ "dra7-pcie-msi", dra7xx);
if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
+ dev_err(dev, "failed to request irq\n");
return ret;
}
@@ -314,7 +298,7 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(dra7xx->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -332,6 +316,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
void __iomem *base;
struct resource *res;
struct dra7xx_pcie *dra7xx;
+ struct pcie_port *pp;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
char name[10];
@@ -343,6 +328,10 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (!dra7xx)
return -ENOMEM;
+ pp = &dra7xx->pp;
+ pp->dev = dev;
+ pp->ops = &dra7xx_pcie_host_ops;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "missing IRQ resource\n");
@@ -390,7 +379,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->base = base;
dra7xx->phy = phy;
- dra7xx->dev = dev;
dra7xx->phy_count = phy_count;
pm_runtime_enable(dev);
@@ -407,7 +395,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags,
"pcie_reset");
if (ret) {
- dev_err(&pdev->dev, "gpio%d request failed, ret %d\n",
+ dev_err(dev, "gpio%d request failed, ret %d\n",
gpio_sel, ret);
goto err_gpio;
}
@@ -420,12 +408,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
reg &= ~LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- platform_set_drvdata(pdev, dra7xx);
-
ret = dra7xx_add_pcie_port(dra7xx, pdev);
if (ret < 0)
goto err_gpio;
+ platform_set_drvdata(pdev, dra7xx);
return 0;
err_gpio:
@@ -451,9 +438,9 @@ static int dra7xx_pcie_suspend(struct device *dev)
u32 val;
/* clear MSE */
- val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+ val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= ~PCI_COMMAND_MEMORY;
- dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+ dw_pcie_writel_rc(pp, PCI_COMMAND, val);
return 0;
}
@@ -465,9 +452,9 @@ static int dra7xx_pcie_resume(struct device *dev)
u32 val;
/* set MSE */
- val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+ val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val |= PCI_COMMAND_MEMORY;
- dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+ dw_pcie_writel_rc(pp, PCI_COMMAND, val);
return 0;
}
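pci-dra7xx is converted to pass its private struct dra7xx_pcie around and recover the embedded pcie_port on demand, the driver's duplicate readl_rc/writel_rc helpers are dropped in favor of the generic dw_pcie_readl_rc()/dw_pcie_writel_rc(), and platform_set_drvdata() now runs only after the port has been added successfully. The container_of() idiom behind to_dra7xx_pcie(), sketched with invented type names:

#include <linux/kernel.h>

struct generic_port {
	int irq;
};

struct private_pcie {
	struct generic_port port;	/* embedded, not a pointer */
	int phy_count;
};

#define to_private_pcie(p) container_of((p), struct private_pcie, port)

static int private_phy_count(struct generic_port *port)
{
	struct private_pcie *priv = to_private_pcie(port);

	return priv->phy_count;
}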
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 2e2d7f00b9e8..f1c544bb8b68 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -29,13 +29,13 @@
#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
struct exynos_pcie {
- void __iomem *elbi_base;
- void __iomem *phy_base;
- void __iomem *block_base;
+ struct pcie_port pp;
+ void __iomem *elbi_base; /* DT 0th resource */
+ void __iomem *phy_base; /* DT 1st resource */
+ void __iomem *block_base; /* DT 2nd resource */
int reset_gpio;
struct clk *clk;
struct clk *bus_clk;
- struct pcie_port pp;
};
/* PCIe ELBI registers */
@@ -102,40 +102,40 @@ struct exynos_pcie {
#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV3_LVCC 0x31c
-static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+static void exynos_elb_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
- writel(val, pcie->elbi_base + reg);
+ writel(val, exynos_pcie->elbi_base + reg);
}
-static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg)
+static u32 exynos_elb_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
- return readl(pcie->elbi_base + reg);
+ return readl(exynos_pcie->elbi_base + reg);
}
-static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+static void exynos_phy_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
- writel(val, pcie->phy_base + reg);
+ writel(val, exynos_pcie->phy_base + reg);
}
-static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg)
+static u32 exynos_phy_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
- return readl(pcie->phy_base + reg);
+ return readl(exynos_pcie->phy_base + reg);
}
-static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+static void exynos_blk_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
- writel(val, pcie->block_base + reg);
+ writel(val, exynos_pcie->block_base + reg);
}
-static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg)
+static u32 exynos_blk_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
- return readl(pcie->block_base + reg);
+ return readl(exynos_pcie->block_base + reg);
}
-static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *exynos_pcie,
+ bool on)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
if (on) {
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
@@ -148,10 +148,10 @@ static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
}
}
-static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
+static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *exynos_pcie,
+ bool on)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
if (on) {
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
@@ -164,10 +164,9 @@ static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
}
}
-static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
+static void exynos_pcie_assert_core_reset(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
@@ -177,10 +176,9 @@ static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
}
-static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
+static void exynos_pcie_deassert_core_reset(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
@@ -193,18 +191,14 @@ static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
}
-static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
+static void exynos_pcie_assert_phy_reset(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
}
-static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
+static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
@@ -213,10 +207,9 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
}
-static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+static void exynos_pcie_power_on_phy(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
val &= ~PCIE_PHY_COMMON_PD_CMN;
@@ -239,10 +232,9 @@ static void exynos_pcie_power_on_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
}
-static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+static void exynos_pcie_power_off_phy(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
val |= PCIE_PHY_COMMON_PD_CMN;
@@ -265,10 +257,8 @@ static void exynos_pcie_power_off_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
}
-static void exynos_pcie_init_phy(struct pcie_port *pp)
+static void exynos_pcie_init_phy(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-
/* DCC feedback control off */
exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);
@@ -305,51 +295,41 @@ static void exynos_pcie_init_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
}
-static void exynos_pcie_assert_reset(struct pcie_port *pp)
+static void exynos_pcie_assert_reset(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ struct pcie_port *pp = &exynos_pcie->pp;
+ struct device *dev = pp->dev;
if (exynos_pcie->reset_gpio >= 0)
- devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
+ devm_gpio_request_one(dev, exynos_pcie->reset_gpio,
GPIOF_OUT_INIT_HIGH, "RESET");
}
-static int exynos_pcie_establish_link(struct pcie_port *pp)
+static int exynos_pcie_establish_link(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ struct pcie_port *pp = &exynos_pcie->pp;
+ struct device *dev = pp->dev;
u32 val;
if (dw_pcie_link_up(pp)) {
- dev_err(pp->dev, "Link already up\n");
+ dev_err(dev, "Link already up\n");
return 0;
}
- /* assert reset signals */
- exynos_pcie_assert_core_reset(pp);
- exynos_pcie_assert_phy_reset(pp);
-
- /* de-assert phy reset */
- exynos_pcie_deassert_phy_reset(pp);
-
- /* power on phy */
- exynos_pcie_power_on_phy(pp);
-
- /* initialize phy */
- exynos_pcie_init_phy(pp);
+ exynos_pcie_assert_core_reset(exynos_pcie);
+ exynos_pcie_assert_phy_reset(exynos_pcie);
+ exynos_pcie_deassert_phy_reset(exynos_pcie);
+ exynos_pcie_power_on_phy(exynos_pcie);
+ exynos_pcie_init_phy(exynos_pcie);
/* pulse for common reset */
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
udelay(500);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
- /* de-assert core reset */
- exynos_pcie_deassert_core_reset(pp);
-
- /* setup root complex */
+ exynos_pcie_deassert_core_reset(exynos_pcie);
dw_pcie_setup_rc(pp);
-
- /* assert reset signal */
- exynos_pcie_assert_reset(pp);
+ exynos_pcie_assert_reset(exynos_pcie);
/* assert LTSSM enable */
exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
@@ -361,27 +341,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
- dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
+ dev_info(dev, "PLL Locked: 0x%x\n", val);
}
- /* power off phy */
- exynos_pcie_power_off_phy(pp);
-
+ exynos_pcie_power_off_phy(exynos_pcie);
return -ETIMEDOUT;
}
-static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
+static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
}
-static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
/* enable INTX interrupt */
val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
@@ -391,23 +367,24 @@ static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
+ struct exynos_pcie *exynos_pcie = arg;
- exynos_pcie_clear_irq_pulse(pp);
+ exynos_pcie_clear_irq_pulse(exynos_pcie);
return IRQ_HANDLED;
}
static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
+ struct exynos_pcie *exynos_pcie = arg;
+ struct pcie_port *pp = &exynos_pcie->pp;
return dw_handle_msi_irq(pp);
}
-static void exynos_pcie_msi_init(struct pcie_port *pp)
+static void exynos_pcie_msi_init(struct exynos_pcie *exynos_pcie)
{
+ struct pcie_port *pp = &exynos_pcie->pp;
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
dw_pcie_msi_init(pp);
@@ -417,60 +394,64 @@ static void exynos_pcie_msi_init(struct pcie_port *pp)
exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
}
-static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
+static void exynos_pcie_enable_interrupts(struct exynos_pcie *exynos_pcie)
{
- exynos_pcie_enable_irq_pulse(pp);
+ exynos_pcie_enable_irq_pulse(exynos_pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
- exynos_pcie_msi_init(pp);
+ exynos_pcie_msi_init(exynos_pcie);
}
-static inline u32 exynos_pcie_readl_rc(struct pcie_port *pp,
- void __iomem *dbi_base)
+static u32 exynos_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
u32 val;
- exynos_pcie_sideband_dbi_r_mode(pp, true);
- val = readl(dbi_base);
- exynos_pcie_sideband_dbi_r_mode(pp, false);
+ exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
+ val = readl(pp->dbi_base + reg);
+ exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
return val;
}
-static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
- u32 val, void __iomem *dbi_base)
+static void exynos_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
- exynos_pcie_sideband_dbi_w_mode(pp, true);
- writel(val, dbi_base);
- exynos_pcie_sideband_dbi_w_mode(pp, false);
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
+ writel(val, pp->dbi_base + reg);
+ exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
}
static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
int ret;
- exynos_pcie_sideband_dbi_r_mode(pp, true);
+ exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_r_mode(pp, false);
+ exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
return ret;
}
static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
u32 val)
{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
int ret;
- exynos_pcie_sideband_dbi_w_mode(pp, true);
+ exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_w_mode(pp, false);
+ exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
return ret;
}
static int exynos_pcie_link_up(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
- u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
+ u32 val;
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
if (val == PCIE_ELBI_LTSSM_ENABLE)
return 1;
@@ -479,8 +460,10 @@ static int exynos_pcie_link_up(struct pcie_port *pp)
static void exynos_pcie_host_init(struct pcie_port *pp)
{
- exynos_pcie_establish_link(pp);
- exynos_pcie_enable_interrupts(pp);
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_pcie_establish_link(exynos_pcie);
+ exynos_pcie_enable_interrupts(exynos_pcie);
}
static struct pcie_host_ops exynos_pcie_host_ops = {
@@ -492,36 +475,38 @@ static struct pcie_host_ops exynos_pcie_host_ops = {
.host_init = exynos_pcie_host_init,
};
-static int __init exynos_add_pcie_port(struct pcie_port *pp,
+static int __init exynos_add_pcie_port(struct exynos_pcie *exynos_pcie,
struct platform_device *pdev)
{
+ struct pcie_port *pp = &exynos_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 1);
if (!pp->irq) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ dev_err(dev, "failed to get irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
- IRQF_SHARED, "exynos-pcie", pp);
+ ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
+ IRQF_SHARED, "exynos-pcie", exynos_pcie);
if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
+ dev_err(dev, "failed to request irq\n");
return ret;
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq(pdev, 0);
if (!pp->msi_irq) {
- dev_err(&pdev->dev, "failed to get msi irq\n");
+ dev_err(dev, "failed to get msi irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ ret = devm_request_irq(dev, pp->msi_irq,
exynos_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "exynos-pcie", pp);
+ "exynos-pcie", exynos_pcie);
if (ret) {
- dev_err(&pdev->dev, "failed to request msi irq\n");
+ dev_err(dev, "failed to request msi irq\n");
return ret;
}
}
@@ -531,7 +516,7 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -540,37 +525,36 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
static int __init exynos_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct exynos_pcie *exynos_pcie;
struct pcie_port *pp;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct resource *elbi_base;
struct resource *phy_base;
struct resource *block_base;
int ret;
- exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
- GFP_KERNEL);
+ exynos_pcie = devm_kzalloc(dev, sizeof(*exynos_pcie), GFP_KERNEL);
if (!exynos_pcie)
return -ENOMEM;
pp = &exynos_pcie->pp;
-
- pp->dev = &pdev->dev;
+ pp->dev = dev;
exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
- exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ exynos_pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(exynos_pcie->clk)) {
- dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
+ dev_err(dev, "Failed to get pcie rc clock\n");
return PTR_ERR(exynos_pcie->clk);
}
ret = clk_prepare_enable(exynos_pcie->clk);
if (ret)
return ret;
- exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ exynos_pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(exynos_pcie->bus_clk)) {
- dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
+ dev_err(dev, "Failed to get pcie bus clock\n");
ret = PTR_ERR(exynos_pcie->bus_clk);
goto fail_clk;
}
@@ -579,27 +563,27 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
goto fail_clk;
elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+ exynos_pcie->elbi_base = devm_ioremap_resource(dev, elbi_base);
if (IS_ERR(exynos_pcie->elbi_base)) {
ret = PTR_ERR(exynos_pcie->elbi_base);
goto fail_bus_clk;
}
phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ exynos_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
if (IS_ERR(exynos_pcie->phy_base)) {
ret = PTR_ERR(exynos_pcie->phy_base);
goto fail_bus_clk;
}
block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
+ exynos_pcie->block_base = devm_ioremap_resource(dev, block_base);
if (IS_ERR(exynos_pcie->block_base)) {
ret = PTR_ERR(exynos_pcie->block_base);
goto fail_bus_clk;
}
- ret = exynos_add_pcie_port(pp, pdev);
+ ret = exynos_add_pcie_port(exynos_pcie, pdev);
if (ret < 0)
goto fail_bus_clk;
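pci-exynos gets the same treatment: the PHY/ELBI helpers take struct exynos_pcie directly, and the root-complex accessors switch from taking a raw dbi_base pointer to taking a register offset, wrapping each access in sideband DBI mode toggles. A sketch of that accessor shape for an invented register block (the offset and bit below are assumptions, not Exynos values):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

#define EP_SLV_ARMISC		0x120		/* hypothetical offset */
#define EP_SLV_DBI_MODE		BIT(21)		/* hypothetical bit */

struct ep_pcie {
	void __iomem *dbi_base;
	void __iomem *elbi_base;
};

static void ep_sideband_dbi_r_mode(struct ep_pcie *ep, bool on)
{
	u32 val = readl(ep->elbi_base + EP_SLV_ARMISC);

	if (on)
		val |= EP_SLV_DBI_MODE;
	else
		val &= ~EP_SLV_DBI_MODE;
	writel(val, ep->elbi_base + EP_SLV_ARMISC);
}

static u32 ep_readl_rc(struct ep_pcie *ep, u32 reg)
{
	u32 val;

	/* bracket the DBI read with the sideband mode toggle */
	ep_sideband_dbi_r_mode(ep, true);
	val = readl(ep->dbi_base + reg);
	ep_sideband_dbi_r_mode(ep, false);
	return val;
}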
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index ead4a5c3480b..c8cefb078218 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -39,16 +39,15 @@ enum imx6_pcie_variants {
};
struct imx6_pcie {
+ struct pcie_port pp; /* pp.dbi_base is DT 0th resource */
int reset_gpio;
bool gpio_active_high;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie_inbound_axi;
struct clk *pcie;
- struct pcie_port pp;
struct regmap *iomuxc_gpr;
enum imx6_pcie_variants variant;
- void __iomem *mem_base;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -96,14 +95,15 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
-static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 val;
u32 max_iterations = 10;
u32 wait_counter = 0;
do {
- val = readl(dbi_base + PCIE_PHY_STAT);
+ val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
wait_counter++;
@@ -116,123 +116,126 @@ static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
return -ETIMEDOUT;
}
-static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 val;
int ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
- writel(val, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
- writel(val, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(dbi_base, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
- writel(val, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(dbi_base, 0);
+ return pcie_phy_poll_ack(imx6_pcie, 0);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 val, phy_ctl;
int ret;
- ret = pcie_phy_wait_ack(dbi_base, addr);
+ ret = pcie_phy_wait_ack(imx6_pcie, addr);
if (ret)
return ret;
/* assert Read signal */
phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
- writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(dbi_base, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
- val = readl(dbi_base + PCIE_PHY_STAT);
+ val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
*data = val & 0xffff;
/* deassert Read signal */
- writel(0x00, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(dbi_base, 0);
+ return pcie_phy_poll_ack(imx6_pcie, 0);
}
-static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 var;
int ret;
/* write addr */
/* cap addr */
- ret = pcie_phy_wait_ack(dbi_base, addr);
+ ret = pcie_phy_wait_ack(imx6_pcie, addr);
if (ret)
return ret;
var = data << PCIE_PHY_CTRL_DATA_LOC;
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* capture data */
var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(dbi_base, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
/* deassert cap data */
var = data << PCIE_PHY_CTRL_DATA_LOC;
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(dbi_base, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, 0);
if (ret)
return ret;
/* assert wr signal */
var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(dbi_base, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
/* deassert wr signal */
var = data << PCIE_PHY_CTRL_DATA_LOC;
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(dbi_base, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, 0);
if (ret)
return ret;
- writel(0x0, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x0);
return 0;
}
-static void imx6_pcie_reset_phy(struct pcie_port *pp)
+static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
u32 tmp;
- pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
/* Added for PCI abort handling */
@@ -242,9 +245,9 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
return 0;
}
-static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 val, gpr1, gpr12;
switch (imx6_pcie->variant) {
@@ -281,10 +284,10 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
(gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
- val = readl(pp->dbi_base + PCIE_PL_PFLR);
+ val = dw_pcie_readl_rc(pp, PCIE_PL_PFLR);
val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
val |= PCIE_PL_PFLR_FORCE_LINK;
- writel(val, pp->dbi_base + PCIE_PL_PFLR);
+ dw_pcie_writel_rc(pp, PCIE_PL_PFLR, val);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@@ -296,20 +299,19 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
}
-
- return 0;
}
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
int ret = 0;
switch (imx6_pcie->variant) {
case IMX6SX:
ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie_axi clock\n");
+ dev_err(dev, "unable to enable pcie_axi clock\n");
break;
}
@@ -336,32 +338,33 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
return ret;
}
-static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie_phy clock\n");
- goto err_pcie_phy;
+ dev_err(dev, "unable to enable pcie_phy clock\n");
+ return;
}
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie_bus clock\n");
+ dev_err(dev, "unable to enable pcie_bus clock\n");
goto err_pcie_bus;
}
ret = clk_prepare_enable(imx6_pcie->pcie);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie clock\n");
+ dev_err(dev, "unable to enable pcie clock\n");
goto err_pcie;
}
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie ref clock\n");
+ dev_err(dev, "unable to enable pcie ref clock\n");
goto err_ref_clk;
}
@@ -392,7 +395,7 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
break;
}
- return 0;
+ return;
err_ref_clk:
clk_disable_unprepare(imx6_pcie->pcie);
@@ -400,14 +403,10 @@ err_pcie:
clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
- return ret;
}
-static void imx6_pcie_init_phy(struct pcie_port *pp)
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
-
if (imx6_pcie->variant == IMX6SX)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
@@ -439,45 +438,52 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
imx6_pcie->tx_swing_low << 25);
}
-static int imx6_pcie_wait_for_link(struct pcie_port *pp)
+static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
+
/* check if the link is up or not */
if (!dw_pcie_wait_for_link(pp))
return 0;
- dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
return -ETIMEDOUT;
}
-static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
+static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
u32 tmp;
unsigned int retries;
for (retries = 0; retries < 200; retries++) {
- tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
/* Test if the speed change finished. */
if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
return 0;
usleep_range(100, 1000);
}
- dev_err(pp->dev, "Speed change timeout\n");
+ dev_err(dev, "Speed change timeout\n");
return -EINVAL;
}
static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
+ struct imx6_pcie *imx6_pcie = arg;
+ struct pcie_port *pp = &imx6_pcie->pp;
return dw_handle_msi_irq(pp);
}
-static int imx6_pcie_establish_link(struct pcie_port *pp)
+static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
u32 tmp;
int ret;
@@ -486,76 +492,73 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
- tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
- writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+ dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
/* Start LTSSM. */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
- ret = imx6_pcie_wait_for_link(pp);
+ ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret) {
- dev_info(pp->dev, "Link never came up\n");
+ dev_info(dev, "Link never came up\n");
goto err_reset_phy;
}
if (imx6_pcie->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
- tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
- writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+ dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
} else {
- dev_info(pp->dev, "Link: Gen2 disabled\n");
+ dev_info(dev, "Link: Gen2 disabled\n");
}
/*
* Start Directed Speed Change so the best possible speed both link
* partners support can be negotiated.
*/
- tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp |= PORT_LOGIC_SPEED_CHANGE;
- writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
- ret = imx6_pcie_wait_for_speed_change(pp);
+ ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
if (ret) {
- dev_err(pp->dev, "Failed to bring link up!\n");
+ dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
}
/* Make sure link training is finished as well! */
- ret = imx6_pcie_wait_for_link(pp);
+ ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret) {
- dev_err(pp->dev, "Failed to bring link up!\n");
+ dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
}
- tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
- dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
+ tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCSR);
+ dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
return 0;
err_reset_phy:
- dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
- imx6_pcie_reset_phy(pp);
-
+ dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
+ imx6_pcie_reset_phy(imx6_pcie);
return ret;
}
static void imx6_pcie_host_init(struct pcie_port *pp)
{
- imx6_pcie_assert_core_reset(pp);
-
- imx6_pcie_init_phy(pp);
-
- imx6_pcie_deassert_core_reset(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ imx6_pcie_assert_core_reset(imx6_pcie);
+ imx6_pcie_init_phy(imx6_pcie);
+ imx6_pcie_deassert_core_reset(imx6_pcie);
dw_pcie_setup_rc(pp);
-
- imx6_pcie_establish_link(pp);
+ imx6_pcie_establish_link(imx6_pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
@@ -563,7 +566,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
static int imx6_pcie_link_up(struct pcie_port *pp)
{
- return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
+ return dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1) &
PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}
@@ -572,24 +575,26 @@ static struct pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
-static int __init imx6_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+ struct platform_device *pdev)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq <= 0) {
- dev_err(&pdev->dev, "failed to get MSI irq\n");
+ dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ ret = devm_request_irq(dev, pp->msi_irq,
imx6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "mx6-pcie-msi", pp);
+ "mx6-pcie-msi", imx6_pcie);
if (ret) {
- dev_err(&pdev->dev, "failed to request MSI irq\n");
+ dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
@@ -599,7 +604,7 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -608,75 +613,72 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
- struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node = dev->of_node;
int ret;
- imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+ imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
return -ENOMEM;
pp = &imx6_pcie->pp;
- pp->dev = &pdev->dev;
+ pp->dev = dev;
imx6_pcie->variant =
- (enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);
+ (enum imx6_pcie_variants)of_device_get_match_data(dev);
/* Added for PCI abort handling */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
- imx6_pcie->gpio_active_high = of_property_read_bool(np,
+ imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
+ imx6_pcie->gpio_active_high = of_property_read_bool(node,
"reset-gpio-active-high");
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
imx6_pcie->gpio_active_high ?
GPIOF_OUT_INIT_HIGH :
GPIOF_OUT_INIT_LOW,
"PCIe reset");
if (ret) {
- dev_err(&pdev->dev, "unable to get reset gpio\n");
+ dev_err(dev, "unable to get reset gpio\n");
return ret;
}
}
/* Fetch clocks */
- imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+ imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
if (IS_ERR(imx6_pcie->pcie_phy)) {
- dev_err(&pdev->dev,
- "pcie_phy clock source missing or invalid\n");
+ dev_err(dev, "pcie_phy clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_phy);
}
- imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
+ imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(imx6_pcie->pcie_bus)) {
- dev_err(&pdev->dev,
- "pcie_bus clock source missing or invalid\n");
+ dev_err(dev, "pcie_bus clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_bus);
}
- imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
+ imx6_pcie->pcie = devm_clk_get(dev, "pcie");
if (IS_ERR(imx6_pcie->pcie)) {
- dev_err(&pdev->dev,
- "pcie clock source missing or invalid\n");
+ dev_err(dev, "pcie clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie);
}
if (imx6_pcie->variant == IMX6SX) {
- imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
+ imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"pcie_incbound_axi clock missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_inbound_axi);
}
@@ -686,7 +688,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+ dev_err(dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx6_pcie->iomuxc_gpr);
}
@@ -712,12 +714,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->tx_swing_low = 127;
/* Limit link speed */
- ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
+ ret = of_property_read_u32(node, "fsl,max-link-speed",
&imx6_pcie->link_gen);
if (ret)
imx6_pcie->link_gen = 1;
- ret = imx6_add_pcie_port(pp, pdev);
+ ret = imx6_add_pcie_port(imx6_pcie, pdev);
if (ret < 0)
return ret;
@@ -730,7 +732,7 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
- imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+ imx6_pcie_assert_core_reset(imx6_pcie);
}
static const struct of_device_id imx6_pcie_of_match[] = {
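In pci-imx6 every raw readl()/writel() on pp->dbi_base becomes dw_pcie_readl_rc()/dw_pcie_writel_rc(), and imx6_pcie_assert_core_reset()/imx6_pcie_deassert_core_reset() become void since their callers cannot act on a failure anyway. The read-modify-write idiom the hunks convert to, assuming the helper declarations from pcie-designware.h and the driver's own register defines shown above:

/* Assumes dw_pcie_readl_rc()/dw_pcie_writel_rc() from pcie-designware.h
 * and the PCIE_RC_LCR* defines from pci-imx6.c itself. */
static void imx6_limit_link_to_gen1(struct pcie_port *pp)
{
	u32 tmp;

	tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
}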
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index 41515092eb0d..9397c4667106 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -88,13 +88,24 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
return ks_pcie->app.start + MSI_IRQ;
}
+static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+ return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
+{
+ writel(val, ks_pcie->va_app_base + offset);
+}
+
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
u32 pending, vector;
int src, virq;
- pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
+ pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
/*
* MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
@@ -104,7 +115,7 @@ void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
if (BIT(src) & pending) {
vector = offset + (src << 3);
virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
+ dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
src, vector, virq);
generic_handle_irq(virq);
}
@@ -124,9 +135,9 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
- writel(BIT(bit_pos),
- ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
- writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
+ ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
+ BIT(bit_pos));
+ ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@@ -135,8 +146,8 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- writel(BIT(bit_pos),
- ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
+ ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+ BIT(bit_pos));
}
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
@@ -145,8 +156,8 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- writel(BIT(bit_pos),
- ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
+ ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+ BIT(bit_pos));
}
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
@@ -215,6 +226,7 @@ static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ struct device *dev = pp->dev;
int i;
pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
@@ -222,7 +234,7 @@ int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
&ks_dw_pcie_msi_domain_ops,
chip);
if (!pp->irq_domain) {
- dev_err(pp->dev, "irq domain init failed\n");
+ dev_err(dev, "irq domain init failed\n");
return -ENXIO;
}
@@ -237,47 +249,47 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
int i;
for (i = 0; i < MAX_LEGACY_IRQS; i++)
- writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
+ ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
u32 pending;
int virq;
- pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
+ pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
if (BIT(0) & pending) {
virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
- virq);
+ dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
generic_handle_irq(virq);
}
/* EOI the INTx interrupt */
- writel(offset, ks_pcie->va_app_base + IRQ_EOI);
+ ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
}
-void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
+void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
- writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
+ ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}
-irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
- void __iomem *reg_base)
+irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
u32 status;
- status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
+ status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
if (!status)
return IRQ_NONE;
if (status & ERR_FATAL_IRQ)
- dev_err(dev, "fatal error (status %#010x)\n", status);
+ dev_err(ks_pcie->pp.dev, "fatal error (status %#010x)\n",
+ status);
/* Ack the IRQ; status bits are RW1C */
- writel(status, reg_base + ERR_IRQ_STATUS);
+ ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
return IRQ_HANDLED;
}
@@ -322,15 +334,15 @@ static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
*/
-static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
+static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
u32 val;
- writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
- reg_virt + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
do {
- val = readl(reg_virt + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
} while (!(val & DBI_CS2_EN_VAL));
}
@@ -340,15 +352,15 @@ static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
*/
-static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
+static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
u32 val;
- writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
- reg_virt + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
do {
- val = readl(reg_virt + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
} while (val & DBI_CS2_EN_VAL);
}
@@ -357,28 +369,29 @@ void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
struct pcie_port *pp = &ks_pcie->pp;
u32 start = pp->mem->start, end = pp->mem->end;
int i, tr_size;
+ u32 val;
/* Disable BARs for inbound access */
- ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
- writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
- writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
- ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+ ks_dw_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0);
+ ks_dw_pcie_clear_dbi_mode(ks_pcie);
/* Set outbound translation size per window division */
- writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
+ ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
/* Using Direct 1:1 mapping of RC <-> PCI memory space */
for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
- writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
- writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
+ ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
+ ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
start += tr_size;
}
/* Enable OB translation */
- writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
- ks_pcie->va_app_base + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
}
/**
@@ -418,7 +431,7 @@ static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
if (bus != 1)
regval |= BIT(24);
- writel(regval, ks_pcie->va_app_base + CFG_SETUP);
+ ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
return pp->va_cfg0_base;
}
@@ -456,19 +469,19 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
/* Configure and set up BAR0 */
- ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+ ks_dw_pcie_set_dbi_mode(ks_pcie);
/* Enable BAR0 */
- writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
- writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1);
- ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+ ks_dw_pcie_clear_dbi_mode(ks_pcie);
/*
* For BAR0, just setting bus address for inbound writes (MSI) should
* be sufficient. Use physical address to avoid any conflicts.
*/
- writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}
/**
@@ -476,8 +489,9 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
*/
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
- u32 val = readl(pp->dbi_base + DEBUG0);
+ u32 val;
+ val = dw_pcie_readl_rc(pp, DEBUG0);
return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}
@@ -486,13 +500,13 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
u32 val;
/* Disable Link training */
- val = readl(ks_pcie->va_app_base + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
- writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
/* Initiate Link Training */
- val = readl(ks_pcie->va_app_base + CMD_STATUS);
- writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}
/**
@@ -506,12 +520,13 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np)
{
struct pcie_port *pp = &ks_pcie->pp;
- struct platform_device *pdev = to_platform_device(pp->dev);
+ struct device *dev = pp->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
/* Index 0 is the config reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pp->dbi_base = devm_ioremap_resource(pp->dev, res);
+ pp->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
@@ -524,7 +539,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
/* Index 1 is the application reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ks_pcie->va_app_base))
return PTR_ERR(ks_pcie->va_app_base);
@@ -537,7 +552,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
&ks_dw_pcie_legacy_irq_domain_ops,
NULL);
if (!ks_pcie->legacy_irq_domain) {
- dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
return -EINVAL;
}
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 82b461b5b08a..043c19a05da1 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -89,12 +89,13 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
unsigned int retries;
dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pp)) {
- dev_err(pp->dev, "Link already up\n");
+ dev_err(dev, "Link already up\n");
return 0;
}
@@ -105,7 +106,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
return 0;
}
- dev_err(pp->dev, "phy link never came up\n");
+ dev_err(dev, "phy link never came up\n");
return -ETIMEDOUT;
}
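A pattern repeated across every driver in this series: each function that logs caches pp->dev (or &pdev->dev) once in a local struct device *dev and uses that everywhere. The shape of the change, shown with one of the messages above:

	/* before: pointer chase repeated at every call site */
	dev_err(pp->dev, "phy link never came up\n");

	/* after: single local, uniform dev_*() style, shorter lines */
	struct device *dev = pp->dev;

	dev_err(dev, "phy link never came up\n");

Purely mechanical, but it also lets several multi-line dev_err() calls below collapse back onto one line.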
@@ -115,9 +116,10 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irqs[0];
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
- dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
+ dev_dbg(dev, "%s, irq %d\n", __func__, irq);
/*
* The chained irq handler installation would have replaced normal
@@ -142,10 +144,11 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
- dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
+ dev_dbg(dev, ": Handling legacy irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
@@ -234,7 +237,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
}
if (ks_pcie->error_irq > 0)
- ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base);
+ ks_dw_pcie_enable_error_irq(ks_pcie);
}
/*
@@ -302,14 +305,14 @@ static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
- return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev,
- ks_pcie->va_app_base);
+ return ks_dw_pcie_handle_error_irq(ks_pcie);
}
static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
ret = ks_pcie_get_irq_controller_info(ks_pcie,
@@ -332,12 +335,12 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
*/
ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
if (ks_pcie->error_irq <= 0)
- dev_info(&pdev->dev, "no error IRQ defined\n");
+ dev_info(dev, "no error IRQ defined\n");
else {
ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
IRQF_SHARED, "pcie-error-irq", ks_pcie);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request error IRQ %d\n",
+ dev_err(dev, "failed to request error IRQ %d\n",
ks_pcie->error_irq);
return ret;
}
@@ -347,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -381,12 +384,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct phy *phy;
int ret;
- ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
- GFP_KERNEL);
+ ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
return -ENOMEM;
pp = &ks_pcie->pp;
+ pp->dev = dev;
/* initialize SerDes Phy if present */
phy = devm_phy_get(dev, "pcie-phy");
@@ -408,7 +411,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
devm_iounmap(dev, reg_p);
devm_release_mem_region(dev, res->start, resource_size(res));
- pp->dev = dev;
ks_pcie->np = dev->of_node;
platform_set_drvdata(pdev, ks_pcie);
ks_pcie->clk = devm_clk_get(dev, "pcie");
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
index a5b0cb2ba4d7..bc54bafda068 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/host/pci-keystone.h
@@ -17,8 +17,8 @@
#define MAX_LEGACY_HOST_IRQS 4
struct keystone_pcie {
+ struct pcie_port pp; /* pp.dbi_base is DT 0th res */
struct clk *clk;
- struct pcie_port pp;
/* PCI Device ID */
u32 device_id;
int num_legacy_host_irqs;
@@ -34,7 +34,7 @@ struct keystone_pcie {
int error_irq;
/* Application register space */
- void __iomem *va_app_base;
+ void __iomem *va_app_base; /* DT 1st resource */
struct resource app;
};
@@ -45,9 +45,8 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
-void ks_dw_pcie_enable_error_irq(void __iomem *reg_base);
-irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
- void __iomem *reg_base);
+void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie);
+irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie);
int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np);
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
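The header hunk moves struct pcie_port pp to the front of struct keystone_pcie and annotates which DT resource backs each register window. to_keystone_pcie() is not shown here; assuming the conventional definition it is a container_of() conversion, which is offset-independent, so the reorder is for readability (embedded core structure first) rather than correctness:

	#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)

The error-IRQ APIs likewise now take the wrapper struct, so the callers above no longer need to pass va_app_base (or a struct device) around separately.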
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 114ba819277a..653707996342 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -45,10 +45,9 @@ struct ls_pcie_drvdata {
};
struct ls_pcie {
- void __iomem *dbi;
+ struct pcie_port pp; /* pp.dbi_base is DT regs */
void __iomem *lut;
struct regmap *scfg;
- struct pcie_port pp;
const struct ls_pcie_drvdata *drvdata;
int index;
};
@@ -59,7 +58,7 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
{
u32 header_type;
- header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE);
+ header_type = ioread8(pcie->pp.dbi_base + PCI_HEADER_TYPE);
header_type &= 0x7f;
return header_type == PCI_HEADER_TYPE_BRIDGE;
@@ -68,13 +67,13 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
- iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
+ iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->pp.dbi_base + PCI_HEADER_TYPE);
}
/* Fix class value */
static void ls_pcie_fix_class(struct ls_pcie *pcie)
{
- iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
+ iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->pp.dbi_base + PCI_CLASS_DEVICE);
}
/* Drop MSG TLP except for Vendor MSG */
@@ -82,9 +81,9 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
u32 val;
- val = ioread32(pcie->dbi + PCIE_STRFMR1);
+ val = ioread32(pcie->pp.dbi_base + PCIE_STRFMR1);
val &= 0xDFFFFFFF;
- iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+ iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
}
static int ls1021_pcie_link_up(struct pcie_port *pp)
@@ -106,18 +105,19 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
static void ls1021_pcie_host_init(struct pcie_port *pp)
{
+ struct device *dev = pp->dev;
struct ls_pcie *pcie = to_ls_pcie(pp);
u32 index[2];
- pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
+ pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
"fsl,pcie-scfg");
if (IS_ERR(pcie->scfg)) {
- dev_err(pp->dev, "No syscfg phandle specified\n");
+ dev_err(dev, "No syscfg phandle specified\n");
pcie->scfg = NULL;
return;
}
- if (of_property_read_u32_array(pp->dev->of_node,
+ if (of_property_read_u32_array(dev->of_node,
"fsl,pcie-scfg", index, 2)) {
pcie->scfg = NULL;
return;
@@ -148,18 +148,19 @@ static void ls_pcie_host_init(struct pcie_port *pp)
{
struct ls_pcie *pcie = to_ls_pcie(pp);
- iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
+ iowrite32(1, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
ls_pcie_fix_class(pcie);
ls_pcie_clear_multifunction(pcie);
ls_pcie_drop_msg_tlp(pcie);
- iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
+ iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
}
static int ls_pcie_msi_host_init(struct pcie_port *pp,
struct msi_controller *chip)
{
+ struct device *dev = pp->dev;
+ struct device_node *np = dev->of_node;
struct device_node *msi_node;
- struct device_node *np = pp->dev->of_node;
/*
* The MSI domain is set by the generic of_msi_configure(). This
@@ -169,7 +170,7 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
*/
msi_node = of_parse_phandle(np, "msi-parent", 0);
if (!msi_node) {
- dev_err(pp->dev, "failed to find msi-parent\n");
+ dev_err(dev, "failed to find msi-parent\n");
return -EINVAL;
}
@@ -212,19 +213,15 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ },
};
-static int __init ls_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int __init ls_add_pcie_port(struct ls_pcie *pcie)
{
+ struct pcie_port *pp = &pcie->pp;
+ struct device *dev = pp->dev;
int ret;
- struct ls_pcie *pcie = to_ls_pcie(pp);
-
- pp->dev = &pdev->dev;
- pp->dbi_base = pcie->dbi;
- pp->ops = pcie->drvdata->ops;
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(pp->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -233,38 +230,42 @@ static int __init ls_add_pcie_port(struct pcie_port *pp,
static int __init ls_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct of_device_id *match;
struct ls_pcie *pcie;
+ struct pcie_port *pp;
struct resource *dbi_base;
int ret;
- match = of_match_device(ls_pcie_of_match, &pdev->dev);
+ match = of_match_device(ls_pcie_of_match, dev);
if (!match)
return -ENODEV;
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
+ pp = &pcie->pp;
+ pp->dev = dev;
+ pcie->drvdata = match->data;
+ pp->ops = pcie->drvdata->ops;
+
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
- if (IS_ERR(pcie->dbi)) {
- dev_err(&pdev->dev, "missing *regs* space\n");
- return PTR_ERR(pcie->dbi);
+ pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base);
+ if (IS_ERR(pcie->pp.dbi_base)) {
+ dev_err(dev, "missing *regs* space\n");
+ return PTR_ERR(pcie->pp.dbi_base);
}
- pcie->drvdata = match->data;
- pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
+ pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
- ret = ls_add_pcie_port(&pcie->pp, pdev);
+ ret = ls_add_pcie_port(pcie);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, pcie);
-
return 0;
}
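The layerscape conversion is the same idea taken one step further: the driver-private dbi pointer duplicated pp.dbi_base, so it is removed and all accesses go through pp.dbi_base directly. With pp first in struct ls_pcie, to_ls_pcie() can stay the usual container_of() (a sketch, assuming the conventional definition):

	#define to_ls_pcie(x)	container_of(x, struct ls_pcie, pp)

platform_set_drvdata() is dropped here (and in the tegra and xgene probes below), apparently because nothing in these drivers ever reads the drvdata back.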
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 307f81d6b479..45a89d969700 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1190,13 +1190,13 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
static int mvebu_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct mvebu_pcie *pcie;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct device_node *child;
int num, i, ret;
- pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
- GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -1206,7 +1206,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the PCIe memory and I/O aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
- dev_err(&pdev->dev, "invalid memory aperture size\n");
+ dev_err(dev, "invalid memory aperture size\n");
return -EINVAL;
}
@@ -1224,20 +1224,18 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the bus range */
ret = of_pci_parse_bus_range(np, &pcie->busn);
if (ret) {
- dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
- ret);
+ dev_err(dev, "failed to parse bus-range property: %d\n", ret);
return ret;
}
- num = of_get_available_child_count(pdev->dev.of_node);
+ num = of_get_available_child_count(np);
- pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
- GFP_KERNEL);
+ pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
if (!pcie->ports)
return -ENOMEM;
i = 0;
- for_each_available_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(np, child) {
struct mvebu_pcie_port *port = &pcie->ports[i];
ret = mvebu_pcie_parse_port(pcie, port, child);
@@ -1266,8 +1264,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
- dev_err(&pdev->dev, "%s: cannot map registers\n",
- port->name);
+ dev_err(dev, "%s: cannot map registers\n", port->name);
port->base = NULL;
mvebu_pcie_powerdown(port);
continue;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 597566f96f5e..1eeefa4df64c 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -154,10 +154,11 @@ static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
{
struct rcar_pci_priv *priv = pw;
+ struct device *dev = priv->dev;
u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
if (status & RCAR_PCI_INT_ALLERRORS) {
- dev_err(priv->dev, "error irq: status %08x\n", status);
+ dev_err(dev, "error irq: status %08x\n", status);
/* clear the error(s) */
iowrite32(status & RCAR_PCI_INT_ALLERRORS,
@@ -170,13 +171,14 @@ static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
{
+ struct device *dev = priv->dev;
int ret;
u32 val;
- ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
+ ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq,
IRQF_SHARED, "error irq", priv);
if (ret) {
- dev_err(priv->dev, "cannot claim IRQ for error handling\n");
+ dev_err(dev, "cannot claim IRQ for error handling\n");
return;
}
@@ -192,15 +194,16 @@ static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
{
struct rcar_pci_priv *priv = sys->private_data;
+ struct device *dev = priv->dev;
void __iomem *reg = priv->reg;
u32 val;
int ret;
- pm_runtime_enable(priv->dev);
- pm_runtime_get_sync(priv->dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
- dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
+ dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val);
/* Disable Direct Power Down State and assert reset */
val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
@@ -275,7 +278,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
/* Add PCI resources */
pci_add_resource(&sys->resources, &priv->mem_res);
- ret = devm_request_pci_bus_resources(priv->dev, &sys->resources);
+ ret = devm_request_pci_bus_resources(dev, &sys->resources);
if (ret < 0)
return ret;
@@ -311,6 +314,7 @@ static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
struct device_node *np)
{
+ struct device *dev = pci->dev;
struct of_pci_range range;
struct of_pci_range_parser parser;
int index = 0;
@@ -331,14 +335,14 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
/* Catch HW limitations */
if (!(range.flags & IORESOURCE_PREFETCH)) {
- dev_err(pci->dev, "window must be prefetchable\n");
+ dev_err(dev, "window must be prefetchable\n");
return -EINVAL;
}
if (pci->window_addr) {
u32 lowaddr = 1 << (ffs(pci->window_addr) - 1);
if (lowaddr < pci->window_size) {
- dev_err(pci->dev, "invalid window size/addr\n");
+ dev_err(dev, "invalid window size/addr\n");
return -EINVAL;
}
}
@@ -350,6 +354,7 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
static int rcar_pci_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct resource *cfg_res, *mem_res;
struct rcar_pci_priv *priv;
void __iomem *reg;
@@ -357,7 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
void *hw_private[1];
cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, cfg_res);
+ reg = devm_ioremap_resource(dev, cfg_res);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -368,8 +373,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
if (mem_res->start & 0xFFFF)
return -EINVAL;
- priv = devm_kzalloc(&pdev->dev,
- sizeof(struct rcar_pci_priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -378,10 +382,10 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
priv->reg = reg;
- priv->dev = &pdev->dev;
+ priv->dev = dev;
if (priv->irq < 0) {
- dev_err(&pdev->dev, "no valid irq found\n");
+ dev_err(dev, "no valid irq found\n");
return priv->irq;
}
@@ -390,23 +394,23 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->window_pci = 0x40000000;
priv->window_size = SZ_1G;
- if (pdev->dev.of_node) {
+ if (dev->of_node) {
struct resource busnr;
int ret;
- ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
+ ret = of_pci_parse_bus_range(dev->of_node, &busnr);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to parse bus-range\n");
+ dev_err(dev, "failed to parse bus-range\n");
return ret;
}
priv->busnr = busnr.start;
if (busnr.end != busnr.start)
- dev_warn(&pdev->dev, "only one bus number supported\n");
+ dev_warn(dev, "only one bus number supported\n");
- ret = rcar_pci_parse_map_dma_ranges(priv, pdev->dev.of_node);
+ ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to parse dma-range\n");
+ dev_err(dev, "failed to parse dma-range\n");
return ret;
}
} else {
@@ -421,7 +425,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
hw.map_irq = rcar_pci_map_irq;
hw.ops = &rcar_pci_ops;
hw.setup = rcar_pci_setup;
- pci_common_init_dev(&pdev->dev, &hw);
+ pci_common_init_dev(dev, &hw);
return 0;
}
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index e2a8e4cab22e..8dfccf733241 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -384,6 +384,7 @@ static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
+ struct device *dev = pcie->dev;
pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
phys_addr_t cs = pcie->cs->start;
@@ -413,8 +414,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
if (err < 0) {
- dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
- err);
+ dev_err(dev, "ioremap_page_range() failed: %d\n", err);
goto unmap;
}
}
@@ -462,6 +462,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
int where)
{
struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct device *dev = pcie->dev;
void __iomem *addr = NULL;
if (bus->number == 0) {
@@ -482,8 +483,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
addr = (void __iomem *)b->area->addr;
if (!addr) {
- dev_err(pcie->dev,
- "failed to map cfg. space for bus %u\n",
+ dev_err(dev, "failed to map cfg. space for bus %u\n",
bus->number);
return NULL;
}
@@ -584,12 +584,13 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
struct tegra_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
- devm_iounmap(pcie->dev, port->base);
- devm_release_mem_region(pcie->dev, port->regs.start,
+ devm_iounmap(dev, port->base);
+ devm_release_mem_region(dev, port->regs.start,
resource_size(&port->regs));
list_del(&port->list);
- devm_kfree(pcie->dev, port);
+ devm_kfree(dev, port);
}
/* Tegra PCIE root complex wrongly reports device class */
@@ -612,12 +613,13 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct tegra_pcie *pcie = sys_to_pcie(sys);
+ struct device *dev = pcie->dev;
int err;
sys->mem_offset = pcie->offset.mem;
sys->io_offset = pcie->offset.io;
- err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io);
+ err = devm_request_resource(dev, &iomem_resource, &pcie->io);
if (err < 0)
return err;
@@ -631,7 +633,7 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
- err = devm_request_pci_bus_resources(pcie->dev, &sys->resources);
+ err = devm_request_pci_bus_resources(dev, &sys->resources);
if (err < 0)
return err;
@@ -672,6 +674,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
"Peer2Peer error",
};
struct tegra_pcie *pcie = arg;
+ struct device *dev = pcie->dev;
u32 code, signature;
code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
@@ -689,11 +692,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
* happen a lot during enumeration
*/
if (code == AFI_INTR_MASTER_ABORT)
- dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
- signature);
+ dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
else
- dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
- signature);
+ dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
code == AFI_INTR_FPCI_DECODE_ERROR) {
@@ -701,9 +702,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
if (code == AFI_INTR_MASTER_ABORT)
- dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
+ dev_dbg(dev, " FPCI address: %10llx\n", address);
else
- dev_err(pcie->dev, " FPCI address: %10llx\n", address);
+ dev_err(dev, " FPCI address: %10llx\n", address);
}
return IRQ_HANDLED;
@@ -793,6 +794,7 @@ static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
int err;
@@ -829,7 +831,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
/* wait for the PLL to lock */
err = tegra_pcie_pll_wait(pcie, 500);
if (err < 0) {
- dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
+ dev_err(dev, "PLL failed to lock: %d\n", err);
return err;
}
@@ -859,7 +861,7 @@ static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
/* override IDDQ */
value = pads_readl(pcie, PADS_CTL);
value |= PADS_CTL_IDDQ_1L;
- pads_writel(pcie, PADS_CTL, value);
+ pads_writel(pcie, value, PADS_CTL);
/* reset PLL */
value = pads_readl(pcie, soc->pads_pll_ctl);
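The pads_writel() change just above is the one substantive fix in the tegra hunks rather than a mechanical conversion: tegra's pads accessors take the value before the register offset, so the old call passed PADS_CTL as the value and value as the offset, and the IDDQ override never reached PADS_CTL. The assumed signature, matching pads_readl(pcie, offset) as used throughout this file:

	static void pads_writel(struct tegra_pcie *pcie, u32 value,
				unsigned long offset)
	{
		writel(value, pcie->pads + offset);
	}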
@@ -880,8 +882,7 @@ static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
for (i = 0; i < port->lanes; i++) {
err = phy_power_on(port->phys[i]);
if (err < 0) {
- dev_err(dev, "failed to power on PHY#%u: %d\n", i,
- err);
+ dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
return err;
}
}
@@ -909,6 +910,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
int err;
@@ -920,7 +922,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
err = tegra_pcie_phy_enable(pcie);
if (err < 0)
- dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+ dev_err(dev, "failed to power on PHY: %d\n", err);
return err;
}
@@ -928,7 +930,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_on(port);
if (err < 0) {
- dev_err(pcie->dev,
+ dev_err(dev,
"failed to power on PCIe port %u PHY: %d\n",
port->index, err);
return err;
@@ -946,6 +948,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
struct tegra_pcie_port *port;
int err;
@@ -956,8 +959,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
err = tegra_pcie_phy_disable(pcie);
if (err < 0)
- dev_err(pcie->dev, "failed to power off PHY: %d\n",
- err);
+ dev_err(dev, "failed to power off PHY: %d\n", err);
return err;
}
@@ -965,7 +967,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_off(port);
if (err < 0) {
- dev_err(pcie->dev,
+ dev_err(dev,
"failed to power off PCIe port %u PHY: %d\n",
port->index, err);
return err;
@@ -977,6 +979,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
unsigned long value;
@@ -1016,7 +1019,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
err = tegra_pcie_phy_power_on(pcie);
if (err < 0) {
- dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err);
+ dev_err(dev, "failed to power on PHY(s): %d\n", err);
return err;
}
@@ -1049,13 +1052,14 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
int err;
/* TODO: disable and unprepare clocks? */
err = tegra_pcie_phy_power_off(pcie);
if (err < 0)
- dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err);
+ dev_err(dev, "failed to power off PHY(s): %d\n", err);
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
@@ -1065,11 +1069,12 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
- dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
+ dev_warn(dev, "failed to disable regulators: %d\n", err);
}
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
int err;
@@ -1082,13 +1087,13 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
/* enable regulators */
err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
if (err < 0)
- dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
+ dev_err(dev, "failed to enable regulators: %d\n", err);
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
pcie->pex_clk,
pcie->pex_rst);
if (err) {
- dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
+ dev_err(dev, "powerup sequence failed: %d\n", err);
return err;
}
@@ -1096,22 +1101,21 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
err = clk_prepare_enable(pcie->afi_clk);
if (err < 0) {
- dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
+ dev_err(dev, "failed to enable AFI clock: %d\n", err);
return err;
}
if (soc->has_cml_clk) {
err = clk_prepare_enable(pcie->cml_clk);
if (err < 0) {
- dev_err(pcie->dev, "failed to enable CML clock: %d\n",
- err);
+ dev_err(dev, "failed to enable CML clock: %d\n", err);
return err;
}
}
err = clk_prepare_enable(pcie->pll_e);
if (err < 0) {
- dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
+ dev_err(dev, "failed to enable PLLE clock: %d\n", err);
return err;
}
@@ -1120,22 +1124,23 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
const struct tegra_pcie_soc *soc = pcie->soc;
- pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
+ pcie->pex_clk = devm_clk_get(dev, "pex");
if (IS_ERR(pcie->pex_clk))
return PTR_ERR(pcie->pex_clk);
- pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
+ pcie->afi_clk = devm_clk_get(dev, "afi");
if (IS_ERR(pcie->afi_clk))
return PTR_ERR(pcie->afi_clk);
- pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
+ pcie->pll_e = devm_clk_get(dev, "pll_e");
if (IS_ERR(pcie->pll_e))
return PTR_ERR(pcie->pll_e);
if (soc->has_cml_clk) {
- pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
+ pcie->cml_clk = devm_clk_get(dev, "cml");
if (IS_ERR(pcie->cml_clk))
return PTR_ERR(pcie->cml_clk);
}
@@ -1145,15 +1150,17 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
- pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
+ struct device *dev = pcie->dev;
+
+ pcie->pex_rst = devm_reset_control_get(dev, "pex");
if (IS_ERR(pcie->pex_rst))
return PTR_ERR(pcie->pex_rst);
- pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
+ pcie->afi_rst = devm_reset_control_get(dev, "afi");
if (IS_ERR(pcie->afi_rst))
return PTR_ERR(pcie->afi_rst);
- pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
+ pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
if (IS_ERR(pcie->pcie_xrst))
return PTR_ERR(pcie->pcie_xrst);
@@ -1162,18 +1169,19 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
int err;
- pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+ pcie->phy = devm_phy_optional_get(dev, "pcie");
if (IS_ERR(pcie->phy)) {
err = PTR_ERR(pcie->phy);
- dev_err(pcie->dev, "failed to get PHY: %d\n", err);
+ dev_err(dev, "failed to get PHY: %d\n", err);
return err;
}
err = phy_init(pcie->phy);
if (err < 0) {
- dev_err(pcie->dev, "failed to initialize PHY: %d\n", err);
+ dev_err(dev, "failed to initialize PHY: %d\n", err);
return err;
}
@@ -1256,43 +1264,44 @@ static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct resource *pads, *afi, *res;
int err;
err = tegra_pcie_clocks_get(pcie);
if (err) {
- dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
+ dev_err(dev, "failed to get clocks: %d\n", err);
return err;
}
err = tegra_pcie_resets_get(pcie);
if (err) {
- dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+ dev_err(dev, "failed to get resets: %d\n", err);
return err;
}
err = tegra_pcie_phys_get(pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to get PHYs: %d\n", err);
+ dev_err(dev, "failed to get PHYs: %d\n", err);
return err;
}
err = tegra_pcie_power_on(pcie);
if (err) {
- dev_err(&pdev->dev, "failed to power up: %d\n", err);
+ dev_err(dev, "failed to power up: %d\n", err);
return err;
}
pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
- pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
+ pcie->pads = devm_ioremap_resource(dev, pads);
if (IS_ERR(pcie->pads)) {
err = PTR_ERR(pcie->pads);
goto poweroff;
}
afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
- pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
+ pcie->afi = devm_ioremap_resource(dev, afi);
if (IS_ERR(pcie->afi)) {
err = PTR_ERR(pcie->afi);
goto poweroff;
@@ -1305,7 +1314,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
goto poweroff;
}
- pcie->cs = devm_request_mem_region(pcie->dev, res->start,
+ pcie->cs = devm_request_mem_region(dev, res->start,
resource_size(res), res->name);
if (!pcie->cs) {
err = -EADDRNOTAVAIL;
@@ -1315,7 +1324,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
/* request interrupt */
err = platform_get_irq_byname(pdev, "intr");
if (err < 0) {
- dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ dev_err(dev, "failed to get IRQ: %d\n", err);
goto poweroff;
}
@@ -1323,7 +1332,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
if (err) {
- dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
+ dev_err(dev, "failed to register IRQ: %d\n", err);
goto poweroff;
}
@@ -1336,6 +1345,7 @@ poweroff:
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
int err;
if (pcie->irq > 0)
@@ -1345,7 +1355,7 @@ static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
err = phy_exit(pcie->phy);
if (err < 0)
- dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
+ dev_err(dev, "failed to teardown PHY: %d\n", err);
return 0;
}
@@ -1384,6 +1394,7 @@ static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
struct tegra_pcie *pcie = data;
+ struct device *dev = pcie->dev;
struct tegra_msi *msi = &pcie->msi;
unsigned int i, processed = 0;
@@ -1403,13 +1414,13 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
if (test_bit(index, msi->used))
generic_handle_irq(irq);
else
- dev_info(pcie->dev, "unhandled MSI\n");
+ dev_info(dev, "unhandled MSI\n");
} else {
/*
* that's weird who triggered this?
* just clear it
*/
- dev_info(pcie->dev, "unexpected MSI\n");
+ dev_info(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
@@ -1488,7 +1499,8 @@ static const struct irq_domain_ops msi_domain_ops = {
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
unsigned long base;
@@ -1497,20 +1509,20 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
mutex_init(&msi->lock);
- msi->chip.dev = pcie->dev;
+ msi->chip.dev = dev;
msi->chip.setup_irq = tegra_msi_setup_irq;
msi->chip.teardown_irq = tegra_msi_teardown_irq;
- msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+ msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
&msi_domain_ops, &msi->chip);
if (!msi->domain) {
- dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
err = platform_get_irq_byname(pdev, "msi");
if (err < 0) {
- dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ dev_err(dev, "failed to get IRQ: %d\n", err);
goto err;
}
@@ -1519,7 +1531,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
@@ -1594,46 +1606,47 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
u32 *xbar)
{
- struct device_node *np = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
switch (lanes) {
case 0x0000104:
- dev_info(pcie->dev, "4x1, 1x1 configuration\n");
+ dev_info(dev, "4x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
return 0;
case 0x0000102:
- dev_info(pcie->dev, "2x1, 1x1 configuration\n");
+ dev_info(dev, "2x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
switch (lanes) {
case 0x00000204:
- dev_info(pcie->dev, "4x1, 2x1 configuration\n");
+ dev_info(dev, "4x1, 2x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
return 0;
case 0x00020202:
- dev_info(pcie->dev, "2x3 configuration\n");
+ dev_info(dev, "2x3 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
return 0;
case 0x00010104:
- dev_info(pcie->dev, "4x1, 1x2 configuration\n");
+ dev_info(dev, "4x1, 1x2 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
switch (lanes) {
case 0x00000004:
- dev_info(pcie->dev, "single-mode configuration\n");
+ dev_info(dev, "single-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
return 0;
case 0x00000202:
- dev_info(pcie->dev, "dual-mode configuration\n");
+ dev_info(dev, "dual-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
return 0;
}
@@ -1673,7 +1686,8 @@ static bool of_regulator_bulk_available(struct device_node *np,
*/
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
- struct device_node *np = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
pcie->num_supplies = 3;
@@ -1681,12 +1695,12 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
pcie->num_supplies = 2;
if (pcie->num_supplies == 0) {
- dev_err(pcie->dev, "device %s not supported in legacy mode\n",
+ dev_err(dev, "device %s not supported in legacy mode\n",
np->full_name);
return -ENODEV;
}
- pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@@ -1698,8 +1712,7 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
if (pcie->num_supplies > 2)
pcie->supplies[2].supply = "avdd";
- return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
- pcie->supplies);
+ return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
}
/*
@@ -1713,13 +1726,14 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
*/
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
- struct device_node *np = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
unsigned int i = 0;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
pcie->num_supplies = 7;
- pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@@ -1746,7 +1760,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
(need_pexb ? 2 : 0);
- pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@@ -1769,7 +1783,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
pcie->num_supplies = 5;
- pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@@ -1782,9 +1796,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->supplies[4].supply = "vddio-pex-clk";
}
- if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
+ if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
pcie->num_supplies))
- return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
+ return devm_regulator_bulk_get(dev, pcie->num_supplies,
pcie->supplies);
/*
@@ -1792,9 +1806,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
* that the device tree complies with an older version of the device
* tree binding.
*/
- dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
+ dev_info(dev, "using legacy DT binding for power supplies\n");
- devm_kfree(pcie->dev, pcie->supplies);
+ devm_kfree(dev, pcie->supplies);
pcie->num_supplies = 0;
return tegra_pcie_get_legacy_regulators(pcie);
@@ -1802,7 +1816,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
- struct device_node *np = pcie->dev->of_node, *port;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node, *port;
const struct tegra_pcie_soc *soc = pcie->soc;
struct of_pci_range_parser parser;
struct of_pci_range range;
@@ -1812,7 +1827,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
int err;
if (of_pci_range_parser_init(&parser, np)) {
- dev_err(pcie->dev, "missing \"ranges\" property\n");
+ dev_err(dev, "missing \"ranges\" property\n");
return -EINVAL;
}
@@ -1867,8 +1882,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_parse_bus_range(np, &pcie->busn);
if (err < 0) {
- dev_err(pcie->dev, "failed to parse ranges property: %d\n",
- err);
+ dev_err(dev, "failed to parse ranges property: %d\n", err);
pcie->busn.name = np->name;
pcie->busn.start = 0;
pcie->busn.end = 0xff;
@@ -1883,15 +1897,14 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_get_devfn(port);
if (err < 0) {
- dev_err(pcie->dev, "failed to parse address: %d\n",
- err);
+ dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
index = PCI_SLOT(err);
if (index < 1 || index > soc->num_ports) {
- dev_err(pcie->dev, "invalid port number: %d\n", index);
+ dev_err(dev, "invalid port number: %d\n", index);
return -EINVAL;
}
@@ -1899,13 +1912,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
if (err < 0) {
- dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
+ dev_err(dev, "failed to parse # of lanes: %d\n",
err);
return err;
}
if (value > 16) {
- dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
+ dev_err(dev, "invalid # of lanes: %u\n", value);
return -EINVAL;
}
@@ -1919,14 +1932,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
mask |= ((1 << value) - 1) << lane;
lane += value;
- rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
+ rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
if (!rp)
return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
if (err < 0) {
- dev_err(pcie->dev, "failed to parse address: %d\n",
- err);
+ dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
@@ -1936,7 +1948,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->pcie = pcie;
rp->np = port;
- rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
+ rp->base = devm_ioremap_resource(dev, &rp->regs);
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
@@ -1945,7 +1957,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
if (err < 0) {
- dev_err(pcie->dev, "invalid lane configuration\n");
+ dev_err(dev, "invalid lane configuration\n");
return err;
}
@@ -1964,6 +1976,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
+ struct device *dev = port->pcie->dev;
unsigned int retries = 3;
unsigned long value;
@@ -1986,8 +1999,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
} while (--timeout);
if (!timeout) {
- dev_err(port->pcie->dev, "link %u down, retrying\n",
- port->index);
+ dev_err(dev, "link %u down, retrying\n", port->index);
goto retry;
}
@@ -2011,11 +2023,12 @@ retry:
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
struct tegra_pcie_port *port, *tmp;
struct hw_pci hw;
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
- dev_info(pcie->dev, "probing port %u, using %u lanes\n",
+ dev_info(dev, "probing port %u, using %u lanes\n",
port->index, port->lanes);
tegra_pcie_port_enable(port);
@@ -2023,7 +2036,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
if (tegra_pcie_port_check_link(port))
continue;
- dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
+ dev_info(dev, "link %u down, ignoring\n", port->index);
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
@@ -2041,8 +2054,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
hw.map_irq = tegra_pcie_map_irq;
hw.ops = &tegra_pcie_ops;
- pci_common_init_dev(pcie->dev, &hw);
-
+ pci_common_init_dev(dev, &hw);
return 0;
}
@@ -2204,17 +2216,18 @@ remove:
static int tegra_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct tegra_pcie *pcie;
int err;
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->soc = of_device_get_match_data(&pdev->dev);
+ pcie->soc = of_device_get_match_data(dev);
INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
- pcie->dev = &pdev->dev;
+ pcie->dev = dev;
err = tegra_pcie_parse_dt(pcie);
if (err < 0)
@@ -2222,7 +2235,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = tegra_pcie_get_resources(pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+ dev_err(dev, "failed to request resources: %d\n", err);
return err;
}
@@ -2236,27 +2249,23 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = tegra_pcie_enable_msi(pcie);
if (err < 0) {
- dev_err(&pdev->dev,
- "failed to enable MSI support: %d\n",
- err);
+ dev_err(dev, "failed to enable MSI support: %d\n", err);
goto put_resources;
}
}
err = tegra_pcie_enable(pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
+ dev_err(dev, "failed to enable PCIe ports: %d\n", err);
goto disable_msi;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_pcie_debugfs_init(pcie);
if (err < 0)
- dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
- err);
+ dev_err(dev, "failed to setup debugfs: %d\n", err);
}
- platform_set_drvdata(pdev, pcie);
return 0;
disable_msi:
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index a81273c23341..1de23d74783f 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -76,6 +76,16 @@ struct xgene_pcie_port {
u32 version;
};
+static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
+{
+ return readl(port->csr_base + reg);
+}
+
+static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
+{
+ writel(val, port->csr_base + reg);
+}
+
static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
{
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
@@ -112,9 +122,9 @@ static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
if (!pci_is_root_bus(bus))
rtdid_val = (b << 8) | (d << 3) | f;
- writel(rtdid_val, port->csr_base + RTDID);
+ xgene_pcie_writel(port, RTDID, rtdid_val);
/* read the register back to ensure flush */
- readl(port->csr_base + RTDID);
+ xgene_pcie_readl(port, RTDID);
}
/*
@@ -179,28 +189,28 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
-static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
+static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
u32 flags, u64 size)
{
u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
u32 val32 = 0;
u32 val;
- val32 = readl(csr_base + addr);
+ val32 = xgene_pcie_readl(port, addr);
val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
- writel(val, csr_base + addr);
+ xgene_pcie_writel(port, addr, val);
- val32 = readl(csr_base + addr + 0x04);
+ val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
- writel(val, csr_base + addr + 0x04);
+ xgene_pcie_writel(port, addr + 0x04, val);
- val32 = readl(csr_base + addr + 0x04);
+ val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
- writel(val, csr_base + addr + 0x04);
+ xgene_pcie_writel(port, addr + 0x04, val);
- val32 = readl(csr_base + addr + 0x08);
+ val32 = xgene_pcie_readl(port, addr + 0x08);
val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
- writel(val, csr_base + addr + 0x08);
+ xgene_pcie_writel(port, addr + 0x08, val);
return mask;
}
@@ -208,32 +218,32 @@ static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
u32 *lanes, u32 *speed)
{
- void __iomem *csr_base = port->csr_base;
u32 val32;
port->link_up = false;
- val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
+ val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
if (val32 & LINK_UP_MASK) {
port->link_up = true;
*speed = PIPE_PHY_RATE_RD(val32);
- val32 = readl(csr_base + BRIDGE_STATUS_0);
+ val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
*lanes = val32 >> 26;
}
}
static int xgene_pcie_init_port(struct xgene_pcie_port *port)
{
+ struct device *dev = port->dev;
int rc;
- port->clk = clk_get(port->dev, NULL);
+ port->clk = clk_get(dev, NULL);
if (IS_ERR(port->clk)) {
- dev_err(port->dev, "clock not available\n");
+ dev_err(dev, "clock not available\n");
return -ENODEV;
}
rc = clk_prepare_enable(port->clk);
if (rc) {
- dev_err(port->dev, "clock enable failed\n");
+ dev_err(dev, "clock enable failed\n");
return rc;
}
@@ -243,15 +253,16 @@ static int xgene_pcie_init_port(struct xgene_pcie_port *port)
static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
struct platform_device *pdev)
{
+ struct device *dev = port->dev;
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
- port->csr_base = devm_ioremap_resource(port->dev, res);
+ port->csr_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
- port->cfg_base = devm_ioremap_resource(port->dev, res);
+ port->cfg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->cfg_base))
return PTR_ERR(port->cfg_base);
port->cfg_addr = res->start;
@@ -263,7 +274,7 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
struct resource *res, u32 offset,
u64 cpu_addr, u64 pci_addr)
{
- void __iomem *base = port->csr_base + offset;
+ struct device *dev = port->dev;
resource_size_t size = resource_size(res);
u64 restype = resource_type(res);
u64 mask = 0;
@@ -280,22 +291,24 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
if (size >= min_size)
mask = ~(size - 1) | flag;
else
- dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
+ dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
(u64)size, min_size);
- writel(lower_32_bits(cpu_addr), base);
- writel(upper_32_bits(cpu_addr), base + 0x04);
- writel(lower_32_bits(mask), base + 0x08);
- writel(upper_32_bits(mask), base + 0x0c);
- writel(lower_32_bits(pci_addr), base + 0x10);
- writel(upper_32_bits(pci_addr), base + 0x14);
+ xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
+ xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
+ xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
+ xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
+ xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
+ xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}
-static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
+static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
{
- writel(lower_32_bits(addr), csr_base + CFGBARL);
- writel(upper_32_bits(addr), csr_base + CFGBARH);
- writel(EN_REG, csr_base + CFGCTL);
+ u64 addr = port->cfg_addr;
+
+ xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
+ xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
+ xgene_pcie_writel(port, CFGCTL, EN_REG);
}
static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
@@ -310,7 +323,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
struct resource *res = window->res;
u64 restype = resource_type(res);
- dev_dbg(port->dev, "%pR\n", res);
+ dev_dbg(dev, "%pR\n", res);
switch (restype) {
case IORESOURCE_IO:
@@ -339,17 +352,18 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
return -EINVAL;
}
}
- xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
-
+ xgene_pcie_setup_cfg_reg(port);
return 0;
}
-static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
+static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
+ u64 pim, u64 size)
{
- writel(lower_32_bits(pim), addr);
- writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
- writel(lower_32_bits(size), addr + 0x10);
- writel(upper_32_bits(size), addr + 0x14);
+ xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
+ xgene_pcie_writel(port, pim_reg + 0x04,
+ upper_32_bits(pim) | EN_COHERENCY);
+ xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
+ xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
}
/*
@@ -379,10 +393,10 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
struct of_pci_range *range, u8 *ib_reg_mask)
{
- void __iomem *csr_base = port->csr_base;
void __iomem *cfg_base = port->cfg_base;
+ struct device *dev = port->dev;
void *bar_addr;
- void *pim_addr;
+ u32 pim_reg;
u64 cpu_addr = range->cpu_addr;
u64 pci_addr = range->pci_addr;
u64 size = range->size;
@@ -393,7 +407,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
if (region < 0) {
- dev_warn(port->dev, "invalid pcie dma-range config\n");
+ dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
@@ -403,29 +417,27 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
switch (region) {
case 0:
- xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
+ xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
- pim_addr = csr_base + PIM1_1L;
+ pim_reg = PIM1_1L;
break;
case 1:
- bar_addr = csr_base + IBAR2;
- writel(bar_low, bar_addr);
- writel(lower_32_bits(mask), csr_base + IR2MSK);
- pim_addr = csr_base + PIM2_1L;
+ xgene_pcie_writel(port, IBAR2, bar_low);
+ xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
+ pim_reg = PIM2_1L;
break;
case 2:
- bar_addr = csr_base + IBAR3L;
- writel(bar_low, bar_addr);
- writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
- writel(lower_32_bits(mask), csr_base + IR3MSKL);
- writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
- pim_addr = csr_base + PIM3_1L;
+ xgene_pcie_writel(port, IBAR3L, bar_low);
+ xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
+ xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
+ xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
+ pim_reg = PIM3_1L;
break;
}
- xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
+ xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
@@ -463,7 +475,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
- dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
}
@@ -476,13 +488,14 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
int i;
for (i = PIM1_1L; i <= CFGCTL; i += 4)
- writel(0x0, port->csr_base + i);
+ xgene_pcie_writel(port, i, 0);
}
static int xgene_pcie_setup(struct xgene_pcie_port *port,
struct list_head *res,
resource_size_t io_base)
{
+ struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
int ret;
@@ -490,7 +503,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
/* setup the vendor and device IDs correctly */
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
- writel(val, port->csr_base + BRIDGE_CFG_0);
+ xgene_pcie_writel(port, BRIDGE_CFG_0, val);
ret = xgene_pcie_map_ranges(port, res, io_base);
if (ret)
@@ -502,27 +515,28 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
xgene_pcie_linkup(port, &lanes, &speed);
if (!port->link_up)
- dev_info(port->dev, "(rc) link down\n");
+ dev_info(dev, "(rc) link down\n");
else
- dev_info(port->dev, "(rc) x%d gen-%d link up\n",
- lanes, speed + 1);
+ dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
return 0;
}
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
- struct device_node *dn = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
struct xgene_pcie_port *port;
resource_size_t iobase = 0;
struct pci_bus *bus;
int ret;
LIST_HEAD(res);
- port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
- port->node = of_node_get(pdev->dev.of_node);
- port->dev = &pdev->dev;
+
+ port->node = of_node_get(dn);
+ port->dev = dev;
port->version = XGENE_PCIE_IP_VER_UNKN;
if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
@@ -540,7 +554,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+ ret = devm_request_pci_bus_resources(dev, &res);
if (ret)
goto error;
@@ -548,8 +562,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
goto error;
- bus = pci_create_root_bus(&pdev->dev, 0,
- &xgene_pcie_ops, port, &res);
+ bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res);
if (!bus) {
ret = -ENOMEM;
goto error;
@@ -558,8 +571,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
-
- platform_set_drvdata(pdev, port);
return 0;
error:
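
Note on the xgene hunks above: the case bodies stop computing csr_base + REG
pointers and instead hand plain register offsets to port-relative helpers, and
xgene_pcie_setup_pims() now takes the PIM register offset rather than a
precomputed address. The helper pair itself is introduced earlier in the same
patch (outside this excerpt); from the calling convention it is a minimal
wrapper, roughly:

	/* inferred from the call sites above, not shown in this excerpt */
	static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
	{
		return readl(port->csr_base + reg);
	}

	static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
	{
		writel(val, port->csr_base + reg);
	}
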
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index c24e96559cbb..b0ac4dfafa0b 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -55,15 +55,19 @@
#define TLP_PAYLOAD_SIZE 0x01
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
-#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE)
-#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be))
+#define RP_DEVFN 0
+#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
+#define TLP_CFG_DW0(pcie, bus) \
+ ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \
+ : TLP_FMTTYPE_CFGRD1) << 24) | \
+ TLP_PAYLOAD_SIZE)
+#define TLP_CFG_DW1(pcie, tag, be) \
+ (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
-#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
#define TLP_HDR_SIZE 3
#define TLP_LOOP 500
-#define RP_DEVFN 0
#define LINK_UP_TIMEOUT HZ
#define LINK_RETRAIN_TIMEOUT HZ
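
With this change both tlp_cfg_dword_read() and tlp_cfg_dword_write() (later in
this file) build DW0 via TLP_CFG_DW0(pcie, bus), which hard-codes the
CFGRD0/CFGRD1 format/type. The deleted write path used TLP_FMTTYPE_CFGWR0/1,
so the read/write distinction in the TLP header is lost here. A variant that
keeps it would parameterize the fmttype instead of the bus, e.g. (hypothetical
sketch, not part of this patch):

	#define TLP_CFG_DW0(pcie, fmttype)	(((fmttype) << 24) | TLP_PAYLOAD_SIZE)

	/* the write path would then pick the fmttype itself: */
	headers[0] = TLP_CFG_DW0(pcie, (bus == pcie->root_bus_nr) ?
				 TLP_FMTTYPE_CFGWR0 : TLP_FMTTYPE_CFGWR1);
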
@@ -74,7 +78,7 @@
struct altera_pcie {
struct platform_device *pdev;
- void __iomem *cra_base;
+ void __iomem *cra_base; /* DT Cra */
int irq;
u8 root_bus_nr;
struct irq_domain *irq_domain;
@@ -131,7 +135,7 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
-static bool altera_pcie_valid_config(struct altera_pcie *pcie,
+static bool altera_pcie_valid_device(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
/* If there is no link, then there is no device */
@@ -218,13 +222,8 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
{
u32 headers[TLP_HDR_SIZE];
- if (bus == pcie->root_bus_nr)
- headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0);
- else
- headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
-
- headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
- TLP_READ_TAG, byte_en);
+ headers[0] = TLP_CFG_DW0(pcie, bus);
+ headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
tlp_write_packet(pcie, headers, 0, false);
@@ -238,13 +237,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
u32 headers[TLP_HDR_SIZE];
int ret;
- if (bus == pcie->root_bus_nr)
- headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0);
- else
- headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
-
- headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
- TLP_WRITE_TAG, byte_en);
+ headers[0] = TLP_CFG_DW0(pcie, bus);
+ headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
/* check alignment to Qword */
@@ -342,7 +336,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
+ if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
*value = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -359,7 +353,7 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
+ if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
@@ -394,6 +388,7 @@ static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
static void altera_wait_link_retrain(struct altera_pcie *pcie)
{
+ struct device *dev = &pcie->pdev->dev;
u16 reg16;
unsigned long start_jiffies;
@@ -406,7 +401,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
- dev_err(&pcie->pdev->dev, "link retrain timeout\n");
+ dev_err(dev, "link retrain timeout\n");
break;
}
udelay(100);
@@ -419,7 +414,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
break;
if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
- dev_err(&pcie->pdev->dev, "link up timeout\n");
+ dev_err(dev, "link up timeout\n");
break;
}
udelay(100);
@@ -460,7 +455,6 @@ static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
-
return 0;
}
@@ -472,12 +466,14 @@ static void altera_pcie_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct altera_pcie *pcie;
+ struct device *dev;
unsigned long status;
u32 bit;
u32 virq;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
+ dev = &pcie->pdev->dev;
while ((status = cra_readl(pcie, P2A_INT_STATUS)
& P2A_INT_STS_ALL) != 0) {
@@ -489,8 +485,7 @@ static void altera_pcie_isr(struct irq_desc *desc)
if (virq)
generic_handle_irq(virq);
else
- dev_err(&pcie->pdev->dev,
- "unexpected IRQ, INT%d\n", bit);
+ dev_err(dev, "unexpected IRQ, INT%d\n", bit);
}
}
@@ -549,30 +544,25 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
static int altera_pcie_parse_dt(struct altera_pcie *pcie)
{
- struct resource *cra;
+ struct device *dev = &pcie->pdev->dev;
struct platform_device *pdev = pcie->pdev;
+ struct resource *cra;
cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
- if (!cra) {
- dev_err(&pdev->dev, "no Cra memory resource defined\n");
- return -ENODEV;
- }
-
- pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra);
+ pcie->cra_base = devm_ioremap_resource(dev, cra);
if (IS_ERR(pcie->cra_base)) {
- dev_err(&pdev->dev, "failed to map cra memory\n");
+ dev_err(dev, "failed to map cra memory\n");
return PTR_ERR(pcie->cra_base);
}
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
if (pcie->irq <= 0) {
- dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq);
+ dev_err(dev, "failed to get IRQ: %d\n", pcie->irq);
return -EINVAL;
}
irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
-
return 0;
}
@@ -583,12 +573,13 @@ static void altera_pcie_host_init(struct altera_pcie *pcie)
static int altera_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct altera_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
int ret;
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -596,7 +587,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_dt(pcie);
if (ret) {
- dev_err(&pdev->dev, "Parsing DT failed\n");
+ dev_err(dev, "Parsing DT failed\n");
return ret;
}
@@ -604,13 +595,13 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed add resources\n");
+ dev_err(dev, "Failed add resources\n");
return ret;
}
ret = altera_pcie_init_irq_domain(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed creating IRQ Domain\n");
+ dev_err(dev, "Failed creating IRQ Domain\n");
return ret;
}
@@ -620,7 +611,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
altera_pcie_host_init(pcie);
- bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
+ bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops,
pcie, &pcie->resources);
if (!bus)
return -ENOMEM;
@@ -633,8 +624,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
-
- platform_set_drvdata(pdev, pcie);
return ret;
}
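
A pattern repeated across this series (xgene, altera, armada8k, artpec6,
designware-plat, hisi, qcom): the probe-time platform_set_drvdata() call is
dropped because these drivers never read the drvdata back -- they have no
remove, suspend or other callback that would call platform_get_drvdata(). The
iProc drivers further below keep the call but move it to after setup has
succeeded.
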
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c
index 0f4f570068e3..0ac0f18690f2 100644
--- a/drivers/pci/host/pcie-armada8k.c
+++ b/drivers/pci/host/pcie-armada8k.c
@@ -29,34 +29,33 @@
#include "pcie-designware.h"
struct armada8k_pcie {
- void __iomem *base;
+ struct pcie_port pp; /* pp.dbi_base is DT ctrl */
struct clk *clk;
- struct pcie_port pp;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
-#define PCIE_GLOBAL_CONTROL_REG 0x0
+#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0)
#define PCIE_APP_LTSSM_EN BIT(2)
#define PCIE_DEVICE_TYPE_SHIFT 4
#define PCIE_DEVICE_TYPE_MASK 0xF
#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */
-#define PCIE_GLOBAL_STATUS_REG 0x8
+#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8)
#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1)
#define PCIE_GLB_STS_PHY_LINK_UP BIT(9)
-#define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C
-#define PCIE_GLOBAL_INT_MASK1_REG 0x20
+#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C)
+#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20)
#define PCIE_INT_A_ASSERT_MASK BIT(9)
#define PCIE_INT_B_ASSERT_MASK BIT(10)
#define PCIE_INT_C_ASSERT_MASK BIT(11)
#define PCIE_INT_D_ASSERT_MASK BIT(12)
-#define PCIE_ARCACHE_TRC_REG 0x50
-#define PCIE_AWCACHE_TRC_REG 0x54
-#define PCIE_ARUSER_REG 0x5C
-#define PCIE_AWUSER_REG 0x60
+#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50)
+#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54)
+#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C)
+#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60)
/*
* AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
* allocate
@@ -72,11 +71,10 @@ struct armada8k_pcie {
static int armada8k_pcie_link_up(struct pcie_port *pp)
{
- struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
- reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
return 1;
@@ -85,51 +83,50 @@ static int armada8k_pcie_link_up(struct pcie_port *pp)
return 0;
}
-static void armada8k_pcie_establish_link(struct pcie_port *pp)
+static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
{
- struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
- void __iomem *base = pcie->base;
+ struct pcie_port *pp = &pcie->pp;
u32 reg;
if (!dw_pcie_link_up(pp)) {
/* Disable LTSSM state machine to enable configuration */
- reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_APP_LTSSM_EN);
- writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
}
/* Set the device to root complex mode */
- reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
- writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
/* Set the PCIe master AxCache attributes */
- writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG);
- writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG);
+ dw_pcie_writel_rc(pp, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
+ dw_pcie_writel_rc(pp, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
/* Set the PCIe master AxDomain attributes */
- reg = readl(base + PCIE_ARUSER_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_ARUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
- writel(reg, base + PCIE_ARUSER_REG);
+ dw_pcie_writel_rc(pp, PCIE_ARUSER_REG, reg);
- reg = readl(base + PCIE_AWUSER_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_AWUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
- writel(reg, base + PCIE_AWUSER_REG);
+ dw_pcie_writel_rc(pp, PCIE_AWUSER_REG, reg);
/* Enable INT A-D interrupts */
- reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_MASK1_REG);
reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
- writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_MASK1_REG, reg);
if (!dw_pcie_link_up(pp)) {
/* Configuration done. Start LTSSM */
- reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg |= PCIE_APP_LTSSM_EN;
- writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
}
/* Wait until the link becomes active again */
@@ -139,15 +136,16 @@ static void armada8k_pcie_establish_link(struct pcie_port *pp)
static void armada8k_pcie_host_init(struct pcie_port *pp)
{
+ struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+
dw_pcie_setup_rc(pp);
- armada8k_pcie_establish_link(pp);
+ armada8k_pcie_establish_link(pcie);
}
static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
- struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
- void __iomem *base = pcie->base;
+ struct armada8k_pcie *pcie = arg;
+ struct pcie_port *pp = &pcie->pp;
u32 val;
/*
@@ -155,8 +153,8 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
* PCI device. However, they are also latched into the PCIe
* controller, so we simply discard them.
*/
- val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG);
- writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG);
+ val = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG, val);
return IRQ_HANDLED;
}
@@ -166,9 +164,10 @@ static struct pcie_host_ops armada8k_pcie_host_ops = {
.host_init = armada8k_pcie_host_init,
};
-static int armada8k_add_pcie_port(struct pcie_port *pp,
+static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct platform_device *pdev)
{
+ struct pcie_port *pp = &pcie->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -182,7 +181,7 @@ static int armada8k_add_pcie_port(struct pcie_port *pp,
}
ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
- IRQF_SHARED, "armada8k-pcie", pp);
+ IRQF_SHARED, "armada8k-pcie", pcie);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
@@ -217,7 +216,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
pp = &pcie->pp;
pp->dev = dev;
- platform_set_drvdata(pdev, pcie);
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
@@ -228,9 +226,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
goto fail;
}
- pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
-
- ret = armada8k_add_pcie_port(pp, pdev);
+ ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
goto fail;
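
The armada8k conversion works by folding PCIE_VENDOR_REGS_OFFSET into each
register definition, so the vendor registers become reachable through
pp->dbi_base with the generic DesignWare accessors and the private base
pointer can go away. Both forms hit the same MMIO address:

	/* before: pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
	 *         reg = readl(pcie->base + 0x8);
	 * after:  reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_STATUS_REG);
	 *         with PCIE_GLOBAL_STATUS_REG = PCIE_VENDOR_REGS_OFFSET + 0x8
	 */
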
diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c
index 39bf1a6df463..212786b27f1a 100644
--- a/drivers/pci/host/pcie-artpec6.c
+++ b/drivers/pci/host/pcie-artpec6.c
@@ -27,9 +27,9 @@
#define to_artpec6_pcie(x) container_of(x, struct artpec6_pcie, pp)
struct artpec6_pcie {
- struct pcie_port pp;
- struct regmap *regmap;
- void __iomem *phy_base;
+ struct pcie_port pp; /* pp.dbi_base is DT dbi */
+ struct regmap *regmap; /* DT axis,syscon-pcie */
+ void __iomem *phy_base; /* DT phy */
};
/* PCIe Port Logic registers (memory-mapped) */
@@ -65,18 +65,31 @@ struct artpec6_pcie {
#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff
-static int artpec6_pcie_establish_link(struct pcie_port *pp)
+static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
{
- struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
+ u32 val;
+
+ regmap_read(artpec6_pcie->regmap, offset, &val);
+ return val;
+}
+
+static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
+{
+ regmap_write(artpec6_pcie->regmap, offset, val);
+}
+
+static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
+{
+ struct pcie_port *pp = &artpec6_pcie->pp;
u32 val;
unsigned int retries;
/* Hold DW core in reset */
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_CORE_RESET_REQ;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
PCIECFG_MODE_TX_DRV_EN |
PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */
@@ -84,27 +97,27 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
val |= PCIECFG_REFCLK_ENABLE;
val &= ~PCIECFG_DBG_OEN;
val &= ~PCIECFG_CLKREQ_B;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(5000, 6000);
- regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val |= NOCCFG_ENABLE_CLK_PCIE;
- regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
usleep_range(20, 30);
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(6000, 7000);
- regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
- regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
retries = 50;
do {
usleep_range(1000, 2000);
- regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
retries--;
} while (retries &&
(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
@@ -117,16 +130,16 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
} while (retries && !(val & PHY_COSPLLLOCK));
/* Take DW core out of reset */
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_CORE_RESET_REQ;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(100, 200);
/*
* Enable writing to config regs. This is required as the Synopsys
* driver changes the class code. That register needs DBI write enable.
*/
- writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF);
+ dw_pcie_writel_rc(pp, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
@@ -137,78 +150,69 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
/* assert LTSSM enable */
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_LTSSM_ENABLE;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
/* check if the link is up or not */
if (!dw_pcie_wait_for_link(pp))
return 0;
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
return -ETIMEDOUT;
}
-static void artpec6_pcie_enable_interrupts(struct pcie_port *pp)
+static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
{
+ struct pcie_port *pp = &artpec6_pcie->pp;
+
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
}
static void artpec6_pcie_host_init(struct pcie_port *pp)
{
- artpec6_pcie_establish_link(pp);
- artpec6_pcie_enable_interrupts(pp);
-}
-
-static int artpec6_pcie_link_up(struct pcie_port *pp)
-{
- u32 rc;
-
- /*
- * Get status from Synopsys IP
- * link is debug bit 36, debug register 1 starts at bit 32
- */
- rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
- if (rc)
- return 1;
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
- return 0;
+ artpec6_pcie_establish_link(artpec6_pcie);
+ artpec6_pcie_enable_interrupts(artpec6_pcie);
}
static struct pcie_host_ops artpec6_pcie_host_ops = {
- .link_up = artpec6_pcie_link_up,
.host_init = artpec6_pcie_host_init,
};
static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
+ struct artpec6_pcie *artpec6_pcie = arg;
+ struct pcie_port *pp = &artpec6_pcie->pp;
return dw_handle_msi_irq(pp);
}
-static int artpec6_add_pcie_port(struct pcie_port *pp,
+static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
struct platform_device *pdev)
{
+ struct pcie_port *pp = &artpec6_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq <= 0) {
- dev_err(&pdev->dev, "failed to get MSI irq\n");
+ dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ ret = devm_request_irq(dev, pp->msi_irq,
artpec6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "artpec6-pcie-msi", pp);
+ "artpec6-pcie-msi", artpec6_pcie);
if (ret) {
- dev_err(&pdev->dev, "failed to request MSI irq\n");
+ dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
@@ -218,7 +222,7 @@ static int artpec6_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -227,41 +231,40 @@ static int artpec6_add_pcie_port(struct pcie_port *pp,
static int artpec6_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct artpec6_pcie *artpec6_pcie;
struct pcie_port *pp;
struct resource *dbi_base;
struct resource *phy_base;
int ret;
- artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie),
- GFP_KERNEL);
+ artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
if (!artpec6_pcie)
return -ENOMEM;
pp = &artpec6_pcie->pp;
- pp->dev = &pdev->dev;
+ pp->dev = dev;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
if (IS_ERR(artpec6_pcie->phy_base))
return PTR_ERR(artpec6_pcie->phy_base);
artpec6_pcie->regmap =
- syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ syscon_regmap_lookup_by_phandle(dev->of_node,
"axis,syscon-pcie");
if (IS_ERR(artpec6_pcie->regmap))
return PTR_ERR(artpec6_pcie->regmap);
- ret = artpec6_add_pcie_port(pp, pdev);
+ ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, artpec6_pcie);
return 0;
}
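
Besides the regmap wrappers, the artpec6 hunks delete the driver's private
->link_up implementation. That is safe because when a driver leaves
pcie_host_ops.link_up unset, the DesignWare core polls the same PHY debug
register itself; the core fallback looks roughly like this (macro names
approximate):

	/* in dw_pcie_link_up(), when no per-driver hook is set; link up is
	 * debug bit 36, i.e. bit 4 of PCIE_PHY_DEBUG_R1
	 */
	val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
	return val & PCIE_PHY_DEBUG_R1_LINK_UP;
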
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 17da005497a5..8df6312ed300 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
- * Authors: Joao Pinto <jpinto@synopsys.com>
+ * Authors: Joao Pinto <jpmpinto@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,7 @@
#include "pcie-designware.h"
struct dw_plat_pcie {
- void __iomem *mem_base;
- struct pcie_port pp;
+ struct pcie_port pp; /* pp.dbi_base is DT 0th resource */
};
static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
@@ -52,6 +51,7 @@ static struct pcie_host_ops dw_plat_pcie_host_ops = {
static int dw_plat_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
{
+ struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 1);
@@ -63,11 +63,11 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
if (pp->msi_irq < 0)
return pp->msi_irq;
- ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ ret = devm_request_irq(dev, pp->msi_irq,
dw_plat_pcie_msi_irq_handler,
IRQF_SHARED, "dw-plat-pcie-msi", pp);
if (ret) {
- dev_err(&pdev->dev, "failed to request MSI IRQ\n");
+ dev_err(dev, "failed to request MSI IRQ\n");
return ret;
}
}
@@ -77,7 +77,7 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -86,31 +86,28 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct dw_plat_pcie *dw_plat_pcie;
struct pcie_port *pp;
struct resource *res; /* Resource from DT */
int ret;
- dw_plat_pcie = devm_kzalloc(&pdev->dev, sizeof(*dw_plat_pcie),
- GFP_KERNEL);
+ dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
if (!dw_plat_pcie)
return -ENOMEM;
pp = &dw_plat_pcie->pp;
- pp->dev = &pdev->dev;
+ pp->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dw_plat_pcie->mem_base))
- return PTR_ERR(dw_plat_pcie->mem_base);
-
- pp->dbi_base = dw_plat_pcie->mem_base;
+ pp->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
ret = dw_plat_add_pcie_port(pp, pdev);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, dw_plat_pcie);
return 0;
}
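
dw_plat_pcie loses its mem_base field for the same reason qcom further below
loses dbi: the mapping is only ever used as the DesignWare dbi base, so it can
be ioremapped straight into pp->dbi_base and the intermediate pointer deleted.
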
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 74da71ea544a..bed19994c1e9 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -141,41 +141,35 @@ int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
+u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
if (pp->ops->readl_rc)
- return pp->ops->readl_rc(pp, pp->dbi_base + reg);
+ return pp->ops->readl_rc(pp, reg);
return readl(pp->dbi_base + reg);
}
-static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
+void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
if (pp->ops->writel_rc)
- pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
+ pp->ops->writel_rc(pp, reg, val);
else
writel(val, pp->dbi_base + reg);
}
-static inline u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
+static u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
- if (pp->ops->readl_rc)
- return pp->ops->readl_rc(pp, pp->dbi_base + offset + reg);
-
- return readl(pp->dbi_base + offset + reg);
+ return dw_pcie_readl_rc(pp, offset + reg);
}
-static inline void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index,
- u32 val, u32 reg)
+static void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, u32 reg,
+ u32 val)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
- if (pp->ops->writel_rc)
- pp->ops->writel_rc(pp, val, pp->dbi_base + offset + reg);
- else
- writel(val, pp->dbi_base + offset + reg);
+ dw_pcie_writel_rc(pp, offset + reg, val);
}
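
The unroll helpers above stop open-coding the ops dispatch and delegate
instead:

	/* dw_pcie_readl_unroll(pp, index, reg) is now equivalent to
	 *   dw_pcie_readl_rc(pp, PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg)
	 * so a platform's readl_rc/writel_rc override automatically covers
	 * iATU unroll accesses as well.
	 */

This funneling is also what makes the large dw_pcie_prog_outbound_atu()
rewrite below purely mechanical (only the argument order changes), and note
that the iATU-unroll capability probe moves from dw_pcie_host_init() into
dw_pcie_setup_rc(), gaining a dev_dbg so the result is visible on every RC
setup.
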
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
@@ -202,35 +196,35 @@ static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
u32 retries, val;
if (pp->iatu_unroll_enabled) {
- dw_pcie_writel_unroll(pp, index,
- lower_32_bits(cpu_addr), PCIE_ATU_UNR_LOWER_BASE);
- dw_pcie_writel_unroll(pp, index,
- upper_32_bits(cpu_addr), PCIE_ATU_UNR_UPPER_BASE);
- dw_pcie_writel_unroll(pp, index,
- lower_32_bits(cpu_addr + size - 1), PCIE_ATU_UNR_LIMIT);
- dw_pcie_writel_unroll(pp, index,
- lower_32_bits(pci_addr), PCIE_ATU_UNR_LOWER_TARGET);
- dw_pcie_writel_unroll(pp, index,
- upper_32_bits(pci_addr), PCIE_ATU_UNR_UPPER_TARGET);
- dw_pcie_writel_unroll(pp, index,
- type, PCIE_ATU_UNR_REGION_CTRL1);
- dw_pcie_writel_unroll(pp, index,
- PCIE_ATU_ENABLE, PCIE_ATU_UNR_REGION_CTRL2);
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LIMIT,
+ lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL1,
+ type);
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL2,
+ PCIE_ATU_ENABLE);
} else {
- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
- PCIE_ATU_VIEWPORT);
- dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr),
- PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr),
- PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
- PCIE_ATU_LIMIT);
- dw_pcie_writel_rc(pp, lower_32_bits(pci_addr),
- PCIE_ATU_LOWER_TARGET);
- dw_pcie_writel_rc(pp, upper_32_bits(pci_addr),
- PCIE_ATU_UPPER_TARGET);
- dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
+ PCIE_ATU_REGION_OUTBOUND | index);
+ dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+ dw_pcie_writel_rc(pp, PCIE_ATU_LIMIT,
+ lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+ dw_pcie_writel_rc(pp, PCIE_ATU_CR1, type);
+ dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
}
/*
@@ -643,8 +637,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
-
if (pp->ops->host_init)
pp->ops->host_init(pp);
@@ -760,8 +752,8 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
return ret;
}
-static int dw_pcie_valid_config(struct pcie_port *pp,
- struct pci_bus *bus, int dev)
+static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
+ int dev)
{
/* If there is no link, then there is no device */
if (bus->number != pp->root_bus_nr) {
@@ -781,7 +773,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
{
struct pcie_port *pp = bus->sysdata;
- if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
+ if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -797,7 +789,7 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
{
struct pcie_port *pp = bus->sysdata;
- if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
+ if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number == pp->root_bus_nr)
@@ -815,6 +807,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
+ /* get iATU unroll support */
+ pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+ dev_dbg(pp->dev, "iATU unroll: %s\n",
+ pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
/* set the number of lanes */
val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
@@ -835,7 +832,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
return;
}
- dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
+ dw_pcie_writel_rc(pp, PCIE_PORT_LINK_CONTROL, val);
/* set link width speed control register */
val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
@@ -854,30 +851,30 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
break;
}
- dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
/* setup RC BARs */
- dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
- dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0x00000004);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0x00000000);
/* setup interrupt pins */
val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
- dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
+ dw_pcie_writel_rc(pp, PCI_INTERRUPT_LINE, val);
/* setup bus numbers */
val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00010100;
- dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
+ dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);
/* setup command register */
val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
- dw_pcie_writel_rc(pp, val, PCI_COMMAND);
+ dw_pcie_writel_rc(pp, PCI_COMMAND, val);
/*
* If the platform provides ->rd_other_conf, it means the platform
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index c8e5bc647f49..a567ea288ee2 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -54,9 +54,8 @@ struct pcie_port {
};
struct pcie_host_ops {
- u32 (*readl_rc)(struct pcie_port *pp, void __iomem *dbi_base);
- void (*writel_rc)(struct pcie_port *pp,
- u32 val, void __iomem *dbi_base);
+ u32 (*readl_rc)(struct pcie_port *pp, u32 reg);
+ void (*writel_rc)(struct pcie_port *pp, u32 reg, u32 val);
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
@@ -73,6 +72,8 @@ struct pcie_host_ops {
int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
};
+u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg);
+void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val);
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
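
The header change makes the reg-offset accessors the official sub-driver API:
the ->readl_rc/->writel_rc hooks now take a u32 register offset instead of a
precomputed __iomem address, dw_pcie_writel_rc()'s argument order becomes
(pp, reg, val), and both accessors are exported so armada8k, artpec6 and hisi
can drop their private MMIO wrappers. A platform override under the new
signatures would look like this (hypothetical driver, shown only to illustrate
the hook shape):

	static u32 foo_pcie_readl_rc(struct pcie_port *pp, u32 reg)
	{
		/* apply any controller quirk around the access here */
		return readl(pp->dbi_base + reg);
	}

	static void foo_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
	{
		writel(val, pp->dbi_base + reg);
	}

	static struct pcie_host_ops foo_pcie_host_ops = {
		.readl_rc  = foo_pcie_readl_rc,
		.writel_rc = foo_pcie_writel_rc,
	};
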
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 7ee9dfcc45fb..56154c25980c 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -22,51 +22,38 @@
#include "pcie-designware.h"
-#define PCIE_LTSSM_LINKUP_STATE 0x11
-#define PCIE_LTSSM_STATE_MASK 0x3F
-#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
-#define PCIE_SYS_STATE4 0x31c
-#define PCIE_HIP06_CTRL_OFF 0x1000
+#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
+#define PCIE_HIP06_CTRL_OFF 0x1000
+#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c)
+#define PCIE_LTSSM_LINKUP_STATE 0x11
+#define PCIE_LTSSM_STATE_MASK 0x3F
#define to_hisi_pcie(x) container_of(x, struct hisi_pcie, pp)
struct hisi_pcie;
struct pcie_soc_ops {
- int (*hisi_pcie_link_up)(struct hisi_pcie *pcie);
+ int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
};
struct hisi_pcie {
+ struct pcie_port pp; /* pp.dbi_base is DT rc_dbi */
struct regmap *subctrl;
- void __iomem *reg_base;
u32 port_id;
- struct pcie_port pp;
struct pcie_soc_ops *soc_ops;
};
-static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie,
- u32 val, u32 reg)
-{
- writel(val, pcie->reg_base + reg);
-}
-
-static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg)
-{
- return readl(pcie->reg_base + reg);
-}
-
/* HipXX PCIe host only supports 32-bit config access */
static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
u32 *val)
{
u32 reg;
u32 reg_val;
- struct hisi_pcie *pcie = to_hisi_pcie(pp);
void *walker = &reg_val;
walker += (where & 0x3);
reg = where & ~0x3;
- reg_val = hisi_pcie_apb_readl(pcie, reg);
+ reg_val = dw_pcie_readl_rc(pp, reg);
if (size == 1)
*val = *(u8 __force *) walker;
@@ -86,21 +73,20 @@ static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
{
u32 reg_val;
u32 reg;
- struct hisi_pcie *pcie = to_hisi_pcie(pp);
void *walker = &reg_val;
walker += (where & 0x3);
reg = where & ~0x3;
if (size == 4)
- hisi_pcie_apb_writel(pcie, val, reg);
+ dw_pcie_writel_rc(pp, reg, val);
else if (size == 2) {
- reg_val = hisi_pcie_apb_readl(pcie, reg);
+ reg_val = dw_pcie_readl_rc(pp, reg);
*(u16 __force *) walker = val;
- hisi_pcie_apb_writel(pcie, reg_val, reg);
+ dw_pcie_writel_rc(pp, reg, reg_val);
} else if (size == 1) {
- reg_val = hisi_pcie_apb_readl(pcie, reg);
+ reg_val = dw_pcie_readl_rc(pp, reg);
*(u8 __force *) walker = val;
- hisi_pcie_apb_writel(pcie, reg_val, reg);
+ dw_pcie_writel_rc(pp, reg, reg_val);
} else
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -119,10 +105,10 @@ static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
{
+ struct pcie_port *pp = &hisi_pcie->pp;
u32 val;
- val = hisi_pcie_apb_readl(hisi_pcie, PCIE_HIP06_CTRL_OFF +
- PCIE_SYS_STATE4);
+ val = dw_pcie_readl_rc(pp, PCIE_SYS_STATE4);
return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
}
@@ -140,19 +126,20 @@ static struct pcie_host_ops hisi_pcie_host_ops = {
.link_up = hisi_pcie_link_up,
};
-static int hisi_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
+ struct platform_device *pdev)
{
+ struct pcie_port *pp = &hisi_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
u32 port_id;
- struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
- if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) {
- dev_err(&pdev->dev, "failed to read port-id\n");
+ if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
+ dev_err(dev, "failed to read port-id\n");
return -EINVAL;
}
if (port_id > 3) {
- dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id);
+ dev_err(dev, "Invalid port-id: %d\n", port_id);
return -EINVAL;
}
hisi_pcie->port_id = port_id;
@@ -161,7 +148,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -170,6 +157,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
static int hisi_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct hisi_pcie *hisi_pcie;
struct pcie_port *pp;
const struct of_device_id *match;
@@ -177,40 +165,36 @@ static int hisi_pcie_probe(struct platform_device *pdev)
struct device_driver *driver;
int ret;
- hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL);
+ hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
if (!hisi_pcie)
return -ENOMEM;
pp = &hisi_pcie->pp;
- pp->dev = &pdev->dev;
- driver = (pdev->dev).driver;
+ pp->dev = dev;
+ driver = dev->driver;
- match = of_match_device(driver->of_match_table, &pdev->dev);
+ match = of_match_device(driver->of_match_table, dev);
hisi_pcie->soc_ops = (struct pcie_soc_ops *) match->data;
hisi_pcie->subctrl =
syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
if (IS_ERR(hisi_pcie->subctrl)) {
- dev_err(pp->dev, "cannot get subctrl base\n");
+ dev_err(dev, "cannot get subctrl base\n");
return PTR_ERR(hisi_pcie->subctrl);
}
reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
- hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg);
- if (IS_ERR(hisi_pcie->reg_base)) {
- dev_err(pp->dev, "cannot get rc_dbi base\n");
- return PTR_ERR(hisi_pcie->reg_base);
+ pp->dbi_base = devm_ioremap_resource(dev, reg);
+ if (IS_ERR(pp->dbi_base)) {
+ dev_err(dev, "cannot get rc_dbi base\n");
+ return PTR_ERR(pp->dbi_base);
}
- hisi_pcie->pp.dbi_base = hisi_pcie->reg_base;
-
- ret = hisi_add_pcie_port(pp, pdev);
+ ret = hisi_add_pcie_port(hisi_pcie, pdev);
if (ret)
return ret;
- platform_set_drvdata(pdev, hisi_pcie);
-
- dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
+ dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
return 0;
}
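
The hisi warning text is worth unpacking: HipXX can only issue 32-bit config
accesses, so hisi_pcie_cfg_write() emulates byte/word writes with a
read-modify-write of the containing dword, as the hunk above shows. Any RW1C
bit that happens to be set in that dword reads back as 1 and is written back
as 1, which clears it. Illustration using the hunk's own variables
(illustrative offsets only):

	/* a 16-bit write to PCI_STATUS (offset 0x06) becomes: */
	reg_val = dw_pcie_readl_rc(pp, 0x04);	/* reads COMMAND | STATUS  */
	*(u16 __force *)walker = val;		/* patch the upper 16 bits */
	dw_pcie_writel_rc(pp, 0x04, reg_val);	/* rewrites the whole dword;
						 * any RW1C status bit that
						 * read back as 1 is cleared
						 * as a side effect */
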
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 0d7bee4a0d26..8ce089043a27 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -42,19 +42,24 @@ static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
{
+ struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
LIST_HEAD(res);
struct resource res_mem;
int ret;
- pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &bdev->dev;
- bcma_set_drvdata(bdev, pcie);
+ pcie->dev = dev;
pcie->base = bdev->io_addr;
+ if (!pcie->base) {
+ dev_err(dev, "no controller registers\n");
+ return -ENOMEM;
+ }
+
pcie->base_addr = bdev->addr;
res_mem.start = bdev->addr_s[0];
@@ -67,10 +72,11 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
- dev_err(pcie->dev, "PCIe controller setup failed\n");
+ dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
+ bcma_set_drvdata(bdev, pcie);
return ret;
}
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 1738c5288eb6..a3de087976b3 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -40,35 +40,35 @@ MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct iproc_pcie *pcie;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct resource reg;
resource_size_t iobase = 0;
LIST_HEAD(res);
int ret;
- of_id = of_match_device(iproc_pcie_of_match_table, &pdev->dev);
+ of_id = of_match_device(iproc_pcie_of_match_table, dev);
if (!of_id)
return -EINVAL;
- pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &pdev->dev;
+ pcie->dev = dev;
pcie->type = (enum iproc_pcie_type)of_id->data;
- platform_set_drvdata(pdev, pcie);
ret = of_address_to_resource(np, 0, &reg);
if (ret < 0) {
- dev_err(pcie->dev, "unable to obtain controller resources\n");
+ dev_err(dev, "unable to obtain controller resources\n");
return ret;
}
- pcie->base = devm_ioremap(pcie->dev, reg.start, resource_size(&reg));
+ pcie->base = devm_ioremap(dev, reg.start, resource_size(&reg));
if (!pcie->base) {
- dev_err(pcie->dev, "unable to map controller registers\n");
+ dev_err(dev, "unable to map controller registers\n");
return -ENOMEM;
}
pcie->base_addr = reg.start;
@@ -79,7 +79,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
&val);
if (ret) {
- dev_err(pcie->dev,
+ dev_err(dev,
"missing brcm,pcie-ob-axi-offset property\n");
return ret;
}
@@ -88,7 +88,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
&val);
if (ret) {
- dev_err(pcie->dev,
+ dev_err(dev,
"missing brcm,pcie-ob-window-size property\n");
return ret;
}
@@ -101,7 +101,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
}
/* PHY use is optional */
- pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
+ pcie->phy = devm_phy_get(dev, "pcie-phy");
if (IS_ERR(pcie->phy)) {
if (PTR_ERR(pcie->phy) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -110,7 +110,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
if (ret) {
- dev_err(pcie->dev,
+ dev_err(dev,
"unable to get PCI host bridge resources\n");
return ret;
}
@@ -119,10 +119,11 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
- dev_err(pcie->dev, "PCIe controller setup failed\n");
+ dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
+ platform_set_drvdata(pdev, pcie);
return ret;
}
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index e167b2f0098d..0b999a9fb843 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -63,6 +63,8 @@
#define OARR_SIZE_CFG_SHIFT 1
#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
+#define PCI_EXP_CAP 0xac
+
#define MAX_NUM_OB_WINDOWS 2
#define IPROC_PCIE_REG_INVALID 0xffff
@@ -258,9 +260,10 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
{
+ struct device *dev = pcie->dev;
u8 hdr_type;
u32 link_ctrl, class, val;
- u16 pos, link_status;
+ u16 pos = PCI_EXP_CAP, link_status;
bool link_is_active = false;
/*
@@ -272,14 +275,14 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
- dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
+ dev_err(dev, "PHY or data link is INACTIVE!\n");
return -ENODEV;
}
/* make sure we are not in EP mode */
pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
- dev_err(pcie->dev, "in EP mode, hdr=%#02x\n", hdr_type);
+ dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
return -EFAULT;
}
@@ -293,30 +296,27 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
/* check link status to see if link is active */
- pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
if (!link_is_active) {
/* try GEN 1 link speed */
-#define PCI_LINK_STATUS_CTRL_2_OFFSET 0x0dc
#define PCI_TARGET_LINK_SPEED_MASK 0xf
#define PCI_TARGET_LINK_SPEED_GEN2 0x2
#define PCI_TARGET_LINK_SPEED_GEN1 0x1
pci_bus_read_config_dword(bus, 0,
- PCI_LINK_STATUS_CTRL_2_OFFSET,
+ pos + PCI_EXP_LNKCTL2,
&link_ctrl);
if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
PCI_TARGET_LINK_SPEED_GEN2) {
link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
pci_bus_write_config_dword(bus, 0,
- PCI_LINK_STATUS_CTRL_2_OFFSET,
+ pos + PCI_EXP_LNKCTL2,
link_ctrl);
msleep(100);
- pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
&link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
@@ -324,7 +324,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
}
}
- dev_info(pcie->dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
+ dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
return link_is_active ? 0 : -ENODEV;
}
@@ -349,12 +349,13 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
u64 pci_addr, resource_size_t size)
{
struct iproc_pcie_ob *ob = &pcie->ob;
+ struct device *dev = pcie->dev;
unsigned i;
u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
u64 remainder;
if (size > max_size) {
- dev_err(pcie->dev,
+ dev_err(dev,
"res size %pap exceeds max supported size 0x%llx\n",
&size, max_size);
return -EINVAL;
@@ -362,15 +363,14 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
div64_u64_rem(size, ob->window_size, &remainder);
if (remainder) {
- dev_err(pcie->dev,
+ dev_err(dev,
"res size %pap needs to be multiple of window size %pap\n",
&size, &ob->window_size);
return -EINVAL;
}
if (axi_addr < ob->axi_offset) {
- dev_err(pcie->dev,
- "axi address %pap less than offset %pap\n",
+ dev_err(dev, "axi address %pap less than offset %pap\n",
&axi_addr, &ob->axi_offset);
return -EINVAL;
}
@@ -406,6 +406,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
struct list_head *resources)
{
+ struct device *dev = pcie->dev;
struct resource_entry *window;
int ret;
@@ -425,7 +426,7 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
return ret;
break;
default:
- dev_err(pcie->dev, "invalid resource %pR\n", res);
+ dev_err(dev, "invalid resource %pR\n", res);
return -EINVAL;
}
}
@@ -455,26 +456,25 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
+ struct device *dev;
int ret;
void *sysdata;
struct pci_bus *bus;
- if (!pcie || !pcie->dev || !pcie->base)
- return -EINVAL;
-
- ret = devm_request_pci_bus_resources(pcie->dev, res);
+ dev = pcie->dev;
+ ret = devm_request_pci_bus_resources(dev, res);
if (ret)
return ret;
ret = phy_init(pcie->phy);
if (ret) {
- dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
+ dev_err(dev, "unable to initialize PCIe PHY\n");
return ret;
}
ret = phy_power_on(pcie->phy);
if (ret) {
- dev_err(pcie->dev, "unable to power on PCIe PHY\n");
+ dev_err(dev, "unable to power on PCIe PHY\n");
goto err_exit_phy;
}
@@ -486,7 +486,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pcie->reg_offsets = iproc_pcie_reg_paxc;
break;
default:
- dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
+ dev_err(dev, "incompatible iProc PCIe interface\n");
ret = -EINVAL;
goto err_power_off_phy;
}
@@ -496,7 +496,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (pcie->need_ob_cfg) {
ret = iproc_pcie_map_ranges(pcie, res);
if (ret) {
- dev_err(pcie->dev, "map failed\n");
+ dev_err(dev, "map failed\n");
goto err_power_off_phy;
}
}
@@ -508,9 +508,9 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
sysdata = pcie;
#endif
- bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
+ bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
if (!bus) {
- dev_err(pcie->dev, "unable to create PCI root bus\n");
+ dev_err(dev, "unable to create PCI root bus\n");
ret = -ENOMEM;
goto err_power_off_phy;
}
@@ -518,7 +518,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
ret = iproc_pcie_check_link(pcie, bus);
if (ret) {
- dev_err(pcie->dev, "no PCIe EP device detected\n");
+ dev_err(dev, "no PCIe EP device detected\n");
goto err_rm_root_bus;
}
@@ -526,7 +526,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (IS_ENABLED(CONFIG_PCI_MSI))
if (iproc_pcie_msi_enable(pcie))
- dev_info(pcie->dev, "not using iProc MSI\n");
+ dev_info(dev, "not using iProc MSI\n");
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
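
The iproc link check drops the two pci_bus_find_capability() walks in favor of
a fixed PCI_EXP_CAP = 0xac. That only works because the iProc root port's
config space layout is fixed; the payoff is that the old private
PCI_LINK_STATUS_CTRL_2_OFFSET constant can be replaced by the standard symbol,
and the arithmetic confirms the two agree:

	/* PCI_EXP_LNKCTL2 is 0x30 into the PCIe capability, so
	 *   pos + PCI_EXP_LNKCTL2 = 0xac + 0x30 = 0xdc,
	 * i.e. exactly the deleted PCI_LINK_STATUS_CTRL_2_OFFSET (0x0dc).
	 */
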
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index 5ec2d440a6b7..35936409b2d4 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -86,12 +86,10 @@ struct qcom_pcie_ops {
};
struct qcom_pcie {
- struct pcie_port pp;
- struct device *dev;
+ struct pcie_port pp; /* pp.dbi_base is DT dbi */
+ void __iomem *parf; /* DT parf */
+ void __iomem *elbi; /* DT elbi */
union qcom_pcie_resources res;
- void __iomem *parf;
- void __iomem *dbi;
- void __iomem *elbi;
struct phy *phy;
struct gpio_desc *reset;
struct qcom_pcie_ops *ops;
@@ -136,7 +134,7 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->pp.dev;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
@@ -188,7 +186,7 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->pp.dev;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
@@ -237,7 +235,7 @@ static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->pp.dev;
u32 val;
int ret;
@@ -359,7 +357,7 @@ static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->pp.dev;
int ret;
ret = reset_control_deassert(res->core);
@@ -426,7 +424,7 @@ err_res:
static int qcom_pcie_link_up(struct pcie_port *pp)
{
struct qcom_pcie *pcie = to_qcom_pcie(pp);
- u16 val = readw(pcie->dbi + PCIE20_CAP + PCI_EXP_LNKSTA);
+ u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
@@ -509,8 +507,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
+ pp = &pcie->pp;
pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
- pcie->dev = dev;
pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
if (IS_ERR(pcie->reset))
@@ -522,9 +520,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->parf);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pcie->dbi = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->dbi))
- return PTR_ERR(pcie->dbi);
+ pp->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
pcie->elbi = devm_ioremap_resource(dev, res);
@@ -535,13 +533,11 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pcie->phy))
return PTR_ERR(pcie->phy);
+ pp->dev = dev;
ret = pcie->ops->get_resources(pcie);
if (ret)
return ret;
- pp = &pcie->pp;
- pp->dev = dev;
- pp->dbi_base = pcie->dbi;
pp->root_bus_nr = -1;
pp->ops = &qcom_pcie_dw_ops;
@@ -569,8 +565,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, pcie);
-
return 0;
}
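
qcom follows the dw-plat pattern: the private dbi and dev members are dropped
in favor of pp->dbi_base and pp->dev, with the dbi resource ioremapped
directly into the port structure, so qcom_pcie_link_up() now reads the link
status through pp.dbi_base.
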
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index e06b1d3b4dea..62700d1896f4 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -31,8 +31,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#define DRV_NAME "rcar-pcie"
-
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE (1 << 31)
@@ -397,6 +395,7 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
+ struct device *dev = pcie->dev;
unsigned int timeout = 1000;
u32 macsr;
@@ -404,7 +403,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
return;
if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
- dev_err(pcie->dev, "Speed change already in progress\n");
+ dev_err(dev, "Speed change already in progress\n");
return;
}
@@ -433,7 +432,7 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
rcar_pci_write_reg(pcie, macsr, MACSR);
if (macsr & SPCHGFAIL)
- dev_err(pcie->dev, "Speed change failed\n");
+ dev_err(dev, "Speed change failed\n");
goto done;
}
@@ -441,15 +440,16 @@ static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
msleep(1);
};
- dev_err(pcie->dev, "Speed change timed out\n");
+ dev_err(dev, "Speed change timed out\n");
done:
- dev_info(pcie->dev, "Current link speed is %s GT/s\n",
+ dev_info(dev, "Current link speed is %s GT/s\n",
(macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}
static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
+ struct device *dev = pcie->dev;
struct pci_bus *bus, *child;
LIST_HEAD(res);
@@ -461,14 +461,14 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
if (IS_ENABLED(CONFIG_PCI_MSI))
- bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
+ bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
else
- bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
+ bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
&rcar_pcie_ops, pcie, &res);
if (!bus) {
- dev_err(pcie->dev, "Scanning rootbus failed");
+ dev_err(dev, "Scanning rootbus failed");
return -ENODEV;
}
@@ -487,6 +487,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
+ struct device *dev = pcie->dev;
unsigned int timeout = 100;
while (timeout--) {
@@ -496,7 +497,7 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
udelay(100);
}
- dev_err(pcie->dev, "Access to PCIe phy timed out\n");
+ dev_err(dev, "Access to PCIe phy timed out\n");
return -ETIMEDOUT;
}
@@ -697,6 +698,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
struct rcar_pcie *pcie = data;
struct rcar_msi *msi = &pcie->msi;
+ struct device *dev = pcie->dev;
unsigned long reg;
reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
@@ -717,10 +719,10 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
if (test_bit(index, msi->used))
generic_handle_irq(irq);
else
- dev_info(pcie->dev, "unhandled MSI\n");
+ dev_info(dev, "unhandled MSI\n");
} else {
/* Unknown MSI, just clear it */
- dev_dbg(pcie->dev, "unexpected MSI\n");
+ dev_dbg(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
@@ -843,22 +845,22 @@ static const struct irq_domain_ops msi_domain_ops = {
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device *dev = pcie->dev;
struct rcar_msi *msi = &pcie->msi;
unsigned long base;
int err, i;
mutex_init(&msi->lock);
- msi->chip.dev = pcie->dev;
+ msi->chip.dev = dev;
msi->chip.setup_irq = rcar_msi_setup_irq;
msi->chip.setup_irqs = rcar_msi_setup_irqs;
msi->chip.teardown_irq = rcar_msi_teardown_irq;
- msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+ msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
&msi_domain_ops, &msi->chip);
if (!msi->domain) {
- dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -866,19 +868,19 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
irq_create_mapping(msi->domain, i);
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
- err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+ err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_irq_chip.name, pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
- err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+ err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_irq_chip.name, pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
@@ -899,32 +901,32 @@ err:
return err;
}
-static int rcar_pcie_get_resources(struct platform_device *pdev,
- struct rcar_pcie *pcie)
+static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
+ struct device *dev = pcie->dev;
struct resource res;
int err, i;
- err = of_address_to_resource(pdev->dev.of_node, 0, &res);
+ err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
return err;
- pcie->base = devm_ioremap_resource(&pdev->dev, &res);
+ pcie->base = devm_ioremap_resource(dev, &res);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(pcie->clk)) {
- dev_err(pcie->dev, "cannot get platform clock\n");
+ dev_err(dev, "cannot get platform clock\n");
return PTR_ERR(pcie->clk);
}
err = clk_prepare_enable(pcie->clk);
if (err)
return err;
- pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(pcie->bus_clk)) {
- dev_err(pcie->dev, "cannot get pcie bus clock\n");
+ dev_err(dev, "cannot get pcie bus clock\n");
err = PTR_ERR(pcie->bus_clk);
goto fail_clk;
}
@@ -932,17 +934,17 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
if (err)
goto fail_clk;
- i = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ i = irq_of_parse_and_map(dev->of_node, 0);
if (!i) {
- dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
}
pcie->msi.irq1 = i;
- i = irq_of_parse_and_map(pdev->dev.of_node, 1);
+ i = irq_of_parse_and_map(dev->of_node, 1);
if (!i) {
- dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
}
@@ -1119,60 +1121,60 @@ out_release_res:
static int rcar_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rcar_pcie *pcie;
unsigned int data;
const struct of_device_id *of_id;
int err;
int (*hw_init_fn)(struct rcar_pcie *);
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &pdev->dev;
- platform_set_drvdata(pdev, pcie);
+ pcie->dev = dev;
INIT_LIST_HEAD(&pcie->resources);
rcar_pcie_parse_request_of_pci_ranges(pcie);
- err = rcar_pcie_get_resources(pdev, pcie);
+ err = rcar_pcie_get_resources(pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+ dev_err(dev, "failed to request resources: %d\n", err);
return err;
}
- err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
+ err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
return err;
- of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
+ of_id = of_match_device(rcar_pcie_of_match, dev);
if (!of_id || !of_id->data)
return -EINVAL;
hw_init_fn = of_id->data;
- pm_runtime_enable(pcie->dev);
- err = pm_runtime_get_sync(pcie->dev);
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
if (err < 0) {
- dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+ dev_err(dev, "pm_runtime_get_sync failed\n");
goto err_pm_disable;
}
/* Failure to get a link might just be that no cards are inserted */
err = hw_init_fn(pcie);
if (err) {
- dev_info(&pdev->dev, "PCIe link down\n");
+ dev_info(dev, "PCIe link down\n");
err = 0;
goto err_pm_put;
}
data = rcar_pci_read_reg(pcie, MACSR);
- dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+ dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = rcar_pcie_enable_msi(pcie);
if (err < 0) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"failed to enable MSI support: %d\n",
err);
goto err_pm_put;
@@ -1186,16 +1188,16 @@ static int rcar_pcie_probe(struct platform_device *pdev)
return 0;
err_pm_put:
- pm_runtime_put(pcie->dev);
+ pm_runtime_put(dev);
err_pm_disable:
- pm_runtime_disable(pcie->dev);
+ pm_runtime_disable(dev);
return err;
}
static struct platform_driver rcar_pcie_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = "rcar-pcie",
.of_match_table = rcar_pcie_of_match,
.suppress_bind_attrs = true,
},
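Every rcar hunk above applies the same mechanical refactor: load pcie->dev (or &pdev->dev) into a local dev once per function, then route all dev_err()/dev_info()/devm_* calls through it. A hedged kernel-context sketch of the resulting shape; example_check_link() is hypothetical, while rcar_pci_read_reg(), MACSR and LINK_SPEED are the driver's own symbols:

static int example_check_link(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;		/* fetched once */
	u32 macsr = rcar_pci_read_reg(pcie, MACSR);

	if (!(macsr & LINK_SPEED)) {
		dev_err(dev, "link not trained\n");
		return -ENODEV;
	}

	dev_info(dev, "link up\n");
	return 0;
}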
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index b8c82fc812dc..e04f69beb42d 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -190,6 +190,9 @@ struct rockchip_pcie {
struct reset_control *mgmt_rst;
struct reset_control *mgmt_sticky_rst;
struct reset_control *pipe_rst;
+ struct reset_control *pm_rst;
+ struct reset_control *aclk_rst;
+ struct reset_control *pclk_rst;
struct clk *aclk_pcie;
struct clk *aclk_perf_pcie;
struct clk *hclk_pcie;
@@ -408,6 +411,44 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
gpiod_set_value(rockchip->ep_gpio, 0);
+ err = reset_control_assert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "assert aclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "assert pclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "assert pm_rst err %d\n", err);
+ return err;
+ }
+
+ udelay(10);
+
+ err = reset_control_deassert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "deassert pm_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
err = phy_init(rockchip->phy);
if (err < 0) {
dev_err(dev, "fail to init phy, err %d\n", err);
@@ -781,6 +822,27 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
return PTR_ERR(rockchip->pipe_rst);
}
+ rockchip->pm_rst = devm_reset_control_get(dev, "pm");
+ if (IS_ERR(rockchip->pm_rst)) {
+ if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pm reset property in node\n");
+ return PTR_ERR(rockchip->pm_rst);
+ }
+
+ rockchip->pclk_rst = devm_reset_control_get(dev, "pclk");
+ if (IS_ERR(rockchip->pclk_rst)) {
+ if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pclk reset property in node\n");
+ return PTR_ERR(rockchip->pclk_rst);
+ }
+
+ rockchip->aclk_rst = devm_reset_control_get(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_rst)) {
+ if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing aclk reset property in node\n");
+ return PTR_ERR(rockchip->aclk_rst);
+ }
+
rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
if (IS_ERR(rockchip->ep_gpio)) {
dev_err(dev, "missing ep-gpios property in node\n");
@@ -972,7 +1034,7 @@ static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
return -EINVAL;
if (region_no == 0) {
if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
- return -EINVAL;
+ return -EINVAL;
}
if (region_no != 0) {
if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
@@ -1091,8 +1153,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err)
goto err_vpcie;
- platform_set_drvdata(pdev, rockchip);
-
rockchip_pcie_enable_interrupts(rockchip);
err = rockchip_pcie_init_irq_domain(rockchip);
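The new rockchip init code pulses three freshly acquired reset lines: assert aclk/pclk/pm, wait 10 microseconds, then deassert pm/aclk/pclk before the PHY is initialized. Condensed into one hedged sketch (example_pulse_resets() is hypothetical; reset_control_*() and udelay() are the standard kernel APIs from linux/reset.h and linux/delay.h):

static int example_pulse_resets(struct rockchip_pcie *rockchip)
{
	int err;

	err = reset_control_assert(rockchip->aclk_rst);
	if (err)
		return err;
	err = reset_control_assert(rockchip->pclk_rst);
	if (err)
		return err;
	err = reset_control_assert(rockchip->pm_rst);
	if (err)
		return err;

	udelay(10);		/* let the resets settle, as above */

	err = reset_control_deassert(rockchip->pm_rst);
	if (err)
		return err;
	err = reset_control_deassert(rockchip->aclk_rst);
	if (err)
		return err;
	return reset_control_deassert(rockchip->pclk_rst);
}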
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 09aed85f275a..3cf197ba7f37 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -25,10 +25,10 @@
#include "pcie-designware.h"
struct spear13xx_pcie {
+ struct pcie_port pp; /* DT dbi is pp.dbi_base */
void __iomem *app_base;
struct phy *phy;
struct clk *clk;
- struct pcie_port pp;
bool is_gen1;
};
@@ -57,96 +57,26 @@ struct pcie_app_reg {
};
/* CR0 ID */
-#define RX_LANE_FLIP_EN_ID 0
-#define TX_LANE_FLIP_EN_ID 1
-#define SYS_AUX_PWR_DET_ID 2
#define APP_LTSSM_ENABLE_ID 3
-#define SYS_ATTEN_BUTTON_PRESSED_ID 4
-#define SYS_MRL_SENSOR_STATE_ID 5
-#define SYS_PWR_FAULT_DET_ID 6
-#define SYS_MRL_SENSOR_CHGED_ID 7
-#define SYS_PRE_DET_CHGED_ID 8
-#define SYS_CMD_CPLED_INT_ID 9
-#define APP_INIT_RST_0_ID 11
-#define APP_REQ_ENTR_L1_ID 12
-#define APP_READY_ENTR_L23_ID 13
-#define APP_REQ_EXIT_L1_ID 14
-#define DEVICE_TYPE_EP (0 << 25)
-#define DEVICE_TYPE_LEP (1 << 25)
#define DEVICE_TYPE_RC (4 << 25)
-#define SYS_INT_ID 29
#define MISCTRL_EN_ID 30
#define REG_TRANSLATION_ENABLE 31
-/* CR1 ID */
-#define APPS_PM_XMT_TURNOFF_ID 2
-#define APPS_PM_XMT_PME_ID 5
-
/* CR3 ID */
-#define XMLH_LTSSM_STATE_DETECT_QUIET 0x00
-#define XMLH_LTSSM_STATE_DETECT_ACT 0x01
-#define XMLH_LTSSM_STATE_POLL_ACTIVE 0x02
-#define XMLH_LTSSM_STATE_POLL_COMPLIANCE 0x03
-#define XMLH_LTSSM_STATE_POLL_CONFIG 0x04
-#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET 0x05
-#define XMLH_LTSSM_STATE_DETECT_WAIT 0x06
-#define XMLH_LTSSM_STATE_CFG_LINKWD_START 0x07
-#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT 0x08
-#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT 0x09
-#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT 0x0A
-#define XMLH_LTSSM_STATE_CFG_COMPLETE 0x0B
-#define XMLH_LTSSM_STATE_CFG_IDLE 0x0C
-#define XMLH_LTSSM_STATE_RCVRY_LOCK 0x0D
-#define XMLH_LTSSM_STATE_RCVRY_SPEED 0x0E
-#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG 0x0F
-#define XMLH_LTSSM_STATE_RCVRY_IDLE 0x10
-#define XMLH_LTSSM_STATE_L0 0x11
-#define XMLH_LTSSM_STATE_L0S 0x12
-#define XMLH_LTSSM_STATE_L123_SEND_EIDLE 0x13
-#define XMLH_LTSSM_STATE_L1_IDLE 0x14
-#define XMLH_LTSSM_STATE_L2_IDLE 0x15
-#define XMLH_LTSSM_STATE_L2_WAKE 0x16
-#define XMLH_LTSSM_STATE_DISABLED_ENTRY 0x17
-#define XMLH_LTSSM_STATE_DISABLED_IDLE 0x18
-#define XMLH_LTSSM_STATE_DISABLED 0x19
-#define XMLH_LTSSM_STATE_LPBK_ENTRY 0x1A
-#define XMLH_LTSSM_STATE_LPBK_ACTIVE 0x1B
-#define XMLH_LTSSM_STATE_LPBK_EXIT 0x1C
-#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT 0x1D
-#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY 0x1E
-#define XMLH_LTSSM_STATE_HOT_RESET 0x1F
-#define XMLH_LTSSM_STATE_MASK 0x3F
#define XMLH_LINK_UP (1 << 6)
-/* CR4 ID */
-#define CFG_MSI_EN_ID 18
-
/* CR6 */
-#define INTA_CTRL_INT (1 << 7)
-#define INTB_CTRL_INT (1 << 8)
-#define INTC_CTRL_INT (1 << 9)
-#define INTD_CTRL_INT (1 << 10)
#define MSI_CTRL_INT (1 << 26)
-/* CR19 ID */
-#define VEN_MSI_REQ_ID 11
-#define VEN_MSI_FUN_NUM_ID 8
-#define VEN_MSI_TC_ID 5
-#define VEN_MSI_VECTOR_ID 0
-#define VEN_MSI_REQ_EN ((u32)0x1 << VEN_MSI_REQ_ID)
-#define VEN_MSI_FUN_NUM_MASK ((u32)0x7 << VEN_MSI_FUN_NUM_ID)
-#define VEN_MSI_TC_MASK ((u32)0x7 << VEN_MSI_TC_ID)
-#define VEN_MSI_VECTOR_MASK ((u32)0x1F << VEN_MSI_VECTOR_ID)
-
#define EXP_CAP_ID_OFFSET 0x70
#define to_spear13xx_pcie(x) container_of(x, struct spear13xx_pcie, pp)
-static int spear13xx_pcie_establish_link(struct pcie_port *pp)
+static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
{
- u32 val;
- struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_port *pp = &spear13xx_pcie->pp;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ u32 val;
u32 exp_cap_off = EXP_CAP_ID_OFFSET;
if (dw_pcie_link_up(pp)) {
@@ -203,9 +133,9 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
- struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct spear13xx_pcie *spear13xx_pcie = arg;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_port *pp = &spear13xx_pcie->pp;
unsigned int status;
status = readl(&app_reg->int_sts);
@@ -220,9 +150,9 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp)
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
- struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_port *pp = &spear13xx_pcie->pp;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
/* Enable MSI interrupt */
@@ -246,8 +176,10 @@ static int spear13xx_pcie_link_up(struct pcie_port *pp)
static void spear13xx_pcie_host_init(struct pcie_port *pp)
{
- spear13xx_pcie_establish_link(pp);
- spear13xx_pcie_enable_interrupts(pp);
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+
+ spear13xx_pcie_establish_link(spear13xx_pcie);
+ spear13xx_pcie_enable_interrupts(spear13xx_pcie);
}
static struct pcie_host_ops spear13xx_pcie_host_ops = {
@@ -255,10 +187,11 @@ static struct pcie_host_ops spear13xx_pcie_host_ops = {
.host_init = spear13xx_pcie_host_init,
};
-static int spear13xx_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
+ struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
+ struct pcie_port *pp = &spear13xx_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 0);
@@ -268,7 +201,7 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
}
ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "spear1340-pcie", pp);
+ "spear1340-pcie", spear13xx_pcie);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
@@ -288,10 +221,10 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
static int spear13xx_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct spear13xx_pcie *spear13xx_pcie;
struct pcie_port *pp;
- struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct resource *dbi_base;
int ret;
@@ -323,7 +256,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
pp = &spear13xx_pcie->pp;
-
pp->dev = dev;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
@@ -338,7 +270,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
if (of_property_read_bool(np, "st,pcie-is-gen1"))
spear13xx_pcie->is_gen1 = true;
- ret = spear13xx_add_pcie_port(pp, pdev);
+ ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
if (ret < 0)
goto fail_clk;
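The spear13xx rework above registers the wrapper struct spear13xx_pcie * as the IRQ cookie, so the handler derives the embedded pcie_port locally instead of calling to_spear13xx_pcie(). A self-contained illustration of that cookie change; spear_like and irq_handler() are hypothetical stand-ins:

#include <stdio.h>

struct pcie_port {
	int irq;
};

struct spear_like {			/* hypothetical wrapper */
	struct pcie_port pp;
	int is_gen1;
};

/* The cookie is now the wrapper; the port is derived when needed. */
static int irq_handler(int irq, void *arg)
{
	struct spear_like *s = arg;
	struct pcie_port *pp = &s->pp;

	printf("irq %d: port irq %d, gen1=%d\n", irq, pp->irq, s->is_gen1);
	return 0;
}

int main(void)
{
	struct spear_like s = { .pp = { .irq = 42 }, .is_gen1 = 1 };

	return irq_handler(42, &s);	/* registration passes &s as cookie */
}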
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 67eae4179290..43eaa4afab94 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -212,6 +212,7 @@ static bool nwl_phy_link_up(struct nwl_pcie *pcie)
static int nwl_wait_for_link(struct nwl_pcie *pcie)
{
+ struct device *dev = pcie->dev;
int retries;
/* check if the link is up or not */
@@ -221,7 +222,7 @@ static int nwl_wait_for_link(struct nwl_pcie *pcie)
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(pcie->dev, "PHY link never came up\n");
+ dev_err(dev, "PHY link never came up\n");
return -ETIMEDOUT;
}
@@ -277,6 +278,7 @@ static struct pci_ops nwl_pcie_ops = {
static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
struct nwl_pcie *pcie = data;
+ struct device *dev = pcie->dev;
u32 misc_stat;
/* Checking for misc interrupts */
@@ -286,45 +288,43 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
return IRQ_NONE;
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
- dev_err(pcie->dev, "Received Message FIFO Overflow\n");
+ dev_err(dev, "Received Message FIFO Overflow\n");
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
- dev_err(pcie->dev, "Slave error\n");
+ dev_err(dev, "Slave error\n");
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
- dev_err(pcie->dev, "Master error\n");
+ dev_err(dev, "Master error\n");
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
- dev_err(pcie->dev,
- "In Misc Ingress address translation error\n");
+ dev_err(dev, "In Misc Ingress address translation error\n");
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
- dev_err(pcie->dev,
- "In Misc Egress address translation error\n");
+ dev_err(dev, "In Misc Egress address translation error\n");
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
- dev_err(pcie->dev, "Fatal Error in AER Capability\n");
+ dev_err(dev, "Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
- dev_err(pcie->dev, "Non-Fatal Error in AER Capability\n");
+ dev_err(dev, "Non-Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_CORR_AER)
- dev_err(pcie->dev, "Correctable Error in AER Capability\n");
+ dev_err(dev, "Correctable Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
- dev_err(pcie->dev, "Unsupported request Detected\n");
+ dev_err(dev, "Unsupported request Detected\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
- dev_err(pcie->dev, "Non-Fatal Error Detected\n");
+ dev_err(dev, "Non-Fatal Error Detected\n");
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
- dev_err(pcie->dev, "Fatal Error Detected\n");
+ dev_err(dev, "Fatal Error Detected\n");
if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
- dev_info(pcie->dev, "Link Autonomous Bandwidth Management Status bit set\n");
+ dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
- dev_info(pcie->dev, "Link Bandwidth Management Status bit set\n");
+ dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
@@ -494,20 +494,21 @@ static const struct irq_domain_ops dev_msi_domain_ops = {
static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
- struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node);
+ struct device *dev = pcie->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
&dev_msi_domain_ops, pcie);
if (!msi->dev_domain) {
- dev_err(pcie->dev, "failed to create dev IRQ domain\n");
+ dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
}
msi->msi_domain = pci_msi_create_irq_domain(fwnode,
&nwl_msi_domain_info,
msi->dev_domain);
if (!msi->msi_domain) {
- dev_err(pcie->dev, "failed to create msi IRQ domain\n");
+ dev_err(dev, "failed to create msi IRQ domain\n");
irq_domain_remove(msi->dev_domain);
return -ENOMEM;
}
@@ -517,12 +518,13 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
- struct device_node *node = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
struct device_node *legacy_intc_node;
legacy_intc_node = of_get_next_child(node, NULL);
if (!legacy_intc_node) {
- dev_err(pcie->dev, "No legacy intc node found\n");
+ dev_err(dev, "No legacy intc node found\n");
return -EINVAL;
}
@@ -532,7 +534,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
pcie);
if (!pcie->legacy_irq_domain) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
+ dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -542,7 +544,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct nwl_msi *msi = &pcie->msi;
unsigned long base;
int ret;
@@ -557,7 +560,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
/* Get msi_1 IRQ number */
msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
if (msi->irq_msi1 < 0) {
- dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi1);
+ dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1);
ret = -EINVAL;
goto err;
}
@@ -568,7 +571,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
/* Get msi_0 IRQ number */
msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
if (msi->irq_msi0 < 0) {
- dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi0);
+ dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0);
ret = -EINVAL;
goto err;
}
@@ -579,7 +582,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
/* Check for msii_present bit */
ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
if (!ret) {
- dev_err(pcie->dev, "MSI not present\n");
+ dev_err(dev, "MSI not present\n");
ret = -EIO;
goto err;
}
@@ -628,13 +631,14 @@ err:
static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
u32 breg_val, ecam_val, first_busno = 0;
int err;
breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
if (!breg_val) {
- dev_err(pcie->dev, "BREG is not present\n");
+ dev_err(dev, "BREG is not present\n");
return -EIO;
}
@@ -665,7 +669,7 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
if (!ecam_val) {
- dev_err(pcie->dev, "ECAM is not present\n");
+ dev_err(dev, "ECAM is not present\n");
return -EIO;
}
@@ -692,23 +696,23 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
if (nwl_pcie_link_up(pcie))
- dev_info(pcie->dev, "Link is UP\n");
+ dev_info(dev, "Link is UP\n");
else
- dev_info(pcie->dev, "Link is DOWN\n");
+ dev_info(dev, "Link is DOWN\n");
/* Get misc IRQ number */
pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
if (pcie->irq_misc < 0) {
- dev_err(&pdev->dev, "failed to get misc IRQ %d\n",
+ dev_err(dev, "failed to get misc IRQ %d\n",
pcie->irq_misc);
return -EINVAL;
}
- err = devm_request_irq(pcie->dev, pcie->irq_misc,
+ err = devm_request_irq(dev, pcie->irq_misc,
nwl_pcie_misc_handler, IRQF_SHARED,
"nwl_pcie:misc", pcie);
if (err) {
- dev_err(pcie->dev, "fail to register misc IRQ#%d\n",
+ dev_err(dev, "fail to register misc IRQ#%d\n",
pcie->irq_misc);
return err;
}
@@ -744,31 +748,32 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
struct platform_device *pdev)
{
- struct device_node *node = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
struct resource *res;
const char *type;
/* Check for device type */
type = of_get_property(node, "device_type", NULL);
if (!type || strcmp(type, "pci")) {
- dev_err(pcie->dev, "invalid \"device_type\" %s\n", type);
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
return -EINVAL;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
- pcie->breg_base = devm_ioremap_resource(pcie->dev, res);
+ pcie->breg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->breg_base))
return PTR_ERR(pcie->breg_base);
pcie->phys_breg_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
- pcie->pcireg_base = devm_ioremap_resource(pcie->dev, res);
+ pcie->pcireg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->pcireg_base))
return PTR_ERR(pcie->pcireg_base);
pcie->phys_pcie_reg_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
- pcie->ecam_base = devm_ioremap_resource(pcie->dev, res);
+ pcie->ecam_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->ecam_base))
return PTR_ERR(pcie->ecam_base);
pcie->phys_ecam_base = res->start;
@@ -776,8 +781,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
/* Get intx IRQ number */
pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
if (pcie->irq_intx < 0) {
- dev_err(&pdev->dev, "failed to get intx IRQ %d\n",
- pcie->irq_intx);
+ dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
return -EINVAL;
}
@@ -794,7 +798,8 @@ static const struct of_device_id nwl_pcie_of_match[] = {
static int nwl_pcie_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
struct nwl_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
@@ -802,42 +807,42 @@ static int nwl_pcie_probe(struct platform_device *pdev)
resource_size_t iobase = 0;
LIST_HEAD(res);
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &pdev->dev;
+ pcie->dev = dev;
pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
err = nwl_pcie_parse_dt(pcie, pdev);
if (err) {
- dev_err(pcie->dev, "Parsing DT failed\n");
+ dev_err(dev, "Parsing DT failed\n");
return err;
}
err = nwl_pcie_bridge_init(pcie);
if (err) {
- dev_err(pcie->dev, "HW Initialization failed\n");
+ dev_err(dev, "HW Initialization failed\n");
return err;
}
err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
if (err) {
- dev_err(pcie->dev, "Getting bridge resources failed\n");
+ dev_err(dev, "Getting bridge resources failed\n");
return err;
}
- err = devm_request_pci_bus_resources(pcie->dev, &res);
+ err = devm_request_pci_bus_resources(dev, &res);
if (err)
goto error;
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
- dev_err(pcie->dev, "Failed creating IRQ Domain\n");
+ dev_err(dev, "Failed creating IRQ Domain\n");
goto error;
}
- bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
+ bus = pci_create_root_bus(dev, pcie->root_busno,
&nwl_pcie_ops, pcie, &res);
if (!bus) {
err = -ENOMEM;
@@ -847,8 +852,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = nwl_pcie_enable_msi(pcie, bus);
if (err < 0) {
- dev_err(&pdev->dev,
- "failed to enable MSI support: %d\n", err);
+ dev_err(dev, "failed to enable MSI support: %d\n", err);
goto error;
}
}
@@ -857,7 +861,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
- platform_set_drvdata(pdev, pcie);
return 0;
error:
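The nwl hunks keep to_platform_device() only where a platform_device is genuinely required, such as named IRQ lookup, and use the cached dev everywhere else. A hedged kernel-context sketch of that lookup/request pairing; example_request_named_irq() is hypothetical, the APIs are standard:

static int example_request_named_irq(struct platform_device *pdev,
				     const char *name, irq_handler_t handler,
				     void *cookie)
{
	struct device *dev = &pdev->dev;
	int irq;

	irq = platform_get_irq_byname(pdev, name);
	if (irq < 0) {
		dev_err(dev, "failed to get %s IRQ: %d\n", name, irq);
		return irq;
	}

	/* Device-managed: freed automatically when the device unbinds. */
	return devm_request_irq(dev, irq, handler, IRQF_SHARED,
				dev_name(dev), cookie);
}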
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index be568039d9d0..c8616fadccf1 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -140,10 +140,11 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
*/
static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
{
+ struct device *dev = port->dev;
unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
- dev_dbg(port->dev, "Requester ID %lu\n",
+ dev_dbg(dev, "Requester ID %lu\n",
val & XILINX_PCIE_RPEFR_REQ_ID);
pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
XILINX_PCIE_REG_RPEFR);
@@ -228,11 +229,10 @@ static void xilinx_pcie_destroy_msi(unsigned int irq)
/**
* xilinx_pcie_assign_msi - Allocate MSI number
- * @port: PCIe port structure
*
* Return: A valid IRQ on success and error value on failure.
*/
-static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
+static int xilinx_pcie_assign_msi(void)
{
int pos;
@@ -275,7 +275,7 @@ static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
struct msi_msg msg;
phys_addr_t msg_addr;
- hwirq = xilinx_pcie_assign_msi(port);
+ hwirq = xilinx_pcie_assign_msi();
if (hwirq < 0)
return hwirq;
@@ -383,6 +383,7 @@ static const struct irq_domain_ops intx_domain_ops = {
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+ struct device *dev = port->dev;
u32 val, mask, status, msi_data;
/* Read interrupt decode and mask registers */
@@ -394,32 +395,32 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
return IRQ_NONE;
if (status & XILINX_PCIE_INTR_LINK_DOWN)
- dev_warn(port->dev, "Link Down\n");
+ dev_warn(dev, "Link Down\n");
if (status & XILINX_PCIE_INTR_ECRC_ERR)
- dev_warn(port->dev, "ECRC failed\n");
+ dev_warn(dev, "ECRC failed\n");
if (status & XILINX_PCIE_INTR_STR_ERR)
- dev_warn(port->dev, "Streaming error\n");
+ dev_warn(dev, "Streaming error\n");
if (status & XILINX_PCIE_INTR_HOT_RESET)
- dev_info(port->dev, "Hot reset\n");
+ dev_info(dev, "Hot reset\n");
if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
- dev_warn(port->dev, "ECAM access timeout\n");
+ dev_warn(dev, "ECAM access timeout\n");
if (status & XILINX_PCIE_INTR_CORRECTABLE) {
- dev_warn(port->dev, "Correctable error message\n");
+ dev_warn(dev, "Correctable error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
if (status & XILINX_PCIE_INTR_NONFATAL) {
- dev_warn(port->dev, "Non fatal error message\n");
+ dev_warn(dev, "Non fatal error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
if (status & XILINX_PCIE_INTR_FATAL) {
- dev_warn(port->dev, "Fatal error message\n");
+ dev_warn(dev, "Fatal error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
@@ -429,7 +430,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
/* Check whether interrupt valid */
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
- dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+ dev_warn(dev, "RP Intr FIFO1 read error\n");
goto error;
}
@@ -451,7 +452,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
- dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+ dev_warn(dev, "RP Intr FIFO1 read error\n");
goto error;
}
@@ -471,31 +472,31 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
- dev_warn(port->dev, "Slave unsupported request\n");
+ dev_warn(dev, "Slave unsupported request\n");
if (status & XILINX_PCIE_INTR_SLV_UNEXP)
- dev_warn(port->dev, "Slave unexpected completion\n");
+ dev_warn(dev, "Slave unexpected completion\n");
if (status & XILINX_PCIE_INTR_SLV_COMPL)
- dev_warn(port->dev, "Slave completion timeout\n");
+ dev_warn(dev, "Slave completion timeout\n");
if (status & XILINX_PCIE_INTR_SLV_ERRP)
- dev_warn(port->dev, "Slave Error Poison\n");
+ dev_warn(dev, "Slave Error Poison\n");
if (status & XILINX_PCIE_INTR_SLV_CMPABT)
- dev_warn(port->dev, "Slave Completer Abort\n");
+ dev_warn(dev, "Slave Completer Abort\n");
if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
- dev_warn(port->dev, "Slave Illegal Burst\n");
+ dev_warn(dev, "Slave Illegal Burst\n");
if (status & XILINX_PCIE_INTR_MST_DECERR)
- dev_warn(port->dev, "Master decode error\n");
+ dev_warn(dev, "Master decode error\n");
if (status & XILINX_PCIE_INTR_MST_SLVERR)
- dev_warn(port->dev, "Master slave error\n");
+ dev_warn(dev, "Master slave error\n");
if (status & XILINX_PCIE_INTR_MST_ERRP)
- dev_warn(port->dev, "Master error poison\n");
+ dev_warn(dev, "Master error poison\n");
error:
/* Clear the Interrupt Decode register */
@@ -554,10 +555,12 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
*/
static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
{
+ struct device *dev = port->dev;
+
if (xilinx_pcie_link_is_up(port))
- dev_info(port->dev, "PCIe Link is UP\n");
+ dev_info(dev, "PCIe Link is UP\n");
else
- dev_info(port->dev, "PCIe Link is DOWN\n");
+ dev_info(dev, "PCIe Link is DOWN\n");
/* Disable all interrupts */
pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
@@ -627,8 +630,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
*/
static int xilinx_pcie_probe(struct platform_device *pdev)
{
- struct xilinx_pcie_port *port;
struct device *dev = &pdev->dev;
+ struct xilinx_pcie_port *port;
struct pci_bus *bus;
int err;
resource_size_t iobase = 0;
@@ -668,15 +671,14 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
if (err)
goto error;
- bus = pci_create_root_bus(&pdev->dev, 0,
- &xilinx_pcie_ops, port, &res);
+ bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res);
if (!bus) {
err = -ENOMEM;
goto error;
}
#ifdef CONFIG_PCI_MSI
- xilinx_pcie_msi_chip.dev = port->dev;
+ xilinx_pcie_msi_chip.dev = dev;
bus->msi = &xilinx_pcie_msi_chip;
#endif
pci_scan_child_bus(bus);
@@ -685,8 +687,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif
pci_bus_add_devices(bus);
- platform_set_drvdata(pdev, port);
-
return 0;
error:
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index e6245b03f0a1..56efaf72d08e 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -22,6 +22,12 @@
#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation"
#define DRIVER_DESC "PowerPC PowerNV PCI Hotplug Driver"
+struct pnv_php_event {
+ bool added;
+ struct pnv_php_slot *php_slot;
+ struct work_struct work;
+};
+
static LIST_HEAD(pnv_php_slot_list);
static DEFINE_SPINLOCK(pnv_php_lock);
@@ -29,12 +35,40 @@ static void pnv_php_register(struct device_node *dn);
static void pnv_php_unregister_one(struct device_node *dn);
static void pnv_php_unregister(struct device_node *dn);
+static void pnv_php_disable_irq(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ u16 ctrl;
+
+ if (php_slot->irq > 0) {
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl);
+ ctrl &= ~(PCI_EXP_SLTCTL_HPIE |
+ PCI_EXP_SLTCTL_PDCE |
+ PCI_EXP_SLTCTL_DLLSCE);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl);
+
+ free_irq(php_slot->irq, php_slot);
+ php_slot->irq = 0;
+ }
+
+ if (php_slot->wq) {
+ destroy_workqueue(php_slot->wq);
+ php_slot->wq = NULL;
+ }
+
+ if (pdev->msix_enabled)
+ pci_disable_msix(pdev);
+ else if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+}
+
static void pnv_php_free_slot(struct kref *kref)
{
struct pnv_php_slot *php_slot = container_of(kref,
struct pnv_php_slot, kref);
WARN_ON(!list_empty(&php_slot->children));
+ pnv_php_disable_irq(php_slot);
kfree(php_slot->name);
kfree(php_slot);
}
@@ -122,7 +156,7 @@ static void pnv_php_detach_device_nodes(struct device_node *parent)
of_node_put(dn);
refcount = atomic_read(&dn->kobj.kref.refcount);
- if (unlikely(refcount != 1))
+ if (refcount != 1)
pr_warn("Invalid refcount %d on <%s>\n",
refcount, of_node_full_name(dn));
@@ -184,11 +218,11 @@ static int pnv_php_populate_changeset(struct of_changeset *ocs,
for_each_child_of_node(dn, child) {
ret = of_changeset_attach_node(ocs, child);
- if (unlikely(ret))
+ if (ret)
break;
ret = pnv_php_populate_changeset(ocs, child);
- if (unlikely(ret))
+ if (ret)
break;
}
@@ -201,7 +235,7 @@ static void *pnv_php_add_one_pdn(struct device_node *dn, void *data)
struct pci_dn *pdn;
pdn = pci_add_device_node_info(hose, dn);
- if (unlikely(!pdn))
+ if (!pdn)
return ERR_PTR(-ENOMEM);
return NULL;
@@ -224,21 +258,21 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
* fits the real size.
*/
fdt1 = kzalloc(0x10000, GFP_KERNEL);
- if (unlikely(!fdt1)) {
+ if (!fdt1) {
ret = -ENOMEM;
dev_warn(&php_slot->pdev->dev, "Cannot alloc FDT blob\n");
goto out;
}
ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d getting FDT blob\n",
ret);
goto free_fdt1;
}
fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL);
- if (unlikely(!fdt)) {
+ if (!fdt) {
ret = -ENOMEM;
dev_warn(&php_slot->pdev->dev, "Cannot %d bytes memory\n",
fdt_totalsize(fdt1));
@@ -248,7 +282,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
/* Unflatten device tree blob */
memcpy(fdt, fdt1, fdt_totalsize(fdt1));
dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
- if (unlikely(!dt)) {
+ if (!dt) {
ret = -EINVAL;
dev_warn(&php_slot->pdev->dev, "Cannot unflatten FDT\n");
goto free_fdt;
@@ -258,7 +292,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
of_changeset_init(&php_slot->ocs);
pnv_php_reverse_nodes(php_slot->dn);
ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn);
- if (unlikely(ret)) {
+ if (ret) {
pnv_php_reverse_nodes(php_slot->dn);
dev_warn(&php_slot->pdev->dev, "Error %d populating changeset\n",
ret);
@@ -267,7 +301,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
php_slot->dn->child = NULL;
ret = of_changeset_apply(&php_slot->ocs);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d applying changeset\n",
ret);
goto destroy_changeset;
@@ -301,7 +335,7 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
int ret;
ret = pnv_pci_set_power_state(php_slot->id, state, &msg);
- if (likely(ret > 0)) {
+ if (ret > 0) {
if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle ||
be64_to_cpu(msg.params[2]) != state ||
be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
@@ -311,7 +345,7 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
be64_to_cpu(msg.params[3]));
return -ENOMSG;
}
- } else if (unlikely(ret < 0)) {
+ } else if (ret < 0) {
dev_warn(&php_slot->pdev->dev, "Error %d powering %s\n",
ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
return ret;
@@ -338,7 +372,7 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
* be on.
*/
ret = pnv_pci_get_power_state(php_slot->id, &power_state);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d getting power status\n",
ret);
} else {
@@ -360,7 +394,7 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
* get that, it will fall back to being empty.
*/
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
- if (likely(ret >= 0)) {
+ if (ret >= 0) {
*state = presence;
slot->info->adapter_status = presence;
ret = 0;
@@ -393,7 +427,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
/* Retrieve slot presence status */
ret = pnv_php_get_adapter_state(slot, &presence);
- if (unlikely(ret))
+ if (ret)
return ret;
/* Proceed if there is nothing behind the slot */
@@ -414,7 +448,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
php_slot->power_state_check = true;
ret = pnv_php_get_power_state(slot, &power_status);
- if (unlikely(ret))
+ if (ret)
return ret;
if (power_status != OPAL_PCI_SLOT_POWER_ON)
@@ -423,7 +457,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
/* Check the power status. Scan the slot if it is already on */
ret = pnv_php_get_power_state(slot, &power_status);
- if (unlikely(ret))
+ if (ret)
return ret;
if (power_status == OPAL_PCI_SLOT_POWER_ON)
@@ -431,7 +465,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
/* Power is off, turn it on and then scan the slot */
ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
- if (unlikely(ret))
+ if (ret)
return ret;
scan:
@@ -513,29 +547,30 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
struct pci_bus *bus;
const char *label;
uint64_t id;
+ int ret;
- label = of_get_property(dn, "ibm,slot-label", NULL);
- if (unlikely(!label))
+ ret = of_property_read_string(dn, "ibm,slot-label", &label);
+ if (ret)
return NULL;
if (pnv_pci_get_slot_id(dn, &id))
return NULL;
bus = pci_find_bus_by_node(dn);
- if (unlikely(!bus))
+ if (!bus)
return NULL;
php_slot = kzalloc(sizeof(*php_slot), GFP_KERNEL);
- if (unlikely(!php_slot))
+ if (!php_slot)
return NULL;
php_slot->name = kstrdup(label, GFP_KERNEL);
- if (unlikely(!php_slot->name)) {
+ if (!php_slot->name) {
kfree(php_slot);
return NULL;
}
- if (likely(dn->child && PCI_DN(dn->child)))
+ if (dn->child && PCI_DN(dn->child))
php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
else
php_slot->slot_no = -1; /* Placeholder slot */
@@ -567,7 +602,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
/* Check if the slot is registered or not */
parent = pnv_php_find_slot(php_slot->dn);
- if (unlikely(parent)) {
+ if (parent) {
pnv_php_put_slot(parent);
return -EEXIST;
}
@@ -575,7 +610,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
/* Register PCI slot */
ret = pci_hp_register(&php_slot->slot, php_slot->bus,
php_slot->slot_no, php_slot->name);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d registering slot\n",
ret);
return ret;
@@ -609,33 +644,213 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
return 0;
}
+static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ struct msix_entry entry;
+ int nr_entries, ret;
+ u16 pcie_flag;
+
+ /* Get total number of MSIx entries */
+ nr_entries = pci_msix_vec_count(pdev);
+ if (nr_entries < 0)
+ return nr_entries;
+
+ /* Check hotplug MSIx entry is in range */
+ pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &pcie_flag);
+ entry.entry = (pcie_flag & PCI_EXP_FLAGS_IRQ) >> 9;
+ if (entry.entry >= nr_entries)
+ return -ERANGE;
+
+ /* Enable MSIx */
+ ret = pci_enable_msix_exact(pdev, &entry, 1);
+ if (ret) {
+ dev_warn(&pdev->dev, "Error %d enabling MSIx\n", ret);
+ return ret;
+ }
+
+ return entry.vector;
+}
+
+static void pnv_php_event_handler(struct work_struct *work)
+{
+ struct pnv_php_event *event =
+ container_of(work, struct pnv_php_event, work);
+ struct pnv_php_slot *php_slot = event->php_slot;
+
+ if (event->added)
+ pnv_php_enable_slot(&php_slot->slot);
+ else
+ pnv_php_disable_slot(&php_slot->slot);
+
+ kfree(event);
+}
+
+static irqreturn_t pnv_php_interrupt(int irq, void *data)
+{
+ struct pnv_php_slot *php_slot = data;
+ struct pci_dev *pchild, *pdev = php_slot->pdev;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ struct pnv_php_event *event;
+ u16 sts, lsts;
+ u8 presence;
+ bool added;
+ unsigned long flags;
+ int ret;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts);
+ sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts);
+ if (sts & PCI_EXP_SLTSTA_DLLSC) {
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts);
+ added = !!(lsts & PCI_EXP_LNKSTA_DLLLA);
+ } else if (sts & PCI_EXP_SLTSTA_PDC) {
+ ret = pnv_pci_get_presence_state(php_slot->id, &presence);
+ if (ret < 0)
+ return IRQ_HANDLED;
+ added = !!(presence == OPAL_PCI_SLOT_PRESENT);
+ } else {
+ return IRQ_NONE;
+ }
+
+ /* Freeze the removed PE to avoid unexpected error reporting */
+ if (!added) {
+ pchild = list_first_entry_or_null(&php_slot->bus->devices,
+ struct pci_dev, bus_list);
+ edev = pchild ? pci_dev_to_eeh_dev(pchild) : NULL;
+ pe = edev ? edev->pe : NULL;
+ if (pe) {
+ eeh_serialize_lock(&flags);
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_serialize_unlock(flags);
+ eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
+ }
+ }
+
+ /*
+ * The PE is left in frozen state if the event is missed. It's
+ * fine as the PCI devices (PE) aren't functional any more.
+ */
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event) {
+ dev_warn(&pdev->dev, "PCI slot [%s] missed hotplug event 0x%04x\n",
+ php_slot->name, sts);
+ return IRQ_HANDLED;
+ }
+
+ dev_info(&pdev->dev, "PCI slot [%s] %s (IRQ: %d)\n",
+ php_slot->name, added ? "added" : "removed", irq);
+ INIT_WORK(&event->work, pnv_php_event_handler);
+ event->added = added;
+ event->php_slot = php_slot;
+ queue_work(php_slot->wq, &event->work);
+
+ return IRQ_HANDLED;
+}
+
+static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ u16 sts, ctrl;
+ int ret;
+
+ /* Allocate workqueue */
+ php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
+ if (!php_slot->wq) {
+ dev_warn(&pdev->dev, "Cannot alloc workqueue\n");
+ pnv_php_disable_irq(php_slot);
+ return;
+ }
+
+ /* Clear pending interrupts */
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts);
+ sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts);
+
+ /* Request the interrupt */
+ ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
+ php_slot->name, php_slot);
+ if (ret) {
+ pnv_php_disable_irq(php_slot);
+ dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq);
+ return;
+ }
+
+ /* Enable the interrupts */
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl);
+ ctrl |= (PCI_EXP_SLTCTL_HPIE |
+ PCI_EXP_SLTCTL_PDCE |
+ PCI_EXP_SLTCTL_DLLSCE);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl);
+
+ /* The interrupt is initialized successfully when @irq is valid */
+ php_slot->irq = irq;
+}
+
+static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ int irq, ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_warn(&pdev->dev, "Error %d enabling device\n", ret);
+ return;
+ }
+
+ pci_set_master(pdev);
+
+ /* Enable MSIx interrupt */
+ irq = pnv_php_enable_msix(php_slot);
+ if (irq > 0) {
+ pnv_php_init_irq(php_slot, irq);
+ return;
+ }
+
+ /*
+ * Use MSI if MSIx doesn't work. Fall back to legacy INTx
+ * if MSI doesn't work either.
+ */
+ ret = pci_enable_msi(pdev);
+ if (!ret || pdev->irq) {
+ irq = pdev->irq;
+ pnv_php_init_irq(php_slot, irq);
+ }
+}
+
static int pnv_php_register_one(struct device_node *dn)
{
struct pnv_php_slot *php_slot;
- const __be32 *prop32;
+ u32 prop32;
int ret;
/* Check if it's a hotpluggable slot */
- prop32 = of_get_property(dn, "ibm,slot-pluggable", NULL);
- if (!prop32 || !of_read_number(prop32, 1))
+ ret = of_property_read_u32(dn, "ibm,slot-pluggable", &prop32);
+ if (ret || !prop32)
return -ENXIO;
- prop32 = of_get_property(dn, "ibm,reset-by-firmware", NULL);
- if (!prop32 || !of_read_number(prop32, 1))
+ ret = of_property_read_u32(dn, "ibm,reset-by-firmware", &prop32);
+ if (ret || !prop32)
return -ENXIO;
php_slot = pnv_php_alloc_slot(dn);
- if (unlikely(!php_slot))
+ if (!php_slot)
return -ENODEV;
ret = pnv_php_register_slot(php_slot);
- if (unlikely(ret))
+ if (ret)
goto free_slot;
ret = pnv_php_enable(php_slot, false);
- if (unlikely(ret))
+ if (ret)
goto unregister_slot;
+ /* Enable interrupt if the slot supports surprise hotplug */
+ ret = of_property_read_u32(dn, "ibm,slot-surprise-pluggable", &prop32);
+ if (!ret && prop32)
+ pnv_php_enable_irq(php_slot);
+
return 0;
unregister_slot:
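pnv_php_enable_msix() above locates the hotplug MSI-X entry via the Interrupt Message Number field of the PCI Express Capabilities register: PCI_EXP_FLAGS_IRQ masks bits 13:9 (0x3e00), hence the >> 9. A self-contained check of that arithmetic with a sample register value:

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_FLAGS_IRQ 0x3e00	/* Interrupt Message Number, bits 13:9 */

int main(void)
{
	uint16_t pcie_flag = 0x0600;	/* sample capability register value */
	unsigned int entry = (pcie_flag & PCI_EXP_FLAGS_IRQ) >> 9;

	printf("hotplug events target MSI-X entry %u\n", entry);	/* prints 3 */
	return 0;
}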
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index bfdd0744b686..ad70507cfb56 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -610,6 +610,7 @@ static int msi_verify_entries(struct pci_dev *dev)
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
* @nvec: number of interrupts to allocate
+ * @affinity: flag to indicate cpu irq affinity mask should be set
*
* Setup the MSI capability structure of the device with the requested
* number of interrupts. A return value of zero indicates the successful
@@ -752,6 +753,7 @@ static void msix_program_entries(struct pci_dev *dev,
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
+ * @affinity: flag to indicate cpu irq affinity mask should be set
*
* Setup the MSI-X capability structure of device function with a
* single MSI-X irq. A return of zero indicates the successful setup of
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 55f453de562e..c7f3408e3148 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
return intel_mid_pci_set_power_state(pdev, state);
}
+static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
+{
+ return intel_mid_pci_get_power_state(pdev);
+}
+
static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
{
return PCI_D3hot;
@@ -52,6 +57,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
static struct pci_platform_pm_ops mid_pci_platform_pm = {
.is_manageable = mid_pci_power_manageable,
.set_state = mid_pci_set_power_state,
+ .get_state = mid_pci_get_power_state,
.choose_state = mid_pci_choose_state,
.sleep_wake = mid_pci_sleep_wake,
.run_wake = mid_pci_run_wake,
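The pci-mid.c change wires a new ->get_state callback into its pci_platform_pm_ops table. The function-pointer ops pattern, reduced to a runnable stub; platform_pm_ops, stub_*() and the opaque pdev are hypothetical stand-ins for the kernel types:

#include <stdio.h>

typedef int pci_power_t;
#define PCI_D3hot 3

struct pdev;				/* opaque device handle */

struct platform_pm_ops {		/* shape of pci_platform_pm_ops */
	int (*set_state)(struct pdev *, pci_power_t);
	pci_power_t (*get_state)(struct pdev *);
};

static int stub_set_state(struct pdev *p, pci_power_t s)
{
	(void)p; (void)s;
	return 0;
}

static pci_power_t stub_get_state(struct pdev *p)
{
	(void)p;
	return PCI_D3hot;
}

static const struct platform_pm_ops ops = {
	.set_state = stub_set_state,
	.get_state = stub_get_state,	/* the newly wired callback */
};

int main(void)
{
	printf("reported state: D%d\n", ops.get_state(NULL));
	return 0;
}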
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index cffc1c095519..c232729f5b1b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -834,6 +834,17 @@ static void quirk_amd_ioapic(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
#endif /* CONFIG_X86_IO_APIC */
+#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
+
+static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
+{
+ /* Fix for improper SRIOV configuration on Cavium cn88xx RNM device */
+ if (dev->subsystem_device == 0xa118)
+ dev->sriov->link = dev->devfn;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
+#endif
+
/*
* Some settings of MMRBC can lead to data corruption so block changes.
* See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 66c4d8f42233..9526e341988b 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -121,6 +121,14 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
return -EINVAL;
}
+ /*
+ * If we have a shadow copy in RAM, the PCI device doesn't respond
+ * to the shadow range, so we don't need to claim it, and upstream
+ * bridges don't need to route the range to the device.
+ */
+ if (res->flags & IORESOURCE_ROM_SHADOW)
+ return 0;
+
root = pci_find_parent_resource(dev, res);
if (!root) {
dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index 44cfc4416e54..71ace6910d7e 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -31,13 +31,6 @@ static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
return 0;
}
-static void
-assabet_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
-{
- state->vs_3v = 1; /* Can only apply 3.3V on Assabet. */
- state->vs_Xv = 0;
-}
-
static int
assabet_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
{
@@ -90,7 +83,7 @@ static void assabet_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
static struct pcmcia_low_level assabet_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = assabet_pcmcia_hw_init,
- .socket_state = assabet_pcmcia_socket_state,
+ .socket_state = soc_common_cf_socket_state,
.configure_socket = assabet_pcmcia_configure_socket,
.socket_suspend = assabet_pcmcia_socket_suspend,
};
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index b3774e5d0396..c3f67363f6a1 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -45,13 +45,6 @@ static void cerf_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
gpio_free(CERF_GPIO_CF_RESET);
}
-static void
-cerf_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
-{
- state->vs_3v = 1;
- state->vs_Xv = 0;
-}
-
static int
cerf_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
const socket_state_t *state)
@@ -77,7 +70,7 @@ static struct pcmcia_low_level cerf_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = cerf_pcmcia_hw_init,
.hw_shutdown = cerf_pcmcia_hw_shutdown,
- .socket_state = cerf_pcmcia_socket_state,
+ .socket_state = soc_common_cf_socket_state,
.configure_socket = cerf_pcmcia_configure_socket,
};
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index d5ca760c4eb2..b6b316de055c 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -33,6 +33,7 @@
#include <linux/cpufreq.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -42,6 +43,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
@@ -79,6 +81,41 @@ EXPORT_SYMBOL(soc_pcmcia_debug);
#define to_soc_pcmcia_socket(x) \
container_of(x, struct soc_pcmcia_socket, socket)
+int soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt,
+ struct soc_pcmcia_regulator *r, int v)
+{
+ bool on;
+ int ret;
+
+ if (!r->reg)
+ return 0;
+
+ on = v != 0;
+ if (r->on == on)
+ return 0;
+
+ if (on) {
+ ret = regulator_set_voltage(r->reg, v * 100000, v * 100000);
+ if (ret) {
+ int vout = regulator_get_voltage(r->reg) / 100000;
+
+ dev_warn(&skt->socket.dev,
+ "CS requested %s=%u.%uV, applying %u.%uV\n",
+ r == &skt->vcc ? "Vcc" : "Vpp",
+ v / 10, v % 10, vout / 10, vout % 10);
+ }
+
+ ret = regulator_enable(r->reg);
+ } else {
+ ret = regulator_disable(r->reg);
+ }
+ if (ret == 0)
+ r->on = on;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(soc_pcmcia_regulator_set);
+
static unsigned short
calc_speed(unsigned short *spds, int num, unsigned short dflt)
{
@@ -111,12 +148,9 @@ static void __soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt,
{
unsigned int i;
- for (i = 0; i < nr; i++) {
+ for (i = 0; i < nr; i++)
if (skt->stat[i].irq)
free_irq(skt->stat[i].irq, skt);
- if (gpio_is_valid(skt->stat[i].gpio))
- gpio_free(skt->stat[i].gpio);
- }
if (skt->ops->hw_shutdown)
skt->ops->hw_shutdown(skt);
@@ -129,6 +163,30 @@ static void soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
__soc_pcmcia_hw_shutdown(skt, ARRAY_SIZE(skt->stat));
}
+int soc_pcmcia_request_gpiods(struct soc_pcmcia_socket *skt)
+{
+ struct device *dev = skt->socket.dev.parent;
+ struct gpio_desc *desc;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(skt->stat); i++) {
+ if (!skt->stat[i].name)
+ continue;
+
+ desc = devm_gpiod_get(dev, skt->stat[i].name, GPIOD_IN);
+ if (IS_ERR(desc)) {
+ dev_err(dev, "Failed to get GPIO for %s: %ld\n",
+ skt->stat[i].name, PTR_ERR(desc));
+ return PTR_ERR(desc);
+ }
+
+ skt->stat[i].desc = desc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(soc_pcmcia_request_gpiods);
+
static int soc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret = 0, i;
@@ -143,21 +201,32 @@ static int soc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
for (i = 0; i < ARRAY_SIZE(skt->stat); i++) {
if (gpio_is_valid(skt->stat[i].gpio)) {
- int irq;
+ unsigned long flags = GPIOF_IN;
- ret = gpio_request_one(skt->stat[i].gpio, GPIOF_IN,
- skt->stat[i].name);
+ /* CD is active low by default */
+ if (i == SOC_STAT_CD)
+ flags |= GPIOF_ACTIVE_LOW;
+
+ ret = devm_gpio_request_one(skt->socket.dev.parent,
+ skt->stat[i].gpio, flags,
+ skt->stat[i].name);
if (ret) {
__soc_pcmcia_hw_shutdown(skt, i);
return ret;
}
- irq = gpio_to_irq(skt->stat[i].gpio);
+ skt->stat[i].desc = gpio_to_desc(skt->stat[i].gpio);
+ }
+
+ if (i < SOC_STAT_VS1 && skt->stat[i].desc) {
+ int irq = gpiod_to_irq(skt->stat[i].desc);
- if (i == SOC_STAT_RDY)
- skt->socket.pci_irq = irq;
- else
- skt->stat[i].irq = irq;
+ if (irq > 0) {
+ if (i == SOC_STAT_RDY)
+ skt->socket.pci_irq = irq;
+ else
+ skt->stat[i].irq = irq;
+ }
}
if (skt->stat[i].irq) {
@@ -166,8 +235,6 @@ static int soc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
IRQF_TRIGGER_NONE,
skt->stat[i].name, skt);
if (ret) {
- if (gpio_is_valid(skt->stat[i].gpio))
- gpio_free(skt->stat[i].gpio);
__soc_pcmcia_hw_shutdown(skt, i);
return ret;
}
@@ -197,6 +264,18 @@ static void soc_pcmcia_hw_disable(struct soc_pcmcia_socket *skt)
irq_set_irq_type(skt->stat[i].irq, IRQ_TYPE_NONE);
}
+/*
+ * The CF 3.0 specification says that cards tie VS1 to ground and leave
+ * VS2 open. Many implementations do not wire up the VS signals, so we
+ * provide hard-coded values as per the CF 3.0 spec.
+ */
+void soc_common_cf_socket_state(struct soc_pcmcia_socket *skt,
+ struct pcmcia_state *state)
+{
+ state->vs_3v = 1;
+}
+EXPORT_SYMBOL_GPL(soc_common_cf_socket_state);
+
static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
{
struct pcmcia_state state;
@@ -208,17 +287,18 @@ static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
state.bvd1 = 1;
state.bvd2 = 1;
- /* CD is active low by default */
- if (gpio_is_valid(skt->stat[SOC_STAT_CD].gpio))
- state.detect = !gpio_get_value(skt->stat[SOC_STAT_CD].gpio);
-
- /* RDY and BVD are active high by default */
- if (gpio_is_valid(skt->stat[SOC_STAT_RDY].gpio))
- state.ready = !!gpio_get_value(skt->stat[SOC_STAT_RDY].gpio);
- if (gpio_is_valid(skt->stat[SOC_STAT_BVD1].gpio))
- state.bvd1 = !!gpio_get_value(skt->stat[SOC_STAT_BVD1].gpio);
- if (gpio_is_valid(skt->stat[SOC_STAT_BVD2].gpio))
- state.bvd2 = !!gpio_get_value(skt->stat[SOC_STAT_BVD2].gpio);
+ if (skt->stat[SOC_STAT_CD].desc)
+ state.detect = !!gpiod_get_value(skt->stat[SOC_STAT_CD].desc);
+ if (skt->stat[SOC_STAT_RDY].desc)
+ state.ready = !!gpiod_get_value(skt->stat[SOC_STAT_RDY].desc);
+ if (skt->stat[SOC_STAT_BVD1].desc)
+ state.bvd1 = !!gpiod_get_value(skt->stat[SOC_STAT_BVD1].desc);
+ if (skt->stat[SOC_STAT_BVD2].desc)
+ state.bvd2 = !!gpiod_get_value(skt->stat[SOC_STAT_BVD2].desc);
+ if (skt->stat[SOC_STAT_VS1].desc)
+ state.vs_3v = !!gpiod_get_value(skt->stat[SOC_STAT_VS1].desc);
+ if (skt->stat[SOC_STAT_VS2].desc)
+ state.vs_Xv = !!gpiod_get_value(skt->stat[SOC_STAT_VS2].desc);
skt->ops->socket_state(skt, &state);
@@ -257,7 +337,30 @@ static int soc_common_pcmcia_config_skt(
int ret;
ret = skt->ops->configure_socket(skt, state);
+ if (ret < 0) {
+ pr_err("soc_common_pcmcia: unable to configure socket %d\n",
+ skt->nr);
+ /* restore the previous state */
+ WARN_ON(skt->ops->configure_socket(skt, &skt->cs_state));
+ return ret;
+ }
+
if (ret == 0) {
+ struct gpio_desc *descs[2];
+ int values[2], n = 0;
+
+ if (skt->gpio_reset) {
+ descs[n] = skt->gpio_reset;
+ values[n++] = !!(state->flags & SS_RESET);
+ }
+ if (skt->gpio_bus_enable) {
+ descs[n] = skt->gpio_bus_enable;
+ values[n++] = !!(state->flags & SS_OUTPUT_ENA);
+ }
+
+ if (n)
+ gpiod_set_array_value_cansleep(n, descs, values);
+
/*
* This really needs a better solution. The IRQ
* may or may not be claimed by the driver.
@@ -274,10 +377,6 @@ static int soc_common_pcmcia_config_skt(
skt->cs_state = *state;
}
- if (ret < 0)
- printk(KERN_ERR "soc_common_pcmcia: unable to configure "
- "socket %d\n", skt->nr);
-
return ret;
}
@@ -637,54 +736,19 @@ static struct pccard_operations soc_common_pcmcia_operations = {
};
-static LIST_HEAD(soc_pcmcia_sockets);
-static DEFINE_MUTEX(soc_pcmcia_sockets_lock);
-
#ifdef CONFIG_CPU_FREQ
-static int
-soc_pcmcia_notifier(struct notifier_block *nb, unsigned long val, void *data)
+static int soc_common_pcmcia_cpufreq_nb(struct notifier_block *nb,
+ unsigned long val, void *data)
{
- struct soc_pcmcia_socket *skt;
+ struct soc_pcmcia_socket *skt = container_of(nb, struct soc_pcmcia_socket, cpufreq_nb);
struct cpufreq_freqs *freqs = data;
- int ret = 0;
-
- mutex_lock(&soc_pcmcia_sockets_lock);
- list_for_each_entry(skt, &soc_pcmcia_sockets, node)
- if (skt->ops->frequency_change)
- ret += skt->ops->frequency_change(skt, val, freqs);
- mutex_unlock(&soc_pcmcia_sockets_lock);
-
- return ret;
-}
-
-static struct notifier_block soc_pcmcia_notifier_block = {
- .notifier_call = soc_pcmcia_notifier
-};
-
-static int soc_pcmcia_cpufreq_register(void)
-{
- int ret;
- ret = cpufreq_register_notifier(&soc_pcmcia_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (ret < 0)
- printk(KERN_ERR "Unable to register CPU frequency change "
- "notifier for PCMCIA (%d)\n", ret);
- return ret;
+ return skt->ops->frequency_change(skt, val, freqs);
}
-fs_initcall(soc_pcmcia_cpufreq_register);
-
-static void soc_pcmcia_cpufreq_unregister(void)
-{
- cpufreq_unregister_notifier(&soc_pcmcia_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-module_exit(soc_pcmcia_cpufreq_unregister);
-
#endif
void soc_pcmcia_init_one(struct soc_pcmcia_socket *skt,
- struct pcmcia_low_level *ops, struct device *dev)
+ const struct pcmcia_low_level *ops, struct device *dev)
{
int i;
@@ -700,19 +764,21 @@ EXPORT_SYMBOL(soc_pcmcia_init_one);
void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt)
{
- mutex_lock(&soc_pcmcia_sockets_lock);
del_timer_sync(&skt->poll_timer);
pcmcia_unregister_socket(&skt->socket);
+#ifdef CONFIG_CPU_FREQ
+ if (skt->ops->frequency_change)
+ cpufreq_unregister_notifier(&skt->cpufreq_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
soc_pcmcia_hw_shutdown(skt);
/* should not be required; violates some lowlevel drivers */
soc_common_pcmcia_config_skt(skt, &dead_socket);
- list_del(&skt->node);
- mutex_unlock(&soc_pcmcia_sockets_lock);
-
iounmap(skt->virt_io);
skt->virt_io = NULL;
release_resource(&skt->res_attr);
@@ -726,6 +792,8 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
{
int ret;
+ skt->cs_state = dead_socket;
+
setup_timer(&skt->poll_timer, soc_common_pcmcia_poll_event,
(unsigned long)skt);
skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;
@@ -752,10 +820,6 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
goto out_err_5;
}
- mutex_lock(&soc_pcmcia_sockets_lock);
-
- list_add(&skt->node, &soc_pcmcia_sockets);
-
/*
* We initialize default socket timing here, because
* we are not guaranteed to see a SetIOMap operation at
@@ -776,14 +840,23 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
skt->status = soc_common_pcmcia_skt_state(skt);
+#ifdef CONFIG_CPU_FREQ
+ if (skt->ops->frequency_change) {
+ skt->cpufreq_nb.notifier_call = soc_common_pcmcia_cpufreq_nb;
+
+ ret = cpufreq_register_notifier(&skt->cpufreq_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret < 0)
+ dev_err(skt->socket.dev.parent,
+ "unable to register CPU frequency change notifier for PCMCIA (%d)\n",
+ ret);
+ }
+#endif
+
ret = pcmcia_register_socket(&skt->socket);
if (ret)
goto out_err_7;
- add_timer(&skt->poll_timer);
-
- mutex_unlock(&soc_pcmcia_sockets_lock);
-
ret = device_create_file(&skt->socket.dev, &dev_attr_status);
if (ret)
goto out_err_8;
@@ -791,15 +864,12 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
return ret;
out_err_8:
- mutex_lock(&soc_pcmcia_sockets_lock);
del_timer_sync(&skt->poll_timer);
pcmcia_unregister_socket(&skt->socket);
out_err_7:
soc_pcmcia_hw_shutdown(skt);
out_err_6:
- list_del(&skt->node);
- mutex_unlock(&soc_pcmcia_sockets_lock);
iounmap(skt->virt_io);
out_err_5:
release_resource(&skt->res_attr);
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 94762a54d731..3f3625805353 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -17,7 +17,14 @@
struct device;
+struct gpio_desc;
struct pcmcia_low_level;
+struct regulator;
+
+struct soc_pcmcia_regulator {
+ struct regulator *reg;
+ bool on;
+};
/*
* This structure encapsulates per-socket state which we might need to
@@ -52,18 +59,30 @@ struct soc_pcmcia_socket {
struct {
int gpio;
+ struct gpio_desc *desc;
unsigned int irq;
const char *name;
- } stat[4];
+ } stat[6];
#define SOC_STAT_CD 0 /* Card detect */
#define SOC_STAT_BVD1 1 /* BATDEAD / IOSTSCHG */
#define SOC_STAT_BVD2 2 /* BATWARN / IOSPKR */
#define SOC_STAT_RDY 3 /* Ready / Interrupt */
+#define SOC_STAT_VS1 4 /* Voltage sense 1 */
+#define SOC_STAT_VS2 5 /* Voltage sense 2 */
+
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_bus_enable;
+ struct soc_pcmcia_regulator vcc;
+ struct soc_pcmcia_regulator vpp;
unsigned int irq_state;
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block cpufreq_nb;
+#endif
struct timer_list poll_timer;
struct list_head node;
+ void *driver_data;
};
struct skt_dev_info {
@@ -133,10 +152,16 @@ struct soc_pcmcia_timing {
extern void soc_common_pcmcia_get_timing(struct soc_pcmcia_socket *, struct soc_pcmcia_timing *);
void soc_pcmcia_init_one(struct soc_pcmcia_socket *skt,
- struct pcmcia_low_level *ops, struct device *dev);
+ const struct pcmcia_low_level *ops, struct device *dev);
void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt);
int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt);
+int soc_pcmcia_request_gpiods(struct soc_pcmcia_socket *skt);
+
+void soc_common_cf_socket_state(struct soc_pcmcia_socket *skt,
+ struct pcmcia_state *state);
+int soc_pcmcia_regulator_set(struct soc_pcmcia_socket *skt,
+ struct soc_pcmcia_regulator *r, int v);
#ifdef CONFIG_PCMCIA_DEBUG
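
For orientation, here is a minimal sketch of how a board-level socket driver might consume the helpers exported above. The ops structure, function names and GPIO names are hypothetical; only the soc_common entry points and the tenths-of-a-volt Vcc convention come from this patch.

/* Hypothetical low-level driver glue for a CF slot wired with "cd" and
 * "ready" GPIOs and a Vcc supply; not part of this patch. */
static int example_hw_init(struct soc_pcmcia_socket *skt)
{
        skt->stat[SOC_STAT_CD].name = "cd";
        skt->stat[SOC_STAT_RDY].name = "ready";
        return soc_pcmcia_request_gpiods(skt);
}

static int example_configure_socket(struct soc_pcmcia_socket *skt,
                                    const socket_state_t *state)
{
        /* state->Vcc is in tenths of a volt (33 == 3.3V), matching the
         * v * 100000 microvolt scaling in soc_pcmcia_regulator_set() */
        return soc_pcmcia_regulator_set(skt, &skt->vcc, state->Vcc);
}

static const struct pcmcia_low_level example_ops = {
        .hw_init                = example_hw_init,
        .socket_state           = soc_common_cf_socket_state,
        .configure_socket       = example_configure_socket,
};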
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 04e2653bb8c0..4d5c5f9f0dbd 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -12,4 +12,11 @@ config ARM_PMU
Say y if you want to use CPU performance monitors on ARM-based
systems.
+config XGENE_PMU
+ depends on PERF_EVENTS && ARCH_XGENE
+ bool "APM X-Gene SoC PMU"
+ default n
+ help
+ Say y if you want to use APM X-Gene SoC performance monitors.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index acd2397ded94..b116e982810b 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
new file mode 100644
index 000000000000..a8ac4bcef2c0
--- /dev/null
+++ b/drivers/perf/xgene_pmu.c
@@ -0,0 +1,1398 @@
+/*
+ * APM X-Gene SoC PMU (Performance Monitor Unit)
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Author: Hoan Tran <hotran@apm.com>
+ * Tai Nguyen <ttnguyen@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define CSW_CSWCR 0x0000
+#define CSW_CSWCR_DUALMCB_MASK BIT(0)
+#define MCBADDRMR 0x0000
+#define MCBADDRMR_DUALMCU_MODE_MASK BIT(2)
+
+#define PCPPMU_INTSTATUS_REG 0x000
+#define PCPPMU_INTMASK_REG 0x004
+#define PCPPMU_INTMASK 0x0000000F
+#define PCPPMU_INTENMASK 0xFFFFFFFF
+#define PCPPMU_INTCLRMASK 0xFFFFFFF0
+#define PCPPMU_INT_MCU BIT(0)
+#define PCPPMU_INT_MCB BIT(1)
+#define PCPPMU_INT_L3C BIT(2)
+#define PCPPMU_INT_IOB BIT(3)
+
+#define PMU_MAX_COUNTERS 4
+#define PMU_CNT_MAX_PERIOD 0x100000000ULL
+#define PMU_OVERFLOW_MASK 0xF
+#define PMU_PMCR_E BIT(0)
+#define PMU_PMCR_P BIT(1)
+
+#define PMU_PMEVCNTR0 0x000
+#define PMU_PMEVCNTR1 0x004
+#define PMU_PMEVCNTR2 0x008
+#define PMU_PMEVCNTR3 0x00C
+#define PMU_PMEVTYPER0 0x400
+#define PMU_PMEVTYPER1 0x404
+#define PMU_PMEVTYPER2 0x408
+#define PMU_PMEVTYPER3 0x40C
+#define PMU_PMAMR0 0xA00
+#define PMU_PMAMR1 0xA04
+#define PMU_PMCNTENSET 0xC00
+#define PMU_PMCNTENCLR 0xC20
+#define PMU_PMINTENSET 0xC40
+#define PMU_PMINTENCLR 0xC60
+#define PMU_PMOVSR 0xC80
+#define PMU_PMCR 0xE04
+
+#define to_pmu_dev(p) container_of(p, struct xgene_pmu_dev, pmu)
+#define GET_CNTR(ev) (ev->hw.idx)
+#define GET_EVENTID(ev) (ev->hw.config & 0xFFULL)
+#define GET_AGENTID(ev) (ev->hw.config_base & 0xFFFFFFFFUL)
+#define GET_AGENT1ID(ev) ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)
+
+struct hw_pmu_info {
+ u32 type;
+ u32 enable_mask;
+ void __iomem *csr;
+};
+
+struct xgene_pmu_dev {
+ struct hw_pmu_info *inf;
+ struct xgene_pmu *parent;
+ struct pmu pmu;
+ u8 max_counters;
+ DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
+ u64 max_period;
+ const struct attribute_group **attr_groups;
+ struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
+};
+
+struct xgene_pmu {
+ struct device *dev;
+ int version;
+ void __iomem *pcppmu_csr;
+ u32 mcb_active_mask;
+ u32 mc_active_mask;
+ cpumask_t cpu;
+ raw_spinlock_t lock;
+ struct list_head l3cpmus;
+ struct list_head iobpmus;
+ struct list_head mcbpmus;
+ struct list_head mcpmus;
+};
+
+struct xgene_pmu_dev_ctx {
+ char *name;
+ struct list_head next;
+ struct xgene_pmu_dev *pmu_dev;
+ struct hw_pmu_info inf;
+};
+
+struct xgene_pmu_data {
+ int id;
+ u32 data;
+};
+
+enum xgene_pmu_version {
+ PCP_PMU_V1 = 1,
+ PCP_PMU_V2,
+};
+
+enum xgene_pmu_dev_type {
+ PMU_TYPE_L3C = 0,
+ PMU_TYPE_IOB,
+ PMU_TYPE_MCB,
+ PMU_TYPE_MC,
+};
+
+/*
+ * sysfs format attributes
+ */
+static ssize_t xgene_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "%s\n", (char *) eattr->var);
+}
+
+#define XGENE_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
+ .var = (void *) _config, } \
+ })[0].attr.attr)
+
+static struct attribute *l3c_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
+ XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
+ NULL,
+};
+
+static struct attribute *iob_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
+ XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
+ NULL,
+};
+
+static struct attribute *mcb_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
+ XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
+ NULL,
+};
+
+static struct attribute *mc_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
+ NULL,
+};
+
+static const struct attribute_group l3c_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = l3c_pmu_format_attrs,
+};
+
+static const struct attribute_group iob_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = iob_pmu_format_attrs,
+};
+
+static const struct attribute_group mcb_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = mcb_pmu_format_attrs,
+};
+
+static const struct attribute_group mc_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = mc_pmu_format_attrs,
+};
+
+/*
+ * sysfs event attributes
+ */
+static ssize_t xgene_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
+}
+
+#define XGENE_PMU_EVENT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \
+ .var = (void *) _config, } \
+ })[0].attr.attr)
+
+static struct attribute *l3c_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
+ XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
+ XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
+ XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
+ XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
+ XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
+ XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
+ XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
+ XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
+ XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
+ XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
+ XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
+ NULL,
+};
+
+static struct attribute *iob_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
+ XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
+ XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
+ XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
+ XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
+ XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
+ XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
+ XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
+ XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
+ XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
+ XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
+ NULL,
+};
+
+static struct attribute *mcb_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
+ XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
+ XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
+ XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
+ NULL,
+};
+
+static struct attribute *mc_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
+ XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
+ XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
+ XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
+ XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
+ XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
+ XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
+ XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
+ XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
+ XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
+ XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
+ XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
+ XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
+ XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
+ XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
+ XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
+ XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
+ XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
+ XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
+ XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
+ XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
+ NULL,
+};
+
+static const struct attribute_group l3c_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = l3c_pmu_events_attrs,
+};
+
+static const struct attribute_group iob_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = iob_pmu_events_attrs,
+};
+
+static const struct attribute_group mcb_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = mcb_pmu_events_attrs,
+};
+
+static const struct attribute_group mc_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = mc_pmu_events_attrs,
+};
+
+/*
+ * sysfs cpumask attributes
+ */
+static ssize_t xgene_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL);
+
+static struct attribute *xgene_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group pmu_cpumask_attr_group = {
+ .attrs = xgene_pmu_cpumask_attrs,
+};
+
+/*
+ * Per PMU device attribute groups
+ */
+static const struct attribute_group *l3c_pmu_attr_groups[] = {
+ &l3c_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &l3c_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *iob_pmu_attr_groups[] = {
+ &iob_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &iob_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *mcb_pmu_attr_groups[] = {
+ &mcb_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &mcb_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *mc_pmu_attr_groups[] = {
+ &mc_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &mc_pmu_events_attr_group,
+ NULL
+};
+
+static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
+{
+ int cntr;
+
+ cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
+ pmu_dev->max_counters);
+ if (cntr == pmu_dev->max_counters)
+ return -ENOSPC;
+ set_bit(cntr, pmu_dev->cntr_assign_mask);
+
+ return cntr;
+}
+
+static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
+{
+ clear_bit(cntr, pmu_dev->cntr_assign_mask);
+}
+
+static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
+{
+ writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
+}
+
+static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
+{
+ writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
+}
+
+static inline u32 xgene_pmu_read_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
+}
+
+static inline void
+xgene_pmu_write_counter(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
+}
+
+static inline void
+xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
+}
+
+static inline void
+xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
+}
+
+static inline void
+xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
+}
+
+static inline void
+xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
+}
+
+static inline void
+xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
+}
+
+static inline void
+xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
+}
+
+static inline void
+xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
+}
+
+static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCR);
+ val |= PMU_PMCR_P;
+ writel(val, pmu_dev->inf->csr + PMU_PMCR);
+}
+
+static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCR);
+ val |= PMU_PMCR_E;
+ writel(val, pmu_dev->inf->csr + PMU_PMCR);
+}
+
+static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCR);
+ val &= ~PMU_PMCR_E;
+ writel(val, pmu_dev->inf->csr + PMU_PMCR);
+}
+
+static void xgene_perf_pmu_enable(struct pmu *pmu)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
+ int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
+ pmu_dev->max_counters);
+
+ if (!enabled)
+ return;
+
+ xgene_pmu_start_counters(pmu_dev);
+}
+
+static void xgene_perf_pmu_disable(struct pmu *pmu)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
+
+ xgene_pmu_stop_counters(pmu_dev);
+}
+
+static int xgene_perf_event_init(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct perf_event *sibling;
+
+ /* Test the event attr type for PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * SOC PMU counters are shared across all cores. Therefore, per-process
+ * counting is not supported, and neither is event sampling.
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ /* SOC counters do not have usr/os/guest/host bits */
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_host || event->attr.exclude_guest)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+ /*
+ * Many perf core operations (e.g. event rotation) operate on a
+ * single CPU context. This is obvious for CPU PMUs, where one
+ * expects the same sets of events being observed on all CPUs,
+ * but can lead to issues for off-core PMUs, where each
+ * event could be theoretically assigned to a different CPU. To
+ * mitigate this, we enforce CPU assignment to one, selected
+ * processor (the one described in the "cpumask" attribute).
+ */
+ event->cpu = cpumask_first(&pmu_dev->parent->cpu);
+
+ hw->config = event->attr.config;
+ /*
+ * Each bit of the config1 field represents an agent from which the
+ * request for the event comes. The event is counted only if it is
+ * caused by a request from an agent whose bit is cleared.
+ * By default, the event is counted for all agents.
+ */
+ hw->config_base = event->attr.config1;
+
+ /*
+ * We must NOT create groups containing mixed PMUs, although software
+ * events are acceptable
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ list_for_each_entry(sibling, &event->group_leader->sibling_list,
+ group_entry)
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void xgene_perf_enable_event(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+
+ xgene_pmu_write_evttype(pmu_dev, GET_CNTR(event), GET_EVENTID(event));
+ xgene_pmu_write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
+ if (pmu_dev->inf->type == PMU_TYPE_IOB)
+ xgene_pmu_write_agent1msk(pmu_dev, ~((u32)GET_AGENT1ID(event)));
+
+ xgene_pmu_enable_counter(pmu_dev, GET_CNTR(event));
+ xgene_pmu_enable_counter_int(pmu_dev, GET_CNTR(event));
+}
+
+static void xgene_perf_disable_event(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+
+ xgene_pmu_disable_counter(pmu_dev, GET_CNTR(event));
+ xgene_pmu_disable_counter_int(pmu_dev, GET_CNTR(event));
+}
+
+static void xgene_perf_event_set_period(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ /*
+ * The X-Gene PMU counters have a period of 2^32. To account for the
+ * possibility of extreme interrupt latency we program for a period of
+ * half that. Hopefully we can handle the interrupt before another 2^31
+ * events occur and the counter overtakes its previous value.
+ */
+ u64 val = 1ULL << 31;
+
+ local64_set(&hw->prev_count, val);
+ xgene_pmu_write_counter(pmu_dev, hw->idx, (u32) val);
+}
+
+static void xgene_perf_event_update(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+
+again:
+ prev_raw_count = local64_read(&hw->prev_count);
+ new_raw_count = xgene_pmu_read_counter(pmu_dev, GET_CNTR(event));
+
+ if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;
+
+ local64_add(delta, &event->count);
+}
+
+static void xgene_perf_read(struct perf_event *event)
+{
+ xgene_perf_event_update(event);
+}
+
+static void xgene_perf_start(struct perf_event *event, int flags)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
+ return;
+
+ WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
+ hw->state = 0;
+
+ xgene_perf_event_set_period(event);
+
+ if (flags & PERF_EF_RELOAD) {
+ u64 prev_raw_count = local64_read(&hw->prev_count);
+
+ xgene_pmu_write_counter(pmu_dev, GET_CNTR(event),
+ (u32) prev_raw_count);
+ }
+
+ xgene_perf_enable_event(event);
+ perf_event_update_userpage(event);
+}
+
+static void xgene_perf_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+
+ if (hw->state & PERF_HES_UPTODATE)
+ return;
+
+ xgene_perf_disable_event(event);
+ WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
+ hw->state |= PERF_HES_STOPPED;
+
+ /* The counter is already stopped; fold in the final value. */
+ xgene_perf_read(event);
+ hw->state |= PERF_HES_UPTODATE;
+}
+
+static int xgene_perf_add(struct perf_event *event, int flags)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ /* Allocate an event counter */
+ hw->idx = get_next_avail_cntr(pmu_dev);
+ if (hw->idx < 0)
+ return -EAGAIN;
+
+ /* Update counter event pointer for Interrupt handler */
+ pmu_dev->pmu_counter_event[hw->idx] = event;
+
+ if (flags & PERF_EF_START)
+ xgene_perf_start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+static void xgene_perf_del(struct perf_event *event, int flags)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ xgene_perf_stop(event, PERF_EF_UPDATE);
+
+ /* clear the assigned counter */
+ clear_avail_cntr(pmu_dev, GET_CNTR(event));
+
+ perf_event_update_userpage(event);
+ pmu_dev->pmu_counter_event[hw->idx] = NULL;
+}
+
+static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
+{
+ struct xgene_pmu *xgene_pmu;
+
+ pmu_dev->max_period = PMU_CNT_MAX_PERIOD - 1;
+ /* First version PMU supports only single event counter */
+ xgene_pmu = pmu_dev->parent;
+ if (xgene_pmu->version == PCP_PMU_V1)
+ pmu_dev->max_counters = 1;
+ else
+ pmu_dev->max_counters = PMU_MAX_COUNTERS;
+
+ /* Perf driver registration */
+ pmu_dev->pmu = (struct pmu) {
+ .attr_groups = pmu_dev->attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = xgene_perf_pmu_enable,
+ .pmu_disable = xgene_perf_pmu_disable,
+ .event_init = xgene_perf_event_init,
+ .add = xgene_perf_add,
+ .del = xgene_perf_del,
+ .start = xgene_perf_start,
+ .stop = xgene_perf_stop,
+ .read = xgene_perf_read,
+ };
+
+ /* Hardware counter init */
+ xgene_pmu_stop_counters(pmu_dev);
+ xgene_pmu_reset_counters(pmu_dev);
+
+ return perf_pmu_register(&pmu_dev->pmu, name, -1);
+}
+
+static int
+xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
+{
+ struct device *dev = xgene_pmu->dev;
+ struct xgene_pmu_dev *pmu;
+ int rc;
+
+ pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+ pmu->parent = xgene_pmu;
+ pmu->inf = &ctx->inf;
+ ctx->pmu_dev = pmu;
+
+ switch (pmu->inf->type) {
+ case PMU_TYPE_L3C:
+ pmu->attr_groups = l3c_pmu_attr_groups;
+ break;
+ case PMU_TYPE_IOB:
+ pmu->attr_groups = iob_pmu_attr_groups;
+ break;
+ case PMU_TYPE_MCB:
+ if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
+ goto dev_err;
+ pmu->attr_groups = mcb_pmu_attr_groups;
+ break;
+ case PMU_TYPE_MC:
+ if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
+ goto dev_err;
+ pmu->attr_groups = mc_pmu_attr_groups;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = xgene_init_perf(pmu, ctx->name);
+ if (rc) {
+ dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
+ goto dev_err;
+ }
+
+ dev_info(dev, "%s PMU registered\n", ctx->name);
+
+ return rc;
+
+dev_err:
+ devm_kfree(dev, pmu);
+ return -ENODEV;
+}
+
+static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
+{
+ struct xgene_pmu *xgene_pmu = pmu_dev->parent;
+ u32 pmovsr;
+ int idx;
+
+ pmovsr = readl(pmu_dev->inf->csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;
+ if (!pmovsr)
+ return;
+
+ /* Clear interrupt flag */
+ if (xgene_pmu->version == PCP_PMU_V1)
+ writel(0x0, pmu_dev->inf->csr + PMU_PMOVSR);
+ else
+ writel(pmovsr, pmu_dev->inf->csr + PMU_PMOVSR);
+
+ for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
+ struct perf_event *event = pmu_dev->pmu_counter_event[idx];
+ int overflowed = pmovsr & BIT(idx);
+
+ /* Ignore if we don't have an event. */
+ if (!event || !overflowed)
+ continue;
+ xgene_perf_event_update(event);
+ xgene_perf_event_set_period(event);
+ }
+}
+
+static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
+{
+ struct xgene_pmu_dev_ctx *ctx;
+ struct xgene_pmu *xgene_pmu = dev_id;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&xgene_pmu->lock, flags);
+
+ /* Get Interrupt PMU source */
+ val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
+ if (val & PCPPMU_INT_MCU) {
+ list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+ if (val & PCPPMU_INT_MCB) {
+ list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+ if (val & PCPPMU_INT_L3C) {
+ list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+ if (val & PCPPMU_INT_IOB) {
+ list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int acpi_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
+ struct resource *res;
+ unsigned int reg;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ csw_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(csw_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
+ return PTR_ERR(csw_csr);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ mcba_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcba_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
+ return PTR_ERR(mcba_csr);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ mcbb_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcbb_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
+ return PTR_ERR(mcbb_csr);
+ }
+
+ reg = readl(csw_csr + CSW_CSWCR);
+ if (reg & CSW_CSWCR_DUALMCB_MASK) {
+ /* Dual MCB active */
+ xgene_pmu->mcb_active_mask = 0x3;
+ /* Probe all active MC(s) */
+ reg = readl(mcbb_csr + MCBADDRMR);
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
+ } else {
+ /* Single MCB active */
+ xgene_pmu->mcb_active_mask = 0x1;
+ /* Probe all active MC(s) */
+ reg = readl(mcba_csr + MCBADDRMR);
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
+ }
+
+ return 0;
+}
+
+static int fdt_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ struct regmap *csw_map, *mcba_map, *mcbb_map;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int reg;
+
+ csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
+ if (IS_ERR(csw_map)) {
+ dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
+ return PTR_ERR(csw_map);
+ }
+
+ mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
+ if (IS_ERR(mcba_map)) {
+ dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
+ return PTR_ERR(mcba_map);
+ }
+
+ mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
+ if (IS_ERR(mcbb_map)) {
+ dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
+ return PTR_ERR(mcbb_map);
+ }
+
+ if (regmap_read(csw_map, CSW_CSWCR, &reg))
+ return -EINVAL;
+
+ if (reg & CSW_CSWCR_DUALMCB_MASK) {
+ /* Dual MCB active */
+ xgene_pmu->mcb_active_mask = 0x3;
+ /* Probe all active MC(s) */
+ if (regmap_read(mcbb_map, MCBADDRMR, &reg))
+ return 0;
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
+ } else {
+ /* Single MCB active */
+ xgene_pmu->mcb_active_mask = 0x1;
+ /* Probe all active MC(s) */
+ if (regmap_read(mcba_map, MCBADDRMR, &reg))
+ return 0;
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
+ }
+
+ return 0;
+}
+
+static int xgene_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ if (has_acpi_companion(&pdev->dev))
+ return acpi_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
+ return fdt_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
+}
+
+static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
+{
+ switch (type) {
+ case PMU_TYPE_L3C:
+ return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
+ case PMU_TYPE_IOB:
+ return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
+ case PMU_TYPE_MCB:
+ return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
+ case PMU_TYPE_MC:
+ return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
+ default:
+ return devm_kasprintf(dev, GFP_KERNEL, "unknown");
+ }
+}
+
+#if defined(CONFIG_ACPI)
+static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct resource *res = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
+ acpi_dev_resource_memory(ares, res);
+
+ /* Always tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static struct
+xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct acpi_device *adev, u32 type)
+{
+ struct device *dev = xgene_pmu->dev;
+ struct list_head resource_list;
+ struct xgene_pmu_dev_ctx *ctx;
+ const union acpi_object *obj;
+ struct hw_pmu_info *inf;
+ void __iomem *dev_csr;
+ struct resource res;
+ int enable_bit;
+ int rc;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ INIT_LIST_HEAD(&resource_list);
+ rc = acpi_dev_get_resources(adev, &resource_list,
+ acpi_pmu_dev_add_resource, &res);
+ acpi_dev_free_resource_list(&resource_list);
+ if (rc < 0) {
+ dev_err(dev, "PMU type %d: No resource address found\n", type);
+ goto err;
+ }
+
+ dev_csr = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(dev_csr)) {
+ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+ goto err;
+ }
+
+ /* A PMU device node without enable-bit-index is always enabled */
+ rc = acpi_dev_get_property(adev, "enable-bit-index",
+ ACPI_TYPE_INTEGER, &obj);
+ if (rc < 0)
+ enable_bit = 0;
+ else
+ enable_bit = (int) obj->integer.value;
+
+ ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
+ if (!ctx->name) {
+ dev_err(dev, "PMU type %d: Fail to get device name\n", type);
+ goto err;
+ }
+ inf = &ctx->inf;
+ inf->type = type;
+ inf->csr = dev_csr;
+ inf->enable_mask = 1 << enable_bit;
+
+ return ctx;
+err:
+ devm_kfree(dev, ctx);
+ return NULL;
+}
+
+static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct xgene_pmu *xgene_pmu = data;
+ struct xgene_pmu_dev_ctx *ctx;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ if (!strcmp(acpi_device_hid(adev), "APMC0D5D"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_L3C);
+ else if (!strcmp(acpi_device_hid(adev), "APMC0D5E"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_IOB);
+ else if (!strcmp(acpi_device_hid(adev), "APMC0D5F"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MCB);
+ else if (!strcmp(acpi_device_hid(adev), "APMC0D60"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MC);
+ else
+ ctx = NULL;
+
+ if (!ctx)
+ return AE_OK;
+
+ if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
+ /* Can't add the PMU device, skip it */
+ devm_kfree(xgene_pmu->dev, ctx);
+ return AE_OK;
+ }
+
+ switch (ctx->inf.type) {
+ case PMU_TYPE_L3C:
+ list_add(&ctx->next, &xgene_pmu->l3cpmus);
+ break;
+ case PMU_TYPE_IOB:
+ list_add(&ctx->next, &xgene_pmu->iobpmus);
+ break;
+ case PMU_TYPE_MCB:
+ list_add(&ctx->next, &xgene_pmu->mcbpmus);
+ break;
+ case PMU_TYPE_MC:
+ list_add(&ctx->next, &xgene_pmu->mcpmus);
+ break;
+ }
+ return AE_OK;
+}
+
+static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ struct device *dev = xgene_pmu->dev;
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -EINVAL;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to probe PMU devices\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+#else
+static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static struct
+xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct device_node *np, u32 type)
+{
+ struct device *dev = xgene_pmu->dev;
+ struct xgene_pmu_dev_ctx *ctx;
+ struct hw_pmu_info *inf;
+ void __iomem *dev_csr;
+ struct resource res;
+ int enable_bit;
+ int rc;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ rc = of_address_to_resource(np, 0, &res);
+ if (rc < 0) {
+ dev_err(dev, "PMU type %d: No resource address found\n", type);
+ goto err;
+ }
+ dev_csr = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(dev_csr)) {
+ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+ goto err;
+ }
+
+ /* A PMU device node without enable-bit-index is always enabled */
+ if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
+ enable_bit = 0;
+
+ ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
+ if (!ctx->name) {
+ dev_err(dev, "PMU type %d: Fail to get device name\n", type);
+ goto err;
+ }
+ inf = &ctx->inf;
+ inf->type = type;
+ inf->csr = dev_csr;
+ inf->enable_mask = 1 << enable_bit;
+
+ return ctx;
+err:
+ devm_kfree(dev, ctx);
+ return NULL;
+}
+
+static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ struct xgene_pmu_dev_ctx *ctx;
+ struct device_node *np;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_device_is_available(np))
+ continue;
+
+ if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
+ else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
+ else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
+ else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
+ else
+ ctx = NULL;
+
+ if (!ctx)
+ continue;
+
+ if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
+ /* Can't add the PMU device, skip it */
+ devm_kfree(xgene_pmu->dev, ctx);
+ continue;
+ }
+
+ switch (ctx->inf.type) {
+ case PMU_TYPE_L3C:
+ list_add(&ctx->next, &xgene_pmu->l3cpmus);
+ break;
+ case PMU_TYPE_IOB:
+ list_add(&ctx->next, &xgene_pmu->iobpmus);
+ break;
+ case PMU_TYPE_MCB:
+ list_add(&ctx->next, &xgene_pmu->mcbpmus);
+ break;
+ case PMU_TYPE_MC:
+ list_add(&ctx->next, &xgene_pmu->mcpmus);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ if (has_acpi_companion(&pdev->dev))
+ return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
+ return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
+}
+
+static const struct xgene_pmu_data xgene_pmu_data = {
+ .id = PCP_PMU_V1,
+};
+
+static const struct xgene_pmu_data xgene_pmu_v2_data = {
+ .id = PCP_PMU_V2,
+};
+
+static const struct of_device_id xgene_pmu_of_match[] = {
+ { .compatible = "apm,xgene-pmu", .data = &xgene_pmu_data },
+ { .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_pmu_acpi_match[] = {
+ {"APMC0D5B", PCP_PMU_V1},
+ {"APMC0D5C", PCP_PMU_V2},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
+#endif
+
+static int xgene_pmu_probe(struct platform_device *pdev)
+{
+ const struct xgene_pmu_data *dev_data;
+ const struct of_device_id *of_id;
+ struct xgene_pmu *xgene_pmu;
+ struct resource *res;
+ int irq, rc;
+ int version;
+
+ xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
+ if (!xgene_pmu)
+ return -ENOMEM;
+ xgene_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xgene_pmu);
+
+ version = -EINVAL;
+ of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
+ if (of_id) {
+ dev_data = (const struct xgene_pmu_data *) of_id->data;
+ version = dev_data->id;
+ }
+
+#ifdef CONFIG_ACPI
+ if (ACPI_COMPANION(&pdev->dev)) {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
+ if (acpi_id)
+ version = (int) acpi_id->driver_data;
+ }
+#endif
+ if (version < 0)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
+ INIT_LIST_HEAD(&xgene_pmu->iobpmus);
+ INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
+ INIT_LIST_HEAD(&xgene_pmu->mcpmus);
+
+ xgene_pmu->version = version;
+ dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xgene_pmu->pcppmu_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
+ rc = PTR_ERR(xgene_pmu->pcppmu_csr);
+ goto err;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), xgene_pmu);
+ if (rc) {
+ dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
+ goto err;
+ }
+
+ raw_spin_lock_init(&xgene_pmu->lock);
+
+ /* Check for active MCBs and MCUs */
+ rc = xgene_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
+ xgene_pmu->mcb_active_mask = 0x1;
+ xgene_pmu->mc_active_mask = 0x1;
+ }
+
+ /* Pick one core to use for cpumask attributes */
+ cpumask_set_cpu(smp_processor_id(), &xgene_pmu->cpu);
+
+ /* Make sure that the overflow interrupt is handled by this CPU */
+ rc = irq_set_affinity(irq, &xgene_pmu->cpu);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to set interrupt affinity!\n");
+ goto err;
+ }
+
+ /* Walk through the tree for all PMU perf devices */
+ rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "No PMU perf devices found!\n");
+ goto err;
+ }
+
+ /* Enable interrupt */
+ xgene_pmu_unmask_int(xgene_pmu);
+
+ return 0;
+
+err:
+ if (xgene_pmu->pcppmu_csr)
+ devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
+ devm_kfree(&pdev->dev, xgene_pmu);
+
+ return rc;
+}
+
+static void
+xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
+{
+ struct xgene_pmu_dev_ctx *ctx;
+ struct device *dev = xgene_pmu->dev;
+ struct xgene_pmu_dev *pmu_dev;
+
+ list_for_each_entry(ctx, pmus, next) {
+ pmu_dev = ctx->pmu_dev;
+ if (pmu_dev->inf->csr)
+ devm_iounmap(dev, pmu_dev->inf->csr);
+ devm_kfree(dev, ctx);
+ devm_kfree(dev, pmu_dev);
+ }
+}
+
+static int xgene_pmu_remove(struct platform_device *pdev)
+{
+ struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);
+
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
+
+ if (xgene_pmu->pcppmu_csr)
+ devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
+ devm_kfree(&pdev->dev, xgene_pmu);
+
+ return 0;
+}
+
+static struct platform_driver xgene_pmu_driver = {
+ .probe = xgene_pmu_probe,
+ .remove = xgene_pmu_remove,
+ .driver = {
+ .name = "xgene-pmu",
+ .of_match_table = xgene_pmu_of_match,
+ .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
+ },
+};
+
+builtin_platform_driver(xgene_pmu_driver);
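
As a sanity check on the half-period scheme in xgene_perf_event_set_period() and xgene_perf_event_update() above, here is a standalone sketch (plain C, illustrative values only) of the modulo-2^32 delta arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t max_period = (1ULL << 32) - 1;   /* PMU_CNT_MAX_PERIOD - 1 */
        uint64_t prev = 1ULL << 31;     /* counter is programmed to half period */
        uint32_t now = 0x10;            /* hardware value after wrapping past 2^32 */

        /* The subtraction is masked to the counter width, so a wrapped
         * counter still yields the correct number of events since the
         * last read: (2^32 - 2^31) + 0x10 = 2^31 + 16. */
        uint64_t delta = ((uint64_t)now - prev) & max_period;

        printf("delta = 0x%llx\n", (unsigned long long)delta);
        return 0;
}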
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
index 32ae78c8ca17..c85fb0b59729 100644
--- a/drivers/phy/phy-da8xx-usb.c
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -198,7 +198,8 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
} else {
int ret;
- ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy",
+ "ohci-da8xx");
if (ret)
dev_warn(dev, "Failed to create usb11 phy lookup\n");
ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy",
@@ -216,7 +217,7 @@ static int da8xx_usb_phy_remove(struct platform_device *pdev)
if (!pdev->dev.of_node) {
phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx");
- phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci-da8xx");
}
return 0;
diff --git a/drivers/phy/phy-rockchip-pcie.c b/drivers/phy/phy-rockchip-pcie.c
index a2b4c6b58aea..6904633cad68 100644
--- a/drivers/phy/phy-rockchip-pcie.c
+++ b/drivers/phy/phy-rockchip-pcie.c
@@ -249,21 +249,10 @@ err_refclk:
static int rockchip_pcie_phy_exit(struct phy *phy)
{
struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
- int err = 0;
clk_disable_unprepare(rk_phy->clk_pciephy_ref);
- err = reset_control_deassert(rk_phy->phy_rst);
- if (err) {
- dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
- goto err_reset;
- }
-
- return err;
-
-err_reset:
- clk_prepare_enable(rk_phy->clk_pciephy_ref);
- return err;
+ return 0;
}
static const struct phy_ops ops = {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index b9342a2af7b3..fec34f5213c4 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -264,7 +264,7 @@ static int sun4i_usb_phy_init(struct phy *_phy)
return ret;
}
- if (data->cfg->enable_pmu_unk1) {
+ if (phy->pmu && data->cfg->enable_pmu_unk1) {
val = readl(phy->pmu + REG_PMU_UNK1);
writel(val & ~2, phy->pmu + REG_PMU_UNK1);
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index e1ab864e1a7f..87b46390b695 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -26,7 +26,7 @@
#define ASPEED_G5_NR_PINS 228
-#define COND1 SIG_DESC_BIT(SCU90, 6, 0)
+#define COND1 { SCU90, BIT(6), 0, 0 }
#define COND2 { SCU94, GENMASK(1, 0), 0, 0 }
#define B14 0
@@ -151,21 +151,21 @@ FUNC_GROUP_DECL(GPID0, F19, E21);
#define GPID2_DESC SIG_DESC_SET(SCU8C, 9)
-#define D20 26
+#define F20 26
SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
-MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN);
+MS_PIN_DECL(F20, GPIOD2, SD2DAT0, GPID2IN);
-#define D21 27
+#define D20 27
SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
-MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT);
+MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT);
-FUNC_GROUP_DECL(GPID2, D20, D21);
+FUNC_GROUP_DECL(GPID2, F20, D20);
#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21)
#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
@@ -182,28 +182,88 @@ SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
-MS_PIN_DECL(C20, GPIE0, NDCD3, GPIE0OUT);
+MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
FUNC_GROUP_DECL(GPIE0, B20, C20);
-#define SPI1_DESC SIG_DESC_SET(HW_STRAP1, 13)
+#define SPI1_DESC { HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC { HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC { HW_STRAP1, GENMASK(13, 12), 3, 0 }
+
#define C18 64
-SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(C18, GPIOI0, SYSCS);
#define E15 65
-SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(E15, GPIOI1, SYSCK);
-#define A14 66
-SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC);
-SS_PIN_DECL(A14, GPIOI2, SYSMOSI);
+#define B16 66
+SIG_EXPR_DECL(SYSMOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMOSI, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(B16, GPIOI2, SYSMOSI);
#define C16 67
-SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMISO, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(C16, GPIOI3, SYSMISO);
-FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16);
+#define VB_DESC SIG_DESC_SET(HW_STRAP1, 5)
+
+#define B15 68
+SIG_EXPR_DECL(SPI1CS0, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1),
+ SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(B15, GPIOI4, SPI1CS0, VBCS);
+
+#define C15 69
+SIG_EXPR_DECL(SPI1CK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1),
+ SIG_EXPR_PTR(SPI1CK, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(C15, GPIOI5, SPI1CK, VBCK);
+
+#define A14 70
+SIG_EXPR_DECL(SPI1MOSI, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MOSI, SIG_EXPR_PTR(SPI1MOSI, SPI1),
+ SIG_EXPR_PTR(SPI1MOSI, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1MOSI, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMOSI, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A14, GPIOI6, SPI1MOSI, VBMOSI);
+
+#define A15 71
+SIG_EXPR_DECL(SPI1MISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MISO, SIG_EXPR_PTR(SPI1MISO, SPI1),
+ SIG_EXPR_PTR(SPI1MISO, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1MISO, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMISO, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A15, GPIOI7, SPI1MISO, VBMISO);
+
+FUNC_GROUP_DECL(SPI1, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1DEBUG, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1PASSTHRU, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(VGABIOSROM, B15, C15, A14, A15);
+
+#define R2 72
+SIG_EXPR_LIST_DECL_SINGLE(SGPMCK, SGPM, SIG_DESC_SET(SCU84, 8));
+SS_PIN_DECL(R2, GPIOJ0, SGPMCK);
#define L2 73
SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9));
@@ -580,6 +640,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(A12),
ASPEED_PINCTRL_PIN(A13),
ASPEED_PINCTRL_PIN(A14),
+ ASPEED_PINCTRL_PIN(A15),
ASPEED_PINCTRL_PIN(A2),
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(A4),
@@ -592,6 +653,8 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(B12),
ASPEED_PINCTRL_PIN(B13),
ASPEED_PINCTRL_PIN(B14),
+ ASPEED_PINCTRL_PIN(B15),
+ ASPEED_PINCTRL_PIN(B16),
ASPEED_PINCTRL_PIN(B2),
ASPEED_PINCTRL_PIN(B20),
ASPEED_PINCTRL_PIN(B3),
@@ -603,6 +666,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(C12),
ASPEED_PINCTRL_PIN(C13),
ASPEED_PINCTRL_PIN(C14),
+ ASPEED_PINCTRL_PIN(C15),
ASPEED_PINCTRL_PIN(C16),
ASPEED_PINCTRL_PIN(C18),
ASPEED_PINCTRL_PIN(C2),
@@ -614,7 +678,6 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(D10),
ASPEED_PINCTRL_PIN(D2),
ASPEED_PINCTRL_PIN(D20),
- ASPEED_PINCTRL_PIN(D21),
ASPEED_PINCTRL_PIN(D4),
ASPEED_PINCTRL_PIN(D5),
ASPEED_PINCTRL_PIN(D6),
@@ -630,6 +693,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(E7),
ASPEED_PINCTRL_PIN(E9),
ASPEED_PINCTRL_PIN(F19),
+ ASPEED_PINCTRL_PIN(F20),
ASPEED_PINCTRL_PIN(F9),
ASPEED_PINCTRL_PIN(H20),
ASPEED_PINCTRL_PIN(L1),
@@ -691,11 +755,14 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
ASPEED_PINCTRL_GROUP(RMII2),
ASPEED_PINCTRL_GROUP(SD1),
ASPEED_PINCTRL_GROUP(SPI1),
+ ASPEED_PINCTRL_GROUP(SPI1DEBUG),
+ ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
ASPEED_PINCTRL_GROUP(TIMER4),
ASPEED_PINCTRL_GROUP(TIMER5),
ASPEED_PINCTRL_GROUP(TIMER6),
ASPEED_PINCTRL_GROUP(TIMER7),
ASPEED_PINCTRL_GROUP(TIMER8),
+ ASPEED_PINCTRL_GROUP(VGABIOSROM),
};
static const struct aspeed_pin_function aspeed_g5_functions[] = {
@@ -733,11 +800,14 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
ASPEED_PINCTRL_FUNC(RMII2),
ASPEED_PINCTRL_FUNC(SD1),
ASPEED_PINCTRL_FUNC(SPI1),
+ ASPEED_PINCTRL_FUNC(SPI1DEBUG),
+ ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
ASPEED_PINCTRL_FUNC(TIMER4),
ASPEED_PINCTRL_FUNC(TIMER5),
ASPEED_PINCTRL_FUNC(TIMER6),
ASPEED_PINCTRL_FUNC(TIMER7),
ASPEED_PINCTRL_FUNC(TIMER8),
+ ASPEED_PINCTRL_FUNC(VGABIOSROM),
};
static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 0391f9f13f3e..49aeba912531 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -166,13 +166,9 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
bool enable, struct regmap *map)
{
int i;
- bool ret;
-
- ret = aspeed_sig_expr_eval(expr, enable, map);
- if (ret)
- return ret;
for (i = 0; i < expr->ndescs; i++) {
+ bool ret;
const struct aspeed_sig_desc *desc = &expr->descs[i];
u32 pattern = enable ? desc->enable : desc->disable;
@@ -199,12 +195,18 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
struct regmap *map)
{
+ if (aspeed_sig_expr_eval(expr, true, map))
+ return true;
+
return aspeed_sig_expr_set(expr, true, map);
}
static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
struct regmap *map)
{
+ if (!aspeed_sig_expr_eval(expr, true, map))
+ return true;
+
return aspeed_sig_expr_set(expr, false, map);
}
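
[Note: the net effect of the two aspeed hunks above is that the "is the mux already in the requested state?" query moves out of aspeed_sig_expr_set() and into the enable/disable wrappers; disable deliberately evaluates the *enabled* state so it can skip the register writes when the signal is already routed elsewhere. A compressed sketch, assuming only the signatures visible in the hunks — the demo_* names are hypothetical:]

static bool demo_enable(const struct aspeed_sig_expr *expr, struct regmap *map)
{
	/* Already routed: nothing to write. */
	if (aspeed_sig_expr_eval(expr, true, map))
		return true;

	return aspeed_sig_expr_set(expr, true, map);
}

static bool demo_disable(const struct aspeed_sig_expr *expr, struct regmap *map)
{
	/* Not currently routed: disabling is a no-op. */
	if (!aspeed_sig_expr_eval(expr, true, map))
		return true;

	return aspeed_sig_expr_set(expr, false, map);
}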
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 7f7700716398..5d1e505c3c63 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -844,6 +844,6 @@ static struct platform_driver iproc_gpio_driver = {
static int __init iproc_gpio_init(void)
{
- return platform_driver_probe(&iproc_gpio_driver, iproc_gpio_probe);
+ return platform_driver_register(&iproc_gpio_driver);
}
arch_initcall_sync(iproc_gpio_init);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 35783db1c10b..c8deb8be1da7 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -741,6 +741,6 @@ static struct platform_driver nsp_gpio_driver = {
static int __init nsp_gpio_init(void)
{
- return platform_driver_probe(&nsp_gpio_driver, nsp_gpio_probe);
+ return platform_driver_register(&nsp_gpio_driver);
}
arch_initcall_sync(nsp_gpio_init);
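
[Note: both Broadcom GPIO drivers switch from platform_driver_probe() to platform_driver_register() for the same reason: platform_driver_probe() is one-shot, so a probe that returns -EPROBE_DEFER (for example while waiting on a pinctrl or clock provider) fails permanently. A minimal sketch of the retry-friendly form, using a hypothetical demo driver:]

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	/* May return -EPROBE_DEFER; the driver core will retry later. */
	return 0;
}

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo-gpio",
	},
	.probe = demo_probe,
};

static int __init demo_init(void)
{
	return platform_driver_register(&demo_driver);
}
arch_initcall_sync(demo_init);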
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 47613201269a..79c4e14a5a75 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -687,6 +687,7 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
if (!info->functions)
return -ENOMEM;
+ info->group_index = 0;
if (flat_funcs) {
info->ngroups = of_get_child_count(np);
} else {
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index d22a9fe2e6df..71bbeb9321ba 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(vg->pctl_dev);
}
+ raw_spin_lock_init(&vg->lock);
+
ret = byt_gpio_probe(vg);
if (ret) {
pinctrl_unregister(vg->pctl_dev);
@@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, vg);
- raw_spin_lock_init(&vg->lock);
pm_runtime_enable(&pdev->dev);
return 0;
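
[Note: the Baytrail hunks are purely an ordering fix. byt_gpio_probe() registers the GPIO chip and its IRQ handling, either of which can take vg->lock as soon as it is live, so the raw spinlock must be initialized first. The rule, as a two-line sketch using the names from the hunk:]

	/* Any lock that callbacks registered during probe can take must
	 * be initialized before those callbacks are registered. */
	raw_spin_lock_init(&vg->lock);
	ret = byt_gpio_probe(vg);	/* may take vg->lock immediately */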
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 30389f4ccab4..c43b1e9a06af 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1652,12 +1652,15 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int chv_pinctrl_suspend(struct device *dev)
+static int chv_pinctrl_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&chv_lock, flags);
+
pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK);
for (i = 0; i < pctrl->community->npins; i++) {
@@ -1678,15 +1681,20 @@ static int chv_pinctrl_suspend(struct device *dev)
ctx->padctrl1 = readl(reg);
}
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
return 0;
}
-static int chv_pinctrl_resume(struct device *dev)
+static int chv_pinctrl_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&chv_lock, flags);
+
/*
* Mask all interrupts before restoring per-pin configuration
* registers because we don't know in which state BIOS left them
@@ -1731,12 +1739,15 @@ static int chv_pinctrl_resume(struct device *dev)
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
return 0;
}
#endif
static const struct dev_pm_ops chv_pinctrl_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend, chv_pinctrl_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(chv_pinctrl_suspend_noirq,
+ chv_pinctrl_resume_noirq)
};
static const struct acpi_device_id chv_pinctrl_acpi_match[] = {
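
[Note: two changes ride together in the Cherryview hunks: the callbacks move to the noirq phase, so they run after device interrupts are disabled on suspend and before they are re-enabled on resume, and the register save/restore now happens under chv_lock with interrupts masked, since the same lock is taken from the IRQ path. The skeleton, with hypothetical demo_* names:]

static DEFINE_RAW_SPINLOCK(demo_lock);

static int demo_suspend_noirq(struct device *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... snapshot pad registers ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);

	return 0;
}

static int demo_resume_noirq(struct device *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... restore pad registers, then unmask interrupts ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);

	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(demo_suspend_noirq, demo_resume_noirq)
};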
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 63387a40b973..01443762e570 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -19,6 +19,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include "../core.h"
#include "pinctrl-intel.h"
/* Offset from regs */
@@ -1056,6 +1057,26 @@ int intel_pinctrl_remove(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
#ifdef CONFIG_PM_SLEEP
+static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
+{
+ const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+
+ if (!pd || !intel_pad_usable(pctrl, pin))
+ return false;
+
+ /*
+ * Only restore the pin if it is actually in use by the kernel (or
+	 * by userspace). Some pins may be used by the BIOS during resume,
+	 * and those are not always locked down, so leave them alone.
+ */
+ if (pd->mux_owner || pd->gpio_owner ||
+ gpiochip_line_is_irq(&pctrl->chip, pin))
+ return true;
+
+ return false;
+}
+
int intel_pinctrl_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1069,7 +1090,7 @@ int intel_pinctrl_suspend(struct device *dev)
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
@@ -1130,7 +1151,7 @@ int intel_pinctrl_resume(struct device *dev)
void __iomem *padcfg;
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 345c3df669a0..84e144167b44 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -64,11 +64,11 @@ static int orion_mpp_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-#define V(f5181l, f5182, f5281) \
- ((f5181l << 0) | (f5182 << 1) | (f5281 << 2))
+#define V(f5181, f5182, f5281) \
+ ((f5181 << 0) | (f5182 << 1) | (f5281 << 2))
enum orion_variant {
- V_5181L = V(1, 0, 0),
+ V_5181 = V(1, 0, 0),
V_5182 = V(0, 1, 0),
V_5281 = V(0, 0, 1),
V_ALL = V(1, 1, 1),
@@ -103,13 +103,13 @@ static struct mvebu_mpp_mode orion_mpp_modes[] = {
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL),
MPP_VAR_FUNCTION(0x2, "pci", "req5", V_ALL),
MPP_VAR_FUNCTION(0x4, "nand", "re0", V_5182 | V_5281),
- MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181L),
+ MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181),
MPP_VAR_FUNCTION(0x5, "sata0", "act", V_5182)),
MPP_MODE(7,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL),
MPP_VAR_FUNCTION(0x2, "pci", "gnt5", V_ALL),
MPP_VAR_FUNCTION(0x4, "nand", "we0", V_5182 | V_5281),
- MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181L),
+ MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181),
MPP_VAR_FUNCTION(0x5, "sata1", "act", V_5182)),
MPP_MODE(8,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL),
@@ -165,7 +165,7 @@ static struct mvebu_mpp_ctrl orion_mpp_controls[] = {
MPP_FUNC_CTRL(0, 19, NULL, orion_mpp_ctrl),
};
-static struct pinctrl_gpio_range mv88f5181l_gpio_ranges[] = {
+static struct pinctrl_gpio_range mv88f5181_gpio_ranges[] = {
MPP_GPIO_RANGE(0, 0, 0, 16),
};
@@ -177,14 +177,14 @@ static struct pinctrl_gpio_range mv88f5281_gpio_ranges[] = {
MPP_GPIO_RANGE(0, 0, 0, 16),
};
-static struct mvebu_pinctrl_soc_info mv88f5181l_info = {
- .variant = V_5181L,
+static struct mvebu_pinctrl_soc_info mv88f5181_info = {
+ .variant = V_5181,
.controls = orion_mpp_controls,
.ncontrols = ARRAY_SIZE(orion_mpp_controls),
.modes = orion_mpp_modes,
.nmodes = ARRAY_SIZE(orion_mpp_modes),
- .gpioranges = mv88f5181l_gpio_ranges,
- .ngpioranges = ARRAY_SIZE(mv88f5181l_gpio_ranges),
+ .gpioranges = mv88f5181_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(mv88f5181_gpio_ranges),
};
static struct mvebu_pinctrl_soc_info mv88f5182_info = {
@@ -212,7 +212,8 @@ static struct mvebu_pinctrl_soc_info mv88f5281_info = {
* muxing, they are identical.
*/
static const struct of_device_id orion_pinctrl_of_match[] = {
- { .compatible = "marvell,88f5181l-pinctrl", .data = &mv88f5181l_info },
+ { .compatible = "marvell,88f5181-pinctrl", .data = &mv88f5181_info },
+ { .compatible = "marvell,88f5181l-pinctrl", .data = &mv88f5181_info },
{ .compatible = "marvell,88f5182-pinctrl", .data = &mv88f5182_info },
{ .compatible = "marvell,88f5281-pinctrl", .data = &mv88f5281_info },
{ }
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 99da4cf91031..b7bb37167969 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1512,7 +1512,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
if (info->irqmux_base || gpio_irq > 0) {
err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
0, handle_simple_irq,
- IRQ_TYPE_LEVEL_LOW);
+ IRQ_TYPE_NONE);
if (err) {
gpiochip_remove(&bank->gpio_chip);
dev_info(dev, "could not add irqchip\n");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 200667f08c37..efc43711ff5c 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1092,9 +1092,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
return -EINVAL;
}
- ret = stm32_pctrl_dt_setup_irq(pdev, pctl);
- if (ret)
- return ret;
+ if (of_find_property(np, "interrupt-parent", NULL)) {
+ ret = stm32_pctrl_dt_setup_irq(pdev, pctl);
+ if (ret)
+ return ret;
+ }
for_each_child_of_node(np, child)
if (of_property_read_bool(child, "gpio-controller"))
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 07462d79d040..1aba2c74160e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
* much memory to the process.
*/
down_read(&current->mm->mmap_sem);
- ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+ ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
+ &page, NULL);
up_read(&current->mm->mmap_sem);
if (ret < 0)
break;
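
[Note: this is the mechanical fallout of the get_user_pages() interface change in this cycle, which folded the separate write/force arguments into a single gup_flags word. Mind the inversion: when the pipe is being written, the user page is only read, so no flag is needed; when the pipe is being read, the kernel writes into the user page and must pass FOLL_WRITE. A sketch under the 4.9-era signature (hypothetical wrapper):]

static long demo_pin_page(unsigned long addr, bool is_write,
			  struct page **page)
{
	/* FOLL_WRITE when the kernel will write into the user page. */
	return get_user_pages(addr, 1, is_write ? 0 : FOLL_WRITE,
			      page, NULL);
}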
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 81b8dcca8891..b8a21d7b25d4 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -576,6 +576,7 @@ config ASUS_WMI
config ASUS_NB_WMI
tristate "Asus Notebook WMI Driver"
depends on ASUS_WMI
+ depends on SERIO_I8042 || SERIO_I8042 = n
---help---
This is a driver for newer Asus notebooks. It adds extra features
like wireless radio and bluetooth control, leds, hotkeys, backlight...
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 460fa6708bfc..2acdb0d6ea89 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -405,7 +405,7 @@ static inline void acerhdf_enable_kernelmode(void)
kernelmode = 1;
thz_dev->polling_delay = interval*1000;
- thermal_zone_device_update(thz_dev);
+ thermal_zone_device_update(thz_dev, THERMAL_EVENT_UNSPECIFIED);
pr_notice("kernel mode fan control ON\n");
}
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 15f131146501..28551f5a2e07 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -932,30 +932,19 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(infos);
-static int parse_arg(const char *buf, unsigned long count, int *val)
-{
- if (!count)
- return 0;
- if (count > 31)
- return -EINVAL;
- if (sscanf(buf, "%i", val) != 1)
- return -EINVAL;
- return count;
-}
-
static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
const char *buf, size_t count,
const char *method)
{
int rv, value;
- rv = parse_arg(buf, count, &value);
- if (rv <= 0)
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
return rv;
if (write_acpi_int(asus->handle, method, value))
return -ENODEV;
- return rv;
+ return count;
}
/*
@@ -975,15 +964,17 @@ static ssize_t ledd_store(struct device *dev, struct device_attribute *attr,
struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
- rv = parse_arg(buf, count, &value);
- if (rv > 0) {
- if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
- pr_warn("LED display write failed\n");
- return -ENODEV;
- }
- asus->ledd_status = (u32) value;
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
+
+ if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
+ pr_warn("LED display write failed\n");
+ return -ENODEV;
}
- return rv;
+
+ asus->ledd_status = (u32) value;
+ return count;
}
static DEVICE_ATTR_RW(ledd);
@@ -1148,10 +1139,12 @@ static ssize_t display_store(struct device *dev, struct device_attribute *attr,
struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
- rv = parse_arg(buf, count, &value);
- if (rv > 0)
- asus_set_display(asus, value);
- return rv;
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
+
+ asus_set_display(asus, value);
+ return count;
}
static DEVICE_ATTR_WO(display);
@@ -1190,11 +1183,12 @@ static ssize_t ls_switch_store(struct device *dev,
struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
- rv = parse_arg(buf, count, &value);
- if (rv > 0)
- asus_als_switch(asus, value ? 1 : 0);
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
- return rv;
+ asus_als_switch(asus, value ? 1 : 0);
+ return count;
}
static DEVICE_ATTR_RW(ls_switch);
@@ -1219,14 +1213,15 @@ static ssize_t ls_level_store(struct device *dev, struct device_attribute *attr,
struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
- rv = parse_arg(buf, count, &value);
- if (rv > 0) {
- value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
- /* 0 <= value <= 15 */
- asus_als_level(asus, value);
- }
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
+
+ value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
+ /* 0 <= value <= 15 */
+ asus_als_level(asus, value);
- return rv;
+ return count;
}
static DEVICE_ATTR_RW(ls_level);
@@ -1301,14 +1296,14 @@ static ssize_t gps_store(struct device *dev, struct device_attribute *attr,
int rv, value;
int ret;
- rv = parse_arg(buf, count, &value);
- if (rv <= 0)
- return -EINVAL;
+ rv = kstrtoint(buf, 0, &value);
+ if (rv < 0)
+ return rv;
ret = asus_gps_switch(asus, !!value);
if (ret)
return ret;
rfkill_set_sw_state(asus->gps.rfkill, !value);
- return rv;
+ return count;
}
static DEVICE_ATTR_RW(gps);
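
[Note: all five store callbacks converge on the same shape once the open-coded parse_arg() is gone: kstrtoint() does the format checking (and, unlike the old sscanf() wrapper, rejects trailing garbage), errors propagate as negative codes, and success returns the full count. The template, as a hypothetical attribute:]

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	int value, rv;

	rv = kstrtoint(buf, 0, &value);
	if (rv < 0)
		return rv;

	/* ... apply value to the hardware ... */

	return count;
}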
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index adecc1c555f0..26e4cbc34db8 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -27,6 +27,7 @@
#include <linux/input/sparse-keymap.h>
#include <linux/fb.h>
#include <linux/dmi.h>
+#include <linux/i8042.h>
#include "asus-wmi.h"
@@ -55,10 +56,34 @@ MODULE_PARM_DESC(wapf, "WAPF value");
static struct quirk_entry *quirks;
+static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+ bool ret = false;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ if (unlikely(data == 0xe1)) {
+ extended = true;
+ ret = true;
+ } else if (unlikely(extended)) {
+ extended = false;
+ ret = true;
+ }
+
+ return ret;
+}
+
static struct quirk_entry quirk_asus_unknown = {
.wapf = 0,
};
+static struct quirk_entry quirk_asus_q500a = {
+ .i8042_filter = asus_q500a_i8042_filter,
+};
+
/*
* For those machines that need software to control bt/wifi status
* and can't adjust brightness through ACPI interface
@@ -87,6 +112,10 @@ static struct quirk_entry quirk_no_rfkill_wapf4 = {
.no_rfkill = true,
};
+static struct quirk_entry quirk_asus_ux303ub = {
+ .wmi_backlight_native = true,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
quirks = dmi->driver_data;
@@ -96,6 +125,15 @@ static int dmi_matched(const struct dmi_system_id *dmi)
static const struct dmi_system_id asus_quirks[] = {
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. Q500A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Q500A"),
+ },
+ .driver_data = &quirk_asus_q500a,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. U32U",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
@@ -351,11 +389,22 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_no_rfkill,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. UX303UB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
+ },
+ .driver_data = &quirk_asus_ux303ub,
+ },
{},
};
static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
{
+ int ret;
+
quirks = &quirk_asus_unknown;
dmi_check_system(asus_quirks);
@@ -367,6 +416,15 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
quirks->wapf = wapf;
else
wapf = quirks->wapf;
+
+ if (quirks->i8042_filter) {
+ ret = i8042_install_filter(quirks->i8042_filter);
+ if (ret) {
+ pr_warn("Unable to install key filter\n");
+ return;
+ }
+ pr_info("Using i8042 filter function for receiving events\n");
+ }
}
static const struct key_entry asus_nb_wmi_keymap[] = {
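
[Note: the Q500A quirk works by swallowing the scancode pair the machine injects: the filter returns true for the 0xe1 prefix byte and for the byte that follows it, so neither reaches atkbd, while AUX (mouse) traffic passes through untouched. One caveat worth sketching — a filter installed with i8042_install_filter() should normally be paired with i8042_remove_filter() on unload (hypothetical fragment; the hunk above installs but does not remove):]

static void demo_exit(void)
{
	/* Mirror of i8042_install_filter(); both helpers exist in this tree. */
	i8042_remove_filter(asus_q500a_i8042_filter);
}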
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 7c093a0b78bb..ce6ca31a2d09 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2084,6 +2084,9 @@ static int asus_wmi_add(struct platform_device *pdev)
if (asus->driver->quirks->wmi_backlight_power)
acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
+ if (asus->driver->quirks->wmi_backlight_native)
+ acpi_video_set_dmi_backlight_type(acpi_backlight_native);
+
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 5de1df510ebd..0e19014e9f54 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -28,6 +28,7 @@
#define _ASUS_WMI_H_
#include <linux/platform_device.h>
+#include <linux/i8042.h>
#define ASUS_WMI_KEY_IGNORE (-1)
#define ASUS_WMI_BRN_DOWN 0x20
@@ -43,6 +44,7 @@ struct quirk_entry {
bool scalar_panel_brightness;
bool store_backlight_power;
bool wmi_backlight_power;
+ bool wmi_backlight_native;
int wapf;
/*
* For machines with AMD graphic chips, it will send out WMI event
@@ -51,6 +53,9 @@ struct quirk_entry {
* and let the ACPI interrupt to send out the key event.
*/
int no_display_toggle;
+
+ bool (*i8042_filter)(unsigned char data, unsigned char str,
+ struct serio *serio);
};
struct asus_wmi_driver {
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c
index 0aec4fd4c48e..37e646034ef8 100644
--- a/drivers/platform/x86/dell-smo8800.c
+++ b/drivers/platform/x86/dell-smo8800.c
@@ -24,6 +24,7 @@
#include <linux/acpi.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
struct smo8800_device {
u32 irq; /* acpi device irq */
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d1a091b93192..a7614fc542b5 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -933,6 +933,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
},
},
+ {
+ .ident = "Lenovo Yoga 900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_BOARD_NAME, "VIUU4"),
+ },
+ },
+ {
+ .ident = "Lenovo YOGA 910-13IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
+ },
+ },
{}
};
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index ed5874217ee7..12dbb5063376 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -264,7 +264,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev))
+ if (acpi_create_platform_device(dev, NULL))
dev_info(&dev->dev,
"intel-hid: created platform device\n");
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 146d02f8c9bc..78080763df51 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -164,7 +164,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
if (acpi_match_device_ids(dev, ids) == 0)
- if (acpi_create_platform_device(dev))
+ if (acpi_create_platform_device(dev, NULL))
dev_info(&dev->dev,
"intel-vbtn: created platform device\n");
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 520b58a04daa..e8b1b836ca2d 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -100,7 +100,7 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
struct dentry *dir, *file;
dir = debugfs_create_dir("pmc_core", NULL);
- if (IS_ERR_OR_NULL(dir))
+ if (!dir)
return -ENOMEM;
pmcdev->dbgfs_dir = dir;
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index a511d518206b..0bf51d574fa9 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -522,48 +522,36 @@ static struct resource telemetry_res[] = {
static int ipc_create_punit_device(void)
{
struct platform_device *pdev;
- int ret;
-
- pdev = platform_device_alloc(PUNIT_DEVICE_NAME, -1);
- if (!pdev) {
- dev_err(ipcdev.dev, "Failed to alloc punit platform device\n");
- return -ENOMEM;
- }
-
- pdev->dev.parent = ipcdev.dev;
- ret = platform_device_add_resources(pdev, punit_res_array,
- ARRAY_SIZE(punit_res_array));
- if (ret) {
- dev_err(ipcdev.dev, "Failed to add platform punit resources\n");
- goto err;
- }
+ const struct platform_device_info pdevinfo = {
+ .parent = ipcdev.dev,
+ .name = PUNIT_DEVICE_NAME,
+ .id = -1,
+ .res = punit_res_array,
+ .num_res = ARRAY_SIZE(punit_res_array),
+ };
+
+ pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
- ret = platform_device_add(pdev);
- if (ret) {
- dev_err(ipcdev.dev, "Failed to add punit platform device\n");
- goto err;
- }
ipcdev.punit_dev = pdev;
return 0;
-err:
- platform_device_put(pdev);
- return ret;
}
static int ipc_create_tco_device(void)
{
struct platform_device *pdev;
struct resource *res;
- int ret;
-
- pdev = platform_device_alloc(TCO_DEVICE_NAME, -1);
- if (!pdev) {
- dev_err(ipcdev.dev, "Failed to alloc tco platform device\n");
- return -ENOMEM;
- }
-
- pdev->dev.parent = ipcdev.dev;
+ const struct platform_device_info pdevinfo = {
+ .parent = ipcdev.dev,
+ .name = TCO_DEVICE_NAME,
+ .id = -1,
+ .res = tco_res,
+ .num_res = ARRAY_SIZE(tco_res),
+ .data = &tco_info,
+ .size_data = sizeof(tco_info),
+ };
res = tco_res + TCO_RESOURCE_ACPI_IO;
res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET;
@@ -577,45 +565,26 @@ static int ipc_create_tco_device(void)
res->start = ipcdev.gcr_base + TCO_PMC_OFFSET;
res->end = res->start + TCO_PMC_SIZE - 1;
- ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
- if (ret) {
- dev_err(ipcdev.dev, "Failed to add tco platform resources\n");
- goto err;
- }
+ pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
- ret = platform_device_add_data(pdev, &tco_info, sizeof(tco_info));
- if (ret) {
- dev_err(ipcdev.dev, "Failed to add tco platform data\n");
- goto err;
- }
-
- ret = platform_device_add(pdev);
- if (ret) {
- dev_err(ipcdev.dev, "Failed to add tco platform device\n");
- goto err;
- }
ipcdev.tco_dev = pdev;
return 0;
-err:
- platform_device_put(pdev);
- return ret;
}
static int ipc_create_telemetry_device(void)
{
struct platform_device *pdev;
struct resource *res;
- int ret;
-
- pdev = platform_device_alloc(TELEMETRY_DEVICE_NAME, -1);
- if (!pdev) {
- dev_err(ipcdev.dev,
- "Failed to allocate telemetry platform device\n");
- return -ENOMEM;
- }
-
- pdev->dev.parent = ipcdev.dev;
+ const struct platform_device_info pdevinfo = {
+ .parent = ipcdev.dev,
+ .name = TELEMETRY_DEVICE_NAME,
+ .id = -1,
+ .res = telemetry_res,
+ .num_res = ARRAY_SIZE(telemetry_res),
+ };
res = telemetry_res + TELEMETRY_RESOURCE_PUNIT_SSRAM;
res->start = ipcdev.telem_punit_ssram_base;
@@ -625,26 +594,13 @@ static int ipc_create_telemetry_device(void)
res->start = ipcdev.telem_pmc_ssram_base;
res->end = res->start + ipcdev.telem_pmc_ssram_size - 1;
- ret = platform_device_add_resources(pdev, telemetry_res,
- ARRAY_SIZE(telemetry_res));
- if (ret) {
- dev_err(ipcdev.dev,
- "Failed to add telemetry platform resources\n");
- goto err;
- }
+ pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
- ret = platform_device_add(pdev);
- if (ret) {
- dev_err(ipcdev.dev,
- "Failed to add telemetry platform device\n");
- goto err;
- }
ipcdev.telemetry_dev = pdev;
return 0;
-err:
- platform_device_put(pdev);
- return ret;
}
static int ipc_create_pmc_devices(void)
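
[Note: all three helpers collapse the alloc/add-resources/add-data/add/put error ladder into a single platform_device_register_full() call, which copies the resources and platform data out of the platform_device_info and unwinds itself on failure. The reusable shape, as a sketch with hypothetical demo names:]

static struct platform_device *
demo_create_child(struct device *parent, const char *name,
		  struct resource *res, unsigned int num_res)
{
	const struct platform_device_info info = {
		.parent	 = parent,
		.name	 = name,
		.id	 = -1,
		.res	 = res,
		.num_res = num_res,
	};

	/* Returns ERR_PTR() on failure; no manual put needed. */
	return platform_device_register_full(&info);
}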
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 1dba3598cfcb..c890a49587e4 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4116,7 +4116,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
if (ret > 0) {
struct inode *inode = file_inode(file);
- inode->i_atime = current_fs_time(inode->i_sb);
+ inode->i_atime = current_time(inode);
}
return ret;
diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
index feac4576b837..2df07ee8f3c3 100644
--- a/drivers/platform/x86/toshiba-wmi.c
+++ b/drivers/platform/x86/toshiba-wmi.c
@@ -24,14 +24,15 @@
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/dmi.h>
MODULE_AUTHOR("Azael Avalos");
MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver");
MODULE_LICENSE("GPL");
-#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
+#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
-MODULE_ALIAS("wmi:"TOSHIBA_WMI_EVENT_GUID);
+MODULE_ALIAS("wmi:"WMI_EVENT_GUID);
static struct input_dev *toshiba_wmi_input_dev;
@@ -63,6 +64,16 @@ static void toshiba_wmi_notify(u32 value, void *context)
kfree(response.pointer);
}
+static struct dmi_system_id toshiba_wmi_dmi_table[] __initdata = {
+ {
+ .ident = "Toshiba laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ },
+ },
+ {}
+};
+
static int __init toshiba_wmi_input_setup(void)
{
acpi_status status;
@@ -81,7 +92,7 @@ static int __init toshiba_wmi_input_setup(void)
if (err)
goto err_free_dev;
- status = wmi_install_notify_handler(TOSHIBA_WMI_EVENT_GUID,
+ status = wmi_install_notify_handler(WMI_EVENT_GUID,
toshiba_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
err = -EIO;
@@ -95,7 +106,7 @@ static int __init toshiba_wmi_input_setup(void)
return 0;
err_remove_notifier:
- wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ wmi_remove_notify_handler(WMI_EVENT_GUID);
err_free_keymap:
sparse_keymap_free(toshiba_wmi_input_dev);
err_free_dev:
@@ -105,7 +116,7 @@ static int __init toshiba_wmi_input_setup(void)
static void toshiba_wmi_input_destroy(void)
{
- wmi_remove_notify_handler(TOSHIBA_WMI_EVENT_GUID);
+ wmi_remove_notify_handler(WMI_EVENT_GUID);
sparse_keymap_free(toshiba_wmi_input_dev);
input_unregister_device(toshiba_wmi_input_dev);
}
@@ -114,7 +125,8 @@ static int __init toshiba_wmi_init(void)
{
int ret;
- if (!wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ if (!wmi_has_guid(WMI_EVENT_GUID) ||
+ !dmi_check_system(toshiba_wmi_dmi_table))
return -ENODEV;
ret = toshiba_wmi_input_setup();
@@ -130,7 +142,7 @@ static int __init toshiba_wmi_init(void)
static void __exit toshiba_wmi_exit(void)
{
- if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
+ if (wmi_has_guid(WMI_EVENT_GUID))
toshiba_wmi_input_destroy();
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 9d60a40d8b3f..074bf2fa1c55 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -321,10 +321,9 @@ static int write_acpi_int(const char *methodName, int val)
static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
const u32 in[TCI_WORDS], u32 out[TCI_WORDS])
{
+ union acpi_object in_objs[TCI_WORDS], out_objs[TCI_WORDS + 1];
struct acpi_object_list params;
- union acpi_object in_objs[TCI_WORDS];
struct acpi_buffer results;
- union acpi_object out_objs[TCI_WORDS + 1];
acpi_status status;
int i;
@@ -387,9 +386,8 @@ static int sci_open(struct toshiba_acpi_dev *dev)
{
u32 in[TCI_WORDS] = { SCI_OPEN, 0, 0, 0, 0, 0 };
u32 out[TCI_WORDS];
- acpi_status status;
+ acpi_status status = tci_raw(dev, in, out);
- status = tci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to open SCI failed\n");
return 0;
@@ -425,9 +423,8 @@ static void sci_close(struct toshiba_acpi_dev *dev)
{
u32 in[TCI_WORDS] = { SCI_CLOSE, 0, 0, 0, 0, 0 };
u32 out[TCI_WORDS];
- acpi_status status;
+ acpi_status status = tci_raw(dev, in, out);
- status = tci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to close SCI failed\n");
return;
@@ -479,10 +476,15 @@ static void toshiba_illumination_available(struct toshiba_acpi_dev *dev)
status = tci_raw(dev, in, out);
sci_close(dev);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI call to query Illumination support failed\n");
- else if (out[0] == TOS_SUCCESS)
- dev->illumination_supported = 1;
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ dev->illumination_supported = 1;
}
static void toshiba_illumination_set(struct led_classdev *cdev,
@@ -509,7 +511,8 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
{
struct toshiba_acpi_dev *dev = container_of(cdev,
struct toshiba_acpi_dev, led_dev);
- u32 state, result;
+ u32 result;
+ u32 state;
/* First request : initialize communication. */
if (!sci_open(dev))
@@ -546,24 +549,28 @@ static void toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
sci_close(dev);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to query kbd illumination support failed\n");
- } else if (out[0] == TOS_SUCCESS) {
- /*
- * Check for keyboard backlight timeout max value,
- * previous kbd backlight implementation set this to
- * 0x3c0003, and now the new implementation set this
- * to 0x3c001a, use this to distinguish between them.
- */
- if (out[3] == SCI_KBD_TIME_MAX)
- dev->kbd_type = 2;
- else
- dev->kbd_type = 1;
- /* Get the current keyboard backlight mode */
- dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
- /* Get the current time (1-60 seconds) */
- dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
- /* Flag as supported */
- dev->kbd_illum_supported = 1;
+ return;
}
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ /*
+	 * Check the keyboard backlight timeout max value: the previous
+	 * kbd backlight implementation set this to 0x3c0003, while the
+	 * new implementation sets it to 0x3c001a; use this to
+	 * distinguish between them.
+ */
+ if (out[3] == SCI_KBD_TIME_MAX)
+ dev->kbd_type = 2;
+ else
+ dev->kbd_type = 1;
+ /* Get the current keyboard backlight mode */
+ dev->kbd_mode = out[2] & SCI_KBD_MODE_MASK;
+ /* Get the current time (1-60 seconds) */
+ dev->kbd_time = out[2] >> HCI_MISC_SHIFT;
+ /* Flag as supported */
+ dev->kbd_illum_supported = 1;
}
static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
@@ -672,9 +679,9 @@ static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
/* Eco Mode support */
static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
{
- acpi_status status;
u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 };
u32 out[TCI_WORDS];
+ acpi_status status;
dev->eco_supported = 0;
dev->eco_led_registered = false;
@@ -682,7 +689,10 @@ static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
status = tci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get ECO led failed\n");
- } else if (out[0] == TOS_INPUT_DATA_ERROR) {
+ return;
+ }
+
+ if (out[0] == TOS_INPUT_DATA_ERROR) {
/*
* If we receive 0x8300 (Input Data Error), it means that the
* LED device is present, but that we just screwed the input
@@ -694,10 +704,15 @@ static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
*/
in[3] = 1;
status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get ECO led failed\n");
- else if (out[0] == TOS_SUCCESS)
- dev->eco_supported = 1;
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ dev->eco_supported = 1;
}
}
@@ -714,10 +729,11 @@ toshiba_eco_mode_get_status(struct led_classdev *cdev)
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get ECO led failed\n");
return LED_OFF;
- } else if (out[0] != TOS_SUCCESS) {
- return LED_OFF;
}
+ if (out[0] != TOS_SUCCESS)
+ return LED_OFF;
+
return out[2] ? LED_FULL : LED_OFF;
}
@@ -751,10 +767,15 @@ static void toshiba_accelerometer_available(struct toshiba_acpi_dev *dev)
* this call also serves as initialization
*/
status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI call to query the accelerometer failed\n");
- else if (out[0] == TOS_SUCCESS)
- dev->accelerometer_supported = 1;
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ dev->accelerometer_supported = 1;
}
static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
@@ -769,15 +790,18 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to query the accelerometer failed\n");
return -EIO;
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- return -ENODEV;
- } else if (out[0] == TOS_SUCCESS) {
- *xy = out[2];
- *z = out[4];
- return 0;
}
- return -EIO;
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS)
+ return -EIO;
+
+ *xy = out[2];
+ *z = out[4];
+
+ return 0;
}
/* Sleep (Charge and Music) utilities support */
@@ -797,24 +821,29 @@ static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev)
pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
sci_close(dev);
return;
- } else if (out[0] == TOS_NOT_SUPPORTED) {
+ }
+
+ if (out[0] != TOS_SUCCESS) {
sci_close(dev);
return;
- } else if (out[0] == TOS_SUCCESS) {
- dev->usbsc_mode_base = out[4];
}
+ dev->usbsc_mode_base = out[4];
+
in[5] = SCI_USB_CHARGE_BAT_LVL;
status = tci_raw(dev, in, out);
sci_close(dev);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
- } else if (out[0] == TOS_SUCCESS) {
- dev->usbsc_bat_level = out[2];
- /* Flag as supported */
- dev->usb_sleep_charge_supported = 1;
+ return;
}
+ if (out[0] != TOS_SUCCESS)
+ return;
+
+ dev->usbsc_bat_level = out[2];
+ /* Flag as supported */
+ dev->usb_sleep_charge_supported = 1;
}
static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
@@ -868,14 +897,19 @@ static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
sci_close(dev);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get USB S&C battery level failed\n");
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- return -ENODEV;
- } else if (out[0] == TOS_SUCCESS) {
- *mode = out[2];
- return 0;
+ return -EIO;
}
- return -EIO;
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS)
+ return -EIO;
+
+ *mode = out[2];
+
+ return 0;
}
static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
@@ -892,9 +926,12 @@ static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
in[5] = SCI_USB_CHARGE_BAT_LVL;
status = tci_raw(dev, in, out);
sci_close(dev);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI call to set USB S&C battery level failed\n");
- else if (out[0] == TOS_NOT_SUPPORTED)
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
return -ENODEV;
return out[0] == TOS_SUCCESS ? 0 : -EIO;
@@ -915,14 +952,18 @@ static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
sci_close(dev);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get USB Rapid Charge failed\n");
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- return -ENODEV;
- } else if (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) {
- *state = out[2];
- return 0;
+ return -EIO;
}
- return -EIO;
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
+ return -EIO;
+
+ *state = out[2];
+
+ return 0;
}
static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
@@ -939,9 +980,12 @@ static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
in[5] = SCI_USB_CHARGE_RAPID_DSP;
status = tci_raw(dev, in, out);
sci_close(dev);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI call to set USB Rapid Charge failed\n");
- else if (out[0] == TOS_NOT_SUPPORTED)
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
return -ENODEV;
return (out[0] == TOS_SUCCESS || out[0] == TOS_SUCCESS2) ? 0 : -EIO;
@@ -1097,14 +1141,18 @@ static int toshiba_hotkey_event_type_get(struct toshiba_acpi_dev *dev,
status = tci_raw(dev, in, out);
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get System type failed\n");
- } else if (out[0] == TOS_NOT_SUPPORTED) {
- return -ENODEV;
- } else if (out[0] == TOS_SUCCESS) {
- *type = out[3];
- return 0;
+ return -EIO;
}
- return -EIO;
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] != TOS_SUCCESS)
+ return -EIO;
+
+ *type = out[3];
+
+ return 0;
}
/* Wireless status (RFKill, WLAN, BT, WWAN) */
@@ -1154,7 +1202,6 @@ static void toshiba_wwan_available(struct toshiba_acpi_dev *dev)
*/
in[3] = HCI_WIRELESS_WWAN;
status = tci_raw(dev, in, out);
-
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get WWAN status failed\n");
return;
@@ -1174,7 +1221,6 @@ static int toshiba_wwan_set(struct toshiba_acpi_dev *dev, u32 state)
in[3] = HCI_WIRELESS_WWAN_STATUS;
status = tci_raw(dev, in, out);
-
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to set WWAN status failed\n");
return -EIO;
@@ -1193,7 +1239,6 @@ static int toshiba_wwan_set(struct toshiba_acpi_dev *dev, u32 state)
*/
in[3] = HCI_WIRELESS_WWAN_POWER;
status = tci_raw(dev, in, out);
-
if (ACPI_FAILURE(status)) {
pr_err("ACPI call to set WWAN power failed\n");
return -EIO;
@@ -1216,8 +1261,10 @@ static void toshiba_cooling_method_available(struct toshiba_acpi_dev *dev)
dev->max_cooling_method = 0;
status = tci_raw(dev, in, out);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
pr_err("ACPI call to get Cooling Method failed\n");
+ return;
+ }
if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
return;
@@ -1244,7 +1291,7 @@ static int toshiba_cooling_method_set(struct toshiba_acpi_dev *dev, u32 state)
u32 result = hci_write(dev, HCI_COOLING_METHOD, state);
if (result == TOS_FAILURE)
- pr_err("ACPI call to get Cooling Method failed\n");
+ pr_err("ACPI call to set Cooling Method failed\n");
if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
@@ -1282,9 +1329,9 @@ static struct proc_dir_entry *toshiba_proc_dir;
/* LCD Brightness */
static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
{
+ int brightness = 0;
u32 result;
u32 value;
- int brightness = 0;
if (dev->tr_backlight_supported) {
int ret = get_tr_backlight_status(dev, &value);
@@ -1301,10 +1348,10 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
pr_err("ACPI call to get LCD Brightness failed\n");
else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- if (result == TOS_SUCCESS)
- return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
- return -EIO;
+ return result == TOS_SUCCESS ?
+ brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT) :
+ -EIO;
}
static int get_lcd_brightness(struct backlight_device *bd)
@@ -1325,15 +1372,15 @@ static int lcd_proc_show(struct seq_file *m, void *v)
levels = dev->backlight_dev->props.max_brightness + 1;
value = get_lcd_brightness(dev->backlight_dev);
- if (value >= 0) {
- seq_printf(m, "brightness: %d\n", value);
- seq_printf(m, "brightness_levels: %d\n", levels);
- return 0;
+ if (value < 0) {
+ pr_err("Error reading LCD brightness\n");
+ return value;
}
- pr_err("Error reading LCD brightness\n");
+ seq_printf(m, "brightness: %d\n", value);
+ seq_printf(m, "brightness_levels: %d\n", levels);
- return -EIO;
+ return 0;
}
static int lcd_proc_open(struct inode *inode, struct file *file)
@@ -1377,7 +1424,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char cmd[42];
size_t len;
- int levels = dev->backlight_dev->props.max_brightness + 1;
+ int levels;
int value;
len = min(count, sizeof(cmd) - 1);
@@ -1385,6 +1432,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
return -EFAULT;
cmd[len] = '\0';
+ levels = dev->backlight_dev->props.max_brightness + 1;
if (sscanf(cmd, " brightness : %i", &value) != 1 &&
value < 0 && value > levels)
return -EINVAL;
@@ -1420,20 +1468,21 @@ static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status)
static int video_proc_show(struct seq_file *m, void *v)
{
struct toshiba_acpi_dev *dev = m->private;
+ int is_lcd, is_crt, is_tv;
u32 value;
- if (!get_video_status(dev, &value)) {
- int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
- int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
- int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
+ if (get_video_status(dev, &value))
+ return -EIO;
- seq_printf(m, "lcd_out: %d\n", is_lcd);
- seq_printf(m, "crt_out: %d\n", is_crt);
- seq_printf(m, "tv_out: %d\n", is_tv);
- return 0;
- }
+ is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
+ is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
+ is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
- return -EIO;
+ seq_printf(m, "lcd_out: %d\n", is_lcd);
+ seq_printf(m, "crt_out: %d\n", is_crt);
+ seq_printf(m, "tv_out: %d\n", is_tv);
+
+ return 0;
}
static int video_proc_open(struct inode *inode, struct file *file)
@@ -1447,10 +1496,8 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
char *buffer;
char *cmd;
+ int lcd_out, crt_out, tv_out;
int remain = count;
- int lcd_out = -1;
- int crt_out = -1;
- int tv_out = -1;
int value;
int ret;
u32 video_out;
@@ -1486,6 +1533,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
kfree(cmd);
+ lcd_out = crt_out = tv_out = -1;
ret = get_video_status(dev, &video_out);
if (!ret) {
unsigned int new_video_out = video_out;
@@ -1980,8 +2028,8 @@ static ssize_t usb_sleep_charge_store(struct device *dev,
const char *buf, size_t count)
{
struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
- u32 mode;
int state;
+ u32 mode;
int ret;
ret = kstrtoint(buf, 0, &state);
@@ -2021,9 +2069,8 @@ static ssize_t sleep_functions_on_battery_show(struct device *dev,
char *buf)
{
struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int bat_lvl, status;
u32 state;
- int bat_lvl;
- int status;
int ret;
int tmp;
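
[Note: the long toshiba_acpi diff is one refactor applied many times: every tci_raw() caller is flattened from nested else-if chains into early returns, with a consistent mapping of outcomes to error codes. The canonical shape the file converges on, sketched as a hypothetical query helper:]

static int demo_query(struct toshiba_acpi_dev *dev, u32 in[TCI_WORDS],
		      u32 out[TCI_WORDS], u32 *value)
{
	acpi_status status = tci_raw(dev, in, out);

	if (ACPI_FAILURE(status)) {
		pr_err("ACPI call failed\n");
		return -EIO;
	}

	if (out[0] == TOS_NOT_SUPPORTED)
		return -ENODEV;

	if (out[0] != TOS_SUCCESS)
		return -EIO;

	*value = out[2];

	return 0;
}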
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 5db495dd018e..be1d137c6079 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -80,7 +80,9 @@ static int toshiba_bluetooth_present(acpi_handle handle)
if (ACPI_FAILURE(result)) {
pr_err("ACPI call to query Bluetooth presence failed\n");
return -ENXIO;
- } else if (!bt_present) {
+ }
+
+ if (!bt_present) {
pr_info("Bluetooth device not present\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c
index 7f2afc6b5eb9..b3dec521e2b6 100644
--- a/drivers/platform/x86/toshiba_haps.c
+++ b/drivers/platform/x86/toshiba_haps.c
@@ -59,7 +59,7 @@ static int toshiba_haps_protection_level(acpi_handle handle, int level)
return -EIO;
}
- pr_info("HDD protection level set to: %d\n", level);
+ pr_debug("HDD protection level set to: %d\n", level);
return 0;
}
@@ -141,7 +141,7 @@ static struct attribute_group haps_attr_group = {
*/
static void toshiba_haps_notify(struct acpi_device *device, u32 event)
{
- pr_info("Received event: 0x%x", event);
+ pr_debug("Received event: 0x%x", event);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev),
@@ -168,9 +168,13 @@ static int toshiba_haps_available(acpi_handle handle)
* A non existent device as well as having (only)
* Solid State Drives can cause the call to fail.
*/
- status = acpi_evaluate_integer(handle, "_STA", NULL,
- &hdd_present);
- if (ACPI_FAILURE(status) || !hdd_present) {
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &hdd_present);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to query HDD protection failed\n");
+ return 0;
+ }
+
+ if (!hdd_present) {
pr_info("HDD protection not available or using SSD\n");
return 0;
}
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 7512e98e9311..564a51abeece 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -31,7 +31,7 @@ config PPS_DEBUG
config NTP_PPS
bool "PPS kernel consumer support"
- depends on !NO_HZ
+ depends on !NO_HZ_COMMON
help
This option adds support for direct in-kernel time
synchronization using an external PPS signal.
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index 632701a1d993..b7f300b79ffd 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -958,7 +958,7 @@ static int ps3_vuart_bus_interrupt_get(void)
fail_request_irq:
ps3_vuart_irq_destroy(vuart_bus_priv.virq);
- vuart_bus_priv.virq = NO_IRQ;
+ vuart_bus_priv.virq = 0;
fail_alloc_irq:
kfree(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = NULL;
@@ -982,7 +982,7 @@ static int ps3_vuart_bus_interrupt_put(void)
free_irq(vuart_bus_priv.virq, &vuart_bus_priv);
ps3_vuart_irq_destroy(vuart_bus_priv.virq);
- vuart_bus_priv.virq = NO_IRQ;
+ vuart_bus_priv.virq = 0;
kfree(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = NULL;
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index d637c933c8a9..58a97d420572 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -193,6 +193,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
if (err)
break;
+ memset(&precise_offset, 0, sizeof(precise_offset));
ts = ktime_to_timespec64(xtstamp.device);
precise_offset.device.sec = ts.tv_sec;
precise_offset.device.nsec = ts.tv_nsec;
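
[Note: the added memset() is a hardening fix rather than a functional one. precise_offset is a stack struct that is subsequently copied to userspace, and any compiler-inserted padding would otherwise leak stale kernel stack bytes. The general rule, sketched — the copy_to_user() destination here is illustrative:]

	struct ptp_sys_offset_precise precise_offset;

	/* Zero first: field assignments don't clear padding bytes. */
	memset(&precise_offset, 0, sizeof(precise_offset));
	/* ... fill individual fields ... */
	if (copy_to_user((void __user *)arg, &precise_offset,
			 sizeof(precise_offset)))
		return -EFAULT;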
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 80a566a00d04..bf0128899c09 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -262,6 +262,15 @@ config PWM_LPSS_PLATFORM
To compile this driver as a module, choose M here: the module
will be called pwm-lpss-platform.
+config PWM_MESON
+ tristate "Amlogic Meson PWM driver"
+ depends on ARCH_MESON
+ help
+ The platform driver for Amlogic Meson PWM controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-meson.
+
config PWM_MTK_DISP
tristate "MediaTek display PWM driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index feef1dd29f73..1194c54efcc2 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o
obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o
obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o
+obj-$(CONFIG_PWM_MESON) += pwm-meson.o
obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o
obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 0dbd29e287db..172ef8245811 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -339,6 +339,8 @@ int pwmchip_remove(struct pwm_chip *chip)
unsigned int i;
int ret = 0;
+ pwmchip_sysfs_unexport_children(chip);
+
mutex_lock(&pwm_lock);
for (i = 0; i < chip->npwm; i++) {
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 65108129d505..01339c152ab0 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
+#include <linux/slab.h>
#define BERLIN_PWM_EN 0x0
#define BERLIN_PWM_ENABLE BIT(0)
@@ -27,6 +28,13 @@
#define BERLIN_PWM_TCNT 0xc
#define BERLIN_PWM_MAX_TCNT 65535
+struct berlin_pwm_channel {
+ u32 enable;
+ u32 ctrl;
+ u32 duty;
+ u32 tcnt;
+};
+
struct berlin_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
@@ -55,6 +63,25 @@ static inline void berlin_pwm_writel(struct berlin_pwm_chip *chip,
writel_relaxed(value, chip->base + channel * 0x10 + offset);
}
+static int berlin_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct berlin_pwm_channel *channel;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return -ENOMEM;
+
+ return pwm_set_chip_data(pwm, channel);
+}
+
+static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm);
+
+ pwm_set_chip_data(pwm, NULL);
+ kfree(channel);
+}
+
static int berlin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm_dev,
int duty_ns, int period_ns)
{
@@ -137,6 +164,8 @@ static void berlin_pwm_disable(struct pwm_chip *chip,
}
static const struct pwm_ops berlin_pwm_ops = {
+ .request = berlin_pwm_request,
+ .free = berlin_pwm_free,
.config = berlin_pwm_config,
.set_polarity = berlin_pwm_set_polarity,
.enable = berlin_pwm_enable,
@@ -204,12 +233,67 @@ static int berlin_pwm_remove(struct platform_device *pdev)
return ret;
}
+#ifdef CONFIG_PM_SLEEP
+static int berlin_pwm_suspend(struct device *dev)
+{
+ struct berlin_pwm_chip *pwm = dev_get_drvdata(dev);
+ unsigned int i;
+
+ for (i = 0; i < pwm->chip.npwm; i++) {
+ struct berlin_pwm_channel *channel;
+
+ channel = pwm_get_chip_data(&pwm->chip.pwms[i]);
+ if (!channel)
+ continue;
+
+ channel->enable = berlin_pwm_readl(pwm, i, BERLIN_PWM_ENABLE);
+ channel->ctrl = berlin_pwm_readl(pwm, i, BERLIN_PWM_CONTROL);
+ channel->duty = berlin_pwm_readl(pwm, i, BERLIN_PWM_DUTY);
+ channel->tcnt = berlin_pwm_readl(pwm, i, BERLIN_PWM_TCNT);
+ }
+
+ clk_disable_unprepare(pwm->clk);
+
+ return 0;
+}
+
+static int berlin_pwm_resume(struct device *dev)
+{
+ struct berlin_pwm_chip *pwm = dev_get_drvdata(dev);
+ unsigned int i;
+ int ret;
+
+ ret = clk_prepare_enable(pwm->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pwm->chip.npwm; i++) {
+ struct berlin_pwm_channel *channel;
+
+ channel = pwm_get_chip_data(&pwm->chip.pwms[i]);
+ if (!channel)
+ continue;
+
+ berlin_pwm_writel(pwm, i, channel->ctrl, BERLIN_PWM_CONTROL);
+ berlin_pwm_writel(pwm, i, channel->duty, BERLIN_PWM_DUTY);
+ berlin_pwm_writel(pwm, i, channel->tcnt, BERLIN_PWM_TCNT);
+ berlin_pwm_writel(pwm, i, channel->enable, BERLIN_PWM_ENABLE);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend,
+ berlin_pwm_resume);
+
static struct platform_driver berlin_pwm_driver = {
.probe = berlin_pwm_probe,
.remove = berlin_pwm_remove,
.driver = {
.name = "berlin-pwm",
.of_match_table = berlin_pwm_match,
+ .pm = &berlin_pwm_pm_ops,
},
};
module_platform_driver(berlin_pwm_driver);
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 99b9acc1a420..f6ca4e8c6253 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -38,7 +38,7 @@ static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
struct {
struct cros_ec_command msg;
struct ec_params_pwm_set_duty params;
- } buf;
+ } __packed buf;
struct ec_params_pwm_set_duty *params = &buf.params;
struct cros_ec_command *msg = &buf.msg;
@@ -65,7 +65,7 @@ static int __cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index,
struct ec_params_pwm_get_duty params;
struct ec_response_pwm_get_duty resp;
};
- } buf;
+ } __packed buf;
struct ec_params_pwm_get_duty *params = &buf.params;
struct ec_response_pwm_get_duty *resp = &buf.resp;
struct cros_ec_command *msg = &buf.msg;
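
[Note: the __packed annotation matters because the EC consumes the command header and the parameter block as one contiguous byte stream; if the compiler inserts padding between msg and params, the parameters shift and the EC parses garbage. Minimal illustration, with the layout shown for explanation only:]

	struct {
		struct cros_ec_command msg;		/* header first */
		struct ec_params_pwm_set_duty params;	/* must follow with no gap */
	} __packed buf;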
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 19dc64cab2f0..d7f5f7de030d 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -413,14 +413,18 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
}
for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) {
+ struct lpc18xx_pwm_data *data;
+
pwm = &lpc18xx_pwm->chip.pwms[i];
- pwm->chip_data = devm_kzalloc(lpc18xx_pwm->dev,
- sizeof(struct lpc18xx_pwm_data),
- GFP_KERNEL);
- if (!pwm->chip_data) {
+
+ data = devm_kzalloc(lpc18xx_pwm->dev, sizeof(*data),
+ GFP_KERNEL);
+ if (!data) {
ret = -ENOMEM;
goto remove_pwmchip;
}
+
+ pwm_set_chip_data(pwm, data);
}
platform_set_drvdata(pdev, lpc18xx_pwm);
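
[Note: both PWM changes above (berlin and lpc18xx) move per-channel state behind the pwm_set_chip_data()/pwm_get_chip_data() accessors instead of poking pwm->chip_data directly. The lifecycle, sketched for a hypothetical driver with a made-up demo_channel type:]

struct demo_channel {
	u32 saved;
};

static int demo_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct demo_channel *channel = kzalloc(sizeof(*channel), GFP_KERNEL);

	if (!channel)
		return -ENOMEM;

	return pwm_set_chip_data(pwm, channel);
}

static void demo_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct demo_channel *channel = pwm_get_chip_data(pwm);

	pwm_set_chip_data(pwm, NULL);
	kfree(channel);
}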
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
new file mode 100644
index 000000000000..381871b2bb46
--- /dev/null
+++ b/drivers/pwm/pwm-meson.c
@@ -0,0 +1,529 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define REG_PWM_A 0x0
+#define REG_PWM_B 0x4
+#define PWM_HIGH_SHIFT 16
+
+#define REG_MISC_AB 0x8
+#define MISC_B_CLK_EN BIT(23)
+#define MISC_A_CLK_EN BIT(15)
+#define MISC_CLK_DIV_MASK 0x7f
+#define MISC_B_CLK_DIV_SHIFT 16
+#define MISC_A_CLK_DIV_SHIFT 8
+#define MISC_B_CLK_SEL_SHIFT 6
+#define MISC_A_CLK_SEL_SHIFT 4
+#define MISC_CLK_SEL_WIDTH 2
+#define MISC_B_EN BIT(1)
+#define MISC_A_EN BIT(0)
+
+static const unsigned int mux_reg_shifts[] = {
+ MISC_A_CLK_SEL_SHIFT,
+ MISC_B_CLK_SEL_SHIFT
+};
+
+struct meson_pwm_channel {
+ unsigned int hi;
+ unsigned int lo;
+ u8 pre_div;
+
+ struct pwm_state state;
+
+ struct clk *clk_parent;
+ struct clk_mux mux;
+ struct clk *clk;
+};
+
+struct meson_pwm_data {
+ const char * const *parent_names;
+};
+
+struct meson_pwm {
+ struct pwm_chip chip;
+ const struct meson_pwm_data *data;
+ void __iomem *base;
+ u8 inverter_mask;
+ spinlock_t lock;
+};
+
+static inline struct meson_pwm *to_meson_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct meson_pwm, chip);
+}
+
+static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+ struct device *dev = chip->dev;
+ int err;
+
+ if (!channel)
+ return -ENODEV;
+
+ if (channel->clk_parent) {
+ err = clk_set_parent(channel->clk, channel->clk_parent);
+ if (err < 0) {
+ dev_err(dev, "failed to set parent %s for %s: %d\n",
+ __clk_get_name(channel->clk_parent),
+ __clk_get_name(channel->clk), err);
+ return err;
+ }
+ }
+
+ err = clk_prepare_enable(channel->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock %s: %d\n",
+ __clk_get_name(channel->clk), err);
+ return err;
+ }
+
+ chip->ops->get_state(chip, pwm, &channel->state);
+
+ return 0;
+}
+
+static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+
+ if (channel)
+ clk_disable_unprepare(channel->clk);
+}
+
+static int meson_pwm_calc(struct meson_pwm *meson,
+ struct meson_pwm_channel *channel, unsigned int id,
+ unsigned int duty, unsigned int period)
+{
+ unsigned int pre_div, cnt, duty_cnt;
+	unsigned long fin_freq, fin_ns;
+
+ if (~(meson->inverter_mask >> id) & 0x1)
+ duty = period - duty;
+
+ if (period == channel->state.period &&
+ duty == channel->state.duty_cycle)
+ return 0;
+
+ fin_freq = clk_get_rate(channel->clk);
+ if (fin_freq == 0) {
+ dev_err(meson->chip.dev, "invalid source clock frequency\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
+ fin_ns = NSEC_PER_SEC / fin_freq;
+
+ /* Calc pre_div with the period */
+ for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) {
+ cnt = DIV_ROUND_CLOSEST(period, fin_ns * (pre_div + 1));
+ dev_dbg(meson->chip.dev, "fin_ns=%lu pre_div=%u cnt=%u\n",
+ fin_ns, pre_div, cnt);
+ if (cnt <= 0xffff)
+ break;
+ }
+
+ if (pre_div == MISC_CLK_DIV_MASK) {
+ dev_err(meson->chip.dev, "unable to get period pre_div\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period,
+ pre_div, cnt);
+
+ if (duty == period) {
+ channel->pre_div = pre_div;
+ channel->hi = cnt;
+ channel->lo = 0;
+ } else if (duty == 0) {
+ channel->pre_div = pre_div;
+ channel->hi = 0;
+ channel->lo = cnt;
+ } else {
+		/* Then check if we can have the duty with the same pre_div */
+ duty_cnt = DIV_ROUND_CLOSEST(duty, fin_ns * (pre_div + 1));
+ if (duty_cnt > 0xffff) {
+ dev_err(meson->chip.dev, "unable to get duty cycle\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(meson->chip.dev, "duty=%u pre_div=%u duty_cnt=%u\n",
+ duty, pre_div, duty_cnt);
+
+ channel->pre_div = pre_div;
+ channel->hi = duty_cnt;
+ channel->lo = cnt - duty_cnt;
+ }
+
+ return 0;
+}
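To see the divider search in meson_pwm_calc() with concrete numbers, here is a minimal user-space sketch of the same arithmetic. The 24 MHz input rate is an assumption for illustration (a typical xtal rate), not something the driver guarantees:

#include <stdio.h>

#define NSEC_PER_SEC		1000000000UL
#define MISC_CLK_DIV_MASK	0x7f
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long fin_freq = 24000000;		/* assumed 24 MHz */
	unsigned long fin_ns = NSEC_PER_SEC / fin_freq;	/* 41 ns per tick */
	unsigned int period = 5000000;			/* 5 ms request */
	unsigned int pre_div, cnt = 0;

	/* Smallest divider whose count fits the 16-bit hi/lo fields. */
	for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) {
		cnt = DIV_ROUND_CLOSEST(period, fin_ns * (pre_div + 1));
		if (cnt <= 0xffff)
			break;
	}

	/* pre_div 0 and 1 give ~121951 and ~60976, both over 0xffff;
	 * pre_div 2 gives 40650, which fits. */
	printf("pre_div=%u cnt=%u\n", pre_div, cnt);
	return 0;
}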
+
+static void meson_pwm_enable(struct meson_pwm *meson,
+ struct meson_pwm_channel *channel,
+ unsigned int id)
+{
+ u32 value, clk_shift, clk_enable, enable;
+ unsigned int offset;
+
+ switch (id) {
+ case 0:
+ clk_shift = MISC_A_CLK_DIV_SHIFT;
+ clk_enable = MISC_A_CLK_EN;
+ enable = MISC_A_EN;
+ offset = REG_PWM_A;
+ break;
+
+ case 1:
+ clk_shift = MISC_B_CLK_DIV_SHIFT;
+ clk_enable = MISC_B_CLK_EN;
+ enable = MISC_B_EN;
+ offset = REG_PWM_B;
+ break;
+
+ default:
+ return;
+ }
+
+ value = readl(meson->base + REG_MISC_AB);
+ value &= ~(MISC_CLK_DIV_MASK << clk_shift);
+ value |= channel->pre_div << clk_shift;
+ value |= clk_enable;
+ writel(value, meson->base + REG_MISC_AB);
+
+ value = (channel->hi << PWM_HIGH_SHIFT) | channel->lo;
+ writel(value, meson->base + offset);
+
+ value = readl(meson->base + REG_MISC_AB);
+ value |= enable;
+ writel(value, meson->base + REG_MISC_AB);
+}
+
+static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
+{
+ u32 value, enable;
+
+ switch (id) {
+ case 0:
+ enable = MISC_A_EN;
+ break;
+
+ case 1:
+ enable = MISC_B_EN;
+ break;
+
+ default:
+ return;
+ }
+
+ value = readl(meson->base + REG_MISC_AB);
+ value &= ~enable;
+ writel(value, meson->base + REG_MISC_AB);
+}
+
+static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
+ struct meson_pwm *meson = to_meson_pwm(chip);
+ unsigned long flags;
+ int err = 0;
+
+ if (!state)
+ return -EINVAL;
+
+ spin_lock_irqsave(&meson->lock, flags);
+
+ if (!state->enabled) {
+ meson_pwm_disable(meson, pwm->hwpwm);
+ channel->state.enabled = false;
+
+ goto unlock;
+ }
+
+ if (state->period != channel->state.period ||
+ state->duty_cycle != channel->state.duty_cycle ||
+ state->polarity != channel->state.polarity) {
+ if (channel->state.enabled) {
+ meson_pwm_disable(meson, pwm->hwpwm);
+ channel->state.enabled = false;
+ }
+
+ if (state->polarity != channel->state.polarity) {
+ if (state->polarity == PWM_POLARITY_NORMAL)
+ meson->inverter_mask |= BIT(pwm->hwpwm);
+ else
+ meson->inverter_mask &= ~BIT(pwm->hwpwm);
+ }
+
+ err = meson_pwm_calc(meson, channel, pwm->hwpwm,
+ state->duty_cycle, state->period);
+ if (err < 0)
+ goto unlock;
+
+ channel->state.polarity = state->polarity;
+ channel->state.period = state->period;
+ channel->state.duty_cycle = state->duty_cycle;
+ }
+
+ if (state->enabled && !channel->state.enabled) {
+ meson_pwm_enable(meson, channel, pwm->hwpwm);
+ channel->state.enabled = true;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&meson->lock, flags);
+ return err;
+}
+
+static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct meson_pwm *meson = to_meson_pwm(chip);
+ u32 value, mask;
+
+ if (!state)
+ return;
+
+ switch (pwm->hwpwm) {
+ case 0:
+ mask = MISC_A_EN;
+ break;
+
+ case 1:
+ mask = MISC_B_EN;
+ break;
+
+ default:
+ return;
+ }
+
+ value = readl(meson->base + REG_MISC_AB);
+ state->enabled = (value & mask) != 0;
+}
+
+static const struct pwm_ops meson_pwm_ops = {
+ .request = meson_pwm_request,
+ .free = meson_pwm_free,
+ .apply = meson_pwm_apply,
+ .get_state = meson_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static const char * const pwm_meson8b_parent_names[] = {
+ "xtal", "vid_pll", "fclk_div4", "fclk_div3"
+};
+
+static const struct meson_pwm_data pwm_meson8b_data = {
+ .parent_names = pwm_meson8b_parent_names,
+};
+
+static const char * const pwm_gxbb_parent_names[] = {
+ "xtal", "hdmi_pll", "fclk_div4", "fclk_div3"
+};
+
+static const struct meson_pwm_data pwm_gxbb_data = {
+ .parent_names = pwm_gxbb_parent_names,
+};
+
+static const struct of_device_id meson_pwm_matches[] = {
+ { .compatible = "amlogic,meson8b-pwm", .data = &pwm_meson8b_data },
+ { .compatible = "amlogic,meson-gxbb-pwm", .data = &pwm_gxbb_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, meson_pwm_matches);
+
+static int meson_pwm_init_channels(struct meson_pwm *meson,
+ struct meson_pwm_channel *channels)
+{
+ struct device *dev = meson->chip.dev;
+ struct device_node *np = dev->of_node;
+ struct clk_init_data init;
+ unsigned int i;
+ char name[255];
+ int err;
+
+ for (i = 0; i < meson->chip.npwm; i++) {
+ struct meson_pwm_channel *channel = &channels[i];
+
+ snprintf(name, sizeof(name), "%s#mux%u", np->full_name, i);
+
+ init.name = name;
+ init.ops = &clk_mux_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = meson->data->parent_names;
+ init.num_parents = 1 << MISC_CLK_SEL_WIDTH;
+
+ channel->mux.reg = meson->base + REG_MISC_AB;
+ channel->mux.shift = mux_reg_shifts[i];
+ channel->mux.mask = BIT(MISC_CLK_SEL_WIDTH) - 1;
+ channel->mux.flags = 0;
+ channel->mux.lock = &meson->lock;
+ channel->mux.table = NULL;
+ channel->mux.hw.init = &init;
+
+ channel->clk = devm_clk_register(dev, &channel->mux.hw);
+ if (IS_ERR(channel->clk)) {
+ err = PTR_ERR(channel->clk);
+ dev_err(dev, "failed to register %s: %d\n", name, err);
+ return err;
+ }
+
+ snprintf(name, sizeof(name), "clkin%u", i);
+
+ channel->clk_parent = devm_clk_get(dev, name);
+ if (IS_ERR(channel->clk_parent)) {
+ err = PTR_ERR(channel->clk_parent);
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ channel->clk_parent = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void meson_pwm_add_channels(struct meson_pwm *meson,
+ struct meson_pwm_channel *channels)
+{
+ unsigned int i;
+
+ for (i = 0; i < meson->chip.npwm; i++)
+ pwm_set_chip_data(&meson->chip.pwms[i], &channels[i]);
+}
+
+static int meson_pwm_probe(struct platform_device *pdev)
+{
+ struct meson_pwm_channel *channels;
+ struct meson_pwm *meson;
+ struct resource *regs;
+ int err;
+
+ meson = devm_kzalloc(&pdev->dev, sizeof(*meson), GFP_KERNEL);
+ if (!meson)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ meson->base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(meson->base))
+ return PTR_ERR(meson->base);
+
+ meson->chip.dev = &pdev->dev;
+ meson->chip.ops = &meson_pwm_ops;
+ meson->chip.base = -1;
+ meson->chip.npwm = 2;
+ meson->chip.of_xlate = of_pwm_xlate_with_flags;
+ meson->chip.of_pwm_n_cells = 3;
+
+ meson->data = of_device_get_match_data(&pdev->dev);
+ meson->inverter_mask = BIT(meson->chip.npwm) - 1;
+
+	channels = devm_kcalloc(&pdev->dev, meson->chip.npwm, sizeof(*channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ err = meson_pwm_init_channels(meson, channels);
+ if (err < 0)
+ return err;
+
+ err = pwmchip_add(&meson->chip);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to register PWM chip: %d\n", err);
+ return err;
+ }
+
+ meson_pwm_add_channels(meson, channels);
+
+ platform_set_drvdata(pdev, meson);
+
+ return 0;
+}
+
+static int meson_pwm_remove(struct platform_device *pdev)
+{
+ struct meson_pwm *meson = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&meson->chip);
+}
+
+static struct platform_driver meson_pwm_driver = {
+ .driver = {
+ .name = "meson-pwm",
+ .of_match_table = meson_pwm_matches,
+ },
+ .probe = meson_pwm_probe,
+ .remove = meson_pwm_remove,
+};
+module_platform_driver(meson_pwm_driver);
+
+MODULE_ALIAS("platform:meson-pwm");
+MODULE_DESCRIPTION("Amlogic Meson PWM Generator driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 0ad3385298c0..893940d45f0d 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -18,30 +18,40 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
#define DISP_PWM_EN 0x00
-#define PWM_ENABLE_MASK BIT(0)
-#define DISP_PWM_COMMIT 0x08
-#define PWM_COMMIT_MASK BIT(0)
-
-#define DISP_PWM_CON_0 0x10
#define PWM_CLKDIV_SHIFT 16
#define PWM_CLKDIV_MAX 0x3ff
#define PWM_CLKDIV_MASK (PWM_CLKDIV_MAX << PWM_CLKDIV_SHIFT)
-#define DISP_PWM_CON_1 0x14
#define PWM_PERIOD_BIT_WIDTH 12
#define PWM_PERIOD_MASK ((1 << PWM_PERIOD_BIT_WIDTH) - 1)
#define PWM_HIGH_WIDTH_SHIFT 16
#define PWM_HIGH_WIDTH_MASK (0x1fff << PWM_HIGH_WIDTH_SHIFT)
+struct mtk_pwm_data {
+ u32 enable_mask;
+ unsigned int con0;
+ u32 con0_sel;
+ unsigned int con1;
+
+ bool has_commit;
+ unsigned int commit;
+ unsigned int commit_mask;
+
+ unsigned int bls_debug;
+ u32 bls_debug_mask;
+};
+
struct mtk_disp_pwm {
struct pwm_chip chip;
+ const struct mtk_pwm_data *data;
struct clk *clk_main;
struct clk *clk_mm;
void __iomem *base;
@@ -106,12 +116,21 @@ static int mtk_disp_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return err;
}
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_CON_0, PWM_CLKDIV_MASK,
+ mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ PWM_CLKDIV_MASK,
clk_div << PWM_CLKDIV_SHIFT);
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_CON_1,
- PWM_PERIOD_MASK | PWM_HIGH_WIDTH_MASK, value);
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_COMMIT, PWM_COMMIT_MASK, 1);
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_COMMIT, PWM_COMMIT_MASK, 0);
+ mtk_disp_pwm_update_bits(mdp, mdp->data->con1,
+ PWM_PERIOD_MASK | PWM_HIGH_WIDTH_MASK,
+ value);
+
+ if (mdp->data->has_commit) {
+ mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
+ mdp->data->commit_mask,
+ mdp->data->commit_mask);
+ mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
+ mdp->data->commit_mask,
+ 0x0);
+ }
clk_disable(mdp->clk_mm);
clk_disable(mdp->clk_main);
@@ -134,7 +153,8 @@ static int mtk_disp_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return err;
}
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, PWM_ENABLE_MASK, 1);
+ mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+ mdp->data->enable_mask);
return 0;
}
@@ -143,7 +163,8 @@ static void mtk_disp_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct mtk_disp_pwm *mdp = to_mtk_disp_pwm(chip);
- mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, PWM_ENABLE_MASK, 0);
+ mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+ 0x0);
clk_disable(mdp->clk_mm);
clk_disable(mdp->clk_main);
@@ -166,6 +187,8 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
if (!mdp)
return -ENOMEM;
+ mdp->data = of_device_get_match_data(&pdev->dev);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdp->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(mdp->base))
@@ -200,6 +223,19 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mdp);
+	/*
+	 * For MT2701, disable the double buffer before writing the
+	 * registers, and select manual mode, which uses PWM_PERIOD and
+	 * PWM_HIGH_WIDTH directly.
+	 */
+ if (!mdp->data->has_commit) {
+ mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+ mdp->data->bls_debug_mask,
+ mdp->data->bls_debug_mask);
+ mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ mdp->data->con0_sel,
+ mdp->data->con0_sel);
+ }
+
return 0;
disable_clk_mm:
@@ -221,9 +257,30 @@ static int mtk_disp_pwm_remove(struct platform_device *pdev)
return ret;
}
+static const struct mtk_pwm_data mt2701_pwm_data = {
+ .enable_mask = BIT(16),
+ .con0 = 0xa8,
+ .con0_sel = 0x2,
+ .con1 = 0xac,
+ .has_commit = false,
+ .bls_debug = 0xb0,
+ .bls_debug_mask = 0x3,
+};
+
+static const struct mtk_pwm_data mt8173_pwm_data = {
+ .enable_mask = BIT(0),
+ .con0 = 0x10,
+ .con0_sel = 0x0,
+ .con1 = 0x14,
+ .has_commit = true,
+ .commit = 0x8,
+ .commit_mask = 0x1,
+};
+
static const struct of_device_id mtk_disp_pwm_of_match[] = {
- { .compatible = "mediatek,mt8173-disp-pwm" },
- { .compatible = "mediatek,mt6595-disp-pwm" },
+ { .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data},
+ { .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data},
+ { .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data},
{ }
};
MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match);
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index ada2d326dc3e..f113cda47032 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -193,9 +193,18 @@ static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *chip,
* divider settings and choose the lowest divisor that can generate
* frequencies lower than requested.
*/
- for (div = variant->div_base; div < 4; ++div)
- if ((rate >> (variant->bits + div)) < freq)
- break;
+ if (variant->bits < 32) {
+ /* Only for s3c24xx */
+ for (div = variant->div_base; div < 4; ++div)
+ if ((rate >> (variant->bits + div)) < freq)
+ break;
+ } else {
+ /*
+ * Other variants have enough counter bits to generate any
+ * requested rate, so no need to check higher divisors.
+ */
+ div = variant->div_base;
+ }
pwm_samsung_set_divisor(chip, chan, BIT(div));
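The reasoning in the s3c24xx branch above can be checked with a standalone calculation; the clock rate and requested frequency below are assumptions for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long rate = 66000000;	/* assumed timer input clock */
	unsigned long freq = 300;	/* requested PWM frequency, Hz */
	unsigned int bits = 16;		/* s3c24xx-style 16-bit counter */
	unsigned int div;

	/*
	 * rate >> (bits + div) is the lowest output frequency reachable
	 * with divider 2^div (full counter range); pick the first
	 * divider that can go below the request.
	 */
	for (div = 0; div < 4; ++div)
		if ((rate >> (bits + div)) < freq)
			break;

	/* 66 MHz >> 16 = 1007 Hz, >> 17 = 503 Hz, >> 18 = 251 Hz < 300,
	 * so div = 2 and the divisor is 4. */
	printf("div=%u divisor=%u\n", div, 1U << div);
	return 0;
}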
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 92abbd56b9f7..dd82dc840af9 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -1,8 +1,10 @@
/*
- * PWM device driver for ST SoCs.
- * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ * PWM device driver for ST SoCs
+ *
+ * Copyright (C) 2013-2016 STMicroelectronics (R&D) Limited
*
- * Copyright (C) 2013-2014 STMicroelectronics (R&D) Limited
+ * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ * Lee Jones <lee.jones@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,6 +13,7 @@
*/
#include <linux/clk.h>
+#include <linux/interrupt.h>
#include <linux/math64.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -18,43 +21,82 @@
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
+#include <linux/wait.h>
+
+#define PWM_OUT_VAL(x) (0x00 + (4 * (x))) /* Device's Duty Cycle register */
+#define PWM_CPT_VAL(x) (0x10 + (4 * (x))) /* Capture value */
+#define PWM_CPT_EDGE(x) (0x30 + (4 * (x))) /* Edge to capture on */
-#define STI_DS_REG(ch) (4 * (ch)) /* Channel's Duty Cycle register */
-#define STI_PWMCR 0x50 /* Control/Config register */
-#define STI_INTEN 0x54 /* Interrupt Enable/Disable register */
-#define PWM_PRESCALE_LOW_MASK 0x0f
-#define PWM_PRESCALE_HIGH_MASK 0xf0
+#define STI_PWM_CTRL 0x50 /* Control/Config register */
+#define STI_INT_EN 0x54 /* Interrupt Enable/Disable register */
+#define STI_INT_STA 0x58 /* Interrupt Status register */
+#define PWM_INT_ACK 0x5c
+#define PWM_PRESCALE_LOW_MASK 0x0f
+#define PWM_PRESCALE_HIGH_MASK 0xf0
+#define PWM_CPT_EDGE_MASK 0x03
+#define PWM_INT_ACK_MASK 0x1ff
+
+#define STI_MAX_CPT_DEVS 4
+#define CPT_DC_MAX 0xff
/* Regfield IDs */
enum {
+	/* Bits in PWM_CTRL */
PWMCLK_PRESCALE_LOW,
PWMCLK_PRESCALE_HIGH,
- PWM_EN,
- PWM_INT_EN,
+ CPTCLK_PRESCALE,
+
+ PWM_OUT_EN,
+ PWM_CPT_EN,
+
+ PWM_CPT_INT_EN,
+ PWM_CPT_INT_STAT,
/* Keep last */
MAX_REGFIELDS
};
+/*
+ * Each capture input can be programmed to detect rising-edge, falling-edge,
+ * either edge or neither edge.
+ */
+enum sti_cpt_edge {
+ CPT_EDGE_DISABLED,
+ CPT_EDGE_RISING,
+ CPT_EDGE_FALLING,
+ CPT_EDGE_BOTH,
+};
+
+struct sti_cpt_ddata {
+ u32 snapshot[3];
+ unsigned int index;
+ struct mutex lock;
+ wait_queue_head_t wait;
+};
+
struct sti_pwm_compat_data {
const struct reg_field *reg_fields;
- unsigned int num_chan;
+ unsigned int pwm_num_devs;
+ unsigned int cpt_num_devs;
unsigned int max_pwm_cnt;
unsigned int max_prescale;
};
struct sti_pwm_chip {
struct device *dev;
- struct clk *clk;
- unsigned long clk_rate;
+ struct clk *pwm_clk;
+ struct clk *cpt_clk;
struct regmap *regmap;
struct sti_pwm_compat_data *cdata;
struct regmap_field *prescale_low;
struct regmap_field *prescale_high;
- struct regmap_field *pwm_en;
- struct regmap_field *pwm_int_en;
+ struct regmap_field *pwm_out_en;
+ struct regmap_field *pwm_cpt_en;
+ struct regmap_field *pwm_cpt_int_en;
+ struct regmap_field *pwm_cpt_int_stat;
struct pwm_chip chip;
struct pwm_device *cur;
unsigned long configured;
@@ -64,10 +106,13 @@ struct sti_pwm_chip {
};
static const struct reg_field sti_pwm_regfields[MAX_REGFIELDS] = {
- [PWMCLK_PRESCALE_LOW] = REG_FIELD(STI_PWMCR, 0, 3),
- [PWMCLK_PRESCALE_HIGH] = REG_FIELD(STI_PWMCR, 11, 14),
- [PWM_EN] = REG_FIELD(STI_PWMCR, 9, 9),
- [PWM_INT_EN] = REG_FIELD(STI_INTEN, 0, 0),
+ [PWMCLK_PRESCALE_LOW] = REG_FIELD(STI_PWM_CTRL, 0, 3),
+ [PWMCLK_PRESCALE_HIGH] = REG_FIELD(STI_PWM_CTRL, 11, 14),
+ [CPTCLK_PRESCALE] = REG_FIELD(STI_PWM_CTRL, 4, 8),
+ [PWM_OUT_EN] = REG_FIELD(STI_PWM_CTRL, 9, 9),
+ [PWM_CPT_EN] = REG_FIELD(STI_PWM_CTRL, 10, 10),
+ [PWM_CPT_INT_EN] = REG_FIELD(STI_INT_EN, 1, 4),
+ [PWM_CPT_INT_STAT] = REG_FIELD(STI_INT_STA, 1, 4),
};
static inline struct sti_pwm_chip *to_sti_pwmchip(struct pwm_chip *chip)
@@ -82,61 +127,68 @@ static int sti_pwm_get_prescale(struct sti_pwm_chip *pc, unsigned long period,
unsigned int *prescale)
{
struct sti_pwm_compat_data *cdata = pc->cdata;
- unsigned long val;
+ unsigned long clk_rate;
+ unsigned long value;
unsigned int ps;
+ clk_rate = clk_get_rate(pc->pwm_clk);
+ if (!clk_rate) {
+ dev_err(pc->dev, "failed to get clock rate\n");
+ return -EINVAL;
+ }
+
/*
- * prescale = ((period_ns * clk_rate) / (10^9 * (max_pwm_count + 1)) - 1
+	 * prescale = (period_ns * clk_rate) / (10^9 * (max_pwm_cnt + 1)) - 1
*/
- val = NSEC_PER_SEC / pc->clk_rate;
- val *= cdata->max_pwm_cnt + 1;
+ value = NSEC_PER_SEC / clk_rate;
+ value *= cdata->max_pwm_cnt + 1;
- if (period % val) {
+ if (period % value)
return -EINVAL;
- } else {
- ps = period / val - 1;
- if (ps > cdata->max_prescale)
- return -EINVAL;
- }
+
+ ps = period / value - 1;
+ if (ps > cdata->max_prescale)
+ return -EINVAL;
+
*prescale = ps;
return 0;
}
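Worked numbers for the prescale formula (clock rate assumed for illustration): with a 10 MHz clock and max_pwm_cnt = 255, one full 256-count period at prescale 0 takes 100 ns * 256 = 25600 ns, so only exact multiples of 25600 ns are reachable. A sketch:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	unsigned long clk_rate = 10000000;	/* assumed 10 MHz */
	unsigned int max_pwm_cnt = 255;
	unsigned long period = 51200;		/* requested period, ns */
	unsigned long value;
	unsigned int ps;

	value = NSEC_PER_SEC / clk_rate;	/* 100 ns per tick */
	value *= max_pwm_cnt + 1;		/* 25600 ns per period */

	if (period % value) {
		fprintf(stderr, "period not reachable\n");
		return 1;
	}

	ps = period / value - 1;		/* 51200 / 25600 - 1 = 1 */
	printf("prescale=%u\n", ps);
	return 0;
}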
/*
- * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles.
- * The only way to change the period (apart from changing the PWM input clock)
- * is to change the PWM clock prescaler.
- * The prescaler is of 8 bits, so 256 prescaler values and hence
- * 256 possible period values are supported (for a particular clock rate).
- * The requested period will be applied only if it matches one of these
- * 256 values.
+ * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles. The
+ * only way to change the period (apart from changing the PWM input clock) is
+ * to change the PWM clock prescaler.
+ *
+ * The prescaler is of 8 bits, so 256 prescaler values and hence 256 possible
+ * period values are supported (for a particular clock rate). The requested
+ * period will be applied only if it matches one of these 256 values.
*/
static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+ int duty_ns, int period_ns)
{
struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
struct sti_pwm_compat_data *cdata = pc->cdata;
+ unsigned int ncfg, value, prescale = 0;
struct pwm_device *cur = pc->cur;
struct device *dev = pc->dev;
- unsigned int prescale = 0, pwmvalx;
- int ret;
- unsigned int ncfg;
bool period_same = false;
+ int ret;
ncfg = hweight_long(pc->configured);
if (ncfg)
period_same = (period_ns == pwm_get_period(cur));
- /* Allow configuration changes if one of the
- * following conditions satisfy.
- * 1. No channels have been configured.
- * 2. Only one channel has been configured and the new request
- * is for the same channel.
- * 3. Only one channel has been configured and the new request is
- * for a new channel and period of the new channel is same as
- * the current configured period.
- * 4. More than one channels are configured and period of the new
+ /*
+	 * Allow configuration changes if one of the following conditions
+	 * is satisfied:
+ * 1. No devices have been configured.
+ * 2. Only one device has been configured and the new request is for
+ * the same device.
+ * 3. Only one device has been configured and the new request is for
+ * a new device and period of the new device is same as the current
+ * configured period.
+	 * 4. More than one device is configured and the period of the new
+	 *    request is the same as the current period.
*/
if (!ncfg ||
@@ -144,7 +196,11 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
((ncfg == 1) && (pwm->hwpwm != cur->hwpwm) && period_same) ||
((ncfg > 1) && period_same)) {
/* Enable clock before writing to PWM registers. */
- ret = clk_enable(pc->clk);
+ ret = clk_enable(pc->pwm_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(pc->cpt_clk);
if (ret)
return ret;
@@ -153,15 +209,15 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret)
goto clk_dis;
- ret =
- regmap_field_write(pc->prescale_low,
- prescale & PWM_PRESCALE_LOW_MASK);
+ value = prescale & PWM_PRESCALE_LOW_MASK;
+
+ ret = regmap_field_write(pc->prescale_low, value);
if (ret)
goto clk_dis;
- ret =
- regmap_field_write(pc->prescale_high,
- (prescale & PWM_PRESCALE_HIGH_MASK) >> 4);
+ value = (prescale & PWM_PRESCALE_HIGH_MASK) >> 4;
+
+ ret = regmap_field_write(pc->prescale_high, value);
if (ret)
goto clk_dis;
}
@@ -172,25 +228,26 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* PWM pulse = (max_pwm_count + 1) local cycles,
* that is continuous pulse: signal never goes low.
*/
- pwmvalx = cdata->max_pwm_cnt * duty_ns / period_ns;
+ value = cdata->max_pwm_cnt * duty_ns / period_ns;
- ret = regmap_write(pc->regmap, STI_DS_REG(pwm->hwpwm), pwmvalx);
+ ret = regmap_write(pc->regmap, PWM_OUT_VAL(pwm->hwpwm), value);
if (ret)
goto clk_dis;
- ret = regmap_field_write(pc->pwm_int_en, 0);
+ ret = regmap_field_write(pc->pwm_cpt_int_en, 0);
set_bit(pwm->hwpwm, &pc->configured);
pc->cur = pwm;
- dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n",
- prescale, period_ns, duty_ns, pwmvalx);
+ dev_dbg(dev, "prescale:%u, period:%i, duty:%i, value:%u\n",
+ prescale, period_ns, duty_ns, value);
} else {
return -EINVAL;
}
clk_dis:
- clk_disable(pc->clk);
+ clk_disable(pc->pwm_clk);
+ clk_disable(pc->cpt_clk);
return ret;
}
@@ -201,23 +258,30 @@ static int sti_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
int ret = 0;
/*
- * Since we have a common enable for all PWM channels,
- * do not enable if already enabled.
+ * Since we have a common enable for all PWM devices, do not enable if
+ * already enabled.
*/
mutex_lock(&pc->sti_pwm_lock);
+
if (!pc->en_count) {
- ret = clk_enable(pc->clk);
+ ret = clk_enable(pc->pwm_clk);
+ if (ret)
+ goto out;
+
+ ret = clk_enable(pc->cpt_clk);
if (ret)
goto out;
- ret = regmap_field_write(pc->pwm_en, 1);
+ ret = regmap_field_write(pc->pwm_out_en, 1);
if (ret) {
- dev_err(dev, "failed to enable PWM device:%d\n",
- pwm->hwpwm);
+ dev_err(dev, "failed to enable PWM device %u: %d\n",
+ pwm->hwpwm, ret);
goto out;
}
}
+
pc->en_count++;
+
out:
mutex_unlock(&pc->sti_pwm_lock);
return ret;
@@ -228,13 +292,17 @@ static void sti_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
mutex_lock(&pc->sti_pwm_lock);
+
if (--pc->en_count) {
mutex_unlock(&pc->sti_pwm_lock);
return;
}
- regmap_field_write(pc->pwm_en, 0);
- clk_disable(pc->clk);
+ regmap_field_write(pc->pwm_out_en, 0);
+
+ clk_disable(pc->pwm_clk);
+ clk_disable(pc->cpt_clk);
+
mutex_unlock(&pc->sti_pwm_lock);
}
@@ -245,7 +313,90 @@ static void sti_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
clear_bit(pwm->hwpwm, &pc->configured);
}
+static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_capture *result, unsigned long timeout)
+{
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+ struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
+ struct device *dev = pc->dev;
+ unsigned int effective_ticks;
+ unsigned long long high, low;
+ int ret;
+
+ if (pwm->hwpwm >= cdata->cpt_num_devs) {
+ dev_err(dev, "device %u is not valid\n", pwm->hwpwm);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ddata->lock);
+ ddata->index = 0;
+
+ /* Prepare capture measurement */
+ regmap_write(pc->regmap, PWM_CPT_EDGE(pwm->hwpwm), CPT_EDGE_RISING);
+ regmap_field_write(pc->pwm_cpt_int_en, BIT(pwm->hwpwm));
+
+ /* Enable capture */
+ ret = regmap_field_write(pc->pwm_cpt_en, 1);
+ if (ret) {
+ dev_err(dev, "failed to enable PWM capture %u: %d\n",
+ pwm->hwpwm, ret);
+ goto out;
+ }
+
+ ret = wait_event_interruptible_timeout(ddata->wait, ddata->index > 1,
+ msecs_to_jiffies(timeout));
+
+ regmap_write(pc->regmap, PWM_CPT_EDGE(pwm->hwpwm), CPT_EDGE_DISABLED);
+
+ if (ret == -ERESTARTSYS)
+ goto out;
+
+ switch (ddata->index) {
+ case 0:
+ case 1:
+ /*
+		 * Getting here could mean:
+		 * - the input signal is constant or slower than 1 Hz
+		 * - there is no input signal at all
+		 *
+		 * In either case the frequency is rounded down to 0.
+ */
+ result->period = 0;
+ result->duty_cycle = 0;
+
+ break;
+
+ case 2:
+		/* We have everything we need */
+ high = ddata->snapshot[1] - ddata->snapshot[0];
+ low = ddata->snapshot[2] - ddata->snapshot[1];
+
+ effective_ticks = clk_get_rate(pc->cpt_clk);
+
+ result->period = (high + low) * NSEC_PER_SEC;
+ result->period /= effective_ticks;
+
+ result->duty_cycle = high * NSEC_PER_SEC;
+ result->duty_cycle /= effective_ticks;
+
+ break;
+
+ default:
+ dev_err(dev, "internal error\n");
+ break;
+ }
+
+out:
+ /* Disable capture */
+ regmap_field_write(pc->pwm_cpt_en, 0);
+
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
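The conversion at the end of sti_pwm_capture() is plain tick counting: snapshot[1] - snapshot[0] is the high time and snapshot[2] - snapshot[1] the low time, both in capture-clock ticks. A worked sketch with assumed snapshot values and a 1 MHz capture clock:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long cpt_rate = 1000000;		/* 1 MHz: 1 us/tick */
	unsigned int snapshot[3] = { 100, 350, 1100 };	/* assumed */
	unsigned long long high, low;

	high = snapshot[1] - snapshot[0];	/* 250 ticks high */
	low = snapshot[2] - snapshot[1];	/* 750 ticks low */

	/* 1000 ticks -> 1 ms period; 250 ticks -> 250 us duty (25%). */
	printf("period=%llu ns duty=%llu ns\n",
	       (high + low) * NSEC_PER_SEC / cpt_rate,
	       high * NSEC_PER_SEC / cpt_rate);
	return 0;
}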
+
static const struct pwm_ops sti_pwm_ops = {
+ .capture = sti_pwm_capture,
.config = sti_pwm_config,
.enable = sti_pwm_enable,
.disable = sti_pwm_disable,
@@ -253,17 +404,98 @@ static const struct pwm_ops sti_pwm_ops = {
.owner = THIS_MODULE,
};
+static irqreturn_t sti_pwm_interrupt(int irq, void *data)
+{
+ struct sti_pwm_chip *pc = data;
+ struct device *dev = pc->dev;
+ struct sti_cpt_ddata *ddata;
+ int devicenum;
+ unsigned int cpt_int_stat;
+ unsigned int reg;
+	irqreturn_t ret = IRQ_NONE;
+
+	if (regmap_field_read(pc->pwm_cpt_int_stat, &cpt_int_stat))
+		return IRQ_NONE;
+
+ while (cpt_int_stat) {
+ devicenum = ffs(cpt_int_stat) - 1;
+
+ ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
+
+ /*
+ * Capture input:
+ * _______ _______
+ * | | | |
+ * __| |_________________| |________
+ * ^0 ^1 ^2
+ *
+		 * Capture starts at the first available rising edge. When a
+		 * capture event occurs, the capture value (CPT_VALx) is
+		 * stored, the index is incremented and the capture edge is
+		 * toggled.
+		 *
+		 * After the capture, if the index is > 1, we have collected
+		 * the necessary data, so we signal the thread waiting for it
+		 * and disable the capture by setting the capture edge to none.
+ */
+
+ regmap_read(pc->regmap,
+ PWM_CPT_VAL(devicenum),
+ &ddata->snapshot[ddata->index]);
+
+ switch (ddata->index) {
+ case 0:
+ case 1:
+ regmap_read(pc->regmap, PWM_CPT_EDGE(devicenum), &reg);
+ reg ^= PWM_CPT_EDGE_MASK;
+ regmap_write(pc->regmap, PWM_CPT_EDGE(devicenum), reg);
+
+ ddata->index++;
+ break;
+
+ case 2:
+ regmap_write(pc->regmap,
+ PWM_CPT_EDGE(devicenum),
+ CPT_EDGE_DISABLED);
+ wake_up(&ddata->wait);
+ break;
+
+ default:
+ dev_err(dev, "Internal error\n");
+ }
+
+ cpt_int_stat &= ~BIT_MASK(devicenum);
+
+ ret = IRQ_HANDLED;
+ }
+
+ /* Just ACK everything */
+ regmap_write(pc->regmap, PWM_INT_ACK, PWM_INT_ACK_MASK);
+
+ return ret;
+}
+
static int sti_pwm_probe_dt(struct sti_pwm_chip *pc)
{
struct device *dev = pc->dev;
const struct reg_field *reg_fields;
struct device_node *np = dev->of_node;
struct sti_pwm_compat_data *cdata = pc->cdata;
- u32 num_chan;
+ u32 num_devs;
+ int ret;
+
+ ret = of_property_read_u32(np, "st,pwm-num-chan", &num_devs);
+ if (!ret)
+ cdata->pwm_num_devs = num_devs;
+
+ ret = of_property_read_u32(np, "st,capture-num-chan", &num_devs);
+ if (!ret)
+ cdata->cpt_num_devs = num_devs;
- of_property_read_u32(np, "st,pwm-num-chan", &num_chan);
- if (num_chan)
- cdata->num_chan = num_chan;
+ if (!cdata->pwm_num_devs && !cdata->cpt_num_devs) {
+ dev_err(dev, "No channels configured\n");
+ return -EINVAL;
+ }
reg_fields = cdata->reg_fields;
@@ -277,15 +509,26 @@ static int sti_pwm_probe_dt(struct sti_pwm_chip *pc)
if (IS_ERR(pc->prescale_high))
return PTR_ERR(pc->prescale_high);
- pc->pwm_en = devm_regmap_field_alloc(dev, pc->regmap,
- reg_fields[PWM_EN]);
- if (IS_ERR(pc->pwm_en))
- return PTR_ERR(pc->pwm_en);
- pc->pwm_int_en = devm_regmap_field_alloc(dev, pc->regmap,
- reg_fields[PWM_INT_EN]);
- if (IS_ERR(pc->pwm_int_en))
- return PTR_ERR(pc->pwm_int_en);
+ pc->pwm_out_en = devm_regmap_field_alloc(dev, pc->regmap,
+ reg_fields[PWM_OUT_EN]);
+ if (IS_ERR(pc->pwm_out_en))
+ return PTR_ERR(pc->pwm_out_en);
+
+ pc->pwm_cpt_en = devm_regmap_field_alloc(dev, pc->regmap,
+ reg_fields[PWM_CPT_EN]);
+ if (IS_ERR(pc->pwm_cpt_en))
+ return PTR_ERR(pc->pwm_cpt_en);
+
+ pc->pwm_cpt_int_en = devm_regmap_field_alloc(dev, pc->regmap,
+ reg_fields[PWM_CPT_INT_EN]);
+ if (IS_ERR(pc->pwm_cpt_int_en))
+ return PTR_ERR(pc->pwm_cpt_int_en);
+
+ pc->pwm_cpt_int_stat = devm_regmap_field_alloc(dev, pc->regmap,
+ reg_fields[PWM_CPT_INT_STAT]);
+	if (IS_ERR(pc->pwm_cpt_int_stat))
+		return PTR_ERR(pc->pwm_cpt_int_stat);
return 0;
}
@@ -302,7 +545,8 @@ static int sti_pwm_probe(struct platform_device *pdev)
struct sti_pwm_compat_data *cdata;
struct sti_pwm_chip *pc;
struct resource *res;
- int ret;
+ unsigned int i;
+ int irq, ret;
pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
if (!pc)
@@ -323,14 +567,28 @@ static int sti_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->regmap))
return PTR_ERR(pc->regmap);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to obtain IRQ\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sti_pwm_interrupt, 0,
+ pdev->name, pc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
/*
* Setup PWM data with default values: some values could be replaced
* with specific ones provided from Device Tree.
*/
- cdata->reg_fields = &sti_pwm_regfields[0];
+ cdata->reg_fields = sti_pwm_regfields;
cdata->max_prescale = 0xff;
- cdata->max_pwm_cnt = 255;
- cdata->num_chan = 1;
+ cdata->max_pwm_cnt = 255;
+ cdata->pwm_num_devs = 0;
+ cdata->cpt_num_devs = 0;
pc->cdata = cdata;
pc->dev = dev;
@@ -341,36 +599,64 @@ static int sti_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- pc->clk = of_clk_get_by_name(dev->of_node, "pwm");
- if (IS_ERR(pc->clk)) {
+ if (!cdata->pwm_num_devs)
+ goto skip_pwm;
+
+ pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
+ if (IS_ERR(pc->pwm_clk)) {
dev_err(dev, "failed to get PWM clock\n");
- return PTR_ERR(pc->clk);
+ return PTR_ERR(pc->pwm_clk);
}
- pc->clk_rate = clk_get_rate(pc->clk);
- if (!pc->clk_rate) {
- dev_err(dev, "failed to get clock rate\n");
- return -EINVAL;
+ ret = clk_prepare(pc->pwm_clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
}
- ret = clk_prepare(pc->clk);
+skip_pwm:
+ if (!cdata->cpt_num_devs)
+ goto skip_cpt;
+
+ pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
+ if (IS_ERR(pc->cpt_clk)) {
+ dev_err(dev, "failed to get PWM capture clock\n");
+ return PTR_ERR(pc->cpt_clk);
+ }
+
+ ret = clk_prepare(pc->cpt_clk);
if (ret) {
dev_err(dev, "failed to prepare clock\n");
return ret;
}
+skip_cpt:
pc->chip.dev = dev;
pc->chip.ops = &sti_pwm_ops;
pc->chip.base = -1;
- pc->chip.npwm = pc->cdata->num_chan;
+ pc->chip.npwm = pc->cdata->pwm_num_devs;
pc->chip.can_sleep = true;
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
- clk_unprepare(pc->clk);
+ clk_unprepare(pc->pwm_clk);
+ clk_unprepare(pc->cpt_clk);
return ret;
}
+ for (i = 0; i < cdata->cpt_num_devs; i++) {
+ struct sti_cpt_ddata *ddata;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ init_waitqueue_head(&ddata->wait);
+ mutex_init(&ddata->lock);
+
+ pwm_set_chip_data(&pc->chip.pwms[i], ddata);
+ }
+
platform_set_drvdata(pdev, pc);
return 0;
@@ -381,10 +667,11 @@ static int sti_pwm_remove(struct platform_device *pdev)
struct sti_pwm_chip *pc = platform_get_drvdata(pdev);
unsigned int i;
- for (i = 0; i < pc->cdata->num_chan; i++)
+ for (i = 0; i < pc->cdata->pwm_num_devs; i++)
pwm_disable(&pc->chip.pwms[i]);
- clk_unprepare(pc->clk);
+ clk_unprepare(pc->pwm_clk);
+ clk_unprepare(pc->cpt_clk);
return pwmchip_remove(&pc->chip);
}
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 03a99a53c39e..b0803f6c64d9 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -284,6 +284,12 @@ static const struct sun4i_pwm_data sun4i_pwm_data_a20 = {
.npwm = 2,
};
+static const struct sun4i_pwm_data sun4i_pwm_data_h3 = {
+ .has_prescaler_bypass = true,
+ .has_rdy = true,
+ .npwm = 1,
+};
+
static const struct of_device_id sun4i_pwm_dt_ids[] = {
{
.compatible = "allwinner,sun4i-a10-pwm",
@@ -298,6 +304,9 @@ static const struct of_device_id sun4i_pwm_dt_ids[] = {
.compatible = "allwinner,sun7i-a20-pwm",
.data = &sun4i_pwm_data_a20,
}, {
+ .compatible = "allwinner,sun8i-h3-pwm",
+ .data = &sun4i_pwm_data_h3,
+ }, {
/* sentinel */
},
};
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
index 829f4991c96f..7fa85a1604da 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -34,7 +34,6 @@ static int pwmss_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
/* Populate all the child nodes here... */
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
@@ -46,31 +45,13 @@ static int pwmss_probe(struct platform_device *pdev)
static int pwmss_remove(struct platform_device *pdev)
{
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int pwmss_suspend(struct device *dev)
-{
- pm_runtime_put_sync(dev);
- return 0;
-}
-
-static int pwmss_resume(struct device *dev)
-{
- pm_runtime_get_sync(dev);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(pwmss_pm_ops, pwmss_suspend, pwmss_resume);
-
static struct platform_driver pwmss_driver = {
.driver = {
.name = "pwmss",
- .pm = &pwmss_pm_ops,
.of_match_table = pwmss_of_match,
},
.probe = pwmss_probe,
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index 04f76725d591..7a993b056638 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -269,6 +269,22 @@ static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
goto out;
}
+ val |= TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXEN);
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ goto out;
+ }
+
+ val &= ~TWL6030_PWM_TOGGLE(pwm->hwpwm, TWL6030_PWMXEN);
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
+ if (ret < 0) {
+ dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ goto out;
+ }
+
twl->twl6030_toggle3 = val;
out:
mutex_unlock(&twl->mutex);
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 18ed725594c3..0296d8178ae2 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -409,6 +409,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
}
}
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+ struct device *parent;
+ unsigned int i;
+
+ parent = class_find_device(&pwm_class, NULL, chip,
+ pwmchip_sysfs_match);
+ if (!parent)
+ return;
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+
+ if (test_bit(PWMF_EXPORTED, &pwm->flags))
+ pwm_unexport_child(parent, pwm);
+ }
+}
+
static int __init pwm_sysfs_init(void)
{
return class_register(&pwm_class);
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 436dfe871d32..9013a585507e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
down_read(&current->mm->mmap_sem);
pinned = get_user_pages(
(unsigned long)xfer->loc_addr & PAGE_MASK,
- nr_pages, dir == DMA_FROM_DEVICE, 0,
+ nr_pages,
+ dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
page_list, NULL);
up_read(&current->mm->mmap_sem);
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index cebc296463ad..bad0e0ea4f30 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -1841,24 +1841,19 @@ static int cm_chan_msg_send(void __user *arg)
{
struct rio_cm_msg msg;
void *buf;
- int ret = 0;
+ int ret;
if (copy_from_user(&msg, arg, sizeof(msg)))
return -EFAULT;
if (msg.size > RIO_MAX_MSG_SIZE)
return -EINVAL;
- buf = kmalloc(msg.size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
- ret = -EFAULT;
- goto out;
- }
+ buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
ret = riocm_ch_send(msg.ch_num, buf, msg.size);
-out:
+
kfree(buf);
return ret;
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 67426c0477d3..5c1519b229e0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2754,7 +2754,7 @@ static int _regulator_set_voltage_time(struct regulator_dev *rdev,
ramp_delay = rdev->desc->ramp_delay;
if (ramp_delay == 0) {
- rdev_warn(rdev, "ramp_delay not set\n");
+ rdev_dbg(rdev, "ramp_delay not set\n");
return 0;
}
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 3958f50c5975..e0c747aa9f85 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -495,7 +495,8 @@ static irqreturn_t max8973_thermal_irq(int irq, void *data)
{
struct max8973_chip *mchip = data;
- thermal_zone_device_update(mchip->tz_device);
+ thermal_zone_device_update(mchip->tz_device,
+ THERMAL_EVENT_UNSPECIFIED);
return IRQ_HANDLED;
}
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 4be1b8c21f6f..06d9fa2f3bc0 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -14,9 +14,58 @@ menuconfig RESET_CONTROLLER
if RESET_CONTROLLER
+config RESET_ATH79
+ bool "AR71xx Reset Driver" if COMPILE_TEST
+ default ATH79
+ help
+ This enables the ATH79 reset controller driver that supports the
+ AR71xx SoC reset controller.
+
+config RESET_BERLIN
+ bool "Berlin Reset Driver" if COMPILE_TEST
+ default ARCH_BERLIN
+ help
+ This enables the reset controller driver for Marvell Berlin SoCs.
+
+config RESET_LPC18XX
+ bool "LPC18xx/43xx Reset Driver" if COMPILE_TEST
+ default ARCH_LPC18XX
+ help
+ This enables the reset controller driver for NXP LPC18xx/43xx SoCs.
+
+config RESET_MESON
+ bool "Meson Reset Driver" if COMPILE_TEST
+ default ARCH_MESON
+ help
+ This enables the reset driver for Amlogic Meson SoCs.
+
config RESET_OXNAS
bool
+config RESET_PISTACHIO
+ bool "Pistachio Reset Driver" if COMPILE_TEST
+ default MACH_PISTACHIO
+ help
+ This enables the reset driver for ImgTec Pistachio SoCs.
+
+config RESET_SOCFPGA
+ bool "SoCFPGA Reset Driver" if COMPILE_TEST
+ default ARCH_SOCFPGA
+ help
+ This enables the reset controller driver for Altera SoCFPGAs.
+
+config RESET_STM32
+ bool "STM32 Reset Driver" if COMPILE_TEST
+ default ARCH_STM32
+ help
+ This enables the RCC reset controller driver for STM32 MCUs.
+
+config RESET_SUNXI
+ bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
+ default ARCH_SUNXI
+ help
+ This enables the reset driver for Allwinner SoCs.
+
config TI_SYSCON_RESET
tristate "TI SYSCON Reset Driver"
depends on HAS_IOMEM
@@ -27,6 +76,22 @@ config TI_SYSCON_RESET
you wish to use the reset framework for such memory-mapped devices,
say Y here. Otherwise, say N.
+config RESET_UNIPHIER
+ tristate "Reset controller driver for UniPhier SoCs"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && MFD_SYSCON
+ default ARCH_UNIPHIER
+ help
+ Support for reset controllers on UniPhier SoCs.
+	  Say Y if you want to control reset signals provided by the System
+	  Control block, Media I/O block, and Peripheral block.
+
+config RESET_ZYNQ
+ bool "ZYNQ Reset Driver" if COMPILE_TEST
+ default ARCH_ZYNQ
+ help
+ This enables the reset controller driver for Xilinx Zynq SoCs.
+
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 5d65a93d3c43..bbe7026617fc 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,13 +1,15 @@
obj-y += core.o
-obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o
-obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
-obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
-obj-$(CONFIG_MACH_PISTACHIO) += reset-pistachio.o
-obj-$(CONFIG_ARCH_MESON) += reset-meson.o
-obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
+obj-y += hisilicon/
obj-$(CONFIG_ARCH_STI) += sti/
-obj-$(CONFIG_ARCH_HISI) += hisilicon/
-obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
-obj-$(CONFIG_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
+obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
+obj-$(CONFIG_RESET_MESON) += reset-meson.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
+obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
+obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
+obj-$(CONFIG_RESET_STM32) += reset-stm32.o
+obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_TI_SYSCON_RESET) += reset-ti-syscon.o
+obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
+obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 395dc9ce492e..b8ae1dbd4c17 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -138,7 +138,8 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register);
*/
int reset_control_reset(struct reset_control *rstc)
{
- if (WARN_ON(rstc->shared))
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)) ||
+ WARN_ON(rstc->shared))
return -EINVAL;
if (rstc->rcdev->ops->reset)
@@ -161,6 +162,9 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
*/
int reset_control_assert(struct reset_control *rstc)
{
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
+ return -EINVAL;
+
if (!rstc->rcdev->ops->assert)
return -ENOTSUPP;
@@ -184,6 +188,9 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
*/
int reset_control_deassert(struct reset_control *rstc)
{
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
+ return -EINVAL;
+
if (!rstc->rcdev->ops->deassert)
return -ENOTSUPP;
@@ -204,6 +211,9 @@ EXPORT_SYMBOL_GPL(reset_control_deassert);
*/
int reset_control_status(struct reset_control *rstc)
{
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
+ return -EINVAL;
+
if (rstc->rcdev->ops->status)
return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);
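The point of the new IS_ERR_OR_NULL guards is that a consumer may hold a cookie that is NULL or an ERR_PTR() value, and the core should now fail such calls with -EINVAL instead of dereferencing rstc->rcdev. A user-space model of the guard (ERR_PTR and IS_ERR_OR_NULL re-implemented here only for the demonstration):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)
{
	return (void *)error;
}

static int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct reset_control;	/* opaque, as for consumers of the real API */

/* Model of the guarded entry points: bail out before any deref. */
static int model_reset_control_assert(struct reset_control *rstc)
{
	if (IS_ERR_OR_NULL(rstc))
		return -EINVAL;
	/* ...the real code would call rstc->rcdev->ops->assert()... */
	return 0;
}

int main(void)
{
	printf("NULL -> %d\n", model_reset_control_assert(NULL));
	printf("ERR_PTR(-ENOENT) -> %d\n",
	       model_reset_control_assert(ERR_PTR(-ENOENT)));
	return 0;
}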
diff --git a/drivers/reset/hisilicon/Kconfig b/drivers/reset/hisilicon/Kconfig
index 26bf95a83a8e..1ff8b0c80980 100644
--- a/drivers/reset/hisilicon/Kconfig
+++ b/drivers/reset/hisilicon/Kconfig
@@ -1,5 +1,6 @@
config COMMON_RESET_HI6220
tristate "Hi6220 Reset Driver"
- depends on (ARCH_HISI && RESET_CONTROLLER)
+ depends on ARCH_HISI || COMPILE_TEST
+ default ARCH_HISI
help
Build the Hisilicon Hi6220 reset driver.
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index 16d410cd6146..6b97631f5489 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -12,6 +12,7 @@
* GNU General Public License for more details.
*/
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 12add9b0fa49..78ebf8424375 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -28,7 +28,6 @@
struct socfpga_reset_data {
spinlock_t lock;
void __iomem *membase;
- u32 modrst_offset;
struct reset_controller_dev rcdev;
};
@@ -45,9 +44,8 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
- writel(reg | BIT(offset), data->membase + data->modrst_offset +
- (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * NR_BANKS));
+ writel(reg | BIT(offset), data->membase + (bank * NR_BANKS));
spin_unlock_irqrestore(&data->lock, flags);
return 0;
@@ -67,9 +65,8 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
- writel(reg & ~BIT(offset), data->membase + data->modrst_offset +
- (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * NR_BANKS));
+ writel(reg & ~BIT(offset), data->membase + (bank * NR_BANKS));
spin_unlock_irqrestore(&data->lock, flags);
@@ -85,7 +82,7 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
int offset = id % BITS_PER_LONG;
u32 reg;
- reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * NR_BANKS));
return !(reg & BIT(offset));
}
@@ -102,6 +99,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ u32 modrst_offset;
/*
* The binding was mainlined without the required property.
@@ -122,10 +120,11 @@ static int socfpga_reset_probe(struct platform_device *pdev)
if (IS_ERR(data->membase))
return PTR_ERR(data->membase);
- if (of_property_read_u32(np, "altr,modrst-offset", &data->modrst_offset)) {
+ if (of_property_read_u32(np, "altr,modrst-offset", &modrst_offset)) {
dev_warn(dev, "missing altr,modrst-offset property, assuming 0x10!\n");
- data->modrst_offset = 0x10;
+ modrst_offset = 0x10;
}
+ data->membase += modrst_offset;
spin_lock_init(&data->lock);
diff --git a/drivers/reset/reset-stm32.c b/drivers/reset/reset-stm32.c
new file mode 100644
index 000000000000..3a7c8527e66a
--- /dev/null
+++ b/drivers/reset/reset-stm32.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Heavily based on sunxi driver from Maxime Ripard.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct stm32_reset_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+};
+
+static int stm32_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct stm32_reset_data *data = container_of(rcdev,
+ struct stm32_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * 4));
+ writel(reg | BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct stm32_reset_data *data = container_of(rcdev,
+ struct stm32_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * 4));
+ writel(reg & ~BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops stm32_reset_ops = {
+ .assert = stm32_reset_assert,
+ .deassert = stm32_reset_deassert,
+};
+
+static const struct of_device_id stm32_reset_dt_ids[] = {
+ { .compatible = "st,stm32-rcc", },
+ { /* sentinel */ },
+};
+
+static int stm32_reset_probe(struct platform_device *pdev)
+{
+ struct stm32_reset_data *data;
+ struct resource *res;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase))
+ return PTR_ERR(data->membase);
+
+ spin_lock_init(&data->lock);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = resource_size(res) * 8;
+ data->rcdev.ops = &stm32_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
+}
+
+static struct platform_driver stm32_reset_driver = {
+ .probe = stm32_reset_probe,
+ .driver = {
+ .name = "stm32-rcc-reset",
+ .of_match_table = stm32_reset_dt_ids,
+ },
+};
+builtin_platform_driver(stm32_reset_driver);
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
new file mode 100644
index 000000000000..968c3ae4535c
--- /dev/null
+++ b/drivers/reset/reset-uniphier.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+struct uniphier_reset_data {
+ unsigned int id;
+ unsigned int reg;
+ unsigned int bit;
+ unsigned int flags;
+#define UNIPHIER_RESET_ACTIVE_LOW BIT(0)
+};
+
+#define UNIPHIER_RESET_ID_END (unsigned int)(-1)
+
+#define UNIPHIER_RESET_END \
+ { .id = UNIPHIER_RESET_ID_END }
+
+#define UNIPHIER_RESET(_id, _reg, _bit) \
+ { \
+ .id = (_id), \
+ .reg = (_reg), \
+ .bit = (_bit), \
+ }
+
+#define UNIPHIER_RESETX(_id, _reg, _bit) \
+ { \
+ .id = (_id), \
+ .reg = (_reg), \
+ .bit = (_bit), \
+ .flags = UNIPHIER_RESET_ACTIVE_LOW, \
+ }
+
+/* System reset data */
+#define UNIPHIER_SLD3_SYS_RESET_STDMAC(id) \
+ UNIPHIER_RESETX((id), 0x2000, 10)
+
+#define UNIPHIER_LD11_SYS_RESET_STDMAC(id) \
+ UNIPHIER_RESETX((id), 0x200c, 8)
+
+#define UNIPHIER_PRO4_SYS_RESET_GIO(id) \
+ UNIPHIER_RESETX((id), 0x2000, 6)
+
+#define UNIPHIER_LD20_SYS_RESET_GIO(id) \
+ UNIPHIER_RESETX((id), 0x200c, 5)
+
+#define UNIPHIER_PRO4_SYS_RESET_USB3(id, ch) \
+ UNIPHIER_RESETX((id), 0x2000 + 0x4 * (ch), 17)
+
+const struct uniphier_reset_data uniphier_sld3_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* Ether, HSC, MIO */
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, MIO, RLE */
+ UNIPHIER_PRO4_SYS_RESET_GIO(12), /* Ether, SATA, USB3 */
+ UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC */
+ UNIPHIER_PRO4_SYS_RESET_GIO(12), /* PCIe, USB3 */
+ UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, RLE */
+ UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
+ UNIPHIER_RESETX(16, 0x2014, 4), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x2014, 0), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x2014, 2), /* USB30-PHY2 */
+ UNIPHIER_RESETX(20, 0x2014, 5), /* USB31-PHY0 */
+ UNIPHIER_RESETX(21, 0x2014, 1), /* USB31-PHY1 */
+ UNIPHIER_RESETX(28, 0x2014, 12), /* SATA */
+ UNIPHIER_RESET(29, 0x2014, 8), /* SATA-PHY (active high) */
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
+ UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC, MIO */
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
+ UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC */
+ UNIPHIER_LD20_SYS_RESET_GIO(12), /* PCIe, USB3 */
+ UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */
+ UNIPHIER_RESETX(19, 0x200c, 15), /* USB30-PHY3 */
+ UNIPHIER_RESET_END,
+};
+
+/* Media I/O reset data */
+#define UNIPHIER_MIO_RESET_SD(id, ch) \
+ UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 0)
+
+#define UNIPHIER_MIO_RESET_SD_BRIDGE(id, ch) \
+ UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 26)
+
+#define UNIPHIER_MIO_RESET_EMMC_HW_RESET(id, ch) \
+ UNIPHIER_RESETX((id), 0x80 + 0x200 * (ch), 0)
+
+#define UNIPHIER_MIO_RESET_USB2(id, ch) \
+ UNIPHIER_RESETX((id), 0x114 + 0x200 * (ch), 0)
+
+#define UNIPHIER_MIO_RESET_USB2_BRIDGE(id, ch) \
+ UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 24)
+
+#define UNIPHIER_MIO_RESET_DMAC(id) \
+ UNIPHIER_RESETX((id), 0x110, 17)
+
+const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = {
+ UNIPHIER_MIO_RESET_SD(0, 0),
+ UNIPHIER_MIO_RESET_SD(1, 1),
+ UNIPHIER_MIO_RESET_SD(2, 2),
+ UNIPHIER_MIO_RESET_SD_BRIDGE(3, 0),
+ UNIPHIER_MIO_RESET_SD_BRIDGE(4, 1),
+ UNIPHIER_MIO_RESET_SD_BRIDGE(5, 2),
+ UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
+ UNIPHIER_MIO_RESET_DMAC(7),
+ UNIPHIER_MIO_RESET_USB2(8, 0),
+ UNIPHIER_MIO_RESET_USB2(9, 1),
+ UNIPHIER_MIO_RESET_USB2(10, 2),
+ UNIPHIER_MIO_RESET_USB2(11, 3),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(12, 0),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(13, 1),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(14, 2),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(15, 3),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
+ UNIPHIER_MIO_RESET_SD(0, 0),
+ UNIPHIER_MIO_RESET_SD(1, 1),
+ UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
+ UNIPHIER_RESET_END,
+};
+
+/* Peripheral reset data */
+#define UNIPHIER_PERI_RESET_UART(id, ch) \
+ UNIPHIER_RESETX((id), 0x114, 19 + (ch))
+
+#define UNIPHIER_PERI_RESET_I2C(id, ch) \
+ UNIPHIER_RESETX((id), 0x114, 5 + (ch))
+
+#define UNIPHIER_PERI_RESET_FI2C(id, ch) \
+ UNIPHIER_RESETX((id), 0x114, 24 + (ch))
+
+const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
+ UNIPHIER_PERI_RESET_UART(0, 0),
+ UNIPHIER_PERI_RESET_UART(1, 1),
+ UNIPHIER_PERI_RESET_UART(2, 2),
+ UNIPHIER_PERI_RESET_UART(3, 3),
+ UNIPHIER_PERI_RESET_I2C(4, 0),
+ UNIPHIER_PERI_RESET_I2C(5, 1),
+ UNIPHIER_PERI_RESET_I2C(6, 2),
+ UNIPHIER_PERI_RESET_I2C(7, 3),
+ UNIPHIER_PERI_RESET_I2C(8, 4),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
+ UNIPHIER_PERI_RESET_UART(0, 0),
+ UNIPHIER_PERI_RESET_UART(1, 1),
+ UNIPHIER_PERI_RESET_UART(2, 2),
+ UNIPHIER_PERI_RESET_UART(3, 3),
+ UNIPHIER_PERI_RESET_FI2C(4, 0),
+ UNIPHIER_PERI_RESET_FI2C(5, 1),
+ UNIPHIER_PERI_RESET_FI2C(6, 2),
+ UNIPHIER_PERI_RESET_FI2C(7, 3),
+ UNIPHIER_PERI_RESET_FI2C(8, 4),
+ UNIPHIER_PERI_RESET_FI2C(9, 5),
+ UNIPHIER_PERI_RESET_FI2C(10, 6),
+ UNIPHIER_RESET_END,
+};
+
+/* core implementation */
+struct uniphier_reset_priv {
+ struct reset_controller_dev rcdev;
+ struct device *dev;
+ struct regmap *regmap;
+ const struct uniphier_reset_data *data;
+};
+
+#define to_uniphier_reset_priv(_rcdev) \
+ container_of(_rcdev, struct uniphier_reset_priv, rcdev)
+
+static int uniphier_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, int assert)
+{
+ struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev);
+ const struct uniphier_reset_data *p;
+
+ for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
+ unsigned int mask, val;
+
+ if (p->id != id)
+ continue;
+
+ mask = BIT(p->bit);
+
+ if (assert)
+ val = mask;
+ else
+ val = ~mask;
+
+ if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
+ val = ~val;
+
+ return regmap_write_bits(priv->regmap, p->reg, mask, val);
+ }
+
+ dev_err(priv->dev, "reset_id=%lu was not handled\n", id);
+ return -EINVAL;
+}
+
+static int uniphier_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return uniphier_reset_update(rcdev, id, 1);
+}
+
+static int uniphier_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return uniphier_reset_update(rcdev, id, 0);
+}
+
+static int uniphier_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev);
+ const struct uniphier_reset_data *p;
+
+ for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
+ unsigned int val;
+ int ret, asserted;
+
+ if (p->id != id)
+ continue;
+
+ ret = regmap_read(priv->regmap, p->reg, &val);
+ if (ret)
+ return ret;
+
+ asserted = !!(val & BIT(p->bit));
+
+ if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
+ asserted = !asserted;
+
+ return asserted;
+ }
+
+ dev_err(priv->dev, "reset_id=%lu was not found\n", id);
+ return -EINVAL;
+}
+
+static const struct reset_control_ops uniphier_reset_ops = {
+ .assert = uniphier_reset_assert,
+ .deassert = uniphier_reset_deassert,
+ .status = uniphier_reset_status,
+};
+
+static int uniphier_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_reset_priv *priv;
+ const struct uniphier_reset_data *p, *data;
+ struct regmap *regmap;
+ struct device_node *parent;
+ unsigned int nr_resets = 0;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ parent = of_get_parent(dev->of_node); /* parent should be syscon node */
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get regmap (error %ld)\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ for (p = data; p->id != UNIPHIER_RESET_ID_END; p++)
+ nr_resets = max(nr_resets, p->id + 1);
+
+ priv->rcdev.ops = &uniphier_reset_ops;
+ priv->rcdev.owner = dev->driver->owner;
+ priv->rcdev.of_node = dev->of_node;
+ priv->rcdev.nr_resets = nr_resets;
+ priv->dev = dev;
+ priv->regmap = regmap;
+ priv->data = data;
+
+ return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
+}
+
+static const struct of_device_id uniphier_reset_match[] = {
+ /* System reset */
+ {
+ .compatible = "socionext,uniphier-sld3-reset",
+ .data = uniphier_sld3_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld4-reset",
+ .data = uniphier_sld3_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-reset",
+ .data = uniphier_pro4_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-reset",
+ .data = uniphier_sld3_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-reset",
+ .data = uniphier_pro5_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-reset",
+ .data = uniphier_pxs2_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-reset",
+ .data = uniphier_ld11_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-reset",
+ .data = uniphier_ld20_sys_reset_data,
+ },
+ /* Media I/O reset, SD reset */
+ {
+ .compatible = "socionext,uniphier-sld3-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld4-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
+ },
+ /* Peripheral reset */
+ {
+ .compatible = "socionext,uniphier-ld4-peri-reset",
+ .data = uniphier_ld4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-peri-reset",
+ .data = uniphier_ld4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_reset_match);
+
+static struct platform_driver uniphier_reset_driver = {
+ .probe = uniphier_reset_probe,
+ .driver = {
+ .name = "uniphier-reset",
+ .of_match_table = uniphier_reset_match,
+ },
+};
+module_platform_driver(uniphier_reset_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier Reset Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index d1e080701264..e859d148aba9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -208,14 +208,14 @@ config RTC_DRV_AS3722
will be called rtc-as3722.
config RTC_DRV_DS1307
- tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025"
+ tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025, ISL12057"
help
If you say yes here you get support for various compatible RTC
chips (often with battery backup) connected with I2C. This driver
should handle DS1307, DS1337, DS1338, DS1339, DS1340, ST M41T00,
- EPSON RX-8025 and probably other chips. In some cases the RTC
- must already have been initialized (by manufacturing or a
- bootloader).
+ EPSON RX-8025, Intersil ISL12057 and probably other chips. In some
+ cases the RTC must already have been initialized (by manufacturing or
+ a bootloader).
The first seven registers on these chips hold an RTC, and other
registers may add features such as NVRAM, a trickle charger for
@@ -234,6 +234,20 @@ config RTC_DRV_DS1307_HWMON
Say Y here if you want to expose temperature sensor data on
rtc-ds1307 (only DS3231)
+config RTC_DRV_DS1307_CENTURY
+ bool "Century bit support for rtc-ds1307"
+ depends on RTC_DRV_DS1307
+ default n
+ help
+ The DS1307 driver suffered from a bug where it was enabling the
+ century bit unconditionally but never using it when reading the time.
+ This made the driver unable to support dates beyond 2099.
+ Setting this option adds proper support for the century bit, but if
+ the time was previously set using a kernel predating this option,
+ reading the date will return a date in the next century.
+ To solve that, you could boot a kernel without this option set, set
+ the RTC date and then boot a kernel with this option set.
+
config RTC_DRV_DS1374
tristate "Dallas/Maxim DS1374"
help
@@ -374,16 +388,6 @@ config RTC_DRV_ISL12022
This driver can also be built as a module. If so, the module
will be called rtc-isl12022.
-config RTC_DRV_ISL12057
- select REGMAP_I2C
- tristate "Intersil ISL12057"
- help
- If you say yes here you get support for the Intersil ISL12057
- I2C RTC chip.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-isl12057.
-
config RTC_DRV_X1205
tristate "Xicor/Intersil X1205"
help
@@ -661,6 +665,7 @@ config RTC_DRV_DS1343
will be called rtc-ds1343.
config RTC_DRV_DS1347
+ select REGMAP_SPI
tristate "Dallas/Maxim DS1347"
help
If you say yes here you get support for the
@@ -1201,7 +1206,7 @@ comment "on-CPU RTC drivers"
config RTC_DRV_ASM9260
tristate "Alphascale asm9260 RTC"
- depends on MACH_ASM9260
+ depends on MACH_ASM9260 || COMPILE_TEST
help
If you say yes here you get support for the RTC on the
Alphascale asm9260 SoC.
@@ -1241,6 +1246,9 @@ config RTC_DRV_IMXDI
config RTC_DRV_OMAP
tristate "TI OMAP Real Time Clock"
depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
+ depends on OF
+ depends on PINCTRL
+ select GENERIC_PINCONF
help
Say "yes" here to support the on chip real time clock
present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx.
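
As a usage note (not part of the patch itself): a configuration that
needs dates beyond 2099 on these chips would enable both symbols,
accepting the re-set caveat described in the help text above:

	CONFIG_RTC_DRV_DS1307=y
	CONFIG_RTC_DRV_DS1307_CENTURY=y
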
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 8fb994bacdf7..1ac694a330c8 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -72,7 +72,6 @@ obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o
obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
-obj-$(CONFIG_RTC_DRV_ISL12057) += rtc-isl12057.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o
diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
index 70b4fd0f6122..9e336184491c 100644
--- a/drivers/rtc/rtc-ac100.c
+++ b/drivers/rtc/rtc-ac100.c
@@ -327,6 +327,8 @@ static int ac100_rtc_register_clks(struct ac100_rtc_dev *chip)
.flags = 0,
};
+ of_property_read_string_index(np, "clock-output-names",
+ i, &init.name);
clk->regmap = chip->regmap;
clk->offset = AC100_CLKOUT_CTRL1 + i;
clk->hw.init = &init;
@@ -552,6 +554,9 @@ static int ac100_rtc_probe(struct platform_device *pdev)
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, chip);
chip->dev = &pdev->dev;
chip->regmap = ac100->regmap;
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 5219916ce11d..d36534965635 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -112,8 +112,6 @@ struct asm9260_rtc_priv {
void __iomem *iobase;
struct rtc_device *rtc;
struct clk *clk;
- /* io lock */
- spinlock_t lock;
};
static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
@@ -122,11 +120,15 @@ static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
u32 isr;
unsigned long events = 0;
+ mutex_lock(&priv->rtc->ops_lock);
isr = ioread32(priv->iobase + HW_CIIR);
- if (!isr)
+ if (!isr) {
+ mutex_unlock(&priv->rtc->ops_lock);
return IRQ_NONE;
+ }
iowrite32(0, priv->iobase + HW_CIIR);
+ mutex_unlock(&priv->rtc->ops_lock);
events |= RTC_AF | RTC_IRQF;
@@ -139,9 +141,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
u32 ctime0, ctime1, ctime2;
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
ctime0 = ioread32(priv->iobase + HW_CTIME0);
ctime1 = ioread32(priv->iobase + HW_CTIME1);
ctime2 = ioread32(priv->iobase + HW_CTIME2);
@@ -155,7 +155,6 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
ctime1 = ioread32(priv->iobase + HW_CTIME1);
ctime2 = ioread32(priv->iobase + HW_CTIME2);
}
- spin_unlock_irqrestore(&priv->lock, irq_flags);
tm->tm_sec = (ctime0 >> BM_CTIME0_SEC_S) & BM_CTIME0_SEC_M;
tm->tm_min = (ctime0 >> BM_CTIME0_MIN_S) & BM_CTIME0_MIN_M;
@@ -174,9 +173,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
/*
* make sure SEC counter will not flip other counter on write time,
* real value will be written at the end of the sequence.
@@ -191,7 +188,6 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
iowrite32(tm->tm_hour, priv->iobase + HW_HOUR);
iowrite32(tm->tm_min, priv->iobase + HW_MIN);
iowrite32(tm->tm_sec, priv->iobase + HW_SEC);
- spin_unlock_irqrestore(&priv->lock, irq_flags);
return 0;
}
@@ -199,9 +195,7 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
alrm->time.tm_year = ioread32(priv->iobase + HW_ALYEAR);
alrm->time.tm_mon = ioread32(priv->iobase + HW_ALMON);
alrm->time.tm_mday = ioread32(priv->iobase + HW_ALDOM);
@@ -213,7 +207,6 @@ static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = ioread32(priv->iobase + HW_AMR) ? 1 : 0;
alrm->pending = ioread32(priv->iobase + HW_CIIR) ? 1 : 0;
- spin_unlock_irqrestore(&priv->lock, irq_flags);
return rtc_valid_tm(&alrm->time);
}
@@ -221,9 +214,7 @@ static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
iowrite32(alrm->time.tm_year, priv->iobase + HW_ALYEAR);
iowrite32(alrm->time.tm_mon, priv->iobase + HW_ALMON);
iowrite32(alrm->time.tm_mday, priv->iobase + HW_ALDOM);
@@ -234,7 +225,6 @@ static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
iowrite32(alrm->time.tm_sec, priv->iobase + HW_ALSEC);
iowrite32(alrm->enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR);
- spin_unlock_irqrestore(&priv->lock, irq_flags);
return 0;
}
@@ -337,6 +327,7 @@ static const struct of_device_id asm9260_dt_ids[] = {
{ .compatible = "alphascale,asm9260-rtc", },
{}
};
+MODULE_DEVICE_TABLE(of, asm9260_dt_ids);
static struct platform_driver asm9260_rtc_driver = {
.probe = asm9260_rtc_probe,
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index 83ac2337c0f7..de8bf56a41e7 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -187,7 +187,7 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
return ret;
}
-static struct rtc_class_ops at32_rtc_ops = {
+static const struct rtc_class_ops at32_rtc_ops = {
.read_time = at32_rtc_readtime,
.set_time = at32_rtc_settime,
.read_alarm = at32_rtc_readalarm,
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 0299988b4f13..397742446007 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -93,8 +93,15 @@ static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (error)
return error;
+ /*
+ * In case of oscillator failure, the register contents should be
+ * considered invalid. The flag is cleared the next time the RTC is set.
+ */
+ if (regs.minutes & BQ32K_OF)
+ return -EINVAL;
+
tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
- tm->tm_min = bcd2bin(regs.minutes & BQ32K_SECONDS_MASK);
+ tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK);
tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
tm->tm_mday = bcd2bin(regs.date);
tm->tm_wday = bcd2bin(regs.day) - 1;
@@ -204,13 +211,10 @@ static int bq32k_probe(struct i2c_client *client,
/* Check Oscillator Failure flag */
error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
- if (!error && (reg & BQ32K_OF)) {
- dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
- reg &= ~BQ32K_OF;
- error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
- }
if (error)
return error;
+ if (reg & BQ32K_OF)
+ dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
if (client->dev.of_node)
trickle_charger_of_init(dev, client->dev.of_node);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 43745cac0141..7030d7cd3861 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -62,6 +62,8 @@ struct cmos_rtc {
u8 day_alrm;
u8 mon_alrm;
u8 century;
+
+ struct rtc_wkalrm saved_wkalrm;
};
/* both platform and pnp busses use negative numbers for invalid irqs */
@@ -707,6 +709,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
+ hpet_rtc_timer_init();
+
if (is_valid_irq(rtc_irq)) {
irq_handler_t rtc_cmos_int_handler;
@@ -714,6 +718,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rtc_cmos_int_handler = hpet_rtc_interrupt;
retval = hpet_register_irq_handler(cmos_interrupt);
if (retval) {
+ hpet_mask_rtc_irq_bit(RTC_IRQMASK);
dev_warn(dev, "hpet_register_irq_handler "
" failed in rtc_init().");
goto cleanup1;
@@ -729,7 +734,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
}
- hpet_rtc_timer_init();
/* export at least the first block of NVRAM */
nvram.size = address_space - NVRAM_OFFSET;
@@ -772,7 +776,7 @@ static void cmos_do_shutdown(int rtc_irq)
spin_unlock_irq(&rtc_lock);
}
-static void __exit cmos_do_remove(struct device *dev)
+static void cmos_do_remove(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
struct resource *ports;
@@ -844,8 +848,6 @@ static int cmos_aie_poweroff(struct device *dev)
return retval;
}
-#ifdef CONFIG_PM
-
static int cmos_suspend(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -877,6 +879,8 @@ static int cmos_suspend(struct device *dev)
enable_irq_wake(cmos->irq);
}
+ cmos_read_alarm(dev, &cmos->saved_wkalrm);
+
dev_dbg(dev, "suspend%s, ctrl %02x\n",
(tmp & RTC_AIE) ? ", alarm may wake" : "",
tmp);
@@ -892,12 +896,32 @@ static int cmos_suspend(struct device *dev)
*/
static inline int cmos_poweroff(struct device *dev)
{
+ if (!IS_ENABLED(CONFIG_PM))
+ return -ENOSYS;
+
return cmos_suspend(dev);
}
-#ifdef CONFIG_PM_SLEEP
+static void cmos_check_wkalrm(struct device *dev)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ struct rtc_wkalrm current_alarm;
+ time64_t t_current_expires;
+ time64_t t_saved_expires;
+
+ cmos_read_alarm(dev, &current_alarm);
+ t_current_expires = rtc_tm_to_time64(&current_alarm.time);
+ t_saved_expires = rtc_tm_to_time64(&cmos->saved_wkalrm.time);
+ if (t_current_expires != t_saved_expires ||
+ cmos->saved_wkalrm.enabled != current_alarm.enabled) {
+ cmos_set_alarm(dev, &cmos->saved_wkalrm);
+ }
+}
+
+static void cmos_check_acpi_rtc_status(struct device *dev,
+ unsigned char *rtc_control);
-static int cmos_resume(struct device *dev)
+static int __maybe_unused cmos_resume(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char tmp;
@@ -910,6 +934,9 @@ static int cmos_resume(struct device *dev)
cmos->enabled_wake = 0;
}
+ /* The BIOS might have changed the alarm, restore it */
+ cmos_check_wkalrm(dev);
+
spin_lock_irq(&rtc_lock);
tmp = cmos->suspend_ctrl;
cmos->suspend_ctrl = 0;
@@ -936,6 +963,9 @@ static int cmos_resume(struct device *dev)
tmp &= ~RTC_AIE;
hpet_mask_rtc_irq_bit(RTC_AIE);
} while (mask & RTC_AIE);
+
+ if (tmp & RTC_AIE)
+ cmos_check_acpi_rtc_status(dev, &tmp);
}
spin_unlock_irq(&rtc_lock);
@@ -944,16 +974,6 @@ static int cmos_resume(struct device *dev)
return 0;
}
-#endif
-#else
-
-static inline int cmos_poweroff(struct device *dev)
-{
- return -ENOSYS;
-}
-
-#endif
-
static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
/*----------------------------------------------------------------*/
@@ -973,6 +993,21 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
static u32 rtc_handler(void *context)
{
struct device *dev = context;
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ unsigned char rtc_control = 0;
+ unsigned char rtc_intr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ if (cmos_rtc.suspend_ctrl)
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ if (rtc_control & RTC_AIE) {
+ cmos_rtc.suspend_ctrl &= ~RTC_AIE;
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+ rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_update_irq(cmos->rtc, 1, rtc_intr);
+ }
+ spin_unlock_irqrestore(&rtc_lock, flags);
pm_wakeup_event(dev, 0);
acpi_clear_event(ACPI_EVENT_RTC);
@@ -1039,12 +1074,39 @@ static void cmos_wake_setup(struct device *dev)
device_init_wakeup(dev, 1);
}
+static void cmos_check_acpi_rtc_status(struct device *dev,
+ unsigned char *rtc_control)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ acpi_event_status rtc_status;
+ acpi_status status;
+
+ if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC)
+ return;
+
+ status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Could not get RTC status\n");
+ } else if (rtc_status & ACPI_EVENT_FLAG_SET) {
+ unsigned char mask;
+ *rtc_control &= ~RTC_AIE;
+ CMOS_WRITE(*rtc_control, RTC_CONTROL);
+ mask = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_update_irq(cmos->rtc, 1, mask);
+ }
+}
+
#else
static void cmos_wake_setup(struct device *dev)
{
}
+static void cmos_check_acpi_rtc_status(struct device *dev,
+ unsigned char *rtc_control)
+{
+}
+
#endif
#ifdef CONFIG_PNP
@@ -1068,7 +1130,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
pnp_irq(pnp, 0));
}
-static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
+static void cmos_pnp_remove(struct pnp_dev *pnp)
{
cmos_do_remove(&pnp->dev);
}
@@ -1100,7 +1162,7 @@ static struct pnp_driver cmos_pnp_driver = {
.name = (char *) driver_name,
.id_table = rtc_ids,
.probe = cmos_pnp_probe,
- .remove = __exit_p(cmos_pnp_remove),
+ .remove = cmos_pnp_remove,
.shutdown = cmos_pnp_shutdown,
/* flag ensures resume() gets called, and stops syslog spam */
@@ -1177,7 +1239,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
return cmos_do_probe(&pdev->dev, resource, irq);
}
-static int __exit cmos_platform_remove(struct platform_device *pdev)
+static int cmos_platform_remove(struct platform_device *pdev)
{
cmos_do_remove(&pdev->dev);
return 0;
@@ -1202,13 +1264,11 @@ static void cmos_platform_shutdown(struct platform_device *pdev)
MODULE_ALIAS("platform:rtc_cmos");
static struct platform_driver cmos_platform_driver = {
- .remove = __exit_p(cmos_platform_remove),
+ .remove = cmos_platform_remove,
.shutdown = cmos_platform_shutdown,
.driver = {
.name = driver_name,
-#ifdef CONFIG_PM
.pm = &cmos_pm_ops,
-#endif
.of_match_table = of_match_ptr(of_cmos_match),
}
};
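
The #ifdef removal in this file leans on a common pattern: sleep
callbacks are annotated __maybe_unused and compiled unconditionally,
and SIMPLE_DEV_PM_OPS only wires them up when CONFIG_PM_SLEEP is set,
so the compiler (not the preprocessor) discards the dead code. A
stripped-down sketch of the pattern, with illustrative names:

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Built unconditionally; dropped by the compiler when unreferenced */
	static int __maybe_unused foo_suspend(struct device *dev)
	{
		return 0;
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		return 0;
	}

	/* Expands to an empty dev_pm_ops when CONFIG_PM_SLEEP is disabled */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
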
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 101b7a240e0f..cfc4141d99cd 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -140,7 +140,7 @@ static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static struct rtc_class_ops coh901331_ops = {
+static const struct rtc_class_ops coh901331_ops = {
.read_time = coh901331_read_time,
.set_mmss = coh901331_set_mmss,
.read_alarm = coh901331_read_alarm,
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index dba60c1dfce2..caf35567e14c 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -469,7 +469,7 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
return 0;
}
-static struct rtc_class_ops davinci_rtc_ops = {
+static const struct rtc_class_ops davinci_rtc_ops = {
.ioctl = davinci_rtc_ioctl,
.read_time = davinci_rtc_read_time,
.set_time = davinci_rtc_set_time,
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 8d05596a6765..b253bf1b3531 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -159,7 +159,7 @@ static int dc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static struct rtc_class_ops dc_rtc_ops = {
+static const struct rtc_class_ops dc_rtc_ops = {
.read_time = dc_rtc_read_time,
.set_mmss = dc_rtc_set_mmss,
.read_alarm = dc_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index f5dd09fe5add..0ec4be62322b 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -102,7 +102,7 @@ static int ds1302_rtc_get_time(struct device *dev, struct rtc_time *time)
return rtc_valid_tm(time);
}
-static struct rtc_class_ops ds1302_rtc_ops = {
+static const struct rtc_class_ops ds1302_rtc_ops = {
.read_time = ds1302_rtc_get_time,
.set_time = ds1302_rtc_set_time,
};
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 8e1c5cb6ece6..4e31036ee259 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -186,6 +186,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "mcp7941x", mcp794xx },
{ "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
+ { "isl12057", ds_1337 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
@@ -382,10 +383,25 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t->tm_mday = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f;
t->tm_mon = bcd2bin(tmp) - 1;
-
- /* assume 20YY not 19YY, and ignore DS1337_BIT_CENTURY */
t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100;
+#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
+ case ds_3231:
+ if (ds1307->regs[DS1307_REG_MONTH] & DS1337_BIT_CENTURY)
+ t->tm_year += 100;
+ break;
+ case ds_1340:
+ if (ds1307->regs[DS1307_REG_HOUR] & DS1340_BIT_CENTURY)
+ t->tm_year += 100;
+ break;
+ default:
+ break;
+ }
+#endif
+
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"read", t->tm_sec, t->tm_min,
@@ -409,6 +425,27 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
+#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
+ if (t->tm_year < 100)
+ return -EINVAL;
+
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
+ case ds_3231:
+ case ds_1340:
+ if (t->tm_year > 299)
+ return -EINVAL;
+ break;
+ default:
+ if (t->tm_year > 199)
+ return -EINVAL;
+ break;
+ }
+#else
+ if (t->tm_year < 100 || t->tm_year > 199)
+ return -EINVAL;
+#endif
+
buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
buf[DS1307_REG_MIN] = bin2bcd(t->tm_min);
buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
@@ -424,11 +461,13 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
case ds_1337:
case ds_1339:
case ds_3231:
- buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
+ if (t->tm_year > 199)
+ buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
break;
case ds_1340:
- buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
- | DS1340_BIT_CENTURY;
+ buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN;
+ if (t->tm_year > 199)
+ buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY;
break;
case mcp794xx:
/*
@@ -1295,6 +1334,11 @@ static int ds1307_probe(struct i2c_client *client,
if (of_property_read_bool(client->dev.of_node, "wakeup-source")) {
ds1307_can_wakeup_device = true;
}
+ /* Intersil ISL12057 DT backward compatibility */
+ if (of_property_read_bool(client->dev.of_node,
+ "isil,irq2-can-wakeup-machine")) {
+ ds1307_can_wakeup_device = true;
+ }
#endif
switch (ds1307->type) {
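
To make the century arithmetic concrete: tm_year counts years since
1900, so 2000-2099 map to 100-199 (century bit clear) and 2100-2199 to
200-299 (century bit set), with only the last two digits stored in the
year register. A standalone worked example (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		int year = 2100;
		int tm_year = year - 1900;    /* 200: years since 1900 */
		int century = tm_year > 199;  /* 1 -> DS1337_BIT_CENTURY set */
		int year_reg = tm_year % 100; /* 0, written to the chip in BCD */

		printf("tm_year=%d century=%d year_reg=%02d\n",
		       tm_year, century, year_reg);
		return 0;
	}
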
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index 641e8e8a0dd7..ccfc9d43eb1e 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -18,6 +18,7 @@
#include <linux/rtc.h>
#include <linux/spi/spi.h>
#include <linux/bcd.h>
+#include <linux/regmap.h>
/* Registers in ds1347 rtc */
@@ -32,37 +33,28 @@
#define DS1347_STATUS_REG 0x17
#define DS1347_CLOCK_BURST 0x3F
-static int ds1347_read_reg(struct device *dev, unsigned char address,
- unsigned char *data)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- *data = address | 0x80;
-
- return spi_write_then_read(spi, data, 1, data, 1);
-}
-
-static int ds1347_write_reg(struct device *dev, unsigned char address,
- unsigned char data)
-{
- struct spi_device *spi = to_spi_device(dev);
- unsigned char buf[2];
-
- buf[0] = address & 0x7F;
- buf[1] = data;
+static const struct regmap_range ds1347_ranges[] = {
+ {
+ .range_min = DS1347_SECONDS_REG,
+ .range_max = DS1347_STATUS_REG,
+ },
+};
- return spi_write_then_read(spi, buf, 2, NULL, 0);
-}
+static const struct regmap_access_table ds1347_access_table = {
+ .yes_ranges = ds1347_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ds1347_ranges),
+};
static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
{
struct spi_device *spi = to_spi_device(dev);
+ struct regmap *map;
int err;
unsigned char buf[8];
- buf[0] = DS1347_CLOCK_BURST | 0x80;
+ map = spi_get_drvdata(spi);
- err = spi_write_then_read(spi, buf, 1, buf, 8);
+ err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
if (err)
return err;
@@ -80,25 +72,27 @@ static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
{
struct spi_device *spi = to_spi_device(dev);
- unsigned char buf[9];
+ struct regmap *map;
+ unsigned char buf[8];
+
+ map = spi_get_drvdata(spi);
- buf[0] = DS1347_CLOCK_BURST & 0x7F;
- buf[1] = bin2bcd(dt->tm_sec);
- buf[2] = bin2bcd(dt->tm_min);
- buf[3] = (bin2bcd(dt->tm_hour) & 0x3F);
- buf[4] = bin2bcd(dt->tm_mday);
- buf[5] = bin2bcd(dt->tm_mon + 1);
- buf[6] = bin2bcd(dt->tm_wday + 1);
+ buf[0] = bin2bcd(dt->tm_sec);
+ buf[1] = bin2bcd(dt->tm_min);
+ buf[2] = (bin2bcd(dt->tm_hour) & 0x3F);
+ buf[3] = bin2bcd(dt->tm_mday);
+ buf[4] = bin2bcd(dt->tm_mon + 1);
+ buf[5] = bin2bcd(dt->tm_wday + 1);
/* year in Linux counts from 1900, i.e. in the range of 100;
in the RTC it is stored as 00 to 99 */
dt->tm_year = dt->tm_year % 100;
- buf[7] = bin2bcd(dt->tm_year);
- buf[8] = bin2bcd(0x00);
+ buf[6] = bin2bcd(dt->tm_year);
+ buf[7] = bin2bcd(0x00);
/* write the rtc settings */
- return spi_write_then_read(spi, buf, 9, NULL, 0);
+ return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
}
static const struct rtc_class_ops ds1347_rtc_ops = {
@@ -109,35 +103,53 @@ static const struct rtc_class_ops ds1347_rtc_ops = {
static int ds1347_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
- unsigned char data;
+ struct regmap_config config;
+ struct regmap *map;
+ unsigned int data;
int res;
+ memset(&config, 0, sizeof(config));
+ config.reg_bits = 8;
+ config.val_bits = 8;
+ config.read_flag_mask = 0x80;
+ config.max_register = 0x3F;
+ config.wr_table = &ds1347_access_table;
+
/* spi setup with ds1347 in mode 3 and bits per word as 8 */
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
spi_setup(spi);
+ map = devm_regmap_init_spi(spi, &config);
+
+ if (IS_ERR(map)) {
+ dev_err(&spi->dev, "ds1347 regmap init spi failed\n");
+ return PTR_ERR(map);
+ }
+
+ spi_set_drvdata(spi, map);
+
/* RTC Settings */
- res = ds1347_read_reg(&spi->dev, DS1347_SECONDS_REG, &data);
+ res = regmap_read(map, DS1347_SECONDS_REG, &data);
if (res)
return res;
/* Disable the write protect of rtc */
- ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+ regmap_read(map, DS1347_CONTROL_REG, &data);
data = data & ~(1<<7);
- ds1347_write_reg(&spi->dev, DS1347_CONTROL_REG, data);
+ regmap_write(map, DS1347_CONTROL_REG, data);
/* Enable the oscillator, disable the oscillator stop flag,
and glitch filter to reduce current consumption */
- ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+ regmap_read(map, DS1347_STATUS_REG, &data);
data = data & 0x1B;
- ds1347_write_reg(&spi->dev, DS1347_STATUS_REG, data);
+ regmap_write(map, DS1347_STATUS_REG, data);
/* display the settings */
- ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+ regmap_read(map, DS1347_CONTROL_REG, &data);
dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data);
- ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+ regmap_read(map, DS1347_STATUS_REG, &data);
dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data);
rtc = devm_rtc_device_register(&spi->dev, "ds1347",
@@ -146,8 +158,6 @@ static int ds1347_probe(struct spi_device *spi)
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- spi_set_drvdata(spi, rtc);
-
return 0;
}
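
The regmap conversion works because read_flag_mask takes over the
address framing the removed helpers open-coded: with read_flag_mask =
0x80, regmap ORs the top bit into the register address on reads (the
old "address | 0x80"), while writes keep the top bit clear (the old
"address & 0x7F"). A standalone sketch of the on-wire addresses
(illustrative only):

	#include <stdio.h>

	#define DS1347_SECONDS_REG 0x00

	int main(void)
	{
		unsigned char read_addr = DS1347_SECONDS_REG | 0x80;  /* read flag */
		unsigned char write_addr = DS1347_SECONDS_REG & 0x7F; /* write */

		printf("read=0x%02x write=0x%02x\n", read_addr, write_addr);
		return 0;
	}
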
diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c
index b57505efadbc..688debc14348 100644
--- a/drivers/rtc/rtc-gemini.c
+++ b/drivers/rtc/rtc-gemini.c
@@ -110,7 +110,7 @@ static int gemini_rtc_set_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-static struct rtc_class_ops gemini_rtc_ops = {
+static const struct rtc_class_ops gemini_rtc_ops = {
.read_time = gemini_rtc_read_time,
.set_time = gemini_rtc_set_time,
};
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
deleted file mode 100644
index 0e7f0f52bfe4..000000000000
--- a/drivers/rtc/rtc-isl12057.c
+++ /dev/null
@@ -1,643 +0,0 @@
-/*
- * rtc-isl12057 - Driver for Intersil ISL12057 I2C Real Time Clock
- *
- * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org>
- *
- * This work is largely based on Intersil ISL1208 driver developed by
- * Hebert Valerio Riedel <hvr@gnu.org>.
- *
- * Detailed datasheet on which this development is based is available here:
- *
- * http://natisbad.org/NAS2/refs/ISL12057.pdf
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/rtc.h>
-#include <linux/i2c.h>
-#include <linux/bcd.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-
-#define DRV_NAME "rtc-isl12057"
-
-/* RTC section */
-#define ISL12057_REG_RTC_SC 0x00 /* Seconds */
-#define ISL12057_REG_RTC_MN 0x01 /* Minutes */
-#define ISL12057_REG_RTC_HR 0x02 /* Hours */
-#define ISL12057_REG_RTC_HR_PM BIT(5) /* AM/PM bit in 12h format */
-#define ISL12057_REG_RTC_HR_MIL BIT(6) /* 24h/12h format */
-#define ISL12057_REG_RTC_DW 0x03 /* Day of the Week */
-#define ISL12057_REG_RTC_DT 0x04 /* Date */
-#define ISL12057_REG_RTC_MO 0x05 /* Month */
-#define ISL12057_REG_RTC_MO_CEN BIT(7) /* Century bit */
-#define ISL12057_REG_RTC_YR 0x06 /* Year */
-#define ISL12057_RTC_SEC_LEN 7
-
-/* Alarm 1 section */
-#define ISL12057_REG_A1_SC 0x07 /* Alarm 1 Seconds */
-#define ISL12057_REG_A1_MN 0x08 /* Alarm 1 Minutes */
-#define ISL12057_REG_A1_HR 0x09 /* Alarm 1 Hours */
-#define ISL12057_REG_A1_HR_PM BIT(5) /* AM/PM bit in 12h format */
-#define ISL12057_REG_A1_HR_MIL BIT(6) /* 24h/12h format */
-#define ISL12057_REG_A1_DWDT 0x0A /* Alarm 1 Date / Day of the week */
-#define ISL12057_REG_A1_DWDT_B BIT(6) /* DW / DT selection bit */
-#define ISL12057_A1_SEC_LEN 4
-
-/* Alarm 2 section */
-#define ISL12057_REG_A2_MN 0x0B /* Alarm 2 Minutes */
-#define ISL12057_REG_A2_HR 0x0C /* Alarm 2 Hours */
-#define ISL12057_REG_A2_DWDT 0x0D /* Alarm 2 Date / Day of the week */
-#define ISL12057_A2_SEC_LEN 3
-
-/* Control/Status registers */
-#define ISL12057_REG_INT 0x0E
-#define ISL12057_REG_INT_A1IE BIT(0) /* Alarm 1 interrupt enable bit */
-#define ISL12057_REG_INT_A2IE BIT(1) /* Alarm 2 interrupt enable bit */
-#define ISL12057_REG_INT_INTCN BIT(2) /* Interrupt control enable bit */
-#define ISL12057_REG_INT_RS1 BIT(3) /* Freq out control bit 1 */
-#define ISL12057_REG_INT_RS2 BIT(4) /* Freq out control bit 2 */
-#define ISL12057_REG_INT_EOSC BIT(7) /* Oscillator enable bit */
-
-#define ISL12057_REG_SR 0x0F
-#define ISL12057_REG_SR_A1F BIT(0) /* Alarm 1 interrupt bit */
-#define ISL12057_REG_SR_A2F BIT(1) /* Alarm 2 interrupt bit */
-#define ISL12057_REG_SR_OSF BIT(7) /* Oscillator failure bit */
-
-/* Register memory map length */
-#define ISL12057_MEM_MAP_LEN 0x10
-
-struct isl12057_rtc_data {
- struct rtc_device *rtc;
- struct regmap *regmap;
- struct mutex lock;
- int irq;
-};
-
-static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
-{
- tm->tm_sec = bcd2bin(regs[ISL12057_REG_RTC_SC]);
- tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]);
-
- if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */
- tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f);
- if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM)
- tm->tm_hour += 12;
- } else { /* 24 hour mode */
- tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x3f);
- }
-
- tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]);
- tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */
- tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */
- tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100;
-
- /* Check if years register has overflown from 99 to 00 */
- if (regs[ISL12057_REG_RTC_MO] & ISL12057_REG_RTC_MO_CEN)
- tm->tm_year += 100;
-}
-
-static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm)
-{
- u8 century_bit;
-
- /*
- * The clock has an 8 bit wide bcd-coded register for the year.
- * It also has a century bit encoded in MO flag which provides
- * information about overflow of year register from 99 to 00.
- * tm_year is an offset from 1900 and we are interested in the
- * 2000-2199 range, so any value less than 100 or larger than
- * 299 is invalid.
- */
- if (tm->tm_year < 100 || tm->tm_year > 299)
- return -EINVAL;
-
- century_bit = (tm->tm_year > 199) ? ISL12057_REG_RTC_MO_CEN : 0;
-
- regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec);
- regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min);
- regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */
- regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday);
- regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1) | century_bit;
- regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year % 100);
- regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1);
-
- return 0;
-}
-
-/*
- * Try and match register bits w/ fixed null values to see whether we
- * are dealing with an ISL12057. Note: this function is called early
- * during init and hence does not need mutex protection.
- */
-static int isl12057_i2c_validate_chip(struct regmap *regmap)
-{
- u8 regs[ISL12057_MEM_MAP_LEN];
- static const u8 mask[ISL12057_MEM_MAP_LEN] = { 0x80, 0x80, 0x80, 0xf8,
- 0xc0, 0x60, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x60, 0x7c };
- int ret, i;
-
- ret = regmap_bulk_read(regmap, 0, regs, ISL12057_MEM_MAP_LEN);
- if (ret)
- return ret;
-
- for (i = 0; i < ISL12057_MEM_MAP_LEN; ++i) {
- if (regs[i] & mask[i]) /* check if bits are cleared */
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int _isl12057_rtc_clear_alarm(struct device *dev)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- int ret;
-
- ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
- ISL12057_REG_SR_A1F, 0);
- if (ret)
- dev_err(dev, "%s: clearing alarm failed (%d)\n", __func__, ret);
-
- return ret;
-}
-
-static int _isl12057_rtc_update_alarm(struct device *dev, int enable)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- int ret;
-
- ret = regmap_update_bits(data->regmap, ISL12057_REG_INT,
- ISL12057_REG_INT_A1IE,
- enable ? ISL12057_REG_INT_A1IE : 0);
- if (ret)
- dev_err(dev, "%s: changing alarm interrupt flag failed (%d)\n",
- __func__, ret);
-
- return ret;
-}
-
-/*
- * Note: as we only read from device and do not perform any update, there is
- * no need for an equivalent function which would try and get driver's main
- * lock. Here, it is safe for everyone if we just use regmap internal lock
- * on the device when reading.
- */
-static int _isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- u8 regs[ISL12057_RTC_SEC_LEN];
- unsigned int sr;
- int ret;
-
- ret = regmap_read(data->regmap, ISL12057_REG_SR, &sr);
- if (ret) {
- dev_err(dev, "%s: unable to read oscillator status flag (%d)\n",
- __func__, ret);
- goto out;
- } else {
- if (sr & ISL12057_REG_SR_OSF) {
- ret = -ENODATA;
- goto out;
- }
- }
-
- ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs,
- ISL12057_RTC_SEC_LEN);
- if (ret)
- dev_err(dev, "%s: unable to read RTC time section (%d)\n",
- __func__, ret);
-
-out:
- if (ret)
- return ret;
-
- isl12057_rtc_regs_to_tm(tm, regs);
-
- return rtc_valid_tm(tm);
-}
-
-static int isl12057_rtc_update_alarm(struct device *dev, int enable)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- int ret;
-
- mutex_lock(&data->lock);
- ret = _isl12057_rtc_update_alarm(dev, enable);
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- struct rtc_time *alarm_tm = &alarm->time;
- u8 regs[ISL12057_A1_SEC_LEN];
- unsigned int ir;
- int ret;
-
- mutex_lock(&data->lock);
- ret = regmap_bulk_read(data->regmap, ISL12057_REG_A1_SC, regs,
- ISL12057_A1_SEC_LEN);
- if (ret) {
- dev_err(dev, "%s: reading alarm section failed (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- alarm_tm->tm_sec = bcd2bin(regs[0] & 0x7f);
- alarm_tm->tm_min = bcd2bin(regs[1] & 0x7f);
- alarm_tm->tm_hour = bcd2bin(regs[2] & 0x3f);
- alarm_tm->tm_mday = bcd2bin(regs[3] & 0x3f);
-
- ret = regmap_read(data->regmap, ISL12057_REG_INT, &ir);
- if (ret) {
- dev_err(dev, "%s: reading alarm interrupt flag failed (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- alarm->enabled = !!(ir & ISL12057_REG_INT_A1IE);
-
-err_unlock:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int isl12057_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- struct rtc_time *alarm_tm = &alarm->time;
- unsigned long rtc_secs, alarm_secs;
- u8 regs[ISL12057_A1_SEC_LEN];
- struct rtc_time rtc_tm;
- int ret, enable = 1;
-
- mutex_lock(&data->lock);
- ret = _isl12057_rtc_read_time(dev, &rtc_tm);
- if (ret)
- goto err_unlock;
-
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err_unlock;
-
- ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
- if (ret)
- goto err_unlock;
-
- /* If alarm time is before current time, disable the alarm */
- if (!alarm->enabled || alarm_secs <= rtc_secs) {
- enable = 0;
- } else {
- /*
- * Chip only supports alarms up to one month in the future. Let's
- * return an error if we get something after that limit.
- * Comparison is done by incrementing rtc_tm month field by one
- * and checking alarm value is still below.
- */
- if (rtc_tm.tm_mon == 11) { /* handle year wrapping */
- rtc_tm.tm_mon = 0;
- rtc_tm.tm_year += 1;
- } else {
- rtc_tm.tm_mon += 1;
- }
-
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err_unlock;
-
- if (alarm_secs > rtc_secs) {
- dev_err(dev, "%s: max for alarm is one month (%d)\n",
- __func__, ret);
- ret = -EINVAL;
- goto err_unlock;
- }
- }
-
- /* Disable the alarm before modifying it */
- ret = _isl12057_rtc_update_alarm(dev, 0);
- if (ret < 0) {
- dev_err(dev, "%s: unable to disable the alarm (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- /* Program alarm registers */
- regs[0] = bin2bcd(alarm_tm->tm_sec) & 0x7f;
- regs[1] = bin2bcd(alarm_tm->tm_min) & 0x7f;
- regs[2] = bin2bcd(alarm_tm->tm_hour) & 0x3f;
- regs[3] = bin2bcd(alarm_tm->tm_mday) & 0x3f;
-
- ret = regmap_bulk_write(data->regmap, ISL12057_REG_A1_SC, regs,
- ISL12057_A1_SEC_LEN);
- if (ret < 0) {
- dev_err(dev, "%s: writing alarm section failed (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- /* Enable or disable alarm */
- ret = _isl12057_rtc_update_alarm(dev, enable);
-
-err_unlock:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- u8 regs[ISL12057_RTC_SEC_LEN];
- int ret;
-
- ret = isl12057_rtc_tm_to_regs(regs, tm);
- if (ret)
- return ret;
-
- mutex_lock(&data->lock);
- ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs,
- ISL12057_RTC_SEC_LEN);
- if (ret) {
- dev_err(dev, "%s: unable to write RTC time section (%d)\n",
- __func__, ret);
- goto out;
- }
-
- /*
- * Now that RTC time has been updated, let's clear oscillator
- * failure flag, if needed.
- */
- ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
- ISL12057_REG_SR_OSF, 0);
- if (ret < 0)
- dev_err(dev, "%s: unable to clear osc. failure bit (%d)\n",
- __func__, ret);
-
-out:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-/*
- * Check current RTC status and enable/disable what needs to be. Return 0 if
- * everything went ok and a negative value upon error. Note: this function
- * is called early during init and hence does need mutex protection.
- */
-static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap)
-{
- int ret;
-
- /* Enable oscillator if not already running */
- ret = regmap_update_bits(regmap, ISL12057_REG_INT,
- ISL12057_REG_INT_EOSC, 0);
- if (ret < 0) {
- dev_err(dev, "%s: unable to enable oscillator (%d)\n",
- __func__, ret);
- return ret;
- }
-
- /* Clear alarm bit if needed */
- ret = regmap_update_bits(regmap, ISL12057_REG_SR,
- ISL12057_REG_SR_A1F, 0);
- if (ret < 0) {
- dev_err(dev, "%s: unable to clear alarm bit (%d)\n",
- __func__, ret);
- return ret;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-/*
- * One would expect the device to be marked as a wakeup source only
- * when an IRQ pin of the RTC is routed to an interrupt line of the
- * CPU. In practice, such an IRQ pin can be connected to a PMIC and
- * this allows the device to be powered up when RTC alarm rings. This
- * is for instance the case on ReadyNAS 102, 104 and 2120. On those
- * devices with no IRQ directly connected to the SoC, the RTC chip
- * can be forced as a wakeup source by stating that explicitly in
- * the device's .dts file using the "wakeup-source" boolean property.
- * This will guarantee 'wakealarm' sysfs entry is available on the device.
- *
- * The function below returns 1, i.e. the capability of the chip to
- * wakeup the device, based on IRQ availability or if the boolean
- * property has been set in the .dts file. Otherwise, it returns 0.
- */
-
-static bool isl12057_can_wakeup_machine(struct device *dev)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-
- return data->irq || of_property_read_bool(dev->of_node, "wakeup-source")
- || of_property_read_bool(dev->of_node, /* legacy */
- "isil,irq2-can-wakeup-machine");
-}
-#else
-static bool isl12057_can_wakeup_machine(struct device *dev)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-
- return !!data->irq;
-}
-#endif
-
-static int isl12057_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enable)
-{
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
- int ret = -ENOTTY;
-
- if (rtc_data->irq)
- ret = isl12057_rtc_update_alarm(dev, enable);
-
- return ret;
-}
-
-static irqreturn_t isl12057_rtc_interrupt(int irq, void *data)
-{
- struct i2c_client *client = data;
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(&client->dev);
- struct rtc_device *rtc = rtc_data->rtc;
- int ret, handled = IRQ_NONE;
- unsigned int sr;
-
- ret = regmap_read(rtc_data->regmap, ISL12057_REG_SR, &sr);
- if (!ret && (sr & ISL12057_REG_SR_A1F)) {
- dev_dbg(&client->dev, "RTC alarm!\n");
-
- rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
-
- /* Acknowledge and disable the alarm */
- _isl12057_rtc_clear_alarm(&client->dev);
- _isl12057_rtc_update_alarm(&client->dev, 0);
-
- handled = IRQ_HANDLED;
- }
-
- return handled;
-}
-
-static const struct rtc_class_ops rtc_ops = {
- .read_time = _isl12057_rtc_read_time,
- .set_time = isl12057_rtc_set_time,
- .read_alarm = isl12057_rtc_read_alarm,
- .set_alarm = isl12057_rtc_set_alarm,
- .alarm_irq_enable = isl12057_rtc_alarm_irq_enable,
-};
-
-static const struct regmap_config isl12057_rtc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
-static int isl12057_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct device *dev = &client->dev;
- struct isl12057_rtc_data *data;
- struct regmap *regmap;
- int ret;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK))
- return -ENODEV;
-
- regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- dev_err(dev, "%s: regmap allocation failed (%d)\n",
- __func__, ret);
- return ret;
- }
-
- ret = isl12057_i2c_validate_chip(regmap);
- if (ret)
- return ret;
-
- ret = isl12057_check_rtc_status(dev, regmap);
- if (ret)
- return ret;
-
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- mutex_init(&data->lock);
- data->regmap = regmap;
- dev_set_drvdata(dev, data);
-
- if (client->irq > 0) {
- ret = devm_request_threaded_irq(dev, client->irq, NULL,
- isl12057_rtc_interrupt,
- IRQF_SHARED|IRQF_ONESHOT,
- DRV_NAME, client);
- if (!ret)
- data->irq = client->irq;
- else
- dev_err(dev, "%s: irq %d unavailable (%d)\n", __func__,
- client->irq, ret);
- }
-
- if (isl12057_can_wakeup_machine(dev))
- device_init_wakeup(dev, true);
-
- data->rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops,
- THIS_MODULE);
- ret = PTR_ERR_OR_ZERO(data->rtc);
- if (ret) {
- dev_err(dev, "%s: unable to register RTC device (%d)\n",
- __func__, ret);
- goto err;
- }
-
- /* We cannot support UIE mode if we do not have an IRQ line */
- if (!data->irq)
- data->rtc->uie_unsupported = 1;
-
-err:
- return ret;
-}
-
-static int isl12057_remove(struct i2c_client *client)
-{
- if (isl12057_can_wakeup_machine(&client->dev))
- device_init_wakeup(&client->dev, false);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int isl12057_rtc_suspend(struct device *dev)
-{
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
-
- if (rtc_data->irq && device_may_wakeup(dev))
- return enable_irq_wake(rtc_data->irq);
-
- return 0;
-}
-
-static int isl12057_rtc_resume(struct device *dev)
-{
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
-
- if (rtc_data->irq && device_may_wakeup(dev))
- return disable_irq_wake(rtc_data->irq);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(isl12057_rtc_pm_ops, isl12057_rtc_suspend,
- isl12057_rtc_resume);
-
-#ifdef CONFIG_OF
-static const struct of_device_id isl12057_dt_match[] = {
- { .compatible = "isl,isl12057" }, /* for backward compat., don't use */
- { .compatible = "isil,isl12057" },
- { },
-};
-MODULE_DEVICE_TABLE(of, isl12057_dt_match);
-#endif
-
-static const struct i2c_device_id isl12057_id[] = {
- { "isl12057", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, isl12057_id);
-
-static struct i2c_driver isl12057_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &isl12057_rtc_pm_ops,
- .of_match_table = of_match_ptr(isl12057_dt_match),
- },
- .probe = isl12057_probe,
- .remove = isl12057_remove,
- .id_table = isl12057_id,
-};
-module_i2c_driver(isl12057_driver);
-
-MODULE_AUTHOR("Arnaud EBALARD <arno@natisbad.org>");
-MODULE_DESCRIPTION("Intersil ISL12057 RTC driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index b2bcfc0bf2e5..5e14651b71a8 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -174,7 +174,7 @@ static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable);
}
-static struct rtc_class_ops jz4740_rtc_ops = {
+static const struct rtc_class_ops jz4740_rtc_ops = {
.read_time = jz4740_rtc_read_time,
.set_mmss = jz4740_rtc_set_mmss,
.read_alarm = jz4740_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 025bb33b9cd2..4021fd04cb0a 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -151,7 +151,7 @@ static int mcp795_read_time(struct device *dev, struct rtc_time *tim)
return rtc_valid_tm(tim);
}
-static struct rtc_class_ops mcp795_rtc_ops = {
+static const struct rtc_class_ops mcp795_rtc_ops = {
.read_time = mcp795_read_time,
.set_time = mcp795_set_time
};
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 44f622c3e048..1a61fa56f3ad 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -301,7 +301,7 @@ exit:
return ret;
}
-static struct rtc_class_ops mtk_rtc_ops = {
+static const struct rtc_class_ops mtk_rtc_ops = {
.read_time = mtk_rtc_read_time,
.set_time = mtk_rtc_set_time,
.read_alarm = mtk_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index 09fc1c19f0df..b1b6b3041bfb 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -214,7 +214,7 @@ static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return 0;
}
-static struct rtc_class_ops nuc900_rtc_ops = {
+static const struct rtc_class_ops nuc900_rtc_ops = {
.read_time = nuc900_rtc_read_time,
.set_time = nuc900_rtc_set_time,
.read_alarm = nuc900_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index ec2e9c5fb993..51e52446eacb 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -13,19 +13,23 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/kernel.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/io.h>
#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/rtc.h>
-#include <linux/bcd.h>
-#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/rtc.h>
/*
* The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
@@ -109,12 +113,15 @@
/* OMAP_RTC_OSC_REG bit fields: */
#define OMAP_RTC_OSC_32KCLK_EN BIT(6)
#define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
+#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
/* OMAP_RTC_IRQWAKEEN bit fields: */
#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
/* OMAP_RTC_PMIC bit fields: */
#define OMAP_RTC_PMIC_POWER_EN_EN BIT(16)
+#define OMAP_RTC_PMIC_EXT_WKUP_EN(x) BIT(x)
+#define OMAP_RTC_PMIC_EXT_WKUP_POL(x) BIT(4 + x)
/* OMAP_RTC_KICKER values */
#define KICK0_VALUE 0x83e70b13
@@ -140,7 +147,9 @@ struct omap_rtc {
u8 interrupts_reg;
bool is_pmic_controller;
bool has_ext_clk;
+ bool is_suspending;
const struct omap_rtc_device_type *type;
+ struct pinctrl_dev *pctldev;
};
static inline u8 rtc_read(struct omap_rtc *rtc, unsigned int reg)
@@ -469,7 +478,7 @@ static void omap_rtc_power_off(void)
mdelay(2500);
}
-static struct rtc_class_ops omap_rtc_ops = {
+static const struct rtc_class_ops omap_rtc_ops = {
.read_time = omap_rtc_read_time,
.set_time = omap_rtc_set_time,
.read_alarm = omap_rtc_read_alarm,
@@ -525,6 +534,139 @@ static const struct of_device_id omap_rtc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
+static const struct pinctrl_pin_desc rtc_pins_desc[] = {
+ PINCTRL_PIN(0, "ext_wakeup0"),
+ PINCTRL_PIN(1, "ext_wakeup1"),
+ PINCTRL_PIN(2, "ext_wakeup2"),
+ PINCTRL_PIN(3, "ext_wakeup3"),
+};
+
+static int rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return 0;
+}
+
+static const char *rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ return NULL;
+}
+
+static const struct pinctrl_ops rtc_pinctrl_ops = {
+ .get_groups_count = rtc_pinctrl_get_groups_count,
+ .get_group_name = rtc_pinctrl_get_group_name,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+enum rtc_pin_config_param {
+ PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1,
+};
+
+static const struct pinconf_generic_params rtc_params[] = {
+ {"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item rtc_conf_items[ARRAY_SIZE(rtc_params)] = {
+ PCONFDUMP(PIN_CONFIG_ACTIVE_HIGH, "input active high", NULL, false),
+};
+#endif
+
+static int rtc_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int param = pinconf_to_config_param(*config);
+ u32 val;
+ u16 arg = 0;
+
+ rtc->type->unlock(rtc);
+ val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
+ rtc->type->lock(rtc);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (!(val & OMAP_RTC_PMIC_EXT_WKUP_EN(pin)))
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_ACTIVE_HIGH:
+ if (val & OMAP_RTC_PMIC_EXT_WKUP_POL(pin))
+ return -EINVAL;
+ break;
+ default:
+ return -ENOTSUPP;
+ };
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int rtc_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ u32 val;
+ unsigned int param;
+ u16 param_val;
+ int i;
+
+ rtc->type->unlock(rtc);
+ val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
+ rtc->type->lock(rtc);
+
+ /* active low by default */
+ val |= OMAP_RTC_PMIC_EXT_WKUP_POL(pin);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ param_val = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (param_val)
+ val |= OMAP_RTC_PMIC_EXT_WKUP_EN(pin);
+ else
+ val &= ~OMAP_RTC_PMIC_EXT_WKUP_EN(pin);
+ break;
+ case PIN_CONFIG_ACTIVE_HIGH:
+ val &= ~OMAP_RTC_PMIC_EXT_WKUP_POL(pin);
+ break;
+ default:
+ dev_err(&rtc->rtc->dev, "Property %u not supported\n",
+ param);
+ return -ENOTSUPP;
+ }
+ }
+
+ rtc->type->unlock(rtc);
+ rtc_writel(rtc, OMAP_RTC_PMIC_REG, val);
+ rtc->type->lock(rtc);
+
+ return 0;
+}
+
+static const struct pinconf_ops rtc_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = rtc_pinconf_get,
+ .pin_config_set = rtc_pinconf_set,
+};
+
+static struct pinctrl_desc rtc_pinctrl_desc = {
+ .pins = rtc_pins_desc,
+ .npins = ARRAY_SIZE(rtc_pins_desc),
+ .pctlops = &rtc_pinctrl_ops,
+ .confops = &rtc_pinconf_ops,
+ .custom_params = rtc_params,
+ .num_custom_params = ARRAY_SIZE(rtc_params),
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = rtc_conf_items,
+#endif
+ .owner = THIS_MODULE,
+};
+
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
@@ -646,8 +788,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
*/
if (rtc->has_ext_clk) {
reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
- rtc_write(rtc, OMAP_RTC_OSC_REG,
- reg | OMAP_RTC_OSC_SEL_32KCLK_SRC);
+ reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
+ reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
+ rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
}
rtc->type->lock(rtc);
@@ -681,6 +824,15 @@ static int omap_rtc_probe(struct platform_device *pdev)
}
}
+ /* Support ext_wakeup pinconf */
+ rtc_pinctrl_desc.name = dev_name(&pdev->dev);
+
+ rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
+ if (IS_ERR(rtc->pctldev)) {
+ dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+ return PTR_ERR(rtc->pctldev);
+ }
+
return 0;
err:
@@ -724,6 +876,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ /* Remove ext_wakeup pinconf */
+ pinctrl_unregister(rtc->pctldev);
+
return 0;
}
@@ -746,8 +901,7 @@ static int omap_rtc_suspend(struct device *dev)
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
rtc->type->lock(rtc);
- /* Disable the clock/module */
- pm_runtime_put_sync(dev);
+ rtc->is_suspending = true;
return 0;
}
@@ -756,9 +910,6 @@ static int omap_rtc_resume(struct device *dev)
{
struct omap_rtc *rtc = dev_get_drvdata(dev);
- /* Enable the clock/module so that we can access the registers */
- pm_runtime_get_sync(dev);
-
rtc->type->unlock(rtc);
if (device_may_wakeup(dev))
disable_irq_wake(rtc->irq_alarm);
@@ -766,11 +917,34 @@ static int omap_rtc_resume(struct device *dev)
rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg);
rtc->type->lock(rtc);
+ rtc->is_suspending = false;
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int omap_rtc_runtime_suspend(struct device *dev)
+{
+ struct omap_rtc *rtc = dev_get_drvdata(dev);
+
+ if (rtc->is_suspending && !rtc->has_ext_clk)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int omap_rtc_runtime_resume(struct device *dev)
+{
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume);
+static const struct dev_pm_ops omap_rtc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume)
+ SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend,
+ omap_rtc_runtime_resume, NULL)
+};
static void omap_rtc_shutdown(struct platform_device *pdev)
{
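Replacing SIMPLE_DEV_PM_OPS with an explicit dev_pm_ops is what lets the omap driver veto runtime suspend while a system sleep is in flight: the new runtime callback returns -EBUSY when is_suspending is set and no external clock is available. The combined table looks like this in isolation (sketch with placeholder callbacks, not the driver's actual code):

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int __maybe_unused foo_suspend(struct device *dev) { return 0; }
	static int __maybe_unused foo_resume(struct device *dev) { return 0; }

	/* Returning -EBUSY refuses runtime suspend, keeping the block powered. */
	static int __maybe_unused foo_runtime_suspend(struct device *dev)
	{
		return -EBUSY;
	}

	static int __maybe_unused foo_runtime_resume(struct device *dev)
	{
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	};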
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 6080e0edef63..4bcfb88674d3 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -225,7 +225,7 @@ static irqreturn_t palmas_rtc_interrupt(int irq, void *context)
return IRQ_HANDLED;
}
-static struct rtc_class_ops palmas_rtc_ops = {
+static const struct rtc_class_ops palmas_rtc_ops = {
.read_time = palmas_rtc_read_time,
.set_time = palmas_rtc_set_time,
.read_alarm = palmas_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index b4478cc92b55..8895f77726e8 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -182,7 +182,8 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
- const char *buffer, size_t count) {
+ const char *buffer, size_t count)
+{
struct pcf2123_sysfs_reg *r;
unsigned long reg;
unsigned long val;
@@ -199,7 +200,7 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- pcf2123_write_reg(dev, reg, val);
+ ret = pcf2123_write_reg(dev, reg, val);
if (ret < 0)
return -EIO;
return count;
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index e6b6911c8e05..00c31c91b245 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -232,7 +232,7 @@ static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
-static struct rtc_class_ops pcf50633_rtc_ops = {
+static const struct rtc_class_ops pcf50633_rtc_ops = {
.read_time = pcf50633_rtc_read_time,
.set_time = pcf50633_rtc_set_time,
.read_alarm = pcf50633_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 64e1e4578492..5cfb6df5c430 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -400,7 +400,6 @@ static struct platform_driver pic32_rtc_driver = {
.remove = pic32_rtc_remove,
.driver = {
.name = "pic32-rtc",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_rtc_dt_ids),
},
};
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 9a2f6a95d5a7..f9277e536f7e 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -52,11 +52,21 @@
#define RV8803_CTRL_TIE BIT(4)
#define RV8803_CTRL_UIE BIT(5)
+#define RX8900_BACKUP_CTRL 0x18
+#define RX8900_FLAG_SWOFF BIT(2)
+#define RX8900_FLAG_VDETOFF BIT(3)
+
+enum rv8803_type {
+ rv_8803,
+ rx_8900
+};
+
struct rv8803_data {
struct i2c_client *client;
struct rtc_device *rtc;
struct mutex flags_lock;
u8 ctrl;
+ enum rv8803_type type;
};
static int rv8803_read_reg(const struct i2c_client *client, u8 reg)
@@ -497,6 +507,35 @@ static struct rtc_class_ops rv8803_rtc_ops = {
.ioctl = rv8803_ioctl,
};
+static int rx8900_trickle_charger_init(struct rv8803_data *rv8803)
+{
+ struct i2c_client *client = rv8803->client;
+ struct device_node *node = client->dev.of_node;
+ int err;
+ u8 flags;
+
+ if (!node)
+ return 0;
+
+ if (rv8803->type != rx_8900)
+ return 0;
+
+ err = i2c_smbus_read_byte_data(rv8803->client, RX8900_BACKUP_CTRL);
+ if (err < 0)
+ return err;
+
+ flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err;
+
+ if (of_property_read_bool(node, "epson,vdet-disable"))
+ flags |= RX8900_FLAG_VDETOFF;
+
+ if (of_property_read_bool(node, "trickle-diode-disable"))
+ flags |= RX8900_FLAG_SWOFF;
+
+ return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL,
+ flags);
+}
+
static int rv8803_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -517,6 +556,7 @@ static int rv8803_probe(struct i2c_client *client,
mutex_init(&rv8803->flags_lock);
rv8803->client = client;
+ rv8803->type = id->driver_data;
i2c_set_clientdata(client, rv8803);
flags = rv8803_read_reg(client, RV8803_FLAG);
@@ -558,6 +598,12 @@ static int rv8803_probe(struct i2c_client *client,
if (err)
return err;
+ err = rx8900_trickle_charger_init(rv8803);
+ if (err) {
+ dev_err(&client->dev, "failed to init charger\n");
+ return err;
+ }
+
err = device_create_bin_file(&client->dev, &rv8803_nvram_attr);
if (err)
return err;
@@ -575,8 +621,8 @@ static int rv8803_remove(struct i2c_client *client)
}
static const struct i2c_device_id rv8803_id[] = {
- { "rv8803", 0 },
- { "rx8900", 0 },
+ { "rv8803", rv_8803 },
+ { "rx8900", rx_8900 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rv8803_id);
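The rv8803/rx8900 split uses the standard recipe for supporting chip variants from one I2C driver: encode the variant in the id table's driver_data, capture it in the per-client state at probe time, and branch on it where behaviour differs (here, only the RX8900 gets the backup-control init). In outline, with hypothetical names:

	#include <linux/i2c.h>
	#include <linux/slab.h>

	enum foo_type { foo_a, foo_b };

	struct foo_data {
		struct i2c_client *client;
		enum foo_type type;
	};

	static const struct i2c_device_id foo_id[] = {
		{ "foo-a", foo_a },
		{ "foo-b", foo_b },
		{ }
	};

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		struct foo_data *d;

		d = devm_kzalloc(&client->dev, sizeof(*d), GFP_KERNEL);
		if (!d)
			return -ENOMEM;
		d->client = client;
		d->type = id->driver_data;	/* variant chosen by the match */
		i2c_set_clientdata(client, d);
		return 0;
	}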
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index bbad00b233bc..7c9c08eab5e5 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -317,7 +317,7 @@ static int rx6110_init(struct rx6110_data *rx6110)
return ret;
}
-static struct rtc_class_ops rx6110_rtc_ops = {
+static const struct rtc_class_ops rx6110_rtc_ops = {
.read_time = rx6110_get_time,
.set_time = rx6110_set_time,
};
@@ -388,7 +388,6 @@ MODULE_DEVICE_TABLE(spi, rx6110_id);
static struct spi_driver rx6110_driver = {
.driver = {
.name = RX6110_DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = rx6110_probe,
.remove = rx6110_remove,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 2b85cc7a24e7..91857d8d2df8 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -403,7 +403,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static struct rtc_class_ops rx8025_rtc_ops = {
+static const struct rtc_class_ops rx8025_rtc_ops = {
.read_time = rx8025_get_time,
.set_time = rx8025_set_time,
.read_alarm = rx8025_read_alarm,
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index f05ef8568480..e377f42abae7 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -343,7 +343,7 @@ static int spear_alarm_irq_enable(struct device *dev, unsigned int enabled)
return ret;
}
-static struct rtc_class_ops spear_rtc_ops = {
+static const struct rtc_class_ops spear_rtc_ops = {
.read_time = spear_rtc_read_time,
.set_time = spear_rtc_set_time,
.read_alarm = spear_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index e6aaaa52e7fe..d578e40d5a50 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -231,7 +231,7 @@ static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
return 0;
}
-static struct rtc_class_ops stmp3xxx_rtc_ops = {
+static const struct rtc_class_ops stmp3xxx_rtc_ops = {
.alarm_irq_enable =
stmp3xxx_alarm_irq_enable,
.read_time = stmp3xxx_rtc_gettime,
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 63b9fb1318c2..1218d5d4224d 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -160,7 +160,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
unsigned long push = 0;
struct rtc_wkalrm alm;
struct rtc_device *rtc = to_rtc_device(dev);
- char *buf_ptr;
+ const char *buf_ptr;
int adjust = 0;
/* Only request alarms that trigger in the future. Disable them
@@ -171,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
return retval;
rtc_tm_to_time(&alm.time, &now);
- buf_ptr = (char *)buf;
+ buf_ptr = buf;
if (*buf_ptr == '+') {
buf_ptr++;
if (*buf_ptr == '=') {
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 15ac597d54da..3853ba963bb5 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -291,7 +291,7 @@ static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static struct rtc_class_ops tegra_rtc_ops = {
+static const struct rtc_class_ops tegra_rtc_ops = {
.read_time = tegra_rtc_read_time,
.set_time = tegra_rtc_set_time,
.read_alarm = tegra_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 2dc787dc06c1..176720b7b9e5 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -462,7 +462,7 @@ out:
return ret;
}
-static struct rtc_class_ops twl_rtc_ops = {
+static const struct rtc_class_ops twl_rtc_ops = {
.read_time = twl_rtc_read_time,
.set_time = twl_rtc_set_time,
.read_alarm = twl_rtc_read_alarm,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 831935af7389..a7a88476e215 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1205,7 +1205,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
mdc, lpm);
return mdc;
}
- fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
+ fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
if (fcx_max_data < private->fcx_max_data) {
dev_warn(&device->cdev->dev,
"The maximum data size for zHPF requests %u "
@@ -1675,7 +1675,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
" data size for zHPF requests failed\n");
return 0;
} else
- return mdc * FCX_MAX_DATA_FACTOR;
+ return (u32)mdc * FCX_MAX_DATA_FACTOR;
}
/*
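Both dasd_eckd hunks cast mdc, an int, to u32 before multiplying. The point is to keep the arithmetic out of the signed domain: an int * int product that exceeds INT_MAX is undefined behaviour, whereas after the cast the product is computed modulo 2^32, matching the u32 destination. A user-space illustration of the difference:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int mdc = 0x00800000;		/* hypothetical large value */
		uint32_t factor = 0x100;

		/* mdc * (int)factor == 0x80000000 > INT_MAX: as a signed
		 * multiplication this overflows, which is undefined
		 * behaviour. Casting first keeps it in uint32_t. */
		uint32_t max_data = (uint32_t)mdc * factor;

		printf("0x%08x\n", (unsigned int)max_data);	/* 0x80000000 */
		return 0;
	}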
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 46be25c7461e..876c7e6e3a99 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -780,7 +780,7 @@ static int cfg_wait_idle(void)
static int __init chp_init(void)
{
struct chp_id chpid;
- int ret;
+ int state, ret;
ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
@@ -791,7 +791,9 @@ static int __init chp_init(void)
return 0;
/* Register available channel-paths. */
chp_id_for_each(&chpid) {
- if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+ state = chp_info_get_status(chpid);
+ if (state == CHP_STATUS_CONFIGURED ||
+ state == CHP_STATUS_STANDBY)
chp_new(chpid);
}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 637cf8973c9e..581001989937 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -384,7 +384,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
/* if (len > rec_len):
* dump data up to cap_len ignoring small duplicate in rec->payload
*/
- spin_lock_irqsave(&dbf->pay_lock, flags);
+ spin_lock(&dbf->pay_lock);
memset(payload, 0, sizeof(*payload));
memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
payload->fsf_req_id = req_id;
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index db2739079cbb..790babc5ef66 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -353,7 +353,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
#endif
-static int probe_irq __initdata;
+static int probe_irq;
/**
* probe_intr - helper for IRQ autoprobe
@@ -365,7 +365,7 @@ static int probe_irq __initdata;
* used by the IRQ probe code.
*/
-static irqreturn_t __init probe_intr(int irq, void *dev_id)
+static irqreturn_t probe_intr(int irq, void *dev_id)
{
probe_irq = irq;
return IRQ_HANDLED;
@@ -380,7 +380,7 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
* and then looking to see what interrupt actually turned up.
*/
-static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
+static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
int possible)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 3d53d636b17b..f0cfb0451757 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2636,18 +2636,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
struct CommandControlBlock *ccb;
int target = cmd->device->id;
- int lun = cmd->device->lun;
- uint8_t scsicmd = cmd->cmnd[0];
cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
- cmd->result = (DID_NO_CONNECT << 16);
- }
- cmd->scsi_done(cmd);
- return 0;
- }
if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 6a6906f847db..d9239c2d49b1 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(be_max_phys_size,
"memory that can be allocated. Range is 16 - 128");
#define beiscsi_disp_param(_name)\
-ssize_t \
+static ssize_t \
beiscsi_##_name##_disp(struct device *dev,\
struct device_attribute *attrib, char *buf) \
{ \
@@ -74,7 +74,7 @@ beiscsi_##_name##_disp(struct device *dev,\
}
#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
-int \
+static int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
if (val >= _minval && val <= _maxval) {\
@@ -93,7 +93,7 @@ beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
}
#define beiscsi_store_param(_name) \
-ssize_t \
+static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
struct device_attribute *attr, const char *buf,\
size_t count) \
@@ -112,7 +112,7 @@ beiscsi_##_name##_store(struct device *dev,\
}
#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
-int \
+static int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
if (val >= _minval && val <= _maxval) {\
@@ -900,8 +900,9 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ unsigned long flags;
- spin_lock_bh(&phba->io_sgl_lock);
+ spin_lock_irqsave(&phba->io_sgl_lock, flags);
if (phba->io_sgl_hndl_avbl) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In alloc_io_sgl_handle,"
@@ -919,14 +920,16 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
phba->io_sgl_alloc_index++;
} else
psgl_handle = NULL;
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return psgl_handle;
}
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- spin_lock_bh(&phba->io_sgl_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->io_sgl_lock, flags);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In free_,io_sgl_free_index=%d\n",
phba->io_sgl_free_index);
@@ -941,7 +944,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"value there=%p\n", phba->io_sgl_free_index,
phba->io_sgl_hndl_base
[phba->io_sgl_free_index]);
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -950,7 +953,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->io_sgl_free_index = 0;
else
phba->io_sgl_free_index++;
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}
static inline struct wrb_handle *
@@ -958,15 +961,16 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
unsigned int wrbs_per_cxn)
{
struct wrb_handle *pwrb_handle;
+ unsigned long flags;
- spin_lock_bh(&pwrb_context->wrb_lock);
+ spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
pwrb_context->wrb_handles_available--;
if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
pwrb_context->alloc_index = 0;
else
pwrb_context->alloc_index++;
- spin_unlock_bh(&pwrb_context->wrb_lock);
+ spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
if (pwrb_handle)
memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
@@ -1001,14 +1005,16 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
struct wrb_handle *pwrb_handle,
unsigned int wrbs_per_cxn)
{
- spin_lock_bh(&pwrb_context->wrb_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
pwrb_context->wrb_handles_available++;
if (pwrb_context->free_index == (wrbs_per_cxn - 1))
pwrb_context->free_index = 0;
else
pwrb_context->free_index++;
- spin_unlock_bh(&pwrb_context->wrb_lock);
+ spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}
/**
@@ -1037,8 +1043,9 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ unsigned long flags;
- spin_lock_bh(&phba->mgmt_sgl_lock);
+ spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
if (phba->eh_sgl_hndl_avbl) {
psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
@@ -1056,14 +1063,16 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
phba->eh_sgl_alloc_index++;
} else
psgl_handle = NULL;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
return psgl_handle;
}
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- spin_lock_bh(&phba->mgmt_sgl_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BM_%d : In free_mgmt_sgl_handle,"
"eh_sgl_free_index=%d\n",
@@ -1078,7 +1087,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"BM_%d : Double Free in eh SGL ,"
"eh_sgl_free_index=%d\n",
phba->eh_sgl_free_index);
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
return;
}
phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1088,7 +1097,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->eh_sgl_free_index = 0;
else
phba->eh_sgl_free_index++;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}
static void
@@ -4584,7 +4593,7 @@ free_hndls:
io_task->cmd_bhs = NULL;
return -ENOMEM;
}
-int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
+static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
unsigned int num_sg, unsigned int xferlen,
unsigned int writedir)
{
@@ -4973,7 +4982,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
return rc;
}
-void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
+static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
{
/* Set the logging parameter */
beiscsi_log_enable_init(phba, beiscsi_log_enable);
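Every be2iscsi locking hunk above makes the same substitution: spin_lock_bh() becomes spin_lock_irqsave(). The _bh form only masks softirqs; once a lock can also be taken from hard-irq context, the irqsave form is needed to prevent a deadlock against the interrupt handler, and the saved flags restore whatever interrupt state the caller had. The generic shape (not the driver's code):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(foo_lock);
	static int foo_avail;	/* hypothetical state guarded by foo_lock */

	static int foo_take(void)
	{
		unsigned long flags;
		int got = 0;

		/* Local interrupts stay off while the lock is held, so a
		 * hard-irq user of foo_lock cannot interrupt us mid-section. */
		spin_lock_irqsave(&foo_lock, flags);
		if (foo_avail) {
			foo_avail--;
			got = 1;
		}
		spin_unlock_irqrestore(&foo_lock, flags);

		return got;
	}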
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 7c0d7af0d3b7..0039bebaa9e2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -685,6 +685,11 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
req_completion);
csk->snd_nxt += len;
cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
+ } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
+ (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
+ struct cpl_close_con_req *req =
+ (struct cpl_close_con_req *)skb->data;
+ req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
}
total_size += skb->truesize;
t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d1421139e6ea..2ffe029ff2b6 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2081,9 +2081,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
/* never reached the xmit task callout */
if (tdata->skb)
__kfree_skb(tdata->skb);
- memset(tdata, 0, sizeof(*tdata));
task_release_itt(task, task->hdr_itt);
+ memset(tdata, 0, sizeof(*tdata));
+
iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 241829e59668..7bb20684e9fa 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -793,6 +793,7 @@ static void alua_rtpg_work(struct work_struct *work)
WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
spin_unlock_irqrestore(&pg->lock, flags);
+ kref_put(&pg->kref, release_port_group);
return;
}
if (pg->flags & ALUA_SYNC_STPG)
@@ -890,6 +891,7 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
/* Do not queue if the worker is already running */
if (!(pg->flags & ALUA_PG_RUNNING)) {
kref_get(&pg->kref);
+ sdev = NULL;
start_queue = 1;
}
}
@@ -901,7 +903,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
if (start_queue &&
!queue_delayed_work(alua_wq, &pg->rtpg_work,
msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
- scsi_device_put(sdev);
+ if (sdev)
+ scsi_device_put(sdev);
kref_put(&pg->kref, release_port_group);
}
}
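The two alua hunks restore reference-count symmetry: the early-return path in alua_rtpg_work() now drops the kref it was handed, and alua_rtpg_queue() clears sdev once the queued worker owns the device reference, so the failure path only puts what it still holds. The underlying rule is the usual one, every kref_get() paired with exactly one kref_put() on every path; in miniature (hypothetical names):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct foo_pg {
		struct kref kref;
	};

	static void foo_pg_release(struct kref *kref)
	{
		kfree(container_of(kref, struct foo_pg, kref));
	}

	static void foo_queue(struct foo_pg *pg, bool queued)
	{
		kref_get(&pg->kref);	/* reference for the worker */
		if (!queued)
			/* worker will never run: drop its reference here */
			kref_put(&pg->kref, foo_pg_release);
	}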
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 516bd6c4f442..cbf010324c18 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -30,161 +30,41 @@
#include "NCR5380.h"
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/isapnp.h>
+#include <linux/isa.h>
+#include <linux/pnp.h>
#include <linux/interrupt.h>
+#define MAX_CARDS 8
+
+/* old-style parameters for compatibility */
static int ncr_irq;
-static int ncr_dma;
static int ncr_addr;
static int ncr_5380;
static int ncr_53c400;
static int ncr_53c400a;
static int dtc_3181e;
static int hp_c2502;
+module_param(ncr_irq, int, 0);
+module_param(ncr_addr, int, 0);
+module_param(ncr_5380, int, 0);
+module_param(ncr_53c400, int, 0);
+module_param(ncr_53c400a, int, 0);
+module_param(dtc_3181e, int, 0);
+module_param(hp_c2502, int, 0);
-static struct override {
- NCR5380_map_type NCR5380_map_name;
- int irq;
- int dma;
- int board; /* Use NCR53c400, Ricoh, etc. extensions ? */
-} overrides
-#ifdef GENERIC_NCR5380_OVERRIDE
-[] __initdata = GENERIC_NCR5380_OVERRIDE;
-#else
-[1] __initdata = { { 0,},};
-#endif
-
-#define NO_OVERRIDES ARRAY_SIZE(overrides)
-
-#ifndef MODULE
-
-/**
- * internal_setup - handle lilo command string override
- * @board: BOARD_* identifier for the board
- * @str: unused
- * @ints: numeric parameters
- *
- * Do LILO command line initialization of the overrides array. Display
- * errors when needed
- *
- * Locks: none
- */
-
-static void __init internal_setup(int board, char *str, int *ints)
-{
- static int commandline_current;
- switch (board) {
- case BOARD_NCR5380:
- if (ints[0] != 2 && ints[0] != 3) {
- printk(KERN_ERR "generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
- return;
- }
- break;
- case BOARD_NCR53C400:
- if (ints[0] != 2) {
- printk(KERN_ERR "generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
- return;
- }
- break;
- case BOARD_NCR53C400A:
- if (ints[0] != 2) {
- printk(KERN_ERR "generic_NCR53C400A_setup : usage ncr53c400a=" STRVAL(NCR5380_map_name) ",irq\n");
- return;
- }
- break;
- case BOARD_DTC3181E:
- if (ints[0] != 2) {
- printk("generic_DTC3181E_setup : usage dtc3181e=" STRVAL(NCR5380_map_name) ",irq\n");
- return;
- }
- break;
- }
-
- if (commandline_current < NO_OVERRIDES) {
- overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type) ints[1];
- overrides[commandline_current].irq = ints[2];
- if (ints[0] == 3)
- overrides[commandline_current].dma = ints[3];
- else
- overrides[commandline_current].dma = DMA_NONE;
- overrides[commandline_current].board = board;
- ++commandline_current;
- }
-}
-
-
-/**
- * do_NCR53C80_setup - set up entry point
- * @str: unused
- *
- * Setup function invoked at boot to parse the ncr5380= command
- * line.
- */
-
-static int __init do_NCR5380_setup(char *str)
-{
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_NCR5380, str, ints);
- return 1;
-}
-
-/**
- * do_NCR53C400_setup - set up entry point
- * @str: unused
- * @ints: integer parameters from kernel setup code
- *
- * Setup function invoked at boot to parse the ncr53c400= command
- * line.
- */
-
-static int __init do_NCR53C400_setup(char *str)
-{
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_NCR53C400, str, ints);
- return 1;
-}
-
-/**
- * do_NCR53C400A_setup - set up entry point
- * @str: unused
- * @ints: integer parameters from kernel setup code
- *
- * Setup function invoked at boot to parse the ncr53c400a= command
- * line.
- */
-
-static int __init do_NCR53C400A_setup(char *str)
-{
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_NCR53C400A, str, ints);
- return 1;
-}
-
-/**
- * do_DTC3181E_setup - set up entry point
- * @str: unused
- * @ints: integer parameters from kernel setup code
- *
- * Setup function invoked at boot to parse the dtc3181e= command
- * line.
- */
+static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ number(s)");
-static int __init do_DTC3181E_setup(char *str)
-{
- int ints[10];
+static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+module_param_array(base, int, NULL, 0);
+MODULE_PARM_DESC(base, "base address(es)");
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_DTC3181E, str, ints);
- return 1;
-}
+static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+module_param_array(card, int, NULL, 0);
+MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
-#endif
+MODULE_LICENSE("GPL");
#ifndef SCSI_G_NCR5380_MEM
/*
@@ -210,21 +90,9 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
}
#endif
-/**
- * generic_NCR5380_detect - look for NCR5380 controllers
- * @tpnt: the scsi template
- *
- * Scan for the present of NCR5380, NCR53C400, NCR53C400A, DTC3181E
- * and DTC436(ISAPnP) controllers. If overrides have been set we use
- * them.
- *
- * Locks: none
- */
-
-static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
+static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
+ struct device *pdev, int base, int irq, int board)
{
- static int current_override;
- int count;
unsigned int *ports;
u8 *magic = NULL;
#ifndef SCSI_G_NCR5380_MEM
@@ -232,272 +100,222 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
int port_idx = -1;
unsigned long region_size;
#endif
- static unsigned int __initdata ncr_53c400a_ports[] = {
+ static unsigned int ncr_53c400a_ports[] = {
0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
};
- static unsigned int __initdata dtc_3181e_ports[] = {
+ static unsigned int dtc_3181e_ports[] = {
0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
};
- static u8 ncr_53c400a_magic[] __initdata = { /* 53C400A & DTC436 */
+ static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
0x59, 0xb9, 0xc5, 0xae, 0xa6
};
- static u8 hp_c2502_magic[] __initdata = { /* HP C2502 */
+ static u8 hp_c2502_magic[] = { /* HP C2502 */
0x0f, 0x22, 0xf0, 0x20, 0x80
};
- int flags;
+ int flags, ret;
struct Scsi_Host *instance;
struct NCR5380_hostdata *hostdata;
#ifdef SCSI_G_NCR5380_MEM
- unsigned long base;
void __iomem *iomem;
resource_size_t iomem_size;
#endif
- if (ncr_irq)
- overrides[0].irq = ncr_irq;
- if (ncr_dma)
- overrides[0].dma = ncr_dma;
- if (ncr_addr)
- overrides[0].NCR5380_map_name = (NCR5380_map_type) ncr_addr;
- if (ncr_5380)
- overrides[0].board = BOARD_NCR5380;
- else if (ncr_53c400)
- overrides[0].board = BOARD_NCR53C400;
- else if (ncr_53c400a)
- overrides[0].board = BOARD_NCR53C400A;
- else if (dtc_3181e)
- overrides[0].board = BOARD_DTC3181E;
- else if (hp_c2502)
- overrides[0].board = BOARD_HP_C2502;
-#ifndef SCSI_G_NCR5380_MEM
- if (!current_override && isapnp_present()) {
- struct pnp_dev *dev = NULL;
- count = 0;
- while ((dev = pnp_find_dev(NULL, ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), dev))) {
- if (count >= NO_OVERRIDES)
- break;
- if (pnp_device_attach(dev) < 0)
- continue;
- if (pnp_activate_dev(dev) < 0) {
- printk(KERN_ERR "dtc436e probe: activate failed\n");
- pnp_device_detach(dev);
- continue;
- }
- if (!pnp_port_valid(dev, 0)) {
- printk(KERN_ERR "dtc436e probe: no valid port\n");
- pnp_device_detach(dev);
- continue;
- }
- if (pnp_irq_valid(dev, 0))
- overrides[count].irq = pnp_irq(dev, 0);
- else
- overrides[count].irq = NO_IRQ;
- if (pnp_dma_valid(dev, 0))
- overrides[count].dma = pnp_dma(dev, 0);
- else
- overrides[count].dma = DMA_NONE;
- overrides[count].NCR5380_map_name = (NCR5380_map_type) pnp_port_start(dev, 0);
- overrides[count].board = BOARD_DTC3181E;
- count++;
- }
+ ports = NULL;
+ flags = 0;
+ switch (board) {
+ case BOARD_NCR5380:
+ flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
+ break;
+ case BOARD_NCR53C400A:
+ ports = ncr_53c400a_ports;
+ magic = ncr_53c400a_magic;
+ break;
+ case BOARD_HP_C2502:
+ ports = ncr_53c400a_ports;
+ magic = hp_c2502_magic;
+ break;
+ case BOARD_DTC3181E:
+ ports = dtc_3181e_ports;
+ magic = ncr_53c400a_magic;
+ break;
}
-#endif
-
- for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
- if (!(overrides[current_override].NCR5380_map_name))
- continue;
-
- ports = NULL;
- flags = 0;
- switch (overrides[current_override].board) {
- case BOARD_NCR5380:
- flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
- break;
- case BOARD_NCR53C400A:
- ports = ncr_53c400a_ports;
- magic = ncr_53c400a_magic;
- break;
- case BOARD_HP_C2502:
- ports = ncr_53c400a_ports;
- magic = hp_c2502_magic;
- break;
- case BOARD_DTC3181E:
- ports = dtc_3181e_ports;
- magic = ncr_53c400a_magic;
- break;
- }
#ifndef SCSI_G_NCR5380_MEM
- if (ports && magic) {
- /* wakeup sequence for the NCR53C400A and DTC3181E */
-
- /* Disable the adapter and look for a free io port */
- magic_configure(-1, 0, magic);
-
- region_size = 16;
-
- if (overrides[current_override].NCR5380_map_name != PORT_AUTO)
- for (i = 0; ports[i]; i++) {
- if (!request_region(ports[i], region_size, "ncr53c80"))
- continue;
- if (overrides[current_override].NCR5380_map_name == ports[i])
- break;
- release_region(ports[i], region_size);
- } else
- for (i = 0; ports[i]; i++) {
- if (!request_region(ports[i], region_size, "ncr53c80"))
- continue;
- if (inb(ports[i]) == 0xff)
- break;
- release_region(ports[i], region_size);
+ if (ports && magic) {
+ /* wakeup sequence for the NCR53C400A and DTC3181E */
+
+ /* Disable the adapter and look for a free io port */
+ magic_configure(-1, 0, magic);
+
+ region_size = 16;
+ if (base)
+ for (i = 0; ports[i]; i++) {
+ if (base == ports[i]) { /* index found */
+ if (!request_region(ports[i],
+ region_size,
+ "ncr53c80"))
+ return -EBUSY;
+ break;
}
- if (ports[i]) {
- /* At this point we have our region reserved */
- magic_configure(i, 0, magic); /* no IRQ yet */
- outb(0xc0, ports[i] + 9);
- if (inb(ports[i] + 9) != 0x80)
- continue;
- overrides[current_override].NCR5380_map_name = ports[i];
- port_idx = i;
- } else
- continue;
- }
+ }
else
- {
- /* Not a 53C400A style setup - just grab */
- region_size = 8;
- if (!request_region(overrides[current_override].NCR5380_map_name,
- region_size, "ncr5380"))
- continue;
- }
+ for (i = 0; ports[i]; i++) {
+ if (!request_region(ports[i], region_size,
+ "ncr53c80"))
+ continue;
+ if (inb(ports[i]) == 0xff)
+ break;
+ release_region(ports[i], region_size);
+ }
+ if (ports[i]) {
+ /* At this point we have our region reserved */
+ magic_configure(i, 0, magic); /* no IRQ yet */
+ outb(0xc0, ports[i] + 9);
+ if (inb(ports[i] + 9) != 0x80) {
+ ret = -ENODEV;
+ goto out_release;
+ }
+ base = ports[i];
+ port_idx = i;
+ } else
+ return -EINVAL;
+ }
+ else
+ {
+ /* NCR5380 - no configuration, just grab */
+ region_size = 8;
+ if (!base || !request_region(base, region_size, "ncr5380"))
+ return -EBUSY;
+ }
#else
- base = overrides[current_override].NCR5380_map_name;
- iomem_size = NCR53C400_region_size;
- if (!request_mem_region(base, iomem_size, "ncr5380"))
- continue;
- iomem = ioremap(base, iomem_size);
- if (!iomem) {
- release_mem_region(base, iomem_size);
- continue;
- }
+ iomem_size = NCR53C400_region_size;
+ if (!request_mem_region(base, iomem_size, "ncr5380"))
+ return -EBUSY;
+ iomem = ioremap(base, iomem_size);
+ if (!iomem) {
+ release_mem_region(base, iomem_size);
+ return -ENOMEM;
+ }
#endif
- instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
- if (instance == NULL)
- goto out_release;
- hostdata = shost_priv(instance);
+ instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
+ if (instance == NULL) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+ hostdata = shost_priv(instance);
#ifndef SCSI_G_NCR5380_MEM
- instance->io_port = overrides[current_override].NCR5380_map_name;
- instance->n_io_port = region_size;
- hostdata->io_width = 1; /* 8-bit PDMA by default */
-
- /*
- * On NCR53C400 boards, NCR5380 registers are mapped 8 past
- * the base address.
- */
- switch (overrides[current_override].board) {
- case BOARD_NCR53C400:
- instance->io_port += 8;
- hostdata->c400_ctl_status = 0;
- hostdata->c400_blk_cnt = 1;
- hostdata->c400_host_buf = 4;
- break;
- case BOARD_DTC3181E:
- hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- hostdata->c400_ctl_status = 9;
- hostdata->c400_blk_cnt = 10;
- hostdata->c400_host_buf = 8;
- break;
- }
+ instance->io_port = base;
+ instance->n_io_port = region_size;
+ hostdata->io_width = 1; /* 8-bit PDMA by default */
+
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+ switch (board) {
+ case BOARD_NCR53C400:
+ instance->io_port += 8;
+ hostdata->c400_ctl_status = 0;
+ hostdata->c400_blk_cnt = 1;
+ hostdata->c400_host_buf = 4;
+ break;
+ case BOARD_DTC3181E:
+ hostdata->io_width = 2; /* 16-bit PDMA */
+ /* fall through */
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ hostdata->c400_ctl_status = 9;
+ hostdata->c400_blk_cnt = 10;
+ hostdata->c400_host_buf = 8;
+ break;
+ }
#else
- instance->base = overrides[current_override].NCR5380_map_name;
- hostdata->iomem = iomem;
- hostdata->iomem_size = iomem_size;
- switch (overrides[current_override].board) {
- case BOARD_NCR53C400:
- hostdata->c400_ctl_status = 0x100;
- hostdata->c400_blk_cnt = 0x101;
- hostdata->c400_host_buf = 0x104;
- break;
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
- goto out_unregister;
- }
+ instance->base = base;
+ hostdata->iomem = iomem;
+ hostdata->iomem_size = iomem_size;
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->c400_ctl_status = 0x100;
+ hostdata->c400_blk_cnt = 0x101;
+ hostdata->c400_host_buf = 0x104;
+ break;
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
+ ret = -EINVAL;
+ goto out_unregister;
+ }
#endif
- if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP))
- goto out_unregister;
+ ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
+ if (ret)
+ goto out_unregister;
- switch (overrides[current_override].board) {
- case BOARD_NCR53C400:
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
- }
+ switch (board) {
+ case BOARD_NCR53C400:
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
+ }
- NCR5380_maybe_reset_bus(instance);
+ NCR5380_maybe_reset_bus(instance);
- if (overrides[current_override].irq != IRQ_AUTO)
- instance->irq = overrides[current_override].irq;
- else
- instance->irq = NCR5380_probe_irq(instance, 0xffff);
+ if (irq != IRQ_AUTO)
+ instance->irq = irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, 0xffff);
- /* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
+ /* Compatibility with documented NCR5380 kernel parameters */
+ if (instance->irq == 255)
+ instance->irq = NO_IRQ;
- if (instance->irq != NO_IRQ) {
+ if (instance->irq != NO_IRQ) {
#ifndef SCSI_G_NCR5380_MEM
- /* set IRQ for HP C2502 */
- if (overrides[current_override].board == BOARD_HP_C2502)
- magic_configure(port_idx, instance->irq, magic);
+ /* set IRQ for HP C2502 */
+ if (board == BOARD_HP_C2502)
+ magic_configure(port_idx, instance->irq, magic);
#endif
- if (request_irq(instance->irq, generic_NCR5380_intr,
- 0, "NCR5380", instance)) {
- printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
- instance->irq = NO_IRQ;
- }
- }
-
- if (instance->irq == NO_IRQ) {
- printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ if (request_irq(instance->irq, generic_NCR5380_intr,
+ 0, "NCR5380", instance)) {
+ printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
+ instance->irq = NO_IRQ;
}
+ }
- ++current_override;
- ++count;
+ if (instance->irq == NO_IRQ) {
+ printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
}
- return count;
+ ret = scsi_add_host(instance, pdev);
+ if (ret)
+ goto out_free_irq;
+ scsi_scan_host(instance);
+ dev_set_drvdata(pdev, instance);
+ return 0;
+
+out_free_irq:
+ if (instance->irq != NO_IRQ)
+ free_irq(instance->irq, instance);
+ NCR5380_exit(instance);
out_unregister:
- scsi_unregister(instance);
+ scsi_host_put(instance);
out_release:
#ifndef SCSI_G_NCR5380_MEM
- release_region(overrides[current_override].NCR5380_map_name, region_size);
+ release_region(base, region_size);
#else
iounmap(iomem);
release_mem_region(base, iomem_size);
#endif
- return count;
+ return ret;
}
-/**
- * generic_NCR5380_release_resources - free resources
- * @instance: host adapter to clean up
- *
- * Free the generic interface resources from this adapter.
- *
- * Locks: none
- */
-
-static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
+static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
{
+ scsi_remove_host(instance);
if (instance->irq != NO_IRQ)
free_irq(instance->irq, instance);
NCR5380_exit(instance);
@@ -511,7 +329,7 @@ static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
release_mem_region(instance->base, hostdata->iomem_size);
}
#endif
- return 0;
+ scsi_host_put(instance);
}
/**
@@ -701,10 +519,9 @@ static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
#include "NCR5380.c"
static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
.proc_name = DRV_MODULE_NAME,
.name = "Generic NCR5380/NCR53C400 SCSI",
- .detect = generic_NCR5380_detect,
- .release = generic_NCR5380_release_resources,
.info = generic_NCR5380_info,
.queuecommand = generic_NCR5380_queue_command,
.eh_abort_handler = generic_NCR5380_abort,
@@ -718,31 +535,115 @@ static struct scsi_host_template driver_template = {
.max_sectors = 128,
};
-#include "scsi_module.c"
-module_param(ncr_irq, int, 0);
-module_param(ncr_dma, int, 0);
-module_param(ncr_addr, int, 0);
-module_param(ncr_5380, int, 0);
-module_param(ncr_53c400, int, 0);
-module_param(ncr_53c400a, int, 0);
-module_param(dtc_3181e, int, 0);
-module_param(hp_c2502, int, 0);
-MODULE_LICENSE("GPL");
+static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
+{
+ int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev],
+ irq[ndev], card[ndev]);
+ if (ret) {
+ if (base[ndev])
+ printk(KERN_WARNING "Card not found at address 0x%03x\n",
+ base[ndev]);
+ return 0;
+ }
-#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE)
-static struct isapnp_device_id id_table[] = {
- {
- ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e),
- 0},
- {0}
+ return 1;
+}
+
+static int generic_NCR5380_isa_remove(struct device *pdev,
+ unsigned int ndev)
+{
+ generic_NCR5380_release_resources(dev_get_drvdata(pdev));
+ dev_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct isa_driver generic_NCR5380_isa_driver = {
+ .match = generic_NCR5380_isa_match,
+ .remove = generic_NCR5380_isa_remove,
+ .driver = {
+ .name = DRV_MODULE_NAME
+ },
+};
+
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
+ { .id = "DTC436e", .driver_data = BOARD_DTC3181E },
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids);
+
+static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
+{
+ int base, irq;
+
+ if (pnp_activate_dev(pdev) < 0)
+ return -EBUSY;
+
+ base = pnp_port_start(pdev, 0);
+ irq = pnp_irq(pdev, 0);
+
+ return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq,
+ id->driver_data);
+}
+
+static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev)
+{
+ generic_NCR5380_release_resources(pnp_get_drvdata(pdev));
+ pnp_set_drvdata(pdev, NULL);
+}
+
+static struct pnp_driver generic_NCR5380_pnp_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = generic_NCR5380_pnp_ids,
+ .probe = generic_NCR5380_pnp_probe,
+ .remove = generic_NCR5380_pnp_remove,
};
+#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */
+
+static int pnp_registered, isa_registered;
+
+static int __init generic_NCR5380_init(void)
+{
+ int ret = 0;
+
+ /* compatibility with old-style parameters */
+ if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+ irq[0] = ncr_irq;
+ base[0] = ncr_addr;
+ if (ncr_5380)
+ card[0] = BOARD_NCR5380;
+ if (ncr_53c400)
+ card[0] = BOARD_NCR53C400;
+ if (ncr_53c400a)
+ card[0] = BOARD_NCR53C400A;
+ if (dtc_3181e)
+ card[0] = BOARD_DTC3181E;
+ if (hp_c2502)
+ card[0] = BOARD_HP_C2502;
+ }
-MODULE_DEVICE_TABLE(isapnp, id_table);
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+ if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
+ pnp_registered = 1;
#endif
+ ret = isa_register_driver(&generic_NCR5380_isa_driver, MAX_CARDS);
+ if (!ret)
+ isa_registered = 1;
+
+ return (pnp_registered || isa_registered) ? 0 : ret;
+}
+
+static void __exit generic_NCR5380_exit(void)
+{
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+ if (pnp_registered)
+ pnp_unregister_driver(&generic_NCR5380_pnp_driver);
+#endif
+ if (isa_registered)
+ isa_unregister_driver(&generic_NCR5380_isa_driver);
+}
-__setup("ncr5380=", do_NCR5380_setup);
-__setup("ncr53c400=", do_NCR53C400_setup);
-__setup("ncr53c400a=", do_NCR53C400A_setup);
-__setup("dtc3181e=", do_DTC3181E_setup);
+module_init(generic_NCR5380_init);
+module_exit(generic_NCR5380_exit);
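The g_NCR5380 rewrite retires the old scsi_module.c detect/release scheme in favour of an ordinary module with bus drivers: an isa_driver whose match callback is invoked once per possible card index, plus an optional pnp_driver for the DTC436. Reduced to its skeleton, the pattern is (hypothetical names):

	#include <linux/init.h>
	#include <linux/isa.h>
	#include <linux/module.h>

	#define FOO_MAX_CARDS 8

	static int foo_isa_match(struct device *pdev, unsigned int ndev)
	{
		/* Probe slot 'ndev'; nonzero claims the device. */
		return 0;	/* hypothetical: nothing found */
	}

	static int foo_isa_remove(struct device *pdev, unsigned int ndev)
	{
		return 0;
	}

	static struct isa_driver foo_isa_driver = {
		.match	= foo_isa_match,
		.remove	= foo_isa_remove,
		.driver	= {
			.name	= "foo",
		},
	};

	static int __init foo_init(void)
	{
		return isa_register_driver(&foo_isa_driver, FOO_MAX_CARDS);
	}

	static void __exit foo_exit(void)
	{
		isa_unregister_driver(&foo_isa_driver);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");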
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 595177428d76..b175b9234458 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,15 +14,9 @@
#ifndef GENERIC_NCR5380_H
#define GENERIC_NCR5380_H
-#define __STRVAL(x) #x
-#define STRVAL(x) __STRVAL(x)
-
#ifndef SCSI_G_NCR5380_MEM
#define DRV_MODULE_NAME "g_NCR5380"
-#define NCR5380_map_type int
-#define NCR5380_map_name port
-
#define NCR5380_read(reg) \
inb(instance->io_port + (reg))
#define NCR5380_write(reg, value) \
@@ -38,8 +32,6 @@
/* therefore SCSI_G_NCR5380_MEM */
#define DRV_MODULE_NAME "g_NCR5380_mmio"
-#define NCR5380_map_type unsigned long
-#define NCR5380_map_name base
#define NCR53C400_mem_base 0x3880
#define NCR53C400_host_buffer 0x3900
#define NCR53C400_region_size 0x3a00
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index a8762a3efeef..532474109624 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2586,7 +2586,6 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
u32 fd_ioasc;
- char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
if (ioa_cfg->sis64)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
@@ -2607,8 +2606,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
}
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
+ schedule_work(&ioa_cfg->work_q);
hostrcb = ipr_get_free_hostrcb(ioa_cfg);
- kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c051694bfcb0..f9b6fba689ff 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -791,9 +791,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
free_task:
/* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
+ spin_lock(&session->back_lock);
__iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ spin_unlock(&session->back_lock);
return NULL;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ca86c885dfaa..3aaea713bf37 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2233,7 +2233,7 @@ struct megasas_instance_template {
};
#define MEGASAS_IS_LOGICAL(scp) \
- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
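The megaraid_sas.h one-liner is the classic macro-hygiene fix: a ternary without outer parentheses re-associates when the macro expands inside a larger expression. A stand-alone demonstration of the hazard:

	#include <stdio.h>

	#define IS_LOGICAL_BAD(ch)	(ch) < 8 ? 0 : 1
	#define IS_LOGICAL_GOOD(ch)	(((ch) < 8) ? 0 : 1)

	int main(void)
	{
		/* !IS_LOGICAL_BAD(2) expands to !(2) < 8 ? 0 : 1, i.e. the
		 * '!' applies to 2 alone and the result is 0, not 1. */
		printf("%d\n", !IS_LOGICAL_BAD(2));	/* prints 0 (wrong) */
		printf("%d\n", !IS_LOGICAL_GOOD(2));	/* prints 1 (intended) */
		return 0;
	}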
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9ff57dee72d7..d8b1fbd4c8aa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,16 +1700,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
goto out_done;
}
- switch (scmd->cmnd[0]) {
- case SYNCHRONIZE_CACHE:
- /*
- * FW takes care of flush cache on its own
- * No need to send it down
- */
+ /*
+ * FW takes care of flush cache on its own for Virtual Disk.
+ * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
+ */
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
scmd->result = DID_OK << 16;
goto out_done;
- default:
- break;
}
return instance->instancet->build_and_issue_cmd(instance, scmd);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 209a969a979d..91b70bc46e7f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1273,9 +1273,9 @@ scsih_target_alloc(struct scsi_target *starget)
sas_target_priv_data->handle = raid_device->handle;
sas_target_priv_data->sas_address = raid_device->wwid;
sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
- sas_target_priv_data->raid_device = raid_device;
if (ioc->is_warpdrive)
- raid_device->starget = starget;
+ sas_target_priv_data->raid_device = raid_device;
+ raid_device->starget = starget;
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
return 0;
@@ -4010,7 +4010,10 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
SAM_STAT_CHECK_CONDITION;
}
-
+static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+{
+ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+}
/**
* scsih_qcmd - main scsi request entry point
@@ -4038,6 +4041,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);
+ /*
+ * Lock the device for any subsequent command until command is
+ * done.
+ */
+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_block(scmd->device);
+
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4613,6 +4623,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
if (scmd == NULL)
return 1;
+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
if (mpi_reply == NULL) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ace65db1d2a2..56d6142852a5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -707,6 +707,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
srb_t *sp;
int rval;
+ if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -1451,6 +1456,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
+ /* Don't abort commands in adapter during EEH
+ * recovery as it's not accessible/responding.
+ */
+ if (!ha->flags.eeh_busy) {
+ /* Get a reference to the sp and drop the lock.
+ * The reference ensures this sp->done() call
+ * - and not the call in qla2xxx_eh_abort() -
+ * ends the SCSI command (with result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ qla2xxx_eh_abort(GET_CMD_SP(sp));
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
req->outstanding_cmds[cnt] = NULL;
sp->done(vha, sp, res);
}
@@ -2341,6 +2360,8 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
scsi_qla_host_t *vha = shost_priv(shost);
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return 1;
if (!vha->host)
return 1;
if (time > vha->hw->loop_reset_delay * HZ)
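The qla2xxx abort-all hunk follows a common pattern worth naming: take a reference so the command outlives the unlocked window, drop the spinlock to call a function that may block, then retake the lock before touching the table again. Schematically (hypothetical names, not the driver's structures):

	#include <linux/spinlock.h>

	struct foo_cmd {
		int refcnt;		/* manipulated under h->lock */
	};

	struct foo_host {
		spinlock_t lock;
		struct foo_cmd *cmd;
	};

	static void foo_cmd_get(struct foo_cmd *c) { c->refcnt++; }
	static void foo_abort(struct foo_cmd *c) { /* may sleep */ }

	static void foo_abort_all(struct foo_host *h)
	{
		unsigned long flags;

		spin_lock_irqsave(&h->lock, flags);
		if (h->cmd) {
			/* The extra reference pins the command while the
			 * lock is dropped for the sleeping call. */
			foo_cmd_get(h->cmd);
			spin_unlock_irqrestore(&h->lock, flags);
			foo_abort(h->cmd);
			spin_lock_irqsave(&h->lock, flags);
			h->cmd = NULL;
		}
		spin_unlock_irqrestore(&h->lock, flags);
	}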
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index c905709707f0..cf04a364fd8b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5134,6 +5134,7 @@ static void __exit scsi_debug_exit(void)
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
+ vfree(map_storep);
vfree(dif_storep);
vfree(fake_storep);
kfree(sdebug_q_arr);
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 54d446c9f56e..b8d3b97b217a 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -36,9 +36,9 @@ struct scsi_dh_blist {
};
static const struct scsi_dh_blist scsi_dh_blist[] = {
- {"DGC", "RAID", "clariion" },
- {"DGC", "DISK", "clariion" },
- {"DGC", "VRAID", "clariion" },
+ {"DGC", "RAID", "emc" },
+ {"DGC", "DISK", "emc" },
+ {"DGC", "VRAID", "emc" },
{"COMPAQ", "MSA1000 VOLUME", "hp_sw" },
{"COMPAQ", "HSV110", "hp_sw" },
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c71344aebdbb..2cca9cffc63f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2077,7 +2077,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
}
static struct blk_mq_ops scsi_mq_ops = {
- .map_queue = blk_mq_map_queue,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 212e98d940bc..6f7128f49c30 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1307,7 +1307,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
enum scsi_scan_mode rescan)
{
- char devname[64];
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
unsigned int length;
u64 lun;
@@ -1349,9 +1348,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
}
}
- sprintf(devname, "host %d channel %d id %d",
- shost->host_no, sdev->channel, sdev->id);
-
/*
* Allocate enough to hold the header (the same size as one scsi_lun)
* plus the number of luns we are requesting. 511 was the default
@@ -1470,12 +1466,12 @@ retry:
out_err:
kfree(lun_data);
out:
- scsi_device_put(sdev);
if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/
__scsi_remove_device(sdev);
+ scsi_device_put(sdev);
return ret;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7af5226aa55b..618422ea3a41 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
res = get_user_pages_unlocked(
uaddr,
nr_pages,
- rw == READ,
- 0, /* don't force */
- pages);
+ pages,
+ rw == READ ? FOLL_WRITE : 0); /* don't force */
/* Errors and no page mapped should return here */
if (res < nr_pages)
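
For context, this hunk tracks the mm-layer change that folded the separate write/force booleans into a single gup_flags bitmask. A hedged sketch of the updated prototype (the authoritative signature is the one in linux/mm.h for this kernel):

    long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                                 struct page **pages, unsigned int gup_flags);

    /*
     * rw == READ means the device will write into the user buffer, hence
     * FOLL_WRITE is requested; FOLL_FORCE is simply not passed, which
     * replaces the old explicit "don't force" zero argument.
     */
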
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 47966909286d..e27b4d4e6ae2 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -63,7 +63,7 @@ config SCSI_UFSHCD_PCI
config SCSI_UFS_DWC_TC_PCI
tristate "DesignWare pci support using a G210 Test Chip"
- depends on SCSI_UFSHCD && PCI
+ depends on SCSI_UFSHCD_PCI
---help---
Synopsys Test Chip is a PHY for prototyping purposes.
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index ee4ab85e2801..22f881e9253a 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -25,6 +25,7 @@
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
+#define UFS_VENDOR_SKHYNIX 0x1AD
/**
* ufs_device_info - ufs device details
@@ -145,6 +146,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
END_FIX
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 37f3c51e9d92..05c745663c10 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1266,9 +1266,12 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
0, query->request.query_func, 0, 0);
- /* Data segment length */
- ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
- 0, 0, len >> 8, (u8)len);
+ /* Data segment length is only needed for WRITE_DESC */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ ucd_req_ptr->header.dword_2 =
+ UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+ else
+ ucd_req_ptr->header.dword_2 = 0;
/* Copy the Query Request buffer as is */
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
@@ -6500,6 +6503,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
if (IS_ERR(hba->devfreq)) {
dev_err(hba->dev, "Unable to register with devfreq %ld\n",
PTR_ERR(hba->devfreq));
+ err = PTR_ERR(hba->devfreq);
goto out_remove_scsi_host;
}
/* Suspend devfreq until the UFS device is detected */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 4a0d3cdc607c..15ca09cd16f3 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -793,6 +793,7 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int result = SUCCESS;
DECLARE_COMPLETION_ONSTACK(abort_cmp);
+ int done;
scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
adapter->host->host_no, cmd);
@@ -824,10 +825,10 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)
pvscsi_abort_cmd(adapter, ctx);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
/* Wait for 2 secs for the completion. */
- wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
+ done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
spin_lock_irqsave(&adapter->hw_lock, flags);
- if (!completion_done(&abort_cmp)) {
+ if (!done) {
/*
* Failed to abort the command, unmark the fact that it
* was requested to be aborted.
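
The change above closes a race: completion_done() queried after the wait can observe a complete() that fires just after the timeout expires, whereas the wait's own return value records whether the wait was actually satisfied. A minimal sketch of the pattern, assuming only the standard completion API:

    unsigned long left;	/* 0 on timeout, remaining jiffies otherwise */

    left = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
    if (!left) {
            /* the wait itself timed out: treat the abort as unconfirmed */
    }
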
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index c097d2ccbde3..d41292ef85f2 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -26,7 +26,7 @@
#include <linux/types.h>
-#define PVSCSI_DRIVER_VERSION_STRING "1.0.6.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING "1.0.7.0-k"
#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index fe42a2fdf351..e6e90e80519a 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,6 +1,7 @@
menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/bcm/Kconfig"
+source "drivers/soc/fsl/qbman/Kconfig"
source "drivers/soc/fsl/qe/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 203307fd92c1..75e1f5334821 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -2,5 +2,6 @@
# Makefile for the Linux Kernel SOC fsl specific device drivers
#
+obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 000000000000..757033c0586c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,67 @@
+menuconfig FSL_DPAA
+ bool "Freescale DPAA 1.x support"
+ depends on FSL_SOC_BOOKE
+ select GENERIC_ALLOCATOR
+ help
+ The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+ hardware components on specific QorIQ multicore processors.
+ This architecture provides the infrastructure to support simplified
+ sharing of networking interfaces and accelerators by multiple CPUs.
+ The major h/w blocks composing DPAA are BMan and QMan.
+
+ The Buffer Manager (BMan) is a hardware buffer pool management block
+ that allows software and accelerators on the datapath to acquire and
+ release buffers in order to build frames.
+
+ The Queue Manager (QMan) is a hardware queue management block
+ that allows software and accelerators on the datapath to enqueue and
+ dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+ bool "Additional driver checking"
+ help
+ Compiles in additional checks, to sanity-check the drivers and
+ any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+ tristate "BMan self-tests"
+ help
+ Compile the BMan self-test code. These tests will
+ exercise the BMan APIs to confirm functionality
+ of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+ bool "High-level API self-test"
+ depends on FSL_BMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine
+ to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+ tristate "QMan self-tests"
+ help
+ Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+ bool "QMan high-level self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine to
+ the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+ bool "QMan 'hot potato' data-stashing self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This performs a "hot potato" style test enqueuing/dequeuing a frame
+ across a series of FQs scheduled to different portals (and cpus), with
+ DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 000000000000..7ae199f1664e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \
+ bman_portal.o qman_portal.o \
+ bman.o qman.o
+
+obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o
+bman-test-y = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST) += qman-test.o
+qman-test-y = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API) += qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH) += qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 000000000000..ffa48fdbb1a9
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,797 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IER 0x0e04
+#define BM_REG_ISDR 0x0e08
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+/*
+ * Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * Enum values use 3-letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+/* Release Command */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+};
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used by acquire command */
+ u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+ struct {
+ u8 verb;
+ u8 bpid;
+ u8 __reserved[62];
+ };
+ struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT 10000 /* us */
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ union bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+ __raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ce + offset);
+}
+
+struct bman_portal {
+ struct bm_portal p;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ /* probing time config params for cpu-affine portals */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool; it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, e.g. if different users
+ * of the pool are operating via different portals.
+ */
+struct bman_pool {
+ /* index of the buffer pool to encapsulate (0-63) */
+ u32 bpid;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct bman_portal *p = ptr;
+ struct bm_portal *portal = &p->p;
+ u32 clear = p->irq_sources;
+ u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ clear |= poll_portal_slow(p, is);
+ bm_out(portal, BM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~RCR_CARRY;
+
+ return (struct bm_rcr_entry *)addr;
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+ /* increment to the next RCR pointer and handle overflow and 'vbit' */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = rcr_carryclear(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
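
A worked example of the carry-bit wrap, assuming the 64-byte entries defined above (so RCR_SHIFT = 6 and RCR_CARRY = 8 << 6 = 0x200) and a ring base aligned well beyond 0x200, with 'rcr' in scope:

    struct bm_rcr_entry *cursor = rcr->ring + 7;  /* last entry, base + 0x1c0 */
    struct bm_rcr_entry *partial = cursor + 1;    /* base + 0x200: carry bit set */

    cursor = rcr_carryclear(partial);  /* bit 0x200 masked off -> ring base */
    /* partial != cursor, so rcr_inc() toggles vbit for the next lap */
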
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+ diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 1;
+#endif
+ dpaa_zero(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ DPAA_ASSERT(rcr->available >= 1);
+ dma_wmb();
+ rcursor = rcr->cursor;
+ rcursor->_ncw_verb = myverb | rcr->vbit;
+ dpaa_flush(rcursor);
+ rcr_inc(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ enum bm_rcr_cmode cmode)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+ BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+ | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(portal, BM_REG_CFG, cfg);
+ return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_rcr *rcr = &portal->rcr;
+ int i;
+
+ DPAA_ASSERT(!rcr->busy);
+
+ i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr_ptr2idx(rcr->cursor))
+ pr_crit("losing uncommited RCR entries\n");
+
+ i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+ 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+ union bm_mc_result **mcr)
+{
+ int timeout = BM_MCR_TIMEOUT;
+
+ do {
+ *mcr = bm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
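
For reference, the start/commit/poll trio composes as in this minimal sketch, mirroring the sequence bm_shutdown_pool() uses later in this file ('p' and 'bpid' assumed in scope):

    struct bm_mc_command *cmd;
    union bm_mc_result *res;

    cmd = bm_mc_start(&p->p);	/* zeroed command slot */
    cmd->bpid = bpid;		/* pool to acquire a buffer from */
    bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
    if (!bm_mc_result_timeout(&p->p, &res))
            return -ETIMEDOUT;	/* the verb never went non-zero */
    /* res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT gives the count acquired */
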
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+ bm_out(portal, BM_REG_SCN(0), 0);
+ bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ int ret;
+
+ p = &portal->p;
+ /*
+ * Prep the low-level portal struct with the mapped addresses from the
+ * config; everything that follows depends on it, and "config" is kept
+ * mostly for (de)reference.
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ dev_err(c->dev, "RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /*
+ * Default to all BPIDs disabled; we enable them as required at
+ * run-time.
+ */
+ bm_isr_bscn_disable(p);
+
+ /* Write-to-clear any stale interrupt status bits */
+ bm_out(p, BM_REG_ISDR, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_out(p, BM_REG_IER, 0);
+ bm_out(p, BM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_out(p, BM_REG_ISDR, 0);
+ bm_out(p, BM_REG_IIR, 0);
+
+ return 0;
+
+fail_rcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+fail_rcr:
+ return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal;
+ int err;
+
+ portal = &per_cpu(bman_affine_portal, c->cpu);
+ err = bman_create_portal(portal, c);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ u32 ret = is;
+
+ if (is & BM_PIRQ_RCRI) {
+ bm_rcr_cce_update(&p->p);
+ bm_rcr_set_ithresh(&p->p, 0);
+ bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
+
+ /* There should be no status register bits left undefined */
+ DPAA_ASSERT(!is);
+ return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+ bm_out(&p->p, BM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+ return 0;
+}
+
+static int bm_shutdown_pool(u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ union bm_mc_result *bm_res;
+
+ while (1) {
+ struct bman_portal *p = get_affine_portal();
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(&p->p);
+ bm_cmd->bpid = bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Command timedout\n");
+ return -ETIMEDOUT;
+ }
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ put_affine_portal();
+ /* Pool is empty */
+ return 0;
+ }
+ put_affine_portal();
+ }
+
+ return 0;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(bm_bpalloc, count);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+ int ret;
+
+ ret = bm_shutdown_pool(bpid);
+ if (ret) {
+ pr_debug("BPID %d leaked\n", bpid);
+ return ret;
+ }
+
+ gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (bm_alloc_bpid_range(&bpid, 1))
+ return NULL;
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+
+ pool->bpid = bpid;
+
+ return pool;
+err:
+ bm_release_bpid(bpid);
+ kfree(pool);
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+ bm_release_bpid(pool->bpid);
+
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+ return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ unsigned long irqflags;
+ int avail, timeout = 1000; /* 1ms */
+ int i = num - 1;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ do {
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ if (likely(r))
+ break;
+
+ udelay(1);
+ } while (--timeout);
+
+ if (unlikely(!timeout))
+ return -ETIMEDOUT;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ /*
+ * we can copy all but the first entry: its first byte overlays the
+ * verb, where the valid-bit lives, and must only be written by the
+ * commit below
+ */
+ bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+ bm_buffer_set_bpid(r->bufs, pool->bpid);
+ if (i)
+ memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ union bm_mc_result *mcr;
+ int ret;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ mcc = bm_mc_start(&p->p);
+ mcc->bpid = pool->bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ if (!bm_mc_result_timeout(&p->p, &mcr)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Timeout\n");
+ return -ETIMEDOUT;
+ }
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs)
+ memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+ return portal->config;
+}
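
Taken together, the exported pool API above composes as in this minimal sketch (a hedged illustration only: error handling is trimmed and buf_addr is a placeholder DMA address supplied by the caller):

    #include <soc/fsl/bman.h>

    static int bman_pool_roundtrip(dma_addr_t buf_addr)
    {
            struct bman_pool *pool;
            struct bm_buffer buf;
            int ret;

            pool = bman_new_pool();		/* allocate a free BPID */
            if (!pool)
                    return -ENODEV;

            bm_buffer_set64(&buf, buf_addr);
            ret = bman_release(pool, &buf, 1);	/* give the buffer to BMan */
            if (!ret)
                    ret = bman_acquire(pool, &buf, 1); /* and take it back */

            bman_free_pool(pool);	/* drains any remainder, frees the BPID */
            return ret < 0 ? ret : 0;
    }
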
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 000000000000..9deb0524543f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_ERR_ISDR 0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ { BM_EIRQ_IVCI, "Invalid Command Verb" },
+ { BM_EIRQ_FLWI, "FBPR Low Watermark" },
+ { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { BM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low watermark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+ return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
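
A worked instance of the size encoding above:

    /*
     * A 1 MiB FBPR area gives exp = ilog2(1 << 20) = 20, so REG_FBPR_AR
     * is written with 19 (the size is encoded as exp - 1), and the
     * assertions require "ba" itself to be 1 MiB aligned.
     */
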
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = bm_ccsr_in(REG_ERR_IER);
+ isr_val = bm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = bm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ bman_hwerr_txts[i].txt);
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ /* Re-arm error capture registers */
+ bm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+ int ret, err_irq;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ u16 id, bm_pool_cnt;
+ u8 major, minor;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ bm_ccsr_start = devm_ioremap(dev, res->start,
+ res->end - res->start + 1);
+ if (!bm_ccsr_start)
+ return -ENXIO;
+
+ bm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ bman_ip_rev = BMAN_REV10;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else if (major == 2 && minor == 0) {
+ bman_ip_rev = BMAN_REV20;
+ bm_pool_cnt = 8;
+ } else if (major == 2 && minor == 1) {
+ bman_ip_rev = BMAN_REV21;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else {
+ dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+ id, major, minor);
+ return -ENODEV;
+ }
+
+ bm_set_memory(fbpr_a, fbpr_sz);
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+ /*
+ * Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+ if (IS_ERR(bm_bpalloc)) {
+ ret = PTR_ERR(bm_bpalloc);
+ dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* seed BMan resource pool */
+ ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+ 0, bm_pool_cnt - 1, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_bman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_bman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 000000000000..6579cc18811a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,219 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+ struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+ affine_bportals[pcfg->cpu] = p;
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static void bman_offline_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
+}
+
+static void bman_online_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+}
+
+static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ bman_online_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ bman_offline_cpu(cpu);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bman_hotplug_cpu_notifier = {
+ .notifier_call = bman_hotplug_cpu_callback,
+};
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct bm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ void __iomem *va;
+ int irq, cpu;
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %s property 'reg::CE'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %s property 'reg::CI'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ pcfg->cpu = -1;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %s IRQ'\n", node->full_name);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+ if (!va)
+ goto err_ioremap1;
+
+ pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+ va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+ _PAGE_GUARDED | _PAGE_NO_CACHE);
+ if (!va)
+ goto err_ioremap2;
+
+ pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+ spin_lock(&bman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&bman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&bman_lock);
+ pcfg->cpu = cpu;
+
+ if (!init_pcfg(pcfg))
+ goto err_ioremap2;
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ bman_offline_cpu(cpu);
+
+ return 0;
+
+err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+ dev_err(dev, "ioremap failed\n");
+ return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+ {
+ .compatible = "fsl,bman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bman_portal_ids,
+ },
+ .probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
+
+ return 0;
+}
+
+module_driver(bman_portal_driver,
+ bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 000000000000..f6896a2f6d90
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,80 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ struct device *dev;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below may be called even when the cpu to
+ * which the portal is affine is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 000000000000..09b1c960b26a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+ int loop = 1;
+
+ while (loop--)
+ bman_test_api();
+#endif
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 000000000000..037ed342add4
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,35 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 000000000000..6f6bdd154fe3
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,151 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+#define NUM_BUFS 93
+#define LOOPS 3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+static struct bman_pool *pool;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+static void bufs_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BUFS; i++)
+ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+ bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+ if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
+
+ /*
+ * On SoCs with BMan revision 2.0, BMan only respects the 40
+ * LS-bits of buffer addresses, masking off the upper 8-bits on
+ * release commands. The API provides for 48-bit addresses
+ * because some SoCs support all 48-bits. When generating
+ * garbage addresses for testing, we either need to zero the
+ * upper 8-bits when releasing to BMan (otherwise we'll be
+ * disappointed when the buffers we acquire back from BMan
+ * don't match), or we need to mask the upper 8-bits off when
+ * comparing. We do the latter.
+ */
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return -1;
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return 1;
+ } else {
+ if (bm_buffer_get64(a) < bm_buffer_get64(b))
+ return -1;
+ if (bm_buffer_get64(a) > bm_buffer_get64(b))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bufs_confirm(void)
+{
+ int i, j;
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ int matches = 0;
+
+ for (j = 0; j < NUM_BUFS; j++)
+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+ matches++;
+ WARN_ON(matches != 1);
+ }
+}
+
+/* test */
+void bman_test_api(void)
+{
+ int i, loops = LOOPS;
+
+ bufs_init();
+
+ pr_info("%s(): Starting\n", __func__);
+
+ pool = bman_new_pool();
+ if (!pool) {
+ pr_crit("bman_new_pool() failed\n");
+ goto failed;
+ }
+
+ /* Release buffers */
+do_loop:
+ i = 0;
+ while (i < NUM_BUFS) {
+ int num = 8;
+
+ if (i + num > NUM_BUFS)
+ num = NUM_BUFS - i;
+ if (bman_release(pool, bufs_in + i, num)) {
+ pr_crit("bman_release() failed\n");
+ goto failed;
+ }
+ i += num;
+ }
+
+ /* Acquire buffers */
+ while (i > 0) {
+ int tmp, num = 8;
+
+ if (num > i)
+ num = i;
+ tmp = bman_acquire(pool, bufs_out + i - num, num);
+ WARN_ON(tmp != num);
+ i -= num;
+ }
+ i = bman_acquire(pool, NULL, 1);
+ WARN_ON(i > 0);
+
+ bufs_confirm();
+
+ if (--loops)
+ goto do_loop;
+
+ /* Clean up */
+ bman_free_pool(pool);
+ pr_info("%s(): Finished\n", __func__);
+ return;
+
+failed:
+ WARN_ON(1);
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 000000000000..b63fd72295c6
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,103 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
+#error "Unsupported Cacheline Size"
+#endif
+
+static inline void dpaa_flush(void *p)
+{
+#ifdef CONFIG_PPC
+ flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#elif defined(CONFIG_ARM)
+ __cpuc_flush_dcache_area(p, 64);
+#elif defined(CONFIG_ARM64)
+ __flush_dcache_area(p, 64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+ prefetch(p+32);
+#endif
+ prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+ dpaa_invalidate(p);
+ dpaa_touch_ro(p);
+}
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
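+
+/*
+ * Worked example: with ringsize = 8, first = 6 (included) and last = 2
+ * (excluded), the occupied span wraps through entries 6, 7, 0 and 1, so the
+ * helper returns 8 + 2 - 6 = 4.
+ */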
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF 0x80000000
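+
+/*
+ * A hypothetical helper (not part of this driver) showing why the offset
+ * exists: gen_pool_alloc() returns 0 on failure, so resource id 0 could not
+ * be represented unless ids are biased by DPAA_GENALLOC_OFF going into the
+ * pool and unbiased coming out.
+ */
+static inline int dpaa_genalloc_get(struct gen_pool *p, u32 *id)
+{
+ unsigned long addr = gen_pool_alloc(p, 1);
+
+ if (!addr)
+ return -ENOMEM;
+ *id = addr - DPAA_GENALLOC_OFF; /* undo the bias */
+ return 0;
+}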
+
+#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 000000000000..119054bc922b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,2881 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x0000
+#define QM_REG_EQCR_CI_CINH 0x0004
+#define QM_REG_EQCR_ITR 0x0008
+#define QM_REG_DQRR_PI_CINH 0x0040
+#define QM_REG_DQRR_CI_CINH 0x0044
+#define QM_REG_DQRR_ITR 0x0048
+#define QM_REG_DQRR_DCAP 0x0050
+#define QM_REG_DQRR_SDQCR 0x0054
+#define QM_REG_DQRR_VDQCR 0x0058
+#define QM_REG_DQRR_PDQCR 0x005c
+#define QM_REG_MR_PI_CINH 0x0080
+#define QM_REG_MR_CI_CINH 0x0084
+#define QM_REG_MR_ITR 0x0088
+#define QM_REG_CFG 0x0100
+#define QM_REG_ISR 0x0e00
+#define QM_REG_IER 0x0e04
+#define QM_REG_ISDR 0x0e08
+#define QM_REG_IIR 0x0e0c
+#define QM_REG_ITPR 0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3100
+#define QM_CL_DQRR_PI_CENA 0x3200
+#define QM_CL_DQRR_CI_CENA 0x3300
+#define QM_CL_MR_PI_CENA 0x3400
+#define QM_CL_MR_CI_CENA 0x3500
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives would simply degrade performance. Hence
+ * the use of the __raw_*() interfaces, which merely ensure that the compiler
+ * treats the portal registers as volatile.
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
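+/* e.g. qm_cl(ring, 3) yields the address of the ring's fourth 64-byte entry */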
+
+/*
+ * Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * dmode == h/w dequeue mode
+ * Enum values use 3-letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 dca;
+ u16 seqnum;
+ u32 orp; /* 24-bit */
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value is used: */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ union qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "Query FQ" */
+struct qm_mcc_queryfq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+/* "Alter FQ State Commands " */
+struct qm_mcc_alterfq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2;
+ u8 count; /* number of consecutive FQID */
+ u8 __reserved3[10];
+ u32 context_b; /* frame queue context b */
+ u8 __reserved4[40];
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcc_querycgr {
+ u8 _ncw_verb;
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+};
+
+struct qm_mcc_querywq {
+ u8 _ncw_verb;
+ u8 __reserved;
+ /* select channel if verb != QUERYWQ_DEDICATED */
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved2[60];
+} __packed;
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+union qm_mc_command {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 __reserved[63];
+ };
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_queryfq queryfq;
+ struct qm_mcc_alterfq alterfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_querycgr querycgr;
+ struct qm_mcc_querywq querywq;
+ struct qm_mcc_queryfq_np queryfq_np;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+ u8 verb;
+ u8 result;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT 10000 /* us */
+union qm_mc_result {
+ struct {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[62];
+ };
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+ union qm_mc_command *cr;
+ union qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+struct qm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ /*
+ * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+ * and including 'mc' fits within a cacheline (yay!). The 'config' part
+ * is setup-only, so isn't a cause for concern. In other words, don't
+ * rearrange this structure on a whim, there be dragons ...
+ */
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+ __raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ce + offset);
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~EQCR_CARRY;
+
+ return (struct qm_eqcr_entry *)addr;
+}
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+ /* increment to the next EQCR pointer and handle overflow and 'vbit' */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = eqcr_carryclear(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
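+
+/*
+ * Worked example of the carry trick: entries are 64 bytes and QM_EQCR_SIZE
+ * is 8, so the ring spans 512 bytes and EQCR_CARRY is address bit 9.
+ * Incrementing the cursor past the last entry sets that bit;
+ * eqcr_carryclear() masks it off, wrapping back to entry 0, and the
+ * resulting pointer change tells eqcr_inc() to toggle the valid-bit.
+ */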
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG::EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG::EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+ return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (pi != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("losing uncommited EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+ (QM_EQCR_SIZE - 1);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
+ DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+ DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+ eqcr_commit_checks(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+ dma_wmb();
+ eqcursor = eqcr->cursor;
+ eqcursor->_ncw_verb = myverb | eqcr->vbit;
+ dpaa_flush(eqcursor);
+ eqcr_inc(eqcr);
+ eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
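+
+/*
+ * Note: the EQCR holds at most QM_EQCR_SIZE - 1 entries; one slot is kept
+ * free so a full ring is distinguishable from an empty one, hence
+ * fill + available == QM_EQCR_SIZE - 1 at all times.
+ */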
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+ const struct qm_dqrr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~DQRR_CARRY;
+
+ return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+ return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(portal, QM_REG_CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (dqrr->cmode != qm_dqrr_cdc &&
+ dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = dqrr_inc(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+ /*
+ * If PAMU is not available we need to invalidate the cache.
+ * When PAMU is available the cache is updated by stash
+ */
+ dpaa_invalidate_touch_ro(res);
+#endif
+ /*
+ * when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
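+
+/*
+ * The valid-bit protocol in brief: h/w alternates the VBIT it writes into
+ * each entry's verb on every lap around the 16-entry ring, and s/w flips
+ * its expected vbit whenever its PI wraps past entry 15, so a stale entry
+ * left over from the previous lap never matches and is never counted as
+ * newly produced.
+ */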
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+ int idx = dqrr_ptr2idx(dq);
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPAA_ASSERT((dqrr->ring + idx) == dq);
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~MR_CARRY;
+
+ return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+ return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+ ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != mr_ptr2idx(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = mr_inc(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+ union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+ /*
+ * when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = mr_inc(res);
+ }
+ dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = mr_ptr2idx(mr->cursor);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+ ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
+
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+ union qm_mc_result **mcr)
+{
+ int timeout = QM_MCR_TIMEOUT;
+
+ do {
+ *mcr = qm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
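+
+/*
+ * A minimal sketch of the management-command sequence these helpers
+ * implement (hypothetical caller, not part of the driver): claim the
+ * command register, fill it in, commit with a verb, then poll for the
+ * result.
+ */
+static inline int qm_mc_example(struct qm_portal *portal, u32 fqid)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+
+ mcc = qm_mc_start(portal); /* claim CR, marks state busy */
+ mcc->queryfq.fqid = fqid; /* fill in command fields */
+ qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ); /* hand over to h/w */
+ if (!qm_mc_result_timeout(portal, &mcr)) /* poll up to QM_MCR_TIMEOUT us */
+ return -ETIMEDOUT;
+ return mcr->result == QM_MCR_RESULT_OK ? 0 : -EIO;
+}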
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ /* probing time config params for cpu-affine portals */
+ const struct qm_portal_config *config;
+ /* needed for providing a non-NULL device to dma_map_***() */
+ struct platform_device *pdev;
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ struct work_struct congestion_work;
+ struct work_struct mr_work;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(qman_affine_portal);
+}
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_wq_alloc(void)
+{
+ qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+ if (!qm_portal_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+ num_fqids = _num_fqids;
+
+ fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
+ if (!fq_table)
+ return -ENOMEM;
+
+ pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+ fq_table, num_fqids * 2);
+ return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+ struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (WARN_ON(idx >= num_fqids * 2))
+ return NULL;
+#endif
+ fq = fq_table[idx];
+ DPAA_ASSERT(!fq || idx == fq->idx);
+
+ return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+ return idx_to_fq(fqid * 2);
+}
+
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+ return idx_to_fq(tag);
+#else
+ return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+ return fq->idx;
+#else
+ return (u32)fq;
+#endif
+}
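+
+/*
+ * Tag round trip, for illustration: on 64-bit kernels the 32-bit tag
+ * carried in contextB is the fq_table index; on 32-bit kernels it is the
+ * fq pointer itself. Either way, tag_to_fq(fq_to_tag(fq)) == fq once
+ * qman_create_fq() has installed the fq in fq_table.
+ */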
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+
+ u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+ u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, QMAN_POLL_LIMIT);
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_out(&p->p, QM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const union qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ msleep(1);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ char buf[16];
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+ /* PAMU is required for stashing */
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+ portal->use_eqcr_ci_stashing = 0;
+#endif
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ dev_err(c->dev, "EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ dev_err(c->dev, "DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ dev_err(c->dev, "MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+ qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+ qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ portal->bits = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ sprintf(buf, "qportal-%d", c->channel);
+ portal->pdev = platform_device_alloc(buf, -1);
+ if (!portal->pdev)
+ goto fail_devalloc;
+ if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+ goto fail_devadd;
+ ret = platform_device_add(portal->pdev);
+ if (ret)
+ goto fail_devadd;
+ isdr = 0xffffffff;
+ qm_out(p, QM_REG_ISDR, isdr);
+ portal->irq_sources = 0;
+ qm_out(p, QM_REG_IER, 0);
+ qm_out(p, QM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_out(p, QM_REG_ISDR, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_out(p, QM_REG_ISDR, isdr);
+ if (qm_dqrr_current(p)) {
+ dev_err(c->dev, "DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ const union qm_mr_entry *e = qm_mr_current(p);
+
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
+ e->verb, e->ern.rc, e->ern.fd.addr_lo);
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_out(p, QM_REG_ISDR, 0);
+ qm_out(p, QM_REG_IIR, 0);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ platform_device_del(portal->pdev);
+fail_devadd:
+ platform_device_put(portal->pdev);
+fail_devalloc:
+ kfree(portal->cgrs);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *portal;
+ int err;
+
+ portal = &per_cpu(qman_affine_portal, c->cpu);
+ err = qman_create_portal(portal, c, cgrs);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ affine_channels[c->cpu] = c->channel;
+ affine_portals[c->cpu] = portal;
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ platform_device_del(qm->pdev);
+ platform_device_put(qm->pdev);
+
+ qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ struct qman_portal *qm = get_affine_portal();
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ put_affine_portal();
+ return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg, u8 verb)
+{
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+ fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ congestion_work);
+ struct qman_cgrs rr, c;
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ spin_unlock(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ return;
+ }
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+ &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ mr_work);
+ const union qm_mr_entry *msg;
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+
+ preempt_disable();
+
+ while (1) {
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ break;
+
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = fqid_to_fq(msg->fq.fqid);
+ if (WARN_ON(!fq))
+ break;
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+ fq = tag_to_fq(msg->fq.contextB);
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ pr_crit_once("Leaking DCP ERNs!\n");
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+ /* It's a software ERN */
+ fq = tag_to_fq(msg->ern.tag);
+ fq->cb.ern(p, fq, msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ }
+
+ qm_mr_cci_consume(&p->p, num);
+ preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ if (is & QM_PIRQ_CSCI) {
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->congestion_work);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->mr_work);
+ }
+
+ return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe because:
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust contextB as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: contextB points to the FQ */
+ fq = tag_to_fq(dq->contextB);
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+ /*
+ * The callback can request that we exit without
+ * consuming this entry nor advancing;
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+ u32 ier;
+
+ /*
+ * Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+ local_irq_save(irqflags);
+ bits &= QM_PIRQ_VISIBLE;
+ clear_bits(bits, &p->irq_sources);
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ ier = qm_in(&p->p, QM_REG_IER);
+ /*
+ * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_out(&p->p, QM_REG_ISR, ~ier);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ put_affine_portal();
+ }
+ WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+ return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+ return __poll_portal_fast(p, limit);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+
+ /* A context_b of 0 is allegedly special, so don't use that fqid */
+ if (fqid == 0 || fqid >= num_fqids) {
+ WARN(1, "bad fqid %d\n", fqid);
+ return -EINVAL;
+ }
+
+ fq->idx = fqid * 2;
+ if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+ fq->idx++;
+
+ WARN_ON(fq_table[fq->idx]);
+ fq_table[fq->idx] = fq;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
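+
+/*
+ * A minimal lifecycle sketch (hypothetical caller, not part of the driver):
+ * create an fq with a dynamically allocated FQID and schedule it with a
+ * single INITFQ. A real caller installs dqrr/ern/fqs callbacks in fq->cb
+ * first and usually passes non-NULL opts.
+ */
+static int __maybe_unused example_setup_fq(struct qman_fq *fq)
+{
+ int err;
+
+ err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+ if (err)
+ return err;
+ /* NULL opts: accept hardware defaults; SCHED makes it dequeueable */
+ return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, NULL);
+}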
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+
+ DPAA_ASSERT(fq_table[fq->idx]);
+ fq_table[fq->idx] = NULL;
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ u8 res, myverb;
+ int ret = 0;
+
+ myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+ ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ /* And can't be set at the same time as TDTHRESH */
+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ mcc->initfq.fqid = fq->fqid;
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+ mcc->initfq.fqd.context_b = fq_to_tag(fq);
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+ DMA_TO_DEVICE);
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ int wq = 0;
+
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ wq = 4;
+ }
+ qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+ }
+ qm_mc_commit(&p->p, myverb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "MCR timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ if (opts) {
+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (opts->we_mask & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ /* Issue a ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state != qman_fq_state_parked) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_sched)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state == qman_fq_state_retired ||
+ fq->state == qman_fq_state_oos) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (res == QM_MCR_RESULT_OK) {
+ ret = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ union qm_mr_entry msg;
+
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ msg.fq.fqid = fq->fqid;
+ msg.fq.contextB = fq_to_tag(fq);
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ ret = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+ fq->state != qman_fq_state_retired) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
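+
+/*
+ * Summary of the FQ state machine driven by the four calls above, as
+ * enforced by their state checks (a sketch, not normative documentation):
+ *
+ *	oos/parked --qman_init_fq()--> parked (or sched with
+ *					QMAN_INITFQ_FLAG_SCHED)
+ *	parked --qman_schedule_fq()--> sched
+ *	parked/sched --qman_retire_fq()--> retired (via the CHANGING flag
+ *					if the retirement is deferred)
+ *	retired --qman_oos_fq()--> oos
+ */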
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+
+static int qman_query_fq_np(struct qman_fq *fq,
+ struct qm_mcr_queryfq_np *np)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *np = mcr->queryfq_np;
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ ret = -ERANGE;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+ struct qm_mcr_querycgr *cgrd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->querycgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ else {
+ dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+ mcr_result_str(mcr->result));
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+ struct qm_mcr_querycgr query_cgr;
+ int err;
+
+ err = qman_query_cgr(cgr, &query_cgr);
+ if (err)
+ return err;
+
+ *result = !!query_cgr.cgr.cs;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+ unsigned long irqflags;
+ int ret = -EBUSY;
+
+ local_irq_save(irqflags);
+ if (p->vdqcr_owned)
+ goto out;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto out;
+
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ p->vdqcr_owned = fq;
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ ret = 0;
+out:
+ local_irq_restore(irqflags);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
+ put_affine_portal();
+ return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !set_vdqcr(p, fq, vdqcr));
+ else
+ wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+ return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_retired)
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+ else
+ ret = set_vdqcr(&p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /*
+ * NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue.
+ */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
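+
+/*
+ * Illustrative sketch (hypothetical caller): pull up to three frames from
+ * a parked or retired FQ and block until the volatile dequeue completes;
+ * the frames themselves arrive through the portal's DQRR processing.
+ *
+ *	err = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
+ *				    QMAN_VOLATILE_FLAG_FINISH,
+ *				    QM_VDQCR_NUMFRAMES_SET(3));
+ */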
+
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags;
+ u8 avail;
+ int ret = 0;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq)) {
+ /* EQCR ring full - report -EBUSY rather than dropping silently */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ eq->fqid = fq->fqid;
+ eq->tag = fq_to_tag(fq);
+ eq->fd = *fd;
+
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_enqueue);
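+
+/*
+ * Illustrative sketch (hypothetical caller): the EQCR ring can be
+ * transiently full, so callers typically retry a bounded number of times
+ * on -EBUSY. MAX_RETRIES is a made-up name.
+ *
+ *	for (i = 0; i < MAX_RETRIES; i++) {
+ *		err = qman_enqueue(fq, &fd);
+ *		if (err != -EBUSY)
+ *			break;
+ *	}
+ */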
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ if (mcr->result != QM_MCR_RESULT_OK)
+ ret = -EIO;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n))
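+
+/*
+ * Worked example: a portal on channel SWPORTAL0 + 3 has PORTAL_IDX() == 3
+ * and TARG_MASK() == BIT(28); each portal owns one bit of the CSCN target
+ * mask, counted down from the MSB.
+ */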
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+ struct qman_cgr cgr;
+ int err_cnt = 0;
+
+ for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+ if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+ err_cnt++;
+ }
+
+ if (err_cnt)
+ pr_err("%d error%s while initialising CGR h/w\n",
+ err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts = {};
+ int ret;
+ struct qman_portal *p;
+
+ /*
+ * We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= CGR_NUM)
+ return -EINVAL;
+
+ preempt_disable();
+ p = get_affine_portal();
+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ if (opts) {
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto out;
+ local_opts = *opts;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ else
+ /* Overwrite TARG */
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_MASK(p);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so; opts is known non-NULL here */
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto out;
+ }
+
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success */
+ dev_err(p->config->dev, "CGR HW state partially modified\n");
+ ret = 0;
+ goto out;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en &&
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+out:
+ spin_unlock(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+ /* attempt to delete from other portal than creator */
+ dev_err(p->config->dev, "CGR not owned by current portal\n");
+ dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if (i->cgrid == cgr->cgrid && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+ /* Overwrite TARG */
+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+ ~(TARG_MASK(p));
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+static void qman_delete_cgr_smp_call(void *p)
+{
+ qman_delete_cgr((struct qman_cgr *)p);
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+ /*
+ * Run the deletion on the CPU the CGR was created on. An IPI
+ * is used instead of a helper kthread because kthread_create()
+ * and wait_for_completion() may sleep, which is not allowed
+ * while preemption is disabled.
+ */
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_delete_cgr_smp_call, cgr, true);
+ preempt_enable();
+ return;
+ }
+
+ qman_delete_cgr(cgr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+ const union qm_mr_entry *msg;
+ int found = 0;
+
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+ found = 1;
+ qm_mr_next(p);
+ qm_mr_cci_consume_to_current(p);
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ }
+ return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+ bool wait)
+{
+ const struct qm_dqrr_entry *dqrr;
+ int found = 0;
+
+ do {
+ qm_dqrr_pvb_update(p);
+ dqrr = qm_dqrr_current(p);
+ if (!dqrr)
+ cpu_relax();
+ } while (wait && !dqrr);
+
+ while (dqrr) {
+ if (dqrr->fqid == fqid && (dqrr->stat & s))
+ found = 1;
+ qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+ qm_dqrr_pvb_update(p);
+ qm_dqrr_next(p);
+ dqrr = qm_dqrr_current(p);
+ }
+ return found;
+}
+
+#define qm_mr_drain(p, V) \
+ _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+ _qm_dqrr_consume_and_match(p, 0, 0, false)
+
+static int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ struct device *dev;
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ int orl_empty, drain = 0, ret = 0;
+ u32 channel, wq, res;
+ u8 state;
+
+ p = get_affine_portal();
+ dev = p->config->dev;
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq_np.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ goto out; /* Already OOS, no need to do any more checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ /* Need to store these since the MCR gets reused */
+ channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+ wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (res == QM_MCR_RESULT_PENDING) {
+ /*
+ * Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+ * order for the FQ to drain the portal needs to be set
+ * to dequeue from the channel the FQ is scheduled on
+ */
+ int found_fqrn = 0;
+ u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < qm_channel_pool1 + 15) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1)<<4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+ fqid, channel);
+ ret = -EBUSY;
+ goto out;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ do {
+ /* Keep draining DQRR while checking the MR */
+ qm_dqrr_drain_nomatch(&p->p);
+ /* Process message ring too */
+ found_fqrn = qm_mr_drain(&p->p, FQRN);
+ cpu_relax();
+ } while (!found_fqrn);
+
+ }
+ if (res != QM_MCR_RESULT_OK &&
+ res != QM_MCR_RESULT_PENDING) {
+ dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+ fqid, res);
+ ret = -EIO;
+ goto out;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /*
+ * ORL had no entries, no need to wait until the
+ * ERNs come in
+ */
+ orl_empty = 1;
+ }
+ /*
+ * Retirement succeeded, check to see if FQ needs
+ * to be drained
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ do {
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ /*
+ * Wait for a dequeue and process the dequeues,
+ * making sure to empty the ring completely
+ */
+ } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+ }
+ qm_dqrr_sdqcr_set(&p->p, 0);
+
+ while (!orl_empty) {
+ /* Wait for the ORL to have been completely drained */
+ orl_empty = qm_mr_drain(&p->p, FQRL);
+ cpu_relax();
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_OOS:
+ /* Done */
+ break;
+
+ default:
+ ret = -EIO;
+ }
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal)
+{
+ return portal->config;
+}
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(p, cnt);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
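+
+/*
+ * Note on the DPAA_GENALLOC_OFF trick visible above: the ID pools are
+ * seeded (in qman_ccsr.c) with the offset or'd in so that a valid ID of 0
+ * can't collide with gen_pool_alloc()'s 0-on-failure return, and
+ * qman_alloc_range() masks it back off. Illustrative pairing
+ * (hypothetical caller):
+ *
+ *	u32 fqid;
+ *
+ *	err = qman_alloc_fqid_range(&fqid, 1);
+ *	...
+ *	err = qman_release_fqid(fqid);
+ */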
+
+int qman_release_fqid(u32 fqid)
+{
+ int ret = qman_shutdown_fq(fqid);
+
+ if (ret) {
+ pr_debug("FQID %d leaked\n", fqid);
+ return ret;
+ }
+
+ gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
+
+static int qpool_cleanup(u32 qp)
+{
+ /*
+ * We query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose destination channel is the
+ * pool-channel being released. When a non-OOS FQD is found, we attempt
+ * to clean it up.
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return 0;
+ if (qm_fqd_get_chan(&fqd) == qp) {
+ /* The channel is the FQ's target, clean it */
+ err = qman_shutdown_fq(fq.fqid);
+ if (err)
+ /*
+ * Couldn't shut down the FQ
+ * so the pool must be leaked
+ */
+ return err;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+ int ret;
+
+ ret = qpool_cleanup(qp);
+ if (ret) {
+ pr_debug("CHID %d leaked\n", qp);
+ return ret;
+ }
+
+ gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+ /*
+ * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose CGR is the CGR being released
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return 0;
+ if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ fqd.cgid == cgrid) {
+ pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+ cgrid, fq.fqid);
+ return -EIO;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+ int ret;
+
+ ret = cgr_cleanup(cgrid);
+ if (ret) {
+ pr_debug("CGRID %d leaked\n", cgrid);
+ return ret;
+ }
+
+ gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 000000000000..0cace9e0077e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,808 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
+#define REG_DD_CFG 0x0200
+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC 0x0400
+#define REG_PFDR_FP_HEAD 0x0404
+#define REG_PFDR_FP_TAIL 0x0408
+#define REG_PFDR_FP_LWIT 0x0410
+#define REG_PFDR_CFG 0x0414
+#define REG_SFDR_CFG 0x0500
+#define REG_SFDR_IN_USE 0x0504
+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID 0x0630
+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_ECIR2 0x0a0c
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_MCR 0x0b00
+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG 0x0be0
+#define REG_HID_CFG 0x0bf0
+#define REG_IDLE_STAT 0x0bf4
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FQD_BARE 0x0c00
+#define REG_PFDR_BARE 0x0c20
+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE 0x0c80
+#define REG_QCSP_BAR 0x0c84
+#define REG_CI_SCHED_CFG 0x0d00
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_CI_RLM_AVG 0x0d14
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR 0x01000000
+#define MCR_get_rslt(v) (u8)((v) >> 24)
+#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r) ((r) == 0xf0)
+#define MCR_rslt_eaccess(r) ((r) == 0xf8)
+#define MCR_rslt_inval(r) ((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match cores
+ * ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV 4
+#define QM_CI_SCHED_CFG_SRQ_W 3
+#define QM_CI_SCHED_CFG_RW_W 2
+#define QM_CI_SCHED_CFG_BMAN_W 2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+ qm_wq_portal = 0,
+ qm_wq_pool = 1,
+ qm_wq_fman0 = 2,
+ qm_wq_fman1 = 3,
+ qm_wq_caam = 4,
+ qm_wq_pme = 5,
+ qm_wq_first = qm_wq_portal,
+ qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+ qm_memory_fqd,
+ qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IFSI)
+
+struct qm_ecir {
+ u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+ return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+ return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+ u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+ return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+ return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+ u32 info; /* memid[24-27], eadr[0-11] */
+ /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+ { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+ { QM_EIRQ_CTDE, "Corenet Target Data Error" },
+ { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+ { QM_EIRQ_PLWI, "PFDR Low Watermark" },
+ { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { QM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+ { QM_EIRQ_ICVI, "Invalid Command Verb" },
+ { QM_EIRQ_IFSI, "Invalid Flow Control State" },
+ { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+ { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+ { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+ { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+ { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+ { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+ { QM_EIRQ_IESI, "Invalid Enqueue State" },
+ { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+ { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+ { 0x01FF, 24, "FQD cache tag memory 0" },
+ { 0x01FF, 24, "FQD cache tag memory 1" },
+ { 0x01FF, 24, "FQD cache tag memory 2" },
+ { 0x01FF, 24, "FQD cache tag memory 3" },
+ { 0x0FFF, 512, "FQD cache memory" },
+ { 0x07FF, 128, "SFDR memory" },
+ { 0x01FF, 72, "WQ context memory" },
+ { 0x00FF, 240, "CGR memory" },
+ { 0x00FF, 302, "Internal Order Restoration List memory" },
+ { 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered;
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* A SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+
+static inline u32 qm_ccsr_in(u32 offset)
+{
+ return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, qm_ccsr_start + offset/4);
+}
+
+u32 qm_get_pools_sdqcr(void)
+{
+ return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+ DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+ portal == qm_dc_portal_fman1);
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+ else
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+ u8 csw5, u8 csw6, u8 csw7)
+{
+ qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(void)
+{
+ qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+ qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+ (QM_CI_SCHED_CFG_SRCCIV << 24) |
+ (QM_CI_SCHED_CFG_SRQ_W << 8) |
+ (QM_CI_SCHED_CFG_RW_W << 4) |
+ QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = qm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+#define PFDR_AR_EN BIT(31)
+static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+ u32 exp = ilog2(size);
+
+ /* choke if size isn't within range */
+ DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+ is_power_of_2(size));
+ /* choke if 'ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ qm_ccsr_out(offset, upper_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+}
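+
+/*
+ * Worked example: a 1 MB FQD reservation gives exp = ilog2(0x100000) = 20,
+ * so the AR register is written as PFDR_AR_EN | 19; the hardware encodes
+ * the region size as (log2(size) - 1).
+ */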
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+ qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+ qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+ qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+ u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+ DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+ /* Make sure the command interface is 'idle' */
+ if (!MCR_rslt_idle(rslt)) {
+ dev_crit(dev, "QMAN_MCR isn't idle");
+ WARN_ON(1);
+ }
+
+ /* Write the MCR command params then the verb */
+ qm_ccsr_out(REG_MCP(0), pfdr_start);
+ /*
+ * TODO: remove this - it's a workaround for a model bug that is
+ * corrected in more recent versions. We use the workaround until
+ * everyone has upgraded.
+ */
+ qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+ dma_wmb();
+ qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+ /* Poll for the result */
+ do {
+ rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+ } while (!MCR_rslt_idle(rslt));
+ if (MCR_rslt_ok(rslt))
+ return 0;
+ if (MCR_rslt_eaccess(rslt))
+ return -EACCES;
+ if (MCR_rslt_inval(rslt))
+ return -EINVAL;
+ dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+ return -ENODEV;
+}
+
+/*
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+ fqd_a = rmem->base;
+ fqd_sz = rmem->size;
+
+ WARN_ON(!(fqd_a && fqd_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+ pfdr_a = rmem->base;
+ pfdr_sz = rmem->size;
+
+ WARN_ON(!(pfdr_a && pfdr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
+static unsigned int qm_get_fqid_maxcnt(void)
+{
+ return fqd_sz / 64;
+}
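+
+/*
+ * qm_get_fqid_maxcnt() above: each FQD entry occupies 64 bytes, so e.g. a
+ * 1 MB "fsl,qman-fqd" reservation yields 0x100000 / 64 = 16384 FQIDs.
+ */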
+
+/*
+ * Flush this memory range from data cache so that QMAN originated
+ * transactions for this memory region could be marked non-coherent.
+ */
+static int zero_priv_mem(struct device *dev, struct device_node *node,
+ phys_addr_t addr, size_t sz)
+{
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+ memset_io(tmpp, 0, sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + sz);
+ iounmap(tmpp);
+
+ return 0;
+}
+
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ dev_warn(dev, "ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << bit_count % 32);
+ }
+ j = 16 - i;
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
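+
+/*
+ * Worked example for log_edata_bits(): the 240-bit CGR memory entry gives
+ * i = 8 words and mask = 0xffff, so the dump starts at EDATA(8) (masked to
+ * 16 bits) and continues through EDATA(15).
+ */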
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+ u32 ecsr_val)
+{
+ struct qm_ecir ecir_val;
+ struct qm_eadr eadr_val;
+ int memid;
+
+ ecir_val.info = qm_ccsr_in(REG_ECIR);
+ /* Is portal info valid */
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ struct qm_ecir2 ecir2_val;
+
+ ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+ qm_ecir2_get_pnum(&ecir2_val));
+ }
+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_v3_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_v3_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ } else {
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+ qm_ecir_get_pnum(&ecir_val));
+ }
+ if (ecsr_val & FQID_ECSR_ERR)
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ }
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = qm_ccsr_in(REG_ERR_IER);
+ isr_val = qm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = qm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+ if (qman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ qman_hwerr_txts[i].txt);
+ if (qman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(dev, isr_mask,
+ ecsr_val);
+ /* Re-arm error capture registers */
+ qm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ qman_hwerr_txts[i].mask);
+ ier_val &= ~qman_hwerr_txts[i].mask;
+ qm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+ int i, err;
+
+ /* FQD memory */
+ qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ /* PFDR memory */
+ qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+ err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+ if (err)
+ return err;
+ /* thresholds */
+ qm_set_pfdr_threshold(512, 64);
+ qm_set_sfdr_threshold(128);
+ /* clear stale PEBI bit from interrupt status register */
+ qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+ /* corenet initiator settings */
+ qm_set_corenet_initiator();
+ /* HID settings */
+ qm_set_hid();
+ /* Set scheduling weights to defaults */
+ for (i = qm_wq_first; i <= qm_wq_last; i++)
+ qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+ /* We are not prepared to accept ERNs for hardware enqueues */
+ qm_set_dc(qm_dc_portal_fman0, 1, 0);
+ qm_set_dc(qm_dc_portal_fman1, 1, 0);
+ return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+ else
+ before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+ if (!done) {
+ liodn_offset = before & LIO_CFG_LIODN_MASK;
+ done = 1;
+ return;
+ }
+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+ u32 before, after;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+ /* Each pair of vcpus shares the same SRQ (SDEST) */
+ cpu_idx /= 2;
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+ } else {
+ before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+ }
+}
+
+static int qman_resource_init(struct device *dev)
+{
+ int pool_chan_num, cgrid_num;
+ int ret, i;
+
+ switch (qman_ip_rev >> 8) {
+ case 1:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ case 2:
+ pool_chan_num = 3;
+ cgrid_num = 64;
+ break;
+ case 3:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+ pool_chan_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+ return ret;
+ }
+
+ ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+ return ret;
+ }
+
+ /* parse pool channels into the SDQCR mask */
+ for (i = 0; i < pool_chan_num; i++)
+ qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(qm_channel_pool1 + i);
+
+ ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+ qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ int ret, err_irq;
+ u16 id;
+ u8 major, minor;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %s memory resource\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!qm_ccsr_start)
+ return -ENXIO;
+
+ qm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+ return -ENODEV;
+ } else if (major == 1 && minor == 1)
+ qman_ip_rev = QMAN_REV11;
+ else if (major == 1 && minor == 2)
+ qman_ip_rev = QMAN_REV12;
+ else if (major == 2 && minor == 0)
+ qman_ip_rev = QMAN_REV20;
+ else if (major == 3 && minor == 0)
+ qman_ip_rev = QMAN_REV30;
+ else if (major == 3 && minor == 1)
+ qman_ip_rev = QMAN_REV31;
+ else {
+ dev_err(dev, "Unknown QMan version\n");
+ return -ENODEV;
+ }
+
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+
+ ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
+ WARN_ON(ret);
+ if (ret)
+ return -ENODEV;
+
+ ret = qman_init_ccsr(dev);
+ if (ret) {
+ dev_err(dev, "CCSR setup failed\n");
+ return ret;
+ }
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_err(dev, "Can't get %s property 'interrupts'\n",
+ node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+
+ /*
+ * Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+ if (IS_ERR(qm_fqalloc)) {
+ ret = PTR_ERR(qm_fqalloc);
+ dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+ if (IS_ERR(qm_qpalloc)) {
+ ret = PTR_ERR(qm_qpalloc);
+ dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+ if (IS_ERR(qm_cgralloc)) {
+ ret = PTR_ERR(qm_cgralloc);
+ dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qman_resource_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+ if (ret)
+ return ret;
+
+ ret = qman_wq_alloc();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+ {
+ .compatible = "fsl,qman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_qman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_qman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 000000000000..148614388fca
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,355 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW 1
+#define CONFIG_FSL_DPA_PIRQ_FAST 1
+
+static struct cpumask portal_cpus;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+ struct device *dev = pcfg->dev;
+ int window_count = 1;
+ struct iommu_domain_geometry geom_attr;
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+ if (!pcfg->iommu_domain) {
+ dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ goto no_iommu;
+ }
+ geom_attr.aperture_start = 0;
+ geom_attr.aperture_end =
+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+ geom_attr.force_aperture = true;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+ &geom_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+ &window_count);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ &stash_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_attach_device(pcfg->iommu_domain, dev);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ &window_count);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_detach_device;
+ }
+
+no_iommu:
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+
+ return;
+
+#ifdef CONFIG_FSL_PAMU
+out_detach_device:
+ iommu_detach_device(pcfg->iommu_domain, dev);
+out_domain_free:
+ iommu_domain_free(pcfg->iommu_domain);
+ pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+ struct qman_portal *p;
+ u32 irq_sources = 0;
+
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->channel);
+
+ pcfg->iommu_domain = NULL;
+ portal_set_cpu(pcfg, pcfg->cpu);
+
+ p = qman_create_affine_portal(pcfg, NULL);
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ /* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+ irq_sources |= QM_PIRQ_DQRI;
+#endif
+ qman_p_irqsource_add(p, irq_sources);
+
+ spin_lock(&qman_lock);
+ if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
+ }
+ spin_unlock(&qman_lock);
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+ unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ if (pcfg->iommu_domain) {
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+ if (ret < 0) {
+ dev_err(pcfg->dev,
+ "Failed to update pamu stash setting\n");
+ return;
+ }
+ }
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+}
+
+static void qman_offline_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
+ qman_portal_update_sdest(pcfg, 0);
+ }
+ }
+}
+
+static void qman_online_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+}
+
+static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ qman_online_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ qman_offline_cpu(cpu);
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block qman_hotplug_cpu_notifier = {
+ .notifier_call = qman_hotplug_cpu_callback,
+};
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct qm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ const u32 *channel;
+ void __iomem *va;
+ int irq, len, cpu;
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %s property 'reg::CE'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %s property 'reg::CI'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ channel = of_get_property(node, "cell-index", &len);
+ if (!channel || (len != 4)) {
+ dev_err(dev, "Can't get %s property 'cell-index'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ pcfg->channel = *channel;
+ pcfg->cpu = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
+ goto err_ioremap1;
+ }
+
+ pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+ va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+ _PAGE_GUARDED | _PAGE_NO_CACHE);
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
+ goto err_ioremap2;
+ }
+
+ pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+ pcfg->pools = qm_get_pools_sdqcr();
+
+ spin_lock(&qman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&qman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&qman_lock);
+ pcfg->cpu = cpu;
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ qman_offline_cpu(cpu);
+
+ return 0;
+
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+ return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+ {
+ .compatible = "fsl,qman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = qman_portal_ids,
+ },
+ .probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
+
+ return 0;
+}
+
+module_driver(qman_portal_driver,
+ qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 000000000000..5cf821e623a9
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,371 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+ u8 verb;
+ u8 result;
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+ return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+ u8 verb;
+ u8 result;
+ u8 __reserved[30];
+ /* Access this struct using qman_cgrs_get() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+ u8 verb;
+ u8 result;
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[6];
+ u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved3[3];
+ u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+
+/* "Query FQ Non-Programmable Fields" */
+struct qm_mcc_queryfq_np {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+
+struct qm_mcr_queryfq_np {
+ u8 verb;
+ u8 result;
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+ u32 fqd_link; /* 24-bit, _res2[24-31] */
+ u16 odp_seq; /* 14-bit, _res3[14-15] */
+ u16 orp_nesn; /* 14-bit, _res4[14-15] */
+ u16 orp_ea_hseq; /* 15-bit, _res5[15] */
+ u16 orp_ea_tseq; /* 15-bit, _res6[15] */
+ u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
+ u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
+ u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
+ u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
+ u8 __reserved2[5];
+ u8 is; /* 1-bit, _res12[1-7] */
+ u16 ics_surp;
+ u32 byte_cnt;
+ u32 frm_cnt; /* 24-bit, _res13[24-31] */
+ u32 __reserved3;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved4;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+ qm_mcr_fqd_link_mask = BIT(24) - 1,
+ qm_mcr_odp_seq_mask = BIT(14) - 1,
+ qm_mcr_orp_nesn_mask = BIT(14) - 1,
+ qm_mcr_orp_ea_hseq_mask = BIT(15) - 1,
+ qm_mcr_orp_ea_tseq_mask = BIT(15) - 1,
+ qm_mcr_orp_ea_hptr_mask = BIT(24) - 1,
+ qm_mcr_orp_ea_tptr_mask = BIT(24) - 1,
+ qm_mcr_pfdr_hptr_mask = BIT(24) - 1,
+ qm_mcr_pfdr_tptr_mask = BIT(24) - 1,
+ qm_mcr_is_mask = BIT(1) - 1,
+ qm_mcr_frm_cnt_mask = BIT(24) - 1,
+};
+#define qm_mcr_np_get(np, field) \
+ ((np)->field & (qm_mcr_##field##_mask))
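+
+/*
+ * Example (illustrative): given a struct qm_mcr_queryfq_np response 'np',
+ * extract the frame count masked down to its architected 24-bit width:
+ *
+ * u32 cnt = qm_mcr_np_get(&np, frm_cnt);
+ */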
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+/*
+ * CGR state is reported as eight 32-bit words, MSB first: CGR 0 is bit 31
+ * of word 0. CGR_BITS_PER_WORD is the log2 word-index shift, not the word
+ * width.
+ */
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+ return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
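+
+/*
+ * Example (illustrative): after querying congestion state into 'cgrs', test
+ * whether a particular CGR, say CGR 27, is congested:
+ *
+ * if (qman_cgrs_get(&cgrs, 27))
+ * handle_congestion(); /* hypothetical handler */
+ */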
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ *dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int i;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (i = 0; i < 8; i++)
+ *_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int i;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (i = 0; i < 8; i++)
+ *_d++ = *_a++ ^ *_b++;
+}
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /*
+ * the portal's dedicated channel id, used when initialising
+ * frame queues so they target this portal when scheduled
+ */
+ u16 channel;
+ /*
+ * mask of pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+ */
+ u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+void qman_liodn_fixup(u16 channel);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
+
+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
+
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified
+ * and the VDQCR is already in use; otherwise it returns non-zero to indicate
+ * failure. If QMAN_VOLATILE_FLAG_FINISH is specified, the function will only
+ * return once the VDQCR command has finished executing (i.e. once the
+ * callback for the last DQRR entry resulting from the command has been
+ * called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
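+
+/*
+ * Example (illustrative, mirroring the self-test): synchronously drain up to
+ * four frames from a parked FQ:
+ *
+ * err = qman_volatile_dequeue(fq,
+ * QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
+ * QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(4));
+ */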
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
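+
+/*
+ * Example (illustrative, mirroring the self-test): dequeue from the dedicated
+ * channel plus pool channel 2, priority QoS, with token 0x98 (the pool-channel
+ * bits come from QM_SDQCR_CHANNELS_POOL() in the public qman.h header):
+ *
+ * sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_TYPE_PRIO_QOS |
+ * QM_SDQCR_TOKEN_SET(0x98) | QM_SDQCR_CHANNELS_DEDICATED |
+ * QM_SDQCR_CHANNELS_POOL(2);
+ */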
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit':
+ * the bits for channels with frame availability.
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits: */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern struct qman_portal *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 000000000000..18f7f0202fa7
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+ int loop = 1;
+ int err = 0;
+
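+ /* run each configured test once, stopping at the first failure */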
+ while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+ err = qman_test_stash();
+ if (err)
+ break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+ err = qman_test_api();
+ if (err)
+ break;
+#endif
+ }
+ return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 000000000000..d5f8cb2260dc
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,36 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 000000000000..6880ff17f45e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,252 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID 27
+#define POOL_ID 2
+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES 10
+#define NUM_PARTIAL 4
+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
+ QM_SDQCR_TYPE_PRIO_QOS | \
+ QM_SDQCR_TOKEN_SET(0x98) | \
+ QM_SDQCR_CHANNELS_DEDICATED | \
+ QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
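+
+/*
+ * WAIT | FINISH makes each qman_volatile_dequeue() below synchronous: the
+ * call returns only after cb_dqrr() has consumed every resulting DQRR entry.
+ */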
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+ struct qman_fq *,
+ const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+ .cb.dqrr = cb_dqrr,
+ .cb.ern = cb_ern,
+ .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+ qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+ qm_fd_set_contig_big(fd, 0x0000ffff);
+ fd->cmd = 0xfeedf00d;
+}
+
+static void fd_inc(struct qm_fd *fd)
+{
+ u64 t = qm_fd_addr_get64(fd);
+ int z = t >> 40;
+ unsigned int len, off;
+ enum qm_fd_format fmt;
+
+ t <<= 1;
+ if (z)
+ t |= 1;
+ qm_fd_addr_set64(fd, t);
+
+ fmt = qm_fd_get_format(fd);
+ off = qm_fd_get_offset(fd);
+ len = qm_fd_get_length(fd);
+ len--;
+ qm_fd_set_param(fd, fmt, off, len);
+
+ fd->cmd++;
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+{
+ int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+
+ if (!r) {
+ enum qm_fd_format fmt_a, fmt_b;
+
+ fmt_a = qm_fd_get_format(a);
+ fmt_b = qm_fd_get_format(b);
+ r = fmt_a - fmt_b;
+ }
+ if (!r)
+ r = a->cfg - b->cfg;
+ if (!r)
+ r = a->cmd - b->cmd;
+ return r;
+}
+
+/* Enqueue NUM_ENQUEUES frames, "incrementing" the fd between each */
+static int do_enqueues(struct qman_fq *fq)
+{
+ unsigned int loop;
+ int err = 0;
+
+ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+ if (qman_enqueue(fq, &fd)) {
+ pr_crit("qman_enqueue() failed\n");
+ err = -EIO;
+ }
+ fd_inc(&fd);
+ }
+
+ return err;
+}
+
+int qman_test_api(void)
+{
+ unsigned int flags, frmcnt;
+ int err;
+ struct qman_fq *fq = &fq_base;
+
+ pr_info("%s(): Starting\n", __func__);
+ fd_init(&fd);
+ fd_init(&fd_dq);
+
+ /* Initialise (parked) FQ */
+ err = qman_create_fq(0, FQ_FLAGS, fq);
+ if (err) {
+ pr_crit("qman_create_fq() failed\n");
+ goto failed;
+ }
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err) {
+ pr_crit("qman_init_fq() failed\n");
+ goto failed;
+ }
+ /* Do enqueues + VDQCR, twice. (Parked FQ) */
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (till-empty);\n");
+ frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+ NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_err("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("scheduled dequeue (till-empty)\n");
+ err = qman_schedule_fq(fq);
+ if (err) {
+ pr_crit("qman_schedule_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, sdqcr_complete);
+
+ /* Retire and OOS the FQ */
+ err = qman_retire_fq(fq, &flags);
+ if (err < 0) {
+ pr_crit("qman_retire_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, retire_complete);
+ if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+ err = -EIO;
+ pr_crit("leaking frames\n");
+ goto failed;
+ }
+ err = qman_oos_fq(fq);
+ if (err) {
+ pr_crit("qman_oos_fq() failed\n");
+ goto failed;
+ }
+ qman_destroy_fq(fq);
+ pr_info("%s(): Finished\n", __func__);
+ return 0;
+
+failed:
+ WARN_ON(1);
+ return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+ pr_err("BADNESS: dequeued frame doesn't match;\n");
+ return qman_cb_dqrr_consume;
+ }
+ fd_inc(&fd_dq);
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ sdqcr_complete = 1;
+ wake_up(&waitqueue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ pr_crit("cb_ern() unimplemented");
+ WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+ pr_crit("unexpected FQS message");
+ WARN_ON(1);
+ return;
+ }
+ pr_info("Retirement message received\n");
+ retire_complete = 1;
+ wake_up(&waitqueue);
+}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 000000000000..43cf66ba42f5
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,617 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * ie. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ * handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ * and advance the iterator for the next loop. This includes a final fixup,
+ * which connects the last handler to the first (and which is why phase 2
+ * and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * initialise FQ objects and advance the iterator for the next loop.
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ * initialisation targets the correct cpu.
+ */
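+
+/*
+ * For example (illustrative), with two online cpus and HP_PER_CPU == 2 the
+ * frame's route is:
+ *
+ * cpu0/h0 -> cpu1/h0 -> cpu0/h1 -> cpu1/h1 -> cpu0/h0 -> ...
+ *
+ * so every hop crosses cpus, exercising the stashing path each time.
+ */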
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+ int (*fn)(void);
+ atomic_t started;
+};
+
+static int bstrap_fn(void *bs)
+{
+ struct bstrap *bstrap = bs;
+ int err;
+
+ atomic_inc(&bstrap->started);
+ err = bstrap->fn();
+ if (err)
+ return err;
+ while (!kthread_should_stop())
+ msleep(20);
+ return 0;
+}
+
+static int on_all_cpus(int (*fn)(void))
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct bstrap bstrap = {
+ .fn = fn,
+ .started = ATOMIC_INIT(0)
+ };
+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+ "hotpotato%d", cpu);
+ int ret;
+
+ if (IS_ERR(k))
+ return PTR_ERR(k);
+ kthread_bind(k, cpu);
+ wake_up_process(k);
+ /*
+ * If we call kthread_stop() before the "wake up" has had an
+ * effect, then the thread may exit with -EINTR without ever
+ * running the function. So poll until it's started before
+ * requesting it to stop.
+ */
+ while (!atomic_read(&bstrap.started))
+ msleep(20);
+ ret = kthread_stop(k);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
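+/*
+ * Dequeue stashing copies whole cachelines, so hp_handler is cacheline
+ * aligned (and allocated from an L1-aligned slab below): the stashed fields,
+ * everything up to frame_ptr, must sit at the start of the stash window.
+ */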
+struct hp_handler {
+
+ /* The following data is stashed when 'rx' is dequeued; */
+ /* -------------- */
+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+ struct qman_fq rx;
+ /* The Tx FQ we should forward to */
+ struct qman_fq tx;
+ /* The value we XOR post-dequeue, prior to validating */
+ u32 rx_mixer;
+ /* The value we XOR pre-enqueue, after validating */
+ u32 tx_mixer;
+ /* what the hotpotato address should be on dequeue */
+ dma_addr_t addr;
+ u32 *frame_ptr;
+
+ /* The following data isn't (necessarily) stashed on dequeue; */
+ /* -------------- */
+ u32 fqid_rx, fqid_tx;
+ /* list node for linking us into 'hp_cpu' */
+ struct list_head node;
+ /* Just to check ... */
+ unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+ /* identify the cpu we run on; */
+ unsigned int processor_id;
+ /* root node for the per-cpu list of handlers */
+ struct list_head handlers;
+ /* list node for linking us into 'hp_cpu_list' */
+ struct list_head node;
+ /*
+ * when repeatedly scanning 'hp_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state
+ */
+ struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-served order. */
+static LIST_HEAD(hp_cpu_list);
+static DEFINE_SPINLOCK(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU 2
+#define HP_LOOPS 8
+/*
+ * 80 words == 320 bytes, like a small ethernet frame, and enough to bleed
+ * past a single cacheline
+ */
+#define HP_NUM_WORDS 80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD 0xabbaf00d
+
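+/*
+ * 32-bit maximal-length Galois LFSR; the 0xd0000001 mask encodes taps at
+ * bits 32, 31, 29 and 1, giving cheap but reproducible per-handler mixers.
+ */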
+static inline u32 do_lfsr(u32 prev)
+{
+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static int allocate_frame_data(void)
+{
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+ struct platform_device *pdev = platform_device_alloc("foobar", -1);
+
+ if (!pdev) {
+ pr_crit("platform_device_alloc() failed\n");
+ return -EIO;
+ }
+ if (platform_device_add(pdev)) {
+ pr_crit("platform_device_add() failed\n");
+ platform_device_put(pdev);
+ return -EIO;
+ }
+ /* over-allocate so the 64-byte alignment below stays in bounds */
+ __frame_ptr = kmalloc(4 * HP_NUM_WORDS + 64, GFP_KERNEL);
+ if (!__frame_ptr) {
+ platform_device_del(pdev);
+ platform_device_put(pdev);
+ return -ENOMEM;
+ }
+
+ frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+ frame_ptr[loop] = lfsr;
+ lfsr = do_lfsr(lfsr);
+ }
+ frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ platform_device_del(pdev);
+ platform_device_put(pdev);
+ return -EIO;
+ }
+ platform_device_del(pdev);
+ platform_device_put(pdev);
+ return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+ kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
+{
+ u32 *p = handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (qm_fd_addr_get64(fd) != handler->addr) {
+ pr_crit("bad frame address");
+ return -EIO;
+ }
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ *p ^= handler->rx_mixer;
+ if (*p != lfsr) {
+ pr_crit("corrupt frame data");
+ return -EIO;
+ }
+ *p ^= handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ /* the rx FQ is the first member of hp_handler, so the cast is safe */
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ if (process_frame_data(handler, &dqrr->fd)) {
+ WARN_ON(1);
+ goto skip;
+ }
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (++loop_counter < HP_LOOPS) {
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ goto skip;
+ }
+ } else {
+ pr_info("Received final (%dth) frame\n", loop_counter);
+ wake_up(&queue);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+ struct hp_handler *handler;
+ int loop;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ hp_cpu->processor_id = smp_processor_id();
+ spin_lock(&hp_lock);
+ list_add_tail(&hp_cpu->node, &hp_cpu_list);
+ hp_cpu_list_length++;
+ spin_unlock(&hp_lock);
+ INIT_LIST_HEAD(&hp_cpu->handlers);
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+ if (!handler) {
+ pr_crit("kmem_cache_alloc() failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ handler->processor_id = hp_cpu->processor_id;
+ handler->addr = frame_dma;
+ handler->frame_ptr = frame_ptr;
+ list_add_tail(&handler->node, &hp_cpu->handlers);
+ }
+ return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+ struct list_head *loop, *tmp;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ spin_lock(&hp_lock);
+ list_del(&hp_cpu->node);
+ spin_unlock(&hp_lock);
+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+ u32 flags = 0;
+ struct hp_handler *handler = list_entry(loop, struct hp_handler,
+ node);
+ if (qman_retire_fq(&handler->rx, &flags) ||
+ (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+ pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+ WARN_ON(1);
+ return -EIO;
+ }
+ if (qman_oos_fq(&handler->rx)) {
+ pr_crit("qman_oos_fq(rx) failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ qman_destroy_fq(&handler->rx);
+ qman_destroy_fq(&handler->tx);
+ qman_release_fqid(handler->fqid_rx);
+ list_del(&handler->node);
+ kmem_cache_free(hp_handler_slab, handler);
+ }
+ return 0;
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+ u8 res = (offset + (L1_CACHE_BYTES - 1)) / L1_CACHE_BYTES;
+
+ if (res > 3)
+ return 3;
+ return res;
+}
+#define STASH_DATA_CL \
+ num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+ num_cachelines(offsetof(struct hp_handler, fqid_rx))
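+
+/*
+ * With 64-byte cachelines, STASH_DATA_CL is 3 (320 bytes of frame data,
+ * clamped to the 3-cacheline stashing maximum enforced above) and
+ * STASH_CTX_CL covers the hp_handler fields declared before fqid_rx.
+ */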
+
+static int init_handler(void *h)
+{
+ struct qm_mcc_initfq opts;
+ struct hp_handler *handler = h;
+ int err;
+
+ if (handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ /* Set up rx */
+ memset(&handler->rx, 0, sizeof(handler->rx));
+ if (handler == special_handler)
+ handler->rx.cb.dqrr = special_dqrr;
+ else
+ handler->rx.cb.dqrr = normal_dqrr;
+ err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+ if (err) {
+ pr_crit("qman_create_fq(rx) failed");
+ goto failed;
+ }
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+ qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+ err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts);
+ if (err) {
+ pr_crit("qman_init_fq(rx) failed");
+ goto failed;
+ }
+ /* Set up tx */
+ memset(&handler->tx, 0, sizeof(handler->tx));
+ err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx);
+ if (err) {
+ pr_crit("qman_create_fq(tx) failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void init_handler_cb(void *h)
+{
+ if (init_handler(h))
+ WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+ int loop;
+ u32 fqid = 0;
+ u32 lfsr = 0xdeadbeef;
+ struct hp_cpu *hp_cpu;
+ struct hp_handler *handler;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ int err;
+
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ /* Rx FQID is the previous handler's Tx FQID */
+ hp_cpu->iterator->fqid_rx = fqid;
+ /* Allocate new FQID for Tx */
+ err = qman_alloc_fqid(&fqid);
+ if (err) {
+ pr_crit("qman_alloc_fqid() failed");
+ return err;
+ }
+ hp_cpu->iterator->fqid_tx = fqid;
+ /* Rx mixer is the previous handler's Tx mixer */
+ hp_cpu->iterator->rx_mixer = lfsr;
+ /* Get new mixer for Tx */
+ lfsr = do_lfsr(lfsr);
+ hp_cpu->iterator->tx_mixer = lfsr;
+ }
+ }
+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+ if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+ return -EINVAL;
+ handler->fqid_rx = fqid;
+ handler->rx_mixer = lfsr;
+ /* and tag it as our "special" handler */
+ special_handler = handler;
+ return 0;
+}
+
+static int init_phase3(void)
+{
+ int loop, err;
+ struct hp_cpu *hp_cpu;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ preempt_disable();
+ if (hp_cpu->processor_id == smp_processor_id()) {
+ err = init_handler(hp_cpu->iterator);
+ if (err)
+ return err;
+ } else {
+ smp_call_function_single(hp_cpu->processor_id,
+ init_handler_cb, hp_cpu->iterator, 1);
+ }
+ preempt_enable();
+ }
+ }
+ return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+ u32 *p = special_handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop, err;
+ struct qm_fd fd;
+
+ if (special_handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ memset(&fd, 0, sizeof(fd));
+ qm_fd_addr_set64(&fd, special_handler->addr);
+ qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ if (*p != lfsr) {
+ err = -EIO;
+ pr_crit("corrupt frame data");
+ goto failed;
+ }
+ *p ^= special_handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ pr_info("Sending first frame\n");
+ err = qman_enqueue(&special_handler->tx, &fd);
+ if (err) {
+ pr_crit("qman_enqueue() failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+ if (send_first_frame(NULL))
+ WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+ int err;
+
+ if (cpumask_weight(cpu_online_mask) < 2) {
+ pr_info("%s(): skip - only 1 CPU\n", __func__);
+ return 0;
+ }
+
+ pr_info("%s(): Starting\n", __func__);
+
+ hp_cpu_list_length = 0;
+ loop_counter = 0;
+ hp_handler_slab = kmem_cache_create("hp_handler_slab",
+ sizeof(struct hp_handler), L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!hp_handler_slab) {
+ err = -EIO;
+ pr_crit("kmem_cache_create() failed");
+ goto failed;
+ }
+
+ err = allocate_frame_data();
+ if (err)
+ goto failed;
+
+ /* Init phase 1 */
+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+ if (on_all_cpus(create_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_all_cpus() failed\n");
+ goto failed;
+ }
+ pr_info("Number of cpus: %d, total of %d handlers\n",
+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+ err = init_phase2();
+ if (err)
+ goto failed;
+
+ err = init_phase3();
+ if (err)
+ goto failed;
+
+ preempt_disable();
+ if (special_handler->processor_id == smp_processor_id()) {
+ err = send_first_frame(NULL);
+ if (err)
+ goto failed;
+ } else {
+ smp_call_function_single(special_handler->processor_id,
+ send_first_frame_cb, NULL, 1);
+ }
+ preempt_enable();
+
+ wait_event(queue, loop_counter == HP_LOOPS);
+ deallocate_frame_data();
+ if (on_all_cpus(destroy_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_all_cpus() failed\n");
+ goto failed;
+ }
+ kmem_cache_destroy(hp_handler_slab);
+ pr_info("%s(): Finished\n", __func__);
+
+ return 0;
+failed:
+ WARN_ON(1);
+ return err;
+}
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 333eb2215a57..0aaf429f31d5 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,7 +41,8 @@ struct qe_gpio_chip {
static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
- struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc);
+ struct qe_gpio_chip *qe_gc =
+ container_of(mm_gc, struct qe_gpio_chip, mm_gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
qe_gc->cpdata = in_be32(&regs->cpdata);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 7026507e6f1d..2707a827261b 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -69,8 +69,8 @@ static phys_addr_t qebase = -1;
phys_addr_t get_qe_base(void)
{
struct device_node *qe;
- int size;
- const u32 *prop;
+ int ret;
+ struct resource res;
if (qebase != -1)
return qebase;
@@ -82,9 +82,9 @@ phys_addr_t get_qe_base(void)
return qebase;
}
- prop = of_get_property(qe, "reg", &size);
- if (prop && size >= sizeof(*prop))
- qebase = of_translate_address(qe, prop);
+ ret = of_address_to_resource(qe, 0, &res);
+ if (!ret)
+ qebase = res.start;
of_node_put(qe);
return qebase;
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 41eff805a904..104e68d9b84f 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -70,6 +70,11 @@ int cpm_muram_init(void)
}
muram_pool = gen_pool_create(0, -1);
+ if (!muram_pool) {
+ pr_err("Cannot allocate memory pool for CPM/QE muram");
+ ret = -ENOMEM;
+ goto out_muram;
+ }
muram_pbase = of_translate_address(np, zero);
if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
pr_err("Cannot translate zero through CPM muram node");
@@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size,
struct muram_block *entry;
unsigned long start;
+ if (!muram_pool && cpm_muram_init())
+ goto out2;
+
start = gen_pool_alloc_algo(muram_pool, size, algo, data);
if (!start)
goto out2;
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index 5e48b1470178..a1048b44e6b9 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -99,7 +99,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
utdm->tdm_port = val;
ut_info->uf_info.tdm_num = utdm->tdm_port;
- if (of_get_property(np, "fsl,tdm-internal-loopback", NULL))
+ if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
else
utdm->tdm_mode = TDM_NORMAL;
@@ -167,7 +167,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
}
if (siram_init_flag == 0) {
- memset_io(utdm->siram, 0, res->end - res->start + 1);
+ memset_io(utdm->siram, 0, resource_size(res));
siram_init_flag = 1;
}
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index a003ba26ca6e..a5f10936fb9c 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -583,7 +583,7 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
{
unsigned long timeout;
- timeout = jiffies + usecs_to_jiffies(255);
+ timeout = jiffies + usecs_to_jiffies(10000);
do {
if (time_after(jiffies, timeout))
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index ac1957dfdf24..322034ab9d37 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -95,7 +95,7 @@ static const struct {
/**
* struct qcom_smd_edge - representing a remote processor
- * @smd: handle to qcom_smd
+ * @dev: device for this edge
* @of_node: of_node handle for information related to this edge
* @edge_id: identifier of this edge
* @remote_pid: identifier of remote processor
@@ -111,7 +111,8 @@ static const struct {
* @state_work: work item for edge state changes
*/
struct qcom_smd_edge {
- struct qcom_smd *smd;
+ struct device dev;
+
struct device_node *of_node;
unsigned edge_id;
unsigned remote_pid;
@@ -135,6 +136,8 @@ struct qcom_smd_edge {
struct work_struct state_work;
};
+#define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev)
+
/*
* SMD channel states.
*/
@@ -197,20 +200,6 @@ struct qcom_smd_channel {
void *drvdata;
struct list_head list;
- struct list_head dev_list;
-};
-
-/**
- * struct qcom_smd - smd struct
- * @dev: device struct
- * @num_edges: number of entries in @edges
- * @edges: array of edges to be handled
- */
-struct qcom_smd {
- struct device *dev;
-
- unsigned num_edges;
- struct qcom_smd_edge edges[0];
};
/*
@@ -374,7 +363,7 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
SET_TX_CHANNEL_INFO(channel, head, 0);
- SET_TX_CHANNEL_INFO(channel, tail, 0);
+ SET_RX_CHANNEL_INFO(channel, tail, 0);
qcom_smd_signal_channel(channel);
@@ -421,7 +410,7 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
if (channel->state == state)
return;
- dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
+ dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state);
SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
@@ -891,8 +880,6 @@ static int qcom_smd_dev_remove(struct device *dev)
struct qcom_smd_device *qsdev = to_smd_device(dev);
struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
struct qcom_smd_channel *channel = qsdev->channel;
- struct qcom_smd_channel *tmp;
- struct qcom_smd_channel *ch;
qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
@@ -911,15 +898,9 @@ static int qcom_smd_dev_remove(struct device *dev)
if (qsdrv->remove)
qsdrv->remove(qsdev);
- /*
- * The client is now gone, close and release all channels associated
- * with this sdev
- */
- list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) {
- qcom_smd_channel_close(ch);
- list_del(&ch->dev_list);
- ch->qsdev = NULL;
- }
+ /* The client is now gone, close the primary channel */
+ qcom_smd_channel_close(channel);
+ channel->qsdev = NULL;
return 0;
}
@@ -973,13 +954,12 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
struct qcom_smd_device *qsdev;
struct qcom_smd_edge *edge = channel->edge;
struct device_node *node;
- struct qcom_smd *smd = edge->smd;
int ret;
if (channel->qsdev)
return -EEXIST;
- dev_dbg(smd->dev, "registering '%s'\n", channel->name);
+ dev_dbg(&edge->dev, "registering '%s'\n", channel->name);
qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
if (!qsdev)
@@ -990,7 +970,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
edge->of_node->name,
node ? node->name : channel->name);
- qsdev->dev.parent = smd->dev;
+ qsdev->dev.parent = &edge->dev;
qsdev->dev.bus = &qcom_smd_bus;
qsdev->dev.release = qcom_smd_release_device;
qsdev->dev.of_node = node;
@@ -1001,7 +981,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
ret = device_register(&qsdev->dev);
if (ret) {
- dev_err(smd->dev, "device_register failed: %d\n", ret);
+ dev_err(&edge->dev, "device_register failed: %d\n", ret);
put_device(&qsdev->dev);
}
@@ -1091,6 +1071,8 @@ qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
*
* Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't
* ready.
+ *
+ * Any channels returned must be closed with a call to qcom_smd_close_channel()
*/
struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
const char *name,
@@ -1120,15 +1102,21 @@ struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
return ERR_PTR(ret);
}
- /*
- * Append the list of channel to the channels associated with the sdev
- */
- list_add_tail(&channel->dev_list, &sdev->channel->dev_list);
-
return channel;
}
EXPORT_SYMBOL(qcom_smd_open_channel);
+/**
+ * qcom_smd_close_channel() - close an additionally opened channel
+ * @channel: channel handle, returned by qcom_smd_open_channel()
+ */
+void qcom_smd_close_channel(struct qcom_smd_channel *channel)
+{
+ qcom_smd_channel_close(channel);
+ channel->qsdev = NULL;
+}
+EXPORT_SYMBOL(qcom_smd_close_channel);
+
/*
* Allocate the qcom_smd_channel object for a newly found smd channel,
* retrieving and validating the smem items involved.
@@ -1139,20 +1127,18 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
char *name)
{
struct qcom_smd_channel *channel;
- struct qcom_smd *smd = edge->smd;
size_t fifo_size;
size_t info_size;
void *fifo_base;
void *info;
int ret;
- channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
+ channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL);
if (!channel)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&channel->dev_list);
channel->edge = edge;
- channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
+ channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL);
if (!channel->name)
return ERR_PTR(-ENOMEM);
@@ -1175,7 +1161,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
channel->info = info;
} else {
- dev_err(smd->dev,
+ dev_err(&edge->dev,
"channel info of size %zu not supported\n", info_size);
ret = -EINVAL;
goto free_name_and_channel;
@@ -1190,7 +1176,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
/* The channel consist of a rx and tx fifo of equal size */
fifo_size /= 2;
- dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
+ dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
name, info_size, fifo_size);
channel->tx_fifo = fifo_base;
@@ -1202,8 +1188,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
return channel;
free_name_and_channel:
- devm_kfree(smd->dev, channel->name);
- devm_kfree(smd->dev, channel);
+ devm_kfree(&edge->dev, channel->name);
+ devm_kfree(&edge->dev, channel);
return ERR_PTR(ret);
}
@@ -1219,7 +1205,6 @@ static void qcom_channel_scan_worker(struct work_struct *work)
struct qcom_smd_alloc_entry *alloc_tbl;
struct qcom_smd_alloc_entry *entry;
struct qcom_smd_channel *channel;
- struct qcom_smd *smd = edge->smd;
unsigned long flags;
unsigned fifo_id;
unsigned info_id;
@@ -1263,7 +1248,7 @@ static void qcom_channel_scan_worker(struct work_struct *work)
list_add(&channel->list, &edge->channels);
spin_unlock_irqrestore(&edge->channels_lock, flags);
- dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
+ dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name);
set_bit(i, edge->allocated[tbl]);
wake_up_interruptible(&edge->new_channel_event);
@@ -1350,22 +1335,6 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->of_node = of_node_get(node);
- irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
- dev_err(dev, "required smd interrupt missing\n");
- return -EINVAL;
- }
-
- ret = devm_request_irq(dev, irq,
- qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
- node->name, edge);
- if (ret) {
- dev_err(dev, "failed to request smd irq\n");
- return ret;
- }
-
- edge->irq = irq;
-
key = "qcom,smd-edge";
ret = of_property_read_u32(node, key, &edge->edge_id);
if (ret) {
@@ -1400,18 +1369,121 @@ static int qcom_smd_parse_edge(struct device *dev,
return -EINVAL;
}
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ dev_err(dev, "required smd interrupt missing\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, irq,
+ qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
+ node->name, edge);
+ if (ret) {
+ dev_err(dev, "failed to request smd irq\n");
+ return ret;
+ }
+
+ edge->irq = irq;
+
return 0;
}
-static int qcom_smd_probe(struct platform_device *pdev)
+/*
+ * Release function for an edge.
+ * Reset the state of each associated channel and free the edge context.
+ */
+static void qcom_smd_edge_release(struct device *dev)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge = to_smd_edge(dev);
+
+ list_for_each_entry(channel, &edge->channels, list) {
+ SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+ SET_RX_CHANNEL_INFO(channel, head, 0);
+ SET_RX_CHANNEL_INFO(channel, tail, 0);
+ }
+
+ kfree(edge);
+}
+
+/**
+ * qcom_smd_register_edge() - register an edge based on a device_node
+ * @parent: parent device for the edge
+ * @node: device_node describing the edge
+ *
+ * Returns an edge reference, or negative ERR_PTR() on failure.
+ */
+struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
+ struct device_node *node)
{
struct qcom_smd_edge *edge;
- struct device_node *node;
- struct qcom_smd *smd;
- size_t array_size;
- int num_edges;
int ret;
- int i = 0;
+
+ edge = kzalloc(sizeof(*edge), GFP_KERNEL);
+ if (!edge)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&edge->new_channel_event);
+
+ edge->dev.parent = parent;
+ edge->dev.release = qcom_smd_edge_release;
+ dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name);
+ ret = device_register(&edge->dev);
+ if (ret) {
+ pr_err("failed to register smd edge\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = qcom_smd_parse_edge(&edge->dev, node, edge);
+ if (ret) {
+ dev_err(&edge->dev, "failed to parse smd edge\n");
+ goto unregister_dev;
+ }
+
+ schedule_work(&edge->scan_work);
+
+ return edge;
+
+unregister_dev:
+ put_device(&edge->dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(qcom_smd_register_edge);
+
+static int qcom_smd_remove_device(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ of_node_put(dev->of_node);
+ put_device(dev);
+
+ return 0;
+}
+
+/**
+ * qcom_smd_unregister_edge() - release an edge and its children
+ * @edge: edge reference acquired from qcom_smd_register_edge
+ */
+int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+{
+ int ret;
+
+ disable_irq(edge->irq);
+ cancel_work_sync(&edge->scan_work);
+ cancel_work_sync(&edge->state_work);
+
+ ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device);
+ if (ret)
+ dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);
+
+ device_unregister(&edge->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_smd_unregister_edge);
+
+static int qcom_smd_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
void *p;
/* Wait for smem */
@@ -1419,29 +1491,17 @@ static int qcom_smd_probe(struct platform_device *pdev)
if (PTR_ERR(p) == -EPROBE_DEFER)
return PTR_ERR(p);
- num_edges = of_get_available_child_count(pdev->dev.of_node);
- array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
- smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
- if (!smd)
- return -ENOMEM;
- smd->dev = &pdev->dev;
-
- smd->num_edges = num_edges;
- for_each_available_child_of_node(pdev->dev.of_node, node) {
- edge = &smd->edges[i++];
- edge->smd = smd;
- init_waitqueue_head(&edge->new_channel_event);
-
- ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
- if (ret)
- continue;
+ for_each_available_child_of_node(pdev->dev.of_node, node)
+ qcom_smd_register_edge(&pdev->dev, node);
- schedule_work(&edge->scan_work);
- }
+ return 0;
+}
- platform_set_drvdata(pdev, smd);
+static int qcom_smd_remove_edge(struct device *dev, void *data)
+{
+ struct qcom_smd_edge *edge = to_smd_edge(dev);
- return 0;
+ return qcom_smd_unregister_edge(edge);
}
/*
@@ -1450,28 +1510,13 @@ static int qcom_smd_probe(struct platform_device *pdev)
*/
static int qcom_smd_remove(struct platform_device *pdev)
{
- struct qcom_smd_channel *channel;
- struct qcom_smd_edge *edge;
- struct qcom_smd *smd = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < smd->num_edges; i++) {
- edge = &smd->edges[i];
-
- disable_irq(edge->irq);
- cancel_work_sync(&edge->scan_work);
- cancel_work_sync(&edge->state_work);
-
- /* No need to lock here, because the writer is gone */
- list_for_each_entry(channel, &edge->channels, list) {
- if (!channel->qsdev)
- continue;
+ int ret;
- qcom_smd_destroy_device(channel);
- }
- }
+ ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
+ if (ret)
+ dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret);
- return 0;
+ return ret;
}
static const struct of_device_id qcom_smd_of_match[] = {
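
For context, a minimal usage sketch of the edge API exported above; the
example_* driver and its "smd-edge" child-node name are hypothetical and
error handling is abbreviated, so this only illustrates the
register/unregister pairing:

        #include <linux/err.h>
        #include <linux/of.h>
        #include <linux/platform_device.h>
        #include <linux/soc/qcom/smd.h>

        static struct qcom_smd_edge *example_edge;

        static int example_probe(struct platform_device *pdev)
        {
                struct device_node *node;

                /* hypothetical child node describing the remote edge */
                node = of_get_child_by_name(pdev->dev.of_node, "smd-edge");
                if (!node)
                        return -ENODEV;

                example_edge = qcom_smd_register_edge(&pdev->dev, node);
                of_node_put(node);

                return PTR_ERR_OR_ZERO(example_edge);
        }

        static int example_remove(struct platform_device *pdev)
        {
                return qcom_smd_unregister_edge(example_edge);
        }
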
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 2e1aa9f130f4..18ec52f2078a 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -740,7 +740,8 @@ static int qcom_smem_probe(struct platform_device *pdev)
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
if (hwlock_id < 0) {
- dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ if (hwlock_id != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
return hwlock_id;
}
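
The hunk above follows the usual probe-deferral convention: -EPROBE_DEFER is
an expected, transient condition, so it must not be logged as an error. A
generic sketch of the pattern (get_some_resource() is a hypothetical getter;
later kernels wrap this idiom in dev_err_probe()):

        ret = get_some_resource(dev);
        if (ret < 0) {
                /* deferral is routine; only report real failures */
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to get resource: %d\n", ret);
                return ret;
        }
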
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 44842a205e4b..7acd1517dd37 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -27,6 +27,7 @@ struct rockchip_domain_info {
int req_mask;
int idle_mask;
int ack_mask;
+ bool active_wakeup;
};
struct rockchip_pmu_info {
@@ -75,23 +76,24 @@ struct rockchip_pmu {
#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
-#define DOMAIN(pwr, status, req, idle, ack) \
+#define DOMAIN(pwr, status, req, idle, ack, wakeup) \
{ \
.pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
.status_mask = (status >= 0) ? BIT(status) : 0, \
.req_mask = (req >= 0) ? BIT(req) : 0, \
.idle_mask = (idle >= 0) ? BIT(idle) : 0, \
.ack_mask = (ack >= 0) ? BIT(ack) : 0, \
+ .active_wakeup = wakeup, \
}
-#define DOMAIN_RK3288(pwr, status, req) \
- DOMAIN(pwr, status, req, req, (req) + 16)
+#define DOMAIN_RK3288(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, req, (req) + 16, wakeup)
-#define DOMAIN_RK3368(pwr, status, req) \
- DOMAIN(pwr, status, req, (req) + 16, req)
+#define DOMAIN_RK3368(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, (req) + 16, req, wakeup)
-#define DOMAIN_RK3399(pwr, status, req) \
- DOMAIN(pwr, status, req, req, req)
+#define DOMAIN_RK3399(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, req, req, wakeup)
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
@@ -295,6 +297,17 @@ static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
pm_clk_destroy(dev);
}
+static bool rockchip_active_wakeup(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ struct rockchip_pm_domain *pd;
+
+ genpd = pd_to_genpd(dev->pm_domain);
+ pd = container_of(genpd, struct rockchip_pm_domain, genpd);
+
+ return pd->info->active_wakeup;
+}
+
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
struct device_node *node)
{
@@ -415,6 +428,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
pd->genpd.detach_dev = rockchip_pd_detach_dev;
+ pd->genpd.dev_ops.active_wakeup = rockchip_active_wakeup;
pd->genpd.flags = GENPD_FLAG_PM_CLK;
pm_genpd_init(&pd->genpd, NULL, false);
@@ -623,48 +637,48 @@ err_out:
}
static const struct rockchip_domain_info rk3288_pm_domains[] = {
- [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4),
- [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9),
- [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3),
- [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2),
+ [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4, false),
+ [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9, false),
+ [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3, false),
+ [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2, false),
};
static const struct rockchip_domain_info rk3368_pm_domains[] = {
- [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6),
- [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8),
- [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7),
- [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2),
- [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2),
+ [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6, true),
+ [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8, false),
+ [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7, false),
+ [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2, false),
+ [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2, false),
};
static const struct rockchip_domain_info rk3399_pm_domains[] = {
- [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1),
- [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1),
- [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1),
- [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15),
- [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16),
- [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1),
- [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2),
- [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14),
- [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17),
- [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0),
- [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3),
- [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4),
- [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5),
- [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6),
- [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1),
- [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7),
- [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8),
- [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9),
- [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10),
- [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11),
- [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23),
- [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24),
- [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12),
- [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22),
- [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27),
- [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28),
- [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29),
+ [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1, false),
+ [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1, false),
+ [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1, true),
+ [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15, true),
+ [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16, true),
+ [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1, true),
+ [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2, true),
+ [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14, true),
+ [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17, false),
+ [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0, false),
+ [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3, false),
+ [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4, false),
+ [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5, false),
+ [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6, false),
+ [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1, false),
+ [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7, false),
+ [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8, false),
+ [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9, false),
+ [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10, false),
+ [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11, false),
+ [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23, true),
+ [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24, true),
+ [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12, true),
+ [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22, false),
+ [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27, true),
+ [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28, true),
+ [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29, true),
};
static const struct rockchip_pmu_info rk3288_pmu = {
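
As a worked example of the widened macro: the RK3399 GPU entry above,
DOMAIN_RK3399(15, 15, 0, false), goes through DOMAIN(15, 15, 0, 0, 0, false)
and expands to the initializer below; entries passing -1 (e.g.
RK3399_PD_TCPD0) simply get a zero mask for that field.

        {
                .pwr_mask      = BIT(15),
                .status_mask   = BIT(15),
                .req_mask      = BIT(0),
                .idle_mask     = BIT(0),
                .ack_mask      = BIT(0),
                .active_wakeup = false,
        }
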
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 71c834f3847e..7792ed88d80b 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -967,8 +967,8 @@ static void tegra_io_rail_unprepare(void)
int tegra_io_rail_power_on(unsigned int id)
{
- unsigned long request, status, value;
- unsigned int bit, mask;
+ unsigned long request, status;
+ unsigned int bit;
int err;
mutex_lock(&pmc->powergates_lock);
@@ -977,15 +977,9 @@ int tegra_io_rail_power_on(unsigned int id)
if (err)
goto error;
- mask = 1 << bit;
+ tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request);
- value = tegra_pmc_readl(request);
- value |= mask;
- value &= ~IO_DPD_REQ_CODE_MASK;
- value |= IO_DPD_REQ_CODE_OFF;
- tegra_pmc_writel(value, request);
-
- err = tegra_io_rail_poll(status, mask, 0, 250);
+ err = tegra_io_rail_poll(status, BIT(bit), 0, 250);
if (err) {
pr_info("tegra_io_rail_poll() failed: %d\n", err);
goto error;
@@ -1002,8 +996,8 @@ EXPORT_SYMBOL(tegra_io_rail_power_on);
int tegra_io_rail_power_off(unsigned int id)
{
- unsigned long request, status, value;
- unsigned int bit, mask;
+ unsigned long request, status;
+ unsigned int bit;
int err;
mutex_lock(&pmc->powergates_lock);
@@ -1014,15 +1008,9 @@ int tegra_io_rail_power_off(unsigned int id)
goto error;
}
- mask = 1 << bit;
-
- value = tegra_pmc_readl(request);
- value |= mask;
- value &= ~IO_DPD_REQ_CODE_MASK;
- value |= IO_DPD_REQ_CODE_ON;
- tegra_pmc_writel(value, request);
+ tegra_pmc_writel(IO_DPD_REQ_CODE_ON | BIT(bit), request);
- err = tegra_io_rail_poll(status, mask, mask, 250);
+ err = tegra_io_rail_poll(status, BIT(bit), BIT(bit), 250);
if (err)
goto error;
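
The mask/value contract of tegra_io_rail_poll(), as read from the two call
sites above, is "wait until (read(status) & mask) == value". A hypothetical
stand-alone sketch of such a helper (poll_masked() is not part of the patch):

        static int poll_masked(unsigned long reg, u32 mask, u32 value,
                               unsigned long timeout_ms)
        {
                unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

                while (time_before(jiffies, deadline)) {
                        if ((tegra_pmc_readl(reg) & mask) == value)
                                return 0;
                        usleep_range(100, 250);
                }
                return -ETIMEDOUT;
        }
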
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 35c0dd945668..a67b0ff6a362 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -70,6 +70,7 @@
#define SPI_SR 0x2c
#define SPI_SR_EOQF 0x10000000
#define SPI_SR_TCFQF 0x80000000
+#define SPI_SR_CLEAR 0xdaad0000
#define SPI_RSER 0x30
#define SPI_RSER_EOQFE 0x10000000
@@ -646,6 +647,11 @@ static const struct regmap_config dspi_regmap_config = {
.max_register = 0x88,
};
+static void dspi_init(struct fsl_dspi *dspi)
+{
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+}
+
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -709,6 +715,7 @@ static int dspi_probe(struct platform_device *pdev)
return PTR_ERR(dspi->regmap);
}
+ dspi_init(dspi);
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
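
A note on SPI_SR_CLEAR: the DSPI status register is write-1-to-clear, so
writing 0xdaad0000 in dspi_init() acknowledges any stale events (EOQF, TCFQF
and friends) left over from the bootloader before the IRQ is requested. A
single flag can be acknowledged the same way, assuming the same w1c
semantics:

        /* ack only a completed end-of-queue event */
        regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
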
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7451585a080e..2c175b9495f7 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -458,7 +458,7 @@ static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
mspi->len -= rx_nr_bytes;
- if (mspi->rx)
+ if (rx_nr_bytes && mspi->rx)
mspi->get_rx(rx_data, mspi);
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8146ccd35a1a..838783c3fed0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1112,7 +1112,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
/* If another context is idling the device then defer */
if (master->idling) {
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return;
}
@@ -1126,7 +1126,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
/* Only do teardown in the thread */
if (!in_kthread) {
- queue_kthread_work(&master->kworker,
+ kthread_queue_work(&master->kworker,
&master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return;
@@ -1250,7 +1250,7 @@ static int spi_init_queue(struct spi_master *master)
master->running = false;
master->busy = false;
- init_kthread_worker(&master->kworker);
+ kthread_init_worker(&master->kworker);
master->kworker_task = kthread_run(kthread_worker_fn,
&master->kworker, "%s",
dev_name(&master->dev));
@@ -1258,7 +1258,7 @@ static int spi_init_queue(struct spi_master *master)
dev_err(&master->dev, "failed to create message pump task\n");
return PTR_ERR(master->kworker_task);
}
- init_kthread_work(&master->pump_messages, spi_pump_messages);
+ kthread_init_work(&master->pump_messages, spi_pump_messages);
/*
* Master config will indicate if this controller should run the
@@ -1331,7 +1331,7 @@ void spi_finalize_current_message(struct spi_master *master)
spin_lock_irqsave(&master->queue_lock, flags);
master->cur_msg = NULL;
master->cur_msg_prepared = false;
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
trace_spi_message_done(mesg);
@@ -1357,7 +1357,7 @@ static int spi_start_queue(struct spi_master *master)
master->cur_msg = NULL;
spin_unlock_irqrestore(&master->queue_lock, flags);
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
return 0;
}
@@ -1404,7 +1404,7 @@ static int spi_destroy_queue(struct spi_master *master)
ret = spi_stop_queue(master);
/*
- * flush_kthread_worker will block until all work is done.
+ * kthread_flush_worker will block until all work is done.
* If the reason that stop_queue timed out is that the work will never
* finish, then it does no good to call flush/stop thread, so
* return anyway.
@@ -1414,7 +1414,7 @@ static int spi_destroy_queue(struct spi_master *master)
return ret;
}
- flush_kthread_worker(&master->kworker);
+ kthread_flush_worker(&master->kworker);
kthread_stop(master->kworker_task);
return 0;
@@ -1438,7 +1438,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
list_add_tail(&msg->queue, &master->queue);
if (!master->busy && need_pump)
- queue_kthread_work(&master->kworker, &master->pump_messages);
+ kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return 0;
@@ -1618,9 +1618,11 @@ static void of_register_spi_devices(struct spi_master *master)
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
spi = of_register_spi_device(master, nc);
- if (IS_ERR(spi))
+ if (IS_ERR(spi)) {
dev_warn(&master->dev, "Failed to create SPI device for %s\n",
nc->full_name);
+ of_node_clear_flag(nc, OF_POPULATED);
+ }
}
}
#else
@@ -3131,6 +3133,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(spi)) {
pr_err("%s: failed to create for '%s'\n",
__func__, rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(spi));
}
break;
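
The spi.c hunks above are a mechanical rename to the namespaced
kthread-worker API (init_kthread_worker() became kthread_init_worker(), and
so on). A self-contained sketch of the renamed API in the shape of the
message pump (the pump_* names are illustrative):

        #include <linux/err.h>
        #include <linux/kthread.h>

        static struct kthread_worker pump_worker;
        static struct kthread_work pump_work;
        static struct task_struct *pump_task;

        static void pump_fn(struct kthread_work *work)
        {
                /* drain the message queue here */
        }

        static int pump_init(void)
        {
                kthread_init_worker(&pump_worker);
                pump_task = kthread_run(kthread_worker_fn, &pump_worker, "pump");
                if (IS_ERR(pump_task))
                        return PTR_ERR(pump_task);
                kthread_init_work(&pump_work, pump_fn);
                return 0;
        }

        static void pump_exit(void)
        {
                kthread_queue_work(&pump_worker, &pump_work);
                kthread_flush_worker(&pump_worker); /* blocks until all work is done */
                kthread_stop(pump_task);
        }
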
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 396ded52ab70..209a8f7ef02b 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1187,8 +1187,10 @@ int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
hdata.type = heap->type;
hdata.heap_id = heap->id;
- ret = copy_to_user(&buffer[cnt],
- &hdata, sizeof(hdata));
+ if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
+ ret = -EFAULT;
+ goto out;
+ }
cnt++;
if (cnt >= max_cnt)
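
The fix above follows from copy_to_user()'s contract: it returns the number
of bytes that could not be copied, not an errno, so the old code handed a raw
byte count back to userspace. The canonical form:

        /* any nonzero return means a (partial) copy failure */
        if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata)))
                return -EFAULT;
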
diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c
index 15bac92b7f04..46b2bb99bfd6 100644
--- a/drivers/staging/android/ion/ion_of.c
+++ b/drivers/staging/android/ion/ion_of.c
@@ -107,7 +107,7 @@ struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
heap_pdev = of_platform_device_create(node, heaps[i].name,
&pdev->dev);
- if (!pdev)
+ if (!heap_pdev)
return ERR_PTR(-ENOMEM);
heap_pdev->dev.platform_data = &heaps[i];
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 7043eb0543f6..5ab49a798164 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -207,7 +207,8 @@ static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
* clock period is specified by user with prescaling
* already taken into account.
*/
- return counter->clock_period_ps;
+ *period_ps = counter->clock_period_ps;
+ return 0;
}
switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index e36ee984485b..d33d6fe078ad 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -128,6 +128,7 @@ int arche_platform_change_state(enum arche_platform_state state,
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("arche-platform device not found\n");
+ of_node_put(np);
return -ENODEV;
}
@@ -185,6 +186,7 @@ int arche_platform_change_state(enum arche_platform_state state,
exit:
spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
mutex_unlock(&arche_pdata->platform_state_mutex);
+ put_device(&pdev->dev);
of_node_put(np);
return ret;
}
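
Both hunks above plug reference leaks: of_find_device_by_node() returns a
platform device with an elevated reference count, and the node was looked up
earlier, so every exit path must drop both. A condensed sketch of the
balanced pattern (the "/example" path is hypothetical):

        struct device_node *np = of_find_node_by_path("/example");
        struct platform_device *pdev;

        if (!np)
                return -ENODEV;

        pdev = of_find_device_by_node(np);      /* takes a device reference */
        if (!pdev) {
                of_node_put(np);
                return -ENODEV;
        }

        /* ... use pdev ... */

        put_device(&pdev->dev);                 /* balances the lookup above */
        of_node_put(np);
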
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index 071bb1cfd3ae..baab460eeaa3 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -1548,7 +1548,8 @@ static int ap_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&es2->arpcs);
spin_lock_init(&es2->arpc_lock);
- if (es2_arpc_in_enable(es2))
+ retval = es2_arpc_in_enable(es2);
+ if (retval)
goto error;
retval = gb_hd_add(hd);
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 5e06e4229e42..250caa00de5e 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -702,15 +702,13 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
ret = gb_gpio_irqchip_add(gpio, irqc, 0,
handle_level_irq, IRQ_TYPE_NONE);
if (ret) {
- dev_err(&connection->bundle->dev,
- "failed to add irq chip: %d\n", ret);
+ dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
goto exit_line_free;
}
ret = gpiochip_add(gpio);
if (ret) {
- dev_err(&connection->bundle->dev,
- "failed to add gpio chip: %d\n", ret);
+ dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
goto exit_gpio_irqchip_remove;
}
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
index 69f67ddbd4a3..660b4674a76f 100644
--- a/drivers/staging/greybus/module.c
+++ b/drivers/staging/greybus/module.c
@@ -127,7 +127,7 @@ struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
return module;
err_put_interfaces:
- for (--i; i > 0; --i)
+ for (--i; i >= 0; --i)
gb_interface_put(module->interfaces[i]);
put_device(&module->dev);
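
The off-by-one fix above matters because a failure at index i leaves entries
0..i-1 holding references, and the old "i > 0" bound skipped
module->interfaces[0]. The generic unwind shape (acquire()/release() are
hypothetical):

        for (i = 0; i < num; i++) {
                objs[i] = acquire(i);
                if (!objs[i])
                        goto err_unwind;
        }
        return 0;

        err_unwind:
                while (--i >= 0)        /* equivalent to: for (--i; i >= 0; --i) */
                        release(objs[i]);
                return -ENOMEM;
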
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 5ee7954bd9f9..2633d2bfb1b4 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -888,7 +888,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
minor = alloc_minor(gb_tty);
if (minor < 0) {
if (minor == -ENOSPC) {
- dev_err(&connection->bundle->dev,
+ dev_err(&gbphy_dev->dev,
"no more free minor numbers\n");
retval = -ENODEV;
} else {
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index d626125d7af9..564b36d4f648 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -468,6 +468,8 @@ static inline int __sca3000_get_base_freq(struct sca3000_state *st,
case SCA3000_MEAS_MODE_OP_2:
*base_freq = info->option_mode_2_freq;
break;
+ default:
+ ret = -EINVAL;
}
error_ret:
return ret;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 5eecf1cb1028..3892a7470410 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -655,6 +655,7 @@ static void ad5933_work(struct work_struct *work)
__be16 buf[2];
int val[2];
unsigned char status;
+ int ret;
mutex_lock(&indio_dev->mlock);
if (st->state == AD5933_CTRL_INIT_START_FREQ) {
@@ -662,19 +663,22 @@ static void ad5933_work(struct work_struct *work)
ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
st->state = AD5933_CTRL_START_SWEEP;
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}
- ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ if (ret)
+ goto out;
if (status & AD5933_STAT_DATA_VALID) {
int scan_count = bitmap_weight(indio_dev->active_scan_mask,
indio_dev->masklength);
- ad5933_i2c_read(st->client,
+ ret = ad5933_i2c_read(st->client,
test_bit(1, indio_dev->active_scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
scan_count * 2, (u8 *)buf);
+ if (ret)
+ goto out;
if (scan_count == 2) {
val[0] = be16_to_cpu(buf[0]);
@@ -686,8 +690,7 @@ static void ad5933_work(struct work_struct *work)
} else {
/* no data available - try again later */
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}
if (status & AD5933_STAT_SWEEP_DONE) {
@@ -700,7 +703,7 @@ static void ad5933_work(struct work_struct *work)
ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
schedule_delayed_work(&st->work, st->poll_time_jiffies);
}
-
+out:
mutex_unlock(&indio_dev->mlock);
}
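
The ad5933_work() rework above is the classic single-exit pattern: once the
new I2C error checks can bail out at several points, funnelling them through
one label guarantees indio_dev->mlock is always released. The shape:

        mutex_lock(&indio_dev->mlock);

        ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
        if (ret)
                goto out;       /* every early exit funnels here */

        /* ... normal processing ... */
        out:
                mutex_unlock(&indio_dev->mlock);
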
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
index 2b5930150cda..13b43278a38d 100644
--- a/drivers/staging/lustre/lnet/Kconfig
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -35,6 +35,7 @@ config LNET_SELFTEST
config LNET_XPRT_IB
tristate "LNET infiniband support"
depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
+ depends on BROKEN
default LNET && INFINIBAND
help
This option allows the LNET users to use infiniband as an
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index c7a5d49e487f..9e8802181452 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -2467,7 +2467,7 @@ int kiblnd_dev_failover(struct kib_dev *dev)
hdev->ibh_cmid = cmid;
hdev->ibh_ibdev = cmid->device;
- pd = ib_alloc_pd(cmid->device);
+ pd = ib_alloc_pd(cmid->device, 0);
if (IS_ERR(pd)) {
rc = PTR_ERR(pd);
CERROR("Can't allocate PD: %d\n", rc);
diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h
index fecabe139b1f..9786f6caaade 100644
--- a/drivers/staging/lustre/lustre/include/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_acl.h
@@ -38,8 +38,8 @@
#include <linux/posix_acl_xattr.h>
#define LUSTRE_POSIX_ACL_MAX_ENTRIES 32
-#define LUSTRE_POSIX_ACL_MAX_SIZE \
- (sizeof(posix_acl_xattr_header) + \
- LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(posix_acl_xattr_entry))
+#define LUSTRE_POSIX_ACL_MAX_SIZE \
+ (sizeof(struct posix_acl_xattr_header) + \
+ LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(struct posix_acl_xattr_entry))
#endif
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 6e3a188baaae..e1d784bae064 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1121,8 +1121,8 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
struct cl_io *io;
ssize_t result;
- CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: %llu, count: %zu\n",
- file->f_path.dentry->d_name.name, iot, *ppos, count);
+ CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n",
+ file, iot, *ppos, count);
restart:
io = vvp_env_thread_io(env);
@@ -1138,45 +1138,31 @@ restart:
range_lock_init(&range, *ppos, *ppos + count - 1);
vio->vui_fd = LUSTRE_FPRIVATE(file);
- vio->vui_io_subtype = args->via_io_subtype;
+ vio->vui_iter = args->u.normal.via_iter;
+ vio->vui_iocb = args->u.normal.via_iocb;
+ /*
+ * Direct IO reads must also take range lock,
+ * or multiple reads will try to work on the same pages
+ * See LU-6227 for details.
+ */
+ if (((iot == CIT_WRITE) ||
+ (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
+ !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
+ range.rl_node.in_extent.start,
+ range.rl_node.in_extent.end);
+ result = range_lock(&lli->lli_write_tree,
+ &range);
+ if (result < 0)
+ goto out;
- switch (vio->vui_io_subtype) {
- case IO_NORMAL:
- vio->vui_iter = args->u.normal.via_iter;
- vio->vui_iocb = args->u.normal.via_iocb;
- /*
- * Direct IO reads must also take range lock,
- * or multiple reads will try to work on the same pages
- * See LU-6227 for details.
- */
- if (((iot == CIT_WRITE) ||
- (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
- !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
- range.rl_node.in_extent.start,
- range.rl_node.in_extent.end);
- result = range_lock(&lli->lli_write_tree,
- &range);
- if (result < 0)
- goto out;
-
- range_locked = true;
- }
- down_read(&lli->lli_trunc_sem);
- break;
- case IO_SPLICE:
- vio->u.splice.vui_pipe = args->u.splice.via_pipe;
- vio->u.splice.vui_flags = args->u.splice.via_flags;
- break;
- default:
- CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
- LBUG();
+ range_locked = true;
}
+ down_read(&lli->lli_trunc_sem);
ll_cl_add(file, env, io);
result = cl_io_loop(env, io);
ll_cl_remove(file, env);
- if (args->via_io_subtype == IO_NORMAL)
- up_read(&lli->lli_trunc_sem);
+ up_read(&lli->lli_trunc_sem);
if (range_locked) {
CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n",
range.rl_node.in_extent.start,
@@ -1235,7 +1221,7 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (IS_ERR(env))
return PTR_ERR(env);
- args = ll_env_args(env, IO_NORMAL);
+ args = ll_env_args(env);
args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
@@ -1259,7 +1245,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (IS_ERR(env))
return PTR_ERR(env);
- args = ll_env_args(env, IO_NORMAL);
+ args = ll_env_args(env);
args->u.normal.via_iter = from;
args->u.normal.via_iocb = iocb;
@@ -1269,31 +1255,6 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
return result;
}
-/*
- * Send file content (through pagecache) somewhere with helper
- */
-static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t count,
- unsigned int flags)
-{
- struct lu_env *env;
- struct vvp_io_args *args;
- ssize_t result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- args = ll_env_args(env, IO_SPLICE);
- args->u.splice.via_pipe = pipe;
- args->u.splice.via_flags = flags;
-
- result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
- cl_env_put(env, &refcheck);
- return result;
-}
-
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
__u64 flags, struct lov_user_md *lum,
int lum_size)
@@ -3267,7 +3228,7 @@ struct file_operations ll_file_operations = {
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+ .splice_read = generic_file_splice_read,
.fsync = ll_fsync,
.flush = ll_flush
};
@@ -3280,7 +3241,7 @@ struct file_operations ll_file_operations_flock = {
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+ .splice_read = generic_file_splice_read,
.fsync = ll_fsync,
.flush = ll_flush,
.flock = ll_file_flock,
@@ -3296,7 +3257,7 @@ struct file_operations ll_file_operations_noflock = {
.release = ll_file_release,
.mmap = ll_file_mmap,
.llseek = ll_file_seek,
- .splice_read = ll_file_splice_read,
+ .splice_read = generic_file_splice_read,
.fsync = ll_fsync,
.flush = ll_flush,
.flock = ll_file_noflock,
@@ -3307,10 +3268,7 @@ const struct inode_operations ll_file_inode_operations = {
.setattr = ll_setattr,
.getattr = ll_getattr,
.permission = ll_inode_permission,
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
.listxattr = ll_listxattr,
- .removexattr = generic_removexattr,
.fiemap = ll_fiemap,
.get_acl = ll_get_acl,
};
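
Dropping ll_file_splice_read() works because generic_file_splice_read() is
built on top of ->read_iter, so any file_operations with a working read_iter
gets splice support without a bespoke IO subtype. The resulting shape (the
example_fops name is illustrative):

        const struct file_operations example_fops = {
                .read_iter   = generic_file_read_iter, /* or a fs-specific read_iter */
                .splice_read = generic_file_splice_read,
        };
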
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 3e98bd685061..4bc551279aa4 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -908,17 +908,11 @@ void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
*/
struct vvp_io_args {
/** normal/splice */
- enum vvp_io_subtype via_io_subtype;
-
union {
struct {
struct kiocb *via_iocb;
struct iov_iter *via_iter;
} normal;
- struct {
- struct pipe_inode_info *via_pipe;
- unsigned int via_flags;
- } splice;
} u;
};
@@ -946,14 +940,9 @@ static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
return lti;
}
-static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
- enum vvp_io_subtype type)
+static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
{
- struct vvp_io_args *via = &ll_env_info(env)->lti_args;
-
- via->via_io_subtype = type;
-
- return via;
+ return &ll_env_info(env)->lti_args;
}
void ll_queue_done_writing(struct inode *inode, unsigned long flags);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6bb41b09172e..e5c62f4ce3d8 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -1459,7 +1459,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
}
- /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
+ /* POSIX: check before ATTR_*TIME_SET set (from setattr_prepare) */
if (attr->ia_valid & TIMES_SET_FLAGS) {
if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
!capable(CFS_CAP_FOWNER))
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 6eae60595905..23fda9d98bff 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -871,12 +871,10 @@ static ssize_t xattr_cache_store(struct kobject *kobj,
}
LUSTRE_RW_ATTR(xattr_cache);
-static ssize_t unstable_stats_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
+static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
long pages;
int mb;
@@ -884,19 +882,21 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
pages = atomic_long_read(&cache->ccc_unstable_nr);
mb = (pages * PAGE_SIZE) >> 20;
- return sprintf(buf, "unstable_check: %8d\n"
- "unstable_pages: %12ld\n"
- "unstable_mb: %8d\n",
- cache->ccc_unstable_check, pages, mb);
+ seq_printf(m,
+ "unstable_check: %8d\n"
+ "unstable_pages: %12ld\n"
+ "unstable_mb: %8d\n",
+ cache->ccc_unstable_check, pages, mb);
+
+ return 0;
}
-static ssize_t unstable_stats_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
+static ssize_t ll_unstable_stats_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
char kernbuf[128];
int val, rc;
@@ -922,7 +922,7 @@ static ssize_t unstable_stats_store(struct kobject *kobj,
return count;
}
-LUSTRE_RW_ATTR(unstable_stats);
+LPROC_SEQ_FOPS(ll_unstable_stats);
static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -995,6 +995,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
/* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
{ "max_cached_mb", &ll_max_cached_mb_fops, NULL },
{ "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
+ { "unstable_stats", &ll_unstable_stats_fops, NULL },
{ "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
{ .name = "nosquash_nids",
.fops = &ll_nosquash_nids_fops },
@@ -1026,7 +1027,6 @@ static struct attribute *llite_attrs[] = {
&lustre_attr_max_easize.attr,
&lustre_attr_default_easize.attr,
&lustre_attr_xattr_cache.attr,
- &lustre_attr_unstable_stats.attr,
&lustre_attr_root_squash.attr,
NULL,
};
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index dfa36d34c645..180f35e3afd9 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -1097,13 +1097,17 @@ out:
}
static int ll_rename(struct inode *src, struct dentry *src_dchild,
- struct inode *tgt, struct dentry *tgt_dchild)
+ struct inode *tgt, struct dentry *tgt_dchild,
+ unsigned int flags)
{
struct ptlrpc_request *request = NULL;
struct ll_sb_info *sbi = ll_i2sbi(src);
struct md_op_data *op_data;
int err;
+ if (flags)
+ return -EINVAL;
+
CDEBUG(D_VFSTRACE,
"VFS Op:oldname=%pd, src_dir="DFID"(%p), newname=%pd, tgt_dir="DFID"(%p)\n",
src_dchild, PFID(ll_inode2fid(src)), src,
@@ -1148,14 +1152,11 @@ const struct inode_operations ll_dir_inode_operations = {
.rmdir = ll_rmdir,
.symlink = ll_symlink,
.link = ll_link,
- .rename = ll_rename,
+ .rename = ll_rename,
.setattr = ll_setattr,
.getattr = ll_getattr,
.permission = ll_inode_permission,
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
.listxattr = ll_listxattr,
- .removexattr = generic_removexattr,
.get_acl = ll_get_acl,
};
@@ -1163,9 +1164,6 @@ const struct inode_operations ll_special_inode_operations = {
.setattr = ll_setattr,
.getattr = ll_getattr,
.permission = ll_inode_permission,
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
.listxattr = ll_listxattr,
- .removexattr = generic_removexattr,
.get_acl = ll_get_acl,
};
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index f8bc7ed59646..82c7c48aa619 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -154,8 +154,5 @@ const struct inode_operations ll_fast_symlink_inode_operations = {
.get_link = ll_get_link,
.getattr = ll_getattr,
.permission = ll_inode_permission,
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
.listxattr = ll_listxattr,
- .removexattr = generic_removexattr,
};
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 5802da81cd0e..4464ad258387 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -49,14 +49,6 @@ struct obd_device;
struct obd_export;
struct page;
-/* specific architecture can implement only part of this list */
-enum vvp_io_subtype {
- /** normal IO */
- IO_NORMAL,
- /** io started from splice_{read|write} */
- IO_SPLICE
-};
-
/**
* IO state private to the VVP layer.
*/
@@ -99,10 +91,6 @@ struct vvp_io {
bool ft_flags_valid;
} fault;
struct {
- struct pipe_inode_info *vui_pipe;
- unsigned int vui_flags;
- } splice;
- struct {
struct cl_page_list vui_queue;
unsigned long vui_written;
int vui_from;
@@ -110,8 +98,6 @@ struct vvp_io {
} write;
} u;
- enum vvp_io_subtype vui_io_subtype;
-
/**
* Layout version when this IO is initialized
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 2ab450359b6d..2b7f182a15e2 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -54,18 +54,6 @@ static struct vvp_io *cl2vvp_io(const struct lu_env *env,
}
/**
- * True, if \a io is a normal io, False for splice_{read,write}
- */
-static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
-{
- struct vvp_io *vio = vvp_env_io(env);
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- return vio->vui_io_subtype == IO_NORMAL;
-}
-
-/**
* For swapping layout. The file's layout may have changed.
* To avoid populating pages to a wrong stripe, we have to verify the
* correctness of layout. It works because swapping layout processes
@@ -390,9 +378,6 @@ static int vvp_mmap_locks(const struct lu_env *env,
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- if (!cl_is_normalio(env, io))
- return 0;
-
if (!vio->vui_iter) /* nfs or loop back device write */
return 0;
@@ -461,15 +446,10 @@ static void vvp_io_advance(const struct lu_env *env,
const struct cl_io_slice *ios,
size_t nob)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct cl_io *io = ios->cis_io;
struct cl_object *obj = ios->cis_io->ci_obj;
-
+ struct vvp_io *vio = cl2vvp_io(env, ios);
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- if (!cl_is_normalio(env, io))
- return;
-
iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
}
@@ -478,7 +458,7 @@ static void vvp_io_update_iov(const struct lu_env *env,
{
size_t size = io->u.ci_rw.crw_count;
- if (!cl_is_normalio(env, io) || !vio->vui_iter)
+ if (!vio->vui_iter)
return;
iov_iter_truncate(vio->vui_iter, size);
@@ -715,25 +695,8 @@ static int vvp_io_read_start(const struct lu_env *env,
/* BUG: 5972 */
file_accessed(file);
- switch (vio->vui_io_subtype) {
- case IO_NORMAL:
- LASSERT(vio->vui_iocb->ki_pos == pos);
- result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
- break;
- case IO_SPLICE:
- result = generic_file_splice_read(file, &pos,
- vio->u.splice.vui_pipe, cnt,
- vio->u.splice.vui_flags);
- /* LU-1109: do splice read stripe by stripe otherwise if it
- * may make nfsd stuck if this read occupied all internal pipe
- * buffers.
- */
- io->ci_continue = 0;
- break;
- default:
- CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
- LBUG();
- }
+ LASSERT(vio->vui_iocb->ki_pos == pos);
+ result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
out:
if (result >= 0) {
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 5d79efc1aafe..046e84d7a158 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -241,10 +241,7 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
obj->vob_discard_page_warned = 0;
} else {
SetPageError(vmpage);
- if (ioret == -ENOSPC)
- set_bit(AS_ENOSPC, &inode->i_mapping->flags);
- else
- set_bit(AS_EIO, &inode->i_mapping->flags);
+ mapping_set_error(inode->i_mapping, ioret);
if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
obj->vob_discard_page_warned == 0) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 5d3995d5c69a..a7416cd9ac71 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -2220,7 +2220,7 @@ int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
task_lock(current);
if (pud->pud_ngroups > current_ngroups)
pud->pud_ngroups = current_ngroups;
- memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
+ memcpy(pud->pud_groups, current_cred()->group_info->gid,
pud->pud_ngroups * sizeof(__u32));
task_unlock(current);
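
This one-liner tracks the 4.9 credential rework that flattened struct
group_info from a block-indirected layout into a plain array, roughly:

        struct group_info {
                atomic_t usage;
                int      ngroups;
                kgid_t   gid[0];        /* flat array; replaces blocks[] */
        };

Copying pud_ngroups * sizeof(__u32) out of ->gid works because kgid_t is a
single-u32 wrapper.
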
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index e5945e2ccc49..b05b1f935e4c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -3742,32 +3742,28 @@ void lustre_assert_wire_constants(void)
CLASSERT(FIEMAP_EXTENT_NET == 0x80000000);
/* Checks for type posix_acl_xattr_entry */
- LASSERTF((int)sizeof(posix_acl_xattr_entry) == 8, "found %lld\n",
- (long long)(int)sizeof(posix_acl_xattr_entry));
- LASSERTF((int)offsetof(posix_acl_xattr_entry, e_tag) == 0, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_entry, e_tag));
- LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_tag) == 2, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_tag));
- LASSERTF((int)offsetof(posix_acl_xattr_entry, e_perm) == 2, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_entry, e_perm));
- LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_perm) == 2, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_perm));
- LASSERTF((int)offsetof(posix_acl_xattr_entry, e_id) == 4, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_entry, e_id));
- LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_id) == 4, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_id));
+ LASSERTF((int)sizeof(struct posix_acl_xattr_entry) == 8, "found %lld\n",
+ (long long)(int)sizeof(struct posix_acl_xattr_entry));
+ LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_tag) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct posix_acl_xattr_entry, e_tag));
+ LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_tag) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_tag));
+ LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_perm) == 2, "found %lld\n",
+ (long long)(int)offsetof(struct posix_acl_xattr_entry, e_perm));
+ LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_perm) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_perm));
+ LASSERTF((int)offsetof(struct posix_acl_xattr_entry, e_id) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct posix_acl_xattr_entry, e_id));
+ LASSERTF((int)sizeof(((struct posix_acl_xattr_entry *)0)->e_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct posix_acl_xattr_entry *)0)->e_id));
/* Checks for type posix_acl_xattr_header */
- LASSERTF((int)sizeof(posix_acl_xattr_header) == 4, "found %lld\n",
- (long long)(int)sizeof(posix_acl_xattr_header));
- LASSERTF((int)offsetof(posix_acl_xattr_header, a_version) == 0, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_header, a_version));
- LASSERTF((int)sizeof(((posix_acl_xattr_header *)0)->a_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_header *)0)->a_version));
- LASSERTF((int)offsetof(posix_acl_xattr_header, a_entries) == 4, "found %lld\n",
- (long long)(int)offsetof(posix_acl_xattr_header, a_entries));
- LASSERTF((int)sizeof(((posix_acl_xattr_header *)0)->a_entries) == 0, "found %lld\n",
- (long long)(int)sizeof(((posix_acl_xattr_header *)0)->a_entries));
+ LASSERTF((int)sizeof(struct posix_acl_xattr_header) == 4, "found %lld\n",
+ (long long)(int)sizeof(struct posix_acl_xattr_header));
+ LASSERTF((int)offsetof(struct posix_acl_xattr_header, a_version) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct posix_acl_xattr_header, a_version));
+ LASSERTF((int)sizeof(((struct posix_acl_xattr_header *)0)->a_version) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct posix_acl_xattr_header *)0)->a_version));
/* Checks for struct link_ea_header */
LASSERTF((int)sizeof(struct link_ea_header) == 24, "found %lld\n",
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 7292f23954df..6620d96ee44d 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -31,11 +31,11 @@ source "drivers/staging/media/omap4iss/Kconfig"
source "drivers/staging/media/pulse8-cec/Kconfig"
-source "drivers/staging/media/tw686x-kh/Kconfig"
-
source "drivers/staging/media/s5p-cec/Kconfig"
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
+source "drivers/staging/media/st-cec/Kconfig"
+
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 87ce8ad1e22a..906257e94dda 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -6,4 +6,4 @@ obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/
-obj-$(CONFIG_VIDEO_TW686X_KH) += tw686x-kh/
+obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += st-cec/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index ea15cc638097..4d9bd02ede47 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -482,6 +482,8 @@ static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on)
flags);
memset(&bdev->rds_info, 0, sizeof(bdev->rds_info));
}
+ if (err)
+ return err;
return bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM,
bdev->cache_fm_rds_system);
diff --git a/drivers/staging/media/cec/Kconfig b/drivers/staging/media/cec/Kconfig
index 21457a1f6c9f..6e12d41b1f86 100644
--- a/drivers/staging/media/cec/Kconfig
+++ b/drivers/staging/media/cec/Kconfig
@@ -5,9 +5,6 @@ config MEDIA_CEC
---help---
Enable the CEC API.
- To compile this driver as a module, choose M here: the
- module will be called cec.
-
config MEDIA_CEC_DEBUG
bool "CEC debugfs interface (EXPERIMENTAL)"
depends on MEDIA_CEC && DEBUG_FS
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index 946986f3ac0d..611e07b78bfe 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -1164,8 +1164,6 @@ void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
if (IS_ERR_OR_NULL(adap))
return;
- if (WARN_ON(adap->capabilities & CEC_CAP_PHYS_ADDR))
- return;
mutex_lock(&adap->lock);
__cec_s_phys_addr(adap, phys_addr, block);
mutex_unlock(&adap->lock);
@@ -1306,8 +1304,6 @@ int cec_s_log_addrs(struct cec_adapter *adap,
{
int err;
- if (WARN_ON(adap->capabilities & CEC_CAP_LOG_ADDRS))
- return -EINVAL;
mutex_lock(&adap->lock);
err = __cec_s_log_addrs(adap, log_addrs, block);
mutex_unlock(&adap->lock);
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/staging/media/cec/cec-core.c
index 3b1e4d2b190d..b0137e247dc9 100644
--- a/drivers/staging/media/cec/cec-core.c
+++ b/drivers/staging/media/cec/cec-core.c
@@ -357,8 +357,7 @@ void cec_delete_adapter(struct cec_adapter *adap)
if (adap->kthread_config)
kthread_stop(adap->kthread_config);
#if IS_REACHABLE(CONFIG_RC_CORE)
- if (adap->rc)
- rc_free_device(adap->rc);
+ rc_free_device(adap->rc);
#endif
kfree(adap);
}
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 64d99ec292e5..bfb76a45bfbf 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -650,7 +650,7 @@ static int __init lirc_parallel_init(void)
if (!pport) {
pr_notice("no port at %x found\n", io);
result = -ENXIO;
- goto exit_device_put;
+ goto exit_device_del;
}
ppdevice = parport_register_device(pport, LIRC_DRIVER_NAME,
pf, kf, lirc_lirc_irq_handler, 0,
@@ -659,7 +659,7 @@ static int __init lirc_parallel_init(void)
if (!ppdevice) {
pr_notice("parport_register_device() failed\n");
result = -ENXIO;
- goto exit_device_put;
+ goto exit_device_del;
}
if (parport_claim(ppdevice) != 0)
goto skip_init;
@@ -678,7 +678,7 @@ static int __init lirc_parallel_init(void)
parport_release(pport);
parport_unregister_device(ppdevice);
result = -EIO;
- goto exit_device_put;
+ goto exit_device_del;
}
#endif
@@ -695,11 +695,13 @@ static int __init lirc_parallel_init(void)
pr_notice("register_chrdev() failed\n");
parport_unregister_device(ppdevice);
result = -EIO;
- goto exit_device_put;
+ goto exit_device_del;
}
pr_info("installed using port 0x%04x irq %d\n", io, irq);
return 0;
+exit_device_del:
+ platform_device_del(lirc_parallel_dev);
exit_device_put:
platform_device_put(lirc_parallel_dev);
exit_driver_unregister:
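
The relabeled error paths above reflect the platform-device lifecycle: once
platform_device_add() has succeeded, teardown requires platform_device_del()
followed by platform_device_put(); put alone only drops the allocation
reference and leaves the device registered. The canonical alloc/add unwind
(do_more_setup() is hypothetical):

        pdev = platform_device_alloc("example", -1);
        if (!pdev)
                return -ENOMEM;

        ret = platform_device_add(pdev);
        if (ret)
                goto err_put;           /* not yet added: put only */

        ret = do_more_setup(pdev);
        if (ret)
                goto err_del;           /* added: del, then put */

        return 0;

        err_del:
                platform_device_del(pdev);
        err_put:
                platform_device_put(pdev);
                return ret;
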
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 6ceb4eb00493..c26c99fd4a24 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -61,7 +61,7 @@ static void iss_print_status(struct iss_device *iss)
* See this link for reference:
* http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
*/
-void omap4iss_flush(struct iss_device *iss)
+static void omap4iss_flush(struct iss_device *iss)
{
iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION, 0);
iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
@@ -362,6 +362,10 @@ static irqreturn_t iss_isr(int irq, void *_iss)
return IRQ_HANDLED;
}
+static const struct media_device_ops iss_media_ops = {
+ .link_notify = v4l2_pipeline_link_notify,
+};
+
/* -----------------------------------------------------------------------------
* Pipeline stream management
*/
@@ -988,7 +992,7 @@ static int iss_register_entities(struct iss_device *iss)
strlcpy(iss->media_dev.model, "TI OMAP4 ISS",
sizeof(iss->media_dev.model));
iss->media_dev.hw_revision = iss->revision;
- iss->media_dev.link_notify = v4l2_pipeline_link_notify;
+ iss->media_dev.ops = &iss_media_ops;
ret = media_device_register(&iss->media_dev);
if (ret < 0) {
dev_err(iss->dev, "Media device registration failed (%d)\n",
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 90b7ff56722d..c16927ac8eb0 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -646,6 +646,103 @@ iss_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
}
static int
+iss_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev_format format;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ };
+ u32 pad;
+ int ret;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ subdev = iss_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ /*
+ * Try the get_selection operation first and fall back to get_fmt
+ * if it is not implemented.
+ */
+ sdsel.pad = pad;
+ ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
+ if (!ret)
+ sel->r = sdsel.r;
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ format.pad = pad;
+ format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format.format.width;
+ sel->r.height = format.format.height;
+
+ return 0;
+}
+
+static int
+iss_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ struct iss_video *video = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ .flags = sel->flags,
+ .r = sel->r,
+ };
+ u32 pad;
+ int ret;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ subdev = iss_video_remote_subdev(video, &pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ sdsel.pad = pad;
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
+ mutex_unlock(&video->mutex);
+ if (!ret)
+ sel->r = sdsel.r;
+
+ return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
+}
+
+static int
iss_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct iss_video_fh *vfh = to_iss_video_fh(fh);
@@ -971,6 +1068,8 @@ static const struct v4l2_ioctl_ops iss_video_ioctl_ops = {
.vidioc_g_fmt_vid_out = iss_video_get_format,
.vidioc_s_fmt_vid_out = iss_video_set_format,
.vidioc_try_fmt_vid_out = iss_video_try_format,
+ .vidioc_g_selection = iss_video_get_selection,
+ .vidioc_s_selection = iss_video_set_selection,
.vidioc_g_parm = iss_video_get_param,
.vidioc_s_parm = iss_video_set_param,
.vidioc_reqbufs = iss_video_reqbufs,
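
Both new ioctl handlers end with the standard v4l2_subdev_call() translation:
-ENOIOCTLCMD is a kernel-internal "op not implemented" code and must never
reach userspace, which expects -ENOTTY for an unsupported ioctl. The
recurring tail, in brief:

        ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
        if (!ret)
                sel->r = sdsel.r;
        return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
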
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/staging/media/pulse8-cec/pulse8-cec.c
index ed8bd95ad6d0..1732c3857b8e 100644
--- a/drivers/staging/media/pulse8-cec/pulse8-cec.c
+++ b/drivers/staging/media/pulse8-cec/pulse8-cec.c
@@ -10,6 +10,29 @@
* this archive for more details.
*/
+/*
+ * Notes:
+ *
+ * - Devices with firmware version < 2 do not store their configuration in
+ * EEPROM.
+ *
+ * - In autonomous mode, only messages from a TV will be acknowledged, even
+ * polling messages. Upon receiving a message from a TV, the dongle will
+ * respond to messages from any logical address.
+ *
+ * - In autonomous mode, the dongle will by default reply Feature Abort
+ * [Unrecognized Opcode] when it receives Give Device Vendor ID. It will
+ *   however observe vendor IDs reported by other devices and possibly
+ *   alter this behavior. When TVs (and only TVs) report that their vendor ID
+ *   is LG (0x00e091), the dongle will itself reply that it has the same vendor
+ *   ID, and it will respond to at least one vendor-specific command.
+ *
+ * - In autonomous mode, the dongle is known to attempt wakeup if it receives
+ *   <User Control Pressed> ["Power On"], ["Power"] or ["Power Toggle"], or if it
+ * receives <Set Stream Path> with its own physical address. It also does this
+ * if it receives <Vendor Specific Command> [0x03 0x00] from an LG TV.
+ */
+
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -28,8 +51,11 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
MODULE_LICENSE("GPL");
static int debug;
+static int persistent_config = 1;
module_param(debug, int, 0644);
+module_param(persistent_config, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-1)");
+MODULE_PARM_DESC(persistent_config, "read config from persistent memory (0-1)");
enum pulse8_msgcodes {
MSGCODE_NOTHING = 0,
@@ -86,12 +112,16 @@ enum pulse8_msgcodes {
#define DATA_SIZE 256
+#define PING_PERIOD (15 * HZ)
+
struct pulse8 {
struct device *dev;
struct serio *serio;
struct cec_adapter *adap;
+ unsigned int vers;
struct completion cmd_done;
struct work_struct work;
+ struct delayed_work ping_eeprom_work;
struct cec_msg rx_msg;
u8 data[DATA_SIZE];
unsigned int len;
@@ -99,8 +129,15 @@ struct pulse8 {
unsigned int idx;
bool escape;
bool started;
+ struct mutex config_lock;
+ struct mutex write_lock;
+ bool config_pending;
+ bool restoring_config;
+ bool autonomous;
};
+static void pulse8_ping_eeprom_work_handler(struct work_struct *work);
+
static void pulse8_irq_work_handler(struct work_struct *work)
{
struct pulse8 *pulse8 =
@@ -205,6 +242,7 @@ static void pulse8_disconnect(struct serio *serio)
struct pulse8 *pulse8 = serio_get_drvdata(serio);
cec_unregister_adapter(pulse8->adap);
+ cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
dev_info(&serio->dev, "disconnected\n");
serio_close(serio);
serio_set_drvdata(serio, NULL);
@@ -228,13 +266,14 @@ static int pulse8_send(struct serio *serio, const u8 *command, u8 cmd_len)
}
}
if (!err)
- err = serio_write(serio, 0xfe);
+ err = serio_write(serio, MSGEND);
return err;
}
-static int pulse8_send_and_wait(struct pulse8 *pulse8,
- const u8 *cmd, u8 cmd_len, u8 response, u8 size)
+static int pulse8_send_and_wait_once(struct pulse8 *pulse8,
+ const u8 *cmd, u8 cmd_len,
+ u8 response, u8 size)
{
int err;
@@ -250,24 +289,8 @@ static int pulse8_send_and_wait(struct pulse8 *pulse8,
if ((pulse8->data[0] & 0x3f) == MSGCODE_COMMAND_REJECTED &&
cmd[0] != MSGCODE_SET_CONTROLLED &&
cmd[0] != MSGCODE_SET_AUTO_ENABLED &&
- cmd[0] != MSGCODE_GET_BUILDDATE) {
- u8 cmd_sc[2];
-
- cmd_sc[0] = MSGCODE_SET_CONTROLLED;
- cmd_sc[1] = 1;
- err = pulse8_send_and_wait(pulse8, cmd_sc, 2,
- MSGCODE_COMMAND_ACCEPTED, 1);
- if (err)
- return err;
- init_completion(&pulse8->cmd_done);
-
- err = pulse8_send(pulse8->serio, cmd, cmd_len);
- if (err)
- return err;
-
- if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ))
- return -ETIMEDOUT;
- }
+ cmd[0] != MSGCODE_GET_BUILDDATE)
+ return -ENOTTY;
if (response &&
((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) {
dev_info(pulse8->dev, "transmit: failed %02x\n",
@@ -277,74 +300,155 @@ static int pulse8_send_and_wait(struct pulse8 *pulse8,
return 0;
}
-static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio)
+static int pulse8_send_and_wait(struct pulse8 *pulse8,
+ const u8 *cmd, u8 cmd_len, u8 response, u8 size)
+{
+ u8 cmd_sc[2];
+ int err;
+
+ mutex_lock(&pulse8->write_lock);
+ err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, response, size);
+
+ if (err == -ENOTTY) {
+ cmd_sc[0] = MSGCODE_SET_CONTROLLED;
+ cmd_sc[1] = 1;
+ err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ if (err)
+ goto unlock;
+ err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len,
+ response, size);
+ }
+
+unlock:
+ mutex_unlock(&pulse8->write_lock);
+ return err == -ENOTTY ? -EIO : err;
+}
+
+static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
+ struct cec_log_addrs *log_addrs, u16 *pa)
{
u8 *data = pulse8->data + 1;
- unsigned int count = 0;
- unsigned int vers = 0;
u8 cmd[2];
int err;
+ struct tm tm;
+ time_t date;
+
+ pulse8->vers = 0;
- cmd[0] = MSGCODE_PING;
- err = pulse8_send_and_wait(pulse8, cmd, 1,
- MSGCODE_COMMAND_ACCEPTED, 0);
cmd[0] = MSGCODE_FIRMWARE_VERSION;
- if (!err)
- err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
if (err)
return err;
-
- vers = (data[0] << 8) | data[1];
-
- dev_info(pulse8->dev, "Firmware version %04x\n", vers);
- if (vers < 2)
+ pulse8->vers = (data[0] << 8) | data[1];
+ dev_info(pulse8->dev, "Firmware version %04x\n", pulse8->vers);
+ if (pulse8->vers < 2) {
+ *pa = CEC_PHYS_ADDR_INVALID;
return 0;
+ }
cmd[0] = MSGCODE_GET_BUILDDATE;
- if (!err)
- err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4);
- if (!err) {
- time_t date = (data[0] << 24) | (data[1] << 16) |
- (data[2] << 8) | data[3];
- struct tm tm;
-
- time_to_tm(date, 0, &tm);
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4);
+ if (err)
+ return err;
+ date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
+ time_to_tm(date, 0, &tm);
+ dev_info(pulse8->dev, "Firmware build date %04ld.%02d.%02d %02d:%02d:%02d\n",
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+ dev_dbg(pulse8->dev, "Persistent config:\n");
+ cmd[0] = MSGCODE_GET_AUTO_ENABLED;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (err)
+ return err;
+ pulse8->autonomous = data[0];
+ dev_dbg(pulse8->dev, "Autonomous mode: %s",
+ data[0] ? "on" : "off");
- dev_info(pulse8->dev, "Firmware build date %04ld.%02d.%02d %02d:%02d:%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
+ cmd[0] = MSGCODE_GET_DEVICE_TYPE;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (err)
+ return err;
+ log_addrs->primary_device_type[0] = data[0];
+ dev_dbg(pulse8->dev, "Primary device type: %d\n", data[0]);
+ switch (log_addrs->primary_device_type[0]) {
+ case CEC_OP_PRIM_DEVTYPE_TV:
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_RECORD:
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_RECORD;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_TUNER:
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TUNER;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_AUDIOSYSTEM;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_SWITCH:
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_SPECIFIC;
+ break;
+ default:
+ log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+ dev_info(pulse8->dev, "Unknown Primary Device Type: %d\n",
+ log_addrs->primary_device_type[0]);
+ break;
}
- do {
- if (count)
- msleep(500);
- cmd[0] = MSGCODE_SET_AUTO_ENABLED;
- cmd[1] = 0;
- err = pulse8_send_and_wait(pulse8, cmd, 2,
- MSGCODE_COMMAND_ACCEPTED, 1);
- if (err && count == 0) {
- dev_info(pulse8->dev, "No Auto Enabled supported\n");
- return 0;
- }
+ cmd[0] = MSGCODE_GET_LOGICAL_ADDRESS_MASK;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
+ if (err)
+ return err;
+ log_addrs->log_addr_mask = (data[0] << 8) | data[1];
+ dev_dbg(pulse8->dev, "Logical address ACK mask: %x\n",
+ log_addrs->log_addr_mask);
+ if (log_addrs->log_addr_mask)
+ log_addrs->num_log_addrs = 1;
+
+ cmd[0] = MSGCODE_GET_PHYSICAL_ADDRESS;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
+ if (err)
+ return err;
+ *pa = (data[0] << 8) | data[1];
+ dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
+ cec_phys_addr_exp(*pa));
- cmd[0] = MSGCODE_GET_AUTO_ENABLED;
- if (!err)
- err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
- if (!err && !data[0]) {
- cmd[0] = MSGCODE_WRITE_EEPROM;
- err = pulse8_send_and_wait(pulse8, cmd, 1,
- MSGCODE_COMMAND_ACCEPTED, 1);
- cmd[0] = MSGCODE_GET_AUTO_ENABLED;
- if (!err)
- err = pulse8_send_and_wait(pulse8, cmd, 1,
- cmd[0], 1);
- }
- } while (!err && data[0] && count++ < 5);
+ cmd[0] = MSGCODE_GET_HDMI_VERSION;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (err)
+ return err;
+ log_addrs->cec_version = data[0];
+ dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
- if (!err && data[0])
- err = -EIO;
+ cmd[0] = MSGCODE_GET_OSD_NAME;
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
+ if (err)
+ return err;
+ strncpy(log_addrs->osd_name, data, 13);
+ dev_dbg(pulse8->dev, "OSD name: %s\n", log_addrs->osd_name);
- return err;
+ return 0;
+}
+
+static int pulse8_apply_persistent_config(struct pulse8 *pulse8,
+ struct cec_log_addrs *log_addrs,
+ u16 pa)
+{
+ int err;
+
+ err = cec_s_log_addrs(pulse8->adap, log_addrs, false);
+ if (err)
+ return err;
+
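+ /* hand the restored physical address to the CEC core (non-blocking) */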
+ cec_s_phys_addr(pulse8->adap, pa, false);
+
+ return 0;
}
static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
@@ -364,9 +468,11 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct pulse8 *pulse8 = adap->priv;
u16 mask = 0;
- u8 cmd[3];
- int err;
+ u16 pa = adap->phys_addr;
+ u8 cmd[16];
+ int err = 0;
+ mutex_lock(&pulse8->config_lock);
if (log_addr != CEC_LOG_ADDR_INVALID)
mask = 1 << log_addr;
cmd[0] = MSGCODE_SET_ACK_MASK;
@@ -374,8 +480,106 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
cmd[2] = mask & 0xff;
err = pulse8_send_and_wait(pulse8, cmd, 3,
MSGCODE_COMMAND_ACCEPTED, 0);
- if (mask == 0)
- return 0;
+ if ((err && mask != 0) || pulse8->restoring_config)
+ goto unlock;
+
+ cmd[0] = MSGCODE_SET_AUTO_ENABLED;
+ cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 0 : 1;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+ pulse8->autonomous = cmd[1];
+ if (log_addr == CEC_LOG_ADDR_INVALID)
+ goto unlock;
+
+ cmd[0] = MSGCODE_SET_DEVICE_TYPE;
+ cmd[1] = adap->log_addrs.primary_device_type[0];
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+
+ switch (adap->log_addrs.primary_device_type[0]) {
+ case CEC_OP_PRIM_DEVTYPE_TV:
+ mask = CEC_LOG_ADDR_MASK_TV;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_RECORD:
+ mask = CEC_LOG_ADDR_MASK_RECORD;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_TUNER:
+ mask = CEC_LOG_ADDR_MASK_TUNER;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
+ mask = CEC_LOG_ADDR_MASK_PLAYBACK;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
+ mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_SWITCH:
+ mask = CEC_LOG_ADDR_MASK_UNREGISTERED;
+ break;
+ case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
+ mask = CEC_LOG_ADDR_MASK_SPECIFIC;
+ break;
+ default:
+ mask = 0;
+ break;
+ }
+ cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK;
+ cmd[1] = mask >> 8;
+ cmd[2] = mask & 0xff;
+ err = pulse8_send_and_wait(pulse8, cmd, 3,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+
+ cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS;
+ cmd[1] = log_addr;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+
+ cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS;
+ cmd[1] = pa >> 8;
+ cmd[2] = pa & 0xff;
+ err = pulse8_send_and_wait(pulse8, cmd, 3,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+
+ cmd[0] = MSGCODE_SET_HDMI_VERSION;
+ cmd[1] = adap->log_addrs.cec_version;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+
+ if (adap->log_addrs.osd_name[0]) {
+ size_t osd_len = strlen(adap->log_addrs.osd_name);
+ char *osd_str = cmd + 1;
+
+ cmd[0] = MSGCODE_SET_OSD_NAME;
+ strncpy(cmd + 1, adap->log_addrs.osd_name, 13);
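+ /* pad to at least 4 characters; shorter names are assumed
+ * to be rejected by the firmware */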
+ if (osd_len < 4) {
+ memset(osd_str + osd_len, ' ', 4 - osd_len);
+ osd_len = 4;
+ osd_str[osd_len] = '\0';
+ strcpy(adap->log_addrs.osd_name, osd_str);
+ }
+ err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ if (pulse8->restoring_config)
+ pulse8->restoring_config = false;
+ else
+ pulse8->config_pending = true;
+ mutex_unlock(&pulse8->config_lock);
return err;
}
@@ -437,6 +641,8 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL;
struct pulse8 *pulse8;
int err = -ENOMEM;
+ struct cec_log_addrs log_addrs = {};
+ u16 pa = CEC_PHYS_ADDR_INVALID;
pulse8 = kzalloc(sizeof(*pulse8), GFP_KERNEL);
@@ -453,12 +659,15 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
pulse8->dev = &serio->dev;
serio_set_drvdata(serio, pulse8);
INIT_WORK(&pulse8->work, pulse8_irq_work_handler);
+ mutex_init(&pulse8->write_lock);
+ mutex_init(&pulse8->config_lock);
+ pulse8->config_pending = false;
err = serio_open(serio, drv);
if (err)
goto delete_adap;
- err = pulse8_setup(pulse8, serio);
+ err = pulse8_setup(pulse8, serio, &log_addrs, &pa);
if (err)
goto close_serio;
@@ -467,6 +676,18 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
goto close_serio;
pulse8->dev = &pulse8->adap->devnode.dev;
+
+ if (persistent_config && pulse8->autonomous) {
+ err = pulse8_apply_persistent_config(pulse8, &log_addrs, pa);
+ if (err)
+ goto close_serio;
+ pulse8->restoring_config = true;
+ }
+
+ INIT_DELAYED_WORK(&pulse8->ping_eeprom_work,
+ pulse8_ping_eeprom_work_handler);
+ schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
+
return 0;
close_serio:
@@ -479,6 +700,33 @@ free_device:
return err;
}
+static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
+{
+ struct pulse8 *pulse8 =
+ container_of(work, struct pulse8, ping_eeprom_work.work);
+ u8 cmd;
+
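+ /* re-arm first so a failed or slow ping cannot stall the next cycle */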
+ schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
+ cmd = MSGCODE_PING;
+ pulse8_send_and_wait(pulse8, &cmd, 1,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+
+ if (pulse8->vers < 2)
+ return;
+
+ mutex_lock(&pulse8->config_lock);
+ if (pulse8->config_pending && persistent_config) {
+ dev_dbg(pulse8->dev, "writing pending config to EEPROM\n");
+ cmd = MSGCODE_WRITE_EEPROM;
+ if (pulse8_send_and_wait(pulse8, &cmd, 1,
+ MSGCODE_COMMAND_ACCEPTED, 0))
+ dev_info(pulse8->dev, "failed to write pending config to EEPROM\n");
+ else
+ pulse8->config_pending = false;
+ }
+ mutex_unlock(&pulse8->config_lock);
+}
+
static struct serio_device_id pulse8_serio_ids[] = {
{
.type = SERIO_RS232,
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
index 78333273c4e5..1780a08b73c9 100644
--- a/drivers/staging/media/s5p-cec/s5p_cec.c
+++ b/drivers/staging/media/s5p-cec/s5p_cec.c
@@ -173,7 +173,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
int ret;
cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
- if (!dev)
+ if (!cec)
return -ENOMEM;
cec->dev = dev;
@@ -250,22 +250,9 @@ static int s5p_cec_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused s5p_cec_suspend(struct device *dev)
-{
- if (pm_runtime_suspended(dev))
- return 0;
- return s5p_cec_runtime_suspend(dev);
-}
-
-static int __maybe_unused s5p_cec_resume(struct device *dev)
-{
- if (pm_runtime_suspended(dev))
- return 0;
- return s5p_cec_runtime_resume(dev);
-}
-
static const struct dev_pm_ops s5p_cec_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(s5p_cec_suspend, s5p_cec_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(s5p_cec_runtime_suspend, s5p_cec_runtime_resume,
NULL)
};
diff --git a/drivers/staging/media/st-cec/Kconfig b/drivers/staging/media/st-cec/Kconfig
new file mode 100644
index 000000000000..784d2c600aca
--- /dev/null
+++ b/drivers/staging/media/st-cec/Kconfig
@@ -0,0 +1,8 @@
+config VIDEO_STI_HDMI_CEC
+ tristate "STMicroelectronics STiH4xx HDMI CEC driver"
+ depends on VIDEO_DEV && MEDIA_CEC && (ARCH_STI || COMPILE_TEST)
+ ---help---
+ This is a driver for the STIH4xx HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ The CEC bus is present in the HDMI connector and enables
+ communication between compatible devices.
diff --git a/drivers/staging/media/st-cec/Makefile b/drivers/staging/media/st-cec/Makefile
new file mode 100644
index 000000000000..f07905e1448a
--- /dev/null
+++ b/drivers/staging/media/st-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += stih-cec.o
diff --git a/drivers/staging/media/st-cec/stih-cec.c b/drivers/staging/media/st-cec/stih-cec.c
new file mode 100644
index 000000000000..214344866a6b
--- /dev/null
+++ b/drivers/staging/media/st-cec/stih-cec.c
@@ -0,0 +1,380 @@
+/*
+ * drivers/staging/media/st-cec/stih-cec.c
+ *
+ * STIH4xx CEC driver
+ * Copyright (C) STMicroelectronics SA 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+
+#include <media/cec.h>
+
+#define CEC_NAME "stih-cec"
+
+/* CEC registers */
+#define CEC_CLK_DIV 0x0
+#define CEC_CTRL 0x4
+#define CEC_IRQ_CTRL 0x8
+#define CEC_STATUS 0xC
+#define CEC_EXT_STATUS 0x10
+#define CEC_TX_CTRL 0x14
+#define CEC_FREE_TIME_THRESH 0x18
+#define CEC_BIT_TOUT_THRESH 0x1C
+#define CEC_BIT_PULSE_THRESH 0x20
+#define CEC_DATA 0x24
+#define CEC_TX_ARRAY_CTRL 0x28
+#define CEC_CTRL2 0x2C
+#define CEC_TX_ERROR_STS 0x30
+#define CEC_ADDR_TABLE 0x34
+#define CEC_DATA_ARRAY_CTRL 0x38
+#define CEC_DATA_ARRAY_STATUS 0x3C
+#define CEC_TX_DATA_BASE 0x40
+#define CEC_TX_DATA_TOP 0x50
+#define CEC_TX_DATA_SIZE 0x1
+#define CEC_RX_DATA_BASE 0x54
+#define CEC_RX_DATA_TOP 0x64
+#define CEC_RX_DATA_SIZE 0x1
+
+/* CEC_CTRL2 */
+#define CEC_LINE_INACTIVE_EN BIT(0)
+#define CEC_AUTO_BUS_ERR_EN BIT(1)
+#define CEC_STOP_ON_ARB_ERR_EN BIT(2)
+#define CEC_TX_REQ_WAIT_EN BIT(3)
+
+/* CEC_DATA_ARRAY_CTRL */
+#define CEC_TX_ARRAY_EN BIT(0)
+#define CEC_RX_ARRAY_EN BIT(1)
+#define CEC_TX_ARRAY_RESET BIT(2)
+#define CEC_RX_ARRAY_RESET BIT(3)
+#define CEC_TX_N_OF_BYTES_IRQ_EN BIT(4)
+#define CEC_TX_STOP_ON_NACK BIT(7)
+
+/* CEC_TX_ARRAY_CTRL */
+#define CEC_TX_N_OF_BYTES 0x1F
+#define CEC_TX_START BIT(5)
+#define CEC_TX_AUTO_SOM_EN BIT(6)
+#define CEC_TX_AUTO_EOM_EN BIT(7)
+
+/* CEC_IRQ_CTRL */
+#define CEC_TX_DONE_IRQ_EN BIT(0)
+#define CEC_ERROR_IRQ_EN BIT(2)
+#define CEC_RX_DONE_IRQ_EN BIT(3)
+#define CEC_RX_SOM_IRQ_EN BIT(4)
+#define CEC_RX_EOM_IRQ_EN BIT(5)
+#define CEC_FREE_TIME_IRQ_EN BIT(6)
+#define CEC_PIN_STS_IRQ_EN BIT(7)
+
+/* CEC_CTRL */
+#define CEC_IN_FILTER_EN BIT(0)
+#define CEC_PWR_SAVE_EN BIT(1)
+#define CEC_EN BIT(4)
+#define CEC_ACK_CTRL BIT(5)
+#define CEC_RX_RESET_EN BIT(6)
+#define CEC_IGNORE_RX_ERROR BIT(7)
+
+/* CEC_STATUS */
+#define CEC_TX_DONE_STS BIT(0)
+#define CEC_TX_ACK_GET_STS BIT(1)
+#define CEC_ERROR_STS BIT(2)
+#define CEC_RX_DONE_STS BIT(3)
+#define CEC_RX_SOM_STS BIT(4)
+#define CEC_RX_EOM_STS BIT(5)
+#define CEC_FREE_TIME_IRQ_STS BIT(6)
+#define CEC_PIN_STS BIT(7)
+#define CEC_SBIT_TOUT_STS BIT(8)
+#define CEC_DBIT_TOUT_STS BIT(9)
+#define CEC_LPULSE_ERROR_STS BIT(10)
+#define CEC_HPULSE_ERROR_STS BIT(11)
+#define CEC_TX_ERROR BIT(12)
+#define CEC_TX_ARB_ERROR BIT(13)
+#define CEC_RX_ERROR_MIN BIT(14)
+#define CEC_RX_ERROR_MAX BIT(15)
+
+/* Signal free time in bit periods (2.4ms) */
+#define CEC_PRESENT_INIT_SFT 7
+#define CEC_NEW_INIT_SFT 5
+#define CEC_RETRANSMIT_SFT 3
+
+/* Constants for CEC_BIT_TOUT_THRESH register */
+#define CEC_SBIT_TOUT_47MS BIT(1)
+#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1))
+#define CEC_SBIT_TOUT_50MS BIT(2)
+#define CEC_DBIT_TOUT_27MS BIT(0)
+#define CEC_DBIT_TOUT_28MS BIT(1)
+#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1))
+
+/* Constants for CEC_BIT_PULSE_THRESH register */
+#define CEC_BIT_LPULSE_03MS BIT(1)
+#define CEC_BIT_HPULSE_03MS BIT(3)
+
+/* Constants for CEC_DATA_ARRAY_STATUS register */
+#define CEC_RX_N_OF_BYTES 0x1F
+#define CEC_TX_N_OF_BYTES_SENT BIT(5)
+#define CEC_RX_OVERRUN BIT(6)
+
+struct stih_cec {
+ struct cec_adapter *adap;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *regs;
+ int irq;
+ u32 irq_status;
+};
+
+static int stih_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct stih_cec *cec = adap->priv;
+
+ if (enable) {
+ /* The doc says (input TCLK_PERIOD * CEC_CLK_DIV) = 0.1ms */
+ unsigned long clk_freq = clk_get_rate(cec->clk);
+ u32 cec_clk_div = clk_freq / 10000;
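+ /* e.g. a 45 MHz input clock (hypothetical rate) gives a
+ * divider of 4500, i.e. a 0.1 ms CEC tick */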
+
+ writel(cec_clk_div, cec->regs + CEC_CLK_DIV);
+
+ /* Configuration of the durations activating a timeout */
+ writel(CEC_SBIT_TOUT_47MS | (CEC_DBIT_TOUT_28MS << 4),
+ cec->regs + CEC_BIT_TOUT_THRESH);
+
+ /* Configuration of the smallest allowed duration for pulses */
+ writel(CEC_BIT_LPULSE_03MS | CEC_BIT_HPULSE_03MS,
+ cec->regs + CEC_BIT_PULSE_THRESH);
+
+ /* Minimum received bit period threshold */
+ writel(BIT(5) | BIT(7), cec->regs + CEC_TX_CTRL);
+
+ /* Configuration of transceiver data arrays */
+ writel(CEC_TX_ARRAY_EN | CEC_RX_ARRAY_EN | CEC_TX_STOP_ON_NACK,
+ cec->regs + CEC_DATA_ARRAY_CTRL);
+
+ /* Configuration of the control bits for CEC Transceiver */
+ writel(CEC_IN_FILTER_EN | CEC_EN | CEC_RX_RESET_EN,
+ cec->regs + CEC_CTRL);
+
+ /* Clear logical addresses */
+ writel(0, cec->regs + CEC_ADDR_TABLE);
+
+ /* Clear the status register */
+ writel(0x0, cec->regs + CEC_STATUS);
+
+ /* Enable the interrupts */
+ writel(CEC_TX_DONE_IRQ_EN | CEC_RX_DONE_IRQ_EN |
+ CEC_RX_SOM_IRQ_EN | CEC_RX_EOM_IRQ_EN |
+ CEC_ERROR_IRQ_EN,
+ cec->regs + CEC_IRQ_CTRL);
+
+ } else {
+ /* Clear logical addresses */
+ writel(0, cec->regs + CEC_ADDR_TABLE);
+
+ /* Clear the status register */
+ writel(0x0, cec->regs + CEC_STATUS);
+
+ /* Disable the interrupts */
+ writel(0, cec->regs + CEC_IRQ_CTRL);
+ }
+
+ return 0;
+}
+
+static int stih_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct stih_cec *cec = adap->priv;
+ u32 reg = readl(cec->regs + CEC_ADDR_TABLE);
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ reg = 0;
+ else
+ reg |= 1 << logical_addr;
+
+ writel(reg, cec->regs + CEC_ADDR_TABLE);
+
+ return 0;
+}
+
+static int stih_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct stih_cec *cec = adap->priv;
+ int i;
+
+ /* Copy message into registers */
+ for (i = 0; i < msg->len; i++)
+ writeb(msg->msg[i], cec->regs + CEC_TX_DATA_BASE + i);
+
+ /* Start transmission, configure hardware to add start and stop bits
+ * Signal free time is handled by the hardware
+ */
+ writel(CEC_TX_AUTO_SOM_EN | CEC_TX_AUTO_EOM_EN | CEC_TX_START |
+ msg->len, cec->regs + CEC_TX_ARRAY_CTRL);
+
+ return 0;
+}
+
+static void stih_tx_done(struct stih_cec *cec, u32 status)
+{
+ if (status & CEC_TX_ERROR) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_ERROR, 0, 0, 0, 1);
+ return;
+ }
+
+ if (status & CEC_TX_ARB_ERROR) {
+ cec_transmit_done(cec->adap,
+ CEC_TX_STATUS_ARB_LOST, 1, 0, 0, 0);
+ return;
+ }
+
+ if (!(status & CEC_TX_ACK_GET_STS)) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_NACK, 0, 1, 0, 0);
+ return;
+ }
+
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+}
+
+static void stih_rx_done(struct stih_cec *cec, u32 status)
+{
+ struct cec_msg msg = {};
+ u8 i;
+
+ if (status & CEC_RX_ERROR_MIN)
+ return;
+
+ if (status & CEC_RX_ERROR_MAX)
+ return;
+
+ msg.len = readl(cec->regs + CEC_DATA_ARRAY_STATUS) & CEC_RX_N_OF_BYTES;
+
+ if (!msg.len)
+ return;
+
+ if (msg.len > 16)
+ msg.len = 16;
+
+ for (i = 0; i < msg.len; i++)
+ msg.msg[i] = readb(cec->regs + CEC_RX_DATA_BASE + i);
+
+ cec_received_msg(cec->adap, &msg);
+}
+
+static irqreturn_t stih_cec_irq_handler_thread(int irq, void *priv)
+{
+ struct stih_cec *cec = priv;
+
+ if (cec->irq_status & CEC_TX_DONE_STS)
+ stih_tx_done(cec, cec->irq_status);
+
+ if (cec->irq_status & CEC_RX_DONE_STS)
+ stih_rx_done(cec, cec->irq_status);
+
+ cec->irq_status = 0;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stih_cec_irq_handler(int irq, void *priv)
+{
+ struct stih_cec *cec = priv;
+
+ cec->irq_status = readl(cec->regs + CEC_STATUS);
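+ /* write the bits back to clear the latched status (assumed W1C) */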
+ writel(cec->irq_status, cec->regs + CEC_STATUS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static const struct cec_adap_ops sti_cec_adap_ops = {
+ .adap_enable = stih_cec_adap_enable,
+ .adap_log_addr = stih_cec_adap_log_addr,
+ .adap_transmit = stih_cec_adap_transmit,
+};
+
+static int stih_cec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct stih_cec *cec;
+ int ret;
+
+ cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cec->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cec->regs))
+ return PTR_ERR(cec->regs);
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0)
+ return cec->irq;
+
+ ret = devm_request_threaded_irq(dev, cec->irq, stih_cec_irq_handler,
+ stih_cec_irq_handler_thread, 0,
+ pdev->name, cec);
+ if (ret)
+ return ret;
+
+ cec->clk = devm_clk_get(dev, "cec-clk");
+ if (IS_ERR(cec->clk)) {
+ dev_err(dev, "Cannot get cec clock\n");
+ return PTR_ERR(cec->clk);
+ }
+
+ cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
+ CEC_NAME,
+ CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH |
+ CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT,
+ 1, &pdev->dev);
+ ret = PTR_ERR_OR_ZERO(cec->adap);
+ if (ret)
+ return ret;
+
+ ret = cec_register_adapter(cec->adap);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, cec);
+ return 0;
+}
+
+static int stih_cec_remove(struct platform_device *pdev)
+{
+ struct stih_cec *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ return 0;
+}
+
+static const struct of_device_id stih_cec_match[] = {
+ {
+ .compatible = "st,stih-cec",
+ },
+ {},
+};
+
+static struct platform_driver stih_cec_pdrv = {
+ .probe = stih_cec_probe,
+ .remove = stih_cec_remove,
+ .driver = {
+ .name = CEC_NAME,
+ .of_match_table = stih_cec_match,
+ },
+};
+
+module_platform_driver(stih_cec_pdrv);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@linaro.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("STIH4xx CEC driver");
diff --git a/drivers/staging/media/tw686x-kh/Kconfig b/drivers/staging/media/tw686x-kh/Kconfig
deleted file mode 100644
index 6264d30edf5a..000000000000
--- a/drivers/staging/media/tw686x-kh/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-config VIDEO_TW686X_KH
- tristate "Intersil/Techwell TW686x Video For Linux"
- depends on VIDEO_DEV && PCI && VIDEO_V4L2
- depends on !(VIDEO_TW686X=y || VIDEO_TW686X=m) || COMPILE_TEST
- select VIDEOBUF2_DMA_SG
- help
- Support for Intersil/Techwell TW686x-based frame grabber cards.
-
- Currently supported chips:
- - TW6864 (4 video channels),
- - TW6865 (4 video channels, not tested, second generation chip),
- - TW6868 (8 video channels but only 4 first channels using
- built-in video decoder are supported, not tested),
- - TW6869 (8 video channels, second generation chip).
-
- To compile this driver as a module, choose M here: the module
- will be named tw686x-kh.
diff --git a/drivers/staging/media/tw686x-kh/Makefile b/drivers/staging/media/tw686x-kh/Makefile
deleted file mode 100644
index 2a36a38cf30e..000000000000
--- a/drivers/staging/media/tw686x-kh/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-tw686x-kh-objs := tw686x-kh-core.o tw686x-kh-video.o
-
-obj-$(CONFIG_VIDEO_TW686X_KH) += tw686x-kh.o
diff --git a/drivers/staging/media/tw686x-kh/TODO b/drivers/staging/media/tw686x-kh/TODO
deleted file mode 100644
index 480a495b11fb..000000000000
--- a/drivers/staging/media/tw686x-kh/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-TODO:
-
-- implement V4L2_FIELD_INTERLACED* mode(s).
-- add audio support
-
-Please Cc: patches to Krzysztof Halasa <khalasa@piap.pl>.
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-core.c b/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
deleted file mode 100644
index 03b3b62c59c4..000000000000
--- a/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (C) 2015 Industrial Research Institute for Automation
- * and Measurements PIAP
- *
- * Written by Krzysztof Hałasa.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "tw686x-kh.h"
-#include "tw686x-kh-regs.h"
-
-static irqreturn_t tw686x_irq(int irq, void *dev_id)
-{
- struct tw686x_dev *dev = (struct tw686x_dev *)dev_id;
- u32 int_status = reg_read(dev, INT_STATUS); /* cleared on read */
- unsigned long flags;
- unsigned int handled = 0;
-
- if (int_status) {
- spin_lock_irqsave(&dev->irq_lock, flags);
- dev->dma_requests |= int_status;
- spin_unlock_irqrestore(&dev->irq_lock, flags);
-
- if (int_status & 0xFF0000FF)
- handled = tw686x_kh_video_irq(dev);
- }
-
- return IRQ_RETVAL(handled);
-}
-
-static int tw686x_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
-{
- struct tw686x_dev *dev;
- int err;
-
- dev = devm_kzalloc(&pci_dev->dev, sizeof(*dev) +
- (pci_id->driver_data & TYPE_MAX_CHANNELS) *
- sizeof(dev->video_channels[0]), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- sprintf(dev->name, "TW%04X", pci_dev->device);
- dev->type = pci_id->driver_data;
-
- pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx\n", dev->name,
- pci_name(pci_dev), pci_dev->irq,
- (unsigned long)pci_resource_start(pci_dev, 0));
-
- dev->pci_dev = pci_dev;
- if (pcim_enable_device(pci_dev))
- return -EIO;
-
- pci_set_master(pci_dev);
-
- if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
- pr_err("%s: 32-bit PCI DMA not supported\n", dev->name);
- return -EIO;
- }
-
- err = pci_request_regions(pci_dev, dev->name);
- if (err < 0) {
- pr_err("%s: Unable to get MMIO region\n", dev->name);
- return err;
- }
-
- dev->mmio = pci_ioremap_bar(pci_dev, 0);
- if (!dev->mmio) {
- pr_err("%s: Unable to remap MMIO region\n", dev->name);
- return -EIO;
- }
-
- reg_write(dev, SYS_SOFT_RST, 0x0F); /* Reset all subsystems */
- mdelay(1);
-
- reg_write(dev, SRST[0], 0x3F);
- if (max_channels(dev) > 4)
- reg_write(dev, SRST[1], 0x3F);
- reg_write(dev, DMA_CMD, 0);
- reg_write(dev, DMA_CHANNEL_ENABLE, 0);
- reg_write(dev, DMA_CHANNEL_TIMEOUT, 0x3EFF0FF0);
- reg_write(dev, DMA_TIMER_INTERVAL, 0x38000);
- reg_write(dev, DMA_CONFIG, 0xFFFFFF04);
-
- spin_lock_init(&dev->irq_lock);
-
- err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw686x_irq,
- IRQF_SHARED, dev->name, dev);
- if (err < 0) {
- pr_err("%s: Unable to get IRQ\n", dev->name);
- return err;
- }
-
- err = tw686x_kh_video_init(dev);
- if (err)
- return err;
-
- pci_set_drvdata(pci_dev, dev);
- return 0;
-}
-
-static void tw686x_remove(struct pci_dev *pci_dev)
-{
- struct tw686x_dev *dev = pci_get_drvdata(pci_dev);
-
- tw686x_kh_video_free(dev);
-}
-
-/* driver_data is number of A/V channels */
-static const struct pci_device_id tw686x_pci_tbl[] = {
- {PCI_DEVICE(0x1797, 0x6864), .driver_data = 4},
- /* not tested */
- {PCI_DEVICE(0x1797, 0x6865), .driver_data = 4 | TYPE_SECOND_GEN},
- /* TW6868 supports 8 A/V channels with an external TW2865 chip -
- not supported by the driver */
- {PCI_DEVICE(0x1797, 0x6868), .driver_data = 4}, /* not tested */
- {PCI_DEVICE(0x1797, 0x6869), .driver_data = 8 | TYPE_SECOND_GEN},
- {}
-};
-
-static struct pci_driver tw686x_pci_driver = {
- .name = "tw686x-kh",
- .id_table = tw686x_pci_tbl,
- .probe = tw686x_probe,
- .remove = tw686x_remove,
-};
-
-MODULE_DESCRIPTION("Driver for video frame grabber cards based on Intersil/Techwell TW686[4589]");
-MODULE_AUTHOR("Krzysztof Halasa");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, tw686x_pci_tbl);
-module_pci_driver(tw686x_pci_driver);
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h b/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
deleted file mode 100644
index 53e1889babd0..000000000000
--- a/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* DMA controller registers */
-#define REG8_1(a0) ((const u16[8]) {a0, a0 + 1, a0 + 2, a0 + 3, \
- a0 + 4, a0 + 5, a0 + 6, a0 + 7})
-#define REG8_2(a0) ((const u16[8]) {a0, a0 + 2, a0 + 4, a0 + 6, \
- a0 + 8, a0 + 0xA, a0 + 0xC, a0 + 0xE})
-#define REG8_8(a0) ((const u16[8]) {a0, a0 + 8, a0 + 0x10, a0 + 0x18, \
- a0 + 0x20, a0 + 0x28, a0 + 0x30, a0 + 0x38})
-#define INT_STATUS 0x00
-#define PB_STATUS 0x01
-#define DMA_CMD 0x02
-#define VIDEO_FIFO_STATUS 0x03
-#define VIDEO_CHANNEL_ID 0x04
-#define VIDEO_PARSER_STATUS 0x05
-#define SYS_SOFT_RST 0x06
-#define DMA_PAGE_TABLE0_ADDR ((const u16[8]) {0x08, 0xD0, 0xD2, 0xD4, \
- 0xD6, 0xD8, 0xDA, 0xDC})
-#define DMA_PAGE_TABLE1_ADDR ((const u16[8]) {0x09, 0xD1, 0xD3, 0xD5, \
- 0xD7, 0xD9, 0xDB, 0xDD})
-#define DMA_CHANNEL_ENABLE 0x0A
-#define DMA_CONFIG 0x0B
-#define DMA_TIMER_INTERVAL 0x0C
-#define DMA_CHANNEL_TIMEOUT 0x0D
-#define VDMA_CHANNEL_CONFIG REG8_1(0x10)
-#define ADMA_P_ADDR REG8_2(0x18)
-#define ADMA_B_ADDR REG8_2(0x19)
-#define DMA10_P_ADDR 0x28 /* ??? */
-#define DMA10_B_ADDR 0x29
-#define VIDEO_CONTROL1 0x2A
-#define VIDEO_CONTROL2 0x2B
-#define AUDIO_CONTROL1 0x2C
-#define AUDIO_CONTROL2 0x2D
-#define PHASE_REF 0x2E
-#define GPIO_REG 0x2F
-#define INTL_HBAR_CTRL REG8_1(0x30)
-#define AUDIO_CONTROL3 0x38
-#define VIDEO_FIELD_CTRL REG8_1(0x39)
-#define HSCALER_CTRL REG8_1(0x42)
-#define VIDEO_SIZE REG8_1(0x4A)
-#define VIDEO_SIZE_F2 REG8_1(0x52)
-#define MD_CONF REG8_1(0x60)
-#define MD_INIT REG8_1(0x68)
-#define MD_MAP0 REG8_1(0x70)
-#define VDMA_P_ADDR REG8_8(0x80) /* not used in DMA SG mode */
-#define VDMA_WHP REG8_8(0x81)
-#define VDMA_B_ADDR REG8_8(0x82)
-#define VDMA_F2_P_ADDR REG8_8(0x84)
-#define VDMA_F2_WHP REG8_8(0x85)
-#define VDMA_F2_B_ADDR REG8_8(0x86)
-#define EP_REG_ADDR 0xFE
-#define EP_REG_DATA 0xFF
-
-/* Video decoder registers */
-#define VDREG8(a0) ((const u16[8]) { \
- a0 + 0x000, a0 + 0x010, a0 + 0x020, a0 + 0x030, \
- a0 + 0x100, a0 + 0x110, a0 + 0x120, a0 + 0x130})
-#define VIDSTAT VDREG8(0x100)
-#define BRIGHT VDREG8(0x101)
-#define CONTRAST VDREG8(0x102)
-#define SHARPNESS VDREG8(0x103)
-#define SAT_U VDREG8(0x104)
-#define SAT_V VDREG8(0x105)
-#define HUE VDREG8(0x106)
-#define CROP_HI VDREG8(0x107)
-#define VDELAY_LO VDREG8(0x108)
-#define VACTIVE_LO VDREG8(0x109)
-#define HDELAY_LO VDREG8(0x10A)
-#define HACTIVE_LO VDREG8(0x10B)
-#define MVSN VDREG8(0x10C)
-#define STATUS2 VDREG8(0x10C)
-#define SDT VDREG8(0x10E)
-#define SDT_EN VDREG8(0x10F)
-
-#define VSCALE_LO VDREG8(0x144)
-#define SCALE_HI VDREG8(0x145)
-#define HSCALE_LO VDREG8(0x146)
-#define F2CROP_HI VDREG8(0x147)
-#define F2VDELAY_LO VDREG8(0x148)
-#define F2VACTIVE_LO VDREG8(0x149)
-#define F2HDELAY_LO VDREG8(0x14A)
-#define F2HACTIVE_LO VDREG8(0x14B)
-#define F2VSCALE_LO VDREG8(0x14C)
-#define F2SCALE_HI VDREG8(0x14D)
-#define F2HSCALE_LO VDREG8(0x14E)
-#define F2CNT VDREG8(0x14F)
-
-#define VDREG2(a0) ((const u16[2]) {a0, a0 + 0x100})
-#define SRST VDREG2(0x180)
-#define ACNTL VDREG2(0x181)
-#define ACNTL2 VDREG2(0x182)
-#define CNTRL1 VDREG2(0x183)
-#define CKHY VDREG2(0x184)
-#define SHCOR VDREG2(0x185)
-#define CORING VDREG2(0x186)
-#define CLMPG VDREG2(0x187)
-#define IAGC VDREG2(0x188)
-#define VCTRL1 VDREG2(0x18F)
-#define MISC1 VDREG2(0x194)
-#define LOOP VDREG2(0x195)
-#define MISC2 VDREG2(0x196)
-
-#define CLMD VDREG2(0x197)
-#define AIGAIN ((const u16[8]) {0x1D0, 0x1D1, 0x1D2, 0x1D3, \
- 0x2D0, 0x2D1, 0x2D2, 0x2D3})
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-video.c b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
deleted file mode 100644
index 9bf32aec2fc6..000000000000
--- a/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
+++ /dev/null
@@ -1,813 +0,0 @@
-/*
- * Copyright (C) 2015 Industrial Research Institute for Automation
- * and Measurements PIAP
- *
- * Written by Krzysztof Hałasa.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-event.h>
-#include "tw686x-kh.h"
-#include "tw686x-kh-regs.h"
-
-#define MAX_SG_ENTRY_SIZE (/* 8192 - 128 */ 4096)
-#define MAX_SG_DESC_COUNT 256 /* PAL 704x576 needs up to 198 4-KB pages */
-
-static const struct tw686x_format formats[] = {
- {
- .name = "4:2:2 packed, UYVY", /* aka Y422 */
- .fourcc = V4L2_PIX_FMT_UYVY,
- .mode = 0,
- .depth = 16,
- }, {
-#if 0
- .name = "4:2:0 packed, YUV",
- .mode = 1, /* non-standard */
- .depth = 12,
- }, {
- .name = "4:1:1 packed, YUV",
- .mode = 2, /* non-standard */
- .depth = 12,
- }, {
-#endif
- .name = "4:1:1 packed, YUV",
- .fourcc = V4L2_PIX_FMT_Y41P,
- .mode = 3,
- .depth = 12,
- }, {
- .name = "15 bpp RGB",
- .fourcc = V4L2_PIX_FMT_RGB555,
- .mode = 4,
- .depth = 16,
- }, {
- .name = "16 bpp RGB",
- .fourcc = V4L2_PIX_FMT_RGB565,
- .mode = 5,
- .depth = 16,
- }, {
- .name = "4:2:2 packed, YUYV",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .mode = 6,
- .depth = 16,
- }
- /* mode 7 is "reserved" */
-};
-
-static const v4l2_std_id video_standards[7] = {
- V4L2_STD_NTSC,
- V4L2_STD_PAL,
- V4L2_STD_SECAM,
- V4L2_STD_NTSC_443,
- V4L2_STD_PAL_M,
- V4L2_STD_PAL_N,
- V4L2_STD_PAL_60,
-};
-
-static const struct tw686x_format *format_by_fourcc(unsigned int fourcc)
-{
- unsigned int cnt;
-
- for (cnt = 0; cnt < ARRAY_SIZE(formats); cnt++)
- if (formats[cnt].fourcc == fourcc)
- return &formats[cnt];
- return NULL;
-}
-
-static void tw686x_get_format(struct tw686x_video_channel *vc,
- struct v4l2_format *f)
-{
- const struct tw686x_format *format;
- unsigned int width, height, height_div = 1;
-
- format = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!format) {
- format = &formats[0];
- f->fmt.pix.pixelformat = format->fourcc;
- }
-
- width = 704;
- if (f->fmt.pix.width < width * 3 / 4 /* halfway */)
- width /= 2;
-
- height = (vc->video_standard & V4L2_STD_625_50) ? 576 : 480;
- if (f->fmt.pix.height < height * 3 / 4 /* halfway */)
- height_div = 2;
-
- switch (f->fmt.pix.field) {
- case V4L2_FIELD_TOP:
- case V4L2_FIELD_BOTTOM:
- height_div = 2;
- break;
- case V4L2_FIELD_SEQ_BT:
- if (height_div > 1)
- f->fmt.pix.field = V4L2_FIELD_BOTTOM;
- break;
- default:
- if (height_div > 1)
- f->fmt.pix.field = V4L2_FIELD_TOP;
- else
- f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
- }
- height /= height_div;
-
- f->fmt.pix.width = width;
- f->fmt.pix.height = height;
- f->fmt.pix.bytesperline = f->fmt.pix.width * format->depth / 8;
- f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-}
-
-/* video queue operations */
-
-static int tw686x_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
- unsigned int *nplanes, unsigned int sizes[],
- struct device *alloc_devs[])
-{
- struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
- unsigned int size = vc->width * vc->height * vc->format->depth / 8;
-
- if (*nbuffers < 2)
- *nbuffers = 2;
-
- if (*nplanes)
- return sizes[0] < size ? -EINVAL : 0;
-
- sizes[0] = size;
- *nplanes = 1; /* packed formats only */
- return 0;
-}
-
-static void tw686x_buf_queue(struct vb2_buffer *vb)
-{
- struct tw686x_video_channel *vc = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct tw686x_vb2_buf *buf;
-
- buf = container_of(vbuf, struct tw686x_vb2_buf, vb);
-
- spin_lock(&vc->qlock);
- list_add_tail(&buf->list, &vc->vidq_queued);
- spin_unlock(&vc->qlock);
-}
-
-static void setup_descs(struct tw686x_video_channel *vc, unsigned int n)
-{
-loop:
- while (!list_empty(&vc->vidq_queued)) {
- struct vdma_desc *descs = vc->sg_descs[n];
- struct tw686x_vb2_buf *buf;
- struct sg_table *vbuf;
- struct scatterlist *sg;
- unsigned int buf_len, count = 0;
- int i;
-
- buf = list_first_entry(&vc->vidq_queued, struct tw686x_vb2_buf,
- list);
- list_del(&buf->list);
-
- buf_len = vc->width * vc->height * vc->format->depth / 8;
- if (vb2_plane_size(&buf->vb.vb2_buf, 0) < buf_len) {
- pr_err("Video buffer size too small\n");
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- goto loop; /* try another */
- }
-
- vbuf = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
- for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
- dma_addr_t phys = sg_dma_address(sg);
- unsigned int len = sg_dma_len(sg);
-
- while (len && buf_len) {
- unsigned int entry_len = min_t(unsigned int, len,
- MAX_SG_ENTRY_SIZE);
- entry_len = min(entry_len, buf_len);
- if (count == MAX_SG_DESC_COUNT) {
- pr_err("Video buffer size too fragmented\n");
- vb2_buffer_done(&buf->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- goto loop;
- }
- descs[count].phys = cpu_to_le32(phys);
- descs[count++].flags_length =
- cpu_to_le32(0x40000000 /* available */ |
- entry_len);
- phys += entry_len;
- len -= entry_len;
- buf_len -= entry_len;
- }
- if (!buf_len)
- break;
- }
-
- /* clear the remaining entries */
- while (count < MAX_SG_DESC_COUNT) {
- descs[count].phys = 0;
- descs[count++].flags_length = 0; /* unavailable */
- }
-
- buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
- vc->curr_bufs[n] = buf;
- return;
- }
- vc->curr_bufs[n] = NULL;
-}
-
-/* On TW6864 and TW6868, all channels share the pair of video DMA SG tables,
- with 10-bit start_idx and end_idx determining start and end of frame buffer
- for particular channel.
- TW6868 with all its 8 channels would be problematic (only 127 SG entries per
- channel) but we support only 4 channels on this chip anyway (the first
- 4 channels are driven with internal video decoder, the other 4 would require
- an external TW286x part).
-
- On TW6865 and TW6869, each channel has its own DMA SG table, with indexes
- starting with 0. Both chips have complete sets of internal video decoders
- (respectively 4 or 8-channel).
-
- All chips have separate SG tables for two video frames. */
-
-static void setup_dma_cfg(struct tw686x_video_channel *vc)
-{
- unsigned int field_width = 704;
- unsigned int field_height = (vc->video_standard & V4L2_STD_625_50) ?
- 288 : 240;
- unsigned int start_idx = is_second_gen(vc->dev) ? 0 :
- vc->ch * MAX_SG_DESC_COUNT;
- unsigned int end_idx = start_idx + MAX_SG_DESC_COUNT - 1;
- u32 dma_cfg = (0 << 30) /* input selection */ |
- (1 << 29) /* field2 dropped (if any) */ |
- ((vc->height < 300) << 28) /* field dropping */ |
- (1 << 27) /* master */ |
- (0 << 25) /* master channel (for slave only) */ |
- (0 << 24) /* (no) vertical (line) decimation */ |
- ((vc->width < 400) << 23) /* horizontal decimation */ |
- (vc->format->mode << 20) /* output video format */ |
- (end_idx << 10) /* DMA end index */ |
- start_idx /* DMA start index */;
- u32 reg;
-
- reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], dma_cfg);
- reg_write(vc->dev, VIDEO_SIZE[vc->ch], (1 << 31) | (field_height << 16)
- | field_width);
- reg = reg_read(vc->dev, VIDEO_CONTROL1);
- if (vc->video_standard & V4L2_STD_625_50)
- reg |= 1 << (vc->ch + 13);
- else
- reg &= ~(1 << (vc->ch + 13));
- reg_write(vc->dev, VIDEO_CONTROL1, reg);
-}
-
-static int tw686x_start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
- struct tw686x_dev *dev = vc->dev;
- u32 dma_ch_mask;
- unsigned int n;
-
- setup_dma_cfg(vc);
-
- /* queue video buffers if available */
- spin_lock(&vc->qlock);
- for (n = 0; n < 2; n++)
- setup_descs(vc, n);
- spin_unlock(&vc->qlock);
-
- dev->video_active |= 1 << vc->ch;
- vc->seq = 0;
- dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE) | (1 << vc->ch);
- reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
- reg_write(dev, DMA_CMD, (1 << 31) | dma_ch_mask);
- return 0;
-}
-
-static void tw686x_stop_streaming(struct vb2_queue *vq)
-{
- struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
- struct tw686x_dev *dev = vc->dev;
- u32 dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE);
- u32 dma_cmd = reg_read(dev, DMA_CMD);
- unsigned int n;
-
- dma_ch_mask &= ~(1 << vc->ch);
- reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
-
- dev->video_active &= ~(1 << vc->ch);
-
- dma_cmd &= ~(1 << vc->ch);
- reg_write(dev, DMA_CMD, dma_cmd);
-
- if (!dev->video_active) {
- reg_write(dev, DMA_CMD, 0);
- reg_write(dev, DMA_CHANNEL_ENABLE, 0);
- }
-
- spin_lock(&vc->qlock);
- while (!list_empty(&vc->vidq_queued)) {
- struct tw686x_vb2_buf *buf;
-
- buf = list_entry(vc->vidq_queued.next, struct tw686x_vb2_buf,
- list);
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- }
-
- for (n = 0; n < 2; n++)
- if (vc->curr_bufs[n])
- vb2_buffer_done(&vc->curr_bufs[n]->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
-
- spin_unlock(&vc->qlock);
-}
-
-static struct vb2_ops tw686x_video_qops = {
- .queue_setup = tw686x_queue_setup,
- .buf_queue = tw686x_buf_queue,
- .start_streaming = tw686x_start_streaming,
- .stop_streaming = tw686x_stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
-
-static int tw686x_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct tw686x_video_channel *vc;
- struct tw686x_dev *dev;
- unsigned int ch;
-
- vc = container_of(ctrl->handler, struct tw686x_video_channel,
- ctrl_handler);
- dev = vc->dev;
- ch = vc->ch;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- reg_write(dev, BRIGHT[ch], ctrl->val & 0xFF);
- return 0;
-
- case V4L2_CID_CONTRAST:
- reg_write(dev, CONTRAST[ch], ctrl->val);
- return 0;
-
- case V4L2_CID_SATURATION:
- reg_write(dev, SAT_U[ch], ctrl->val);
- reg_write(dev, SAT_V[ch], ctrl->val);
- return 0;
-
- case V4L2_CID_HUE:
- reg_write(dev, HUE[ch], ctrl->val & 0xFF);
- return 0;
- }
-
- return -EINVAL;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = tw686x_s_ctrl,
-};
-
-static int tw686x_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct tw686x_video_channel *vc = video_drvdata(file);
-
- f->fmt.pix.width = vc->width;
- f->fmt.pix.height = vc->height;
- f->fmt.pix.field = vc->field;
- f->fmt.pix.pixelformat = vc->format->fourcc;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.bytesperline = f->fmt.pix.width * vc->format->depth / 8;
- f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
- return 0;
-}
-
-static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- tw686x_get_format(video_drvdata(file), f);
- return 0;
-}
-
-static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct tw686x_video_channel *vc = video_drvdata(file);
-
- tw686x_get_format(vc, f);
- vc->format = format_by_fourcc(f->fmt.pix.pixelformat);
- vc->field = f->fmt.pix.field;
- vc->width = f->fmt.pix.width;
- vc->height = f->fmt.pix.height;
- return 0;
-}
-
-static int tw686x_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct tw686x_video_channel *vc = video_drvdata(file);
- struct tw686x_dev *dev = vc->dev;
-
- strcpy(cap->driver, "tw686x-kh");
- strcpy(cap->card, dev->name);
- sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci_dev));
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
- return 0;
-}
-
-static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
-{
- struct tw686x_video_channel *vc = video_drvdata(file);
- unsigned int cnt;
- u32 sdt = 0; /* default */
-
- for (cnt = 0; cnt < ARRAY_SIZE(video_standards); cnt++)
- if (id & video_standards[cnt]) {
- sdt = cnt;
- break;
- }
-
- reg_write(vc->dev, SDT[vc->ch], sdt);
- vc->video_standard = video_standards[sdt];
- return 0;
-}
-
-static int tw686x_g_std(struct file *file, void *priv, v4l2_std_id *id)
-{
- struct tw686x_video_channel *vc = video_drvdata(file);
-
- *id = vc->video_standard;
- return 0;
-}
-
-static int tw686x_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- if (f->index >= ARRAY_SIZE(formats))
- return -EINVAL;
-
- strlcpy(f->description, formats[f->index].name, sizeof(f->description));
- f->pixelformat = formats[f->index].fourcc;
- return 0;
-}
-
-static int tw686x_g_parm(struct file *file, void *priv,
- struct v4l2_streamparm *sp)
-{
- struct tw686x_video_channel *vc = video_drvdata(file);
-
- if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- memset(&sp->parm.capture, 0, sizeof(sp->parm.capture));
- sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- v4l2_video_std_frame_period(vc->video_standard,
- &sp->parm.capture.timeperframe);
-
- return 0;
-}
-
-static int tw686x_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- /* the chip has internal multiplexer, support can be added
- if the actual hw uses it */
- if (inp->index)
- return -EINVAL;
-
- snprintf(inp->name, sizeof(inp->name), "Composite");
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- inp->std = V4L2_STD_ALL;
- inp->capabilities = V4L2_IN_CAP_STD;
- return 0;
-}
-
-static int tw686x_g_input(struct file *file, void *priv, unsigned int *v)
-{
- *v = 0;
- return 0;
-}
-
-static int tw686x_s_input(struct file *file, void *priv, unsigned int v)
-{
- if (v)
- return -EINVAL;
- return 0;
-}
-
-static const struct v4l2_file_operations tw686x_video_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .unlocked_ioctl = video_ioctl2,
- .release = vb2_fop_release,
- .poll = vb2_fop_poll,
- .read = vb2_fop_read,
- .mmap = vb2_fop_mmap,
-};
-
-static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
- .vidioc_querycap = tw686x_querycap,
- .vidioc_enum_fmt_vid_cap = tw686x_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = tw686x_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = tw686x_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = tw686x_try_fmt_vid_cap,
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_g_std = tw686x_g_std,
- .vidioc_s_std = tw686x_s_std,
- .vidioc_g_parm = tw686x_g_parm,
- .vidioc_enum_input = tw686x_enum_input,
- .vidioc_g_input = tw686x_g_input,
- .vidioc_s_input = tw686x_s_input,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static int video_thread(void *arg)
-{
- struct tw686x_dev *dev = arg;
- DECLARE_WAITQUEUE(wait, current);
-
- set_freezable();
- add_wait_queue(&dev->video_thread_wait, &wait);
-
- while (1) {
- long timeout = schedule_timeout_interruptible(HZ);
- unsigned int ch;
-
- if (timeout == -ERESTARTSYS || kthread_should_stop())
- break;
-
- for (ch = 0; ch < max_channels(dev); ch++) {
- struct tw686x_video_channel *vc;
- unsigned long flags;
- u32 request, n, stat = VB2_BUF_STATE_DONE;
-
- vc = &dev->video_channels[ch];
- if (!(dev->video_active & (1 << ch)))
- continue;
-
- spin_lock_irq(&dev->irq_lock);
- request = dev->dma_requests & (0x01000001 << ch);
- if (request)
- dev->dma_requests &= ~request;
- spin_unlock_irq(&dev->irq_lock);
-
- if (!request)
- continue;
-
- request >>= ch;
-
- /* handle channel events */
- if ((request & 0x01000000) |
- (reg_read(dev, VIDEO_FIFO_STATUS) & (0x01010001 << ch)) |
- (reg_read(dev, VIDEO_PARSER_STATUS) & (0x00000101 << ch))) {
- /* DMA Errors - reset channel */
- u32 reg;
-
- spin_lock_irqsave(&dev->irq_lock, flags);
- reg = reg_read(dev, DMA_CMD);
- /* Reset DMA channel */
- reg_write(dev, DMA_CMD, reg & ~(1 << ch));
- reg_write(dev, DMA_CMD, reg);
- spin_unlock_irqrestore(&dev->irq_lock, flags);
- stat = VB2_BUF_STATE_ERROR;
- }
-
- /* handle video stream */
- mutex_lock(&vc->vb_mutex);
- spin_lock(&vc->qlock);
- n = !!(reg_read(dev, PB_STATUS) & (1 << ch));
- if (vc->curr_bufs[n]) {
- struct vb2_v4l2_buffer *vb;
-
- vb = &vc->curr_bufs[n]->vb;
- vb->vb2_buf.timestamp = ktime_get_ns();
- vb->field = vc->field;
- if (V4L2_FIELD_HAS_BOTH(vc->field))
- vb->sequence = vc->seq++;
- else
- vb->sequence = (vc->seq++) / 2;
- vb2_set_plane_payload(&vb->vb2_buf, 0,
- vc->width * vc->height * vc->format->depth / 8);
- vb2_buffer_done(&vb->vb2_buf, stat);
- }
- setup_descs(vc, n);
- spin_unlock(&vc->qlock);
- mutex_unlock(&vc->vb_mutex);
- }
- try_to_freeze();
- }
-
- remove_wait_queue(&dev->video_thread_wait, &wait);
- return 0;
-}
-
-int tw686x_kh_video_irq(struct tw686x_dev *dev)
-{
- unsigned long flags, handled = 0;
- u32 requests;
-
- spin_lock_irqsave(&dev->irq_lock, flags);
- requests = dev->dma_requests;
- spin_unlock_irqrestore(&dev->irq_lock, flags);
-
- if (requests & dev->video_active) {
- wake_up_interruptible_all(&dev->video_thread_wait);
- handled = 1;
- }
- return handled;
-}
-
-void tw686x_kh_video_free(struct tw686x_dev *dev)
-{
- unsigned int ch, n;
-
- if (dev->video_thread)
- kthread_stop(dev->video_thread);
-
- for (ch = 0; ch < max_channels(dev); ch++) {
- struct tw686x_video_channel *vc = &dev->video_channels[ch];
-
- v4l2_ctrl_handler_free(&vc->ctrl_handler);
- if (vc->device)
- video_unregister_device(vc->device);
- for (n = 0; n < 2; n++) {
- struct dma_desc *descs = &vc->sg_tables[n];
-
- if (descs->virt)
- pci_free_consistent(dev->pci_dev, descs->size,
- descs->virt, descs->phys);
- }
- }
-
- v4l2_device_unregister(&dev->v4l2_dev);
-}
-
-#define SG_TABLE_SIZE (MAX_SG_DESC_COUNT * sizeof(struct vdma_desc))
-
-int tw686x_kh_video_init(struct tw686x_dev *dev)
-{
- unsigned int ch, n;
- int err;
-
- init_waitqueue_head(&dev->video_thread_wait);
-
- err = v4l2_device_register(&dev->pci_dev->dev, &dev->v4l2_dev);
- if (err)
- return err;
-
- reg_write(dev, VIDEO_CONTROL1, 0); /* NTSC, disable scaler */
- reg_write(dev, PHASE_REF, 0x00001518); /* Scatter-gather DMA mode */
-
- /* setup required SG table sizes */
- for (n = 0; n < 2; n++)
- if (is_second_gen(dev)) {
- /* TW 6865, TW6869 - each channel needs a pair of
- descriptor tables */
- for (ch = 0; ch < max_channels(dev); ch++)
- dev->video_channels[ch].sg_tables[n].size =
- SG_TABLE_SIZE;
-
- } else
- /* TW 6864, TW6868 - we need to allocate a pair of
- descriptor tables, common for all channels.
- Each table will be bigger than 4 KB. */
- dev->video_channels[0].sg_tables[n].size =
- max_channels(dev) * SG_TABLE_SIZE;
-
- /* allocate SG tables and initialize video channels */
- for (ch = 0; ch < max_channels(dev); ch++) {
- struct tw686x_video_channel *vc = &dev->video_channels[ch];
- struct video_device *vdev;
-
- mutex_init(&vc->vb_mutex);
- spin_lock_init(&vc->qlock);
- INIT_LIST_HEAD(&vc->vidq_queued);
-
- vc->dev = dev;
- vc->ch = ch;
-
- /* default settings: NTSC */
- vc->format = &formats[0];
- vc->video_standard = V4L2_STD_NTSC;
- reg_write(vc->dev, SDT[vc->ch], 0);
- vc->field = V4L2_FIELD_SEQ_BT;
- vc->width = 704;
- vc->height = 480;
-
- for (n = 0; n < 2; n++) {
- void *cpu;
-
- if (vc->sg_tables[n].size) {
- unsigned int reg = n ? DMA_PAGE_TABLE1_ADDR[ch] :
- DMA_PAGE_TABLE0_ADDR[ch];
-
- cpu = pci_alloc_consistent(dev->pci_dev,
- vc->sg_tables[n].size,
- &vc->sg_tables[n].phys);
- if (!cpu) {
- pr_err("Error allocating video DMA scatter-gather tables\n");
- err = -ENOMEM;
- goto error;
- }
- vc->sg_tables[n].virt = cpu;
- reg_write(dev, reg, vc->sg_tables[n].phys);
- } else
- cpu = dev->video_channels[0].sg_tables[n].virt +
- ch * SG_TABLE_SIZE;
-
- vc->sg_descs[n] = cpu;
- }
-
- reg_write(dev, VCTRL1[0], 0x24);
- reg_write(dev, LOOP[0], 0xA5);
- if (max_channels(dev) > 4) {
- reg_write(dev, VCTRL1[1], 0x24);
- reg_write(dev, LOOP[1], 0xA5);
- }
- reg_write(dev, VIDEO_FIELD_CTRL[ch], 0);
- reg_write(dev, VDELAY_LO[ch], 0x14);
-
- vdev = video_device_alloc();
- if (!vdev) {
- pr_warn("Unable to allocate video device\n");
- err = -ENOMEM;
- goto error;
- }
-
- vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- vc->vidq.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
- vc->vidq.drv_priv = vc;
- vc->vidq.buf_struct_size = sizeof(struct tw686x_vb2_buf);
- vc->vidq.ops = &tw686x_video_qops;
- vc->vidq.mem_ops = &vb2_dma_sg_memops;
- vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vc->vidq.min_buffers_needed = 2;
- vc->vidq.lock = &vc->vb_mutex;
- vc->vidq.dev = &dev->pci_dev->dev;
- vc->vidq.gfp_flags = GFP_DMA32;
-
- err = vb2_queue_init(&vc->vidq);
- if (err)
- goto error;
-
- strcpy(vdev->name, "TW686x-video");
- snprintf(vdev->name, sizeof(vdev->name), "%s video", dev->name);
- vdev->fops = &tw686x_video_fops;
- vdev->ioctl_ops = &tw686x_video_ioctl_ops;
- vdev->release = video_device_release;
- vdev->v4l2_dev = &dev->v4l2_dev;
- vdev->queue = &vc->vidq;
- vdev->tvnorms = V4L2_STD_ALL;
- vdev->minor = -1;
- vdev->lock = &vc->vb_mutex;
-
- dev->video_channels[ch].device = vdev;
- video_set_drvdata(vdev, vc);
- err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
- if (err < 0)
- goto error;
-
- v4l2_ctrl_handler_init(&vc->ctrl_handler,
- 4 /* number of controls */);
- vdev->ctrl_handler = &vc->ctrl_handler;
- v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
- V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
- v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
- V4L2_CID_CONTRAST, 0, 255, 1, 64);
- v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
- V4L2_CID_SATURATION, 0, 255, 1, 128);
- v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops, V4L2_CID_HUE,
- -124, 127, 1, 0);
- err = vc->ctrl_handler.error;
- if (err)
- goto error;
-
- v4l2_ctrl_handler_setup(&vc->ctrl_handler);
- }
-
- dev->video_thread = kthread_run(video_thread, dev, "tw686x_video");
- if (IS_ERR(dev->video_thread)) {
- err = PTR_ERR(dev->video_thread);
- goto error;
- }
-
- return 0;
-
-error:
- tw686x_kh_video_free(dev);
- return err;
-}
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh.h b/drivers/staging/media/tw686x-kh/tw686x-kh.h
deleted file mode 100644
index 6284a90d6fe3..000000000000
--- a/drivers/staging/media/tw686x-kh/tw686x-kh.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (C) 2015 Industrial Research Institute for Automation
- * and Measurements PIAP
- *
- * Written by Krzysztof Hałasa.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/freezer.h>
-#include <linux/interrupt.h>
-#include <linux/kthread.h>
-#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <media/videobuf2-dma-sg.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-
-#define TYPE_MAX_CHANNELS 0x0F
-#define TYPE_SECOND_GEN 0x10
-
-struct tw686x_format {
- char *name;
- unsigned int fourcc;
- unsigned int depth;
- unsigned int mode;
-};
-
-struct dma_desc {
- dma_addr_t phys;
- void *virt;
- unsigned int size;
-};
-
-struct vdma_desc {
- __le32 flags_length; /* 3 MSBits for flags, 13 LSBits for length */
- __le32 phys;
-};
-
-struct tw686x_vb2_buf {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
-};
-
-struct tw686x_video_channel {
- struct tw686x_dev *dev;
-
- struct vb2_queue vidq;
- struct list_head vidq_queued;
- struct video_device *device;
- struct dma_desc sg_tables[2];
- struct tw686x_vb2_buf *curr_bufs[2];
- struct vdma_desc *sg_descs[2];
-
- struct v4l2_ctrl_handler ctrl_handler;
- const struct tw686x_format *format;
- struct mutex vb_mutex;
- spinlock_t qlock;
- v4l2_std_id video_standard;
- unsigned int width, height;
- enum v4l2_field field; /* supported TOP, BOTTOM, SEQ_TB and SEQ_BT */
- unsigned int seq; /* video field or frame counter */
- unsigned int ch;
-};
-
-/* global device status */
-struct tw686x_dev {
- spinlock_t irq_lock;
-
- struct v4l2_device v4l2_dev;
- struct snd_card *card; /* sound card */
-
- unsigned int video_active; /* active video channel mask */
-
- char name[32];
- unsigned int type;
- struct pci_dev *pci_dev;
- __u32 __iomem *mmio;
-
- struct task_struct *video_thread;
- wait_queue_head_t video_thread_wait;
- u32 dma_requests;
-
- struct tw686x_video_channel video_channels[0];
-};
-
-static inline uint32_t reg_read(struct tw686x_dev *dev, unsigned int reg)
-{
- return readl(dev->mmio + reg);
-}
-
-static inline void reg_write(struct tw686x_dev *dev, unsigned int reg,
- uint32_t value)
-{
- writel(value, dev->mmio + reg);
-}
-
-static inline unsigned int max_channels(struct tw686x_dev *dev)
-{
- return dev->type & TYPE_MAX_CHANNELS; /* 4 or 8 channels */
-}
-
-static inline unsigned int is_second_gen(struct tw686x_dev *dev)
-{
- /* each channel has its own DMA SG table */
- return dev->type & TYPE_SECOND_GEN;
-}
-
-int tw686x_kh_video_irq(struct tw686x_dev *dev);
-int tw686x_kh_video_init(struct tw686x_dev *dev);
-void tw686x_kh_video_free(struct tw686x_dev *dev);
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index a324322ee0ad..499952c8ef39 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -106,13 +106,12 @@ static int nvec_mouse_probe(struct platform_device *pdev)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev;
- char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
- ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL);
+ ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ser_dev)
return -ENOMEM;
- ser_dev->id.type = SERIO_PS_PSTHRU;
+ ser_dev->id.type = SERIO_8042;
ser_dev->write = ps2_sendcommand;
ser_dev->start = ps2_startstreaming;
ser_dev->stop = ps2_stopstreaming;
@@ -127,9 +126,6 @@ static int nvec_mouse_probe(struct platform_device *pdev)
serio_register_port(ser_dev);
- /* mouse reset */
- nvec_write_async(nvec, mouse_reset, sizeof(mouse_reset));
-
return 0;
}
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index 955247979aaa..4ed6d8d7712a 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -601,13 +601,13 @@
#define PANEL_PLANE_TL 0x08001C
#define PANEL_PLANE_TL_TOP_SHIFT 16
-#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16)
-#define PANEL_PLANE_TL_LEFT_MASK 0xeff
+#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16)
+#define PANEL_PLANE_TL_LEFT_MASK 0x7ff
#define PANEL_PLANE_BR 0x080020
#define PANEL_PLANE_BR_BOTTOM_SHIFT 16
-#define PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16)
-#define PANEL_PLANE_BR_RIGHT_MASK 0xeff
+#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
+#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff
#define PANEL_HORIZONTAL_TOTAL 0x080024
#define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index c29040fdf9a7..1091b9f1dd07 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
actual_pages = get_user_pages(task, task->mm,
(unsigned long)buf & ~(PAGE_SIZE - 1),
num_pages,
- (type == PAGELIST_READ) /*Write */ ,
- 0 /*Force */ ,
+ (type == PAGELIST_READ) ? FOLL_WRITE : 0,
pages,
NULL /*vmas */);
up_read(&task->mm->mmap_sem);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index e11c0e07471b..7b6cd4d80621 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
current->mm, /* mm */
(unsigned long)virt_addr, /* start */
num_pages, /* len */
- 0, /* write */
- 0, /* force */
+ 0, /* gup_flags */
pages, /* pages (array of page pointers) */
NULL); /* vmas */
up_read(&current->mm->mmap_sem);
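Both vc04_services hunks above follow the tree-wide get_user_pages() conversion that replaced the separate write/force integer arguments with a single gup_flags word. A minimal sketch of the conversion, assuming the eight-argument task/mm form used here (exact prototypes vary between kernel versions):

	/* before: separate write/force ints */
	ret = get_user_pages(tsk, mm, start, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);

	/* after: one flags word; FOLL_WRITE requests write access,
	 * 0 keeps the mapping read-only */
	ret = get_user_pages(tsk, mm, start, nr_pages,
			     FOLL_WRITE, pages, NULL);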
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 78f5613e9467..6ab7443eabde 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3388,7 +3388,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
clients_count++;
- destroy_workqueue(hif_workqueue);
_fail_:
return result;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 39b928c2849d..b7d747e92c7a 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1804,6 +1804,10 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* Otherwise, initiator is not expecting a NOPIN in response.
* Just ignore for now.
*/
+
+ if (cmd)
+ iscsit_free_cmd(cmd, false);
+
return 0;
}
EXPORT_SYMBOL(iscsit_process_nop_out);
@@ -2982,7 +2986,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
- "Solicitied" : "Unsolicitied", cmd->init_task_tag,
+ "Solicited" : "Unsolicited", cmd->init_task_tag,
cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
}
EXPORT_SYMBOL(iscsit_build_nopin_rsp);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index adf419fa4291..15f79a2ca34a 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -434,7 +434,7 @@ static int iscsi_login_zero_tsih_s2(
/*
* Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
- * Immediate Data + Unsolicitied Data-OUT if necessary..
+ * Immediate Data + Unsolicited Data-OUT if necessary..
*/
param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
conn->param_list);
@@ -646,7 +646,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
/*
- * FIXME: Unsolicitied NopIN support for ISER
+ * FIXME: Unsolicited NopIN support for ISER
*/
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
return;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6094a6beddde..7dfefd66df93 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status != SAM_STAT_GOOD) {
- return;
- }
-
- /*
- * Calculate new residual count based upon length of SCSI data
- * transferred.
- */
- if (length < cmd->data_length) {
+ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
}
cmd->data_length = length;
- } else if (length > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = length - cmd->data_length;
- } else {
- cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
- cmd->residual_count = 0;
}
target_complete_cmd(cmd, scsi_status);
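Worked through the rewritten helper: for a command with cmd->data_length = 512 completing with SAM_STAT_GOOD and length = 200, the underflow branch sets residual_count = 512 - 200 = 312 (or adds that amount when SCF_UNDERFLOW_BIT was already set) and trims data_length to 200 before completion; any status other than GOOD, and any length >= data_length, now falls straight through to target_complete_cmd() without touching the residual.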
@@ -1706,6 +1692,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2547,8 +2534,12 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref)
- kref_get(&se_cmd->cmd_kref);
+ if (ack_kref) {
+ if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+ return -EINVAL;
+
+ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+ }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2627,7 +2618,7 @@ EXPORT_SYMBOL(target_put_sess_cmd);
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd;
+ struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
int rc;
@@ -2639,14 +2630,16 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
se_sess->sess_tearing_down = 1;
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_wait_list, se_cmd_list) {
rc = kref_get_unless_zero(&se_cmd->cmd_kref);
if (rc) {
se_cmd->cmd_wait_set = 1;
spin_lock(&se_cmd->t_state_lock);
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
spin_unlock(&se_cmd->t_state_lock);
- }
+ } else
+ list_del_init(&se_cmd->se_cmd_list);
}
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
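Both hunks above lean on the same rule: a new reference may only be taken while the descriptor's refcount is still non-zero; otherwise the command is treated as already gone (dropped from the wait list, or -EINVAL back to the fabric caller). A minimal sketch of the pattern, with hypothetical names:

	#include <linux/kref.h>

	/* kref_get_unless_zero() returns 0 once the count has dropped to
	 * zero, i.e. release is in flight, so a dying object is never
	 * resurrected by a late reference. */
	static struct my_cmd *my_cmd_get(struct my_cmd *cmd)
	{
		if (!kref_get_unless_zero(&cmd->kref))
			return NULL;
		return cmd;
	}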
@@ -2871,6 +2864,12 @@ static const struct sense_info sense_info_table[] = {
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
.add_sector_info = true,
},
+ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+ .key = COPY_ABORTED,
+ .asc = 0x0d,
+ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+ },
[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
/*
* Returning ILLEGAL REQUEST would cause immediate IO errors on
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 62bf4fe5704a..47562509b489 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -96,7 +96,7 @@ struct tcmu_dev {
size_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
- /* Offset of data ring from start of mb */
+ /* Offset of data area from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
size_t data_size;
@@ -349,7 +349,7 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap)
/*
* We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data ring.
+ * space available on the data area.
*
* Called with ring lock held.
*/
@@ -389,7 +389,8 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
return true;
}
-static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+static sense_reason_t
+tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
@@ -405,7 +406,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* Must be a certain minimum size for response sense info, but
@@ -432,11 +433,14 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
data_length += se_cmd->t_bidi_data_sg->length;
}
- if ((command_size > (udev->cmdr_size / 2))
- || data_length > udev->data_size)
- pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
- "cmd/data ring buffers\n", command_size, data_length,
+ if ((command_size > (udev->cmdr_size / 2)) ||
+ data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
+ "cmd ring/data area\n", command_size, data_length,
udev->cmdr_size, udev->data_size);
+ spin_unlock_irq(&udev->cmdr_lock);
+ return TCM_INVALID_CDB_FIELD;
+ }
while (!is_ring_space_avail(udev, command_size, data_length)) {
int ret;
@@ -450,7 +454,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
finish_wait(&udev->wait_cmdr, &__wait);
if (!ret) {
pr_warn("tcmu: command timed out\n");
- return -ETIMEDOUT;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
spin_lock_irq(&udev->cmdr_lock);
@@ -487,9 +491,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
- /*
- * Fix up iovecs, and handle if allocation in data ring wrapped.
- */
+ /* Handle allocating space from the data area */
iov = &entry->req.iov[0];
iov_cnt = 0;
copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
@@ -526,10 +528,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
mod_timer(&udev->timeout,
round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
- return 0;
+ return TCM_NO_SENSE;
}
-static int tcmu_queue_cmd(struct se_cmd *se_cmd)
+static sense_reason_t
+tcmu_queue_cmd(struct se_cmd *se_cmd)
{
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
@@ -538,10 +541,10 @@ static int tcmu_queue_cmd(struct se_cmd *se_cmd)
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = tcmu_queue_cmd_ring(tcmu_cmd);
- if (ret < 0) {
+ if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
spin_lock_irq(&udev->commands_lock);
idr_remove(&udev->commands, tcmu_cmd->cmd_id);
@@ -561,7 +564,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
/*
* cmd has been completed already from timeout, just reclaim
- * data ring space and free cmd
+ * data area space and free cmd
*/
free_data_area(udev, cmd);
@@ -1129,20 +1132,9 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
}
static sense_reason_t
-tcmu_pass_op(struct se_cmd *se_cmd)
-{
- int ret = tcmu_queue_cmd(se_cmd);
-
- if (ret != 0)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- else
- return TCM_NO_SENSE;
-}
-
-static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
- return passthrough_parse_cdb(cmd, tcmu_pass_op);
+ return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
static const struct target_backend_ops tcmu_ops = {
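With tcmu_queue_cmd() returning a sense_reason_t of its own, the tcmu_pass_op() translation wrapper above becomes dead weight and can be dropped. A sketch of the resulting backend contract, with hypothetical helper names:

	/* Backends report failures as sense codes directly instead of
	 * -errno values that the core would have to map afterwards. */
	static sense_reason_t my_queue_cmd(struct se_cmd *se_cmd)
	{
		if (!my_ring_has_space(se_cmd))	/* hypothetical check */
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		/* ... hand the command to the backend ring ... */
		return TCM_NO_SENSE;
	}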
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 75cd85426ae3..094a1440eacb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
}
mutex_unlock(&g_device_mutex);
- pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
}
@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
- unsigned short tdll)
+ unsigned short tdll, sense_reason_t *sense_ret)
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
unsigned short start = 0;
bool src = true;
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
else
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
-
- if (rc < 0)
+ /*
+ * If a matching IEEE NAA 0x83 descriptor for the requested device
+ * is not located on this node, return COPY_ABORTED with ASQ/ASQC
+ * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
+ * initiator to fall back to normal copy method.
+ */
+ if (rc < 0) {
+ *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
goto out;
+ }
pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->src_dev, &xop->src_tid_wwn[0]);
@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
remote_port, true);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
remote_port, false);
if (rc < 0) {
struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
/*
* If the failure happened before the t_mem_list hand-off in
* target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
transport_generic_free_cmd(se_cmd, 0);
return rc;
@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
kfree(xop);
-
- pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ /*
+ * Don't override an error scsi status if it has already been set
+ */
+ if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
+ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
+ " CHECK_CONDITION -> sending response\n", rc);
+ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ }
target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
- rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 216e18cc9133..ff5de9a96643 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -572,10 +572,10 @@ static void ft_send_work(struct work_struct *work)
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir,
- TARGET_SCF_ACK_KREF))
+ TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
goto err;
- pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
+ pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
return;
err:
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6ffbb603d912..fd5c3de79470 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -39,6 +39,11 @@
#include "tcm_fc.h"
+#define TFC_SESS_DBG(lport, fmt, args...) \
+ pr_debug("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+		(lport)->port_id, ##args)
+
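For reference, a call such as TFC_SESS_DBG(lport, "port_id %x found %p\n", port_id, sess) expands to a pr_debug() whose output is prefixed with the SCSI host number and the local port ID (e.g. "host0: rport 0a1b2c: ..."), so every session message converted below carries its lport context for free.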
static void ft_sess_delete_all(struct ft_tport *);
/*
@@ -167,24 +172,29 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
struct ft_tport *tport;
struct hlist_head *head;
struct ft_sess *sess;
+ char *reason = "no session created";
rcu_read_lock();
tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
- if (!tport)
+ if (!tport) {
+ reason = "not an FCP port";
goto out;
+ }
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
- pr_debug("port_id %x found %p\n", port_id, sess);
+ TFC_SESS_DBG(lport, "port_id %x found %p\n",
+ port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
- pr_debug("port_id %x not found\n", port_id);
+ TFC_SESS_DBG(lport, "port_id %x not found, %s\n",
+ port_id, reason);
return NULL;
}
@@ -195,7 +205,7 @@ static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
struct ft_tport *tport = sess->tport;
struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
- pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+ TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess);
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
@@ -223,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
- return NULL;
+ return ERR_PTR(-ENOMEM);
kref_init(&sess->kref); /* ref for table entry */
sess->tport = tport;
@@ -234,8 +244,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
if (IS_ERR(sess->se_sess)) {
+ int rc = PTR_ERR(sess->se_sess);
kfree(sess);
- return NULL;
+ sess = ERR_PTR(rc);
}
return sess;
}
@@ -319,7 +330,7 @@ void ft_sess_close(struct se_session *se_sess)
mutex_unlock(&ft_lport_lock);
return;
}
- pr_debug("port_id %x\n", port_id);
+ TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
ft_close_sess(sess);
@@ -379,8 +390,13 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
if (!(fcp_parm & FCP_SPPF_INIT_FCN))
return FC_SPP_RESP_CONF;
sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
- if (!sess)
- return FC_SPP_RESP_RES;
+ if (IS_ERR(sess)) {
+ if (PTR_ERR(sess) == -EACCES) {
+ spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR;
+ return FC_SPP_RESP_CONF;
+ } else
+ return FC_SPP_RESP_RES;
+ }
if (!sess->params)
rdata->prli_count++;
sess->params = fcp_parm;
@@ -423,8 +439,8 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
- pr_debug("port_id %x flags %x ret %x\n",
- rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
+ TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n",
+ rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@@ -477,11 +493,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
- pr_debug("sid %x\n", sid);
+ TFC_SESS_DBG(lport, "recv sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
- pr_debug("sid %x sess lookup failed\n", sid);
+ TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 2d702ca6556f..a13541bdc726 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -186,7 +186,7 @@ config HISI_THERMAL
config IMX_THERMAL
tristate "Temperature sensor driver for Freescale i.MX SoCs"
- depends on CPU_THERMAL
+ depends on (ARCH_MXC && CPU_THERMAL) || COMPILE_TEST
depends on MFD_SYSCON
depends on OF
help
@@ -195,6 +195,26 @@ config IMX_THERMAL
cpufreq is used as the cooling device to throttle CPUs when the
passive trip is crossed.
+config MAX77620_THERMAL
+ tristate "Temperature sensor driver for Maxim MAX77620 PMIC"
+ depends on MFD_MAX77620
+ depends on OF
+ help
+	  Support for the die junction temperature warning alarm on the
+	  Maxim Semiconductor MAX77620 PMIC. The device generates two alarm
+	  interrupts when the PMIC die temperature crosses the thresholds of
+	  120 degC and 140 degC.
+
+config QORIQ_THERMAL
+ tristate "QorIQ Thermal Monitoring Unit"
+ depends on THERMAL_OF
+ depends on HAS_IOMEM
+ help
+	  Support for the Thermal Monitoring Unit (TMU) found on QorIQ
+	  platforms. It supports one critical trip point and one passive
+	  trip point, and cpufreq is used as the cooling device to throttle
+	  CPUs when the passive trip is crossed.
+
config SPEAR_THERMAL
tristate "SPEAr thermal sensor driver"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -332,6 +352,16 @@ menu "ACPI INT340X thermal drivers"
source drivers/thermal/int340x_thermal/Kconfig
endmenu
+config INTEL_BXT_PMIC_THERMAL
+ tristate "Intel Broxton PMIC thermal driver"
+ depends on X86 && INTEL_SOC_PMIC && REGMAP
+ help
+	  Select this driver for the Intel Broxton PMIC, whose ADC channels
+	  monitor system temperature and raise alerts.
+	  The driver monitors the PMIC ADC channels, handles the alert trip
+	  point interrupts, and notifies the thermal framework of the trip
+	  point and temperature details of the zone.
+
config INTEL_PCH_THERMAL
tristate "Intel PCH Thermal Reporting Driver"
depends on X86 && PCI
@@ -399,4 +429,9 @@ config GENERIC_ADC_THERMAL
to this driver. This driver reports the temperature by reading ADC
channel and converts it to temperature based on lookup table.
+menu "Qualcomm thermal drivers"
+depends on (ARCH_QCOM && OF) || COMPILE_TEST
+source "drivers/thermal/qcom/Kconfig"
+endmenu
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 10b07c14f8a9..c92eb22a41ff 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -37,6 +37,8 @@ obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o
obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
+obj-$(CONFIG_MAX77620_THERMAL) += max77620_thermal.o
+obj-$(CONFIG_QORIQ_THERMAL) += qoriq_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
@@ -45,8 +47,10 @@ obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
+obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
+obj-$(CONFIG_QCOM_TSENS) += qcom/
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index a32b41783b77..9ce0e9eef923 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -74,7 +74,7 @@ struct power_table {
* cpufreq frequencies.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
* @node: list_head to link all cpufreq_cooling_device together.
- * @last_load: load measured by the latest call to cpufreq_get_actual_power()
+ * @last_load: load measured by the latest call to cpufreq_get_requested_power()
* @time_in_idle: previous reading of the absolute time that this cpu was idle
* @time_in_idle_timestamp: wall time of the last invocation of
* get_cpu_idle_time_us()
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 652acd8fbe48..e776cea80cfc 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -306,7 +306,7 @@ static void db8500_thermal_work(struct work_struct *work)
if (cur_mode == THERMAL_DEVICE_DISABLED)
return;
- thermal_zone_device_update(pzone->therm_dev);
+ thermal_zone_device_update(pzone->therm_dev, THERMAL_EVENT_UNSPECIFIED);
dev_dbg(&pzone->therm_dev->device, "thermal work finished.\n");
}
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 01f0015f80dc..81631b110e17 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -312,7 +312,7 @@ static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev,
unsigned long freq;
u32 static_power;
- if (state < 0 || state >= dfc->freq_table_size)
+ if (state >= dfc->freq_table_size)
return -EINVAL;
freq = dfc->freq_table[state];
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index bb118a152cbb..fc5e5057f0de 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -65,7 +65,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
if (instance->target == 0 && tz->temperature >= trip_temp)
instance->target = 1;
else if (instance->target == 1 &&
- tz->temperature < trip_temp - trip_hyst)
+ tz->temperature <= trip_temp - trip_hyst)
instance->target = 0;
dev_dbg(&instance->cdev->device, "target=%d\n",
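The relaxed comparison closes an off-by-one at the bottom of the hysteresis window: with trip_temp = 80000 and trip_hyst = 5000 millidegrees, cooling still engages at temperatures >= 80000, but now disengages as soon as the reading falls to exactly 75000 instead of only strictly below it, so a sensor that settles precisely at trip_temp - trip_hyst no longer leaves the cooling device latched on.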
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 97fad8f51e1c..f6429666a1cf 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -237,7 +237,8 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
if (!data->sensors[i].tzd)
continue;
- thermal_zone_device_update(data->sensors[i].tzd);
+ thermal_zone_device_update(data->sensors[i].tzd,
+ THERMAL_EVENT_UNSPECIFIED);
}
return IRQ_HANDLED;
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index e473548b5d28..06912f0602b7 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -246,7 +246,7 @@ static int imx_set_mode(struct thermal_zone_device *tz,
}
data->mode = mode;
- thermal_zone_device_update(tz);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
return 0;
}
@@ -457,7 +457,7 @@ static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev)
dev_dbg(&data->tz->device, "THERMAL ALARM: T > %d\n",
data->alarm_temp / 1000);
- thermal_zone_device_update(data->tz);
+ thermal_zone_device_update(data->tz, THERMAL_EVENT_UNSPECIFIED);
return IRQ_HANDLED;
}
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
index 69df3d960303..8e90b3151a42 100644
--- a/drivers/thermal/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -35,7 +35,8 @@ static void int3402_notify(acpi_handle handle, u32 event, void *data)
case INT3402_PERF_CHANGED_EVENT:
break;
case INT3402_THERMAL_EVENT:
- int340x_thermal_zone_device_update(priv->int340x_zone);
+ int340x_thermal_zone_device_update(priv->int340x_zone,
+ THERMAL_TRIP_VIOLATED);
break;
default:
break;
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 50a7a08e3a15..c4890c9437eb 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -25,6 +25,7 @@
#define INT3403_TYPE_CHARGER 0x0B
#define INT3403_TYPE_BATTERY 0x0C
#define INT3403_PERF_CHANGED_EVENT 0x80
+#define INT3403_PERF_TRIP_POINT_CHANGED 0x81
#define INT3403_THERMAL_EVENT 0x90
/* Preserved structure for future expandability */
@@ -72,7 +73,13 @@ static void int3403_notify(acpi_handle handle,
case INT3403_PERF_CHANGED_EVENT:
break;
case INT3403_THERMAL_EVENT:
- int340x_thermal_zone_device_update(obj->int340x_zone);
+ int340x_thermal_zone_device_update(obj->int340x_zone,
+ THERMAL_TRIP_VIOLATED);
+ break;
+ case INT3403_PERF_TRIP_POINT_CHANGED:
+ int340x_thermal_read_trips(obj->int340x_zone);
+ int340x_thermal_zone_device_update(obj->int340x_zone,
+ THERMAL_TRIP_CHANGED);
break;
default:
dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
index b9b2666aa94c..145a5c53ff5c 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
@@ -177,6 +177,42 @@ static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
return 0;
}
+int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+{
+ int trip_cnt = int34x_zone->aux_trip_nr;
+ int i;
+
+ int34x_zone->crt_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
+ &int34x_zone->crt_temp))
+ int34x_zone->crt_trip_id = trip_cnt++;
+
+ int34x_zone->hot_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_HOT",
+ &int34x_zone->hot_temp))
+ int34x_zone->hot_trip_id = trip_cnt++;
+
+ int34x_zone->psv_trip_id = -1;
+ if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_PSV",
+ &int34x_zone->psv_temp))
+ int34x_zone->psv_trip_id = trip_cnt++;
+
+ for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+ char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
+
+ if (int340x_thermal_get_trip_config(int34x_zone->adev->handle,
+ name,
+ &int34x_zone->act_trips[i].temp))
+ break;
+
+ int34x_zone->act_trips[i].id = trip_cnt++;
+ int34x_zone->act_trips[i].valid = true;
+ }
+
+ return trip_cnt;
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
+
static struct thermal_zone_params int340x_thermal_params = {
.governor_name = "user_space",
.no_hwmon = true,
@@ -188,7 +224,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
struct int34x_thermal_zone *int34x_thermal_zone;
acpi_status status;
unsigned long long trip_cnt;
- int trip_mask = 0, i;
+ int trip_mask = 0;
int ret;
int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone),
@@ -214,28 +250,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
int34x_thermal_zone->aux_trip_nr = trip_cnt;
}
- int34x_thermal_zone->crt_trip_id = -1;
- if (!int340x_thermal_get_trip_config(adev->handle, "_CRT",
- &int34x_thermal_zone->crt_temp))
- int34x_thermal_zone->crt_trip_id = trip_cnt++;
- int34x_thermal_zone->hot_trip_id = -1;
- if (!int340x_thermal_get_trip_config(adev->handle, "_HOT",
- &int34x_thermal_zone->hot_temp))
- int34x_thermal_zone->hot_trip_id = trip_cnt++;
- int34x_thermal_zone->psv_trip_id = -1;
- if (!int340x_thermal_get_trip_config(adev->handle, "_PSV",
- &int34x_thermal_zone->psv_temp))
- int34x_thermal_zone->psv_trip_id = trip_cnt++;
- for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
- char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
+ trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone);
- if (int340x_thermal_get_trip_config(adev->handle, name,
- &int34x_thermal_zone->act_trips[i].temp))
- break;
-
- int34x_thermal_zone->act_trips[i].id = trip_cnt++;
- int34x_thermal_zone->act_trips[i].valid = true;
- }
int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table(
adev->handle);
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
index aaadf724ff2e..5f3ba4775c5c 100644
--- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
+++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
@@ -46,6 +46,7 @@ struct int34x_thermal_zone {
struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
struct thermal_zone_device_ops *override_ops);
void int340x_thermal_zone_remove(struct int34x_thermal_zone *);
+int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone);
static inline void int340x_thermal_zone_set_priv_data(
struct int34x_thermal_zone *tzone, void *priv_data)
@@ -60,9 +61,10 @@ static inline void *int340x_thermal_zone_get_priv_data(
}
static inline void int340x_thermal_zone_device_update(
- struct int34x_thermal_zone *tzone)
+ struct int34x_thermal_zone *tzone,
+ enum thermal_notify_event event)
{
- thermal_zone_device_update(tzone->zone);
+ thermal_zone_device_update(tzone->zone, event);
}
#endif
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 42c1ac057bad..ff3b36f339e3 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -258,7 +258,8 @@ static void proc_thermal_notify(acpi_handle handle, u32 event, void *data)
switch (event) {
case PROC_POWER_CAPABILITY_CHANGED:
proc_thermal_read_ppcc(proc_priv);
- int340x_thermal_zone_device_update(proc_priv->int340x_zone);
+ int340x_thermal_zone_device_update(proc_priv->int340x_zone,
+ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED);
break;
default:
dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event);
diff --git a/drivers/thermal/intel_bxt_pmic_thermal.c b/drivers/thermal/intel_bxt_pmic_thermal.c
new file mode 100644
index 000000000000..0f19a393ddd8
--- /dev/null
+++ b/drivers/thermal/intel_bxt_pmic_thermal.c
@@ -0,0 +1,300 @@
+/*
+ * Intel Broxton PMIC thermal driver
+ *
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/mfd/intel_soc_pmic.h>
+
+#define BXTWC_THRM0IRQ 0x4E04
+#define BXTWC_THRM1IRQ 0x4E05
+#define BXTWC_THRM2IRQ 0x4E06
+#define BXTWC_MTHRM0IRQ 0x4E12
+#define BXTWC_MTHRM1IRQ 0x4E13
+#define BXTWC_MTHRM2IRQ 0x4E14
+#define BXTWC_STHRM0IRQ 0x4F19
+#define BXTWC_STHRM1IRQ 0x4F1A
+#define BXTWC_STHRM2IRQ 0x4F1B
+
+struct trip_config_map {
+ u16 irq_reg;
+ u16 irq_en;
+ u16 evt_stat;
+ u8 irq_mask;
+ u8 irq_en_mask;
+ u8 evt_mask;
+ u8 trip_num;
+};
+
+struct thermal_irq_map {
+ char handle[20];
+ int num_trips;
+ const struct trip_config_map *trip_config;
+};
+
+struct pmic_thermal_data {
+ const struct thermal_irq_map *maps;
+ int num_maps;
+};
+
+static const struct trip_config_map bxtwc_str0_trip_config[] = {
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x01,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x01,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x01,
+ .trip_num = 0
+ },
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x10,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x10,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x10,
+ .trip_num = 1
+ }
+};
+
+static const struct trip_config_map bxtwc_str1_trip_config[] = {
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x02,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x02,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x02,
+ .trip_num = 0
+ },
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x20,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x20,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x20,
+ .trip_num = 1
+ },
+};
+
+static const struct trip_config_map bxtwc_str2_trip_config[] = {
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x04,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x04,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x04,
+ .trip_num = 0
+ },
+ {
+ .irq_reg = BXTWC_THRM0IRQ,
+ .irq_mask = 0x40,
+ .irq_en = BXTWC_MTHRM0IRQ,
+ .irq_en_mask = 0x40,
+ .evt_stat = BXTWC_STHRM0IRQ,
+ .evt_mask = 0x40,
+ .trip_num = 1
+ },
+};
+
+static const struct trip_config_map bxtwc_str3_trip_config[] = {
+ {
+ .irq_reg = BXTWC_THRM2IRQ,
+ .irq_mask = 0x10,
+ .irq_en = BXTWC_MTHRM2IRQ,
+ .irq_en_mask = 0x10,
+ .evt_stat = BXTWC_STHRM2IRQ,
+ .evt_mask = 0x10,
+ .trip_num = 0
+ },
+};
+
+static const struct thermal_irq_map bxtwc_thermal_irq_map[] = {
+ {
+ .handle = "STR0",
+ .trip_config = bxtwc_str0_trip_config,
+ .num_trips = ARRAY_SIZE(bxtwc_str0_trip_config),
+ },
+ {
+ .handle = "STR1",
+ .trip_config = bxtwc_str1_trip_config,
+ .num_trips = ARRAY_SIZE(bxtwc_str1_trip_config),
+ },
+ {
+ .handle = "STR2",
+ .trip_config = bxtwc_str2_trip_config,
+ .num_trips = ARRAY_SIZE(bxtwc_str2_trip_config),
+ },
+ {
+ .handle = "STR3",
+ .trip_config = bxtwc_str3_trip_config,
+ .num_trips = ARRAY_SIZE(bxtwc_str3_trip_config),
+ },
+};
+
+static const struct pmic_thermal_data bxtwc_thermal_data = {
+ .maps = bxtwc_thermal_irq_map,
+ .num_maps = ARRAY_SIZE(bxtwc_thermal_irq_map),
+};
+
+static irqreturn_t pmic_thermal_irq_handler(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct thermal_zone_device *tzd;
+ struct pmic_thermal_data *td;
+ struct intel_soc_pmic *pmic;
+ struct regmap *regmap;
+ u8 reg_val, mask, irq_stat, trip;
+ u16 reg, evt_stat_reg;
+ int i, j, ret;
+
+ pmic = dev_get_drvdata(pdev->dev.parent);
+ regmap = pmic->regmap;
+ td = (struct pmic_thermal_data *)
+ platform_get_device_id(pdev)->driver_data;
+
+ /* Resolve thermal irqs */
+ for (i = 0; i < td->num_maps; i++) {
+ for (j = 0; j < td->maps[i].num_trips; j++) {
+ reg = td->maps[i].trip_config[j].irq_reg;
+ mask = td->maps[i].trip_config[j].irq_mask;
+ /*
+ * Read the irq register to resolve whether the
+ * interrupt was triggered for this sensor
+ */
+ if (regmap_read(regmap, reg, &ret))
+ return IRQ_HANDLED;
+
+ reg_val = (u8)ret;
+ irq_stat = ((u8)ret & mask);
+
+ if (!irq_stat)
+ continue;
+
+ /*
+ * Read the status register to find out what
+ * event occurred i.e a high or a low
+ */
+ evt_stat_reg = td->maps[i].trip_config[j].evt_stat;
+ if (regmap_read(regmap, evt_stat_reg, &ret))
+ return IRQ_HANDLED;
+
+ trip = td->maps[i].trip_config[j].trip_num;
+ tzd = thermal_zone_get_zone_by_name(td->maps[i].handle);
+ if (!IS_ERR(tzd))
+ thermal_zone_device_update(tzd,
+ THERMAL_EVENT_UNSPECIFIED);
+
+ /* Clear the appropriate irq */
+ regmap_write(regmap, reg, reg_val & mask);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int pmic_thermal_probe(struct platform_device *pdev)
+{
+ struct regmap_irq_chip_data *regmap_irq_chip;
+ struct pmic_thermal_data *thermal_data;
+ int ret, irq, virq, i, j, pmic_irq_count;
+ struct intel_soc_pmic *pmic;
+ struct regmap *regmap;
+ struct device *dev;
+ u16 reg;
+ u8 mask;
+
+ dev = &pdev->dev;
+ pmic = dev_get_drvdata(pdev->dev.parent);
+ if (!pmic) {
+ dev_err(dev, "Failed to get struct intel_soc_pmic pointer\n");
+ return -ENODEV;
+ }
+
+ thermal_data = (struct pmic_thermal_data *)
+ platform_get_device_id(pdev)->driver_data;
+ if (!thermal_data) {
+ dev_err(dev, "No thermal data initialized!!\n");
+ return -ENODEV;
+ }
+
+ regmap = pmic->regmap;
+ regmap_irq_chip = pmic->irq_chip_data_level2;
+
+ pmic_irq_count = 0;
+ while ((irq = platform_get_irq(pdev, pmic_irq_count)) != -ENXIO) {
+ virq = regmap_irq_get_virq(regmap_irq_chip, irq);
+ if (virq < 0) {
+ dev_err(dev, "failed to get virq by irq %d\n", irq);
+ return virq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, virq,
+ NULL, pmic_thermal_irq_handler,
+ IRQF_ONESHOT, "pmic_thermal", pdev);
+
+ if (ret) {
+ dev_err(dev, "request irq(%d) failed: %d\n", virq, ret);
+ return ret;
+ }
+ pmic_irq_count++;
+ }
+
+ /* Enable thermal interrupts */
+ for (i = 0; i < thermal_data->num_maps; i++) {
+ for (j = 0; j < thermal_data->maps[i].num_trips; j++) {
+ reg = thermal_data->maps[i].trip_config[j].irq_en;
+ mask = thermal_data->maps[i].trip_config[j].irq_en_mask;
+ ret = regmap_update_bits(regmap, reg, mask, 0x00);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id pmic_thermal_id_table[] = {
+ {
+ .name = "bxt_wcove_thermal",
+ .driver_data = (kernel_ulong_t)&bxtwc_thermal_data,
+ },
+ {},
+};
+
+static struct platform_driver pmic_thermal_driver = {
+ .probe = pmic_thermal_probe,
+ .driver = {
+ .name = "pmic_thermal",
+ },
+ .id_table = pmic_thermal_id_table,
+};
+
+MODULE_DEVICE_TABLE(platform, pmic_thermal_id_table);
+module_platform_driver(pmic_thermal_driver);
+
+MODULE_AUTHOR("Yegnesh S Iyer <yegnesh.s.iyer@intel.com>");
+MODULE_DESCRIPTION("Intel Broxton PMIC Thermal Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index 9b4815e81b0d..19bf2028e508 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -20,10 +20,13 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
#include <linux/thermal.h>
#include <linux/pm.h>
/* Intel PCH thermal Device IDs */
+#define PCH_THERMAL_DID_HSW_1 0x9C24 /* Haswell PCH */
+#define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
@@ -66,9 +69,53 @@ struct pch_thermal_device {
unsigned long crt_temp;
int hot_trip_id;
unsigned long hot_temp;
+ int psv_trip_id;
+ unsigned long psv_temp;
bool bios_enabled;
};
+#ifdef CONFIG_ACPI
+
+/*
+ * On some platforms a companion ACPI device supplies the passive trip
+ * temperature via the _PSV method. There is no specific passive
+ * temperature setting in the MMIO interface of this PCI device.
+ */
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+ struct acpi_device *adev;
+
+ ptd->psv_trip_id = -1;
+
+ adev = ACPI_COMPANION(&ptd->pdev->dev);
+ if (adev) {
+ unsigned long long r;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle, "_PSV", NULL,
+ &r);
+ if (ACPI_SUCCESS(status)) {
+ unsigned long trip_temp;
+
+ trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+ if (trip_temp) {
+ ptd->psv_temp = trip_temp;
+ ptd->psv_trip_id = *nr_trips;
+ ++(*nr_trips);
+ }
+ }
+ }
+}
+#else
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+	ptd->psv_trip_id = -1;
+}
+#endif
+
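_PSV reports tenths of a kelvin, and DECI_KELVIN_TO_MILLICELSIUS() converts as (t - 2732) * 100. For example, a _PSV of 3282 deciKelvin gives (3282 - 2732) * 100 = 55000 millidegrees Celsius, i.e. a 55 degC passive trip; a zero result is treated as "no trip" above.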
static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
{
u8 tsel;
@@ -119,6 +166,8 @@ read_trips:
++(*nr_trips);
}
+ pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
+
return 0;
}
@@ -194,6 +243,8 @@ static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
*type = THERMAL_TRIP_CRITICAL;
else if (ptd->hot_trip_id == trip)
*type = THERMAL_TRIP_HOT;
+ else if (ptd->psv_trip_id == trip)
+ *type = THERMAL_TRIP_PASSIVE;
else
return -EINVAL;
@@ -208,6 +259,8 @@ static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *tem
*temp = ptd->crt_temp;
else if (ptd->hot_trip_id == trip)
*temp = ptd->hot_temp;
+ else if (ptd->psv_trip_id == trip)
+ *temp = ptd->psv_temp;
else
return -EINVAL;
@@ -242,6 +295,11 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev,
ptd->ops = &pch_dev_ops_wpt;
dev_name = "pch_skylake";
break;
+ case PCH_THERMAL_DID_HSW_1:
+ case PCH_THERMAL_DID_HSW_2:
+ ptd->ops = &pch_dev_ops_wpt;
+ dev_name = "pch_haswell";
+ break;
default:
dev_err(&pdev->dev, "unknown pch thermal device\n");
return -ENODEV;
@@ -324,6 +382,8 @@ static int intel_pch_thermal_resume(struct device *device)
static struct pci_device_id intel_pch_thermal_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 0e4dc0afcfd2..afada655f861 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,20 +669,17 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.set_cur_state = powerclamp_set_cur_state,
};
-static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
+static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
static int __init powerclamp_probe(void)
{
if (!x86_match_cpu(intel_powerclamp_ids)) {
- pr_err("Intel powerclamp does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
+ pr_err("CPU does not support MWAIT");
return -ENODEV;
}
diff --git a/drivers/thermal/intel_soc_dts_iosf.c b/drivers/thermal/intel_soc_dts_iosf.c
index f72e1db3216f..e0813dfaa278 100644
--- a/drivers/thermal/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel_soc_dts_iosf.c
@@ -391,7 +391,8 @@ void intel_soc_dts_iosf_interrupt_handler(struct intel_soc_dts_sensors *sensors)
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
pr_debug("TZD update for zone %d\n", i);
- thermal_zone_device_update(sensors->soc_dts[i].tzone);
+ thermal_zone_device_update(sensors->soc_dts[i].tzone,
+ THERMAL_EVENT_UNSPECIFIED);
}
} else
spin_unlock_irqrestore(&sensors->intr_notify_lock, flags);
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
new file mode 100644
index 000000000000..83905ff46e40
--- /dev/null
+++ b/drivers/thermal/max77620_thermal.c
@@ -0,0 +1,166 @@
+/*
+ * Junction temperature thermal driver for Maxim Max77620.
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ * Mallikarjun Kasoju <mkasoju@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max77620.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define MAX77620_NORMAL_OPERATING_TEMP 100000
+#define MAX77620_TJALARM1_TEMP 120000
+#define MAX77620_TJALARM2_TEMP 140000
+
+struct max77620_therm_info {
+ struct device *dev;
+ struct regmap *rmap;
+ struct thermal_zone_device *tz_device;
+ int irq_tjalarm1;
+ int irq_tjalarm2;
+};
+
+/**
+ * max77620_thermal_read_temp: Read the PMIC die temperature.
+ * @data: Device-specific data.
+ * @temp: Temperature in millidegrees Celsius.
+ *
+ * The actual PMIC die temperature is not available from the PMIC;
+ * it only reports whether the die has crossed the 120 degC or
+ * 140 degC threshold level.
+ * If no threshold has been crossed, assume a die temperature of 100 degC;
+ * otherwise report 120 degC or 140 degC based on the threshold status.
+ *
+ * Return: 0 on success, otherwise an error number describing the failure.
+ */
+static int max77620_thermal_read_temp(void *data, int *temp)
+{
+ struct max77620_therm_info *mtherm = data;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(mtherm->rmap, MAX77620_REG_STATLBT, &val);
+ if (ret < 0) {
+ dev_err(mtherm->dev, "Failed to read STATLBT: %d\n", ret);
+ return ret;
+ }
+
+ if (val & MAX77620_IRQ_TJALRM2_MASK)
+ *temp = MAX77620_TJALARM2_TEMP;
+ else if (val & MAX77620_IRQ_TJALRM1_MASK)
+ *temp = MAX77620_TJALARM1_TEMP;
+ else
+ *temp = MAX77620_NORMAL_OPERATING_TEMP;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops max77620_thermal_ops = {
+ .get_temp = max77620_thermal_read_temp,
+};
+
+static irqreturn_t max77620_thermal_irq(int irq, void *data)
+{
+ struct max77620_therm_info *mtherm = data;
+
+ if (irq == mtherm->irq_tjalarm1)
+ dev_warn(mtherm->dev, "Junction Temp Alarm1(120C) occurred\n");
+ else if (irq == mtherm->irq_tjalarm2)
+ dev_crit(mtherm->dev, "Junction Temp Alarm2(140C) occurred\n");
+
+ thermal_zone_device_update(mtherm->tz_device,
+ THERMAL_EVENT_UNSPECIFIED);
+
+ return IRQ_HANDLED;
+}
+
+static int max77620_thermal_probe(struct platform_device *pdev)
+{
+ struct max77620_therm_info *mtherm;
+ int ret;
+
+ mtherm = devm_kzalloc(&pdev->dev, sizeof(*mtherm), GFP_KERNEL);
+ if (!mtherm)
+ return -ENOMEM;
+
+ mtherm->irq_tjalarm1 = platform_get_irq(pdev, 0);
+ mtherm->irq_tjalarm2 = platform_get_irq(pdev, 1);
+ if ((mtherm->irq_tjalarm1 < 0) || (mtherm->irq_tjalarm2 < 0)) {
+ dev_err(&pdev->dev, "Alarm irq number not available\n");
+ return -EINVAL;
+ }
+
+ pdev->dev.of_node = pdev->dev.parent->of_node;
+
+ mtherm->dev = &pdev->dev;
+ mtherm->rmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!mtherm->rmap) {
+ dev_err(&pdev->dev, "Failed to get parent regmap\n");
+ return -ENODEV;
+ }
+
+ mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
+ mtherm, &max77620_thermal_ops);
+ if (IS_ERR(mtherm->tz_device)) {
+ ret = PTR_ERR(mtherm->tz_device);
+ dev_err(&pdev->dev, "Failed to register thermal zone: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, mtherm->irq_tjalarm1, NULL,
+ max77620_thermal_irq,
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(&pdev->dev), mtherm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request irq1: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, mtherm->irq_tjalarm2, NULL,
+ max77620_thermal_irq,
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(&pdev->dev), mtherm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request irq2: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, mtherm);
+
+ return 0;
+}
+
+static struct platform_device_id max77620_thermal_devtype[] = {
+ { .name = "max77620-thermal", },
+ {},
+};
+
+static struct platform_driver max77620_thermal_driver = {
+ .driver = {
+ .name = "max77620-thermal",
+ },
+ .probe = max77620_thermal_probe,
+ .id_table = max77620_thermal_devtype,
+};
+
+module_platform_driver(max77620_thermal_driver);
+
+MODULE_DESCRIPTION("Max77620 Junction temperature Thermal driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 262ab0a2266f..34169c32d495 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -2,6 +2,7 @@
* Copyright (c) 2015 MediaTek Inc.
* Author: Hanyi Wu <hanyi.wu@mediatek.com>
* Sascha Hauer <s.hauer@pengutronix.de>
+ * Dawei Chien <dawei.chien@mediatek.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -21,6 +22,7 @@
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -88,6 +90,7 @@
#define TEMP_ADCVALIDMASK_VALID_HIGH BIT(5)
#define TEMP_ADCVALIDMASK_VALID_POS(bit) (bit)
+/* MT8173 thermal sensors */
#define MT8173_TS1 0
#define MT8173_TS2 1
#define MT8173_TS3 2
@@ -106,7 +109,12 @@
/* The number of sensing points per bank */
#define MT8173_NUM_SENSORS_PER_ZONE 4
-/* Layout of the fuses providing the calibration data */
+/*
+ * Layout of the fuses providing the calibration data.
+ * These macros can be used for both MT8173 and MT2701.
+ * MT8173 has five sensors and needs five VTS calibration values;
+ * MT2701 has three sensors and needs three VTS calibration values.
+ */
#define MT8173_CALIB_BUF0_VALID BIT(0)
#define MT8173_CALIB_BUF1_ADC_GE(x) (((x) >> 22) & 0x3ff)
#define MT8173_CALIB_BUF0_VTS_TS1(x) (((x) >> 17) & 0x1ff)
@@ -117,24 +125,50 @@
#define MT8173_CALIB_BUF0_DEGC_CALI(x) (((x) >> 1) & 0x3f)
#define MT8173_CALIB_BUF0_O_SLOPE(x) (((x) >> 26) & 0x3f)
+/* MT2701 thermal sensors */
+#define MT2701_TS1 0
+#define MT2701_TS2 1
+#define MT2701_TSABB 2
+
+/* AUXADC channel 11 is used for the temperature sensors */
+#define MT2701_TEMP_AUXADC_CHANNEL 11
+
+/* The total number of temperature sensors in the MT2701 */
+#define MT2701_NUM_SENSORS 3
+
#define THERMAL_NAME "mtk-thermal"
+/* The number of sensing points per bank */
+#define MT2701_NUM_SENSORS_PER_ZONE 3
+
struct mtk_thermal;
+struct thermal_bank_cfg {
+ unsigned int num_sensors;
+ const int *sensors;
+};
+
struct mtk_thermal_bank {
struct mtk_thermal *mt;
int id;
};
+struct mtk_thermal_data {
+ s32 num_banks;
+ s32 num_sensors;
+ s32 auxadc_channel;
+ const int *sensor_mux_values;
+ const int *msr;
+ const int *adcpnp;
+ struct thermal_bank_cfg bank_data[];
+};
+
struct mtk_thermal {
struct device *dev;
void __iomem *thermal_base;
struct clk *clk_peri_therm;
struct clk *clk_auxadc;
-
- struct mtk_thermal_bank banks[MT8173_NUM_ZONES];
-
/* lock: for getting and putting banks */
struct mutex lock;
@@ -144,16 +178,44 @@ struct mtk_thermal {
s32 o_slope;
s32 vts[MT8173_NUM_SENSORS];
+ const struct mtk_thermal_data *conf;
+ struct mtk_thermal_bank banks[];
};
-struct mtk_thermal_bank_cfg {
- unsigned int num_sensors;
- unsigned int sensors[MT8173_NUM_SENSORS_PER_ZONE];
+/* MT8173 thermal sensor data */
+const int mt8173_bank_data[MT8173_NUM_ZONES][3] = {
+ { MT8173_TS2, MT8173_TS3 },
+ { MT8173_TS2, MT8173_TS4 },
+ { MT8173_TS1, MT8173_TS2, MT8173_TSABB },
+ { MT8173_TS2 },
};
-static const int sensor_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
+const int mt8173_msr[MT8173_NUM_SENSORS_PER_ZONE] = {
+ TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR2
+};
-/*
+const int mt8173_adcpnp[MT8173_NUM_SENSORS_PER_ZONE] = {
+ TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2, TEMP_ADCPNP3
+};
+
+const int mt8173_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
+
+/* MT2701 thermal sensor data */
+const int mt2701_bank_data[MT2701_NUM_SENSORS] = {
+ MT2701_TS1, MT2701_TS2, MT2701_TSABB
+};
+
+const int mt2701_msr[MT2701_NUM_SENSORS_PER_ZONE] = {
+ TEMP_MSR0, TEMP_MSR1, TEMP_MSR2
+};
+
+const int mt2701_adcpnp[MT2701_NUM_SENSORS_PER_ZONE] = {
+ TEMP_ADCPNP0, TEMP_ADCPNP1, TEMP_ADCPNP2
+};
+
+const int mt2701_mux_values[MT2701_NUM_SENSORS] = { 0, 1, 16 };
+
+/**
* The MT8173 thermal controller has four banks. Each bank can read up to
* four temperature sensors simultaneously. The MT8173 has a total of 5
* temperature sensors. We use each bank to measure a certain area of the
@@ -166,42 +228,53 @@ static const int sensor_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
* data, and this indeed needs the temperatures of the individual banks
* for making better decisions.
*/
-static const struct mtk_thermal_bank_cfg bank_data[] = {
- {
- .num_sensors = 2,
- .sensors = { MT8173_TS2, MT8173_TS3 },
- }, {
- .num_sensors = 2,
- .sensors = { MT8173_TS2, MT8173_TS4 },
- }, {
- .num_sensors = 3,
- .sensors = { MT8173_TS1, MT8173_TS2, MT8173_TSABB },
- }, {
- .num_sensors = 1,
- .sensors = { MT8173_TS2 },
+static const struct mtk_thermal_data mt8173_thermal_data = {
+ .auxadc_channel = MT8173_TEMP_AUXADC_CHANNEL,
+ .num_banks = MT8173_NUM_ZONES,
+ .num_sensors = MT8173_NUM_SENSORS,
+ .bank_data = {
+ {
+ .num_sensors = 2,
+ .sensors = mt8173_bank_data[0],
+ }, {
+ .num_sensors = 2,
+ .sensors = mt8173_bank_data[1],
+ }, {
+ .num_sensors = 3,
+ .sensors = mt8173_bank_data[2],
+ }, {
+ .num_sensors = 1,
+ .sensors = mt8173_bank_data[3],
+ },
},
+ .msr = mt8173_msr,
+ .adcpnp = mt8173_adcpnp,
+ .sensor_mux_values = mt8173_mux_values,
};
-struct mtk_thermal_sense_point {
- int msr;
- int adcpnp;
-};
-
-static const struct mtk_thermal_sense_point
- sensing_points[MT8173_NUM_SENSORS_PER_ZONE] = {
- {
- .msr = TEMP_MSR0,
- .adcpnp = TEMP_ADCPNP0,
- }, {
- .msr = TEMP_MSR1,
- .adcpnp = TEMP_ADCPNP1,
- }, {
- .msr = TEMP_MSR2,
- .adcpnp = TEMP_ADCPNP2,
- }, {
- .msr = TEMP_MSR3,
- .adcpnp = TEMP_ADCPNP3,
+/*
+ * The MT2701 thermal controller has one bank, which can read up to
+ * three temperature sensors simultaneously. The MT2701 has a total of 3
+ * temperature sensors.
+ *
+ * The thermal core only gets the maximum temperature of this one bank,
+ * so the bank concept wouldn't be necessary here. However, the SVS (Smart
+ * Voltage Scaling) unit makes its decisions based on the same bank
+ * data.
+ */
+static const struct mtk_thermal_data mt2701_thermal_data = {
+ .auxadc_channel = MT2701_TEMP_AUXADC_CHANNEL,
+ .num_banks = 1,
+ .num_sensors = MT2701_NUM_SENSORS,
+ .bank_data = {
+ {
+ .num_sensors = 3,
+ .sensors = mt2701_bank_data,
+ },
},
+ .msr = mt2701_msr,
+ .adcpnp = mt2701_adcpnp,
+ .sensor_mux_values = mt2701_mux_values,
};
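With the per-SoC configuration factored out this way, supporting a further chip reduces to one more mtk_thermal_data instance plus a compatible entry. A hypothetical single-bank, two-sensor part might look like the sketch below (all mtXXXX names are invented):

static const int mtXXXX_bank_data[] = { MT2701_TS1, MT2701_TS2 };

static const struct mtk_thermal_data mtXXXX_thermal_data = {
	.auxadc_channel = MT2701_TEMP_AUXADC_CHANNEL,
	.num_banks = 1,
	.num_sensors = 2,
	.bank_data = {
		{ .num_sensors = 2, .sensors = mtXXXX_bank_data },
	},
	.msr = mt2701_msr,
	.adcpnp = mt2701_adcpnp,
	.sensor_mux_values = mt2701_mux_values,
};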
/**
@@ -270,13 +343,16 @@ static void mtk_thermal_put_bank(struct mtk_thermal_bank *bank)
static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
{
struct mtk_thermal *mt = bank->mt;
+ const struct mtk_thermal_data *conf = mt->conf;
int i, temp = INT_MIN, max = INT_MIN;
u32 raw;
- for (i = 0; i < bank_data[bank->id].num_sensors; i++) {
- raw = readl(mt->thermal_base + sensing_points[i].msr);
+ for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
+ raw = readl(mt->thermal_base + conf->msr[i]);
- temp = raw_to_mcelsius(mt, bank_data[bank->id].sensors[i], raw);
+ temp = raw_to_mcelsius(mt,
+ conf->bank_data[bank->id].sensors[i],
+ raw);
/*
* The first read of a sensor often contains very high bogus
@@ -299,7 +375,7 @@ static int mtk_read_temp(void *data, int *temperature)
int i;
int tempmax = INT_MIN;
- for (i = 0; i < MT8173_NUM_ZONES; i++) {
+ for (i = 0; i < mt->conf->num_banks; i++) {
struct mtk_thermal_bank *bank = &mt->banks[i];
mtk_thermal_get_bank(bank);
@@ -322,7 +398,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
u32 apmixed_phys_base, u32 auxadc_phys_base)
{
struct mtk_thermal_bank *bank = &mt->banks[num];
- const struct mtk_thermal_bank_cfg *cfg = &bank_data[num];
+ const struct mtk_thermal_data *conf = mt->conf;
int i;
bank->id = num;
@@ -368,7 +444,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
* this value will be stored to TEMP_PNPMUXADDR (TEMP_SPARE0)
* automatically by hw
*/
- writel(BIT(MT8173_TEMP_AUXADC_CHANNEL), mt->thermal_base + TEMP_ADCMUX);
+ writel(BIT(conf->auxadc_channel), mt->thermal_base + TEMP_ADCMUX);
/* AHB address for auxadc mux selection */
writel(auxadc_phys_base + AUXADC_CON1_CLR_V,
@@ -379,18 +455,18 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
mt->thermal_base + TEMP_PNPMUXADDR);
/* AHB value for auxadc enable */
- writel(BIT(MT8173_TEMP_AUXADC_CHANNEL), mt->thermal_base + TEMP_ADCEN);
+ writel(BIT(conf->auxadc_channel), mt->thermal_base + TEMP_ADCEN);
/* AHB address for auxadc enable (channel 0 immediate mode selected) */
writel(auxadc_phys_base + AUXADC_CON1_SET_V,
mt->thermal_base + TEMP_ADCENADDR);
/* AHB address for auxadc valid bit */
- writel(auxadc_phys_base + AUXADC_DATA(MT8173_TEMP_AUXADC_CHANNEL),
+ writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel),
mt->thermal_base + TEMP_ADCVALIDADDR);
/* AHB address for auxadc voltage output */
- writel(auxadc_phys_base + AUXADC_DATA(MT8173_TEMP_AUXADC_CHANNEL),
+ writel(auxadc_phys_base + AUXADC_DATA(conf->auxadc_channel),
mt->thermal_base + TEMP_ADCVOLTADDR);
/* read valid & voltage are at the same register */
@@ -407,11 +483,12 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
writel(TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
mt->thermal_base + TEMP_ADCWRITECTRL);
- for (i = 0; i < cfg->num_sensors; i++)
- writel(sensor_mux_values[cfg->sensors[i]],
- mt->thermal_base + sensing_points[i].adcpnp);
+ for (i = 0; i < conf->bank_data[num].num_sensors; i++)
+ writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
+ mt->thermal_base + conf->adcpnp[i]);
- writel((1 << cfg->num_sensors) - 1, mt->thermal_base + TEMP_MONCTL0);
+ writel((1 << conf->bank_data[num].num_sensors) - 1,
+ mt->thermal_base + TEMP_MONCTL0);
writel(TEMP_ADCWRITECTRL_ADC_PNP_WRITE |
TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
@@ -442,7 +519,7 @@ static int mtk_thermal_get_calibration_data(struct device *dev,
/* Start with default values */
mt->adc_ge = 512;
- for (i = 0; i < MT8173_NUM_SENSORS; i++)
+ for (i = 0; i < mt->conf->num_sensors; i++)
mt->vts[i] = 260;
mt->degc_cali = 40;
mt->o_slope = 0;
@@ -486,18 +563,37 @@ out:
return ret;
}
+static const struct of_device_id mtk_thermal_of_match[] = {
+ {
+ .compatible = "mediatek,mt8173-thermal",
+ .data = (void *)&mt8173_thermal_data,
+ },
+ {
+ .compatible = "mediatek,mt2701-thermal",
+ .data = (void *)&mt2701_thermal_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, mtk_thermal_of_match);
+
static int mtk_thermal_probe(struct platform_device *pdev)
{
int ret, i;
struct device_node *auxadc, *apmixedsys, *np = pdev->dev.of_node;
struct mtk_thermal *mt;
struct resource *res;
+ const struct of_device_id *of_id;
u64 auxadc_phys_base, apmixed_phys_base;
+ struct thermal_zone_device *tzdev;
mt = devm_kzalloc(&pdev->dev, sizeof(*mt), GFP_KERNEL);
if (!mt)
return -ENOMEM;
+ of_id = of_match_device(mtk_thermal_of_match, &pdev->dev);
+	if (!of_id)
+		return -EINVAL;
+
+	mt->conf = (const struct mtk_thermal_data *)of_id->data;
+
mt->clk_peri_therm = devm_clk_get(&pdev->dev, "therm");
if (IS_ERR(mt->clk_peri_therm))
return PTR_ERR(mt->clk_peri_therm);
@@ -565,17 +661,23 @@ static int mtk_thermal_probe(struct platform_device *pdev)
goto err_disable_clk_auxadc;
}
- for (i = 0; i < MT8173_NUM_ZONES; i++)
+ for (i = 0; i < mt->conf->num_banks; i++)
mtk_thermal_init_bank(mt, i, apmixed_phys_base,
auxadc_phys_base);
platform_set_drvdata(pdev, mt);
- devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
- &mtk_thermal_ops);
+ tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
+ &mtk_thermal_ops);
+ if (IS_ERR(tzdev)) {
+ ret = PTR_ERR(tzdev);
+ goto err_disable_clk_peri_therm;
+ }
return 0;
+err_disable_clk_peri_therm:
+ clk_disable_unprepare(mt->clk_peri_therm);
err_disable_clk_auxadc:
clk_disable_unprepare(mt->clk_auxadc);
@@ -592,13 +694,6 @@ static int mtk_thermal_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mtk_thermal_of_match[] = {
- {
- .compatible = "mediatek,mt8173-thermal",
- }, {
- },
-};
-
static struct platform_driver mtk_thermal_driver = {
.probe = mtk_thermal_probe,
.remove = mtk_thermal_remove,
@@ -610,6 +705,7 @@ static struct platform_driver mtk_thermal_driver = {
module_platform_driver(mtk_thermal_driver);
+MODULE_AUTHOR("Dawei Chien <dawei.chien@mediatek.com>");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
MODULE_DESCRIPTION("Mediatek thermal driver");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index b8e509c60848..d04ec3b9e5ff 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -101,6 +101,17 @@ static int of_thermal_get_temp(struct thermal_zone_device *tz,
return data->ops->get_temp(data->sensor_data, temp);
}
+static int of_thermal_set_trips(struct thermal_zone_device *tz,
+ int low, int high)
+{
+ struct __thermal_zone *data = tz->devdata;
+
+ if (!data->ops || !data->ops->set_trips)
+ return -EINVAL;
+
+ return data->ops->set_trips(data->sensor_data, low, high);
+}
+
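A sensor driver opts in simply by populating the new callback in its thermal_zone_of_device_ops. A minimal sketch (the foo_* names are invented, not from this patch):

static int foo_set_trips(void *data, int low, int high)
{
	/* program the sensor's low/high temperature interrupts here */
	return 0;
}

static const struct thermal_zone_of_device_ops foo_of_ops = {
	.get_temp = foo_get_temp,	/* assumed to exist elsewhere */
	.set_trips = foo_set_trips,
};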
/**
* of_thermal_get_ntrips - function to export number of available trip
* points.
@@ -181,9 +192,6 @@ static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
{
struct __thermal_zone *data = tz->devdata;
- if (!data->ops || !data->ops->set_emul_temp)
- return -EINVAL;
-
return data->ops->set_emul_temp(data->sensor_data, temp);
}
@@ -191,25 +199,11 @@ static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
enum thermal_trend *trend)
{
struct __thermal_zone *data = tz->devdata;
- long dev_trend;
- int r;
if (!data->ops->get_trend)
return -EINVAL;
- r = data->ops->get_trend(data->sensor_data, &dev_trend);
- if (r)
- return r;
-
- /* TODO: These intervals might have some thresholds, but in core code */
- if (dev_trend > 0)
- *trend = THERMAL_TREND_RAISING;
- else if (dev_trend < 0)
- *trend = THERMAL_TREND_DROPPING;
- else
- *trend = THERMAL_TREND_STABLE;
-
- return 0;
+ return data->ops->get_trend(data->sensor_data, trip, trend);
}
static int of_thermal_bind(struct thermal_zone_device *thermal,
@@ -292,7 +286,7 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
mutex_unlock(&tz->lock);
data->mode = mode;
- thermal_zone_device_update(tz);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
return 0;
}
@@ -427,7 +421,17 @@ thermal_zone_of_add_sensor(struct device_node *zone,
tzd->ops->get_temp = of_thermal_get_temp;
tzd->ops->get_trend = of_thermal_get_trend;
- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
+
+ /*
+	 * The thermal zone core will calculate the window if the sensor
+	 * driver has set the optional set_trips pointer.
+ */
+ if (ops->set_trips)
+ tzd->ops->set_trips = of_thermal_set_trips;
+
+ if (ops->set_emul_temp)
+ tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
+
mutex_unlock(&tzd->lock);
return tzd;
@@ -596,7 +600,7 @@ static int devm_thermal_zone_of_sensor_match(struct device *dev, void *res,
* Return: On success returns a valid struct thermal_zone_device,
* otherwise, it returns a corresponding ERR_PTR(). Caller must
* check the return value with help of IS_ERR() helper.
- * Registered hermal_zone_device device will automatically be
+ * Registered thermal_zone_device will automatically be
 * released when the device is unbound.
*/
struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index f8a3c60bef94..819c6d5d7aa7 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -150,7 +150,7 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data)
{
struct qpnp_tm_chip *chip = data;
- thermal_zone_device_update(chip->tz_dev);
+ thermal_zone_device_update(chip->tz_dev, THERMAL_EVENT_UNSPECIFIED);
return IRQ_HANDLED;
}
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
new file mode 100644
index 000000000000..be32e5abce3c
--- /dev/null
+++ b/drivers/thermal/qcom/Kconfig
@@ -0,0 +1,11 @@
+config QCOM_TSENS
+ tristate "Qualcomm TSENS Temperature Alarm"
+ depends on THERMAL
+ depends on QCOM_QFPROM
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ This enables the thermal sysfs driver for the TSENS device. It shows
+	  up in sysfs as a thermal zone with multiple trip points. Disabling the
+	  thermal zone device via the mode file results in disabling the sensor.
+	  The driver can also set threshold temperatures for both hot and cold
+	  trips and update the thermal zone when a threshold is reached.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
new file mode 100644
index 000000000000..2cc2193637e7
--- /dev/null
+++ b/drivers/thermal/qcom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
+qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
new file mode 100644
index 000000000000..fdf561b8b81d
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8916.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8916 */
+#define BASE0_MASK 0x0000007f
+#define BASE1_MASK 0xfe000000
+#define BASE0_SHIFT 0
+#define BASE1_SHIFT 25
+
+#define S0_P1_MASK 0x00000f80
+#define S1_P1_MASK 0x003e0000
+#define S2_P1_MASK 0xf8000000
+#define S3_P1_MASK 0x000003e0
+#define S4_P1_MASK 0x000f8000
+
+#define S0_P2_MASK 0x0001f000
+#define S1_P2_MASK 0x07c00000
+#define S2_P2_MASK 0x0000001f
+#define S3_P2_MASK 0x00007c00
+#define S4_P2_MASK 0x01f00000
+
+#define S0_P1_SHIFT 7
+#define S1_P1_SHIFT 17
+#define S2_P1_SHIFT 27
+#define S3_P1_SHIFT 5
+#define S4_P1_SHIFT 15
+
+#define S0_P2_SHIFT 12
+#define S1_P2_SHIFT 22
+#define S2_P2_SHIFT 0
+#define S3_P2_SHIFT 10
+#define S4_P2_SHIFT 20
+
+#define CAL_SEL_MASK 0xe0000000
+#define CAL_SEL_SHIFT 29
+
+static int calibrate_8916(struct tsens_device *tmdev)
+{
+ int base0 = 0, base1 = 0, i;
+ u32 p1[5], p2[5];
+ int mode = 0;
+ u32 *qfprom_cdata, *qfprom_csel;
+
+ qfprom_cdata = (u32 *)qfprom_read(tmdev->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ qfprom_csel = (u32 *)qfprom_read(tmdev->dev, "calib_sel");
+ if (IS_ERR(qfprom_csel))
+ return PTR_ERR(qfprom_csel);
+
+ mode = (qfprom_csel[0] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
+ dev_dbg(tmdev->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
+ p2[2] = (qfprom_cdata[1] & S2_P2_MASK) >> S2_P2_SHIFT;
+ p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 3);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = (qfprom_cdata[0] & BASE0_MASK);
+ p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p1[i] = (((base0) + p1[i]) << 3);
+ break;
+ default:
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(tmdev, p1, p2, mode);
+
+ return 0;
+}
+
+static const struct tsens_ops ops_8916 = {
+ .init = init_common,
+ .calibrate = calibrate_8916,
+ .get_temp = get_temp_common,
+};
+
+const struct tsens_data data_8916 = {
+ .num_sensors = 5,
+ .ops = &ops_8916,
+	.hw_ids = (unsigned int []){ 0, 1, 2, 4, 5 },
+};
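To make the decode concrete, a worked sketch with invented fuse values (base1 sits in bits 31:25 of the second word, and each two-point code is (base + field) << 3):

static u32 tsens_8916_code2_demo(void)
{
	u32 cdata1 = 0x52000000;	/* invented qfprom word, not real data */
	u32 base1 = (cdata1 & BASE1_MASK) >> BASE1_SHIFT;	/* 0x29 */
	u32 s0_p2 = 0x11;		/* invented S0_P2 field value */

	return (base1 + s0_p2) << 3;	/* (0x29 + 0x11) << 3 = 0x1d0 */
}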
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
new file mode 100644
index 000000000000..0451277d3a8f
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+#include "tsens.h"
+
+#define CAL_MDEGC 30000
+
+#define CONFIG_ADDR 0x3640
+#define CONFIG_ADDR_8660 0x3620
+/* CONFIG_ADDR bitmasks */
+#define CONFIG 0x9b
+#define CONFIG_MASK 0xf
+#define CONFIG_8660 1
+#define CONFIG_SHIFT_8660 28
+#define CONFIG_MASK_8660 (3 << CONFIG_SHIFT_8660)
+
+#define STATUS_CNTL_ADDR_8064 0x3660
+#define CNTL_ADDR 0x3620
+/* CNTL_ADDR bitmasks */
+#define EN BIT(0)
+#define SW_RST BIT(1)
+#define SENSOR0_EN BIT(3)
+#define SLP_CLK_ENA BIT(26)
+#define SLP_CLK_ENA_8660 BIT(24)
+#define MEASURE_PERIOD 1
+#define SENSOR0_SHIFT 3
+
+/* INT_STATUS_ADDR bitmasks */
+#define MIN_STATUS_MASK BIT(0)
+#define LOWER_STATUS_CLR BIT(1)
+#define UPPER_STATUS_CLR BIT(2)
+#define MAX_STATUS_MASK BIT(3)
+
+#define THRESHOLD_ADDR 0x3624
+/* THRESHOLD_ADDR bitmasks */
+#define THRESHOLD_MAX_LIMIT_SHIFT 24
+#define THRESHOLD_MIN_LIMIT_SHIFT 16
+#define THRESHOLD_UPPER_LIMIT_SHIFT 8
+#define THRESHOLD_LOWER_LIMIT_SHIFT 0
+
+/* Initial temperature threshold values */
+#define LOWER_LIMIT_TH 0x50
+#define UPPER_LIMIT_TH 0xdf
+#define MIN_LIMIT_TH 0x0
+#define MAX_LIMIT_TH 0xff
+
+#define S0_STATUS_ADDR 0x3628
+#define INT_STATUS_ADDR 0x363c
+#define TRDY_MASK BIT(7)
+#define TIMEOUT_US 100
+
+static int suspend_8960(struct tsens_device *tmdev)
+{
+ int ret;
+ unsigned int mask;
+ struct regmap *map = tmdev->map;
+
+ ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, CNTL_ADDR, &tmdev->ctx.control);
+ if (ret)
+ return ret;
+
+ if (tmdev->num_sensors > 1)
+ mask = SLP_CLK_ENA | EN;
+ else
+ mask = SLP_CLK_ENA_8660 | EN;
+
+ ret = regmap_update_bits(map, CNTL_ADDR, mask, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int resume_8960(struct tsens_device *tmdev)
+{
+ int ret;
+ struct regmap *map = tmdev->map;
+
+ ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
+ if (ret)
+ return ret;
+
+ /*
+	 * No separate CONFIG restore is needed for 8660, as its config is
+	 * part of the CNTL register and is restored along with it.
+ */
+ if (tmdev->num_sensors > 1) {
+ ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_write(map, THRESHOLD_ADDR, tmdev->ctx.threshold);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(map, CNTL_ADDR, tmdev->ctx.control);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int enable_8960(struct tsens_device *tmdev, int id)
+{
+ int ret;
+ u32 reg, mask;
+
+ ret = regmap_read(tmdev->map, CNTL_ADDR, &reg);
+ if (ret)
+ return ret;
+
+ mask = BIT(id + SENSOR0_SHIFT);
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg | SW_RST);
+ if (ret)
+ return ret;
+
+ if (tmdev->num_sensors > 1)
+ reg |= mask | SLP_CLK_ENA | EN;
+ else
+ reg |= mask | SLP_CLK_ENA_8660 | EN;
+
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void disable_8960(struct tsens_device *tmdev)
+{
+ int ret;
+ u32 reg_cntl;
+ u32 mask;
+
+ mask = GENMASK(tmdev->num_sensors - 1, 0);
+ mask <<= SENSOR0_SHIFT;
+ mask |= EN;
+
+ ret = regmap_read(tmdev->map, CNTL_ADDR, &reg_cntl);
+ if (ret)
+ return;
+
+ reg_cntl &= ~mask;
+
+ if (tmdev->num_sensors > 1)
+ reg_cntl &= ~SLP_CLK_ENA;
+ else
+ reg_cntl &= ~SLP_CLK_ENA_8660;
+
+ regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+}
+
+static int init_8960(struct tsens_device *tmdev)
+{
+ int ret, i;
+ u32 reg_cntl;
+
+ tmdev->map = dev_get_regmap(tmdev->dev, NULL);
+ if (!tmdev->map)
+ return -ENODEV;
+
+ /*
+ * The status registers for each sensor are discontiguous
+ * because some SoCs have 5 sensors while others have more
+	 * but the control registers stay in the same place, i.e.
+ * directly after the first 5 status registers.
+ */
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ if (i >= 5)
+ tmdev->sensor[i].status = S0_STATUS_ADDR + 40;
+ tmdev->sensor[i].status += i * 4;
+ }
+
+ reg_cntl = SW_RST;
+ ret = regmap_update_bits(tmdev->map, CNTL_ADDR, SW_RST, reg_cntl);
+ if (ret)
+ return ret;
+
+ if (tmdev->num_sensors > 1) {
+ reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
+ reg_cntl &= ~SW_RST;
+ ret = regmap_update_bits(tmdev->map, CONFIG_ADDR,
+ CONFIG_MASK, CONFIG);
+ } else {
+ reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
+ reg_cntl &= ~CONFIG_MASK_8660;
+ reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
+ }
+
+ reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ if (ret)
+ return ret;
+
+ reg_cntl |= EN;
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int calibrate_8960(struct tsens_device *tmdev)
+{
+ int i;
+ char *data;
+
+ ssize_t num_read = tmdev->num_sensors;
+ struct tsens_sensor *s = tmdev->sensor;
+
+ data = qfprom_read(tmdev->dev, "calib");
+ if (IS_ERR(data))
+ data = qfprom_read(tmdev->dev, "calib_backup");
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ for (i = 0; i < num_read; i++, s++)
+ s->offset = data[i];
+
+ return 0;
+}
+
+/* Temperature on y axis and ADC-code on x-axis */
+static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
+{
+ int slope, offset;
+
+ slope = thermal_zone_get_slope(s->tzd);
+ offset = CAL_MDEGC - slope * s->offset;
+
+ return adc_code * slope + offset;
+}
+
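To make the arithmetic concrete (all numbers invented; the slope really comes from platform data via thermal_zone_get_slope()):

/*
 * Worked example: slope = 976 mdegC/code, calibration byte
 * s->offset = 25, ADC reading = 40:
 *
 *   offset = 30000 - 976 * 25 = 5600
 *   temp   = 40 * 976 + 5600  = 44640 mdegC (~44.6 degC)
 */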
+static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
+{
+ int ret;
+ u32 code, trdy;
+ const struct tsens_sensor *s = &tmdev->sensor[id];
+ unsigned long timeout;
+
+ timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
+ do {
+ ret = regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy);
+ if (ret)
+ return ret;
+ if (!(trdy & TRDY_MASK))
+ continue;
+ ret = regmap_read(tmdev->map, s->status, &code);
+ if (ret)
+ return ret;
+ *temp = code_to_mdegC(code, s);
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static const struct tsens_ops ops_8960 = {
+ .init = init_8960,
+ .calibrate = calibrate_8960,
+ .get_temp = get_temp_8960,
+ .enable = enable_8960,
+ .disable = disable_8960,
+ .suspend = suspend_8960,
+ .resume = resume_8960,
+};
+
+const struct tsens_data data_8960 = {
+ .num_sensors = 11,
+ .ops = &ops_8960,
+};
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-8974.c
new file mode 100644
index 000000000000..9baf77e8cbe3
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8974.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8974 */
+#define BASE1_MASK 0xff
+#define S0_P1_MASK 0x3f00
+#define S1_P1_MASK 0xfc000
+#define S2_P1_MASK 0x3f00000
+#define S3_P1_MASK 0xfc000000
+#define S4_P1_MASK 0x3f
+#define S5_P1_MASK 0xfc0
+#define S6_P1_MASK 0x3f000
+#define S7_P1_MASK 0xfc0000
+#define S8_P1_MASK 0x3f000000
+#define S8_P1_MASK_BKP 0x3f
+#define S9_P1_MASK 0x3f
+#define S9_P1_MASK_BKP 0xfc0
+#define S10_P1_MASK 0xfc0
+#define S10_P1_MASK_BKP 0x3f000
+#define CAL_SEL_0_1 0xc0000000
+#define CAL_SEL_2 0x40000000
+#define CAL_SEL_SHIFT 30
+#define CAL_SEL_SHIFT_2 28
+
+#define S0_P1_SHIFT 8
+#define S1_P1_SHIFT 14
+#define S2_P1_SHIFT 20
+#define S3_P1_SHIFT 26
+#define S5_P1_SHIFT 6
+#define S6_P1_SHIFT 12
+#define S7_P1_SHIFT 18
+#define S8_P1_SHIFT 24
+#define S9_P1_BKP_SHIFT 6
+#define S10_P1_SHIFT 6
+#define S10_P1_BKP_SHIFT 12
+
+#define BASE2_SHIFT 12
+#define BASE2_BKP_SHIFT 18
+#define S0_P2_SHIFT 20
+#define S0_P2_BKP_SHIFT 26
+#define S1_P2_SHIFT 26
+#define S2_P2_BKP_SHIFT 6
+#define S3_P2_SHIFT 6
+#define S3_P2_BKP_SHIFT 12
+#define S4_P2_SHIFT 12
+#define S4_P2_BKP_SHIFT 18
+#define S5_P2_SHIFT 18
+#define S5_P2_BKP_SHIFT 24
+#define S6_P2_SHIFT 24
+#define S7_P2_BKP_SHIFT 6
+#define S8_P2_SHIFT 6
+#define S8_P2_BKP_SHIFT 12
+#define S9_P2_SHIFT 12
+#define S9_P2_BKP_SHIFT 18
+#define S10_P2_SHIFT 18
+#define S10_P2_BKP_SHIFT 24
+
+#define BASE2_MASK 0xff000
+#define BASE2_BKP_MASK 0xfc0000
+#define S0_P2_MASK 0x3f00000
+#define S0_P2_BKP_MASK 0xfc000000
+#define S1_P2_MASK 0xfc000000
+#define S1_P2_BKP_MASK 0x3f
+#define S2_P2_MASK 0x3f
+#define S2_P2_BKP_MASK 0xfc0
+#define S3_P2_MASK 0xfc0
+#define S3_P2_BKP_MASK 0x3f000
+#define S4_P2_MASK 0x3f000
+#define S4_P2_BKP_MASK 0xfc0000
+#define S5_P2_MASK 0xfc0000
+#define S5_P2_BKP_MASK 0x3f000000
+#define S6_P2_MASK 0x3f000000
+#define S6_P2_BKP_MASK 0x3f
+#define S7_P2_MASK 0x3f
+#define S7_P2_BKP_MASK 0xfc0
+#define S8_P2_MASK 0xfc0
+#define S8_P2_BKP_MASK 0x3f000
+#define S9_P2_MASK 0x3f000
+#define S9_P2_BKP_MASK 0xfc0000
+#define S10_P2_MASK 0xfc0000
+#define S10_P2_BKP_MASK 0x3f000000
+
+#define BKP_SEL 0x3
+#define BKP_REDUN_SEL 0xe0000000
+#define BKP_REDUN_SHIFT 29
+
+#define BIT_APPEND 0x3
+
+static int calibrate_8974(struct tsens_device *tmdev)
+{
+ int base1 = 0, base2 = 0, i;
+ u32 p1[11], p2[11];
+ int mode = 0;
+ u32 *calib, *bkp;
+ u32 calib_redun_sel;
+
+ calib = (u32 *)qfprom_read(tmdev->dev, "calib");
+ if (IS_ERR(calib))
+ return PTR_ERR(calib);
+
+ bkp = (u32 *)qfprom_read(tmdev->dev, "calib_backup");
+ if (IS_ERR(bkp))
+ return PTR_ERR(bkp);
+
+ calib_redun_sel = bkp[1] & BKP_REDUN_SEL;
+ calib_redun_sel >>= BKP_REDUN_SHIFT;
+
+ if (calib_redun_sel == BKP_SEL) {
+ mode = (calib[4] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
+ mode |= (calib[5] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base2 = (bkp[2] & BASE2_BKP_MASK) >> BASE2_BKP_SHIFT;
+ p2[0] = (bkp[2] & S0_P2_BKP_MASK) >> S0_P2_BKP_SHIFT;
+ p2[1] = (bkp[3] & S1_P2_BKP_MASK);
+ p2[2] = (bkp[3] & S2_P2_BKP_MASK) >> S2_P2_BKP_SHIFT;
+ p2[3] = (bkp[3] & S3_P2_BKP_MASK) >> S3_P2_BKP_SHIFT;
+ p2[4] = (bkp[3] & S4_P2_BKP_MASK) >> S4_P2_BKP_SHIFT;
+ p2[5] = (calib[4] & S5_P2_BKP_MASK) >> S5_P2_BKP_SHIFT;
+ p2[6] = (calib[5] & S6_P2_BKP_MASK);
+ p2[7] = (calib[5] & S7_P2_BKP_MASK) >> S7_P2_BKP_SHIFT;
+ p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT;
+ p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT;
+ p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT;
+ /* Fall through */
+ case ONE_PT_CALIB:
+ case ONE_PT_CALIB2:
+ base1 = bkp[0] & BASE1_MASK;
+ p1[0] = (bkp[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (bkp[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (bkp[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (bkp[0] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (bkp[1] & S4_P1_MASK);
+ p1[5] = (bkp[1] & S5_P1_MASK) >> S5_P1_SHIFT;
+ p1[6] = (bkp[1] & S6_P1_MASK) >> S6_P1_SHIFT;
+ p1[7] = (bkp[1] & S7_P1_MASK) >> S7_P1_SHIFT;
+ p1[8] = (bkp[2] & S8_P1_MASK_BKP) >> S8_P1_SHIFT;
+ p1[9] = (bkp[2] & S9_P1_MASK_BKP) >> S9_P1_BKP_SHIFT;
+ p1[10] = (bkp[2] & S10_P1_MASK_BKP) >> S10_P1_BKP_SHIFT;
+ break;
+ }
+ } else {
+ mode = (calib[1] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
+ mode |= (calib[3] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base2 = (calib[2] & BASE2_MASK) >> BASE2_SHIFT;
+ p2[0] = (calib[2] & S0_P2_MASK) >> S0_P2_SHIFT;
+ p2[1] = (calib[2] & S1_P2_MASK) >> S1_P2_SHIFT;
+ p2[2] = (calib[3] & S2_P2_MASK);
+ p2[3] = (calib[3] & S3_P2_MASK) >> S3_P2_SHIFT;
+ p2[4] = (calib[3] & S4_P2_MASK) >> S4_P2_SHIFT;
+ p2[5] = (calib[3] & S5_P2_MASK) >> S5_P2_SHIFT;
+ p2[6] = (calib[3] & S6_P2_MASK) >> S6_P2_SHIFT;
+ p2[7] = (calib[4] & S7_P2_MASK);
+ p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT;
+ p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT;
+ p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT;
+ /* Fall through */
+ case ONE_PT_CALIB:
+ case ONE_PT_CALIB2:
+ base1 = calib[0] & BASE1_MASK;
+ p1[0] = (calib[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (calib[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (calib[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (calib[0] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (calib[1] & S4_P1_MASK);
+ p1[5] = (calib[1] & S5_P1_MASK) >> S5_P1_SHIFT;
+ p1[6] = (calib[1] & S6_P1_MASK) >> S6_P1_SHIFT;
+ p1[7] = (calib[1] & S7_P1_MASK) >> S7_P1_SHIFT;
+ p1[8] = (calib[1] & S8_P1_MASK) >> S8_P1_SHIFT;
+ p1[9] = (calib[2] & S9_P1_MASK);
+ p1[10] = (calib[2] & S10_P1_MASK) >> S10_P1_SHIFT;
+ break;
+ }
+ }
+
+ switch (mode) {
+ case ONE_PT_CALIB:
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p1[i] += (base1 << 2) | BIT_APPEND;
+ break;
+ case TWO_PT_CALIB:
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ p2[i] += base2;
+ p2[i] <<= 2;
+ p2[i] |= BIT_APPEND;
+ }
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ p1[i] += base1;
+ p1[i] <<= 2;
+ p1[i] |= BIT_APPEND;
+ }
+ break;
+ default:
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p2[i] = 780;
+ p1[0] = 502;
+ p1[1] = 509;
+ p1[2] = 503;
+ p1[3] = 509;
+ p1[4] = 505;
+ p1[5] = 509;
+ p1[6] = 507;
+ p1[7] = 510;
+ p1[8] = 508;
+ p1[9] = 509;
+ p1[10] = 508;
+ break;
+ }
+
+ compute_intercept_slope(tmdev, p1, p2, mode);
+
+ return 0;
+}
+
+static const struct tsens_ops ops_8974 = {
+ .init = init_common,
+ .calibrate = calibrate_8974,
+ .get_temp = get_temp_common,
+};
+
+const struct tsens_data data_8974 = {
+ .num_sensors = 11,
+ .ops = &ops_8974,
+};
diff --git a/drivers/thermal/qcom/tsens-8996.c b/drivers/thermal/qcom/tsens-8996.c
new file mode 100644
index 000000000000..e1f77818d8fa
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8996.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include "tsens.h"
+
+#define STATUS_OFFSET 0x10a0
+#define LAST_TEMP_MASK 0xfff
+#define STATUS_VALID_BIT BIT(21)
+#define CODE_SIGN_BIT BIT(11)
+
+static int get_temp_8996(struct tsens_device *tmdev, int id, int *temp)
+{
+ struct tsens_sensor *s = &tmdev->sensor[id];
+ u32 code;
+ unsigned int sensor_addr;
+ int last_temp = 0, last_temp2 = 0, last_temp3 = 0, ret;
+
+ sensor_addr = STATUS_OFFSET + s->hw_id * 4;
+ ret = regmap_read(tmdev->map, sensor_addr, &code);
+ if (ret)
+ return ret;
+ last_temp = code & LAST_TEMP_MASK;
+ if (code & STATUS_VALID_BIT)
+ goto done;
+
+ /* Try a second time */
+ ret = regmap_read(tmdev->map, sensor_addr, &code);
+ if (ret)
+ return ret;
+ if (code & STATUS_VALID_BIT) {
+ last_temp = code & LAST_TEMP_MASK;
+ goto done;
+ } else {
+ last_temp2 = code & LAST_TEMP_MASK;
+ }
+
+ /* Try a third/last time */
+ ret = regmap_read(tmdev->map, sensor_addr, &code);
+ if (ret)
+ return ret;
+ if (code & STATUS_VALID_BIT) {
+ last_temp = code & LAST_TEMP_MASK;
+ goto done;
+ } else {
+ last_temp3 = code & LAST_TEMP_MASK;
+ }
+
+ if (last_temp == last_temp2)
+ last_temp = last_temp2;
+ else if (last_temp2 == last_temp3)
+ last_temp = last_temp3;
+done:
+	/* The sign bit (bit 11) marks a negative value; sign-extend it */
+ if (last_temp & CODE_SIGN_BIT)
+ last_temp |= ~CODE_SIGN_BIT;
+
+	/* Temperatures are in deciCelsius */
+ *temp = last_temp * 100;
+
+ return 0;
+}
+
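A short sketch of the sign handling above: the raw field is 12 bits wide, so a reading with bit 11 set is negative (value invented):

/*
 * e.g. a raw reading of 0xfff has CODE_SIGN_BIT (bit 11) set, so
 *   last_temp |= ~CODE_SIGN_BIT;  ->  0xfff | 0xfffff7ff = -1
 * and *temp = -1 * 100 = -100 mdegC, i.e. -0.1 degC.
 */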
+static const struct tsens_ops ops_8996 = {
+ .init = init_common,
+ .get_temp = get_temp_8996,
+};
+
+const struct tsens_data data_8996 = {
+ .num_sensors = 13,
+ .ops = &ops_8996,
+};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
new file mode 100644
index 000000000000..b1449ad67fc0
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include "tsens.h"
+
+#define S0_ST_ADDR 0x1030
+#define SN_ADDR_OFFSET 0x4
+#define SN_ST_TEMP_MASK 0x3ff
+#define CAL_DEGC_PT1 30
+#define CAL_DEGC_PT2 120
+#define SLOPE_FACTOR 1000
+#define SLOPE_DEFAULT 3200
+
+char *qfprom_read(struct device *dev, const char *cname)
+{
+ struct nvmem_cell *cell;
+ ssize_t data;
+ char *ret;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell))
+ return ERR_CAST(cell);
+
+ ret = nvmem_cell_read(cell, &data);
+ nvmem_cell_put(cell);
+
+ return ret;
+}
+
+/*
+ * Use this function on devices where slope and offset calculations
+ * depend on calibration data read from qfprom. On others the slope
+ * and offset values are derived from tz->tzp->slope and tz->tzp->offset
+ * resp.
+ */
+void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
+ u32 *p2, u32 mode)
+{
+ int i;
+ int num, den;
+
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ dev_dbg(tmdev->dev,
+ "sensor%d - data_point1:%#x data_point2:%#x\n",
+ i, p1[i], p2[i]);
+
+ tmdev->sensor[i].slope = SLOPE_DEFAULT;
+ if (mode == TWO_PT_CALIB) {
+ /*
+			 * slope (m) = (adc_code2 - adc_code1) /
+			 *            (temp_120_degc - temp_30_degc)
+			 * i.e. (y2 - y1) / (x2 - x1)
+ */
+ num = p2[i] - p1[i];
+ num *= SLOPE_FACTOR;
+ den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
+ tmdev->sensor[i].slope = num / den;
+ }
+
+ tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
+ (CAL_DEGC_PT1 *
+ tmdev->sensor[i].slope);
+ dev_dbg(tmdev->dev, "offset:%d\n", tmdev->sensor[i].offset);
+ }
+}
+
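Worked through with the default fallback codes used by calibrate_8916() (p1 = 500 at 30 degC, p2 = 780 at 120 degC, TWO_PT_CALIB):

/*
 *   slope  = (780 - 500) * 1000 / (120 - 30) = 3111
 *   offset = 500 * 1000 - 30 * 3111          = 406670
 */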
+static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
+{
+ int degc, num, den;
+
+ num = (adc_code * SLOPE_FACTOR) - s->offset;
+ den = s->slope;
+
+ if (num > 0)
+ degc = num + (den / 2);
+ else if (num < 0)
+ degc = num - (den / 2);
+ else
+ degc = num;
+
+ degc /= den;
+
+ return degc;
+}
+
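Continuing the same example (slope = 3111, offset = 406670), an ADC code of 650 decodes with round-to-nearest division:

/*
 *   num  = 650 * 1000 - 406670        = 243330
 *   degc = (243330 + 3111 / 2) / 3111 = 78   (exact value ~78.2)
 */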
+int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
+{
+ struct tsens_sensor *s = &tmdev->sensor[id];
+ u32 code;
+ unsigned int sensor_addr;
+ int last_temp = 0, ret;
+
+ sensor_addr = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
+ ret = regmap_read(tmdev->map, sensor_addr, &code);
+ if (ret)
+ return ret;
+ last_temp = code & SN_ST_TEMP_MASK;
+
+ *temp = code_to_degc(last_temp, s) * 1000;
+
+ return 0;
+}
+
+static const struct regmap_config tsens_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+int __init init_common(struct tsens_device *tmdev)
+{
+ void __iomem *base;
+
+ base = of_iomap(tmdev->dev->of_node, 0);
+ if (!base)
+ return -EINVAL;
+
+ tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config);
+ if (IS_ERR(tmdev->map)) {
+ iounmap(base);
+ return PTR_ERR(tmdev->map);
+ }
+
+ return 0;
+}
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
new file mode 100644
index 000000000000..3f9fe6aa51cc
--- /dev/null
+++ b/drivers/thermal/qcom/tsens.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include "tsens.h"
+
+static int tsens_get_temp(void *data, int *temp)
+{
+ const struct tsens_sensor *s = data;
+ struct tsens_device *tmdev = s->tmdev;
+
+ return tmdev->ops->get_temp(tmdev, s->id, temp);
+}
+
+static int tsens_get_trend(void *p, int trip, enum thermal_trend *trend)
+{
+ const struct tsens_sensor *s = p;
+ struct tsens_device *tmdev = s->tmdev;
+
+ if (tmdev->ops->get_trend)
+ return tmdev->ops->get_trend(tmdev, s->id, trend);
+
+ return -ENOTSUPP;
+}
+
+static int __maybe_unused tsens_suspend(struct device *dev)
+{
+ struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+ if (tmdev->ops && tmdev->ops->suspend)
+ return tmdev->ops->suspend(tmdev);
+
+ return 0;
+}
+
+static int __maybe_unused tsens_resume(struct device *dev)
+{
+ struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+ if (tmdev->ops && tmdev->ops->resume)
+ return tmdev->ops->resume(tmdev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
+
+static const struct of_device_id tsens_table[] = {
+ {
+ .compatible = "qcom,msm8916-tsens",
+ .data = &data_8916,
+ }, {
+ .compatible = "qcom,msm8974-tsens",
+ .data = &data_8974,
+ }, {
+ .compatible = "qcom,msm8996-tsens",
+ .data = &data_8996,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tsens_table);
+
+static const struct thermal_zone_of_device_ops tsens_of_ops = {
+ .get_temp = tsens_get_temp,
+ .get_trend = tsens_get_trend,
+};
+
+static int tsens_register(struct tsens_device *tmdev)
+{
+ int i;
+ struct thermal_zone_device *tzd;
+
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ tmdev->sensor[i].tmdev = tmdev;
+ tmdev->sensor[i].id = i;
+ tzd = devm_thermal_zone_of_sensor_register(tmdev->dev, i,
+ &tmdev->sensor[i],
+ &tsens_of_ops);
+ if (IS_ERR(tzd))
+ continue;
+ tmdev->sensor[i].tzd = tzd;
+ if (tmdev->ops->enable)
+ tmdev->ops->enable(tmdev, i);
+ }
+ return 0;
+}
+
+static int tsens_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ struct device *dev;
+ struct device_node *np;
+ struct tsens_sensor *s;
+ struct tsens_device *tmdev;
+ const struct tsens_data *data;
+ const struct of_device_id *id;
+
+ if (pdev->dev.of_node)
+ dev = &pdev->dev;
+ else
+ dev = pdev->dev.parent;
+
+ np = dev->of_node;
+
+ id = of_match_node(tsens_table, np);
+ if (id)
+ data = id->data;
+ else
+ data = &data_8960;
+
+ if (data->num_sensors <= 0) {
+ dev_err(dev, "invalid number of sensors\n");
+ return -EINVAL;
+ }
+
+ tmdev = devm_kzalloc(dev, sizeof(*tmdev) +
+ data->num_sensors * sizeof(*s), GFP_KERNEL);
+ if (!tmdev)
+ return -ENOMEM;
+
+ tmdev->dev = dev;
+ tmdev->num_sensors = data->num_sensors;
+ tmdev->ops = data->ops;
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ if (data->hw_ids)
+ tmdev->sensor[i].hw_id = data->hw_ids[i];
+ else
+ tmdev->sensor[i].hw_id = i;
+ }
+
+ if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp)
+ return -EINVAL;
+
+ ret = tmdev->ops->init(tmdev);
+ if (ret < 0) {
+ dev_err(dev, "tsens init failed\n");
+ return ret;
+ }
+
+ if (tmdev->ops->calibrate) {
+ ret = tmdev->ops->calibrate(tmdev);
+ if (ret < 0) {
+ dev_err(dev, "tsens calibration failed\n");
+ return ret;
+ }
+ }
+
+ ret = tsens_register(tmdev);
+
+ platform_set_drvdata(pdev, tmdev);
+
+ return ret;
+}
+
+static int tsens_remove(struct platform_device *pdev)
+{
+ struct tsens_device *tmdev = platform_get_drvdata(pdev);
+
+ if (tmdev->ops->disable)
+ tmdev->ops->disable(tmdev);
+
+ return 0;
+}
+
+static struct platform_driver tsens_driver = {
+ .probe = tsens_probe,
+ .remove = tsens_remove,
+ .driver = {
+ .name = "qcom-tsens",
+ .pm = &tsens_pm_ops,
+ .of_match_table = tsens_table,
+ },
+};
+module_platform_driver(tsens_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM Temperature Sensor driver");
+MODULE_ALIAS("platform:qcom-tsens");
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
new file mode 100644
index 000000000000..911c1978892b
--- /dev/null
+++ b/drivers/thermal/qcom/tsens.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_TSENS_H__
+#define __QCOM_TSENS_H__
+
+#define ONE_PT_CALIB 0x1
+#define ONE_PT_CALIB2 0x2
+#define TWO_PT_CALIB 0x3
+
+#include <linux/thermal.h>
+
+struct tsens_device;
+
+struct tsens_sensor {
+ struct tsens_device *tmdev;
+ struct thermal_zone_device *tzd;
+ int offset;
+ int id;
+ int hw_id;
+ int slope;
+ u32 status;
+};
+
+/**
+ * struct tsens_ops - operations as supported by the tsens device
+ * @init: Function to initialize the tsens device
+ * @calibrate: Function to calibrate the tsens device
+ * @get_temp: Function which returns the temp in millidegC
+ * @enable: Function to enable (clocks/power) tsens device
+ * @disable: Function to disable the tsens device
+ * @suspend: Function to suspend the tsens device
+ * @resume: Function to resume the tsens device
+ * @get_trend: Function to get the thermal/temp trend
+ */
+struct tsens_ops {
+ /* mandatory callbacks */
+ int (*init)(struct tsens_device *);
+ int (*calibrate)(struct tsens_device *);
+ int (*get_temp)(struct tsens_device *, int, int *);
+ /* optional callbacks */
+ int (*enable)(struct tsens_device *, int);
+ void (*disable)(struct tsens_device *);
+ int (*suspend)(struct tsens_device *);
+ int (*resume)(struct tsens_device *);
+ int (*get_trend)(struct tsens_device *, int, enum thermal_trend *);
+};
+
+/**
+ * struct tsens_data - tsens instance specific data
+ * @num_sensors: Max number of sensors supported by platform
+ * @ops: operations the tsens instance supports
+ * @hw_ids: Subset of sensors ids supported by platform, if not the first n
+ */
+struct tsens_data {
+ const u32 num_sensors;
+ const struct tsens_ops *ops;
+ unsigned int *hw_ids;
+};
+
+/* Registers to be saved/restored across a context loss */
+struct tsens_context {
+ int threshold;
+ int control;
+};
+
+struct tsens_device {
+ struct device *dev;
+ u32 num_sensors;
+ struct regmap *map;
+ struct regmap_field *status_field;
+ struct tsens_context ctx;
+ bool trdy;
+ const struct tsens_ops *ops;
+ struct tsens_sensor sensor[0];
+};
+
+char *qfprom_read(struct device *, const char *);
+void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32);
+int init_common(struct tsens_device *);
+int get_temp_common(struct tsens_device *, int, int *);
+
+extern const struct tsens_data data_8916, data_8974, data_8960, data_8996;
+
+#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
new file mode 100644
index 000000000000..644ba526d9ea
--- /dev/null
+++ b/drivers/thermal/qoriq_thermal.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+#define SITES_MAX 16
+
+/*
+ * QorIQ TMU Registers
+ */
+struct qoriq_tmu_site_regs {
+ u32 tritsr; /* Immediate Temperature Site Register */
+ u32 tratsr; /* Average Temperature Site Register */
+ u8 res0[0x8];
+};
+
+struct qoriq_tmu_regs {
+ u32 tmr; /* Mode Register */
+#define TMR_DISABLE 0x0
+#define TMR_ME 0x80000000
+#define TMR_ALPF 0x0c000000
+ u32 tsr; /* Status Register */
+ u32 tmtmir; /* Temperature measurement interval Register */
+#define TMTMIR_DEFAULT 0x0000000f
+ u8 res0[0x14];
+ u32 tier; /* Interrupt Enable Register */
+#define TIER_DISABLE 0x0
+ u32 tidr; /* Interrupt Detect Register */
+ u32 tiscr; /* Interrupt Site Capture Register */
+ u32 ticscr; /* Interrupt Critical Site Capture Register */
+ u8 res1[0x10];
+ u32 tmhtcrh; /* High Temperature Capture Register */
+ u32 tmhtcrl; /* Low Temperature Capture Register */
+ u8 res2[0x8];
+ u32 tmhtitr; /* High Temperature Immediate Threshold */
+ u32 tmhtatr; /* High Temperature Average Threshold */
+ u32 tmhtactr; /* High Temperature Average Crit Threshold */
+ u8 res3[0x24];
+ u32 ttcfgr; /* Temperature Configuration Register */
+ u32 tscfgr; /* Sensor Configuration Register */
+ u8 res4[0x78];
+ struct qoriq_tmu_site_regs site[SITES_MAX];
+ u8 res5[0x9f8];
+ u32 ipbrr0; /* IP Block Revision Register 0 */
+ u32 ipbrr1; /* IP Block Revision Register 1 */
+ u8 res6[0x310];
+ u32 ttr0cr; /* Temperature Range 0 Control Register */
+ u32 ttr1cr; /* Temperature Range 1 Control Register */
+ u32 ttr2cr; /* Temperature Range 2 Control Register */
+ u32 ttr3cr; /* Temperature Range 3 Control Register */
+};
+
+/*
+ * Thermal zone data
+ */
+struct qoriq_tmu_data {
+ struct thermal_zone_device *tz;
+ struct qoriq_tmu_regs __iomem *regs;
+ int sensor_id;
+ bool little_endian;
+};
+
+static void tmu_write(struct qoriq_tmu_data *p, u32 val, void __iomem *addr)
+{
+ if (p->little_endian)
+ iowrite32(val, addr);
+ else
+ iowrite32be(val, addr);
+}
+
+static u32 tmu_read(struct qoriq_tmu_data *p, void __iomem *addr)
+{
+ if (p->little_endian)
+ return ioread32(addr);
+ else
+ return ioread32be(addr);
+}
+
+static int tmu_get_temp(void *p, int *temp)
+{
+ u32 val;
+ struct qoriq_tmu_data *data = p;
+
+ val = tmu_read(data, &data->regs->site[data->sensor_id].tritsr);
+ *temp = (val & 0xff) * 1000;
+
+ return 0;
+}
+
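For example (register value invented): only the low byte of TRITSR is the temperature, in whole degrees Celsius:

/*
 * e.g. TRITSR = 0x80000046 -> (0x46 & 0xff) * 1000 = 70000 mdegC,
 * i.e. 70 degC; the top bit is the reading-valid flag, which this
 * version does not yet check.
 */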
+static int qoriq_tmu_get_sensor_id(void)
+{
+ int ret, id;
+ struct of_phandle_args sensor_specs;
+ struct device_node *np, *sensor_np;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np)
+ return -ENODEV;
+
+ sensor_np = of_get_next_child(np, NULL);
+ ret = of_parse_phandle_with_args(sensor_np, "thermal-sensors",
+ "#thermal-sensor-cells",
+ 0, &sensor_specs);
+ if (ret) {
+ of_node_put(np);
+ of_node_put(sensor_np);
+ return ret;
+ }
+
+ if (sensor_specs.args_count >= 1) {
+ id = sensor_specs.args[0];
+ WARN(sensor_specs.args_count > 1,
+ "%s: too many cells in sensor specifier %d\n",
+ sensor_specs.np->name, sensor_specs.args_count);
+ } else {
+ id = 0;
+ }
+
+ of_node_put(np);
+ of_node_put(sensor_np);
+
+ return id;
+}
+
+static int qoriq_tmu_calibration(struct platform_device *pdev)
+{
+ int i, val, len;
+ u32 range[4];
+ const u32 *calibration;
+ struct device_node *np = pdev->dev.of_node;
+ struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
+
+ if (of_property_read_u32_array(np, "fsl,tmu-range", range, 4)) {
+ dev_err(&pdev->dev, "missing calibration range.\n");
+ return -ENODEV;
+ }
+
+ /* Init temperature range registers */
+ tmu_write(data, range[0], &data->regs->ttr0cr);
+ tmu_write(data, range[1], &data->regs->ttr1cr);
+ tmu_write(data, range[2], &data->regs->ttr2cr);
+ tmu_write(data, range[3], &data->regs->ttr3cr);
+
+ calibration = of_get_property(np, "fsl,tmu-calibration", &len);
+ if (calibration == NULL || len % 8) {
+ dev_err(&pdev->dev, "invalid calibration data.\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < len; i += 8, calibration += 2) {
+ val = of_read_number(calibration, 1);
+ tmu_write(data, val, &data->regs->ttcfgr);
+ val = of_read_number(calibration + 1, 1);
+ tmu_write(data, val, &data->regs->tscfgr);
+ }
+
+ return 0;
+}
+
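A hypothetical device-tree fragment consumed by this function (values invented; see the fsl,qoriq-tmu binding for real tables):

/*
 *   fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x30061>;
 *   fsl,tmu-calibration = <0x00000000 0x00000025>,
 *                         <0x00000001 0x00000028>;
 *
 * Each 8-byte pair is written to TTCFGR and then TSCFGR.
 */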
+static void qoriq_tmu_init_device(struct qoriq_tmu_data *data)
+{
+ /* Disable interrupt, using polling instead */
+ tmu_write(data, TIER_DISABLE, &data->regs->tier);
+
+ /* Set update_interval */
+ tmu_write(data, TMTMIR_DEFAULT, &data->regs->tmtmir);
+
+ /* Disable monitoring */
+ tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+}
+
+static struct thermal_zone_of_device_ops tmu_tz_ops = {
+ .get_temp = tmu_get_temp,
+};
+
+static int qoriq_tmu_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct qoriq_tmu_data *data;
+ struct device_node *np = pdev->dev.of_node;
+ u32 site = 0;
+
+ if (!np) {
+ dev_err(&pdev->dev, "Device OF-Node is NULL");
+ return -ENODEV;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(struct qoriq_tmu_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
+ data->little_endian = of_property_read_bool(np, "little-endian");
+
+ data->sensor_id = qoriq_tmu_get_sensor_id();
+ if (data->sensor_id < 0) {
+ dev_err(&pdev->dev, "Failed to get sensor id\n");
+ ret = -ENODEV;
+ goto err_iomap;
+ }
+
+ data->regs = of_iomap(np, 0);
+ if (!data->regs) {
+ dev_err(&pdev->dev, "Failed to get memory region\n");
+ ret = -ENODEV;
+ goto err_iomap;
+ }
+
+ qoriq_tmu_init_device(data); /* TMU initialization */
+
+ ret = qoriq_tmu_calibration(pdev); /* TMU calibration */
+ if (ret < 0)
+ goto err_tmu;
+
+ data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id,
+ data, &tmu_tz_ops);
+ if (IS_ERR(data->tz)) {
+ ret = PTR_ERR(data->tz);
+ dev_err(&pdev->dev,
+ "Failed to register thermal zone device %d\n", ret);
+ goto err_tmu;
+ }
+
+ /* Enable monitoring */
+ site |= 0x1 << (15 - data->sensor_id);
+ tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
+
+ return 0;
+
+err_tmu:
+ iounmap(data->regs);
+
+err_iomap:
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int qoriq_tmu_remove(struct platform_device *pdev)
+{
+ struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
+
+ thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
+
+ /* Disable monitoring */
+ tmu_write(data, TMR_DISABLE, &data->regs->tmr);
+
+ iounmap(data->regs);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int qoriq_tmu_suspend(struct device *dev)
+{
+ u32 tmr;
+ struct qoriq_tmu_data *data = dev_get_drvdata(dev);
+
+ /* Disable monitoring */
+ tmr = tmu_read(data, &data->regs->tmr);
+ tmr &= ~TMR_ME;
+ tmu_write(data, tmr, &data->regs->tmr);
+
+ return 0;
+}
+
+static int qoriq_tmu_resume(struct device *dev)
+{
+ u32 tmr;
+ struct qoriq_tmu_data *data = dev_get_drvdata(dev);
+
+ /* Enable monitoring */
+ tmr = tmu_read(data, &data->regs->tmr);
+ tmr |= TMR_ME;
+ tmu_write(data, tmr, &data->regs->tmr);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
+ qoriq_tmu_suspend, qoriq_tmu_resume);
+
+static const struct of_device_id qoriq_tmu_match[] = {
+ { .compatible = "fsl,qoriq-tmu", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qoriq_tmu_match);
+
+static struct platform_driver qoriq_tmu = {
+ .driver = {
+ .name = "qoriq_thermal",
+ .pm = &qoriq_tmu_pm_ops,
+ .of_match_table = qoriq_tmu_match,
+ },
+ .probe = qoriq_tmu_probe,
+ .remove = qoriq_tmu_remove,
+};
+module_platform_driver(qoriq_tmu);
+
+MODULE_AUTHOR("Jia Hongtao <hongtao.jia@nxp.com>");
+MODULE_DESCRIPTION("QorIQ Thermal Monitoring Unit driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 5f817923f374..73e5fee6cf1d 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -31,6 +31,8 @@
#include <linux/spinlock.h>
#include <linux/thermal.h>
+#include "thermal_hwmon.h"
+
#define IDLE_INTERVAL 5000
#define COMMON_STR 0x00
@@ -75,6 +77,8 @@ struct rcar_thermal_priv {
#define rcar_priv_to_dev(priv) ((priv)->common->dev)
#define rcar_has_irq_support(priv) ((priv)->common->base)
#define rcar_id_to_shift(priv) ((priv)->id * 8)
+#define rcar_of_data(dev) ((unsigned long)of_device_get_match_data(dev))
+#define rcar_use_of_thermal(dev) (rcar_of_data(dev) == USE_OF_THERMAL)
#define USE_OF_THERMAL 1
static const struct of_device_id rcar_thermal_dt_ids[] = {
@@ -354,7 +358,8 @@ static void rcar_thermal_work(struct work_struct *work)
return;
if (nctemp != cctemp)
- thermal_zone_device_update(priv->zone);
+ thermal_zone_device_update(priv->zone,
+ THERMAL_EVENT_UNSPECIFIED);
}
static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
@@ -415,7 +420,10 @@ static int rcar_thermal_remove(struct platform_device *pdev)
rcar_thermal_for_each_priv(priv, common) {
rcar_thermal_irq_disable(priv);
- thermal_zone_device_unregister(priv->zone);
+ if (rcar_use_of_thermal(dev))
+ thermal_remove_hwmon_sysfs(priv->zone);
+ else
+ thermal_zone_device_unregister(priv->zone);
}
pm_runtime_put(dev);
@@ -430,7 +438,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
struct rcar_thermal_priv *priv;
struct device *dev = &pdev->dev;
struct resource *res, *irq;
- unsigned long of_data = (unsigned long)of_device_get_match_data(dev);
int mres = 0;
int i;
int ret = -ENODEV;
@@ -491,7 +498,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
if (ret < 0)
goto error_unregister;
- if (of_data == USE_OF_THERMAL)
+ if (rcar_use_of_thermal(dev))
priv->zone = devm_thermal_zone_of_sensor_register(
dev, i, priv,
&rcar_thermal_zone_of_ops);
@@ -508,6 +515,17 @@ static int rcar_thermal_probe(struct platform_device *pdev)
goto error_unregister;
}
+ if (rcar_use_of_thermal(dev)) {
+ /*
+		 * The thermal zone core does not enable hwmon by default;
+		 * enable it here to keep the driver's previous behaviour.
+ */
+ priv->zone->tzp->no_hwmon = false;
+ ret = thermal_add_hwmon_sysfs(priv->zone);
+ if (ret)
+ goto error_unregister;
+ }
+
rcar_thermal_irq_enable(priv);
list_move_tail(&priv->list, &common->head);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 5d491f16a866..e227a9f0acf7 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -96,6 +96,7 @@ struct chip_tsadc_table {
* @initialize: SoC special initialize tsadc controller method
* @irq_ack: clear the interrupt
* @get_temp: get the temperature
+ * @set_alarm_temp: set the temperature at which the alarm interrupt fires
* @set_tshut_temp: set the hardware-controlled shutdown temperature
* @set_tshut_mode: set the hardware-controlled shutdown mode
* @table: the chip-specific conversion table
@@ -119,6 +120,8 @@ struct rockchip_tsadc_chip {
/* Per-sensor methods */
int (*get_temp)(struct chip_tsadc_table table,
int chn, void __iomem *reg, int *temp);
+ void (*set_alarm_temp)(struct chip_tsadc_table table,
+ int chn, void __iomem *reg, int temp);
void (*set_tshut_temp)(struct chip_tsadc_table table,
int chn, void __iomem *reg, int temp);
void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
@@ -183,6 +186,7 @@ struct rockchip_thermal_data {
#define TSADCV2_INT_EN 0x08
#define TSADCV2_INT_PD 0x0c
#define TSADCV2_DATA(chn) (0x20 + (chn) * 0x04)
+#define TSADCV2_COMP_INT(chn) (0x30 + (chn) * 0x04)
#define TSADCV2_COMP_SHUT(chn) (0x40 + (chn) * 0x04)
#define TSADCV2_HIGHT_INT_DEBOUNCE 0x60
#define TSADCV2_HIGHT_TSHUT_DEBOUNCE 0x64
@@ -207,18 +211,21 @@ struct rockchip_thermal_data {
#define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT 4
#define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4
-#define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */
-#define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* msec */
+#define TSADCV2_AUTO_PERIOD_TIME 250 /* 250ms */
+#define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* 50ms */
+#define TSADCV3_AUTO_PERIOD_TIME 1875 /* 2.5ms */
+#define TSADCV3_AUTO_PERIOD_HT_TIME 1875 /* 2.5ms */
+
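(For reference: the AUTO_PERIOD values are counted in tsadc clock cycles, not milliseconds, so the comments record the resulting wall-clock period; at the 750 kHz tsadc clock these SoCs typically run, 1875 cycles come to roughly 2.5 ms. The clock rate is an inference from the comments, not stated in the patch.)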
#define TSADCV2_USER_INTER_PD_SOC 0x340 /* 13 clocks */
#define GRF_SARADC_TESTBIT 0x0e644
#define GRF_TSADC_TESTBIT_L 0x0e648
#define GRF_TSADC_TESTBIT_H 0x0e64c
-#define GRF_TSADC_TSEN_PD_ON (0x30003 << 0)
-#define GRF_TSADC_TSEN_PD_OFF (0x30000 << 0)
#define GRF_SARADC_TESTBIT_ON (0x10001 << 2)
#define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2)
+#define GRF_TSADC_VCM_EN_L (0x10001 << 7)
+#define GRF_TSADC_VCM_EN_H (0x10001 << 7)
/**
* struct tsadc_table - code to temperature conversion table
@@ -394,13 +401,17 @@ static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
int temp)
{
int high, low, mid;
+ u32 error = 0;
low = 0;
high = table.length - 1;
mid = (high + low) / 2;
- if (temp < table.id[low].temp || temp > table.id[high].temp)
- return 0;
+ /* Return the mask code when the temp is outside the table range */
+ if (temp < table.id[low].temp || temp > table.id[high].temp) {
+ error = table.data_mask;
+ goto exit;
+ }
while (low <= high) {
if (temp == table.id[mid].temp)
@@ -412,7 +423,9 @@ static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
mid = (low + high) / 2;
}
- return 0;
+exit:
+ pr_err("Invalid the conversion, error=%d\n", error);
+ return error;
}
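Note the new contract established here: rk_tsadcv2_temp_to_code() now returns table.data_mask as an out-of-range sentinel instead of 0, and the callers below (rk_tsadcv2_alarm_temp() and rk_tsadcv2_tshut_temp()) compare against that sentinel and bail out rather than programming a bogus comparator value.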
static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
@@ -543,14 +556,34 @@ static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
/* Set interleave value to work around the IC time sync issue */
writel_relaxed(TSADCV2_USER_INTER_PD_SOC, regs +
TSADCV2_USER_CON);
+
+ writel_relaxed(TSADCV2_AUTO_PERIOD_TIME,
+ regs + TSADCV2_AUTO_PERIOD);
+ writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+ writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
+ regs + TSADCV2_AUTO_PERIOD_HT);
+ writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
+
} else {
- regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_ON);
- mdelay(10);
- regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_OFF);
+ /* Enable the voltage common mode feature */
+ regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_VCM_EN_L);
+ regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_VCM_EN_H);
+
usleep_range(15, 100); /* The spec note says at least 15 us */
regmap_write(grf, GRF_SARADC_TESTBIT, GRF_SARADC_TESTBIT_ON);
regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_TESTBIT_H_ON);
usleep_range(90, 200); /* The spec note says at least 90 us */
+
+ writel_relaxed(TSADCV3_AUTO_PERIOD_TIME,
+ regs + TSADCV2_AUTO_PERIOD);
+ writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+ writel_relaxed(TSADCV3_AUTO_PERIOD_HT_TIME,
+ regs + TSADCV2_AUTO_PERIOD_HT);
+ writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
}
if (tshut_polarity == TSHUT_HIGH_ACTIVE)
@@ -559,14 +592,6 @@ static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
else
writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
regs + TSADCV2_AUTO_CON);
-
- writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
- writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
- regs + TSADCV2_HIGHT_INT_DEBOUNCE);
- writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
- regs + TSADCV2_AUTO_PERIOD_HT);
- writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
- regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
}
static void rk_tsadcv2_irq_ack(void __iomem *regs)
@@ -628,12 +653,34 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
return rk_tsadcv2_code_to_temp(table, val, temp);
}
+static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table,
+ int chn, void __iomem *regs, int temp)
+{
+ u32 alarm_value, int_en;
+
+ /* Make sure the value is valid */
+ alarm_value = rk_tsadcv2_temp_to_code(table, temp);
+ if (alarm_value == table.data_mask)
+ return;
+
+ writel_relaxed(alarm_value & table.data_mask,
+ regs + TSADCV2_COMP_INT(chn));
+
+ int_en = readl_relaxed(regs + TSADCV2_INT_EN);
+ int_en |= TSADCV2_INT_SRC_EN(chn);
+ writel_relaxed(int_en, regs + TSADCV2_INT_EN);
+}
+
static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
int chn, void __iomem *regs, int temp)
{
u32 tshut_value, val;
+ /* Make sure the value is valid */
tshut_value = rk_tsadcv2_temp_to_code(table, temp);
+ if (tshut_value == table.data_mask)
+ return;
+
writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
/* TSHUT will be valid */
@@ -670,6 +717,7 @@ static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
.get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
@@ -694,6 +742,7 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
.irq_ack = rk_tsadcv2_irq_ack,
.control = rk_tsadcv2_control,
.get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
@@ -718,6 +767,7 @@ static const struct rockchip_tsadc_chip rk3366_tsadc_data = {
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
.get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
@@ -742,6 +792,7 @@ static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
.irq_ack = rk_tsadcv2_irq_ack,
.control = rk_tsadcv2_control,
.get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
@@ -766,6 +817,7 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
.get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
@@ -821,11 +873,27 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
thermal->chip->irq_ack(thermal->regs);
for (i = 0; i < thermal->chip->chn_num; i++)
- thermal_zone_device_update(thermal->sensors[i].tzd);
+ thermal_zone_device_update(thermal->sensors[i].tzd,
+ THERMAL_EVENT_UNSPECIFIED);
return IRQ_HANDLED;
}
+static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
+{
+ struct rockchip_thermal_sensor *sensor = _sensor;
+ struct rockchip_thermal_data *thermal = sensor->thermal;
+ const struct rockchip_tsadc_chip *tsadc = thermal->chip;
+
+ dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
+ __func__, sensor->id, low, high);
+
+ tsadc->set_alarm_temp(tsadc->table,
+ sensor->id, thermal->regs, high);
+
+ return 0;
+}
+
static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
{
struct rockchip_thermal_sensor *sensor = _sensor;
@@ -843,6 +911,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
static const struct thermal_zone_of_device_ops rockchip_of_thermal_ops = {
.get_temp = rockchip_thermal_get_temp,
+ .set_trips = rockchip_thermal_set_trips,
};
static int rockchip_configure_from_dt(struct device *dev,
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index f3ce94ec73b5..ad1186dd6132 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -225,7 +225,7 @@ static void exynos_report_trigger(struct exynos_tmu_data *p)
return;
}
- thermal_zone_device_update(tz);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
mutex_lock(&tz->lock);
/* Find the level for which trip happened */
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index fc0c9e198710..91d42319de27 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -42,7 +42,8 @@ static irqreturn_t st_mmap_thermal_trip_handler(int irq, void *sdata)
{
struct st_thermal_sensor *sensor = sdata;
- thermal_zone_device_update(sensor->thermal_dev);
+ thermal_zone_device_update(sensor->thermal_dev,
+ THERMAL_EVENT_UNSPECIFIED);
return IRQ_HANDLED;
}
diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c
index 70e0d9f406e9..201304aeafeb 100644
--- a/drivers/thermal/tango_thermal.c
+++ b/drivers/thermal/tango_thermal.c
@@ -64,6 +64,12 @@ static const struct thermal_zone_of_device_ops ops = {
.get_temp = tango_get_temp,
};
+static void tango_thermal_init(struct tango_thermal_priv *priv)
+{
+ writel(0, priv->base + TEMPSI_CFG);
+ writel(CMD_ON, priv->base + TEMPSI_CMD);
+}
+
static int tango_thermal_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -79,14 +85,22 @@ static int tango_thermal_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ platform_set_drvdata(pdev, priv);
priv->thresh_idx = IDX_MIN;
- writel(0, priv->base + TEMPSI_CFG);
- writel(CMD_ON, priv->base + TEMPSI_CMD);
+ tango_thermal_init(priv);
tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &ops);
return PTR_ERR_OR_ZERO(tzdev);
}
+static int __maybe_unused tango_thermal_resume(struct device *dev)
+{
+ tango_thermal_init(dev_get_drvdata(dev));
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tango_thermal_pm, NULL, tango_thermal_resume);
+
static const struct of_device_id tango_sensor_ids[] = {
{
.compatible = "sigma,smp8758-thermal",
@@ -99,6 +113,7 @@ static struct platform_driver tango_thermal_driver = {
.driver = {
.name = "tango-thermal",
.of_match_table = tango_sensor_ids,
+ .pm = &tango_thermal_pm,
},
};
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index b8651726201e..7d2db23d71a3 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -30,6 +30,7 @@
#include <dt-bindings/thermal/tegra124-soctherm.h>
+#include "../thermal_core.h"
#include "soctherm.h"
#define SENSOR_CONFIG0 0
@@ -67,35 +68,228 @@
#define READBACK_ADD_HALF BIT(7)
#define READBACK_NEGATE BIT(0)
+/*
+ * THERMCTL_LEVEL0_GROUP_CPU is defined in soctherm.h
+ * because it will be used by tegraxxx_soctherm.c
+ */
+#define THERMCTL_LVL0_CPU0_EN_MASK BIT(8)
+#define THERMCTL_LVL0_CPU0_CPU_THROT_MASK (0x3 << 5)
+#define THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT 0x1
+#define THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY 0x2
+#define THERMCTL_LVL0_CPU0_GPU_THROT_MASK (0x3 << 3)
+#define THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT 0x1
+#define THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY 0x2
+#define THERMCTL_LVL0_CPU0_MEM_THROT_MASK BIT(2)
+#define THERMCTL_LVL0_CPU0_STATUS_MASK 0x3
+
+#define THERMCTL_LVL0_UP_STATS 0x10
+#define THERMCTL_LVL0_DN_STATS 0x14
+
+#define THERMCTL_STATS_CTL 0x94
+#define STATS_CTL_CLR_DN 0x8
+#define STATS_CTL_EN_DN 0x4
+#define STATS_CTL_CLR_UP 0x2
+#define STATS_CTL_EN_UP 0x1
+
+#define THROT_GLOBAL_CFG 0x400
+#define THROT_GLOBAL_ENB_MASK BIT(0)
+
+#define CPU_PSKIP_STATUS 0x418
+#define XPU_PSKIP_STATUS_M_MASK (0xff << 12)
+#define XPU_PSKIP_STATUS_N_MASK (0xff << 4)
+#define XPU_PSKIP_STATUS_SW_OVERRIDE_MASK BIT(1)
+#define XPU_PSKIP_STATUS_ENABLED_MASK BIT(0)
+
+#define THROT_PRIORITY_LOCK 0x424
+#define THROT_PRIORITY_LOCK_PRIORITY_MASK 0xff
+
+#define THROT_STATUS 0x428
+#define THROT_STATUS_BREACH_MASK BIT(12)
+#define THROT_STATUS_STATE_MASK (0xff << 4)
+#define THROT_STATUS_ENABLED_MASK BIT(0)
+
+#define THROT_PSKIP_CTRL_LITE_CPU 0x430
+#define THROT_PSKIP_CTRL_ENABLE_MASK BIT(31)
+#define THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8)
+#define THROT_PSKIP_CTRL_DIVISOR_MASK 0xff
+#define THROT_PSKIP_CTRL_VECT_GPU_MASK (0x7 << 16)
+#define THROT_PSKIP_CTRL_VECT_CPU_MASK (0x7 << 8)
+#define THROT_PSKIP_CTRL_VECT2_CPU_MASK 0x7
+
+#define THROT_VECT_NONE 0x0 /* 3'b000 */
+#define THROT_VECT_LOW 0x1 /* 3'b001 */
+#define THROT_VECT_MED 0x3 /* 3'b011 */
+#define THROT_VECT_HIGH 0x7 /* 3'b111 */
+
+#define THROT_PSKIP_RAMP_LITE_CPU 0x434
+#define THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31)
+#define THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8)
+#define THROT_PSKIP_RAMP_STEP_MASK 0xff
+
+#define THROT_PRIORITY_LITE 0x444
+#define THROT_PRIORITY_LITE_PRIO_MASK 0xff
+
+#define THROT_DELAY_LITE 0x448
+#define THROT_DELAY_LITE_DELAY_MASK 0xff
+
+/* car register offsets needed for enabling HW throttling */
+#define CAR_SUPER_CCLKG_DIVIDER 0x36c
+#define CDIVG_USE_THERM_CONTROLS_MASK BIT(30)
+
+/* ccroc register offsets needed for enabling HW throttling for Tegra132 */
+#define CCROC_SUPER_CCLKG_DIVIDER 0x024
+
+#define CCROC_GLOBAL_CFG 0x148
+
+#define CCROC_THROT_PSKIP_RAMP_CPU 0x150
+#define CCROC_THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK BIT(31)
+#define CCROC_THROT_PSKIP_RAMP_DURATION_MASK (0xffff << 8)
+#define CCROC_THROT_PSKIP_RAMP_STEP_MASK 0xff
+
+#define CCROC_THROT_PSKIP_CTRL_CPU 0x154
+#define CCROC_THROT_PSKIP_CTRL_ENB_MASK BIT(31)
+#define CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK (0xff << 8)
+#define CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK 0xff
+
/* get val from register(r) mask bits(m) */
#define REG_GET_MASK(r, m) (((r) & (m)) >> (ffs(m) - 1))
/* set val(v) to mask bits(m) of register(r) */
#define REG_SET_MASK(r, m, v) (((r) & ~(m)) | \
(((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1)))
+/* get dividend from the depth */
+#define THROT_DEPTH_DIVIDEND(depth) ((256 * (100 - (depth)) / 100) - 1)
+
+/* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */
+#define THROT_OFFSET 0x30
+#define THROT_PSKIP_CTRL(throt, dev) (THROT_PSKIP_CTRL_LITE_CPU + \
+ (THROT_OFFSET * throt) + (8 * dev))
+#define THROT_PSKIP_RAMP(throt, dev) (THROT_PSKIP_RAMP_LITE_CPU + \
+ (THROT_OFFSET * throt) + (8 * dev))
+
+/* get THROT_xxx_CTRL offset per LIGHT/HEAVY throt */
+#define THROT_PRIORITY_CTRL(throt) (THROT_PRIORITY_LITE + \
+ (THROT_OFFSET * throt))
+#define THROT_DELAY_CTRL(throt) (THROT_DELAY_LITE + \
+ (THROT_OFFSET * throt))
+
+/* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect */
+#define CCROC_THROT_OFFSET 0x0c
+#define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect) (CCROC_THROT_PSKIP_CTRL_CPU + \
+ (CCROC_THROT_OFFSET * vect))
+#define CCROC_THROT_PSKIP_RAMP_CPU_REG(vect) (CCROC_THROT_PSKIP_RAMP_CPU + \
+ (CCROC_THROT_OFFSET * vect))
+
+/* get THERMCTL_LEVELx offset per CPU/GPU/MEM/TSENSE rg and LEVEL0~3 lv */
+#define THERMCTL_LVL_REGS_SIZE 0x20
+#define THERMCTL_LVL_REG(rg, lv) ((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE))
+
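A quick sanity check of these helper macros (the worked values are illustrative and not part of the patch; they assume the macros exactly as defined above):

	/* An 8-bit field at bits 11:4 (mask 0xff << 4): ffs(m) - 1 == 4 */
	u32 r = REG_SET_MASK(0, 0xff << 4, 0x7f);  /* -> 0x7f << 4 == 0x7f0 */
	u32 v = REG_GET_MASK(0x7f0, 0xff << 4);    /* -> 0x7f */

	/* A 50% throttle depth yields a pulse-skipper dividend of 127 */
	u8 dividend = THROT_DEPTH_DIVIDEND(50);    /* (256 * 50 / 100) - 1 */

	/* LEVEL2 copy of the CPU group's level register: 0x0 + 2 * 0x20 */
	u16 off = THERMCTL_LVL_REG(THERMCTL_LEVEL0_GROUP_CPU, 2); /* 0x40 */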
static const int min_low_temp = -127000;
static const int max_high_temp = 127000;
+enum soctherm_throttle_id {
+ THROTTLE_LIGHT = 0,
+ THROTTLE_HEAVY,
+ THROTTLE_SIZE,
+};
+
+enum soctherm_throttle_dev_id {
+ THROTTLE_DEV_CPU = 0,
+ THROTTLE_DEV_GPU,
+ THROTTLE_DEV_SIZE,
+};
+
+static const char *const throt_names[] = {
+ [THROTTLE_LIGHT] = "light",
+ [THROTTLE_HEAVY] = "heavy",
+};
+
+struct tegra_soctherm;
struct tegra_thermctl_zone {
void __iomem *reg;
struct device *dev;
+ struct tegra_soctherm *ts;
struct thermal_zone_device *tz;
const struct tegra_tsensor_group *sg;
};
+struct soctherm_throt_cfg {
+ const char *name;
+ unsigned int id;
+ u8 priority;
+ u8 cpu_throt_level;
+ u32 cpu_throt_depth;
+ struct thermal_cooling_device *cdev;
+ bool init;
+};
+
struct tegra_soctherm {
struct reset_control *reset;
struct clk *clock_tsensor;
struct clk *clock_soctherm;
void __iomem *regs;
- struct thermal_zone_device **thermctl_tzs;
+ void __iomem *clk_regs;
+ void __iomem *ccroc_regs;
u32 *calib;
+ struct thermal_zone_device **thermctl_tzs;
struct tegra_soctherm_soc *soc;
+ struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE];
+
struct dentry *debugfs_dir;
};
+/**
+ * clk_writel() - writes a value to a CAR register
+ * @ts: pointer to a struct tegra_soctherm
+ * @value: the value to write
+ * @reg: the register offset
+ *
+ * Writes @value to @reg. No return value.
+ */
+static inline void clk_writel(struct tegra_soctherm *ts, u32 value, u32 reg)
+{
+ writel(value, (ts->clk_regs + reg));
+}
+
+/**
+ * clk_readl() - reads specified register from CAR IP block
+ * @ts: pointer to a struct tegra_soctherm
+ * @reg: register address to be read
+ *
+ * Return: the value of the register
+ */
+static inline u32 clk_readl(struct tegra_soctherm *ts, u32 reg)
+{
+ return readl(ts->clk_regs + reg);
+}
+
+/**
+ * ccroc_writel() - writes a value to a CCROC register
+ * @ts: pointer to a struct tegra_soctherm
+ * @value: the value to write
+ * @reg: the register offset
+ *
+ * Writes @value to @reg. No return value.
+ */
+static inline void ccroc_writel(struct tegra_soctherm *ts, u32 value, u32 reg)
+{
+ writel(value, (ts->ccroc_regs + reg));
+}
+
+/**
+ * ccroc_readl() - reads specified register from CCROC IP block
+ * @ts: pointer to a struct tegra_soctherm
+ * @reg: register address to be read
+ *
+ * Return: the value of the register
+ */
+static inline u32 ccroc_readl(struct tegra_soctherm *ts, u32 reg)
+{
+ return readl(ts->ccroc_regs + reg);
+}
+
static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i)
{
const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i];
@@ -150,11 +344,17 @@ static int tegra_thermctl_get_temp(void *data, int *out_temp)
static int
thermtrip_program(struct device *dev, const struct tegra_tsensor_group *sg,
int trip_temp);
+static int
+throttrip_program(struct device *dev, const struct tegra_tsensor_group *sg,
+ struct soctherm_throt_cfg *stc, int trip_temp);
+static struct soctherm_throt_cfg *
+find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name);
static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
{
struct tegra_thermctl_zone *zone = data;
struct thermal_zone_device *tz = zone->tz;
+ struct tegra_soctherm *ts = zone->ts;
const struct tegra_tsensor_group *sg = zone->sg;
struct device *dev = zone->dev;
enum thermal_trip_type type;
@@ -167,10 +367,29 @@ static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
if (ret)
return ret;
- if (type != THERMAL_TRIP_CRITICAL)
- return 0;
+ if (type == THERMAL_TRIP_CRITICAL) {
+ return thermtrip_program(dev, sg, temp);
+ } else if (type == THERMAL_TRIP_HOT) {
+ int i;
+
+ for (i = 0; i < THROTTLE_SIZE; i++) {
+ struct thermal_cooling_device *cdev;
+ struct soctherm_throt_cfg *stc;
+
+ if (!ts->throt_cfgs[i].init)
+ continue;
+
+ cdev = ts->throt_cfgs[i].cdev;
+ if (get_thermal_instance(tz, cdev, trip))
+ stc = find_throttle_cfg_by_name(ts, cdev->type);
+ else
+ continue;
+
+ return throttrip_program(dev, sg, stc, temp);
+ }
+ }
- return thermtrip_program(dev, sg, temp);
+ return 0;
}
static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
@@ -238,14 +457,110 @@ static int thermtrip_program(struct device *dev,
}
/**
+ * throttrip_program() - Configures the hardware to throttle the
+ * pulse skippers if a given sensor group reaches a given temperature
+ * @dev: ptr to the struct device for the SOC_THERM IP block
+ * @sg: pointer to the sensor group to set the throttle temperature for
+ * @stc: pointer to the throttle configuration to be triggered
+ * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
+ *
+ * Sets the thermal trip threshold and throttle event of the given sensor
+ * group. If this threshold is crossed, the hardware will trigger the
+ * throttle.
+ *
+ * Note that, although @trip_temp is specified in millicelsius, the
+ * hardware is programmed in degrees Celsius.
+ *
+ * Return: 0 upon success, or %-EINVAL upon failure.
+ */
+static int throttrip_program(struct device *dev,
+ const struct tegra_tsensor_group *sg,
+ struct soctherm_throt_cfg *stc,
+ int trip_temp)
+{
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ int temp, cpu_throt, gpu_throt;
+ unsigned int throt;
+ u32 r, reg_off;
+
+ if (!dev || !sg || !stc || !stc->init)
+ return -EINVAL;
+
+ temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
+
+ /* Hardcode LIGHT on LEVEL1 and HEAVY on LEVEL2 */
+ throt = stc->id;
+ reg_off = THERMCTL_LVL_REG(sg->thermctl_lvl0_offset, throt + 1);
+
+ if (throt == THROTTLE_LIGHT) {
+ cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT;
+ gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT;
+ } else {
+ cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY;
+ gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY;
+ if (throt != THROTTLE_HEAVY)
+ dev_warn(dev,
+ "invalid throt id %d - assuming HEAVY",
+ throt);
+ }
+
+ r = readl(ts->regs + reg_off);
+ r = REG_SET_MASK(r, sg->thermctl_lvl0_up_thresh_mask, temp);
+ r = REG_SET_MASK(r, sg->thermctl_lvl0_dn_thresh_mask, temp);
+ r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_CPU_THROT_MASK, cpu_throt);
+ r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_GPU_THROT_MASK, gpu_throt);
+ r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
+ writel(r, ts->regs + reg_off);
+
+ return 0;
+}
+
+static struct soctherm_throt_cfg *
+find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; ts->throt_cfgs[i].name; i++)
+ if (!strcmp(ts->throt_cfgs[i].name, name))
+ return &ts->throt_cfgs[i];
+
+ return NULL;
+}
+
+static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
+{
+ int ntrips, i, ret;
+ enum thermal_trip_type type;
+
+ ntrips = of_thermal_get_ntrips(tz);
+ if (ntrips <= 0)
+ return -EINVAL;
+
+ for (i = 0; i < ntrips; i++) {
+ ret = tz->ops->get_trip_type(tz, i, &type);
+ if (ret)
+ return -EINVAL;
+ if (type == THERMAL_TRIP_HOT) {
+ ret = tz->ops->get_trip_temp(tz, i, temp);
+ if (!ret)
+ *trip = i;
+
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
* tegra_soctherm_set_hwtrips() - set HW trip point from DT data
* @dev: struct device * of the SOC_THERM instance
*
* Configure the SOC_THERM HW trip points, setting "THERMTRIP"
- * trip points , using "critical" type trip_temp from thermal
- * zone.
- * After they have been configured, THERMTRIP will take action
- * when the configured SoC thermal sensor group reaches a
+ * "THROTTLE" trip points , using "critical" or "hot" type trip_temp
+ * from thermal zone.
+ * After they have been configured, THERMTRIP or THROTTLE will take
+ * action when the configured SoC thermal sensor group reaches a
* certain temperature.
*
* Return: 0 upon success, or a negative error code on failure.
@@ -254,19 +569,24 @@ static int thermtrip_program(struct device *dev,
* THERMTRIP has been enabled successfully when a message similar to
* this one appears on the serial console:
* "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC"
+ * THROTTLE has been enabled successfully when a message similar to
+ * this one appears on the serial console:
+ * ""throttrip: will throttle when sensor group XXX reaches YYYYYY mC"
*/
static int tegra_soctherm_set_hwtrips(struct device *dev,
const struct tegra_tsensor_group *sg,
struct thermal_zone_device *tz)
{
- int temperature;
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ struct soctherm_throt_cfg *stc;
+ int i, trip, temperature;
int ret;
ret = tz->ops->get_crit_temp(tz, &temperature);
if (ret) {
dev_warn(dev, "thermtrip: %s: missing critical temperature\n",
sg->name);
- return ret;
+ goto set_throttle;
}
ret = thermtrip_program(dev, sg, temperature);
@@ -280,6 +600,43 @@ static int tegra_soctherm_set_hwtrips(struct device *dev,
"thermtrip: will shut down when %s reaches %d mC\n",
sg->name, temperature);
+set_throttle:
+ ret = get_hot_temp(tz, &trip, &temperature);
+ if (ret) {
+ dev_warn(dev, "throttrip: %s: missing hot temperature\n",
+ sg->name);
+ return 0;
+ }
+
+ for (i = 0; i < THROTTLE_SIZE; i++) {
+ struct thermal_cooling_device *cdev;
+
+ if (!ts->throt_cfgs[i].init)
+ continue;
+
+ cdev = ts->throt_cfgs[i].cdev;
+ if (get_thermal_instance(tz, cdev, trip))
+ stc = find_throttle_cfg_by_name(ts, cdev->type);
+ else
+ continue;
+
+ ret = throttrip_program(dev, sg, stc, temperature);
+ if (ret) {
+ dev_err(dev, "throttrip: %s: error during enable\n",
+ sg->name);
+ return ret;
+ }
+
+ dev_info(dev,
+ "throttrip: will throttle when %s reaches %d mC\n",
+ sg->name, temperature);
+ break;
+ }
+
+ if (i == THROTTLE_SIZE)
+ dev_warn(dev, "throttrip: %s: missing throttle cdev\n",
+ sg->name);
+
return 0;
}
@@ -291,7 +648,7 @@ static int regs_show(struct seq_file *s, void *data)
const struct tegra_tsensor *tsensors = ts->soc->tsensors;
const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
u32 r, state;
- int i;
+ int i, level;
seq_puts(s, "-----TSENSE (convert HW)-----\n");
@@ -365,6 +722,81 @@ static int regs_show(struct seq_file *s, void *data)
state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
seq_printf(s, " MEM(%d)\n", translate_temp(state));
+ for (i = 0; i < ts->soc->num_ttgs; i++) {
+ seq_printf(s, "%s:\n", ttgs[i]->name);
+ for (level = 0; level < 4; level++) {
+ s32 v;
+ u32 mask;
+ u16 off = ttgs[i]->thermctl_lvl0_offset;
+
+ r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
+
+ mask = ttgs[i]->thermctl_lvl0_up_thresh_mask;
+ state = REG_GET_MASK(r, mask);
+ v = sign_extend32(state, ts->soc->bptt - 1);
+ v *= ts->soc->thresh_grain;
+ seq_printf(s, " %d: Up/Dn(%d /", level, v);
+
+ mask = ttgs[i]->thermctl_lvl0_dn_thresh_mask;
+ state = REG_GET_MASK(r, mask);
+ v = sign_extend32(state, ts->soc->bptt - 1);
+ v *= ts->soc->thresh_grain;
+ seq_printf(s, "%d ) ", v);
+
+ mask = THERMCTL_LVL0_CPU0_EN_MASK;
+ state = REG_GET_MASK(r, mask);
+ seq_printf(s, "En(%d) ", state);
+
+ mask = THERMCTL_LVL0_CPU0_CPU_THROT_MASK;
+ state = REG_GET_MASK(r, mask);
+ seq_puts(s, "CPU Throt");
+ if (!state)
+ seq_printf(s, "(%s) ", "none");
+ else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT)
+ seq_printf(s, "(%s) ", "L");
+ else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY)
+ seq_printf(s, "(%s) ", "H");
+ else
+ seq_printf(s, "(%s) ", "H+L");
+
+ mask = THERMCTL_LVL0_CPU0_GPU_THROT_MASK;
+ state = REG_GET_MASK(r, mask);
+ seq_puts(s, "GPU Throt");
+ if (!state)
+ seq_printf(s, "(%s) ", "none");
+ else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT)
+ seq_printf(s, "(%s) ", "L");
+ else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY)
+ seq_printf(s, "(%s) ", "H");
+ else
+ seq_printf(s, "(%s) ", "H+L");
+
+ mask = THERMCTL_LVL0_CPU0_STATUS_MASK;
+ state = REG_GET_MASK(r, mask);
+ seq_printf(s, "Status(%s)\n",
+ state == 0 ? "LO" :
+ state == 1 ? "In" :
+ state == 2 ? "Res" : "HI");
+ }
+ }
+
+ r = readl(ts->regs + THERMCTL_STATS_CTL);
+ seq_printf(s, "STATS: Up(%s) Dn(%s)\n",
+ r & STATS_CTL_EN_UP ? "En" : "--",
+ r & STATS_CTL_EN_DN ? "En" : "--");
+
+ for (level = 0; level < 4; level++) {
+ u16 off;
+
+ off = THERMCTL_LVL0_UP_STATS;
+ r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
+ seq_printf(s, " Level_%d Up(%d) ", level, r);
+
+ off = THERMCTL_LVL0_DN_STATS;
+ r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
+ seq_printf(s, "Dn(%d)\n", r);
+ }
+
r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask);
seq_printf(s, "Thermtrip Any En(%d)\n", state);
@@ -376,6 +808,32 @@ static int regs_show(struct seq_file *s, void *data)
seq_printf(s, "Thresh(%d)\n", state);
}
+ r = readl(ts->regs + THROT_GLOBAL_CFG);
+ seq_puts(s, "\n");
+ seq_printf(s, "GLOBAL THROTTLE CONFIG: 0x%08x\n", r);
+
+ seq_puts(s, "---------------------------------------------------\n");
+ r = readl(ts->regs + THROT_STATUS);
+ state = REG_GET_MASK(r, THROT_STATUS_BREACH_MASK);
+ seq_printf(s, "THROT STATUS: breach(%d) ", state);
+ state = REG_GET_MASK(r, THROT_STATUS_STATE_MASK);
+ seq_printf(s, "state(%d) ", state);
+ state = REG_GET_MASK(r, THROT_STATUS_ENABLED_MASK);
+ seq_printf(s, "enabled(%d)\n", state);
+
+ r = readl(ts->regs + CPU_PSKIP_STATUS);
+ if (ts->soc->use_ccroc) {
+ state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
+ seq_printf(s, "CPU PSKIP STATUS: enabled(%d)\n", state);
+ } else {
+ state = REG_GET_MASK(r, XPU_PSKIP_STATUS_M_MASK);
+ seq_printf(s, "CPU PSKIP STATUS: M(%d) ", state);
+ state = REG_GET_MASK(r, XPU_PSKIP_STATUS_N_MASK);
+ seq_printf(s, "N(%d) ", state);
+ state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
+ seq_printf(s, "enabled(%d)\n", state);
+ }
+
return 0;
}
@@ -449,6 +907,326 @@ static int soctherm_clk_enable(struct platform_device *pdev, bool enable)
return 0;
}
+static int throt_get_cdev_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *max_state)
+{
+ *max_state = 1;
+ return 0;
+}
+
+static int throt_get_cdev_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *cur_state)
+{
+ struct tegra_soctherm *ts = cdev->devdata;
+ u32 r;
+
+ r = readl(ts->regs + THROT_STATUS);
+ if (REG_GET_MASK(r, THROT_STATUS_STATE_MASK))
+ *cur_state = 1;
+ else
+ *cur_state = 0;
+
+ return 0;
+}
+
+static int throt_set_cdev_state(struct thermal_cooling_device *cdev,
+ unsigned long cur_state)
+{
+ return 0;
+}
+
+static struct thermal_cooling_device_ops throt_cooling_ops = {
+ .get_max_state = throt_get_cdev_max_state,
+ .get_cur_state = throt_get_cdev_cur_state,
+ .set_cur_state = throt_set_cdev_state,
+};
+
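Worth noting: this cooling device is deliberately passive. set_cur_state() is a no-op because the SOC_THERM hardware applies the throttle itself; registering the cdev only lets the thermal core bind HOT trips to a throttle config and read back, via THROT_STATUS, whether throttling is currently active.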
+/**
+ * soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
+ * and register them as cooling devices.
+ */
+static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ struct device_node *np_stc, *np_stcc;
+ const char *name;
+ u32 val;
+ int i, r;
+
+ for (i = 0; i < THROTTLE_SIZE; i++) {
+ ts->throt_cfgs[i].name = throt_names[i];
+ ts->throt_cfgs[i].id = i;
+ ts->throt_cfgs[i].init = false;
+ }
+
+ np_stc = of_get_child_by_name(dev->of_node, "throttle-cfgs");
+ if (!np_stc) {
+ dev_info(dev,
+ "throttle-cfg: no throttle-cfgs - not enabling\n");
+ return;
+ }
+
+ for_each_child_of_node(np_stc, np_stcc) {
+ struct soctherm_throt_cfg *stc;
+ struct thermal_cooling_device *tcd;
+
+ name = np_stcc->name;
+ stc = find_throttle_cfg_by_name(ts, name);
+ if (!stc) {
+ dev_err(dev,
+ "throttle-cfg: could not find %s\n", name);
+ continue;
+ }
+
+ r = of_property_read_u32(np_stcc, "nvidia,priority", &val);
+ if (r) {
+ dev_info(dev,
+ "throttle-cfg: %s: missing priority\n", name);
+ continue;
+ }
+ stc->priority = val;
+
+ if (ts->soc->use_ccroc) {
+ r = of_property_read_u32(np_stcc,
+ "nvidia,cpu-throt-level",
+ &val);
+ if (r) {
+ dev_info(dev,
+ "throttle-cfg: %s: missing cpu-throt-level\n",
+ name);
+ continue;
+ }
+ stc->cpu_throt_level = val;
+ } else {
+ r = of_property_read_u32(np_stcc,
+ "nvidia,cpu-throt-percent",
+ &val);
+ if (r) {
+ dev_info(dev,
+ "throttle-cfg: %s: missing cpu-throt-percent\n",
+ name);
+ continue;
+ }
+ stc->cpu_throt_depth = val;
+ }
+
+ tcd = thermal_of_cooling_device_register(np_stcc,
+ (char *)name, ts,
+ &throt_cooling_ops);
+ of_node_put(np_stcc);
+ if (IS_ERR_OR_NULL(tcd)) {
+ dev_err(dev,
+ "throttle-cfg: %s: failed to register cooling device\n",
+ name);
+ continue;
+ }
+
+ stc->cdev = tcd;
+ stc->init = true;
+ }
+
+ of_node_put(np_stc);
+}
+
+/**
+ * throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config
+ * @level: the throttling level (LOW/MED/HIGH)
+ *
+ * This function sets up the CPU-local CCROC NV_THERM instance with
+ * the M/N values desired for each level.
+ *
+ * This function pre-programs the CCROC NV_THERM levels in terms of
+ * pre-configured "Low", "Medium" or "Heavy" throttle levels which are
+ * mapped to THROT_LEVEL_LOW, THROT_LEVEL_MED and THROT_LEVEL_HVY.
+ */
+static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level)
+{
+ u8 depth, dividend;
+ u32 r;
+
+ switch (level) {
+ case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
+ depth = 50;
+ break;
+ case TEGRA_SOCTHERM_THROT_LEVEL_MED:
+ depth = 75;
+ break;
+ case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
+ depth = 80;
+ break;
+ case TEGRA_SOCTHERM_THROT_LEVEL_NONE:
+ return;
+ default:
+ return;
+ }
+
+ dividend = THROT_DEPTH_DIVIDEND(depth);
+
+ /* setup PSKIP in ccroc nv_therm registers */
+ r = ccroc_readl(ts, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
+ r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
+ r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_STEP_MASK, 0xf);
+ ccroc_writel(ts, r, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
+
+ r = ccroc_readl(ts, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
+ r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_ENB_MASK, 1);
+ r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
+ r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
+ ccroc_writel(ts, r, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
+}
+
+/**
+ * throttlectl_cpu_level_select() - program CPU pulse skipper config
+ * @throt: the throttle event id (LIGHT/HEAVY)
+ *
+ * Pulse skippers are used to throttle clock frequencies. This
+ * function programs the pulse skippers based on @throt and platform
+ * data. This function is used on SoCs which have CPU-local pulse
+ * skipper control, such as T13x. It programs soctherm's interface to
+ * Denver:CCROC NV_THERM in terms of Low, Medium and High throttling
+ * vectors. PSKIP_BYPASS mode is set as required per HW spec.
+ */
+static void throttlectl_cpu_level_select(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id throt)
+{
+ u32 r, throt_vect;
+
+ /* Denver:CCROC NV_THERM interface N:3 Mapping */
+ switch (ts->throt_cfgs[throt].cpu_throt_level) {
+ case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
+ throt_vect = THROT_VECT_LOW;
+ break;
+ case TEGRA_SOCTHERM_THROT_LEVEL_MED:
+ throt_vect = THROT_VECT_MED;
+ break;
+ case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
+ throt_vect = THROT_VECT_HIGH;
+ break;
+ default:
+ throt_vect = THROT_VECT_NONE;
+ break;
+ }
+
+ r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_CPU_MASK, throt_vect);
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT2_CPU_MASK, throt_vect);
+ writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
+
+ /* bypass sequencer in soc_therm as it is programmed in ccroc */
+ r = REG_SET_MASK(0, THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK, 1);
+ writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
+}
+
+/**
+ * throttlectl_cpu_mn() - program CPU pulse skipper configuration
+ * @throt: the throttle event id (LIGHT/HEAVY)
+ *
+ * Pulse skippers are used to throttle clock frequencies. This
+ * function programs the pulse skippers based on @throt and platform
+ * data. This function is used for CPUs that have "remote" pulse
+ * skipper control, e.g., the CPU pulse skipper is controlled by the
+ * SOC_THERM IP block. (SOC_THERM is located outside the CPU
+ * complex.)
+ */
+static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id throt)
+{
+ u32 r;
+ int depth;
+ u8 dividend;
+
+ depth = ts->throt_cfgs[throt].cpu_throt_depth;
+ dividend = THROT_DEPTH_DIVIDEND(depth);
+
+ r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
+ writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
+
+ r = readl(ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
+ r = REG_SET_MASK(r, THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
+ r = REG_SET_MASK(r, THROT_PSKIP_RAMP_STEP_MASK, 0xf);
+ writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
+}
+
+/**
+ * soctherm_throttle_program() - programs pulse skippers' configuration
+ * @throt: the throttle event id (LIGHT/HEAVY).
+ *
+ * Pulse skippers are used to throttle clock frequencies.
+ * This function programs the pulse skippers.
+ */
+static void soctherm_throttle_program(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id throt)
+{
+ u32 r;
+ struct soctherm_throt_cfg stc = ts->throt_cfgs[throt];
+
+ if (!stc.init)
+ return;
+
+ /* Setup PSKIP parameters */
+ if (ts->soc->use_ccroc)
+ throttlectl_cpu_level_select(ts, throt);
+ else
+ throttlectl_cpu_mn(ts, throt);
+
+ r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority);
+ writel(r, ts->regs + THROT_PRIORITY_CTRL(throt));
+
+ r = REG_SET_MASK(0, THROT_DELAY_LITE_DELAY_MASK, 0);
+ writel(r, ts->regs + THROT_DELAY_CTRL(throt));
+
+ r = readl(ts->regs + THROT_PRIORITY_LOCK);
+ r = REG_GET_MASK(r, THROT_PRIORITY_LOCK_PRIORITY_MASK);
+ if (r >= stc.priority)
+ return;
+ r = REG_SET_MASK(0, THROT_PRIORITY_LOCK_PRIORITY_MASK,
+ stc.priority);
+ writel(r, ts->regs + THROT_PRIORITY_LOCK);
+}
+
+static void tegra_soctherm_throttle(struct device *dev)
+{
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ u32 v;
+ int i;
+
+ /* configure LOW, MED and HIGH levels for CCROC NV_THERM */
+ if (ts->soc->use_ccroc) {
+ throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_LOW);
+ throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_MED);
+ throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_HIGH);
+ }
+
+ /* Thermal HW throttle programming */
+ for (i = 0; i < THROTTLE_SIZE; i++)
+ soctherm_throttle_program(ts, i);
+
+ v = REG_SET_MASK(0, THROT_GLOBAL_ENB_MASK, 1);
+ if (ts->soc->use_ccroc) {
+ ccroc_writel(ts, v, CCROC_GLOBAL_CFG);
+
+ v = ccroc_readl(ts, CCROC_SUPER_CCLKG_DIVIDER);
+ v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
+ ccroc_writel(ts, v, CCROC_SUPER_CCLKG_DIVIDER);
+ } else {
+ writel(v, ts->regs + THROT_GLOBAL_CFG);
+
+ v = clk_readl(ts, CAR_SUPER_CCLKG_DIVIDER);
+ v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
+ clk_writel(ts, v, CAR_SUPER_CCLKG_DIVIDER);
+ }
+
+ /* initialize stats collection */
+ v = STATS_CTL_CLR_DN | STATS_CTL_EN_DN |
+ STATS_CTL_CLR_UP | STATS_CTL_EN_UP;
+ writel(v, ts->regs + THERMCTL_STATS_CTL);
+}
+
static void soctherm_init(struct platform_device *pdev)
{
struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
@@ -475,6 +1253,9 @@ static void soctherm_init(struct platform_device *pdev)
}
writel(pdiv, tegra->regs + SENSOR_PDIV);
writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF);
+
+ /* Configure hw throttle */
+ tegra_soctherm_throttle(&pdev->dev);
}
static const struct of_device_id tegra_soctherm_of_match[] = {
@@ -527,10 +1308,31 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
tegra->soc = soc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "soctherm-reg");
tegra->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tegra->regs))
+ if (IS_ERR(tegra->regs)) {
+ dev_err(&pdev->dev, "can't get soctherm registers");
return PTR_ERR(tegra->regs);
+ }
+
+ if (!tegra->soc->use_ccroc) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "car-reg");
+ tegra->clk_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->clk_regs)) {
+ dev_err(&pdev->dev, "can't get car clk registers");
+ return PTR_ERR(tegra->clk_regs);
+ }
+ } else {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ccroc-reg");
+ tegra->ccroc_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->ccroc_regs)) {
+ dev_err(&pdev->dev, "can't get ccroc registers");
+ return PTR_ERR(tegra->ccroc_regs);
+ }
+ }
tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
if (IS_ERR(tegra->reset)) {
@@ -580,6 +1382,8 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
if (err)
return err;
+ soctherm_init_hw_throt_cdev(pdev);
+
soctherm_init(pdev);
for (i = 0; i < soc->num_ttgs; ++i) {
@@ -593,6 +1397,7 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
zone->dev = &pdev->dev;
zone->sg = soc->ttgs[i];
+ zone->ts = tegra;
z = devm_thermal_zone_of_sensor_register(&pdev->dev,
soc->ttgs[i]->id, zone,
@@ -608,7 +1413,9 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
tegra->thermctl_tzs[soc->ttgs[i]->id] = z;
/* Configure hw trip points */
- tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
+ err = tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
+ if (err)
+ goto disable_clocks;
}
soctherm_debug_init(pdev);
@@ -661,7 +1468,12 @@ static int __maybe_unused soctherm_resume(struct device *dev)
struct thermal_zone_device *tz;
tz = tegra->thermctl_tzs[soc->ttgs[i]->id];
- tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
+ err = tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Resume failed: set hwtrips failed\n");
+ return err;
+ }
}
return 0;
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
index 28e18ec4b4c3..e96ca73fd780 100644
--- a/drivers/thermal/tegra/soctherm.h
+++ b/drivers/thermal/tegra/soctherm.h
@@ -15,6 +15,11 @@
#ifndef __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
#define __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
+#define THERMCTL_LEVEL0_GROUP_CPU 0x0
+#define THERMCTL_LEVEL0_GROUP_GPU 0x4
+#define THERMCTL_LEVEL0_GROUP_MEM 0x8
+#define THERMCTL_LEVEL0_GROUP_TSENSE 0xc
+
#define SENSOR_CONFIG2 8
#define SENSOR_CONFIG2_THERMA_MASK (0xffff << 16)
#define SENSOR_CONFIG2_THERMA_SHIFT 16
@@ -65,6 +70,9 @@ struct tegra_tsensor_group {
u32 thermtrip_enable_mask;
u32 thermtrip_any_en_mask;
u32 thermtrip_threshold_mask;
+ u16 thermctl_lvl0_offset;
+ u32 thermctl_lvl0_up_thresh_mask;
+ u32 thermctl_lvl0_dn_thresh_mask;
};
struct tegra_tsensor_configuration {
@@ -103,6 +111,8 @@ struct tegra_soctherm_soc {
const unsigned int num_ttgs;
const struct tegra_soctherm_fuse *tfuse;
const int thresh_grain;
+ const unsigned int bptt;
+ const bool use_ccroc;
};
int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
index beb9d36b9c8a..36768630f78c 100644
--- a/drivers/thermal/tegra/tegra124-soctherm.c
+++ b/drivers/thermal/tegra/tegra124-soctherm.c
@@ -28,7 +28,11 @@
#define TEGRA124_THERMTRIP_CPU_THRESH_MASK (0xff << 8)
#define TEGRA124_THERMTRIP_TSENSE_THRESH_MASK 0xff
+#define TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK (0xff << 17)
+#define TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK (0xff << 9)
+
#define TEGRA124_THRESH_GRAIN 1000
+#define TEGRA124_BPTT 8
static const struct tegra_tsensor_configuration tegra124_tsensor_config = {
.tall = 16300,
@@ -51,6 +55,9 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
+ .thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
@@ -66,6 +73,9 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
+ .thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
@@ -79,6 +89,9 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
+ .thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
@@ -94,6 +107,9 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
+ .thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group *tegra124_tsensor_groups[] = {
@@ -193,4 +209,6 @@ const struct tegra_soctherm_soc tegra124_soctherm = {
.num_ttgs = ARRAY_SIZE(tegra124_tsensor_groups),
.tfuse = &tegra124_soctherm_fuse,
.thresh_grain = TEGRA124_THRESH_GRAIN,
+ .bptt = TEGRA124_BPTT,
+ .use_ccroc = false,
};
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
index e2aa84e1b307..97fa30501eb1 100644
--- a/drivers/thermal/tegra/tegra132-soctherm.c
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -28,7 +28,11 @@
#define TEGRA132_THERMTRIP_CPU_THRESH_MASK (0xff << 8)
#define TEGRA132_THERMTRIP_TSENSE_THRESH_MASK 0xff
+#define TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK (0xff << 17)
+#define TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK (0xff << 9)
+
#define TEGRA132_THRESH_GRAIN 1000
+#define TEGRA132_BPTT 8
static const struct tegra_tsensor_configuration tegra132_tsensor_config = {
.tall = 16300,
@@ -51,6 +55,9 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
+ .thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
@@ -66,6 +73,9 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
+ .thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
@@ -79,6 +89,9 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
+ .thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
@@ -94,6 +107,9 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
+ .thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group *tegra132_tsensor_groups[] = {
@@ -193,4 +209,6 @@ const struct tegra_soctherm_soc tegra132_soctherm = {
.num_ttgs = ARRAY_SIZE(tegra132_tsensor_groups),
.tfuse = &tegra132_soctherm_fuse,
.thresh_grain = TEGRA132_THRESH_GRAIN,
+ .bptt = TEGRA132_BPTT,
+ .use_ccroc = true,
};
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
index 19cc0ab66f0e..ad53169a8e95 100644
--- a/drivers/thermal/tegra/tegra210-soctherm.c
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -29,7 +29,11 @@
#define TEGRA210_THERMTRIP_CPU_THRESH_MASK (0x1ff << 9)
#define TEGRA210_THERMTRIP_TSENSE_THRESH_MASK 0x1ff
+#define TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK (0x1ff << 18)
+#define TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK (0x1ff << 9)
+
#define TEGRA210_THRESH_GRAIN 500
+#define TEGRA210_BPTT 9
static const struct tegra_tsensor_configuration tegra210_tsensor_config = {
.tall = 16300,
@@ -52,6 +56,9 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
+ .thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
@@ -67,6 +74,9 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
+ .thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
@@ -80,6 +90,9 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
+ .thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
@@ -95,6 +108,9 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
+ .thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
+ .thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
};
static const struct tegra_tsensor_group *tegra210_tsensor_groups[] = {
@@ -194,4 +210,6 @@ const struct tegra_soctherm_soc tegra210_soctherm = {
.num_ttgs = ARRAY_SIZE(tegra210_tsensor_groups),
.tfuse = &tegra210_soctherm_fuse,
.thresh_grain = TEGRA210_THRESH_GRAIN,
+ .bptt = TEGRA210_BPTT,
+ .use_ccroc = false,
};
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index e2fc6161dded..226b0b4aced6 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -520,6 +520,56 @@ exit:
}
EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
+void thermal_zone_set_trips(struct thermal_zone_device *tz)
+{
+ int low = -INT_MAX;
+ int high = INT_MAX;
+ int trip_temp, hysteresis;
+ int i, ret;
+
+ mutex_lock(&tz->lock);
+
+ if (!tz->ops->set_trips || !tz->ops->get_trip_hyst)
+ goto exit;
+
+ for (i = 0; i < tz->trips; i++) {
+ int trip_low;
+
+ tz->ops->get_trip_temp(tz, i, &trip_temp);
+ tz->ops->get_trip_hyst(tz, i, &hysteresis);
+
+ trip_low = trip_temp - hysteresis;
+
+ if (trip_low < tz->temperature && trip_low > low)
+ low = trip_low;
+
+ if (trip_temp > tz->temperature && trip_temp < high)
+ high = trip_temp;
+ }
+
+ /* No need to change trip points */
+ if (tz->prev_low_trip == low && tz->prev_high_trip == high)
+ goto exit;
+
+ tz->prev_low_trip = low;
+ tz->prev_high_trip = high;
+
+ dev_dbg(&tz->device,
+ "new temperature boundaries: %d < x < %d\n", low, high);
+
+ /*
+ * Set a temperature window; once the temperature leaves it, the
+ * driver must inform the thermal core via thermal_zone_device_update.
+ */
+ ret = tz->ops->set_trips(tz, low, high);
+ if (ret)
+ dev_err(&tz->device, "Failed to set trips: %d\n", ret);
+
+exit:
+ mutex_unlock(&tz->lock);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_set_trips);
+
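To make the window selection concrete (illustrative numbers, not from the patch): with trips at 50000 mC (hysteresis 2000) and 90000 mC, and tz->temperature at 60000, the loop settles on low = 48000 (the highest trip_low still below the current temperature) and high = 90000 (the lowest trip above it), so the driver's set_trips() is asked to interrupt once the temperature leaves 48000 < x < 90000.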
static void update_temperature(struct thermal_zone_device *tz)
{
int temp, ret;
@@ -557,7 +607,8 @@ static void thermal_zone_device_reset(struct thermal_zone_device *tz)
pos->initialized = false;
}
-void thermal_zone_device_update(struct thermal_zone_device *tz)
+void thermal_zone_device_update(struct thermal_zone_device *tz,
+ enum thermal_notify_event event)
{
int count;
@@ -569,6 +620,10 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
update_temperature(tz);
+ thermal_zone_set_trips(tz);
+
+ tz->notify_event = event;
+
for (count = 0; count < tz->trips; count++)
handle_thermal_trip(tz, count);
}
@@ -579,7 +634,7 @@ static void thermal_zone_device_check(struct work_struct *work)
struct thermal_zone_device *tz = container_of(work, struct
thermal_zone_device,
poll_queue.work);
- thermal_zone_device_update(tz);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
}
/* sys I/F for thermal zone */
@@ -703,7 +758,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- thermal_zone_device_update(tz);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
return count;
}
@@ -754,6 +809,9 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
*/
ret = tz->ops->set_trip_hyst(tz, trip, temperature);
+ if (!ret)
+ thermal_zone_set_trips(tz);
+
return ret ? ret : count;
}
@@ -822,7 +880,7 @@ passive_store(struct device *dev, struct device_attribute *attr,
tz->forced_passive = state;
- thermal_zone_device_update(tz);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
return count;
}
@@ -913,7 +971,7 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
}
if (!ret)
- thermal_zone_device_update(tz);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
return ret ? ret : count;
}
@@ -1509,7 +1567,8 @@ __thermal_cooling_device_register(struct device_node *np,
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node)
if (atomic_cmpxchg(&pos->need_update, 1, 0))
- thermal_zone_device_update(pos);
+ thermal_zone_device_update(pos,
+ THERMAL_EVENT_UNSPECIFIED);
mutex_unlock(&thermal_list_lock);
return cdev;
@@ -1952,7 +2011,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
thermal_zone_device_reset(tz);
/* Update the new thermal zone and mark it as already updated. */
if (atomic_cmpxchg(&tz->need_update, 1, 0))
- thermal_zone_device_update(tz);
+ thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
return tz;
@@ -2069,6 +2128,36 @@ exit:
}
EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name);
+/**
+ * thermal_zone_get_slope - return the slope attribute of the thermal zone
+ * @tz: thermal zone device with the slope attribute
+ *
+ * Return: If the thermal zone device has a slope attribute, return it, else
+ * return 1.
+ */
+int thermal_zone_get_slope(struct thermal_zone_device *tz)
+{
+ if (tz && tz->tzp)
+ return tz->tzp->slope;
+ return 1;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_slope);
+
+/**
+ * thermal_zone_get_offset - return the offset attribute of the thermal zone
+ * @tz: thermal zone device with the offset attribute
+ *
+ * Return: If the thermal zone device has an offset attribute, return it, else
+ * return 0.
+ */
+int thermal_zone_get_offset(struct thermal_zone_device *tz)
+{
+ if (tz && tz->tzp)
+ return tz->tzp->offset;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_offset);
+
#ifdef CONFIG_NET
static const struct genl_multicast_group thermal_event_mcgrps[] = {
{ .name = THERMAL_GENL_MCAST_GROUP_NAME, },
@@ -2209,7 +2298,8 @@ static int thermal_pm_notify(struct notifier_block *nb,
atomic_set(&in_suspend, 0);
list_for_each_entry(tz, &thermal_tz_list, node) {
thermal_zone_device_reset(tz);
- thermal_zone_device_update(tz);
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
}
break;
default:
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 15c0a9ac2209..0586bd0f2bab 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -52,7 +52,7 @@ static void ti_thermal_work(struct work_struct *work)
struct ti_thermal_data *data = container_of(work,
struct ti_thermal_data, thermal_wq);
- thermal_zone_device_update(data->ti_thermal);
+ thermal_zone_device_update(data->ti_thermal, THERMAL_EVENT_UNSPECIFIED);
dev_dbg(&data->ti_thermal->device, "updated thermal zone %s\n",
data->ti_thermal->type);
@@ -205,7 +205,7 @@ static int ti_thermal_set_mode(struct thermal_zone_device *thermal,
data->mode = mode;
ti_bandgap_write_update_interval(bgp, data->sensor_id,
data->ti_thermal->polling_delay);
- thermal_zone_device_update(data->ti_thermal);
+ thermal_zone_device_update(data->ti_thermal, THERMAL_EVENT_UNSPECIFIED);
dev_dbg(&thermal->device, "thermal polling set for duration=%d msec\n",
data->ti_thermal->polling_delay);
@@ -239,7 +239,7 @@ static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal,
return 0;
}
-static int __ti_thermal_get_trend(void *p, long *trend)
+static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
{
struct ti_thermal_data *data = p;
struct ti_bandgap *bgp;
@@ -252,22 +252,6 @@ static int __ti_thermal_get_trend(void *p, long *trend)
if (ret)
return ret;
- *trend = tr;
-
- return 0;
-}
-
-/* Get the temperature trend callback functions for thermal zone */
-static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
- int trip, enum thermal_trend *trend)
-{
- int ret;
- long tr;
-
- ret = __ti_thermal_get_trend(thermal->devdata, &tr);
- if (ret)
- return ret;
-
if (tr > 0)
*trend = THERMAL_TREND_RAISING;
else if (tr < 0)
@@ -278,6 +262,13 @@ static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
return 0;
}
+/* Get the temperature trend callback functions for thermal zone */
+static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trend *trend)
+{
+ return __ti_thermal_get_trend(thermal->devdata, trip, trend);
+}
+
/* Get critical temperature callback functions for thermal zone */
static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal,
int *temp)
diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c
index 10adcddc8821..c908150c268d 100644
--- a/drivers/thermal/user_space.c
+++ b/drivers/thermal/user_space.c
@@ -23,19 +23,30 @@
*/
#include <linux/thermal.h>
-
+#include <linux/slab.h>
#include "thermal_core.h"
/**
* notify_user_space - Notifies user space about thermal events
* @tz - thermal_zone_device
+ * @trip - Trip point index
*
* This function notifies the user space through UEvents.
*/
static int notify_user_space(struct thermal_zone_device *tz, int trip)
{
+ char *thermal_prop[5];
+ int i;
+
mutex_lock(&tz->lock);
- kobject_uevent(&tz->device.kobj, KOBJ_CHANGE);
+ thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", tz->type);
+ thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", tz->temperature);
+ thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=%d", trip);
+ thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", tz->notify_event);
+ thermal_prop[4] = NULL;
+ kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, thermal_prop);
+ for (i = 0; i < 4; ++i)
+ kfree(thermal_prop[i]);
mutex_unlock(&tz->lock);
return 0;
}
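
With this change the KOBJ_CHANGE uevent carries the zone name, temperature, trip index and event type instead of an empty environment. A hypothetical payload as seen by a userspace uevent listener (the keys follow the kasprintf() formats above; the values are examples only):

	NAME=acpitz
	TEMP=55000
	TRIP=1
	EVENT=0
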
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 97f0a2bd93ed..95f4c1bcdb4c 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -348,7 +348,8 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
}
if (notify) {
pr_debug("thermal_zone_device_update\n");
- thermal_zone_device_update(phdev->tzone);
+ thermal_zone_device_update(phdev->tzone,
+ THERMAL_EVENT_UNSPECIFIED);
}
}
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 886fcf37f291..b9923464599f 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -213,7 +213,7 @@ static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
struct pci_dev *pdev = to_pci_dev(port->dev);
int ret;
- ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
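
For reference, a minimal sketch of the pci_alloc_irq_vectors()/pci_irq_vector() pattern this hunk adjusts; PCI_IRQ_ALL_TYPES lets the PCI core pick MSI-X, MSI, or legacy INTx, whichever the device supports. The function and handler names below are hypothetical:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_irq(struct pci_dev *pdev)
{
	/* One vector of any type: try MSI-X, then MSI, then INTx. */
	int ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);

	if (ret < 0)
		return ret;

	/* pci_irq_vector() maps vector index 0 to its Linux IRQ number. */
	ret = request_irq(pci_irq_vector(pdev, 0), example_irq, IRQF_SHARED,
			  "example", pdev);
	if (ret)
		pci_free_irq_vectors(pdev);
	return ret;
}
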
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1bfb6fdbaa20..1731b98d2471 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -83,7 +83,8 @@ static const struct serial8250_config uart_config[] = {
.name = "16550A",
.fifo_size = 16,
.tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index b8d9c8c9d02a..417d9e7038e1 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -99,7 +99,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
case UART_LCR:
valshift = UNIPHIER_UART_LCR_SHIFT;
/* Divisor latch access bit does not exist. */
- value &= ~(UART_LCR_DLAB << valshift);
+ value &= ~UART_LCR_DLAB;
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
@@ -199,7 +199,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
- dev_err(dev, "failed to get memory resource");
+ dev_err(dev, "failed to get memory resource\n");
return -EINVAL;
}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index c7831407a882..25c1d7bc0100 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1625,6 +1625,7 @@ config SERIAL_SPRD_CONSOLE
config SERIAL_STM32
tristate "STMicroelectronics STM32 serial port support"
select SERIAL_CORE
+ depends on HAS_DMA
depends on ARM || COMPILE_TEST
help
This driver is for the on-chip Serial Controller on
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index fd8aa1f4ba78..168b10cad47b 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2132,11 +2132,29 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode |= ATMEL_US_USMODE_RS485;
} else if (termios->c_cflag & CRTSCTS) {
/* RS232 with hardware handshake (RTS/CTS) */
- if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
- dev_info(port->dev, "not enabling hardware flow control because DMA is used");
- termios->c_cflag &= ~CRTSCTS;
- } else {
+ if (atmel_use_fifo(port) &&
+ !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
+ /*
+ * With ATMEL_US_USMODE_HWHS set, the controller will
+ * be able to drive the RTS pin high/low when the RX
+ * FIFO is above RXFTHRES/below RXFTHRES2.
+ * It will also disable the transmitter when the CTS
+ * pin is high.
+ * This mode is not activated if the CTS pin is a GPIO
+ * because in this case, the transmitter is always
+ * disabled (there must be an internal pull-up
+ * responsible for this behaviour).
+ * If the RTS pin is a GPIO, the controller won't be
+ * able to drive it according to the FIFO thresholds,
+ * but it will be handled by the driver.
+ */
mode |= ATMEL_US_USMODE_HWHS;
+ } else {
+ /*
+ * For platforms without FIFO, the flow control is
+ * handled by the driver.
+ */
+ mode |= ATMEL_US_USMODE_NORMAL;
}
} else {
/* RS232 without hardware handshake */
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index de9d5107c00a..76103f2c4a80 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -328,7 +328,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sport->dma_tx_bytes = uart_circ_chars_pending(xmit);
- if (xmit->tail < xmit->head) {
+ if (xmit->tail < xmit->head || xmit->head == 0) {
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
} else {
@@ -359,7 +359,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sport->dma_tx_in_progress = true;
sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
dma_async_issue_pending(sport->dma_tx_chan);
-
}
static void lpuart_dma_tx_complete(void *arg)
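
The extra `|| xmit->head == 0` above matters because a circular buffer whose head has wrapped exactly to index 0 still holds one contiguous region ending at the buffer boundary; only a genuinely wrapped buffer needs two scatterlist entries (the old test wrongly produced a zero-length second entry in that case). A minimal sketch of the mapping, assuming `bytes` is uart_circ_chars_pending(xmit):

#include <linux/circ_buf.h>
#include <linux/scatterlist.h>
#include <linux/serial_core.h>	/* UART_XMIT_SIZE */

static void example_map_circ(struct scatterlist *sgl, struct circ_buf *xmit,
			     unsigned int bytes)
{
	if (xmit->tail < xmit->head || xmit->head == 0) {
		/* One run: tail..head, or tail..end when head == 0. */
		sg_init_one(sgl, xmit->buf + xmit->tail, bytes);
	} else {
		/* Wrapped: tail..end of buffer, then start..head. */
		sg_init_table(sgl, 2);
		sg_set_buf(&sgl[0], xmit->buf + xmit->tail,
			   UART_XMIT_SIZE - xmit->tail);
		sg_set_buf(&sgl[1], xmit->buf, xmit->head);
	}
}
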
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d391650b82e7..42caccb5e87e 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -419,6 +419,7 @@ static struct dmi_system_id pch_uart_dmi_table[] = {
},
(void *)MINNOW_UARTCLK,
},
+ { }
};
/* Return UART clock, checking for board specific clocks. */
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index a9d94f7cf683..fb0672554123 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -708,7 +708,7 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
{
struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
- queue_kthread_work(&s->kworker, &s->irq_work);
+ kthread_queue_work(&s->kworker, &s->irq_work);
return IRQ_HANDLED;
}
@@ -784,7 +784,7 @@ static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
one->config.flags |= SC16IS7XX_RECONF_IER;
one->config.ier_clear |= bit;
- queue_kthread_work(&s->kworker, &one->reg_work);
+ kthread_queue_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_stop_tx(struct uart_port *port)
@@ -802,7 +802,7 @@ static void sc16is7xx_start_tx(struct uart_port *port)
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
- queue_kthread_work(&s->kworker, &one->tx_work);
+ kthread_queue_work(&s->kworker, &one->tx_work);
}
static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
@@ -828,7 +828,7 @@ static void sc16is7xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
one->config.flags |= SC16IS7XX_RECONF_MD;
- queue_kthread_work(&s->kworker, &one->reg_work);
+ kthread_queue_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
@@ -957,7 +957,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
port->rs485 = *rs485;
one->config.flags |= SC16IS7XX_RECONF_RS485;
- queue_kthread_work(&s->kworker, &one->reg_work);
+ kthread_queue_work(&s->kworker, &one->reg_work);
return 0;
}
@@ -1030,7 +1030,7 @@ static void sc16is7xx_shutdown(struct uart_port *port)
sc16is7xx_power(port, 0);
- flush_kthread_worker(&s->kworker);
+ kthread_flush_worker(&s->kworker);
}
static const char *sc16is7xx_type(struct uart_port *port)
@@ -1130,9 +1130,13 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
{
struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
+ u8 state = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG);
- sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
- val ? BIT(offset) : 0);
+ if (val)
+ state |= BIT(offset);
+ else
+ state &= ~BIT(offset);
+ sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
BIT(offset));
@@ -1176,8 +1180,8 @@ static int sc16is7xx_probe(struct device *dev,
s->devtype = devtype;
dev_set_drvdata(dev, s);
- init_kthread_worker(&s->kworker);
- init_kthread_work(&s->irq_work, sc16is7xx_ist);
+ kthread_init_worker(&s->kworker);
+ kthread_init_work(&s->irq_work, sc16is7xx_ist);
s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
"sc16is7xx");
if (IS_ERR(s->kworker_task)) {
@@ -1234,8 +1238,8 @@ static int sc16is7xx_probe(struct device *dev,
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT);
/* Initialize kthread work structs */
- init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
- init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+ kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+ kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
/* Register port */
uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
@@ -1301,7 +1305,7 @@ static int sc16is7xx_remove(struct device *dev)
sc16is7xx_power(&s->p[i].port, 0);
}
- flush_kthread_worker(&s->kworker);
+ kthread_flush_worker(&s->kworker);
kthread_stop(s->kworker_task);
if (!IS_ERR(s->clk))
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 6e4f63627479..f2303f390345 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -111,7 +111,7 @@ void uart_write_wakeup(struct uart_port *port)
* closed. No cookie for you.
*/
BUG_ON(!state);
- tty_wakeup(state->port.tty);
+ tty_port_tty_wakeup(&state->port);
}
static void uart_stop(struct tty_struct *tty)
@@ -632,7 +632,7 @@ static void uart_flush_buffer(struct tty_struct *tty)
if (port->ops->flush_buffer)
port->ops->flush_buffer(port);
uart_port_unlock(port, flags);
- tty_wakeup(tty);
+ tty_port_tty_wakeup(&state->port);
}
/*
@@ -2746,8 +2746,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uport->cons = drv->cons;
uport->minor = drv->tty_driver->minor_start + uport->line;
- port->console = uart_console(uport);
-
/*
* If this port is a console, then the spinlock is already
* initialised.
@@ -2761,6 +2759,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uart_configure_port(drv, state, uport);
+ port->console = uart_console(uport);
+
num_groups = 2;
if (uport->attr_group)
num_groups++;
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index 41d974923102..cd97ceb76e4f 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -31,7 +31,7 @@ struct stm32_usart_info {
struct stm32_usart_config cfg;
};
-#define UNDEF_REG ~0
+#define UNDEF_REG 0xff
/* Register offsets */
struct stm32_usart_info stm32f4_info = {
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index f37edaa5ac75..dd4c02fa4820 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1200,6 +1200,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup);
+OF_EARLYCON_DECLARE(cdns, "xlnx,zynqmp-uart", cdns_early_console_setup);
/**
* cdns_uart_console_write - perform write operation
@@ -1438,6 +1439,7 @@ static const struct of_device_id cdns_uart_of_match[] = {
{ .compatible = "xlnx,xuartps", },
{ .compatible = "cdns,uart-r1p8", },
{ .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def },
+ { .compatible = "xlnx,zynqmp-uart", .data = &zynqmp_uart_def },
{}
};
MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index e841a4e0e726..8c3bf3d613c0 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -870,10 +870,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
+ if (new_screen_size > (4 << 20))
+ return -EINVAL;
newscreen = kmalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
+ if (vc == sel_cons)
+ clear_selection();
+
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
@@ -1176,7 +1181,7 @@ static void csi_J(struct vc_data *vc, int vpar)
break;
case 3: /* erase scroll-back buffer (and whole display) */
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
- vc->vc_screenbuf_size >> 1);
+ vc->vc_screenbuf_size);
set_origin(vc);
if (con_is_visible(vc))
update_screen(vc);
@@ -3187,11 +3192,11 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
pr_info("Console: switching ");
if (!deflt)
- printk("consoles %d-%d ", first+1, last+1);
+ printk(KERN_CONT "consoles %d-%d ", first+1, last+1);
if (j >= 0) {
struct vc_data *vc = vc_cons[j].d;
- printk("to %s %s %dx%d\n",
+ printk(KERN_CONT "to %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
desc, vc->vc_cols, vc->vc_rows);
@@ -3200,7 +3205,7 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
update_screen(vc);
}
} else
- printk("to %s\n", desc);
+ printk(KERN_CONT "to %s\n", desc);
retval = 0;
err:
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 96ae69502c86..111b0e0b8698 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -188,6 +188,8 @@ static void host_stop(struct ci_hdrc *ci)
if (hcd) {
usb_remove_hcd(hcd);
+ ci->role = CI_ROLE_END;
+ synchronize_irq(ci->irq);
usb_put_hcd(hcd);
if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
(ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 78f0f85bebdc..fada988512a1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -932,8 +932,6 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
DECLARE_WAITQUEUE(wait, current);
struct async_icount old, new;
- if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD))
- return -EINVAL;
do {
spin_lock_irq(&acm->read_lock);
old = acm->oldcount;
@@ -1161,6 +1159,8 @@ static int acm_probe(struct usb_interface *intf,
if (quirks == IGNORE_DEVICE)
return -ENODEV;
+ memset(&h, 0x00, sizeof(struct usb_cdc_parsed_header));
+
num_rx_buf = (quirks == SINGLE_RX_URB) ? 1 : ACM_NR;
/* handle quirks deadly to normal probing*/
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 09c8d9ca61ae..4016dae7433b 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -2409,21 +2409,21 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL\n", __func__);
ret = proc_control(ps, p);
if (ret >= 0)
- inode->i_mtime = CURRENT_TIME;
+ inode->i_mtime = current_time(inode);
break;
case USBDEVFS_BULK:
snoop(&dev->dev, "%s: BULK\n", __func__);
ret = proc_bulk(ps, p);
if (ret >= 0)
- inode->i_mtime = CURRENT_TIME;
+ inode->i_mtime = current_time(inode);
break;
case USBDEVFS_RESETEP:
snoop(&dev->dev, "%s: RESETEP\n", __func__);
ret = proc_resetep(ps, p);
if (ret >= 0)
- inode->i_mtime = CURRENT_TIME;
+ inode->i_mtime = current_time(inode);
break;
case USBDEVFS_RESET:
@@ -2435,7 +2435,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__);
ret = proc_clearhalt(ps, p);
if (ret >= 0)
- inode->i_mtime = CURRENT_TIME;
+ inode->i_mtime = current_time(inode);
break;
case USBDEVFS_GETDRIVER:
@@ -2462,7 +2462,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB\n", __func__);
ret = proc_submiturb(ps, p);
if (ret >= 0)
- inode->i_mtime = CURRENT_TIME;
+ inode->i_mtime = current_time(inode);
break;
#ifdef CONFIG_COMPAT
@@ -2470,14 +2470,14 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL32\n", __func__);
ret = proc_control_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = CURRENT_TIME;
+ inode->i_mtime = current_time(inode);
break;
case USBDEVFS_BULK32:
snoop(&dev->dev, "%s: BULK32\n", __func__);
ret = proc_bulk_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = CURRENT_TIME;
+ inode->i_mtime = current_time(inode);
break;
case USBDEVFS_DISCSIGNAL32:
@@ -2489,7 +2489,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
ret = proc_submiturb_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = CURRENT_TIME;
+ inode->i_mtime = current_time(inode);
break;
case USBDEVFS_IOCTL32:
@@ -2552,7 +2552,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
done:
usb_unlock_device(dev);
if (ret >= 0)
- inode->i_atime = CURRENT_TIME;
+ inode->i_atime = current_time(inode);
return ret;
}
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index fa9b26b91507..4c0fa0b17353 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -463,9 +463,18 @@ static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
*/
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
{
+ bool ret;
+
switch (hsotg->dr_mode) {
case USB_DR_MODE_HOST:
- dwc2_force_mode(hsotg, true);
+ ret = dwc2_force_mode(hsotg, true);
+ /*
+ * NOTE: This is required for some Rockchip SoC based
+ * platforms with a host-only dwc2 controller.
+ */
+ if (!ret)
+ msleep(50);
+
break;
case USB_DR_MODE_PERIPHERAL:
dwc2_force_mode(hsotg, false);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index aad4107ef927..2a21a0414b1d 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -259,6 +259,13 @@ enum dwc2_lx_state {
DWC2_L3, /* Off state */
};
+/*
+ * Gadget periodic tx fifo sizes as used by legacy driver
+ * EP0 is not included
+ */
+#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \
+ 768, 0, 0, 0, 0, 0, 0, 0}
+
/* Gadget ep0 states */
enum dwc2_ep0_state {
DWC2_EP0_SETUP,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4cd6403a7566..24fbebc9b409 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -186,10 +186,9 @@ static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
*/
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
- unsigned int fifo;
+ unsigned int ep;
unsigned int addr;
int timeout;
- u32 dptxfsizn;
u32 val;
/* Reset fifo map if not correctly cleared during previous session */
@@ -217,16 +216,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
* them to endpoints dynamically according to maxpacket size value of
* given endpoint.
*/
- for (fifo = 1; fifo < MAX_EPS_CHANNELS; fifo++) {
- dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(fifo));
-
- val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr;
- addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT;
-
- if (addr > hsotg->fifo_mem)
- break;
+ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
+ if (!hsotg->g_tx_fifo_sz[ep])
+ continue;
+ val = addr;
+ val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
+ WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
+ "insufficient fifo memory");
+ addr += hsotg->g_tx_fifo_sz[ep];
- dwc2_writel(val, hsotg->regs + DPTXFSIZN(fifo));
+ dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
}
/*
@@ -3807,10 +3806,36 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
{
struct device_node *np = hsotg->dev->of_node;
+ u32 len = 0;
+ u32 i = 0;
/* Enable dma if requested in device tree */
hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
+ /*
+ * Register TX periodic fifo size per endpoint.
+ * EP0 is excluded since it has no fifo configuration.
+ */
+ if (!of_find_property(np, "g-tx-fifo-size", &len))
+ goto rx_fifo;
+
+ len /= sizeof(u32);
+
+ /* Read tx fifo sizes other than ep0 */
+ if (of_property_read_u32_array(np, "g-tx-fifo-size",
+ &hsotg->g_tx_fifo_sz[1], len))
+ goto rx_fifo;
+
+ /* Add ep0 */
+ len++;
+
+ /* Make remaining TX fifos unavailable */
+ if (len < MAX_EPS_CHANNELS) {
+ for (i = len; i < MAX_EPS_CHANNELS; i++)
+ hsotg->g_tx_fifo_sz[i] = 0;
+ }
+
+rx_fifo:
/* Register RX fifo size */
of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
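
For reference, a hypothetical device-tree fragment that this parsing code would match. The property names come from the code above; the sizes are example values only ("g-tx-fifo-size" lists periodic TX FIFO sizes for endpoints 1..n, and "g-rx-fifo-size" is a single cell):

	g-rx-fifo-size = <2048>;
	g-tx-fifo-size = <256 256 256 256>;
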
@@ -3832,10 +3857,13 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
struct device *dev = hsotg->dev;
int epnum;
int ret;
+ int i;
+ u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
/* Initialize to legacy fifo configuration values */
hsotg->g_rx_fifo_sz = 2048;
hsotg->g_np_g_tx_fifo_sz = 1024;
+ memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
/* Device tree specific probe */
dwc2_hsotg_of_probe(hsotg);
@@ -3853,6 +3881,9 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
hsotg->g_np_g_tx_fifo_sz);
dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
+ for (i = 0; i < MAX_EPS_CHANNELS; i++)
+ dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
+ hsotg->g_tx_fifo_sz[i]);
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 530959a8a6d1..8e1728b39a49 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -182,6 +182,38 @@ static const struct dwc2_core_params params_ltq = {
.hibernation = -1,
};
+static const struct dwc2_core_params params_amlogic = {
+ .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
+ .otg_ver = -1,
+ .dma_enable = 1,
+ .dma_desc_enable = 0,
+ .dma_desc_fs_enable = 0,
+ .speed = DWC2_SPEED_PARAM_HIGH,
+ .enable_dynamic_fifo = 1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 512,
+ .host_nperio_tx_fifo_size = 500,
+ .host_perio_tx_fifo_size = 500,
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = 16,
+ .phy_type = DWC2_PHY_TYPE_PARAM_UTMI,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = 1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = 0,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
+
/*
* Check the dr_mode against the module configuration and hardware
* capabilities.
@@ -486,6 +518,8 @@ static const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
{ .compatible = "snps,dwc2", .data = NULL },
{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
+ { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
+ { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
{},
};
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7287a763cd0c..fea446900cad 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -769,15 +769,14 @@ static int dwc3_core_init(struct dwc3 *dwc)
return 0;
err4:
- phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
err3:
- phy_power_off(dwc->usb3_generic_phy);
+ phy_power_off(dwc->usb2_generic_phy);
err2:
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
- dwc3_core_exit(dwc);
err1:
usb_phy_shutdown(dwc->usb2_phy);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 89a2f712fdfe..aaaf256f71dd 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/usb/of.h>
#include "core.h"
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 07cc8929f271..1dfa56a5f1c5 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -783,6 +783,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
req->trb = trb;
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
req->first_trb_index = dep->trb_enqueue;
+ dep->queued_requests++;
}
dwc3_ep_inc_enq(dep);
@@ -833,8 +834,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl |= DWC3_TRB_CTRL_HWO;
- dep->queued_requests++;
-
trace_dwc3_prepare_trb(dep, trb);
}
@@ -1074,9 +1073,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- dep->flags & DWC3_EP_PENDING_REQUEST) {
- if (list_empty(&dep->started_list)) {
+ /*
+ * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
+ * wait for a XferNotReady event so we will know what's the current
+ * (micro-)frame number.
+ *
+ * Without this trick, we are very likely to get Bus Expiry
+ * errors, which will force us to issue an EndTransfer command.
+ */
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
+ list_empty(&dep->started_list)) {
dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
@@ -1861,8 +1868,11 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
unsigned int s_pkt = 0;
unsigned int trb_status;
- dep->queued_requests--;
dwc3_ep_inc_deq(dep);
+
+ if (req->trb == trb)
+ dep->queued_requests--;
+
trace_dwc3_complete_trb(dep, trb);
/*
@@ -2980,7 +2990,7 @@ err3:
kfree(dwc->setup_buf);
err2:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
err1:
@@ -3005,7 +3015,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
kfree(dwc->setup_buf);
kfree(dwc->zlp_buf);
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 12731e67d2c7..ea73afb026d8 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -20,7 +20,6 @@
#include <linux/usb/ehci_def.h>
#include <linux/delay.h>
#include <linux/serial_core.h>
-#include <linux/kconfig.h>
#include <linux/kgdb.h>
#include <linux/kthread.h>
#include <asm/io.h>
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 0aeed85bb5cb..e40d47d47d82 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -136,8 +136,60 @@ struct ffs_epfile {
/*
* Buffer for holding data from partial reads which may happen since
* we’re rounding user read requests to a multiple of a max packet size.
+ *
+ * The pointer is initialised to NULL and may be set by the
+ * __ffs_epfile_read_data function to point to a temporary buffer.
+ *
+ * In normal operation, calls to __ffs_epfile_read_buffered will consume
+ * data from said buffer and eventually free it. Importantly, while the
+ * function is using the buffer, it sets the pointer to NULL. This is
+ * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
+ * can never run concurrently (they are synchronised by epfile->mutex)
+ * so the latter will not assign a new value to the pointer.
+ *
+ * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
+ * valid) and sets the pointer to the READ_BUFFER_DROP value. This special
+ * value is the crux of the synchronisation between ffs_func_eps_disable and
+ * __ffs_epfile_read_data.
+ *
+ * Once __ffs_epfile_read_data is about to finish it will try to set the
+ * pointer back to its old value (as described above), but seeing as the
+ * pointer is non-NULL (namely READ_BUFFER_DROP) it will instead free
+ * the buffer.
+ *
+ * == State transitions ==
+ *
+ * • ptr == NULL: (initial state)
+ * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
+ * ◦ __ffs_epfile_read_buffered: nop
+ * ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == DROP:
+ * ◦ __ffs_epfile_read_buffer_free: nop
+ * ◦ __ffs_epfile_read_buffered: go to ptr == NULL
+ * ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == buf:
+ * ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
+ * ◦ __ffs_epfile_read_buffered: go to ptr == NULL and reading
+ * ◦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered
+ * is always called first
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == NULL and reading:
+ * ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
+ * ◦ __ffs_epfile_read_buffered: n/a, mutex is held
+ * ◦ __ffs_epfile_read_data: n/a, mutex is held
+ * ◦ reading finishes and …
+ * … all data read: free buf, go to ptr == NULL
+ * … otherwise: go to ptr == buf and reading
+ * • ptr == DROP and reading:
+ * ◦ __ffs_epfile_read_buffer_free: nop
+ * ◦ __ffs_epfile_read_buffered: n/a, mutex is held
+ * ◦ __ffs_epfile_read_data: n/a, mutex is held
+ * ◦ reading finishes: free buf, go to ptr == DROP
*/
- struct ffs_buffer *read_buffer; /* P: epfile->mutex */
+ struct ffs_buffer *read_buffer;
+#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
char name[5];
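
A minimal, self-contained sketch of the hand-off protocol the state-transition comment above describes: xchg() claims the buffer, leaving NULL behind even when the old value was the DROP marker, and cmpxchg() puts a partially consumed buffer back only if no drop was requested in the meantime. All names here are illustrative, not the driver's own:

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/slab.h>

struct example_buf {
	size_t length;
	char *data;
};

#define EXAMPLE_DROP ((struct example_buf *)ERR_PTR(-ESHUTDOWN))

/* Claim exclusive ownership; a DROP marker reads as "no buffer". */
static struct example_buf *example_take(struct example_buf **slot)
{
	struct example_buf *b = xchg(slot, NULL);

	return (b == EXAMPLE_DROP) ? NULL : b;
}

/* Return a buffer, unless a drop raced in while we held it. */
static void example_put_back(struct example_buf **slot, struct example_buf *b)
{
	if (cmpxchg(slot, NULL, b))	/* non-NULL old value: DROP raced in */
		kfree(b);
}
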
@@ -736,25 +788,47 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
schedule_work(&io_data->work);
}
+static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
+{
+ /*
+ * See comment in struct ffs_epfile for full read_buffer pointer
+ * synchronisation story.
+ */
+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
+ if (buf && buf != READ_BUFFER_DROP)
+ kfree(buf);
+}
+
/* Assumes epfile->mutex is held. */
static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
struct iov_iter *iter)
{
- struct ffs_buffer *buf = epfile->read_buffer;
+ /*
+ * Null out epfile->read_buffer so ffs_func_eps_disable does not free
+ * the buffer while we are using it. See comment in struct ffs_epfile
+ * for full read_buffer pointer synchronisation story.
+ */
+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
ssize_t ret;
- if (!buf)
+ if (!buf || buf == READ_BUFFER_DROP)
return 0;
ret = copy_to_iter(buf->data, buf->length, iter);
if (buf->length == ret) {
kfree(buf);
- epfile->read_buffer = NULL;
- } else if (unlikely(iov_iter_count(iter))) {
+ return ret;
+ }
+
+ if (unlikely(iov_iter_count(iter))) {
ret = -EFAULT;
} else {
buf->length -= ret;
buf->data += ret;
}
+
+ if (cmpxchg(&epfile->read_buffer, NULL, buf))
+ kfree(buf);
+
return ret;
}
@@ -783,7 +857,15 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
buf->length = data_len;
buf->data = buf->storage;
memcpy(buf->storage, data + ret, data_len);
- epfile->read_buffer = buf;
+
+ /*
+ * At this point read_buffer is NULL or READ_BUFFER_DROP (if
+ * ffs_func_eps_disable has been called in the meanwhile). See comment
+ * in struct ffs_epfile for full read_buffer pointer synchronisation
+ * story.
+ */
+ if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf)))
+ kfree(buf);
return ret;
}
@@ -1097,8 +1179,7 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
- kfree(epfile->read_buffer);
- epfile->read_buffer = NULL;
+ __ffs_epfile_read_buffer_free(epfile);
ffs_data_closed(epfile->ffs);
return 0;
@@ -1196,15 +1277,15 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
inode = new_inode(sb);
if (likely(inode)) {
- struct timespec current_time = CURRENT_TIME;
+ struct timespec ts = current_time(inode);
inode->i_ino = get_next_ino();
inode->i_mode = perms->mode;
inode->i_uid = perms->uid;
inode->i_gid = perms->gid;
- inode->i_atime = current_time;
- inode->i_mtime = current_time;
- inode->i_ctime = current_time;
+ inode->i_atime = ts;
+ inode->i_mtime = ts;
+ inode->i_ctime = ts;
inode->i_private = data;
if (fops)
inode->i_fop = fops;
@@ -1724,24 +1805,20 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned count = func->ffs->eps_count;
unsigned long flags;
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
- if (epfile)
- mutex_lock(&epfile->mutex);
- spin_lock_irqsave(&func->ffs->eps_lock, flags);
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
++ep;
- spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
if (epfile) {
epfile->ep = NULL;
- kfree(epfile->read_buffer);
- epfile->read_buffer = NULL;
- mutex_unlock(&epfile->mutex);
+ __ffs_epfile_read_buffer_free(epfile);
++epfile;
}
} while (--count);
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
static int ffs_func_eps_enable(struct ffs_function *func)
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 9c8c9ed1dc9e..5d1bd13a56c1 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -588,13 +588,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
req->length = length;
- /* throttle high/super speed IRQ rate back slightly */
- if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
- dev->gadget->speed == USB_SPEED_SUPER)
- ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
- : 0;
-
retval = usb_ep_queue(in, req, GFP_ATOMIC);
switch (retval) {
default:
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 16104b5ebdcb..bd82dd12deff 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1913,7 +1913,7 @@ gadgetfs_make_inode (struct super_block *sb,
inode->i_uid = make_kuid(&init_user_ns, default_uid);
inode->i_gid = make_kgid(&init_user_ns, default_gid);
inode->i_atime = inode->i_mtime = inode->i_ctime
- = CURRENT_TIME;
+ = current_time(inode);
inode->i_private = data;
inode->i_fop = fops;
}
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index bb1f6c8f0f01..45bc997d0711 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
goto err;
}
- ep->ep.name = name;
+ ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index f5fccb3e4152..f78503203f42 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -21,7 +21,6 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 1e5f529d51a2..063064801ceb 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1308,11 +1308,6 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_mv_driver
#endif
-#ifdef CONFIG_MIPS_SEAD3
-#include "ehci-sead3.c"
-#define PLATFORM_DRIVER ehci_hcd_sead3_driver
-#endif
-
static int __init ehci_hcd_init(void)
{
int retval = 0;
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 876dca4fc216..a268d9e8d6cf 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -39,7 +39,7 @@
#define DRIVER_DESC "EHCI generic platform driver"
#define EHCI_MAX_CLKS 4
-#define EHCI_MAX_RSTS 3
+#define EHCI_MAX_RSTS 4
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
struct ehci_platform_priv {
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
deleted file mode 100644
index 3d86cc2ffe68..000000000000
--- a/drivers/usb/host/ehci-sead3.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * MIPS CI13320A EHCI Host Controller driver
- * Based on "ehci-au1xxx.c" by K.Boge <karsten.boge@amd.com>
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-
-static int ehci_sead3_setup(struct usb_hcd *hcd)
-{
- int ret;
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- ehci->caps = hcd->regs + 0x100;
-
-#ifdef __BIG_ENDIAN
- ehci->big_endian_mmio = 1;
- ehci->big_endian_desc = 1;
-#endif
-
- ret = ehci_setup(hcd);
- if (ret)
- return ret;
-
- ehci->need_io_watchdog = 0;
-
- /* Set burst length to 16 words. */
- ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]);
-
- return ret;
-}
-
-const struct hc_driver ehci_sead3_hc_driver = {
- .description = hcd_name,
- .product_desc = "SEAD-3 EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
-
- /*
- * basic lifecycle operations
- *
- */
- .reset = ehci_sead3_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-};
-
-static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct resource *res;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ");
- return -ENOMEM;
- }
- hcd = usb_create_hcd(&ehci_sead3_hc_driver, &pdev->dev, "SEAD-3");
- if (!hcd)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- ret = PTR_ERR(hcd->regs);
- goto err1;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- /* Root hub has integrated TT. */
- hcd->has_tt = 1;
-
- ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_SHARED);
- if (ret == 0) {
- platform_set_drvdata(pdev, hcd);
- device_wakeup_enable(hcd->self.controller);
- return ret;
- }
-
-err1:
- usb_put_hcd(hcd);
- return ret;
-}
-
-static int ehci_hcd_sead3_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ehci_hcd_sead3_drv_suspend(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- bool do_wakeup = device_may_wakeup(dev);
-
- return ehci_suspend(hcd, do_wakeup);
-}
-
-static int ehci_hcd_sead3_drv_resume(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
-
- ehci_resume(hcd, false);
- return 0;
-}
-
-static const struct dev_pm_ops sead3_ehci_pmops = {
- .suspend = ehci_hcd_sead3_drv_suspend,
- .resume = ehci_hcd_sead3_drv_resume,
-};
-
-#define SEAD3_EHCI_PMOPS (&sead3_ehci_pmops)
-
-#else
-#define SEAD3_EHCI_PMOPS NULL
-#endif
-
-static struct platform_driver ehci_hcd_sead3_driver = {
- .probe = ehci_hcd_sead3_drv_probe,
- .remove = ehci_hcd_sead3_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = "sead3-ehci",
- .pm = SEAD3_EHCI_PMOPS,
- }
-};
-
-MODULE_ALIAS("platform:sead3-ehci");
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 5b5880c0ae19..b38a228134df 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -221,6 +221,12 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
ohci->num_ports = board->ports;
at91_start_hc(pdev);
+ /*
+ * The RemoteWakeupConnected bit has to be set explicitly
+ * before calling ohci_run. The reset value of this bit is 0.
+ */
+ ohci->hc_control = OHCI_CTRL_RWC;
+
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval == 0) {
device_wakeup_enable(hcd->self.controller);
@@ -677,9 +683,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
* REVISIT: some boards will be able to turn VBUS off...
*/
if (!ohci_at91->wakeup) {
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
- ohci->hc_control &= OHCI_CTRL_RWC;
- ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
ohci->rh_state = OHCI_RH_HALTED;
/* flush the writes */
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 1700908b84ef..86612ac3fda2 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -72,7 +72,7 @@
static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
-#define IO_WATCHDOG_DELAY msecs_to_jiffies(250)
+#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
#include "ohci.h"
#include "pci-quirks.h"
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 35af36253440..a9a1e4c40480 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -9,7 +9,6 @@
*/
#include <linux/types.h>
-#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -996,6 +995,14 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
}
val = readl(base + ext_cap_offset);
+ /* Auto handoff never worked for these devices. Force it and continue */
+ if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
+ (pdev->vendor == PCI_VENDOR_ID_RENESAS
+ && pdev->device == 0x0014)) {
+ val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
+ writel(val, base + ext_cap_offset);
+ }
+
/* If the BIOS owns the HC, signal that the OS wants it, and wait */
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 730b9fd26685..0ef16900efed 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1166,7 +1166,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
@@ -1355,6 +1355,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
return 0;
}
+/*
+ * Workaround for missing Cold Attach Status (CAS) if a device is re-plugged in S3:
+ * warm reset a USB3 device stuck in polling or compliance mode after resume.
+ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
+ */
+static bool xhci_port_missing_cas_quirk(int port_index,
+ __le32 __iomem **port_array)
+{
+ u32 portsc;
+
+ portsc = readl(port_array[port_index]);
+
+ /* if any of these are set we are not stuck */
+ if (portsc & (PORT_CONNECT | PORT_CAS))
+ return false;
+
+ if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
+ ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
+ return false;
+
+ /* clear wakeup/change bits, and do a warm port reset */
+ portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+ portsc |= PORT_WR;
+ writel(portsc, port_array[port_index]);
+ /* flush write */
+ readl(port_array[port_index]);
+ return true;
+}
+
int xhci_bus_resume(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -1392,6 +1421,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
u32 temp;
temp = readl(port_array[port_index]);
+
+ /* warm reset CAS limited ports stuck in polling/compliance */
+ if ((xhci->quirks & XHCI_MISSING_CAS) &&
+ (hcd->speed >= HCD_USB3) &&
+ xhci_port_missing_cas_quirk(port_index, port_array)) {
+ xhci_dbg(xhci, "reset stuck port %d\n", port_index);
+ continue;
+ }
if (DEV_SUPERSPEED_ANY(temp))
temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
else
@@ -1410,7 +1447,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
if (need_usb2_u3_exit) {
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d7b0f97abbad..e96ae80d107e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -45,11 +45,13 @@
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
+#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
static const char hcd_name[] = "xhci_hcd";
@@ -153,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
@@ -169,6 +172,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
}
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+ xhci->quirks |= XHCI_MISSING_CAS;
+
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b2c1dc5dc0f3..f945380035d0 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -314,6 +314,8 @@ struct xhci_op_regs {
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER (1 << 9)
@@ -1653,6 +1655,7 @@ struct xhci_hcd {
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
#define XHCI_NO_64BIT_SUPPORT (1 << 23)
+#define XHCI_MISSING_CAS (1 << 24)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 210b7e43a6fd..2440f88e07a3 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -479,7 +479,8 @@ static int da8xx_probe(struct platform_device *pdev)
glue->phy = devm_phy_get(&pdev->dev, "usb-phy");
if (IS_ERR(glue->phy)) {
- dev_err(&pdev->dev, "failed to get phy\n");
+ if (PTR_ERR(glue->phy) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get phy\n");
return PTR_ERR(glue->phy);
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 27dadc0d9114..e01116e4c067 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2114,11 +2114,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb->io.ep_offset = musb_flat_ep_offset;
musb->io.ep_select = musb_flat_ep_select;
}
- /* And override them with platform specific ops if specified. */
- if (musb->ops->ep_offset)
- musb->io.ep_offset = musb->ops->ep_offset;
- if (musb->ops->ep_select)
- musb->io.ep_select = musb->ops->ep_select;
/* At least tusb6010 has its own offsets */
if (musb->ops->ep_offset)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index bff4869a57cd..4042ea017985 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1255,6 +1255,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
map_dma_buffer(request, musb, musb_ep);
+ pm_runtime_get_sync(musb->controller);
spin_lock_irqsave(&musb->lock, lockflags);
/* don't queue if the ep is down */
@@ -1275,6 +1276,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
unlock:
spin_unlock_irqrestore(&musb->lock, lockflags);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+
return status;
}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 1ab6973d4f61..cc1225485509 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -287,6 +287,7 @@ static int omap2430_musb_init(struct musb *musb)
}
musb->isr = omap2430_musb_interrupt;
phy_init(musb->phy);
+ phy_power_on(musb->phy);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
@@ -323,8 +324,6 @@ static void omap2430_musb_enable(struct musb *musb)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
- if (!WARN_ON(!musb->phy))
- phy_power_on(musb->phy);
switch (glue->status) {
@@ -361,9 +360,6 @@ static void omap2430_musb_disable(struct musb *musb)
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- if (!WARN_ON(!musb->phy))
- phy_power_off(musb->phy);
-
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
@@ -375,6 +371,7 @@ static int omap2430_musb_exit(struct musb *musb)
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
omap2430_low_level_exit(musb);
+ phy_power_off(musb->phy);
phy_exit(musb->phy);
musb->phy = NULL;
cancel_work_sync(&glue->omap_musb_mailbox_work);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index 1d70add926f0..d544b331c9f2 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/delay.h>
#include <linux/io.h>
#include "common.h"
#include "rcar3.h"
@@ -35,10 +36,13 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
- if (enable)
+ if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
- else
+ /* The controller on R-Car Gen3 needs to wait up to 45 usec */
+ udelay(45);
+ } else {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
+ }
return 0;
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 54a4de0efdba..f61477bed3a8 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1077,7 +1077,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
u8 control;
int result;
- cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ if (result)
+ return result;
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b2d767e743fc..0ff7f38d7800 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
/* ekey Devices */
{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
/* Infineon Devices */
- { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
/* GE Healthcare devices */
{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
/* Active Research (Actisense) devices */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f87a938cf005..21011c0a4c64 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -626,8 +626,9 @@
/*
* Infineon Technologies
*/
-#define INFINEON_VID 0x058b
-#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_VID 0x058b
+#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
* Acton Research Corp.
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index d213cf44a7e4..4a037b4a79cf 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1078,7 +1078,8 @@ static int usb_serial_probe(struct usb_interface *interface,
serial->disconnected = 0;
- usb_serial_console_init(serial->port[0]->minor);
+ if (num_ports > 0)
+ usb_serial_console_init(serial->port[0]->minor);
exit:
module_put(type->driver.owner);
return 0;
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 79b2b628066d..79451f7ef1b7 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -133,6 +133,13 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
bo[itr] = bi1[itr] ^ bi2[itr];
}
+/* Scratch space for MAC calculations. */
+struct wusb_mac_scratch {
+ struct aes_ccm_b0 b0;
+ struct aes_ccm_b1 b1;
+ struct aes_ccm_a ax;
+};
+
/*
* CC-MAC function WUSB1.0[6.5]
*
@@ -197,16 +204,15 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
* what sg[4] is for. Maybe there is a smarter way to do this.
*/
static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
- struct crypto_cipher *tfm_aes, void *mic,
+ struct crypto_cipher *tfm_aes,
+ struct wusb_mac_scratch *scratch,
+ void *mic,
const struct aes_ccm_nonce *n,
const struct aes_ccm_label *a, const void *b,
size_t blen)
{
int result = 0;
SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
- struct aes_ccm_b0 b0;
- struct aes_ccm_b1 b1;
- struct aes_ccm_a ax;
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
@@ -218,16 +224,17 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* These checks should be compile time optimized out
* ensure @a fills b1's mac_header and following fields
*/
- WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
- WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
- WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
- WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la));
+ WARN_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block));
result = -ENOMEM;
zero_padding = blen % sizeof(struct aes_ccm_block);
if (zero_padding)
zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
- dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
+ dst_size = blen + sizeof(scratch->b0) + sizeof(scratch->b1) +
+ zero_padding;
dst_buf = kzalloc(dst_size, GFP_KERNEL);
if (!dst_buf)
goto error_dst_buf;
@@ -235,9 +242,9 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
memset(iv, 0, sizeof(iv));
/* Setup B0 */
- b0.flags = 0x59; /* Format B0 */
- b0.ccm_nonce = *n;
- b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
+ scratch->b0.flags = 0x59; /* Format B0 */
+ scratch->b0.ccm_nonce = *n;
+ scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
/* Setup B1
*
@@ -246,12 +253,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* 14'--after clarification, it means to use A's contents
* for MAC Header, EO, sec reserved and padding.
*/
- b1.la = cpu_to_be16(blen + 14);
- memcpy(&b1.mac_header, a, sizeof(*a));
+ scratch->b1.la = cpu_to_be16(blen + 14);
+ memcpy(&scratch->b1.mac_header, a, sizeof(*a));
sg_init_table(sg, ARRAY_SIZE(sg));
- sg_set_buf(&sg[0], &b0, sizeof(b0));
- sg_set_buf(&sg[1], &b1, sizeof(b1));
+ sg_set_buf(&sg[0], &scratch->b0, sizeof(scratch->b0));
+ sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
sg_set_buf(&sg[2], b, blen);
/* 0 if well behaved :) */
sg_set_buf(&sg[3], bzero, zero_padding);
@@ -276,11 +283,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* POS Crypto API: size is assumed to be AES's block size.
* Thanks for documenting it -- tip taken from airo.c
*/
- ax.flags = 0x01; /* as per WUSB 1.0 spec */
- ax.ccm_nonce = *n;
- ax.counter = 0;
- crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
- bytewise_xor(mic, &ax, iv, 8);
+ scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */
+ scratch->ax.ccm_nonce = *n;
+ scratch->ax.counter = 0;
+ crypto_cipher_encrypt_one(tfm_aes, (void *)&scratch->ax,
+ (void *)&scratch->ax);
+ bytewise_xor(mic, &scratch->ax, iv, 8);
result = 8;
error_cbc_crypt:
kfree(dst_buf);
@@ -303,6 +311,7 @@ ssize_t wusb_prf(void *out, size_t out_size,
struct aes_ccm_nonce n = *_n;
struct crypto_skcipher *tfm_cbc;
struct crypto_cipher *tfm_aes;
+ struct wusb_mac_scratch *scratch;
u64 sfn = 0;
__le64 sfn_le;
@@ -329,17 +338,25 @@ ssize_t wusb_prf(void *out, size_t out_size,
printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
goto error_setkey_aes;
}
+ scratch = kmalloc(sizeof(*scratch), GFP_KERNEL);
+ if (!scratch) {
+ result = -ENOMEM;
+ goto error_alloc_scratch;
+ }
for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
sfn_le = cpu_to_le64(sfn++);
memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
- result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
+ result = wusb_ccm_mac(tfm_cbc, tfm_aes, scratch, out + bytes,
&n, a, b, blen);
if (result < 0)
goto error_ccm_mac;
bytes += result;
}
result = bytes;
+
+ kfree(scratch);
+error_alloc_scratch:
error_ccm_mac:
error_setkey_aes:
crypto_free_cipher(tfm_aes);
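The wusbcore change above moves the CCM scratch blocks (b0, b1, ax) from the
stack of wusb_ccm_mac() into a kmalloc'ed structure allocated once per
wusb_prf() call. The likely motivation (inferred from the code, not stated in
it): the buffers are handed to sg_set_buf(), and scatterlist entries must
reference linearly mapped memory, which on-stack data no longer guarantees
once kernel stacks are virtually mapped (CONFIG_VMAP_STACK). The safe
pattern, condensed:

    /* Heap-allocate anything that ends up in a scatterlist; a stack
     * address may not be convertible with virt_to_page(). */
    struct wusb_mac_scratch *scratch;

    scratch = kmalloc(sizeof(*scratch), GFP_KERNEL);
    if (!scratch)
            return -ENOMEM;
    sg_set_buf(&sg[0], &scratch->b0, sizeof(scratch->b0));
    /* ... run the CBC/AES operations ... */
    kfree(scratch);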
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
index d059ad4d0dbd..97ee1b46db69 100644
--- a/drivers/uwb/lc-rc.c
+++ b/drivers/uwb/lc-rc.c
@@ -56,8 +56,11 @@ static struct uwb_rc *uwb_rc_find_by_index(int index)
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
@@ -467,7 +470,9 @@ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
if (dev) {
rc = dev_get_drvdata(dev);
__uwb_rc_get(rc);
+ put_device(dev);
}
+
return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
@@ -520,8 +525,11 @@ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
dev = class_find_device(&uwb_rc_class, NULL, grandpa_dev,
find_rc_grandpa);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
@@ -553,8 +561,10 @@ struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, addr, find_rc_dev);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
return rc;
}
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
index c1304b8d4985..678e93741ae1 100644
--- a/drivers/uwb/pal.c
+++ b/drivers/uwb/pal.c
@@ -97,6 +97,8 @@ static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
+ put_device(dev);
+
return (dev != NULL);
}
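The uwb hunks above all fix the same leak: class_find_device() returns its
match with the embedded device refcount raised, and the caller owns that
reference. The general shape of the lookup (class and match function are
hypothetical placeholders):

    struct device *dev;
    void *data = NULL;

    dev = class_find_device(&example_class, NULL, key, example_match);
    if (dev) {
            data = dev_get_drvdata(dev);
            put_device(dev);        /* balance class_find_device()'s get */
    }

In pal.c the put_device() call is unconditional, which is fine because
put_device(NULL) is a no-op.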
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index d624a527777f..031bc08d000d 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -829,8 +829,9 @@ static long vfio_pci_ioctl(void *device_data,
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
+ size_t size;
u8 *data = NULL;
- int ret = 0;
+ int max, ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
@@ -838,23 +839,31 @@ static long vfio_pci_ioctl(void *device_data,
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
+ hdr.count >= (U32_MAX - hdr.start) ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
- size_t size;
- int max = vfio_pci_get_irq_count(vdev, hdr.index);
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
+ if (hdr.start >= max || hdr.start + hdr.count > max)
+ return -EINVAL;
- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
- size = sizeof(uint8_t);
- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
- size = sizeof(int32_t);
- else
- return -EINVAL;
+ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_NONE:
+ size = 0;
+ break;
+ case VFIO_IRQ_SET_DATA_BOOL:
+ size = sizeof(uint8_t);
+ break;
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ size = sizeof(int32_t);
+ break;
+ default:
+ return -EINVAL;
+ }
- if (hdr.argsz - minsz < hdr.count * size ||
- hdr.start >= max || hdr.start + hdr.count > max)
+ if (size) {
+ if (hdr.argsz - minsz < hdr.count * size)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index c2e60893cd09..1c46045b0e7f 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -256,7 +256,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
if (!is_irq_none(vdev))
return -EINVAL;
- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
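Both vfio changes harden user-controlled arithmetic in the ioctl path: the
new hdr.count >= (U32_MAX - hdr.start) test rejects ranges that would wrap a
32-bit value before the start/count validation runs, and kcalloc() replaces
an open-coded nvec * sizeof() multiplication. The allocation idiom:

    /* kzalloc(n * size, ...) silently wraps for huge n; kcalloc()
     * checks the multiplication and returns NULL on overflow. */
    ctx = kcalloc(nvec, sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
            return -ENOMEM;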
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 88b008fb8a4e..5d3b0db5ce0a 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -284,12 +284,14 @@ config FB_PM2_FIFO_DISCONNECT
config FB_ARMCLCD
tristate "ARM PrimeCell PL110 support"
depends on ARM || ARM64 || COMPILE_TEST
- depends on FB && ARM_AMBA
+ depends on FB && ARM_AMBA && HAS_IOMEM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_MODE_HELPERS if OF
select VIDEOMODE_HELPERS if OF
+ select BACKLIGHT_LCD_SUPPORT if OF
+ select BACKLIGHT_CLASS_DEVICE if OF
help
This framebuffer device driver is for the ARM PrimeCell PL110
Colour LCD controller. ARM PrimeCells provide the building
@@ -305,6 +307,8 @@ config PLAT_VERSATILE_CLCD
def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
depends on ARM
depends on FB_ARMCLCD && FB=y
+ select REGMAP
+ select MFD_SYSCON
config FB_ACORN
bool "Acorn VIDC support"
@@ -2183,7 +2187,7 @@ config FB_GOLDFISH
config FB_COBALT
tristate "Cobalt server LCD frame buffer support"
- depends on FB && (MIPS_COBALT || MIPS_SEAD3)
+ depends on FB && MIPS_COBALT
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
@@ -2443,7 +2447,6 @@ config FB_SIMPLE
source "drivers/video/fbdev/omap/Kconfig"
source "drivers/video/fbdev/omap2/Kconfig"
-source "drivers/video/fbdev/exynos/Kconfig"
source "drivers/video/fbdev/mmp/Kconfig"
config FB_SH_MOBILE_MERAM
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index f6731867dd26..ee8c81405a7f 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -6,8 +6,6 @@
obj-y += core/
-obj-$(CONFIG_EXYNOS_VIDEO) += exynos/
-
obj-$(CONFIG_FB_MACMODES) += macmodes.o
obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o
@@ -79,6 +77,7 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
obj-$(CONFIG_FB_PVR2) += pvr2fb.o
obj-$(CONFIG_FB_VOODOO1) += sstfb.o
obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
+obj-$(CONFIG_ARCH_NOMADIK) += amba-clcd-nomadik.o
obj-$(CONFIG_PLAT_VERSATILE_CLCD) += amba-clcd-versatile.o
obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
obj-$(CONFIG_FB_68328) += 68328fb.o
diff --git a/drivers/video/fbdev/amba-clcd-nomadik.c b/drivers/video/fbdev/amba-clcd-nomadik.c
new file mode 100644
index 000000000000..0c06fcaaa6e8
--- /dev/null
+++ b/drivers/video/fbdev/amba-clcd-nomadik.c
@@ -0,0 +1,259 @@
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "amba-clcd-nomadik.h"
+
+static struct gpio_desc *grestb;
+static struct gpio_desc *scen;
+static struct gpio_desc *scl;
+static struct gpio_desc *sda;
+
+static u8 tpg110_readwrite_reg(bool write, u8 address, u8 outval)
+{
+ int i;
+ u8 inval = 0;
+
+ /* Assert SCEN */
+ gpiod_set_value_cansleep(scen, 1);
+ ndelay(150);
+ /* Hammer out the address */
+ for (i = 5; i >= 0; i--) {
+ if (address & BIT(i))
+ gpiod_set_value_cansleep(sda, 1);
+ else
+ gpiod_set_value_cansleep(sda, 0);
+ ndelay(150);
+ /* Send an SCL pulse */
+ gpiod_set_value_cansleep(scl, 1);
+ ndelay(160);
+ gpiod_set_value_cansleep(scl, 0);
+ ndelay(160);
+ }
+
+ if (write) {
+ /* WRITE */
+ gpiod_set_value_cansleep(sda, 0);
+ } else {
+ /* READ */
+ gpiod_set_value_cansleep(sda, 1);
+ }
+ ndelay(150);
+ /* Send an SCL pulse */
+ gpiod_set_value_cansleep(scl, 1);
+ ndelay(160);
+ gpiod_set_value_cansleep(scl, 0);
+ ndelay(160);
+
+ if (!write)
+ /* HiZ turn-around cycle */
+ gpiod_direction_input(sda);
+ ndelay(150);
+ /* Send an SCL pulse */
+ gpiod_set_value_cansleep(scl, 1);
+ ndelay(160);
+ gpiod_set_value_cansleep(scl, 0);
+ ndelay(160);
+
+ /* Hammer in/out the data */
+ for (i = 7; i >= 0; i--) {
+ int value;
+
+ if (write) {
+ value = !!(outval & BIT(i));
+ gpiod_set_value_cansleep(sda, value);
+ } else {
+ value = gpiod_get_value(sda);
+ if (value)
+ inval |= BIT(i);
+ }
+ ndelay(150);
+ /* Send an SCL pulse */
+ gpiod_set_value_cansleep(scl, 1);
+ ndelay(160);
+ gpiod_set_value_cansleep(scl, 0);
+ ndelay(160);
+ }
+
+ gpiod_direction_output(sda, 0);
+ /* Deassert SCEN */
+ gpiod_set_value_cansleep(scen, 0);
+ /* Satisfies SCEN pulse width */
+ udelay(1);
+
+ return inval;
+}
+
+static u8 tpg110_read_reg(u8 address)
+{
+ return tpg110_readwrite_reg(false, address, 0);
+}
+
+static void tpg110_write_reg(u8 address, u8 outval)
+{
+ tpg110_readwrite_reg(true, address, outval);
+}
+
+static void tpg110_startup(struct device *dev)
+{
+ u8 val;
+
+ dev_info(dev, "TPG110 display enable\n");
+ /* De-assert the reset signal */
+ gpiod_set_value_cansleep(grestb, 0);
+ mdelay(1);
+ dev_info(dev, "de-asserted GRESTB\n");
+
+ /* Test display communication */
+ tpg110_write_reg(0x00, 0x55);
+ val = tpg110_read_reg(0x00);
+ if (val == 0x55)
+ dev_info(dev, "passed communication test\n");
+ val = tpg110_read_reg(0x01);
+ dev_info(dev, "TPG110 chip ID: %d version: %d\n",
+ val >> 4, val & 0x0f);
+
+ /* Show display resolution */
+ val = tpg110_read_reg(0x02);
+ val &= 7;
+ switch (val) {
+ case 0x0:
+ dev_info(dev, "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)");
+ break;
+ case 0x1:
+ dev_info(dev, "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)");
+ break;
+ case 0x4:
+ dev_info(dev, "480x640 RGB");
+ break;
+ case 0x5:
+ dev_info(dev, "480x272 RGB");
+ break;
+ case 0x6:
+ dev_info(dev, "640x480 RGB");
+ break;
+ case 0x7:
+ dev_info(dev, "800x480 RGB");
+ break;
+ default:
+ dev_info(dev, "ILLEGAL RESOLUTION");
+ break;
+ }
+
+ val = tpg110_read_reg(0x03);
+ dev_info(dev, "resolution is controlled by %s\n",
+ (val & BIT(7)) ? "software" : "hardware");
+}
+
+static void tpg110_enable(struct clcd_fb *fb)
+{
+ struct device *dev = &fb->dev->dev;
+ static bool startup;
+ u8 val;
+
+ if (!startup) {
+ tpg110_startup(dev);
+ startup = true;
+ }
+
+ /* Take chip out of standby */
+ val = tpg110_read_reg(0x03);
+ val |= BIT(0);
+ tpg110_write_reg(0x03, val);
+}
+
+static void tpg110_disable(struct clcd_fb *fb)
+{
+ u8 val;
+
+ dev_info(&fb->dev->dev, "TPG110 display disable\n");
+ val = tpg110_read_reg(0x03);
+ /* Put into standby */
+ val &= ~BIT(0);
+ tpg110_write_reg(0x03, val);
+}
+
+static void tpg110_init(struct device *dev, struct device_node *np,
+ struct clcd_board *board)
+{
+ dev_info(dev, "TPG110 display init\n");
+
+ grestb = devm_get_gpiod_from_child(dev, "grestb", &np->fwnode);
+ if (IS_ERR(grestb)) {
+ dev_err(dev, "no GRESTB GPIO\n");
+ return;
+ }
+ /* This asserts the GRESTB signal, putting the display into reset */
+ gpiod_direction_output(grestb, 1);
+
+ scen = devm_get_gpiod_from_child(dev, "scen", &np->fwnode);
+ if (IS_ERR(scen)) {
+ dev_err(dev, "no SCEN GPIO\n");
+ return;
+ }
+ gpiod_direction_output(scen, 0);
+ scl = devm_get_gpiod_from_child(dev, "scl", &np->fwnode);
+ if (IS_ERR(scl)) {
+ dev_err(dev, "no SCL GPIO\n");
+ return;
+ }
+ gpiod_direction_output(scl, 0);
+ sda = devm_get_gpiod_from_child(dev, "sda", &np->fwnode);
+ if (IS_ERR(sda)) {
+ dev_err(dev, "no SDA GPIO\n");
+ return;
+ }
+ gpiod_direction_output(sda, 0);
+ board->enable = tpg110_enable;
+ board->disable = tpg110_disable;
+}
+
+int nomadik_clcd_init_panel(struct clcd_fb *fb,
+ struct device_node *endpoint)
+{
+ struct device_node *panel;
+
+ panel = of_graph_get_remote_port_parent(endpoint);
+ if (!panel)
+ return -ENODEV;
+
+ if (of_device_is_compatible(panel, "tpo,tpg110"))
+ tpg110_init(&fb->dev->dev, panel, fb->board);
+ else
+ dev_info(&fb->dev->dev, "unknown panel\n");
+
+ /* Unknown panels are tolerated: fall through and return success */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nomadik_clcd_init_panel);
+
+#define PMU_CTRL_OFFSET 0x0000
+#define PMU_CTRL_LCDNDIF BIT(26)
+
+int nomadik_clcd_init_board(struct amba_device *adev,
+ struct clcd_board *board)
+{
+ struct regmap *pmu_regmap;
+
+ dev_info(&adev->dev, "Nomadik CLCD board init\n");
+ pmu_regmap =
+ syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
+ if (IS_ERR(pmu_regmap)) {
+ dev_err(&adev->dev, "could not find PMU syscon regmap\n");
+ return PTR_ERR(pmu_regmap);
+ }
+ regmap_update_bits(pmu_regmap,
+ PMU_CTRL_OFFSET,
+ PMU_CTRL_LCDNDIF,
+ 0);
+ dev_info(&adev->dev, "set PMU mux to CLCD mode\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nomadik_clcd_init_board);
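The TPG110 accessors in this new file bit-bang a 3-wire bus: a 6-bit address
(MSB first), one read/write bit, a high-impedance turnaround cycle on reads,
then 8 data bits, with every bit framed by an SCL pulse. The recurring pulse
could be factored into a helper; a hypothetical refactoring sketch using the
timings from the driver:

    /* One SCL pulse: the data line is set up, then clocked out/in. */
    static void tpg110_scl_pulse(struct gpio_desc *scl_gpio)
    {
            ndelay(150);    /* data setup time */
            gpiod_set_value_cansleep(scl_gpio, 1);
            ndelay(160);    /* clock high phase */
            gpiod_set_value_cansleep(scl_gpio, 0);
            ndelay(160);    /* clock low phase */
    }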
diff --git a/drivers/video/fbdev/amba-clcd-nomadik.h b/drivers/video/fbdev/amba-clcd-nomadik.h
new file mode 100644
index 000000000000..50aa9bda69fd
--- /dev/null
+++ b/drivers/video/fbdev/amba-clcd-nomadik.h
@@ -0,0 +1,24 @@
+#ifndef _AMBA_CLCD_NOMADIK_H
+#define _AMBA_CLCD_NOMADIK_H
+
+#include <linux/amba/bus.h>
+
+#ifdef CONFIG_ARCH_NOMADIK
+int nomadik_clcd_init_board(struct amba_device *adev,
+ struct clcd_board *board);
+int nomadik_clcd_init_panel(struct clcd_fb *fb,
+ struct device_node *endpoint);
+#else
+static inline int nomadik_clcd_init_board(struct amba_device *adev,
+ struct clcd_board *board)
+{
+ return 0;
+}
+static inline int nomadik_clcd_init_panel(struct clcd_fb *fb,
+ struct device_node *endpoint)
+{
+ return 0;
+}
+#endif
+
+#endif /* inclusion guard */
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c
index a8a22daa3f9d..e5d9bfc1703a 100644
--- a/drivers/video/fbdev/amba-clcd-versatile.c
+++ b/drivers/video/fbdev/amba-clcd-versatile.c
@@ -3,6 +3,12 @@
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/platform_data/video-clcd-versatile.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include "amba-clcd-versatile.h"
static struct clcd_panel vga = {
.mode = {
@@ -178,3 +184,392 @@ void versatile_clcd_remove_dma(struct clcd_fb *fb)
dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
fb->fb.fix.smem_start);
}
+
+#ifdef CONFIG_OF
+
+static struct regmap *versatile_syscon_map;
+static struct regmap *versatile_ib2_map;
+
+/*
+ * We detect the different syscon types from the compatible strings.
+ */
+enum versatile_clcd {
+ INTEGRATOR_CLCD_CM,
+ VERSATILE_CLCD,
+ REALVIEW_CLCD_EB,
+ REALVIEW_CLCD_PB1176,
+ REALVIEW_CLCD_PB11MP,
+ REALVIEW_CLCD_PBA8,
+ REALVIEW_CLCD_PBX,
+};
+
+static const struct of_device_id versatile_clcd_of_match[] = {
+ {
+ .compatible = "arm,core-module-integrator",
+ .data = (void *)INTEGRATOR_CLCD_CM,
+ },
+ {
+ .compatible = "arm,versatile-sysreg",
+ .data = (void *)VERSATILE_CLCD,
+ },
+ {
+ .compatible = "arm,realview-eb-syscon",
+ .data = (void *)REALVIEW_CLCD_EB,
+ },
+ {
+ .compatible = "arm,realview-pb1176-syscon",
+ .data = (void *)REALVIEW_CLCD_PB1176,
+ },
+ {
+ .compatible = "arm,realview-pb11mp-syscon",
+ .data = (void *)REALVIEW_CLCD_PB11MP,
+ },
+ {
+ .compatible = "arm,realview-pba8-syscon",
+ .data = (void *)REALVIEW_CLCD_PBA8,
+ },
+ {
+ .compatible = "arm,realview-pbx-syscon",
+ .data = (void *)REALVIEW_CLCD_PBX,
+ },
+ {},
+};
+
+/*
+ * Core module CLCD control on the Integrator/CP: bits
+ * 8 through 19 of the CM_CONTROL register control a number
+ * of CLCD settings.
+ */
+#define INTEGRATOR_HDR_CTRL_OFFSET 0x0C
+#define INTEGRATOR_CLCD_LCDBIASEN BIT(8)
+#define INTEGRATOR_CLCD_LCDBIASUP BIT(9)
+#define INTEGRATOR_CLCD_LCDBIASDN BIT(10)
+/* Bits 11,12,13 controls the LCD type */
+#define INTEGRATOR_CLCD_LCDMUX_MASK (BIT(11)|BIT(12)|BIT(13))
+#define INTEGRATOR_CLCD_LCDMUX_LCD24 BIT(11)
+#define INTEGRATOR_CLCD_LCDMUX_VGA565 BIT(12)
+#define INTEGRATOR_CLCD_LCDMUX_SHARP (BIT(11)|BIT(12))
+#define INTEGRATOR_CLCD_LCDMUX_VGA555 BIT(13)
+#define INTEGRATOR_CLCD_LCDMUX_VGA24 (BIT(11)|BIT(12)|BIT(13))
+#define INTEGRATOR_CLCD_LCD0_EN BIT(14)
+#define INTEGRATOR_CLCD_LCD1_EN BIT(15)
+/* R/L flip on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC1 BIT(16)
+/* U/D flip on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC2 BIT(17)
+/* No connection on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC BIT(18)
+/* 0 = 24bit VGA, 1 = 18bit VGA */
+#define INTEGRATOR_CLCD_LCD_N24BITEN BIT(19)
+
+#define INTEGRATOR_CLCD_MASK (INTEGRATOR_CLCD_LCDBIASEN | \
+ INTEGRATOR_CLCD_LCDBIASUP | \
+ INTEGRATOR_CLCD_LCDBIASDN | \
+ INTEGRATOR_CLCD_LCDMUX_MASK | \
+ INTEGRATOR_CLCD_LCD0_EN | \
+ INTEGRATOR_CLCD_LCD1_EN | \
+ INTEGRATOR_CLCD_LCD_STATIC1 | \
+ INTEGRATOR_CLCD_LCD_STATIC2 | \
+ INTEGRATOR_CLCD_LCD_STATIC | \
+ INTEGRATOR_CLCD_LCD_N24BITEN)
+
+static void integrator_clcd_enable(struct clcd_fb *fb)
+{
+ struct fb_var_screeninfo *var = &fb->fb.var;
+ u32 val;
+
+ dev_info(&fb->dev->dev, "enable Integrator CLCD connectors\n");
+
+ /* FIXME: really needed? */
+ val = INTEGRATOR_CLCD_LCD_STATIC1 | INTEGRATOR_CLCD_LCD_STATIC2 |
+ INTEGRATOR_CLCD_LCD0_EN | INTEGRATOR_CLCD_LCD1_EN;
+ if (var->bits_per_pixel <= 8 ||
+ (var->bits_per_pixel == 16 && var->green.length == 5))
+ /* Pseudocolor, RGB555, BGR555 */
+ val |= INTEGRATOR_CLCD_LCDMUX_VGA555;
+ else if (fb->fb.var.bits_per_pixel <= 16)
+ /* truecolor RGB565 */
+ val |= INTEGRATOR_CLCD_LCDMUX_VGA565;
+ else
+ val = 0; /* no idea for this, don't trust the docs */
+
+ regmap_update_bits(versatile_syscon_map,
+ INTEGRATOR_HDR_CTRL_OFFSET,
+ INTEGRATOR_CLCD_MASK,
+ val);
+}
+
+/*
+ * This configuration register in the Versatile and RealView
+ * family is uniformly present but increasingly unused
+ * starting with the RealView series.
+ */
+#define SYS_CLCD 0x50
+#define SYS_CLCD_MODE_MASK (BIT(0)|BIT(1))
+#define SYS_CLCD_MODE_888 0
+#define SYS_CLCD_MODE_5551 BIT(0)
+#define SYS_CLCD_MODE_565_R_LSB BIT(1)
+#define SYS_CLCD_MODE_565_B_LSB (BIT(0)|BIT(1))
+#define SYS_CLCD_CONNECTOR_MASK (BIT(2)|BIT(3)|BIT(4)|BIT(5))
+#define SYS_CLCD_NLCDIOON BIT(2)
+#define SYS_CLCD_VDDPOSSWITCH BIT(3)
+#define SYS_CLCD_PWR3V5SWITCH BIT(4)
+#define SYS_CLCD_VDDNEGSWITCH BIT(5)
+#define SYS_CLCD_TSNSS BIT(6) /* touchscreen enable */
+#define SYS_CLCD_SSPEXP BIT(7) /* SSP expansion enable */
+
+/* The Versatile can detect the connected panel type */
+#define SYS_CLCD_CLCDID_MASK (BIT(8)|BIT(9)|BIT(10)|BIT(11)|BIT(12))
+#define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8)
+#define SYS_CLCD_ID_SHARP_8_4 (0x01 << 8)
+#define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8)
+#define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8)
+#define SYS_CLCD_ID_VGA (0x1f << 8)
+
+#define SYS_CLCD_TSNDAV BIT(13) /* data ready from TS */
+
+/* IB2 control register for the Versatile daughterboard */
+#define IB2_CTRL 0x00
+#define IB2_CTRL_LCD_SD BIT(1) /* 1 = shut down LCD */
+#define IB2_CTRL_LCD_BL_ON BIT(0)
+#define IB2_CTRL_LCD_MASK (BIT(0)|BIT(1))
+
+static void versatile_clcd_disable(struct clcd_fb *fb)
+{
+ dev_info(&fb->dev->dev, "disable Versatile CLCD connectors\n");
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ 0);
+
+ /* If we're on an IB2 daughterboard, turn off display */
+ if (versatile_ib2_map) {
+ dev_info(&fb->dev->dev, "disable IB2 display\n");
+ regmap_update_bits(versatile_ib2_map,
+ IB2_CTRL,
+ IB2_CTRL_LCD_MASK,
+ IB2_CTRL_LCD_SD);
+ }
+}
+
+static void versatile_clcd_enable(struct clcd_fb *fb)
+{
+ struct fb_var_screeninfo *var = &fb->fb.var;
+ u32 val = 0;
+
+ dev_info(&fb->dev->dev, "enable Versatile CLCD connectors\n");
+ switch (var->green.length) {
+ case 5:
+ val |= SYS_CLCD_MODE_5551;
+ break;
+ case 6:
+ if (var->red.offset == 0)
+ val |= SYS_CLCD_MODE_565_R_LSB;
+ else
+ val |= SYS_CLCD_MODE_565_B_LSB;
+ break;
+ case 8:
+ val |= SYS_CLCD_MODE_888;
+ break;
+ }
+
+ /* Set up the MUX */
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_MODE_MASK,
+ val);
+
+ /* Then enable the display */
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
+
+ /* If we're on an IB2 daughterboard, turn on display */
+ if (versatile_ib2_map) {
+ dev_info(&fb->dev->dev, "enable IB2 display\n");
+ regmap_update_bits(versatile_ib2_map,
+ IB2_CTRL,
+ IB2_CTRL_LCD_MASK,
+ IB2_CTRL_LCD_BL_ON);
+ }
+}
+
+static void versatile_clcd_decode(struct clcd_fb *fb, struct clcd_regs *regs)
+{
+ clcdfb_decode(fb, regs);
+
+ /* Always clear BGR for RGB565: we do the routing externally */
+ if (fb->fb.var.green.length == 6)
+ regs->cntl &= ~CNTL_BGR;
+}
+
+static void realview_clcd_disable(struct clcd_fb *fb)
+{
+ dev_info(&fb->dev->dev, "disable RealView CLCD connectors\n");
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ 0);
+}
+
+static void realview_clcd_enable(struct clcd_fb *fb)
+{
+ dev_info(&fb->dev->dev, "enable RealView CLCD connectors\n");
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
+}
+
+struct versatile_panel {
+ u32 id;
+ char *compatible;
+ bool ib2;
+};
+
+static const struct versatile_panel versatile_panels[] = {
+ {
+ .id = SYS_CLCD_ID_VGA,
+ .compatible = "VGA",
+ },
+ {
+ .id = SYS_CLCD_ID_SANYO_3_8,
+ .compatible = "sanyo,tm38qv67a02a",
+ },
+ {
+ .id = SYS_CLCD_ID_SHARP_8_4,
+ .compatible = "sharp,lq084v1dg21",
+ },
+ {
+ .id = SYS_CLCD_ID_EPSON_2_2,
+ .compatible = "epson,l2f50113t00",
+ },
+ {
+ .id = SYS_CLCD_ID_SANYO_2_5,
+ .compatible = "sanyo,alr252rgt",
+ .ib2 = true,
+ },
+};
+
+static void versatile_panel_probe(struct device *dev,
+ struct device_node *endpoint)
+{
+ struct versatile_panel const *vpanel = NULL;
+ struct device_node *panel = NULL;
+ u32 val;
+ int ret;
+ int i;
+
+ /*
+ * The Versatile CLCD has a panel auto-detection mechanism.
+ * We use it to look up the compatible panel in the
+ * device tree.
+ */
+ ret = regmap_read(versatile_syscon_map, SYS_CLCD, &val);
+ if (ret) {
+ dev_err(dev, "cannot read CLCD syscon register\n");
+ return;
+ }
+ val &= SYS_CLCD_CLCDID_MASK;
+
+ /* First find corresponding panel information */
+ for (i = 0; i < ARRAY_SIZE(versatile_panels); i++) {
+ vpanel = &versatile_panels[i];
+
+ if (val == vpanel->id) {
+ dev_err(dev, "autodetected panel \"%s\"\n",
+ vpanel->compatible);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(versatile_panels)) {
+ dev_err(dev, "could not auto-detect panel\n");
+ return;
+ }
+
+ panel = of_graph_get_remote_port_parent(endpoint);
+ if (!panel) {
+ dev_err(dev, "could not locate panel in DT\n");
+ return;
+ }
+ if (!of_device_is_compatible(panel, vpanel->compatible))
+ dev_err(dev, "panel in DT is not compatible with the "
+ "auto-detected panel, continuing anyway\n");
+
+ /*
+ * If we have a Sanyo 2.5" port
+ * that we're running on an IB2 and proceed to look for the
+ * IB2 syscon regmap.
+ */
+ if (!vpanel->ib2)
+ return;
+
+ versatile_ib2_map = syscon_regmap_lookup_by_compatible(
+ "arm,versatile-ib2-syscon");
+ if (IS_ERR(versatile_ib2_map)) {
+ dev_err(dev, "could not locate IB2 control register\n");
+ versatile_ib2_map = NULL;
+ return;
+ }
+}
+
+int versatile_clcd_init_panel(struct clcd_fb *fb,
+ struct device_node *endpoint)
+{
+ const struct of_device_id *clcd_id;
+ enum versatile_clcd versatile_clcd_type;
+ struct device_node *np;
+ struct regmap *map;
+ struct device *dev = &fb->dev->dev;
+
+ np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
+ &clcd_id);
+ if (!np) {
+ /* Vexpress does not have this */
+ return 0;
+ }
+ versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+
+ map = syscon_node_to_regmap(np);
+ if (IS_ERR(map)) {
+ dev_err(dev, "no Versatile syscon regmap\n");
+ return PTR_ERR(map);
+ }
+
+ switch (versatile_clcd_type) {
+ case INTEGRATOR_CLCD_CM:
+ versatile_syscon_map = map;
+ fb->board->enable = integrator_clcd_enable;
+ /* Override the caps, we have only these */
+ fb->board->caps = CLCD_CAP_5551 | CLCD_CAP_RGB565 |
+ CLCD_CAP_888;
+ dev_info(dev, "set up callbacks for Integrator PL110\n");
+ break;
+ case VERSATILE_CLCD:
+ versatile_syscon_map = map;
+ fb->board->enable = versatile_clcd_enable;
+ fb->board->disable = versatile_clcd_disable;
+ fb->board->decode = versatile_clcd_decode;
+ versatile_panel_probe(dev, endpoint);
+ dev_info(dev, "set up callbacks for Versatile\n");
+ break;
+ case REALVIEW_CLCD_EB:
+ case REALVIEW_CLCD_PB1176:
+ case REALVIEW_CLCD_PB11MP:
+ case REALVIEW_CLCD_PBA8:
+ case REALVIEW_CLCD_PBX:
+ versatile_syscon_map = map;
+ fb->board->enable = realview_clcd_enable;
+ fb->board->disable = realview_clcd_disable;
+ dev_info(dev, "set up callbacks for RealView PL111\n");
+ break;
+ default:
+ dev_info(dev, "unknown Versatile system controller\n");
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(versatile_clcd_init_panel);
+#endif
diff --git a/drivers/video/fbdev/amba-clcd-versatile.h b/drivers/video/fbdev/amba-clcd-versatile.h
new file mode 100644
index 000000000000..1b14359c2cf6
--- /dev/null
+++ b/drivers/video/fbdev/amba-clcd-versatile.h
@@ -0,0 +1,17 @@
+/*
+ * Special local versatile callbacks
+ */
+#include <linux/of.h>
+#include <linux/amba/bus.h>
+#include <linux/platform_data/video-clcd-versatile.h>
+
+#if defined(CONFIG_PLAT_VERSATILE_CLCD) && defined(CONFIG_OF)
+int versatile_clcd_init_panel(struct clcd_fb *fb,
+ struct device_node *endpoint);
+#else
+static inline int versatile_clcd_init_panel(struct clcd_fb *fb,
+ struct device_node *endpoint)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 9b158869cb89..ec2671d98abc 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -30,10 +30,14 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
+#include <linux/backlight.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
+#include "amba-clcd-nomadik.h"
+#include "amba-clcd-versatile.h"
+
#define to_clcd(info) container_of(info, struct clcd_fb, fb)
/* This is limited to 16 characters when displayed by X startup */
@@ -71,6 +75,11 @@ static void clcdfb_disable(struct clcd_fb *fb)
if (fb->board->disable)
fb->board->disable(fb);
+ if (fb->panel->backlight) {
+ fb->panel->backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(fb->panel->backlight);
+ }
+
val = readl(fb->regs + fb->off_cntl);
if (val & CNTL_LCDPWR) {
val &= ~CNTL_LCDPWR;
@@ -117,6 +126,14 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
writel(cntl, fb->regs + fb->off_cntl);
/*
+ * Turn on backlight
+ */
+ if (fb->panel->backlight) {
+ fb->panel->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(fb->panel->backlight);
+ }
+
+ /*
* finally, enable the interface.
*/
if (fb->board->enable)
@@ -211,6 +228,15 @@ clcdfb_set_bitfields(struct clcd_fb *fb, struct fb_var_screeninfo *var)
var->blue.length = 4;
}
break;
+ case 24:
+ if (fb->vendor->packed_24_bit_pixels) {
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
case 32:
/* If we can't do 888, reject */
caps &= CLCD_CAP_888;
@@ -297,6 +323,12 @@ static int clcdfb_set_par(struct fb_info *info)
clcdfb_disable(fb);
+ /* Some variants must be clocked here */
+ if (fb->vendor->clock_timregs && !fb->clk_enabled) {
+ fb->clk_enabled = true;
+ clk_enable(fb->clk);
+ }
+
writel(regs.tim0, fb->regs + CLCD_TIM0);
writel(regs.tim1, fb->regs + CLCD_TIM1);
writel(regs.tim2, fb->regs + CLCD_TIM2);
@@ -551,7 +583,7 @@ static int clcdfb_register(struct clcd_fb *fb)
#ifdef CONFIG_OF
static int clcdfb_of_get_dpi_panel_mode(struct device_node *node,
- struct fb_videomode *mode)
+ struct clcd_panel *clcd_panel)
{
int err;
struct display_timing timing;
@@ -563,10 +595,31 @@ static int clcdfb_of_get_dpi_panel_mode(struct device_node *node,
videomode_from_timing(&timing, &video);
- err = fb_videomode_from_videomode(&video, mode);
+ err = fb_videomode_from_videomode(&video, &clcd_panel->mode);
if (err)
return err;
+ /* Set up some inversion flags */
+ if (timing.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ clcd_panel->tim2 |= TIM2_IPC;
+ else if (!(timing.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE))
+ /*
+ * To preserve backwards compatibility, the IPC (inverted
+ * pixel clock) flag needs to be set on any display that
+ * doesn't explicitly specify that the pixel clock is
+ * active on the negative or positive edge.
+ */
+ clcd_panel->tim2 |= TIM2_IPC;
+
+ if (timing.flags & DISPLAY_FLAGS_HSYNC_LOW)
+ clcd_panel->tim2 |= TIM2_IHS;
+
+ if (timing.flags & DISPLAY_FLAGS_VSYNC_LOW)
+ clcd_panel->tim2 |= TIM2_IVS;
+
+ if (timing.flags & DISPLAY_FLAGS_DE_LOW)
+ clcd_panel->tim2 |= TIM2_IOE;
+
return 0;
}
@@ -576,11 +629,34 @@ static int clcdfb_snprintf_mode(char *buf, int size, struct fb_videomode *mode)
mode->refresh);
}
+static int clcdfb_of_get_backlight(struct device_node *endpoint,
+ struct clcd_panel *clcd_panel)
+{
+ struct device_node *panel;
+ struct device_node *backlight;
+
+ panel = of_graph_get_remote_port_parent(endpoint);
+ if (!panel)
+ return -ENODEV;
+
+ /* Look up the optional backlight phandle */
+ backlight = of_parse_phandle(panel, "backlight", 0);
+ if (backlight) {
+ clcd_panel->backlight = of_find_backlight_by_node(backlight);
+ of_node_put(backlight);
+
+ if (!clcd_panel->backlight)
+ return -EPROBE_DEFER;
+ }
+ return 0;
+}
+
static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint,
- struct fb_videomode *mode)
+ struct clcd_panel *clcd_panel)
{
int err;
struct device_node *panel;
+ struct fb_videomode *mode;
char *name;
int len;
@@ -590,11 +666,12 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint,
/* Only directly connected DPI panels supported for now */
if (of_device_is_compatible(panel, "panel-dpi"))
- err = clcdfb_of_get_dpi_panel_mode(panel, mode);
+ err = clcdfb_of_get_dpi_panel_mode(panel, clcd_panel);
else
err = -ENOENT;
if (err)
return err;
+ mode = &clcd_panel->mode;
len = clcdfb_snprintf_mode(NULL, 0, mode);
name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
@@ -616,6 +693,7 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
} panels[] = {
{ 0x110, 1, 7, 13, CLCD_CAP_5551 },
{ 0x110, 0, 8, 16, CLCD_CAP_888 },
+ { 0x110, 16, 8, 0, CLCD_CAP_888 },
{ 0x111, 4, 14, 20, CLCD_CAP_444 },
{ 0x111, 3, 11, 19, CLCD_CAP_444 | CLCD_CAP_5551 },
{ 0x111, 3, 10, 19, CLCD_CAP_444 | CLCD_CAP_5551 |
@@ -625,8 +703,8 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
};
int i;
- /* Bypass pixel clock divider, data output on the falling edge */
- fb->panel->tim2 = TIM2_BCD | TIM2_IPC;
+ /* Bypass pixel clock divider */
+ fb->panel->tim2 |= TIM2_BCD;
/* TFT display, vert. comp. interrupt at the start of the back porch */
fb->panel->cntl |= CNTL_LCDTFT | CNTL_LCDVCOMP(1);
@@ -643,6 +721,49 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
fb->panel->caps = panels[i].caps;
}
+ /*
+ * Detect whether the R lines are physically wired to B and
+ * vice versa (a BGR connection)
+ */
+ if (r0 != 0 && b0 == 0)
+ fb->panel->bgr_connection = true;
+
+ if (fb->panel->caps && fb->vendor->st_bitmux_control) {
+ /*
+ * Set up the special bits for the Nomadik control register
+ * (other platforms tend to do this through an external
+ * register).
+ */
+
+ /* Offset of the highest used color */
+ int maxoff = max3(r0, g0, b0);
+ /* Most significant bit out, highest used bit */
+ int msb = 0;
+
+ if (fb->panel->caps & CLCD_CAP_888) {
+ msb = maxoff + 8 - 1;
+ } else if (fb->panel->caps & CLCD_CAP_565) {
+ msb = maxoff + 5 - 1;
+ fb->panel->cntl |= CNTL_ST_1XBPP_565;
+ } else if (fb->panel->caps & CLCD_CAP_5551) {
+ msb = maxoff + 5 - 1;
+ fb->panel->cntl |= CNTL_ST_1XBPP_5551;
+ } else if (fb->panel->caps & CLCD_CAP_444) {
+ msb = maxoff + 4 - 1;
+ fb->panel->cntl |= CNTL_ST_1XBPP_444;
+ }
+
+ /* Send out as many bits as we need */
+ if (msb > 17)
+ fb->panel->cntl |= CNTL_ST_CDWID_24;
+ else if (msb > 15)
+ fb->panel->cntl |= CNTL_ST_CDWID_18;
+ else if (msb > 11)
+ fb->panel->cntl |= CNTL_ST_CDWID_16;
+ else
+ fb->panel->cntl |= CNTL_ST_CDWID_12;
+ }
+
return fb->panel->caps ? 0 : -EINVAL;
}
@@ -658,11 +779,24 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
if (!fb->panel)
return -ENOMEM;
+ /*
+ * Fetch the panel endpoint.
+ */
endpoint = of_graph_get_next_endpoint(fb->dev->dev.of_node, NULL);
if (!endpoint)
return -ENODEV;
- err = clcdfb_of_get_mode(&fb->dev->dev, endpoint, &fb->panel->mode);
+ if (fb->vendor->init_panel) {
+ err = fb->vendor->init_panel(fb, endpoint);
+ if (err)
+ return err;
+ }
+
+ err = clcdfb_of_get_backlight(endpoint, fb->panel);
+ if (err)
+ return err;
+
+ err = clcdfb_of_get_mode(&fb->dev->dev, endpoint, fb->panel);
if (err)
return err;
@@ -693,11 +827,11 @@ static int clcdfb_of_init_display(struct clcd_fb *fb)
if (of_property_read_u32_array(endpoint,
"arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) == 0)
- return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
- tft_r0b0g0[1], tft_r0b0g0[2]);
+ tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) != 0)
+ return -ENOENT;
- return -ENOENT;
+ return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
+ tft_r0b0g0[1], tft_r0b0g0[2]);
}
static int clcdfb_of_vram_setup(struct clcd_fb *fb)
@@ -818,6 +952,7 @@ static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev)
static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clcd_board *board = dev_get_platdata(&dev->dev);
+ struct clcd_vendor_data *vendor = id->data;
struct clcd_fb *fb;
int ret;
@@ -827,6 +962,12 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
if (!board)
return -EINVAL;
+ if (vendor->init_board) {
+ ret = vendor->init_board(dev, board);
+ if (ret)
+ return ret;
+ }
+
ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
if (ret)
goto out;
@@ -845,17 +986,18 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
}
fb->dev = dev;
+ fb->vendor = vendor;
fb->board = board;
- dev_info(&fb->dev->dev, "PL%03x rev%u at 0x%08llx\n",
- amba_part(dev), amba_rev(dev),
+ dev_info(&fb->dev->dev, "PL%03x designer %02x rev%u at 0x%08llx\n",
+ amba_part(dev), amba_manf(dev), amba_rev(dev),
(unsigned long long)dev->res.start);
ret = fb->board->setup(fb);
if (ret)
goto free_fb;
- ret = clcdfb_register(fb);
+ ret = clcdfb_register(fb);
if (ret == 0) {
amba_set_drvdata(dev, fb);
goto out;
@@ -891,10 +1033,30 @@ static int clcdfb_remove(struct amba_device *dev)
return 0;
}
+static struct clcd_vendor_data vendor_arm = {
+ /* Sets up the versatile board displays */
+ .init_panel = versatile_clcd_init_panel,
+};
+
+static struct clcd_vendor_data vendor_nomadik = {
+ .clock_timregs = true,
+ .packed_24_bit_pixels = true,
+ .st_bitmux_control = true,
+ .init_board = nomadik_clcd_init_board,
+ .init_panel = nomadik_clcd_init_panel,
+};
+
static struct amba_id clcdfb_id_table[] = {
{
.id = 0x00041110,
.mask = 0x000ffffe,
+ .data = &vendor_arm,
+ },
+ /* ST Electronics Nomadik variant */
+ {
+ .id = 0x00180110,
+ .mask = 0x00fffffe,
+ .data = &vendor_nomadik,
},
{ 0, 0 },
};
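The reworked amba-clcd.c keys per-variant behaviour off the AMBA ID table:
each entry's .data points at a clcd_vendor_data carrying the variant's quirks
(packed 24bpp, ST bit-mux control) and init hooks, and probe() picks it up
from the matched entry. The pattern in isolation (identifiers reused from the
diff, error handling trimmed):

    static const struct amba_id example_ids[] = {
            { .id = 0x00041110, .mask = 0x000ffffe, .data = &vendor_arm },
            { .id = 0x00180110, .mask = 0x00fffffe, .data = &vendor_nomadik },
            { 0, 0 },
    };

    static int example_probe(struct amba_device *adev, const struct amba_id *id)
    {
            struct clcd_vendor_data *vendor = id->data; /* variant hooks */

            if (vendor->init_board)
                    return vendor->init_board(adev, dev_get_platdata(&adev->dev));
            return 0;
    }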
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index 1b0b233b8b39..1928cb2b5386 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -79,7 +79,7 @@ struct arcfb_par {
spinlock_t lock;
};
-static struct fb_fix_screeninfo arcfb_fix = {
+static const struct fb_fix_screeninfo arcfb_fix = {
.id = "arcfb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO01,
@@ -89,7 +89,7 @@ static struct fb_fix_screeninfo arcfb_fix = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo arcfb_var = {
+static const struct fb_var_screeninfo arcfb_var = {
.xres = 128,
.yres = 64,
.xres_virtual = 128,
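This constification, and the matching ones below for asiliantfb, aty128fb,
atyfb and radeon_monitor, all follow the same reasoning: the static
screeninfo structs are templates that get copied into each dynamically
allocated fb_info and are never written through, so marking them const lets
them live in .rodata. Typical use, sketched:

    static const struct fb_var_screeninfo example_var = {
            .xres = 128,
            .yres = 64,
    };

    /* probe(): copy the read-only template into the live fb_info */
    info->var = example_var;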
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index 7e8ddf00ccc2..91eea4583382 100644
--- a/drivers/video/fbdev/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
@@ -474,7 +474,7 @@ static void chips_hw_init(struct fb_info *p)
write_fr(chips_init_fr[i].addr, chips_init_fr[i].data);
}
-static struct fb_fix_screeninfo asiliantfb_fix = {
+static const struct fb_fix_screeninfo asiliantfb_fix = {
.id = "Asiliant 69000",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -483,7 +483,7 @@ static struct fb_fix_screeninfo asiliantfb_fix = {
.smem_len = 0x200000, /* 2MB */
};
-static struct fb_var_screeninfo asiliantfb_var = {
+static const struct fb_var_screeninfo asiliantfb_var = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index 0a4626886b00..fa07242a78d2 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -93,7 +93,7 @@
#ifndef CONFIG_PPC_PMAC
/* default mode */
-static struct fb_var_screeninfo default_var = {
+static const struct fb_var_screeninfo default_var = {
/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
640, 480, 640, 480, 0, 0, 8, 0,
{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
@@ -104,7 +104,7 @@ static struct fb_var_screeninfo default_var = {
#else /* CONFIG_PPC_PMAC */
/* default to 1024x768 at 75Hz on PPC - this will work
* on the iMac, the usual 640x480 @ 60Hz doesn't. */
-static struct fb_var_screeninfo default_var = {
+static const struct fb_var_screeninfo default_var = {
/* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */
1024, 768, 1024, 768, 0, 0, 8, 0,
{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
@@ -375,7 +375,7 @@ static const struct aty128_meminfo ddr_sgram = {
.name = "64-bit DDR SGRAM",
};
-static struct fb_fix_screeninfo aty128fb_fix = {
+static const struct fb_fix_screeninfo aty128fb_fix = {
.id = "ATY Rage128",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index f34ed47fcaf8..11026e726b68 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -212,7 +212,7 @@ struct pci_mmap_map {
unsigned long prot_mask;
};
-static struct fb_fix_screeninfo atyfb_fix = {
+static const struct fb_fix_screeninfo atyfb_fix = {
.id = "ATY Mach64",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
diff --git a/drivers/video/fbdev/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c
index f1ce229de78d..278b421ab3fe 100644
--- a/drivers/video/fbdev/aty/radeon_monitor.c
+++ b/drivers/video/fbdev/aty/radeon_monitor.c
@@ -4,7 +4,7 @@
#include "../edid.h"
-static struct fb_var_screeninfo radeonfb_default_var = {
+static const struct fb_var_screeninfo radeonfb_default_var = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index f9507b1894df..6c2b2ca4a909 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -43,6 +43,7 @@
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1200fb.h> /* platform_data */
diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c
index e2d7d039ce3b..542ffaddc6ab 100644
--- a/drivers/video/fbdev/bfin_adv7393fb.c
+++ b/drivers/video/fbdev/bfin_adv7393fb.c
@@ -375,7 +375,6 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client,
{
int ret = 0;
struct proc_dir_entry *entry;
- int num_modes = ARRAY_SIZE(known_modes);
struct adv7393fb_device *fbdev = NULL;
@@ -384,7 +383,7 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client,
return -EINVAL;
}
- if (mode > num_modes) {
+ if (mode >= ARRAY_SIZE(known_modes)) {
dev_err(&client->dev, "mode %d: not supported", mode);
return -EFAULT;
}
@@ -797,7 +796,7 @@ static struct i2c_driver bfin_adv7393_fb_driver = {
static int __init bfin_adv7393_fb_driver_init(void)
{
-#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+#if IS_ENABLED(CONFIG_I2C_BLACKFIN_TWI)
request_module("i2c-bfin-twi");
#else
request_module("i2c-gpio");
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 07675d6f323e..2d3b691f3fc4 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -63,7 +63,6 @@
#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
-#ifdef CONFIG_MIPS_COBALT
static inline void lcd_write_control(struct fb_info *info, u8 control)
{
writel((u32)control << 24, info->screen_base);
@@ -83,47 +82,6 @@ static inline u8 lcd_read_data(struct fb_info *info)
{
return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
}
-#else
-
-#define LCD_CTL 0x00
-#define LCD_DATA 0x08
-#define CPLD_STATUS 0x10
-#define CPLD_DATA 0x18
-
-static inline void cpld_wait(struct fb_info *info)
-{
- do {
- } while (readl(info->screen_base + CPLD_STATUS) & 1);
-}
-
-static inline void lcd_write_control(struct fb_info *info, u8 control)
-{
- cpld_wait(info);
- writel(control, info->screen_base + LCD_CTL);
-}
-
-static inline u8 lcd_read_control(struct fb_info *info)
-{
- cpld_wait(info);
- readl(info->screen_base + LCD_CTL);
- cpld_wait(info);
- return readl(info->screen_base + CPLD_DATA) & 0xff;
-}
-
-static inline void lcd_write_data(struct fb_info *info, u8 data)
-{
- cpld_wait(info);
- writel(data, info->screen_base + LCD_DATA);
-}
-
-static inline u8 lcd_read_data(struct fb_info *info)
-{
- cpld_wait(info);
- readl(info->screen_base + LCD_DATA);
- cpld_wait(info);
- return readl(info->screen_base + CPLD_DATA) & 0xff;
-}
-#endif
static int lcd_busy_wait(struct fb_info *info)
{
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 924bad45c176..37a37c4d04cb 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -50,9 +50,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
if (regno < 16) {
- red >>= 8;
- green >>= 8;
- blue >>= 8;
+ red >>= 16 - info->var.red.length;
+ green >>= 16 - info->var.green.length;
+ blue >>= 16 - info->var.blue.length;
((u32 *)(info->pseudo_palette))[regno] =
(red << info->var.red.offset) |
(green << info->var.green.offset) |
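The efifb fix scales the 16-bit color components that fbdev passes to
setcolreg() down to the visual's actual channel widths instead of assuming 8
bits per channel. For example, with an RGB565 layout (lengths 5/6/5 at
offsets 11/5/0) the red shift is 16 - 5 = 11; a worked sketch under that
assumed layout:

    /* Pack 16-bit-per-channel color into RGB565 exactly as the
     * corrected setcolreg() arithmetic does. */
    static u32 pack_rgb565(u16 red, u16 green, u16 blue)
    {
            return ((u32)(red   >> (16 - 5)) << 11) |
                   ((u32)(green >> (16 - 6)) <<  5) |
                   ((u32)(blue  >> (16 - 5)) <<  0);
    }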
diff --git a/drivers/video/fbdev/exynos/Kconfig b/drivers/video/fbdev/exynos/Kconfig
deleted file mode 100644
index d916bef94f25..000000000000
--- a/drivers/video/fbdev/exynos/Kconfig
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Exynos Video configuration
-#
-
-menuconfig EXYNOS_VIDEO
- tristate "Exynos Video driver support"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
- help
- This enables support for EXYNOS Video device.
-
-if EXYNOS_VIDEO
-
-#
-# MIPI DSI driver
-#
-
-config EXYNOS_MIPI_DSI
- tristate "EXYNOS MIPI DSI driver support."
- select GENERIC_PHY
- help
- This enables support for MIPI-DSI device.
-
-config EXYNOS_LCD_S6E8AX0
- tristate "S6E8AX0 MIPI AMOLED LCD Driver"
- depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
- depends on (LCD_CLASS_DEVICE = y)
- default n
- help
- If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
- LCD control driver.
-
-endif # EXYNOS_VIDEO
diff --git a/drivers/video/fbdev/exynos/Makefile b/drivers/video/fbdev/exynos/Makefile
deleted file mode 100644
index 02d8dc522fea..000000000000
--- a/drivers/video/fbdev/exynos/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for the exynos video drivers.
-#
-
-obj-$(CONFIG_EXYNOS_MIPI_DSI) += exynos-mipi-dsi-mod.o
-
-exynos-mipi-dsi-mod-objs += exynos_mipi_dsi.o exynos_mipi_dsi_common.o \
- exynos_mipi_dsi_lowlevel.o
-obj-$(CONFIG_EXYNOS_LCD_S6E8AX0) += s6e8ax0.o
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
deleted file mode 100644
index 92e4af3caaf8..000000000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
+++ /dev/null
@@ -1,574 +0,0 @@
-/* linux/drivers/video/exynos/exynos_mipi_dsi.c
- *
- * Samsung SoC MIPI-DSIM driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/ctype.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/memory.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/kthread.h>
-#include <linux/notifier.h>
-#include <linux/phy/phy.h>
-#include <linux/regulator/consumer.h>
-#include <linux/pm_runtime.h>
-#include <linux/err.h>
-
-#include <video/exynos_mipi_dsim.h>
-
-#include "exynos_mipi_dsi_common.h"
-#include "exynos_mipi_dsi_lowlevel.h"
-
-struct mipi_dsim_ddi {
- int bus_id;
- struct list_head list;
- struct mipi_dsim_lcd_device *dsim_lcd_dev;
- struct mipi_dsim_lcd_driver *dsim_lcd_drv;
-};
-
-static LIST_HEAD(dsim_ddi_list);
-
-static DEFINE_MUTEX(mipi_dsim_lock);
-
-static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device
- *pdev)
-{
- return pdev->dev.platform_data;
-}
-
-static struct regulator_bulk_data supplies[] = {
- { .supply = "vdd11", },
- { .supply = "vdd18", },
-};
-
-static int exynos_mipi_regulator_enable(struct mipi_dsim_device *dsim)
-{
- int ret;
-
- mutex_lock(&dsim->lock);
- ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
- mutex_unlock(&dsim->lock);
-
- return ret;
-}
-
-static int exynos_mipi_regulator_disable(struct mipi_dsim_device *dsim)
-{
- int ret;
-
- mutex_lock(&dsim->lock);
- ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
- mutex_unlock(&dsim->lock);
-
- return ret;
-}
-
-/* update all register settings to MIPI DSI controller. */
-static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim)
-{
- /*
- * data from Display controller(FIMD) is not transferred in video mode
- * but in case of command mode, all settings is not updated to
- * registers.
- */
- exynos_mipi_dsi_stand_by(dsim, 0);
-
- exynos_mipi_dsi_init_dsim(dsim);
- exynos_mipi_dsi_init_link(dsim);
-
- exynos_mipi_dsi_set_hs_enable(dsim);
-
- /* set display timing. */
- exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config);
-
- exynos_mipi_dsi_init_interrupt(dsim);
-
- /*
- * data from Display controller(FIMD) is transferred in video mode
- * but in case of command mode, all settings are updated to registers.
- */
- exynos_mipi_dsi_stand_by(dsim, 1);
-}
-
-static int exynos_mipi_dsi_early_blank_mode(struct mipi_dsim_device *dsim,
- int power)
-{
- struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
- struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
-
- switch (power) {
- case FB_BLANK_POWERDOWN:
- if (dsim->suspended)
- return 0;
-
- if (client_drv && client_drv->suspend)
- client_drv->suspend(client_dev);
-
- clk_disable(dsim->clock);
-
- exynos_mipi_regulator_disable(dsim);
-
- dsim->suspended = true;
-
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power)
-{
- struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
- struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
-
- switch (power) {
- case FB_BLANK_UNBLANK:
- if (!dsim->suspended)
- return 0;
-
- /* lcd panel power on. */
- if (client_drv && client_drv->power_on)
- client_drv->power_on(client_dev, 1);
-
- exynos_mipi_regulator_enable(dsim);
-
- /* enable MIPI-DSI PHY. */
- phy_power_on(dsim->phy);
-
- clk_enable(dsim->clock);
-
- exynos_mipi_update_cfg(dsim);
-
- /* set lcd panel sequence commands. */
- if (client_drv && client_drv->set_sequence)
- client_drv->set_sequence(client_dev);
-
- dsim->suspended = false;
-
- break;
- case FB_BLANK_NORMAL:
- /* TODO. */
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-int exynos_mipi_dsi_register_lcd_device(struct mipi_dsim_lcd_device *lcd_dev)
-{
- struct mipi_dsim_ddi *dsim_ddi;
-
- if (!lcd_dev->name) {
- pr_err("dsim_lcd_device name is NULL.\n");
- return -EFAULT;
- }
-
- dsim_ddi = kzalloc(sizeof(struct mipi_dsim_ddi), GFP_KERNEL);
- if (!dsim_ddi) {
- pr_err("failed to allocate dsim_ddi object.\n");
- return -ENOMEM;
- }
-
- dsim_ddi->dsim_lcd_dev = lcd_dev;
-
- mutex_lock(&mipi_dsim_lock);
- list_add_tail(&dsim_ddi->list, &dsim_ddi_list);
- mutex_unlock(&mipi_dsim_lock);
-
- return 0;
-}
-
-static struct mipi_dsim_ddi *exynos_mipi_dsi_find_lcd_device(
- struct mipi_dsim_lcd_driver *lcd_drv)
-{
- struct mipi_dsim_ddi *dsim_ddi, *next;
- struct mipi_dsim_lcd_device *lcd_dev;
-
- mutex_lock(&mipi_dsim_lock);
-
- list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) {
- if (!dsim_ddi)
- goto out;
-
- lcd_dev = dsim_ddi->dsim_lcd_dev;
- if (!lcd_dev)
- continue;
-
- if ((strcmp(lcd_drv->name, lcd_dev->name)) == 0) {
- /**
- * bus_id would be used to identify
- * connected bus.
- */
- dsim_ddi->bus_id = lcd_dev->bus_id;
- mutex_unlock(&mipi_dsim_lock);
-
- return dsim_ddi;
- }
-
- list_del(&dsim_ddi->list);
- kfree(dsim_ddi);
- }
-
-out:
- mutex_unlock(&mipi_dsim_lock);
-
- return NULL;
-}
-
-int exynos_mipi_dsi_register_lcd_driver(struct mipi_dsim_lcd_driver *lcd_drv)
-{
- struct mipi_dsim_ddi *dsim_ddi;
-
- if (!lcd_drv->name) {
- pr_err("dsim_lcd_driver name is NULL.\n");
- return -EFAULT;
- }
-
- dsim_ddi = exynos_mipi_dsi_find_lcd_device(lcd_drv);
- if (!dsim_ddi) {
- pr_err("mipi_dsim_ddi object not found.\n");
- return -EFAULT;
- }
-
- dsim_ddi->dsim_lcd_drv = lcd_drv;
-
- pr_info("registered panel driver(%s) to mipi-dsi driver.\n",
- lcd_drv->name);
-
- return 0;
-
-}
-EXPORT_SYMBOL_GPL(exynos_mipi_dsi_register_lcd_driver);
-
-static struct mipi_dsim_ddi *exynos_mipi_dsi_bind_lcd_ddi(
- struct mipi_dsim_device *dsim,
- const char *name)
-{
- struct mipi_dsim_ddi *dsim_ddi, *next;
- struct mipi_dsim_lcd_driver *lcd_drv;
- struct mipi_dsim_lcd_device *lcd_dev;
- int ret;
-
- mutex_lock(&dsim->lock);
-
- list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) {
- lcd_drv = dsim_ddi->dsim_lcd_drv;
- lcd_dev = dsim_ddi->dsim_lcd_dev;
- if (!lcd_drv || !lcd_dev ||
- (dsim->id != dsim_ddi->bus_id))
- continue;
-
- dev_dbg(dsim->dev, "lcd_drv->id = %d, lcd_dev->id = %d\n",
- lcd_drv->id, lcd_dev->id);
- dev_dbg(dsim->dev, "lcd_dev->bus_id = %d, dsim->id = %d\n",
- lcd_dev->bus_id, dsim->id);
-
- if ((strcmp(lcd_drv->name, name) == 0)) {
- lcd_dev->master = dsim;
-
- lcd_dev->dev.parent = dsim->dev;
- dev_set_name(&lcd_dev->dev, "%s", lcd_drv->name);
-
- ret = device_register(&lcd_dev->dev);
- if (ret < 0) {
- dev_err(dsim->dev,
- "can't register %s, status %d\n",
- dev_name(&lcd_dev->dev), ret);
- mutex_unlock(&dsim->lock);
-
- return NULL;
- }
-
- dsim->dsim_lcd_dev = lcd_dev;
- dsim->dsim_lcd_drv = lcd_drv;
-
- mutex_unlock(&dsim->lock);
-
- return dsim_ddi;
- }
- }
-
- mutex_unlock(&dsim->lock);
-
- return NULL;
-}
-
-/* define MIPI-DSI Master operations. */
-static struct mipi_dsim_master_ops master_ops = {
- .cmd_read = exynos_mipi_dsi_rd_data,
- .cmd_write = exynos_mipi_dsi_wr_data,
- .get_dsim_frame_done = exynos_mipi_dsi_get_frame_done_status,
- .clear_dsim_frame_done = exynos_mipi_dsi_clear_frame_done,
- .set_early_blank_mode = exynos_mipi_dsi_early_blank_mode,
- .set_blank_mode = exynos_mipi_dsi_blank_mode,
-};
-
-static int exynos_mipi_dsi_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct mipi_dsim_device *dsim;
- struct mipi_dsim_config *dsim_config;
- struct mipi_dsim_platform_data *dsim_pd;
- struct mipi_dsim_ddi *dsim_ddi;
- int ret = -EINVAL;
-
- dsim = devm_kzalloc(&pdev->dev, sizeof(struct mipi_dsim_device),
- GFP_KERNEL);
- if (!dsim) {
- dev_err(&pdev->dev, "failed to allocate dsim object.\n");
- return -ENOMEM;
- }
-
- dsim->pd = to_dsim_plat(pdev);
- dsim->dev = &pdev->dev;
- dsim->id = pdev->id;
-
- /* get mipi_dsim_platform_data. */
- dsim_pd = (struct mipi_dsim_platform_data *)dsim->pd;
- if (dsim_pd == NULL) {
- dev_err(&pdev->dev, "failed to get platform data for dsim.\n");
- return -EINVAL;
- }
- /* get mipi_dsim_config. */
- dsim_config = dsim_pd->dsim_config;
- if (dsim_config == NULL) {
- dev_err(&pdev->dev, "failed to get dsim config data.\n");
- return -EINVAL;
- }
-
- dsim->dsim_config = dsim_config;
- dsim->master_ops = &master_ops;
-
- mutex_init(&dsim->lock);
-
- ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(supplies),
- supplies);
- if (ret) {
- dev_err(&pdev->dev, "Failed to get regulators: %d\n", ret);
- return ret;
- }
-
- dsim->phy = devm_phy_get(&pdev->dev, "dsim");
- if (IS_ERR(dsim->phy))
- return PTR_ERR(dsim->phy);
-
- dsim->clock = devm_clk_get(&pdev->dev, "dsim0");
- if (IS_ERR(dsim->clock)) {
- dev_err(&pdev->dev, "failed to get dsim clock source\n");
- return -ENODEV;
- }
-
- clk_enable(dsim->clock);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dsim->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dsim->reg_base)) {
- ret = PTR_ERR(dsim->reg_base);
- goto error;
- }
-
- mutex_init(&dsim->lock);
-
- /* bind lcd ddi matched with panel name. */
- dsim_ddi = exynos_mipi_dsi_bind_lcd_ddi(dsim, dsim_pd->lcd_panel_name);
- if (!dsim_ddi) {
- dev_err(&pdev->dev, "mipi_dsim_ddi object not found.\n");
- ret = -EINVAL;
- goto error;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request dsim irq resource\n");
- goto error;
- }
- dsim->irq = ret;
-
- init_completion(&dsim_wr_comp);
- init_completion(&dsim_rd_comp);
- platform_set_drvdata(pdev, dsim);
-
- ret = devm_request_irq(&pdev->dev, dsim->irq,
- exynos_mipi_dsi_interrupt_handler,
- IRQF_SHARED, dev_name(&pdev->dev), dsim);
- if (ret != 0) {
- dev_err(&pdev->dev, "failed to request dsim irq\n");
- ret = -EINVAL;
- goto error;
- }
-
- /* enable interrupts */
- exynos_mipi_dsi_init_interrupt(dsim);
-
- /* initialize mipi-dsi client(lcd panel). */
- if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe)
- dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev);
-
- /* in case mipi-dsi has been enabled by bootloader */
- if (dsim_pd->enabled) {
- exynos_mipi_regulator_enable(dsim);
- goto done;
- }
-
- /* lcd panel power on. */
- if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on)
- dsim_ddi->dsim_lcd_drv->power_on(dsim_ddi->dsim_lcd_dev, 1);
-
- exynos_mipi_regulator_enable(dsim);
-
- /* enable MIPI-DSI PHY. */
- phy_power_on(dsim->phy);
-
- exynos_mipi_update_cfg(dsim);
-
- /* set lcd panel sequence commands. */
- if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->set_sequence)
- dsim_ddi->dsim_lcd_drv->set_sequence(dsim_ddi->dsim_lcd_dev);
-
- dsim->suspended = false;
-
-done:
-
- dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__,
- dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB");
-
- return 0;
-
-error:
- clk_disable(dsim->clock);
- return ret;
-}
-
-static int exynos_mipi_dsi_remove(struct platform_device *pdev)
-{
- struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
- struct mipi_dsim_ddi *dsim_ddi, *next;
- struct mipi_dsim_lcd_driver *dsim_lcd_drv;
-
- clk_disable(dsim->clock);
-
- list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) {
- if (dsim_ddi) {
- if (dsim->id != dsim_ddi->bus_id)
- continue;
-
- dsim_lcd_drv = dsim_ddi->dsim_lcd_drv;
-
- if (dsim_lcd_drv->remove)
- dsim_lcd_drv->remove(dsim_ddi->dsim_lcd_dev);
-
- kfree(dsim_ddi);
- }
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos_mipi_dsi_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
- struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
- struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
-
- disable_irq(dsim->irq);
-
- if (dsim->suspended)
- return 0;
-
- if (client_drv && client_drv->suspend)
- client_drv->suspend(client_dev);
-
- /* disable MIPI-DSI PHY. */
- phy_power_off(dsim->phy);
-
- clk_disable(dsim->clock);
-
- exynos_mipi_regulator_disable(dsim);
-
- dsim->suspended = true;
-
- return 0;
-}
-
-static int exynos_mipi_dsi_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
- struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
- struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
-
- enable_irq(dsim->irq);
-
- if (!dsim->suspended)
- return 0;
-
- /* lcd panel power on. */
- if (client_drv && client_drv->power_on)
- client_drv->power_on(client_dev, 1);
-
- exynos_mipi_regulator_enable(dsim);
-
- /* enable MIPI-DSI PHY. */
- phy_power_on(dsim->phy);
-
- clk_enable(dsim->clock);
-
- exynos_mipi_update_cfg(dsim);
-
- /* set lcd panel sequence commands. */
- if (client_drv && client_drv->set_sequence)
- client_drv->set_sequence(client_dev);
-
- dsim->suspended = false;
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume)
-};
-
-static struct platform_driver exynos_mipi_dsi_driver = {
- .probe = exynos_mipi_dsi_probe,
- .remove = exynos_mipi_dsi_remove,
- .driver = {
- .name = "exynos-mipi-dsim",
- .pm = &exynos_mipi_dsi_pm_ops,
- },
-};
-
-module_platform_driver(exynos_mipi_dsi_driver);
-
-MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC MIPI-DSI driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
deleted file mode 100644
index 2358a2fbbbcd..000000000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
+++ /dev/null
@@ -1,880 +0,0 @@
-/* linux/drivers/video/exynos/exynos_mipi_dsi_common.c
- *
- * Samsung SoC MIPI-DSI common driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/ctype.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/memory.h>
-#include <linux/delay.h>
-#include <linux/irqreturn.h>
-#include <linux/kthread.h>
-
-#include <video/mipi_display.h>
-#include <video/exynos_mipi_dsim.h>
-
-#include "exynos_mipi_dsi_regs.h"
-#include "exynos_mipi_dsi_lowlevel.h"
-#include "exynos_mipi_dsi_common.h"
-
-#define MIPI_FIFO_TIMEOUT msecs_to_jiffies(250)
-#define MIPI_RX_FIFO_READ_DONE 0x30800002
-#define MIPI_MAX_RX_FIFO 20
-#define MHZ (1000 * 1000)
-#define FIN_HZ (24 * MHZ)
-
-#define DFIN_PLL_MIN_HZ (6 * MHZ)
-#define DFIN_PLL_MAX_HZ (12 * MHZ)
-
-#define DFVCO_MIN_HZ (500 * MHZ)
-#define DFVCO_MAX_HZ (1000 * MHZ)
-
-#define TRY_GET_FIFO_TIMEOUT (5000 * 2)
-#define TRY_FIFO_CLEAR (10)
-
-/* MIPI-DSIM status types. */
-enum {
- DSIM_STATE_INIT, /* should be initialized. */
- DSIM_STATE_STOP, /* CPU and LCDC are LP mode. */
- DSIM_STATE_HSCLKEN, /* HS clock was enabled. */
- DSIM_STATE_ULPS
-};
-
-/* define DSI lane types. */
-enum {
- DSIM_LANE_CLOCK = (1 << 0),
- DSIM_LANE_DATA0 = (1 << 1),
- DSIM_LANE_DATA1 = (1 << 2),
- DSIM_LANE_DATA2 = (1 << 3),
- DSIM_LANE_DATA3 = (1 << 4)
-};
-
-static unsigned int dpll_table[15] = {
- 100, 120, 170, 220, 270,
- 320, 390, 450, 510, 560,
- 640, 690, 770, 870, 950
-};
-
-irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
-{
- struct mipi_dsim_device *dsim = dev_id;
- unsigned int intsrc, intmsk;
-
- intsrc = exynos_mipi_dsi_read_interrupt(dsim);
- intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
- intmsk = ~intmsk & intsrc;
-
- if (intsrc & INTMSK_RX_DONE) {
- complete(&dsim_rd_comp);
- dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n");
- }
- if (intsrc & INTMSK_FIFO_EMPTY) {
- complete(&dsim_wr_comp);
- dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n");
- }
-
- exynos_mipi_dsi_clear_interrupt(dsim, intmsk);
-
- return IRQ_HANDLED;
-}
-
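The handler above acknowledges only the sources that are both pending and unmasked: `intmsk = ~intmsk & intsrc`. A standalone sketch of that bit arithmetic, using the register values as hypothetical snapshots (not the driver API):

	#include <stdint.h>
	#include <stdio.h>

	#define INTMSK_RX_DONE		(1u << 18)
	#define INTMSK_FIFO_EMPTY	(1u << 29)

	int main(void)
	{
		/* hypothetical snapshot: both sources pending, one masked */
		uint32_t intsrc = INTMSK_RX_DONE | INTMSK_FIFO_EMPTY;
		uint32_t intmsk = INTMSK_FIFO_EMPTY;
		uint32_t to_ack = ~intmsk & intsrc;	/* pending AND unmasked */

		printf("ack 0x%08x\n", to_ack);		/* 0x00040000: RX_DONE only */
		return 0;
	}
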
-/*
- * write long packet to mipi dsi slave
- * @dsim: mipi dsim device structure.
- * @data0: packet data to send.
- * @data1: size of packet data
- */
-static void exynos_mipi_dsi_long_data_wr(struct mipi_dsim_device *dsim,
- const unsigned char *data0, unsigned int data_size)
-{
- unsigned int data_cnt = 0, payload = 0;
-
-	/* in case the data count is more than 4 */
- for (data_cnt = 0; data_cnt < data_size; data_cnt += 4) {
-		/*
-		 * after sending 4 bytes at a time,
-		 * send the remaining data (fewer than 4 bytes).
-		 */
- if ((data_size - data_cnt) < 4) {
- if ((data_size - data_cnt) == 3) {
- payload = data0[data_cnt] |
- data0[data_cnt + 1] << 8 |
- data0[data_cnt + 2] << 16;
- dev_dbg(dsim->dev, "count = 3 payload = %x, %x %x %x\n",
- payload, data0[data_cnt],
- data0[data_cnt + 1],
- data0[data_cnt + 2]);
- } else if ((data_size - data_cnt) == 2) {
- payload = data0[data_cnt] |
- data0[data_cnt + 1] << 8;
- dev_dbg(dsim->dev,
- "count = 2 payload = %x, %x %x\n", payload,
- data0[data_cnt],
- data0[data_cnt + 1]);
- } else if ((data_size - data_cnt) == 1) {
- payload = data0[data_cnt];
- }
-
- exynos_mipi_dsi_wr_tx_data(dsim, payload);
-			/* send 4 bytes at a time. */
- } else {
- payload = data0[data_cnt] |
- data0[data_cnt + 1] << 8 |
- data0[data_cnt + 2] << 16 |
- data0[data_cnt + 3] << 24;
-
- dev_dbg(dsim->dev,
- "count = 4 payload = %x, %x %x %x %x\n",
- payload, *(u8 *)(data0 + data_cnt),
- data0[data_cnt + 1],
- data0[data_cnt + 2],
- data0[data_cnt + 3]);
-
- exynos_mipi_dsi_wr_tx_data(dsim, payload);
- }
- }
-}
-
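For reference, the write path above is plain little-endian packing of payload bytes into 32-bit FIFO words. A minimal userspace sketch of the same logic (pack_payload is a hypothetical helper, not part of the driver):

	#include <stdint.h>
	#include <stdio.h>

	/* pack up to 4 bytes, least-significant byte first, into one word */
	static uint32_t pack_payload(const unsigned char *buf, unsigned int len)
	{
		uint32_t word = 0;
		unsigned int i;

		for (i = 0; i < len && i < 4; i++)
			word |= (uint32_t)buf[i] << (8 * i);

		return word;
	}

	int main(void)
	{
		const unsigned char data[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
		unsigned int off, size = sizeof(data);

		for (off = 0; off < size; off += 4)
			printf("word: 0x%08x\n", pack_payload(data + off, size - off));
		/* prints 0x44332211 then 0x00006655 */
		return 0;
	}
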
-int exynos_mipi_dsi_wr_data(struct mipi_dsim_device *dsim, unsigned int data_id,
- const unsigned char *data0, unsigned int data_size)
-{
- unsigned int check_rx_ack = 0;
-
- if (dsim->state == DSIM_STATE_ULPS) {
- dev_err(dsim->dev, "state is ULPS.\n");
-
- return -EINVAL;
- }
-
- /* FIXME!!! why does it need this delay? */
- msleep(20);
-
- mutex_lock(&dsim->lock);
-
- switch (data_id) {
-	/* short packet types for commands. */
- case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
- exynos_mipi_dsi_wr_tx_header(dsim, data_id, data0[0], data0[1]);
- if (check_rx_ack) {
- /* process response func should be implemented */
- mutex_unlock(&dsim->lock);
- return 0;
- } else {
- mutex_unlock(&dsim->lock);
- return -EINVAL;
- }
-
- /* general command */
- case MIPI_DSI_COLOR_MODE_OFF:
- case MIPI_DSI_COLOR_MODE_ON:
- case MIPI_DSI_SHUTDOWN_PERIPHERAL:
- case MIPI_DSI_TURN_ON_PERIPHERAL:
- exynos_mipi_dsi_wr_tx_header(dsim, data_id, data0[0], data0[1]);
- if (check_rx_ack) {
- /* process response func should be implemented. */
- mutex_unlock(&dsim->lock);
- return 0;
- } else {
- mutex_unlock(&dsim->lock);
- return -EINVAL;
- }
-
- /* packet types for video data */
- case MIPI_DSI_V_SYNC_START:
- case MIPI_DSI_V_SYNC_END:
- case MIPI_DSI_H_SYNC_START:
- case MIPI_DSI_H_SYNC_END:
- case MIPI_DSI_END_OF_TRANSMISSION:
- mutex_unlock(&dsim->lock);
- return 0;
-
- /* long packet type and null packet */
- case MIPI_DSI_NULL_PACKET:
- case MIPI_DSI_BLANKING_PACKET:
- mutex_unlock(&dsim->lock);
- return 0;
- case MIPI_DSI_GENERIC_LONG_WRITE:
- case MIPI_DSI_DCS_LONG_WRITE:
- {
-		unsigned int payload = 0;
-
-		reinit_completion(&dsim_wr_comp);
-
-		/* if the data count is less than 4, send up to 3 bytes of data. */
- if (data_size < 4) {
- payload = data0[0] |
- data0[1] << 8 |
- data0[2] << 16;
-
- exynos_mipi_dsi_wr_tx_data(dsim, payload);
-
- dev_dbg(dsim->dev, "count = %d payload = %x,%x %x %x\n",
- data_size, payload, data0[0],
- data0[1], data0[2]);
-
-		/* in case the data count is more than 4 */
- } else
- exynos_mipi_dsi_long_data_wr(dsim, data0, data_size);
-
- /* put data into header fifo */
- exynos_mipi_dsi_wr_tx_header(dsim, data_id, data_size & 0xff,
- (data_size & 0xff00) >> 8);
-
- if (!wait_for_completion_interruptible_timeout(&dsim_wr_comp,
- MIPI_FIFO_TIMEOUT)) {
- dev_warn(dsim->dev, "command write timeout.\n");
- mutex_unlock(&dsim->lock);
- return -EAGAIN;
- }
-
- if (check_rx_ack) {
- /* process response func should be implemented. */
- mutex_unlock(&dsim->lock);
- return 0;
- } else {
- mutex_unlock(&dsim->lock);
- return -EINVAL;
- }
- }
-
-	/* packet types for video data */
- case MIPI_DSI_PACKED_PIXEL_STREAM_16:
- case MIPI_DSI_PACKED_PIXEL_STREAM_18:
- case MIPI_DSI_PIXEL_STREAM_3BYTE_18:
- case MIPI_DSI_PACKED_PIXEL_STREAM_24:
- if (check_rx_ack) {
- /* process response func should be implemented. */
- mutex_unlock(&dsim->lock);
- return 0;
- } else {
- mutex_unlock(&dsim->lock);
- return -EINVAL;
- }
- default:
- dev_warn(dsim->dev,
-			"data id %x is not supported by the current DSI spec.\n",
- data_id);
-
- mutex_unlock(&dsim->lock);
- return -EINVAL;
- }
-}
-
-static unsigned int exynos_mipi_dsi_long_data_rd(struct mipi_dsim_device *dsim,
- unsigned int req_size, unsigned int rx_data, u8 *rx_buf)
-{
- unsigned int rcv_pkt, i, j;
- u16 rxsize;
-
- /* for long packet */
- rxsize = (u16)((rx_data & 0x00ffff00) >> 8);
- dev_dbg(dsim->dev, "mipi dsi rx size : %d\n", rxsize);
- if (rxsize != req_size) {
- dev_dbg(dsim->dev,
-			"received size mismatch: received %d, requested %d\n",
- rxsize, req_size);
- goto err;
- }
-
- for (i = 0; i < (rxsize >> 2); i++) {
- rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim);
- dev_dbg(dsim->dev, "received pkt : %08x\n", rcv_pkt);
- for (j = 0; j < 4; j++) {
- rx_buf[(i * 4) + j] =
- (u8)(rcv_pkt >> (j * 8)) & 0xff;
- dev_dbg(dsim->dev, "received value : %02x\n",
- (rcv_pkt >> (j * 8)) & 0xff);
- }
- }
- if (rxsize % 4) {
- rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim);
- dev_dbg(dsim->dev, "received pkt : %08x\n", rcv_pkt);
- for (j = 0; j < (rxsize % 4); j++) {
- rx_buf[(i * 4) + j] =
- (u8)(rcv_pkt >> (j * 8)) & 0xff;
- dev_dbg(dsim->dev, "received value : %02x\n",
- (rcv_pkt >> (j * 8)) & 0xff);
- }
- }
-
- return rxsize;
-
-err:
- return -EINVAL;
-}
-
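The read path above is the mirror image: each 32-bit RX FIFO word is split back into bytes, least-significant byte first. A small sketch (unpack_word is a hypothetical helper):

	#include <stdint.h>
	#include <stdio.h>

	/* split a 32-bit RX FIFO word back into up to 4 bytes */
	static void unpack_word(uint32_t word, unsigned char *out, unsigned int n)
	{
		unsigned int j;

		for (j = 0; j < n && j < 4; j++)
			out[j] = (word >> (8 * j)) & 0xff;
	}

	int main(void)
	{
		unsigned char buf[4];

		unpack_word(0x44332211u, buf, 4);
		printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
		/* prints 11 22 33 44 */
		return 0;
	}
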
-static unsigned int exynos_mipi_dsi_response_size(unsigned int req_size)
-{
- switch (req_size) {
- case 1:
- return MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE;
- case 2:
- return MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE;
- default:
- return MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE;
- }
-}
-
-int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id,
- unsigned int data0, unsigned int req_size, u8 *rx_buf)
-{
- unsigned int rx_data, rcv_pkt, i;
- u8 response = 0;
- u16 rxsize;
-
- if (dsim->state == DSIM_STATE_ULPS) {
- dev_err(dsim->dev, "state is ULPS.\n");
-
- return -EINVAL;
- }
-
- /* FIXME!!! */
- msleep(20);
-
- mutex_lock(&dsim->lock);
- reinit_completion(&dsim_rd_comp);
- exynos_mipi_dsi_rd_tx_header(dsim,
- MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, req_size);
-
- response = exynos_mipi_dsi_response_size(req_size);
-
- switch (data_id) {
- case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
- case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
- case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- case MIPI_DSI_DCS_READ:
- exynos_mipi_dsi_rd_tx_header(dsim,
- data_id, data0);
- /* process response func should be implemented. */
- break;
- default:
- dev_warn(dsim->dev,
-			"data id %x is not supported by the current DSI spec.\n",
- data_id);
-
- mutex_unlock(&dsim->lock);
- return -EINVAL;
- }
-
- if (!wait_for_completion_interruptible_timeout(&dsim_rd_comp,
- MIPI_FIFO_TIMEOUT)) {
- pr_err("RX done interrupt timeout\n");
- mutex_unlock(&dsim->lock);
- return 0;
- }
-
- msleep(20);
-
- rx_data = exynos_mipi_dsi_rd_rx_fifo(dsim);
-
- if ((u8)(rx_data & 0xff) != response) {
- printk(KERN_ERR
- "mipi dsi wrong response rx_data : %x, response:%x\n",
- rx_data, response);
- goto clear_rx_fifo;
- }
-
- if (req_size <= 2) {
- /* for short packet */
- for (i = 0; i < req_size; i++)
- rx_buf[i] = (rx_data >> (8 + (i * 8))) & 0xff;
- rxsize = req_size;
- } else {
- /* for long packet */
- rxsize = exynos_mipi_dsi_long_data_rd(dsim, req_size, rx_data,
- rx_buf);
- if (rxsize != req_size)
- goto clear_rx_fifo;
- }
-
- rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim);
-
- msleep(20);
-
- if (rcv_pkt != MIPI_RX_FIFO_READ_DONE) {
- dev_info(dsim->dev,
-			"cannot find RX FIFO READ DONE flag: %x\n", rcv_pkt);
- goto clear_rx_fifo;
- }
-
- mutex_unlock(&dsim->lock);
-
- return rxsize;
-
-clear_rx_fifo:
- i = 0;
- while (1) {
- rcv_pkt = exynos_mipi_dsi_rd_rx_fifo(dsim);
- if ((rcv_pkt == MIPI_RX_FIFO_READ_DONE)
- || (i > MIPI_MAX_RX_FIFO))
- break;
- dev_dbg(dsim->dev,
- "mipi dsi clear rx fifo : %08x\n", rcv_pkt);
- i++;
- }
- dev_info(dsim->dev,
- "mipi dsi rx done count : %d, rcv_pkt : %08x\n", i, rcv_pkt);
-
- mutex_unlock(&dsim->lock);
-
- return 0;
-}
-
-static int exynos_mipi_dsi_pll_on(struct mipi_dsim_device *dsim,
- unsigned int enable)
-{
- int sw_timeout;
-
- if (enable) {
- sw_timeout = 1000;
-
- exynos_mipi_dsi_enable_pll(dsim, 1);
- while (1) {
- sw_timeout--;
- if (exynos_mipi_dsi_is_pll_stable(dsim))
- return 0;
- if (sw_timeout == 0)
- return -EINVAL;
- }
- } else
- exynos_mipi_dsi_enable_pll(dsim, 0);
-
- return 0;
-}
-
-static unsigned long exynos_mipi_dsi_change_pll(struct mipi_dsim_device *dsim,
- unsigned int pre_divider, unsigned int main_divider,
- unsigned int scaler)
-{
- unsigned long dfin_pll, dfvco, dpll_out;
- unsigned int i, freq_band = 0xf;
-
- dfin_pll = (FIN_HZ / pre_divider);
-
- /******************************************************
- * Serial Clock(=ByteClk X 8) FreqBand[3:0] *
- ******************************************************
- * ~ 99.99 MHz 0000
- * 100 ~ 119.99 MHz 0001
- * 120 ~ 159.99 MHz 0010
- * 160 ~ 199.99 MHz 0011
- * 200 ~ 239.99 MHz 0100
- *	240 ~ 319.99 MHz	0101
- * 320 ~ 389.99 MHz 0110
- * 390 ~ 449.99 MHz 0111
- * 450 ~ 509.99 MHz 1000
- * 510 ~ 559.99 MHz 1001
- * 560 ~ 639.99 MHz 1010
- * 640 ~ 689.99 MHz 1011
- * 690 ~ 769.99 MHz 1100
- * 770 ~ 869.99 MHz 1101
- * 870 ~ 949.99 MHz 1110
- * 950 ~ 1000 MHz 1111
- ******************************************************/
- if (dfin_pll < DFIN_PLL_MIN_HZ || dfin_pll > DFIN_PLL_MAX_HZ) {
- dev_warn(dsim->dev, "fin_pll range should be 6MHz ~ 12MHz\n");
- exynos_mipi_dsi_enable_afc(dsim, 0, 0);
- } else {
- if (dfin_pll < 7 * MHZ)
- exynos_mipi_dsi_enable_afc(dsim, 1, 0x1);
- else if (dfin_pll < 8 * MHZ)
- exynos_mipi_dsi_enable_afc(dsim, 1, 0x0);
- else if (dfin_pll < 9 * MHZ)
- exynos_mipi_dsi_enable_afc(dsim, 1, 0x3);
- else if (dfin_pll < 10 * MHZ)
- exynos_mipi_dsi_enable_afc(dsim, 1, 0x2);
- else if (dfin_pll < 11 * MHZ)
- exynos_mipi_dsi_enable_afc(dsim, 1, 0x5);
- else
- exynos_mipi_dsi_enable_afc(dsim, 1, 0x4);
- }
-
- dfvco = dfin_pll * main_divider;
- dev_dbg(dsim->dev, "dfvco = %lu, dfin_pll = %lu, main_divider = %d\n",
- dfvco, dfin_pll, main_divider);
- if (dfvco < DFVCO_MIN_HZ || dfvco > DFVCO_MAX_HZ)
- dev_warn(dsim->dev, "fvco range should be 500MHz ~ 1000MHz\n");
-
- dpll_out = dfvco / (1 << scaler);
- dev_dbg(dsim->dev, "dpll_out = %lu, dfvco = %lu, scaler = %d\n",
- dpll_out, dfvco, scaler);
-
- for (i = 0; i < ARRAY_SIZE(dpll_table); i++) {
- if (dpll_out < dpll_table[i] * MHZ) {
- freq_band = i;
- break;
- }
- }
-
- dev_dbg(dsim->dev, "freq_band = %d\n", freq_band);
-
- exynos_mipi_dsi_pll_freq(dsim, pre_divider, main_divider, scaler);
-
- exynos_mipi_dsi_hs_zero_ctrl(dsim, 0);
- exynos_mipi_dsi_prep_ctrl(dsim, 0);
-
- /* Freq Band */
- exynos_mipi_dsi_pll_freq_band(dsim, freq_band);
-
- /* Stable time */
- exynos_mipi_dsi_pll_stable_time(dsim, dsim->dsim_config->pll_stable_time);
-
- /* Enable PLL */
- dev_dbg(dsim->dev, "FOUT of mipi dphy pll is %luMHz\n",
- (dpll_out / MHZ));
-
- return dpll_out;
-}
-
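The PLL arithmetic above reduces to dpll_out = (FIN / P) * M / 2^S, with FIN fixed at 24 MHz, dfin_pll constrained to 6..12 MHz and dfvco to 500..1000 MHz; the frequency band is the index of the first dpll_table entry above the output. A standalone sketch, with hypothetical P/M/S values:

	#include <stdio.h>

	#define MHZ	(1000UL * 1000UL)
	#define FIN_HZ	(24UL * MHZ)

	static const unsigned long dpll_table[15] = {
		100, 120, 170, 220, 270, 320, 390, 450,
		510, 560, 640, 690, 770, 870, 950
	};

	int main(void)
	{
		unsigned int p = 3, m = 90, s = 1;	/* hypothetical PMS values */
		unsigned long dfin = FIN_HZ / p;	/* 8 MHz, inside 6..12 MHz */
		unsigned long dfvco = dfin * m;		/* 720 MHz, inside 500..1000 MHz */
		unsigned long dpll_out = dfvco >> s;	/* 360 MHz HS bit clock */
		unsigned int i, band = 0xf;

		for (i = 0; i < 15; i++) {
			if (dpll_out < dpll_table[i] * MHZ) {
				band = i;	/* 360 MHz lands in the 320..389.99 row */
				break;
			}
		}
		printf("dpll_out = %lu MHz, freq band = %u\n", dpll_out / MHZ, band);
		return 0;
	}
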
-static int exynos_mipi_dsi_set_clock(struct mipi_dsim_device *dsim,
- unsigned int byte_clk_sel, unsigned int enable)
-{
- unsigned int esc_div;
- unsigned long esc_clk_error_rate;
- unsigned long hs_clk = 0, byte_clk = 0, escape_clk = 0;
-
- if (enable) {
- dsim->e_clk_src = byte_clk_sel;
-
- /* Escape mode clock and byte clock source */
- exynos_mipi_dsi_set_byte_clock_src(dsim, byte_clk_sel);
-
- /* DPHY, DSIM Link : D-PHY clock out */
- if (byte_clk_sel == DSIM_PLL_OUT_DIV8) {
- hs_clk = exynos_mipi_dsi_change_pll(dsim,
- dsim->dsim_config->p, dsim->dsim_config->m,
- dsim->dsim_config->s);
- if (hs_clk == 0) {
- dev_err(dsim->dev,
- "failed to get hs clock.\n");
- return -EINVAL;
- }
-
- byte_clk = hs_clk / 8;
- exynos_mipi_dsi_enable_pll_bypass(dsim, 0);
- exynos_mipi_dsi_pll_on(dsim, 1);
- /* DPHY : D-PHY clock out, DSIM link : external clock out */
- } else if (byte_clk_sel == DSIM_EXT_CLK_DIV8) {
-			dev_warn(dsim->dev,
-				"this platform does not support an external clock source for MIPI DSIM.\n");
- } else if (byte_clk_sel == DSIM_EXT_CLK_BYPASS) {
-			dev_warn(dsim->dev,
-				"this platform does not support an external clock source for MIPI DSIM.\n");
- }
-
- /* escape clock divider */
- esc_div = byte_clk / (dsim->dsim_config->esc_clk);
- dev_dbg(dsim->dev,
- "esc_div = %d, byte_clk = %lu, esc_clk = %lu\n",
- esc_div, byte_clk, dsim->dsim_config->esc_clk);
- if ((byte_clk / esc_div) >= (20 * MHZ) ||
- (byte_clk / esc_div) >
- dsim->dsim_config->esc_clk)
- esc_div += 1;
-
- escape_clk = byte_clk / esc_div;
- dev_dbg(dsim->dev,
- "escape_clk = %lu, byte_clk = %lu, esc_div = %d\n",
- escape_clk, byte_clk, esc_div);
-
- /* enable escape clock. */
- exynos_mipi_dsi_enable_byte_clock(dsim, 1);
-
- /* enable byte clk and escape clock */
- exynos_mipi_dsi_set_esc_clk_prs(dsim, 1, esc_div);
- /* escape clock on lane */
- exynos_mipi_dsi_enable_esc_clk_on_lane(dsim,
- (DSIM_LANE_CLOCK | dsim->data_lane), 1);
-
- dev_dbg(dsim->dev, "byte clock is %luMHz\n",
- (byte_clk / MHZ));
-		dev_dbg(dsim->dev, "requested escape clock is %lu MHz\n",
- (dsim->dsim_config->esc_clk / MHZ));
- dev_dbg(dsim->dev, "escape clock divider is %x\n", esc_div);
- dev_dbg(dsim->dev, "escape clock is %luMHz\n",
- ((byte_clk / esc_div) / MHZ));
-
- if ((byte_clk / esc_div) > escape_clk) {
- esc_clk_error_rate = escape_clk /
- (byte_clk / esc_div);
- dev_warn(dsim->dev, "error rate is %lu over.\n",
- (esc_clk_error_rate / 100));
- } else if ((byte_clk / esc_div) < (escape_clk)) {
- esc_clk_error_rate = (byte_clk / esc_div) /
- escape_clk;
- dev_warn(dsim->dev, "error rate is %lu under.\n",
- (esc_clk_error_rate / 100));
- }
- } else {
- exynos_mipi_dsi_enable_esc_clk_on_lane(dsim,
- (DSIM_LANE_CLOCK | dsim->data_lane), 0);
- exynos_mipi_dsi_set_esc_clk_prs(dsim, 0, 0);
-
- /* disable escape clock. */
- exynos_mipi_dsi_enable_byte_clock(dsim, 0);
-
- if (byte_clk_sel == DSIM_PLL_OUT_DIV8)
- exynos_mipi_dsi_pll_on(dsim, 0);
- }
-
- return 0;
-}
-
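The escape clock divider above starts at byte_clk / esc_clk and is bumped by one whenever the resulting escape clock would reach 20 MHz or overshoot the requested rate. A worked sketch with hypothetical clock rates:

	#include <stdio.h>

	#define MHZ (1000UL * 1000UL)

	int main(void)
	{
		/* hypothetical rates: 45 MHz byte clock, 20 MHz requested esc clock */
		unsigned long byte_clk = 45 * MHZ;
		unsigned long esc_clk = 20 * MHZ;
		unsigned int esc_div = byte_clk / esc_clk;	/* 2 */

		/* 45/2 = 22.5 MHz: >= 20 MHz and above the request, so bump it */
		if (byte_clk / esc_div >= 20 * MHZ || byte_clk / esc_div > esc_clk)
			esc_div += 1;

		printf("esc_div = %u, escape clock = %lu MHz\n",
		       esc_div, (byte_clk / esc_div) / MHZ);	/* 3, 15 MHz */
		return 0;
	}
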
-int exynos_mipi_dsi_init_dsim(struct mipi_dsim_device *dsim)
-{
- dsim->state = DSIM_STATE_INIT;
-
- switch (dsim->dsim_config->e_no_data_lane) {
- case DSIM_DATA_LANE_1:
- dsim->data_lane = DSIM_LANE_DATA0;
- break;
- case DSIM_DATA_LANE_2:
- dsim->data_lane = DSIM_LANE_DATA0 | DSIM_LANE_DATA1;
- break;
- case DSIM_DATA_LANE_3:
- dsim->data_lane = DSIM_LANE_DATA0 | DSIM_LANE_DATA1 |
- DSIM_LANE_DATA2;
- break;
- case DSIM_DATA_LANE_4:
- dsim->data_lane = DSIM_LANE_DATA0 | DSIM_LANE_DATA1 |
- DSIM_LANE_DATA2 | DSIM_LANE_DATA3;
- break;
- default:
- dev_info(dsim->dev, "data lane is invalid.\n");
- return -EINVAL;
- }
-
- exynos_mipi_dsi_sw_reset(dsim);
- exynos_mipi_dsi_func_reset(dsim);
-
- exynos_mipi_dsi_dp_dn_swap(dsim, 0);
-
- return 0;
-}
-
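The switch above builds a contiguous bitmask over DATA0..DATA3, which occupy bits 1..4. The same mask can be computed arithmetically; lane_mask below is a hypothetical equivalent of the switch, not a driver helper:

	#include <stdio.h>

	/* n data lanes (1..4) give the mask ((1 << n) - 1) << 1 */
	static unsigned int lane_mask(unsigned int n)
	{
		return ((1u << n) - 1) << 1;
	}

	int main(void)
	{
		printf("1 lane:  0x%02x\n", lane_mask(1));	/* DATA0        = 0x02 */
		printf("2 lanes: 0x%02x\n", lane_mask(2));	/* DATA0|DATA1  = 0x06 */
		printf("4 lanes: 0x%02x\n", lane_mask(4));	/* DATA0..DATA3 = 0x1e */
		return 0;
	}
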
-void exynos_mipi_dsi_init_interrupt(struct mipi_dsim_device *dsim)
-{
- unsigned int src = 0;
-
- src = (INTSRC_SFR_FIFO_EMPTY | INTSRC_RX_DATA_DONE);
- exynos_mipi_dsi_set_interrupt(dsim, src, 1);
-
- src = 0;
- src = ~(INTMSK_RX_DONE | INTMSK_FIFO_EMPTY);
- exynos_mipi_dsi_set_interrupt_mask(dsim, src, 1);
-}
-
-int exynos_mipi_dsi_enable_frame_done_int(struct mipi_dsim_device *dsim,
- unsigned int enable)
-{
- /* enable only frame done interrupt */
- exynos_mipi_dsi_set_interrupt_mask(dsim, INTMSK_FRAME_DONE, enable);
-
- return 0;
-}
-
-void exynos_mipi_dsi_stand_by(struct mipi_dsim_device *dsim,
- unsigned int enable)
-{
-	/* consider Main display and Sub display. */
- exynos_mipi_dsi_set_main_stand_by(dsim, enable);
-}
-
-int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
- struct mipi_dsim_config *dsim_config)
-{
- struct mipi_dsim_platform_data *dsim_pd;
- struct fb_videomode *timing;
-
- dsim_pd = (struct mipi_dsim_platform_data *)dsim->pd;
- timing = (struct fb_videomode *)dsim_pd->lcd_panel_info;
-
- /* in case of VIDEO MODE (RGB INTERFACE), it sets polarities. */
- if (dsim_config->e_interface == (u32) DSIM_VIDEO) {
- if (dsim_config->auto_vertical_cnt == 0) {
- exynos_mipi_dsi_set_main_disp_vporch(dsim,
- dsim_config->cmd_allow,
- timing->lower_margin,
- timing->upper_margin);
- exynos_mipi_dsi_set_main_disp_hporch(dsim,
- timing->right_margin,
- timing->left_margin);
- exynos_mipi_dsi_set_main_disp_sync_area(dsim,
- timing->vsync_len,
- timing->hsync_len);
- }
- }
-
- exynos_mipi_dsi_set_main_disp_resol(dsim, timing->xres,
- timing->yres);
-
- exynos_mipi_dsi_display_config(dsim, dsim_config);
-
- dev_info(dsim->dev, "lcd panel ==> width = %d, height = %d\n",
- timing->xres, timing->yres);
-
- return 0;
-}
-
-int exynos_mipi_dsi_init_link(struct mipi_dsim_device *dsim)
-{
- unsigned int time_out = 100;
-
- switch (dsim->state) {
- case DSIM_STATE_INIT:
- exynos_mipi_dsi_init_fifo_pointer(dsim, 0x1f);
-
- /* dsi configuration */
- exynos_mipi_dsi_init_config(dsim);
- exynos_mipi_dsi_enable_lane(dsim, DSIM_LANE_CLOCK, 1);
- exynos_mipi_dsi_enable_lane(dsim, dsim->data_lane, 1);
-
- /* set clock configuration */
- exynos_mipi_dsi_set_clock(dsim, dsim->dsim_config->e_byte_clk, 1);
-
-		/* check that the clock and data lanes are in stop state */
- while (!(exynos_mipi_dsi_is_lane_state(dsim))) {
- time_out--;
- if (time_out == 0) {
- dev_err(dsim->dev,
-					"DSI Master is not in stop state.\n");
- dev_err(dsim->dev,
- "Check initialization process\n");
-
- return -EINVAL;
- }
- }
- if (time_out != 0) {
- dev_info(dsim->dev,
-				"DSI Master initialization has been completed.\n");
-			dev_info(dsim->dev, "DSI Master is in stop state\n");
- }
-
- dsim->state = DSIM_STATE_STOP;
-
- /* BTA sequence counters */
- exynos_mipi_dsi_set_stop_state_counter(dsim,
- dsim->dsim_config->stop_holding_cnt);
- exynos_mipi_dsi_set_bta_timeout(dsim,
- dsim->dsim_config->bta_timeout);
- exynos_mipi_dsi_set_lpdr_timeout(dsim,
- dsim->dsim_config->rx_timeout);
-
- return 0;
- default:
-		dev_info(dsim->dev, "DSI Master is already initialized.\n");
- return 0;
- }
-
- return 0;
-}
-
-int exynos_mipi_dsi_set_hs_enable(struct mipi_dsim_device *dsim)
-{
- if (dsim->state != DSIM_STATE_STOP) {
- dev_warn(dsim->dev, "DSIM is not in stop state.\n");
- return 0;
- }
-
- if (dsim->e_clk_src == DSIM_EXT_CLK_BYPASS) {
- dev_warn(dsim->dev, "clock source is external bypass.\n");
- return 0;
- }
-
- dsim->state = DSIM_STATE_HSCLKEN;
-
- /* set LCDC and CPU transfer mode to HS. */
- exynos_mipi_dsi_set_lcdc_transfer_mode(dsim, 0);
- exynos_mipi_dsi_set_cpu_transfer_mode(dsim, 0);
- exynos_mipi_dsi_enable_hs_clock(dsim, 1);
-
- return 0;
-}
-
-int exynos_mipi_dsi_set_data_transfer_mode(struct mipi_dsim_device *dsim,
- unsigned int mode)
-{
- if (mode) {
- if (dsim->state != DSIM_STATE_HSCLKEN) {
- dev_err(dsim->dev, "HS Clock lane is not enabled.\n");
- return -EINVAL;
- }
-
- exynos_mipi_dsi_set_lcdc_transfer_mode(dsim, 0);
- } else {
- if (dsim->state == DSIM_STATE_INIT || dsim->state ==
- DSIM_STATE_ULPS) {
- dev_err(dsim->dev,
-				"DSI Master is not in STOP or HSCLKEN state.\n");
- return -EINVAL;
- }
-
- exynos_mipi_dsi_set_cpu_transfer_mode(dsim, 0);
- }
-
- return 0;
-}
-
-int exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim)
-{
- return _exynos_mipi_dsi_get_frame_done_status(dsim);
-}
-
-int exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim)
-{
- _exynos_mipi_dsi_clear_frame_done(dsim);
-
- return 0;
-}
-
-int exynos_mipi_dsi_fifo_clear(struct mipi_dsim_device *dsim,
- unsigned int val)
-{
- int try = TRY_FIFO_CLEAR;
-
- exynos_mipi_dsi_sw_reset_release(dsim);
- exynos_mipi_dsi_func_reset(dsim);
-
- do {
- if (exynos_mipi_dsi_get_sw_reset_release(dsim)) {
- exynos_mipi_dsi_init_interrupt(dsim);
- dev_dbg(dsim->dev, "reset release done.\n");
- return 0;
- }
- } while (--try);
-
- dev_err(dsim->dev, "failed to clear dsim fifo.\n");
- return -EAGAIN;
-}
-
-MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC MIPI-DSI common driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h
deleted file mode 100644
index 412552274df3..000000000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* linux/drivers/video/exynos_mipi_dsi_common.h
- *
- * Header file for Samsung SoC MIPI-DSI common driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _EXYNOS_MIPI_DSI_COMMON_H
-#define _EXYNOS_MIPI_DSI_COMMON_H
-
-static DECLARE_COMPLETION(dsim_rd_comp);
-static DECLARE_COMPLETION(dsim_wr_comp);
-
-int exynos_mipi_dsi_wr_data(struct mipi_dsim_device *dsim, unsigned int data_id,
- const unsigned char *data0, unsigned int data_size);
-int exynos_mipi_dsi_rd_data(struct mipi_dsim_device *dsim, unsigned int data_id,
- unsigned int data0, unsigned int req_size, u8 *rx_buf);
-irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id);
-void exynos_mipi_dsi_init_interrupt(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_init_dsim(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_stand_by(struct mipi_dsim_device *dsim,
- unsigned int enable);
-int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
- struct mipi_dsim_config *dsim_info);
-int exynos_mipi_dsi_init_link(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_set_hs_enable(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_set_data_transfer_mode(struct mipi_dsim_device *dsim,
- unsigned int mode);
-int exynos_mipi_dsi_enable_frame_done_int(struct mipi_dsim_device *dsim,
- unsigned int enable);
-int exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim);
-
-extern struct fb_info *registered_fb[FB_MAX] __read_mostly;
-
-int exynos_mipi_dsi_fifo_clear(struct mipi_dsim_device *dsim,
- unsigned int val);
-
-#endif /* _EXYNOS_MIPI_DSI_COMMON_H */
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c
deleted file mode 100644
index c148d06540c1..000000000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c
+++ /dev/null
@@ -1,618 +0,0 @@
-/* linux/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
- *
- * Samsung SoC MIPI-DSI lowlevel driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/delay.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/ctype.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <video/exynos_mipi_dsim.h>
-
-#include "exynos_mipi_dsi_regs.h"
-#include "exynos_mipi_dsi_lowlevel.h"
-
-void exynos_mipi_dsi_func_reset(struct mipi_dsim_device *dsim)
-{
- unsigned int reg;
-
- reg = readl(dsim->reg_base + EXYNOS_DSIM_SWRST);
-
- reg |= DSIM_FUNCRST;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_SWRST);
-}
-
-void exynos_mipi_dsi_sw_reset(struct mipi_dsim_device *dsim)
-{
- unsigned int reg;
-
- reg = readl(dsim->reg_base + EXYNOS_DSIM_SWRST);
-
- reg |= DSIM_SWRST;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_SWRST);
-}
-
-void exynos_mipi_dsi_sw_reset_release(struct mipi_dsim_device *dsim)
-{
- unsigned int reg;
-
- reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-
- reg |= INTSRC_SW_RST_RELEASE;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC);
-}
-
-int exynos_mipi_dsi_get_sw_reset_release(struct mipi_dsim_device *dsim)
-{
- return (readl(dsim->reg_base + EXYNOS_DSIM_INTSRC)) &
- INTSRC_SW_RST_RELEASE;
-}
-
-unsigned int exynos_mipi_dsi_read_interrupt_mask(struct mipi_dsim_device *dsim)
-{
- unsigned int reg;
-
- reg = readl(dsim->reg_base + EXYNOS_DSIM_INTMSK);
-
- return reg;
-}
-
-void exynos_mipi_dsi_set_interrupt_mask(struct mipi_dsim_device *dsim,
- unsigned int mode, unsigned int mask)
-{
- unsigned int reg = 0;
-
- if (mask)
- reg |= mode;
- else
- reg &= ~mode;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_INTMSK);
-}
-
-void exynos_mipi_dsi_init_fifo_pointer(struct mipi_dsim_device *dsim,
- unsigned int cfg)
-{
- unsigned int reg;
-
- reg = readl(dsim->reg_base + EXYNOS_DSIM_FIFOCTRL);
-
- writel(reg & ~(cfg), dsim->reg_base + EXYNOS_DSIM_FIFOCTRL);
- mdelay(10);
- reg |= cfg;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_FIFOCTRL);
-}
-
-/*
- * this function sets the AFC control value used for D-PHY tuning
- */
-void exynos_mipi_dsi_set_phy_tunning(struct mipi_dsim_device *dsim,
- unsigned int value)
-{
- writel(DSIM_AFC_CTL(value), dsim->reg_base + EXYNOS_DSIM_PHYACCHR);
-}
-
-void exynos_mipi_dsi_set_main_stand_by(struct mipi_dsim_device *dsim,
- unsigned int enable)
-{
- unsigned int reg;
-
- reg = readl(dsim->reg_base + EXYNOS_DSIM_MDRESOL);
-
- reg &= ~DSIM_MAIN_STAND_BY;
-
- if (enable)
- reg |= DSIM_MAIN_STAND_BY;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL);
-}
-
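Nearly every helper in this file follows the same read-modify-write pattern: read the register, clear the field's mask, OR in the new value, write back. A generic sketch of that pattern (rmw_field is a hypothetical helper, not the driver API):

	#include <stdint.h>
	#include <stdio.h>

	/* keep everything outside the mask, replace the field inside it */
	static uint32_t rmw_field(uint32_t reg, uint32_t mask, uint32_t val)
	{
		return (reg & ~mask) | (val & mask);
	}

	int main(void)
	{
		uint32_t standby = (uint32_t)1 << 31;	/* DSIM_MAIN_STAND_BY */
		uint32_t reg = 0xffffffffu;

		reg = rmw_field(reg, standby, 0);	/* clear: 0x7fffffff */
		printf("cleared: 0x%08x\n", reg);
		reg = rmw_field(reg, standby, standby);	/* set:   0xffffffff */
		printf("set:     0x%08x\n", reg);
		return 0;
	}
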
-void exynos_mipi_dsi_set_main_disp_resol(struct mipi_dsim_device *dsim,
- unsigned int width_resol, unsigned int height_resol)
-{
- unsigned int reg;
-
-	/* standby should be set after configuration, so clear it first */
- reg = (readl(dsim->reg_base + EXYNOS_DSIM_MDRESOL)) &
- ~(DSIM_MAIN_STAND_BY);
- writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL);
-
- reg &= ~((0x7ff << 16) | (0x7ff << 0));
- reg |= DSIM_MAIN_VRESOL(height_resol) | DSIM_MAIN_HRESOL(width_resol);
-
- reg |= DSIM_MAIN_STAND_BY;
- writel(reg, dsim->reg_base + EXYNOS_DSIM_MDRESOL);
-}
-
-void exynos_mipi_dsi_set_main_disp_vporch(struct mipi_dsim_device *dsim,
- unsigned int cmd_allow, unsigned int vfront, unsigned int vback)
-{
- unsigned int reg;
-
- reg = (readl(dsim->reg_base + EXYNOS_DSIM_MVPORCH)) &
- ~((DSIM_CMD_ALLOW_MASK) | (DSIM_STABLE_VFP_MASK) |
- (DSIM_MAIN_VBP_MASK));
-
- reg |= (DSIM_CMD_ALLOW_SHIFT(cmd_allow & 0xf) |
- DSIM_STABLE_VFP_SHIFT(vfront & 0x7ff) |
- DSIM_MAIN_VBP_SHIFT(vback & 0x7ff));
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_MVPORCH);
-}
-
-void exynos_mipi_dsi_set_main_disp_hporch(struct mipi_dsim_device *dsim,
- unsigned int front, unsigned int back)
-{
- unsigned int reg;
-
- reg = (readl(dsim->reg_base + EXYNOS_DSIM_MHPORCH)) &
- ~((DSIM_MAIN_HFP_MASK) | (DSIM_MAIN_HBP_MASK));
-
- reg |= DSIM_MAIN_HFP_SHIFT(front) | DSIM_MAIN_HBP_SHIFT(back);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_MHPORCH);
-}
-
-void exynos_mipi_dsi_set_main_disp_sync_area(struct mipi_dsim_device *dsim,
- unsigned int vert, unsigned int hori)
-{
- unsigned int reg;
-
- reg = (readl(dsim->reg_base + EXYNOS_DSIM_MSYNC)) &
- ~((DSIM_MAIN_VSA_MASK) | (DSIM_MAIN_HSA_MASK));
-
- reg |= (DSIM_MAIN_VSA_SHIFT(vert & 0x3ff) |
- DSIM_MAIN_HSA_SHIFT(hori));
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_MSYNC);
-}
-
-void exynos_mipi_dsi_set_sub_disp_resol(struct mipi_dsim_device *dsim,
- unsigned int vert, unsigned int hori)
-{
- unsigned int reg;
-
- reg = (readl(dsim->reg_base + EXYNOS_DSIM_SDRESOL)) &
- ~(DSIM_SUB_STANDY_MASK);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL);
-
- reg &= ~(DSIM_SUB_VRESOL_MASK) | ~(DSIM_SUB_HRESOL_MASK);
- reg |= (DSIM_SUB_VRESOL_SHIFT(vert & 0x7ff) |
- DSIM_SUB_HRESOL_SHIFT(hori & 0x7ff));
- writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL);
-
- reg |= DSIM_SUB_STANDY_SHIFT(1);
- writel(reg, dsim->reg_base + EXYNOS_DSIM_SDRESOL);
-}
-
-void exynos_mipi_dsi_init_config(struct mipi_dsim_device *dsim)
-{
- struct mipi_dsim_config *dsim_config = dsim->dsim_config;
-
- unsigned int cfg = (readl(dsim->reg_base + EXYNOS_DSIM_CONFIG)) &
- ~((1 << 28) | (0x1f << 20) | (0x3 << 5));
-
- cfg = ((DSIM_AUTO_FLUSH(dsim_config->auto_flush)) |
- (DSIM_EOT_DISABLE(dsim_config->eot_disable)) |
- (DSIM_AUTO_MODE_SHIFT(dsim_config->auto_vertical_cnt)) |
- (DSIM_HSE_MODE_SHIFT(dsim_config->hse)) |
- (DSIM_HFP_MODE_SHIFT(dsim_config->hfp)) |
- (DSIM_HBP_MODE_SHIFT(dsim_config->hbp)) |
- (DSIM_HSA_MODE_SHIFT(dsim_config->hsa)) |
- (DSIM_NUM_OF_DATALANE_SHIFT(dsim_config->e_no_data_lane)));
-
- writel(cfg, dsim->reg_base + EXYNOS_DSIM_CONFIG);
-}
-
-void exynos_mipi_dsi_display_config(struct mipi_dsim_device *dsim,
- struct mipi_dsim_config *dsim_config)
-{
- u32 reg = (readl(dsim->reg_base + EXYNOS_DSIM_CONFIG)) &
- ~((0x3 << 26) | (1 << 25) | (0x3 << 18) | (0x7 << 12) |
- (0x3 << 16) | (0x7 << 8));
-
- if (dsim_config->e_interface == DSIM_VIDEO)
- reg |= (1 << 25);
- else if (dsim_config->e_interface == DSIM_COMMAND)
- reg &= ~(1 << 25);
- else {
- dev_err(dsim->dev, "unknown lcd type.\n");
- return;
- }
-
- /* main lcd */
- reg |= ((u8) (dsim_config->e_burst_mode) & 0x3) << 26 |
- ((u8) (dsim_config->e_virtual_ch) & 0x3) << 18 |
- ((u8) (dsim_config->e_pixel_format) & 0x7) << 12;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_CONFIG);
-}
-
-void exynos_mipi_dsi_enable_lane(struct mipi_dsim_device *dsim, unsigned int lane,
- unsigned int enable)
-{
- unsigned int reg;
-
- reg = readl(dsim->reg_base + EXYNOS_DSIM_CONFIG);
-
- if (enable)
- reg |= DSIM_LANE_ENx(lane);
- else
- reg &= ~DSIM_LANE_ENx(lane);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_CONFIG);
-}
-
-void exynos_mipi_dsi_set_data_lane_number(struct mipi_dsim_device *dsim,
- unsigned int count)
-{
- unsigned int cfg;
-
-	/* set the number of data lanes. */
- cfg = DSIM_NUM_OF_DATALANE_SHIFT(count);
-
- writel(cfg, dsim->reg_base + EXYNOS_DSIM_CONFIG);
-}
-
-void exynos_mipi_dsi_enable_afc(struct mipi_dsim_device *dsim, unsigned int enable,
- unsigned int afc_code)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PHYACCHR);
-
- if (enable) {
- reg |= (1 << 14);
- reg &= ~(0x7 << 5);
- reg |= (afc_code & 0x7) << 5;
- } else
- reg &= ~(1 << 14);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PHYACCHR);
-}
-
-void exynos_mipi_dsi_enable_pll_bypass(struct mipi_dsim_device *dsim,
- unsigned int enable)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
- ~(DSIM_PLL_BYPASS_SHIFT(0x1));
-
- reg |= DSIM_PLL_BYPASS_SHIFT(enable);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_set_pll_pms(struct mipi_dsim_device *dsim, unsigned int p,
- unsigned int m, unsigned int s)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-
- reg |= ((p & 0x3f) << 13) | ((m & 0x1ff) << 4) | ((s & 0x7) << 1);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_pll_freq_band(struct mipi_dsim_device *dsim,
- unsigned int freq_band)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
- ~(DSIM_FREQ_BAND_SHIFT(0x1f));
-
- reg |= DSIM_FREQ_BAND_SHIFT(freq_band & 0x1f);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_pll_freq(struct mipi_dsim_device *dsim,
- unsigned int pre_divider, unsigned int main_divider,
- unsigned int scaler)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
- ~(0x7ffff << 1);
-
- reg |= (pre_divider & 0x3f) << 13 | (main_divider & 0x1ff) << 4 |
- (scaler & 0x7) << 1;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_pll_stable_time(struct mipi_dsim_device *dsim,
- unsigned int lock_time)
-{
- writel(lock_time, dsim->reg_base + EXYNOS_DSIM_PLLTMR);
-}
-
-void exynos_mipi_dsi_enable_pll(struct mipi_dsim_device *dsim, unsigned int enable)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
- ~(DSIM_PLL_EN_SHIFT(0x1));
-
- reg |= DSIM_PLL_EN_SHIFT(enable & 0x1);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_set_byte_clock_src(struct mipi_dsim_device *dsim,
- unsigned int src)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
- ~(DSIM_BYTE_CLK_SRC_SHIFT(0x3));
-
- reg |= (DSIM_BYTE_CLK_SRC_SHIFT(src));
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_enable_byte_clock(struct mipi_dsim_device *dsim,
- unsigned int enable)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
- ~(DSIM_BYTE_CLKEN_SHIFT(0x1));
-
- reg |= DSIM_BYTE_CLKEN_SHIFT(enable);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_set_esc_clk_prs(struct mipi_dsim_device *dsim,
- unsigned int enable, unsigned int prs_val)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
- ~(DSIM_ESC_CLKEN_SHIFT(0x1) | 0xffff);
-
- reg |= DSIM_ESC_CLKEN_SHIFT(enable);
- if (enable)
- reg |= prs_val;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_enable_esc_clk_on_lane(struct mipi_dsim_device *dsim,
- unsigned int lane_sel, unsigned int enable)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-
- if (enable)
- reg |= DSIM_LANE_ESC_CLKEN(lane_sel);
-	else
-		reg &= ~DSIM_LANE_ESC_CLKEN(lane_sel);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_force_dphy_stop_state(struct mipi_dsim_device *dsim,
- unsigned int enable)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE)) &
- ~(DSIM_FORCE_STOP_STATE_SHIFT(0x1));
-
- reg |= (DSIM_FORCE_STOP_STATE_SHIFT(enable & 0x1));
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-}
-
-unsigned int exynos_mipi_dsi_is_lane_state(struct mipi_dsim_device *dsim)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_STATUS);
-
-	/*
-	 * check clock and data lane states.
-	 * if the MIPI-DSI controller was enabled by the bootloader then
-	 * TX_READY_HS_CLK is set, otherwise STOP_STATE_CLK is, so both
-	 * cases have to be checked.
-	 */
- if ((reg & DSIM_STOP_STATE_DAT(0xf)) &&
- ((reg & DSIM_STOP_STATE_CLK) ||
- (reg & DSIM_TX_READY_HS_CLK)))
- return 1;
-
- return 0;
-}
-
-void exynos_mipi_dsi_set_stop_state_counter(struct mipi_dsim_device *dsim,
- unsigned int cnt_val)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE)) &
- ~(DSIM_STOP_STATE_CNT_SHIFT(0x7ff));
-
- reg |= (DSIM_STOP_STATE_CNT_SHIFT(cnt_val & 0x7ff));
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-}
-
-void exynos_mipi_dsi_set_bta_timeout(struct mipi_dsim_device *dsim,
- unsigned int timeout)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_TIMEOUT)) &
- ~(DSIM_BTA_TOUT_SHIFT(0xff));
-
- reg |= (DSIM_BTA_TOUT_SHIFT(timeout));
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_TIMEOUT);
-}
-
-void exynos_mipi_dsi_set_lpdr_timeout(struct mipi_dsim_device *dsim,
- unsigned int timeout)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_TIMEOUT)) &
- ~(DSIM_LPDR_TOUT_SHIFT(0xffff));
-
- reg |= (DSIM_LPDR_TOUT_SHIFT(timeout));
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_TIMEOUT);
-}
-
-void exynos_mipi_dsi_set_cpu_transfer_mode(struct mipi_dsim_device *dsim,
- unsigned int lp)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-
- reg &= ~DSIM_CMD_LPDT_LP;
-
- if (lp)
- reg |= DSIM_CMD_LPDT_LP;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-}
-
-void exynos_mipi_dsi_set_lcdc_transfer_mode(struct mipi_dsim_device *dsim,
- unsigned int lp)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-
- reg &= ~DSIM_TX_LPDT_LP;
-
- if (lp)
- reg |= DSIM_TX_LPDT_LP;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_ESCMODE);
-}
-
-void exynos_mipi_dsi_enable_hs_clock(struct mipi_dsim_device *dsim,
- unsigned int enable)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_CLKCTRL)) &
- ~(DSIM_TX_REQUEST_HSCLK_SHIFT(0x1));
-
- reg |= DSIM_TX_REQUEST_HSCLK_SHIFT(enable);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_CLKCTRL);
-}
-
-void exynos_mipi_dsi_dp_dn_swap(struct mipi_dsim_device *dsim,
- unsigned int swap_en)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_PHYACCHR1);
-
- reg &= ~(0x3 << 0);
- reg |= (swap_en & 0x3) << 0;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PHYACCHR1);
-}
-
-void exynos_mipi_dsi_hs_zero_ctrl(struct mipi_dsim_device *dsim,
- unsigned int hs_zero)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
- ~(0xf << 28);
-
- reg |= ((hs_zero & 0xf) << 28);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-void exynos_mipi_dsi_prep_ctrl(struct mipi_dsim_device *dsim, unsigned int prep)
-{
- unsigned int reg = (readl(dsim->reg_base + EXYNOS_DSIM_PLLCTRL)) &
- ~(0x7 << 20);
-
- reg |= ((prep & 0x7) << 20);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PLLCTRL);
-}
-
-unsigned int exynos_mipi_dsi_read_interrupt(struct mipi_dsim_device *dsim)
-{
- return readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-}
-
-void exynos_mipi_dsi_clear_interrupt(struct mipi_dsim_device *dsim,
- unsigned int src)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-
- reg |= src;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC);
-}
-
-void exynos_mipi_dsi_set_interrupt(struct mipi_dsim_device *dsim,
- unsigned int src, unsigned int enable)
-{
- unsigned int reg = 0;
-
- if (enable)
- reg |= src;
- else
- reg &= ~src;
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_INTSRC);
-}
-
-unsigned int exynos_mipi_dsi_is_pll_stable(struct mipi_dsim_device *dsim)
-{
- unsigned int reg;
-
- reg = readl(dsim->reg_base + EXYNOS_DSIM_STATUS);
-
- return reg & (1 << 31) ? 1 : 0;
-}
-
-unsigned int exynos_mipi_dsi_get_fifo_state(struct mipi_dsim_device *dsim)
-{
- return readl(dsim->reg_base + EXYNOS_DSIM_FIFOCTRL) & ~(0x1f);
-}
-
-void exynos_mipi_dsi_wr_tx_header(struct mipi_dsim_device *dsim,
- unsigned int di, unsigned int data0, unsigned int data1)
-{
- unsigned int reg = (data1 << 16) | (data0 << 8) | ((di & 0x3f) << 0);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PKTHDR);
-}
-
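The header word written above follows the DSI packet header layout: data ID in bits 0..7 (masked to 6 bits by this driver), word-count/parameter LSB in bits 8..15, MSB in bits 16..23. A sketch (dsi_pkt_header is a hypothetical helper mirroring the line above):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t dsi_pkt_header(uint8_t di, uint8_t data0, uint8_t data1)
	{
		return ((uint32_t)data1 << 16) | ((uint32_t)data0 << 8) | (di & 0x3f);
	}

	int main(void)
	{
		/* DCS long write (0x39) with a 26-byte payload: WC = 0x001a */
		printf("header: 0x%08x\n", dsi_pkt_header(0x39, 0x1a, 0x00));
		return 0;	/* prints header: 0x00001a39 */
	}
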
-void exynos_mipi_dsi_rd_tx_header(struct mipi_dsim_device *dsim,
- unsigned int di, unsigned int data0)
-{
- unsigned int reg = (data0 << 8) | (di << 0);
-
- writel(reg, dsim->reg_base + EXYNOS_DSIM_PKTHDR);
-}
-
-unsigned int exynos_mipi_dsi_rd_rx_fifo(struct mipi_dsim_device *dsim)
-{
- return readl(dsim->reg_base + EXYNOS_DSIM_RXFIFO);
-}
-
-unsigned int _exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-
- return (reg & INTSRC_FRAME_DONE) ? 1 : 0;
-}
-
-void _exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim)
-{
- unsigned int reg = readl(dsim->reg_base + EXYNOS_DSIM_INTSRC);
-
- writel(reg | INTSRC_FRAME_DONE, dsim->reg_base +
- EXYNOS_DSIM_INTSRC);
-}
-
-void exynos_mipi_dsi_wr_tx_data(struct mipi_dsim_device *dsim,
- unsigned int tx_data)
-{
- writel(tx_data, dsim->reg_base + EXYNOS_DSIM_PAYLOAD);
-}
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h
deleted file mode 100644
index 85460701c7ea..000000000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/* linux/drivers/video/exynos/exynos_mipi_dsi_lowlevel.h
- *
- * Header file for Samsung SoC MIPI-DSI lowlevel driver.
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _EXYNOS_MIPI_DSI_LOWLEVEL_H
-#define _EXYNOS_MIPI_DSI_LOWLEVEL_H
-
-void exynos_mipi_dsi_func_reset(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_sw_reset(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_sw_reset_release(struct mipi_dsim_device *dsim);
-int exynos_mipi_dsi_get_sw_reset_release(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_set_interrupt_mask(struct mipi_dsim_device *dsim,
- unsigned int mode, unsigned int mask);
-void exynos_mipi_dsi_set_data_lane_number(struct mipi_dsim_device *dsim,
- unsigned int count);
-void exynos_mipi_dsi_init_fifo_pointer(struct mipi_dsim_device *dsim,
- unsigned int cfg);
-void exynos_mipi_dsi_set_phy_tunning(struct mipi_dsim_device *dsim,
- unsigned int value);
-void exynos_mipi_dsi_set_main_stand_by(struct mipi_dsim_device *dsim,
- unsigned int enable);
-void exynos_mipi_dsi_set_main_disp_resol(struct mipi_dsim_device *dsim,
- unsigned int width_resol, unsigned int height_resol);
-void exynos_mipi_dsi_set_main_disp_vporch(struct mipi_dsim_device *dsim,
- unsigned int cmd_allow, unsigned int vfront, unsigned int vback);
-void exynos_mipi_dsi_set_main_disp_hporch(struct mipi_dsim_device *dsim,
- unsigned int front, unsigned int back);
-void exynos_mipi_dsi_set_main_disp_sync_area(struct mipi_dsim_device *dsim,
- unsigned int vert, unsigned int hori);
-void exynos_mipi_dsi_set_sub_disp_resol(struct mipi_dsim_device *dsim,
- unsigned int vert, unsigned int hori);
-void exynos_mipi_dsi_init_config(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_display_config(struct mipi_dsim_device *dsim,
- struct mipi_dsim_config *dsim_config);
-void exynos_mipi_dsi_enable_lane(struct mipi_dsim_device *dsim, unsigned int lane,
- unsigned int enable);
-void exynos_mipi_dsi_enable_afc(struct mipi_dsim_device *dsim, unsigned int enable,
- unsigned int afc_code);
-void exynos_mipi_dsi_enable_pll_bypass(struct mipi_dsim_device *dsim,
- unsigned int enable);
-void exynos_mipi_dsi_set_pll_pms(struct mipi_dsim_device *dsim, unsigned int p,
- unsigned int m, unsigned int s);
-void exynos_mipi_dsi_pll_freq_band(struct mipi_dsim_device *dsim,
- unsigned int freq_band);
-void exynos_mipi_dsi_pll_freq(struct mipi_dsim_device *dsim,
- unsigned int pre_divider, unsigned int main_divider,
- unsigned int scaler);
-void exynos_mipi_dsi_pll_stable_time(struct mipi_dsim_device *dsim,
- unsigned int lock_time);
-void exynos_mipi_dsi_enable_pll(struct mipi_dsim_device *dsim,
- unsigned int enable);
-void exynos_mipi_dsi_set_byte_clock_src(struct mipi_dsim_device *dsim,
- unsigned int src);
-void exynos_mipi_dsi_enable_byte_clock(struct mipi_dsim_device *dsim,
- unsigned int enable);
-void exynos_mipi_dsi_set_esc_clk_prs(struct mipi_dsim_device *dsim,
- unsigned int enable, unsigned int prs_val);
-void exynos_mipi_dsi_enable_esc_clk_on_lane(struct mipi_dsim_device *dsim,
- unsigned int lane_sel, unsigned int enable);
-void exynos_mipi_dsi_force_dphy_stop_state(struct mipi_dsim_device *dsim,
- unsigned int enable);
-unsigned int exynos_mipi_dsi_is_lane_state(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_set_stop_state_counter(struct mipi_dsim_device *dsim,
- unsigned int cnt_val);
-void exynos_mipi_dsi_set_bta_timeout(struct mipi_dsim_device *dsim,
- unsigned int timeout);
-void exynos_mipi_dsi_set_lpdr_timeout(struct mipi_dsim_device *dsim,
- unsigned int timeout);
-void exynos_mipi_dsi_set_lcdc_transfer_mode(struct mipi_dsim_device *dsim,
- unsigned int lp);
-void exynos_mipi_dsi_set_cpu_transfer_mode(struct mipi_dsim_device *dsim,
- unsigned int lp);
-void exynos_mipi_dsi_enable_hs_clock(struct mipi_dsim_device *dsim,
- unsigned int enable);
-void exynos_mipi_dsi_dp_dn_swap(struct mipi_dsim_device *dsim,
- unsigned int swap_en);
-void exynos_mipi_dsi_hs_zero_ctrl(struct mipi_dsim_device *dsim,
- unsigned int hs_zero);
-void exynos_mipi_dsi_prep_ctrl(struct mipi_dsim_device *dsim, unsigned int prep);
-unsigned int exynos_mipi_dsi_read_interrupt(struct mipi_dsim_device *dsim);
-unsigned int exynos_mipi_dsi_read_interrupt_mask(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_clear_interrupt(struct mipi_dsim_device *dsim,
- unsigned int src);
-void exynos_mipi_dsi_set_interrupt(struct mipi_dsim_device *dsim,
- unsigned int src, unsigned int enable);
-unsigned int exynos_mipi_dsi_is_pll_stable(struct mipi_dsim_device *dsim);
-unsigned int exynos_mipi_dsi_get_fifo_state(struct mipi_dsim_device *dsim);
-unsigned int _exynos_mipi_dsi_get_frame_done_status(struct mipi_dsim_device *dsim);
-void _exynos_mipi_dsi_clear_frame_done(struct mipi_dsim_device *dsim);
-void exynos_mipi_dsi_wr_tx_header(struct mipi_dsim_device *dsim, unsigned int di,
- unsigned int data0, unsigned int data1);
-void exynos_mipi_dsi_wr_tx_data(struct mipi_dsim_device *dsim,
- unsigned int tx_data);
-void exynos_mipi_dsi_rd_tx_header(struct mipi_dsim_device *dsim,
-	unsigned int di, unsigned int data0);
-unsigned int exynos_mipi_dsi_rd_rx_fifo(struct mipi_dsim_device *dsim);
-
-#endif /* _EXYNOS_MIPI_DSI_LOWLEVEL_H */
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h
deleted file mode 100644
index 4227106d3fd0..000000000000
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* linux/driver/video/exynos/exynos_mipi_dsi_regs.h
- *
- * Register definition file for Samsung MIPI-DSIM driver
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- *
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _EXYNOS_MIPI_DSI_REGS_H
-#define _EXYNOS_MIPI_DSI_REGS_H
-
-#define EXYNOS_DSIM_STATUS 0x0 /* Status register */
-#define EXYNOS_DSIM_SWRST 0x4 /* Software reset register */
-#define EXYNOS_DSIM_CLKCTRL 0x8 /* Clock control register */
-#define EXYNOS_DSIM_TIMEOUT 0xc /* Time out register */
-#define EXYNOS_DSIM_CONFIG 0x10 /* Configuration register */
-#define EXYNOS_DSIM_ESCMODE 0x14 /* Escape mode register */
-
-/* Main display image resolution register */
-#define EXYNOS_DSIM_MDRESOL 0x18
-#define EXYNOS_DSIM_MVPORCH 0x1c /* Main display Vporch register */
-#define EXYNOS_DSIM_MHPORCH 0x20 /* Main display Hporch register */
-#define EXYNOS_DSIM_MSYNC 0x24 /* Main display sync area register */
-
-/* Sub display image resolution register */
-#define EXYNOS_DSIM_SDRESOL 0x28
-#define EXYNOS_DSIM_INTSRC 0x2c /* Interrupt source register */
-#define EXYNOS_DSIM_INTMSK 0x30 /* Interrupt mask register */
-#define EXYNOS_DSIM_PKTHDR 0x34 /* Packet Header FIFO register */
-#define EXYNOS_DSIM_PAYLOAD 0x38 /* Payload FIFO register */
-#define EXYNOS_DSIM_RXFIFO 0x3c /* Read FIFO register */
-#define EXYNOS_DSIM_FIFOTHLD 0x40 /* FIFO threshold level register */
-#define EXYNOS_DSIM_FIFOCTRL 0x44 /* FIFO status and control register */
-
-/* FIFO memory AC characteristic register */
-#define EXYNOS_DSIM_PLLCTRL 0x4c /* PLL control register */
-#define EXYNOS_DSIM_PLLTMR 0x50 /* PLL timer register */
-#define EXYNOS_DSIM_PHYACCHR 0x54 /* D-PHY AC characteristic register */
-#define EXYNOS_DSIM_PHYACCHR1 0x58 /* D-PHY AC characteristic register1 */
-
-/* DSIM_STATUS */
-#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
-#define DSIM_STOP_STATE_CLK (1 << 8)
-#define DSIM_TX_READY_HS_CLK (1 << 10)
-
-/* DSIM_SWRST */
-#define DSIM_FUNCRST (1 << 16)
-#define DSIM_SWRST (1 << 0)
-
-/* EXYNOS_DSIM_TIMEOUT */
-#define DSIM_LPDR_TOUT_SHIFT(x) ((x) << 0)
-#define DSIM_BTA_TOUT_SHIFT(x) ((x) << 16)
-
-/* EXYNOS_DSIM_CLKCTRL */
-#define DSIM_LANE_ESC_CLKEN(x) (((x) & 0x1f) << 19)
-#define DSIM_BYTE_CLKEN_SHIFT(x) ((x) << 24)
-#define DSIM_BYTE_CLK_SRC_SHIFT(x) ((x) << 25)
-#define DSIM_PLL_BYPASS_SHIFT(x) ((x) << 27)
-#define DSIM_ESC_CLKEN_SHIFT(x) ((x) << 28)
-#define DSIM_TX_REQUEST_HSCLK_SHIFT(x) ((x) << 31)
-
-/* EXYNOS_DSIM_CONFIG */
-#define DSIM_LANE_ENx(x) (((x) & 0x1f) << 0)
-#define DSIM_NUM_OF_DATALANE_SHIFT(x) ((x) << 5)
-#define DSIM_HSA_MODE_SHIFT(x) ((x) << 20)
-#define DSIM_HBP_MODE_SHIFT(x) ((x) << 21)
-#define DSIM_HFP_MODE_SHIFT(x) ((x) << 22)
-#define DSIM_HSE_MODE_SHIFT(x) ((x) << 23)
-#define DSIM_AUTO_MODE_SHIFT(x) ((x) << 24)
-#define DSIM_EOT_DISABLE(x) ((x) << 28)
-#define DSIM_AUTO_FLUSH(x) ((x) << 29)
-
-#define DSIM_NUM_OF_DATA_LANE(x)	DSIM_NUM_OF_DATALANE_SHIFT(x)
-
-/* EXYNOS_DSIM_ESCMODE */
-#define DSIM_TX_LPDT_LP (1 << 6)
-#define DSIM_CMD_LPDT_LP (1 << 7)
-#define DSIM_FORCE_STOP_STATE_SHIFT(x) ((x) << 20)
-#define DSIM_STOP_STATE_CNT_SHIFT(x) ((x) << 21)
-
-/* EXYNOS_DSIM_MDRESOL */
-#define DSIM_MAIN_STAND_BY (1 << 31)
-#define DSIM_MAIN_VRESOL(x) (((x) & 0x7ff) << 16)
-#define DSIM_MAIN_HRESOL(x)		(((x) & 0x7ff) << 0)
-
-/* EXYNOS_DSIM_MVPORCH */
-#define DSIM_CMD_ALLOW_SHIFT(x) ((x) << 28)
-#define DSIM_STABLE_VFP_SHIFT(x) ((x) << 16)
-#define DSIM_MAIN_VBP_SHIFT(x) ((x) << 0)
-#define DSIM_CMD_ALLOW_MASK (0xf << 28)
-#define DSIM_STABLE_VFP_MASK (0x7ff << 16)
-#define DSIM_MAIN_VBP_MASK (0x7ff << 0)
-
-/* EXYNOS_DSIM_MHPORCH */
-#define DSIM_MAIN_HFP_SHIFT(x) ((x) << 16)
-#define DSIM_MAIN_HBP_SHIFT(x) ((x) << 0)
-#define DSIM_MAIN_HFP_MASK ((0xffff) << 16)
-#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
-
-/* EXYNOS_DSIM_MSYNC */
-#define DSIM_MAIN_VSA_SHIFT(x) ((x) << 22)
-#define DSIM_MAIN_HSA_SHIFT(x) ((x) << 0)
-#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
-#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
-
-/* EXYNOS_DSIM_SDRESOL */
-#define DSIM_SUB_STANDY_SHIFT(x) ((x) << 31)
-#define DSIM_SUB_VRESOL_SHIFT(x) ((x) << 16)
-#define DSIM_SUB_HRESOL_SHIFT(x) ((x) << 0)
-#define DSIM_SUB_STANDY_MASK ((0x1) << 31)
-#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16)
-#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0)
-
-/* EXYNOS_DSIM_INTSRC */
-#define INTSRC_PLL_STABLE (1 << 31)
-#define INTSRC_SW_RST_RELEASE (1 << 30)
-#define INTSRC_SFR_FIFO_EMPTY (1 << 29)
-#define INTSRC_FRAME_DONE (1 << 24)
-#define INTSRC_RX_DATA_DONE (1 << 18)
-
-/* EXYNOS_DSIM_INTMSK */
-#define INTMSK_FIFO_EMPTY (1 << 29)
-#define INTMSK_BTA (1 << 25)
-#define INTMSK_FRAME_DONE (1 << 24)
-#define INTMSK_RX_TIMEOUT (1 << 21)
-#define INTMSK_BTA_TIMEOUT (1 << 20)
-#define INTMSK_RX_DONE (1 << 18)
-#define INTMSK_RX_TE (1 << 17)
-#define INTMSK_RX_ACK (1 << 16)
-#define INTMSK_RX_ECC_ERR (1 << 15)
-#define INTMSK_RX_CRC_ERR (1 << 14)
-
-/* EXYNOS_DSIM_FIFOCTRL */
-#define SFR_HEADER_EMPTY (1 << 22)
-
-/* EXYNOS_DSIM_PHYACCHR */
-#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5)
-
-/* EXYNOS_DSIM_PLLCTRL */
-#define DSIM_PLL_EN_SHIFT(x) ((x) << 23)
-#define DSIM_FREQ_BAND_SHIFT(x) ((x) << 24)
-
-#endif /* _EXYNOS_MIPI_DSI_REGS_H */
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
deleted file mode 100644
index de2f3e793786..000000000000
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ /dev/null
@@ -1,887 +0,0 @@
-/* linux/drivers/video/exynos/s6e8ax0.c
- *
- * MIPI-DSI based s6e8ax0 AMOLED lcd 4.65 inch panel driver.
- *
- * Inki Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/ctype.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/lcd.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-#include <linux/regulator/consumer.h>
-
-#include <video/mipi_display.h>
-#include <video/exynos_mipi_dsim.h>
-
-#define LDI_MTP_LENGTH 24
-#define DSIM_PM_STABLE_TIME 10
-#define MIN_BRIGHTNESS 0
-#define MAX_BRIGHTNESS 24
-#define GAMMA_TABLE_COUNT 26
-
-#define POWER_IS_ON(pwr) ((pwr) == FB_BLANK_UNBLANK)
-#define POWER_IS_OFF(pwr) ((pwr) == FB_BLANK_POWERDOWN)
-#define POWER_IS_NRM(pwr) ((pwr) == FB_BLANK_NORMAL)
-
-#define lcd_to_master(a) (a->dsim_dev->master)
-#define lcd_to_master_ops(a) ((lcd_to_master(a))->master_ops)
-
-enum {
- DSIM_NONE_STATE = 0,
- DSIM_RESUME_COMPLETE = 1,
- DSIM_FRAME_DONE = 2,
-};
-
-struct s6e8ax0 {
- struct device *dev;
- unsigned int power;
- unsigned int id;
- unsigned int gamma;
- unsigned int acl_enable;
- unsigned int cur_acl;
-
- struct lcd_device *ld;
- struct backlight_device *bd;
-
- struct mipi_dsim_lcd_device *dsim_dev;
- struct lcd_platform_data *ddi_pd;
- struct mutex lock;
- bool enabled;
-};
-
-
-static struct regulator_bulk_data supplies[] = {
- { .supply = "vdd3", },
- { .supply = "vci", },
-};
-
-static void s6e8ax0_regulator_enable(struct s6e8ax0 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
-
- pd = lcd->ddi_pd;
- mutex_lock(&lcd->lock);
- if (!lcd->enabled) {
- ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
- if (ret)
- goto out;
-
- lcd->enabled = true;
- }
- msleep(pd->power_on_delay);
-out:
- mutex_unlock(&lcd->lock);
-}
-
-static void s6e8ax0_regulator_disable(struct s6e8ax0 *lcd)
-{
- int ret = 0;
-
- mutex_lock(&lcd->lock);
- if (lcd->enabled) {
- ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
- if (ret)
- goto out;
-
- lcd->enabled = false;
- }
-out:
- mutex_unlock(&lcd->lock);
-}
-
-static const unsigned char s6e8ax0_22_gamma_30[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xf5, 0x00, 0xff, 0xad, 0xaf,
- 0xbA, 0xc3, 0xd8, 0xc5, 0x9f, 0xc6, 0x9e, 0xc1, 0xdc, 0xc0,
- 0x00, 0x61, 0x00, 0x5a, 0x00, 0x74,
-};
-
-static const unsigned char s6e8ax0_22_gamma_50[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xe8, 0x1f, 0xf7, 0xad, 0xc0,
- 0xb5, 0xc4, 0xdc, 0xc4, 0x9e, 0xc6, 0x9c, 0xbb, 0xd8, 0xbb,
- 0x00, 0x70, 0x00, 0x68, 0x00, 0x86,
-};
-
-static const unsigned char s6e8ax0_22_gamma_60[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xde, 0x1f, 0xef, 0xad, 0xc4,
- 0xb3, 0xc3, 0xdd, 0xc4, 0x9e, 0xc6, 0x9c, 0xbc, 0xd6, 0xba,
- 0x00, 0x75, 0x00, 0x6e, 0x00, 0x8d,
-};
-
-static const unsigned char s6e8ax0_22_gamma_70[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xd8, 0x1f, 0xe7, 0xaf, 0xc8,
- 0xb4, 0xc4, 0xdd, 0xc3, 0x9d, 0xc6, 0x9c, 0xbb, 0xd6, 0xb9,
- 0x00, 0x7a, 0x00, 0x72, 0x00, 0x93,
-};
-
-static const unsigned char s6e8ax0_22_gamma_80[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xc9, 0x1f, 0xde, 0xae, 0xc9,
- 0xb1, 0xc3, 0xdd, 0xc2, 0x9d, 0xc5, 0x9b, 0xbc, 0xd6, 0xbb,
- 0x00, 0x7f, 0x00, 0x77, 0x00, 0x99,
-};
-
-static const unsigned char s6e8ax0_22_gamma_90[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xc7, 0x1f, 0xd9, 0xb0, 0xcc,
- 0xb2, 0xc3, 0xdc, 0xc1, 0x9c, 0xc6, 0x9c, 0xbc, 0xd4, 0xb9,
- 0x00, 0x83, 0x00, 0x7b, 0x00, 0x9e,
-};
-
-static const unsigned char s6e8ax0_22_gamma_100[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xbd, 0x80, 0xcd, 0xba, 0xce,
- 0xb3, 0xc4, 0xde, 0xc3, 0x9c, 0xc4, 0x9, 0xb8, 0xd3, 0xb6,
- 0x00, 0x88, 0x00, 0x80, 0x00, 0xa5,
-};
-
-static const unsigned char s6e8ax0_22_gamma_120[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb9, 0x95, 0xc8, 0xb1, 0xcf,
- 0xb2, 0xc6, 0xdf, 0xc5, 0x9b, 0xc3, 0x99, 0xb6, 0xd2, 0xb6,
- 0x00, 0x8f, 0x00, 0x86, 0x00, 0xac,
-};
-
-static const unsigned char s6e8ax0_22_gamma_130[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb7, 0xa0, 0xc7, 0xb1, 0xd0,
- 0xb2, 0xc4, 0xdd, 0xc3, 0x9a, 0xc3, 0x98, 0xb6, 0xd0, 0xb4,
- 0x00, 0x92, 0x00, 0x8a, 0x00, 0xb1,
-};
-
-static const unsigned char s6e8ax0_22_gamma_140[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb7, 0xa0, 0xc5, 0xb2, 0xd0,
- 0xb3, 0xc3, 0xde, 0xc3, 0x9b, 0xc2, 0x98, 0xb6, 0xd0, 0xb4,
- 0x00, 0x95, 0x00, 0x8d, 0x00, 0xb5,
-};
-
-static const unsigned char s6e8ax0_22_gamma_150[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb3, 0xa0, 0xc2, 0xb2, 0xd0,
- 0xb2, 0xc1, 0xdd, 0xc2, 0x9b, 0xc2, 0x98, 0xb4, 0xcf, 0xb1,
- 0x00, 0x99, 0x00, 0x90, 0x00, 0xba,
-};
-
-static const unsigned char s6e8ax0_22_gamma_160[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xa5, 0xbf, 0xb0, 0xd0,
- 0xb1, 0xc3, 0xde, 0xc2, 0x99, 0xc1, 0x97, 0xb4, 0xce, 0xb1,
- 0x00, 0x9c, 0x00, 0x93, 0x00, 0xbe,
-};
-
-static const unsigned char s6e8ax0_22_gamma_170[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb5, 0xbf, 0xb1, 0xd1,
- 0xb1, 0xc3, 0xde, 0xc3, 0x99, 0xc0, 0x96, 0xb4, 0xce, 0xb1,
- 0x00, 0x9f, 0x00, 0x96, 0x00, 0xc2,
-};
-
-static const unsigned char s6e8ax0_22_gamma_180[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb7, 0xbe, 0xb3, 0xd2,
- 0xb3, 0xc3, 0xde, 0xc2, 0x97, 0xbf, 0x95, 0xb4, 0xcd, 0xb1,
- 0x00, 0xa2, 0x00, 0x99, 0x00, 0xc5,
-};
-
-static const unsigned char s6e8ax0_22_gamma_190[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb9, 0xbe, 0xb2, 0xd2,
- 0xb2, 0xc3, 0xdd, 0xc3, 0x98, 0xbf, 0x95, 0xb2, 0xcc, 0xaf,
- 0x00, 0xa5, 0x00, 0x9c, 0x00, 0xc9,
-};
-
-static const unsigned char s6e8ax0_22_gamma_200[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xaf, 0xb9, 0xbc, 0xb2, 0xd2,
- 0xb1, 0xc4, 0xdd, 0xc3, 0x97, 0xbe, 0x95, 0xb1, 0xcb, 0xae,
- 0x00, 0xa8, 0x00, 0x9f, 0x00, 0xcd,
-};
-
-static const unsigned char s6e8ax0_22_gamma_210[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xc1, 0xbd, 0xb1, 0xd1,
- 0xb1, 0xc2, 0xde, 0xc2, 0x97, 0xbe, 0x94, 0xB0, 0xc9, 0xad,
- 0x00, 0xae, 0x00, 0xa4, 0x00, 0xd4,
-};
-
-static const unsigned char s6e8ax0_22_gamma_220[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xc7, 0xbd, 0xb1, 0xd1,
- 0xb1, 0xc2, 0xdd, 0xc2, 0x97, 0xbd, 0x94, 0xb0, 0xc9, 0xad,
- 0x00, 0xad, 0x00, 0xa2, 0x00, 0xd3,
-};
-
-static const unsigned char s6e8ax0_22_gamma_230[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xc3, 0xbd, 0xb2, 0xd1,
- 0xb1, 0xc3, 0xdd, 0xc1, 0x96, 0xbd, 0x94, 0xb0, 0xc9, 0xad,
- 0x00, 0xb0, 0x00, 0xa7, 0x00, 0xd7,
-};
-
-static const unsigned char s6e8ax0_22_gamma_240[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb1, 0xcb, 0xbd, 0xb1, 0xd2,
- 0xb1, 0xc3, 0xdD, 0xc2, 0x95, 0xbd, 0x93, 0xaf, 0xc8, 0xab,
- 0x00, 0xb3, 0x00, 0xa9, 0x00, 0xdb,
-};
-
-static const unsigned char s6e8ax0_22_gamma_250[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb3, 0xcc, 0xbe, 0xb0, 0xd2,
- 0xb0, 0xc3, 0xdD, 0xc2, 0x94, 0xbc, 0x92, 0xae, 0xc8, 0xab,
- 0x00, 0xb6, 0x00, 0xab, 0x00, 0xde,
-};
-
-static const unsigned char s6e8ax0_22_gamma_260[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb3, 0xd0, 0xbe, 0xaf, 0xd1,
- 0xaf, 0xc2, 0xdd, 0xc1, 0x96, 0xbc, 0x93, 0xaf, 0xc8, 0xac,
- 0x00, 0xb7, 0x00, 0xad, 0x00, 0xe0,
-};
-
-static const unsigned char s6e8ax0_22_gamma_270[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb2, 0xcF, 0xbd, 0xb0, 0xd2,
- 0xaf, 0xc2, 0xdc, 0xc1, 0x95, 0xbd, 0x93, 0xae, 0xc6, 0xaa,
- 0x00, 0xba, 0x00, 0xb0, 0x00, 0xe4,
-};
-
-static const unsigned char s6e8ax0_22_gamma_280[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb2, 0xd0, 0xbd, 0xaf, 0xd0,
- 0xad, 0xc4, 0xdd, 0xc3, 0x95, 0xbd, 0x93, 0xac, 0xc5, 0xa9,
- 0x00, 0xbd, 0x00, 0xb2, 0x00, 0xe7,
-};
-
-static const unsigned char s6e8ax0_22_gamma_300[] = {
- 0xfa, 0x01, 0x60, 0x10, 0x60, 0xb5, 0xd3, 0xbd, 0xb1, 0xd2,
- 0xb0, 0xc0, 0xdc, 0xc0, 0x94, 0xba, 0x91, 0xac, 0xc5, 0xa9,
- 0x00, 0xc2, 0x00, 0xb7, 0x00, 0xed,
-};
-
-static const unsigned char *s6e8ax0_22_gamma_table[] = {
- s6e8ax0_22_gamma_30,
- s6e8ax0_22_gamma_50,
- s6e8ax0_22_gamma_60,
- s6e8ax0_22_gamma_70,
- s6e8ax0_22_gamma_80,
- s6e8ax0_22_gamma_90,
- s6e8ax0_22_gamma_100,
- s6e8ax0_22_gamma_120,
- s6e8ax0_22_gamma_130,
- s6e8ax0_22_gamma_140,
- s6e8ax0_22_gamma_150,
- s6e8ax0_22_gamma_160,
- s6e8ax0_22_gamma_170,
- s6e8ax0_22_gamma_180,
- s6e8ax0_22_gamma_190,
- s6e8ax0_22_gamma_200,
- s6e8ax0_22_gamma_210,
- s6e8ax0_22_gamma_220,
- s6e8ax0_22_gamma_230,
- s6e8ax0_22_gamma_240,
- s6e8ax0_22_gamma_250,
- s6e8ax0_22_gamma_260,
- s6e8ax0_22_gamma_270,
- s6e8ax0_22_gamma_280,
- s6e8ax0_22_gamma_300,
-};
-
-static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-
- static const unsigned char data_to_send[] = {
- 0xf8, 0x3d, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
- 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
- 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
- 0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8
- };
- static const unsigned char data_to_send_panel_reverse[] = {
- 0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
- 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
- 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
- 0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1
- };
-
- if (lcd->dsim_dev->panel_reverse)
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send_panel_reverse,
- ARRAY_SIZE(data_to_send_panel_reverse));
- else
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_display_cond(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xf2, 0x80, 0x03, 0x0d
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-/* Gamma 2.2 Setting (200cd, 7500K, 10MPCD) */
-static void s6e8ax0_gamma_cond(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- unsigned int gamma = lcd->bd->props.brightness;
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- s6e8ax0_22_gamma_table[gamma],
- GAMMA_TABLE_COUNT);
-}
-
-static void s6e8ax0_gamma_update(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xf7, 0x03
- };
-
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_SHORT_WRITE_PARAM, data_to_send,
- ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond1(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xd1, 0xfe, 0x80, 0x00, 0x01, 0x0b, 0x00, 0x00, 0x40,
- 0x0d, 0x00, 0x00
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond2(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0,
- 0x00
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond3(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xe1, 0x10, 0x1c, 0x17, 0x08, 0x1d
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond4(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xe2, 0xed, 0x07, 0xc3, 0x13, 0x0d, 0x03
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond5(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x19, 0x33, 0x02
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-static void s6e8ax0_etc_cond6(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xe3, 0x40
- };
-
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_SHORT_WRITE_PARAM,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_etc_cond7(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xe4, 0x00, 0x00, 0x14, 0x80, 0x00, 0x00, 0x00
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_elvss_set(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xb1, 0x04, 0x00
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_elvss_nvm_set(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xd9, 0x5c, 0x20, 0x0c, 0x0f, 0x41, 0x00, 0x10, 0x11,
- 0x12, 0xd1, 0x00, 0x00, 0x00, 0x00, 0x80, 0xcb, 0xed,
- 0x64, 0xaf
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_sleep_in(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0x10, 0x00
- };
-
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_SHORT_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_sleep_out(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0x11, 0x00
- };
-
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_SHORT_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_display_on(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0x29, 0x00
- };
-
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_SHORT_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_display_off(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0x28, 0x00
- };
-
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_SHORT_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_apply_level2_key(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xf0, 0x5a, 0x5a
- };
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_acl_on(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xc0, 0x01
- };
-
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_SHORT_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-static void s6e8ax0_acl_off(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- static const unsigned char data_to_send[] = {
- 0xc0, 0x00
- };
-
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_SHORT_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
-}
-
-/* Full white 50% reducing setting */
-static void s6e8ax0_acl_ctrl_set(struct s6e8ax0 *lcd)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- /* Full white 50% reducing setting */
- static const unsigned char cutoff_50[] = {
- 0xc1, 0x47, 0x53, 0x13, 0x53, 0x00, 0x00, 0x02, 0xcf,
- 0x00, 0x00, 0x04, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x08, 0x0f, 0x16, 0x1d, 0x24, 0x2a, 0x31, 0x38,
- 0x3f, 0x46
- };
- /* Full white 45% reducing setting */
- static const unsigned char cutoff_45[] = {
- 0xc1, 0x47, 0x53, 0x13, 0x53, 0x00, 0x00, 0x02, 0xcf,
- 0x00, 0x00, 0x04, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x07, 0x0d, 0x13, 0x19, 0x1f, 0x25, 0x2b, 0x31,
- 0x37, 0x3d
- };
- /* Full white 40% reducing setting */
- static const unsigned char cutoff_40[] = {
- 0xc1, 0x47, 0x53, 0x13, 0x53, 0x00, 0x00, 0x02, 0xcf,
- 0x00, 0x00, 0x04, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x06, 0x0c, 0x11, 0x16, 0x1c, 0x21, 0x26, 0x2b,
- 0x31, 0x36
- };
-
- if (lcd->acl_enable) {
- if (lcd->cur_acl == 0) {
- if (lcd->gamma == 0 || lcd->gamma == 1) {
- s6e8ax0_acl_off(lcd);
- dev_dbg(&lcd->ld->dev,
- "cur_acl=%d\n", lcd->cur_acl);
- } else
- s6e8ax0_acl_on(lcd);
- }
- switch (lcd->gamma) {
- case 0: /* 30cd */
- s6e8ax0_acl_off(lcd);
- lcd->cur_acl = 0;
- break;
- case 1 ... 3: /* 50cd ~ 90cd */
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_LONG_WRITE,
- cutoff_40,
- ARRAY_SIZE(cutoff_40));
- lcd->cur_acl = 40;
- break;
- case 4 ... 7: /* 120cd ~ 210cd */
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_LONG_WRITE,
- cutoff_45,
- ARRAY_SIZE(cutoff_45));
- lcd->cur_acl = 45;
- break;
- case 8 ... 10: /* 220cd ~ 300cd */
- ops->cmd_write(lcd_to_master(lcd),
- MIPI_DSI_DCS_LONG_WRITE,
- cutoff_50,
- ARRAY_SIZE(cutoff_50));
- lcd->cur_acl = 50;
- break;
- default:
- break;
- }
- } else {
- s6e8ax0_acl_off(lcd);
- lcd->cur_acl = 0;
- dev_dbg(&lcd->ld->dev, "cur_acl = %d\n", lcd->cur_acl);
- }
-}
-
-static void s6e8ax0_read_id(struct s6e8ax0 *lcd, u8 *mtp_id)
-{
- unsigned int ret;
- unsigned int addr = 0xd1; /* MTP ID */
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-
- ret = ops->cmd_read(lcd_to_master(lcd),
- MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM,
- addr, 3, mtp_id);
-}
-
-static int s6e8ax0_panel_init(struct s6e8ax0 *lcd)
-{
- s6e8ax0_apply_level2_key(lcd);
- s6e8ax0_sleep_out(lcd);
- msleep(1);
- s6e8ax0_panel_cond(lcd);
- s6e8ax0_display_cond(lcd);
- s6e8ax0_gamma_cond(lcd);
- s6e8ax0_gamma_update(lcd);
-
- s6e8ax0_etc_cond1(lcd);
- s6e8ax0_etc_cond2(lcd);
- s6e8ax0_etc_cond3(lcd);
- s6e8ax0_etc_cond4(lcd);
- s6e8ax0_etc_cond5(lcd);
- s6e8ax0_etc_cond6(lcd);
- s6e8ax0_etc_cond7(lcd);
-
- s6e8ax0_elvss_nvm_set(lcd);
- s6e8ax0_elvss_set(lcd);
-
- s6e8ax0_acl_ctrl_set(lcd);
- s6e8ax0_acl_on(lcd);
-
- /* if ID3 value is not 33h, branch private elvss mode */
- msleep(lcd->ddi_pd->power_on_delay);
-
- return 0;
-}
-
-static int s6e8ax0_update_gamma_ctrl(struct s6e8ax0 *lcd, int brightness)
-{
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
-
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- s6e8ax0_22_gamma_table[brightness],
- ARRAY_SIZE(s6e8ax0_22_gamma_table));
-
- /* update gamma table. */
- s6e8ax0_gamma_update(lcd);
- lcd->gamma = brightness;
-
- return 0;
-}
-
-static int s6e8ax0_gamma_ctrl(struct s6e8ax0 *lcd, int gamma)
-{
- s6e8ax0_update_gamma_ctrl(lcd, gamma);
-
- return 0;
-}
-
-static int s6e8ax0_set_power(struct lcd_device *ld, int power)
-{
- struct s6e8ax0 *lcd = lcd_get_data(ld);
- struct mipi_dsim_master_ops *ops = lcd_to_master_ops(lcd);
- int ret = 0;
-
- if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
- power != FB_BLANK_NORMAL) {
- dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
- return -EINVAL;
- }
-
- if ((power == FB_BLANK_UNBLANK) && ops->set_blank_mode) {
- /* LCD power on */
- if ((POWER_IS_ON(power) && POWER_IS_OFF(lcd->power))
- || (POWER_IS_ON(power) && POWER_IS_NRM(lcd->power))) {
- ret = ops->set_blank_mode(lcd_to_master(lcd), power);
- if (!ret && lcd->power != power)
- lcd->power = power;
- }
- } else if ((power == FB_BLANK_POWERDOWN) && ops->set_early_blank_mode) {
- /* LCD power off */
- if ((POWER_IS_OFF(power) && POWER_IS_ON(lcd->power)) ||
- (POWER_IS_ON(lcd->power) && POWER_IS_NRM(power))) {
- ret = ops->set_early_blank_mode(lcd_to_master(lcd),
- power);
- if (!ret && lcd->power != power)
- lcd->power = power;
- }
- }
-
- return ret;
-}
-
-static int s6e8ax0_get_power(struct lcd_device *ld)
-{
- struct s6e8ax0 *lcd = lcd_get_data(ld);
-
- return lcd->power;
-}
-
-static int s6e8ax0_set_brightness(struct backlight_device *bd)
-{
- int ret = 0, brightness = bd->props.brightness;
- struct s6e8ax0 *lcd = bl_get_data(bd);
-
- if (brightness < MIN_BRIGHTNESS ||
- brightness > bd->props.max_brightness) {
- dev_err(lcd->dev, "lcd brightness should be %d to %d.\n",
- MIN_BRIGHTNESS, MAX_BRIGHTNESS);
- return -EINVAL;
- }
-
- ret = s6e8ax0_gamma_ctrl(lcd, brightness);
- if (ret) {
- dev_err(&bd->dev, "lcd brightness setting failed.\n");
- return -EIO;
- }
-
- return ret;
-}
-
-static struct lcd_ops s6e8ax0_lcd_ops = {
- .set_power = s6e8ax0_set_power,
- .get_power = s6e8ax0_get_power,
-};
-
-static const struct backlight_ops s6e8ax0_backlight_ops = {
- .update_status = s6e8ax0_set_brightness,
-};
-
-static void s6e8ax0_power_on(struct mipi_dsim_lcd_device *dsim_dev, int power)
-{
- struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
-
- msleep(lcd->ddi_pd->power_on_delay);
-
- /* lcd power on */
- if (power)
- s6e8ax0_regulator_enable(lcd);
- else
- s6e8ax0_regulator_disable(lcd);
-
- msleep(lcd->ddi_pd->reset_delay);
-
- /* lcd reset */
- if (lcd->ddi_pd->reset)
- lcd->ddi_pd->reset(lcd->ld);
- msleep(5);
-}
-
-static void s6e8ax0_set_sequence(struct mipi_dsim_lcd_device *dsim_dev)
-{
- struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
-
- s6e8ax0_panel_init(lcd);
- s6e8ax0_display_on(lcd);
-
- lcd->power = FB_BLANK_UNBLANK;
-}
-
-static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
-{
- struct s6e8ax0 *lcd;
- int ret;
- u8 mtp_id[3] = {0, };
-
- lcd = devm_kzalloc(&dsim_dev->dev, sizeof(struct s6e8ax0), GFP_KERNEL);
- if (!lcd) {
- dev_err(&dsim_dev->dev, "failed to allocate s6e8ax0 structure.\n");
- return -ENOMEM;
- }
-
- lcd->dsim_dev = dsim_dev;
- lcd->ddi_pd = (struct lcd_platform_data *)dsim_dev->platform_data;
- lcd->dev = &dsim_dev->dev;
-
- mutex_init(&lcd->lock);
-
- ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
- if (ret) {
- dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- return ret;
- }
-
- lcd->ld = devm_lcd_device_register(lcd->dev, "s6e8ax0", lcd->dev, lcd,
- &s6e8ax0_lcd_ops);
- if (IS_ERR(lcd->ld)) {
- dev_err(lcd->dev, "failed to register lcd ops.\n");
- return PTR_ERR(lcd->ld);
- }
-
- lcd->bd = devm_backlight_device_register(lcd->dev, "s6e8ax0-bl",
- lcd->dev, lcd, &s6e8ax0_backlight_ops, NULL);
- if (IS_ERR(lcd->bd)) {
- dev_err(lcd->dev, "failed to register backlight ops.\n");
- return PTR_ERR(lcd->bd);
- }
-
- lcd->bd->props.max_brightness = MAX_BRIGHTNESS;
- lcd->bd->props.brightness = MAX_BRIGHTNESS;
-
- s6e8ax0_read_id(lcd, mtp_id);
- if (mtp_id[0] == 0x00)
- dev_err(lcd->dev, "read id failed\n");
-
- dev_info(lcd->dev, "Read ID : %x, %x, %x\n",
- mtp_id[0], mtp_id[1], mtp_id[2]);
-
- if (mtp_id[2] == 0x33)
- dev_info(lcd->dev,
- "ID-3 is 0xff does not support dynamic elvss\n");
- else
- dev_info(lcd->dev,
- "ID-3 is 0x%x support dynamic elvss\n", mtp_id[2]);
-
- lcd->acl_enable = 1;
- lcd->cur_acl = 0;
-
- dev_set_drvdata(&dsim_dev->dev, lcd);
-
- dev_dbg(lcd->dev, "probed s6e8ax0 panel driver.\n");
-
- return 0;
-}
-
-static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
-{
- struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
-
- s6e8ax0_sleep_in(lcd);
- msleep(lcd->ddi_pd->power_off_delay);
- s6e8ax0_display_off(lcd);
-
- s6e8ax0_regulator_disable(lcd);
-
- return 0;
-}
-
-static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
-{
- struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
-
- s6e8ax0_sleep_out(lcd);
- msleep(lcd->ddi_pd->power_on_delay);
-
- s6e8ax0_regulator_enable(lcd);
- s6e8ax0_set_sequence(dsim_dev);
-
- return 0;
-}
-
-static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
- .name = "s6e8ax0",
- .id = -1,
-
- .power_on = s6e8ax0_power_on,
- .set_sequence = s6e8ax0_set_sequence,
- .probe = s6e8ax0_probe,
- .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
- .resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
-};
-
-static int s6e8ax0_init(void)
-{
- exynos_mipi_dsi_register_lcd_driver(&s6e8ax0_dsim_ddi_driver);
-
- return 0;
-}
-
-static void s6e8ax0_exit(void)
-{
- return;
-}
-
-module_init(s6e8ax0_init);
-module_exit(s6e8ax0_exit);
-
-MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("MIPI-DSI based s6e8ax0 AMOLED LCD Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/hecubafb.c b/drivers/video/fbdev/hecubafb.c
index e4031ef39491..8577195cb533 100644
--- a/drivers/video/fbdev/hecubafb.c
+++ b/drivers/video/fbdev/hecubafb.c
@@ -47,7 +47,7 @@
#define DPY_W 600
#define DPY_H 800
-static struct fb_fix_screeninfo hecubafb_fix = {
+static const struct fb_fix_screeninfo hecubafb_fix = {
.id = "hecubafb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO01,
@@ -58,7 +58,7 @@ static struct fb_fix_screeninfo hecubafb_fix = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo hecubafb_var = {
+static const struct fb_var_screeninfo hecubafb_var = {
.xres = DPY_W,
.yres = DPY_H,
.xres_virtual = DPY_W,
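Constifying these screeninfo templates (here and in the hgafb, i740fb, kyro, matroxfb, pm2fb, savagefb, simplefb, sm712fb, ssd1307fb, tdfxfb, uvesafb and vga16fb hunks below) is safe because the drivers only use them as probe-time templates that are copied into the per-device fb_info and never written afterwards. A sketch of the pattern with a hypothetical driver:

static const struct fb_fix_screeninfo demo_fix = {
	.id	= "demofb",
	.type	= FB_TYPE_PACKED_PIXELS,
	.accel	= FB_ACCEL_NONE,
};

static int demo_probe(struct fb_info *info)
{
	info->fix = demo_fix;	/* struct copy; the template stays read-only */
	return register_framebuffer(info);
}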
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 15d3ccff2965..463028543173 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(hga_reg_lock);
/* Framebuffer driver structures */
-static struct fb_var_screeninfo hga_default_var = {
+static const struct fb_var_screeninfo hga_default_var = {
.xres = 720,
.yres = 348,
.xres_virtual = 720,
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index cf5ccd0f2252..7bc5f6056c77 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -82,7 +82,7 @@ struct i740fb_par {
#define DACSPEED24_SD 128
#define DACSPEED32 86
-static struct fb_fix_screeninfo i740fb_fix = {
+static const struct fb_fix_screeninfo i740fb_fix = {
.id = "i740fb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index 025b882a4826..483ab2592d0c 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -1691,7 +1691,7 @@ static int i810_alloc_agp_mem(struct fb_info *info)
if (!(par->i810_gtt.i810_cursor_memory =
agp_allocate_memory(bridge, par->cursor_heap.size >> 12,
AGP_PHYSICAL_MEMORY))) {
- printk("i810fb_alloc_cursormem: can't allocate"
+ printk("i810fb_alloc_cursormem: can't allocate "
"cursor memory\n");
agp_backend_release(bridge);
return -ENOMEM;
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index bf207444ba0c..ff2a5d2023e1 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -1301,11 +1301,6 @@ static int intelfb_check_var(struct fb_var_screeninfo *var,
break;
}
- if (v.xoffset < 0)
- v.xoffset = 0;
- if (v.yoffset < 0)
- v.yoffset = 0;
-
if (v.xoffset > v.xres_virtual - v.xres)
v.xoffset = v.xres_virtual - v.xres;
if (v.yoffset > v.yres_virtual - v.yres)
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 5bb01533271e..f77478fb3d14 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -44,7 +44,7 @@ static struct fb_fix_screeninfo kyro_fix = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo kyro_var = {
+static const struct fb_var_screeninfo kyro_var = {
/* 640x480, 16bpp @ 60 Hz */
.xres = 640,
.yres = 480,
diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
index 195ad7cac1ba..68fa037d8cbc 100644
--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
@@ -372,7 +372,7 @@ static int Ti3026_init(struct matrox_fb_info *minfo, struct my_timming *m)
DBG(__func__)
- memcpy(hw->DACreg, MGADACbpp32, sizeof(hw->DACreg));
+ memcpy(hw->DACreg, MGADACbpp32, sizeof(MGADACbpp32));
switch (minfo->fbcon.var.bits_per_pixel) {
case 4: hw->DACreg[POS3026_XLATCHCTRL] = TVP3026_XLATCHCTRL_16_1; /* or _8_1, they are same */
hw->DACreg[POS3026_XTRUECOLORCTRL] = TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/matrox/matroxfb_g450.c b/drivers/video/fbdev/matrox/matroxfb_g450.c
index cff0546ea6fd..f108ae66fc83 100644
--- a/drivers/video/fbdev/matrox/matroxfb_g450.c
+++ b/drivers/video/fbdev/matrox/matroxfb_g450.c
@@ -433,7 +433,7 @@ static void cve2_init_TVdata(int norm, struct mavenregs* data, const struct outp
0x00, /* 3E written multiple times */
0x00, /* 3F not written */
} };
- static struct mavenregs ntscregs = { {
+ static const struct mavenregs ntscregs = { {
0x21, 0xF0, 0x7C, 0x1F, /* 00: chroma subcarrier */
0x00,
0x00, /* test */
diff --git a/drivers/video/fbdev/mb862xx/mb862xx-i2c.c b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
index c87e17afb3e2..ba96c44f2761 100644
--- a/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
@@ -157,17 +157,10 @@ static struct i2c_adapter mb862xx_i2c_adapter = {
int mb862xx_i2c_init(struct mb862xxfb_par *par)
{
- int ret;
-
mb862xx_i2c_adapter.algo_data = par;
par->adap = &mb862xx_i2c_adapter;
- ret = i2c_add_adapter(par->adap);
- if (ret < 0) {
- dev_err(par->dev, "failed to add %s\n",
- mb862xx_i2c_adapter.name);
- }
- return ret;
+ return i2c_add_adapter(par->adap);
}
void mb862xx_i2c_exit(struct mb862xxfb_par *par)
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index f91b1db262b0..8778e01cebac 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -845,7 +845,7 @@ static int __set_par(struct fb_info *fbi, bool lock)
if (fbi->var.sync & FB_SYNC_SHARP_MODE)
mode = IPU_PANEL_SHARP_TFT;
- dev_dbg(fbi->device, "pixclock = %ul Hz\n",
+ dev_dbg(fbi->device, "pixclock = %u Hz\n",
(u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
if (sdc_init_panel(mx3fb, mode,
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index 4e6608ceac09..7846f0e8bbbb 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -800,6 +800,7 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host,
struct fb_videomode *vmode)
{
int ret;
+ struct device *dev = &host->pdev->dev;
struct fb_info *fb_info = &host->fb_info;
struct fb_var_screeninfo *var = &fb_info->var;
dma_addr_t fb_phys;
@@ -825,12 +826,10 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host,
/* Memory allocation for framebuffer */
fb_size = SZ_2M;
- fb_virt = alloc_pages_exact(fb_size, GFP_DMA);
+ fb_virt = dma_alloc_wc(dev, PAGE_ALIGN(fb_size), &fb_phys, GFP_KERNEL);
if (!fb_virt)
return -ENOMEM;
- fb_phys = virt_to_phys(fb_virt);
-
fb_info->fix.smem_start = fb_phys;
fb_info->screen_base = fb_virt;
fb_info->screen_size = fb_info->fix.smem_len = fb_size;
@@ -843,9 +842,11 @@ static int mxsfb_init_fbinfo(struct mxsfb_info *host,
static void mxsfb_free_videomem(struct mxsfb_info *host)
{
+ struct device *dev = &host->pdev->dev;
struct fb_info *fb_info = &host->fb_info;
- free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
+ dma_free_wc(dev, fb_info->screen_size, fb_info->screen_base,
+ fb_info->fix.smem_start);
}
static const struct platform_device_id mxsfb_devtype[] = {
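The mxsfb conversion above replaces an open-coded alloc_pages_exact()/virt_to_phys() pair with the DMA API, which both hands back an address that is actually valid for the device and gives the CPU a write-combined mapping suited to a scanout buffer. The pattern in isolation, with a hypothetical 2 MiB framebuffer:

static int demo_alloc_fb(struct device *dev, struct fb_info *info)
{
	dma_addr_t fb_phys;
	void *fb_virt;

	fb_virt = dma_alloc_wc(dev, PAGE_ALIGN(SZ_2M), &fb_phys, GFP_KERNEL);
	if (!fb_virt)
		return -ENOMEM;

	info->fix.smem_start = fb_phys;	/* device-visible address */
	info->screen_base = fb_virt;	/* write-combined CPU mapping */
	return 0;
}

Teardown is the mirror image: dma_free_wc(dev, PAGE_ALIGN(SZ_2M), fb_virt, fb_phys).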
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index fb60a8f0cc94..906c6e75c260 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -625,6 +625,21 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
if (address == OF_BAD_ADDR && addr_prop)
address = (u64)addr_prop;
if (address != OF_BAD_ADDR) {
+#ifdef CONFIG_PCI
+ const __be32 *vidp, *didp;
+ u32 vid, did;
+ struct pci_dev *pdev;
+
+ vidp = of_get_property(dp, "vendor-id", NULL);
+ didp = of_get_property(dp, "device-id", NULL);
+ if (vidp && didp) {
+ vid = be32_to_cpup(vidp);
+ did = be32_to_cpup(didp);
+ pdev = pci_get_device(vid, did, NULL);
+ if (!pdev || pci_enable_device(pdev))
+ return;
+ }
+#endif
/* kludge for valkyrie */
if (strcmp(dp->name, "valkyrie") == 0)
address += 0x1000;
diff --git a/drivers/video/fbdev/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
index 0e4cee9a8d79..c81f150589e1 100644
--- a/drivers/video/fbdev/omap/lcd_mipid.c
+++ b/drivers/video/fbdev/omap/lcd_mipid.c
@@ -60,7 +60,6 @@ struct mipid_device {
struct mutex mutex;
struct lcd_panel panel;
- struct workqueue_struct *esd_wq;
struct delayed_work esd_work;
void (*esd_check)(struct mipid_device *m);
};
@@ -390,7 +389,7 @@ static void ls041y3_esd_check(struct mipid_device *md)
static void mipid_esd_start_check(struct mipid_device *md)
{
if (md->esd_check != NULL)
- queue_delayed_work(md->esd_wq, &md->esd_work,
+ schedule_delayed_work(&md->esd_work,
MIPID_ESD_CHECK_PERIOD);
}
@@ -476,11 +475,6 @@ static int mipid_init(struct lcd_panel *panel,
struct mipid_device *md = to_mipid_device(panel);
md->fbdev = fbdev;
- md->esd_wq = create_singlethread_workqueue("mipid_esd");
- if (md->esd_wq == NULL) {
- dev_err(&md->spi->dev, "can't create ESD workqueue\n");
- return -ENOMEM;
- }
INIT_DELAYED_WORK(&md->esd_work, mipid_esd_work);
mutex_init(&md->mutex);
@@ -500,7 +494,6 @@ static void mipid_cleanup(struct lcd_panel *panel)
if (md->enabled)
mipid_esd_stop_check(md);
- destroy_workqueue(md->esd_wq);
}
static struct lcd_panel mipid_panel = {
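This hunk and the panel-dsi-cm one below make the same simplification: a delayed work item does not need its own single-threaded workqueue, so the private queue is dropped in favour of the system workqueue. A minimal sketch of the resulting life cycle, with illustrative names:

static struct delayed_work esd_work;

static void esd_work_fn(struct work_struct *work)
{
	/* periodic ESD check would go here, then re-arm */
	schedule_delayed_work(&esd_work, msecs_to_jiffies(5000));
}

static int demo_init(void)
{
	INIT_DELAYED_WORK(&esd_work, esd_work_fn);
	schedule_delayed_work(&esd_work, msecs_to_jiffies(5000));
	return 0;
}

static void demo_cleanup(void)
{
	/* replaces flushing and destroy_workqueue() on the private queue */
	cancel_delayed_work_sync(&esd_work);
}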
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index b58012b82b6f..8b810696a42b 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -75,8 +75,6 @@ struct panel_drv_data {
bool intro_printed;
- struct workqueue_struct *workqueue;
-
bool ulps_enabled;
unsigned ulps_timeout;
struct delayed_work ulps_work;
@@ -232,7 +230,7 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata,
static void dsicm_queue_ulps_work(struct panel_drv_data *ddata)
{
if (ddata->ulps_timeout > 0)
- queue_delayed_work(ddata->workqueue, &ddata->ulps_work,
+ schedule_delayed_work(&ddata->ulps_work,
msecs_to_jiffies(ddata->ulps_timeout));
}
@@ -1244,11 +1242,6 @@ static int dsicm_probe(struct platform_device *pdev)
dev_dbg(dev, "Using GPIO TE\n");
}
- ddata->workqueue = create_singlethread_workqueue("dsicm_wq");
- if (ddata->workqueue == NULL) {
- dev_err(dev, "can't create workqueue\n");
- return -ENOMEM;
- }
INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work);
dsicm_hw_reset(ddata);
@@ -1262,7 +1255,7 @@ static int dsicm_probe(struct platform_device *pdev)
dev, ddata, &dsicm_bl_ops, &props);
if (IS_ERR(bldev)) {
r = PTR_ERR(bldev);
- goto err_bl;
+ goto err_reg;
}
ddata->bldev = bldev;
@@ -1285,8 +1278,6 @@ static int dsicm_probe(struct platform_device *pdev)
err_sysfs_create:
if (bldev != NULL)
backlight_device_unregister(bldev);
-err_bl:
- destroy_workqueue(ddata->workqueue);
err_reg:
return r;
}
@@ -1316,7 +1307,6 @@ static int __exit dsicm_remove(struct platform_device *pdev)
omap_dss_put_device(ddata->in);
dsicm_cancel_ulps_work(ddata);
- destroy_workqueue(ddata->workqueue);
/* reset, to be sure that the panel is in a valid state */
dsicm_hw_reset(ddata);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 3691bde4ce0a..a864608c5df1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -644,6 +644,7 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
{
int r;
+ long time_left;
DECLARE_COMPLETION_ONSTACK(completion);
r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
@@ -652,15 +653,15 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
if (r)
return r;
- timeout = wait_for_completion_interruptible_timeout(&completion,
+ time_left = wait_for_completion_interruptible_timeout(&completion,
timeout);
omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
- if (timeout == 0)
+ if (time_left == 0)
return -ETIMEDOUT;
- if (timeout == -ERESTARTSYS)
+ if (time_left == -ERESTARTSYS)
return -ERESTARTSYS;
return 0;
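The dispc-compat fix stops reusing the caller's timeout variable for the wait result. wait_for_completion_interruptible_timeout() returns a long with three distinct outcomes, so the result wants a signed local of its own:

static int demo_wait(struct completion *done, unsigned long timeout)
{
	long time_left;

	time_left = wait_for_completion_interruptible_timeout(done, timeout);
	if (time_left == 0)
		return -ETIMEDOUT;	/* timer expired */
	if (time_left < 0)
		return time_left;	/* -ERESTARTSYS: interrupted by a signal */
	return 0;			/* completed, time_left jiffies remained */
}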
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 9e4800a4e3d1..30d49f3800b3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct regulator *vdds_dsi;
- int r;
if (dsi->vdds_dsi_reg != NULL)
return 0;
@@ -1180,13 +1179,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
return PTR_ERR(vdds_dsi);
}
- r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
- if (r) {
- devm_regulator_put(vdds_dsi);
- DSSERR("can't set the DSI regulator voltage\n");
- return r;
- }
-
dsi->vdds_dsi_reg = vdds_dsi;
return 0;
@@ -5348,7 +5340,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->phy_base) {
DSSERR("can't ioremap DSI PHY\n");
return -ENOMEM;
}
@@ -5368,7 +5360,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->pll_base) {
DSSERR("can't ioremap DSI PLL\n");
return -ENOMEM;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 926a6f20dbb2..156a254705ea 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -100,7 +100,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
static int hdmi_init_regulator(void)
{
- int r;
struct regulator *reg;
if (hdmi.vdda_reg != NULL)
@@ -114,13 +113,6 @@ static int hdmi_init_regulator(void)
return PTR_ERR(reg);
}
- r = regulator_set_voltage(reg, 1800000, 1800000);
- if (r) {
- devm_regulator_put(reg);
- DSSWARN("can't set the regulator voltage\n");
- return r;
- }
-
hdmi.vdda_reg = reg;
return 0;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index 0ee829a165c3..4da36bcab977 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -119,7 +119,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
static int hdmi_init_regulator(void)
{
- int r;
struct regulator *reg;
if (hdmi.vdda_reg != NULL)
@@ -131,13 +130,6 @@ static int hdmi_init_regulator(void)
return PTR_ERR(reg);
}
- r = regulator_set_voltage(reg, 1800000, 1800000);
- if (r) {
- devm_regulator_put(reg);
- DSSWARN("can't set the regulator voltage\n");
- return r;
- }
-
hdmi.vdda_reg = reg;
return 0;
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index aa8d28880912..1a4070f719c2 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -113,7 +113,7 @@ static struct fb_fix_screeninfo pm2fb_fix = {
/*
* Default video mode. In case the modedb doesn't work.
*/
-static struct fb_var_screeninfo pm2fb_var = {
+static const struct fb_var_screeninfo pm2fb_var = {
/* "640x480, 8 bpp @ 60 Hz */
.xres = 640,
.yres = 480,
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 3b1ca4411073..a2564ab91e62 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
- 0, pages);
+ ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
+ FOLL_WRITE);
if (ret < nr_pages) {
nr_pages = ret;
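This hunk and the fsl_hypervisor one further down track the same tree-wide API change: the get_user_pages*() family dropped the separate write/force int arguments for a single gup_flags word. Requesting writable pages now looks like this sketch:

static long demo_pin_user_buf(const char __user *buf, struct page **pages,
			      unsigned long nr_pages)
{
	long pinned;

	/* one flags word replaces the old write=1, force=0 arguments */
	pinned = get_user_pages_unlocked((unsigned long)buf, nr_pages,
					 pages, FOLL_WRITE);
	return pinned;	/* may be short; caller handles < nr_pages and < 0 */
}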
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 2c0487f4f805..ef73f14d7ba0 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2125,7 +2125,7 @@ static int of_get_pxafb_display(struct device *dev, struct device_node *disp,
timings = of_get_display_timings(disp);
if (!timings)
- goto out;
+ return -EINVAL;
ret = -ENOMEM;
info->modes = kmalloc_array(timings->num_timings,
@@ -2186,6 +2186,7 @@ static int of_get_pxafb_mode_info(struct device *dev,
ret = of_property_read_u32(np, "bus-width", &bus_width);
if (ret) {
dev_err(dev, "no bus-width specified: %d\n", ret);
+ of_node_put(np);
return ret;
}
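The added of_node_put() closes a device-node refcount leak: every of_get_*() style lookup returns a node with its refcount raised, and each early return must drop it. Canonical shape of the pattern, with illustrative names:

static int demo_parse(struct device_node *parent)
{
	struct device_node *np;
	u32 bus_width;
	int ret;

	np = of_get_child_by_name(parent, "display");	/* takes a reference */
	if (!np)
		return -ENODEV;

	ret = of_property_read_u32(np, "bus-width", &bus_width);
	if (ret) {
		of_node_put(np);	/* drop the reference on every exit path */
		return ret;
	}
	of_node_put(np);
	return 0;
}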
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 96aa46dc696c..5d6179ef0298 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -83,7 +83,7 @@ static const char *s1d13xxxfb_prod_names[] = {
/*
* here we define the default struct fb_fix_screeninfo
*/
-static struct fb_fix_screeninfo s1d13xxxfb_fix = {
+static const struct fb_fix_screeninfo s1d13xxxfb_fix = {
.id = S1D_FBID,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -929,7 +929,7 @@ static int s1d13xxxfb_suspend(struct platform_device *dev, pm_message_t state)
s1dfb->disp_save = kmalloc(info->fix.smem_len, GFP_KERNEL);
if (!s1dfb->disp_save) {
- printk(KERN_ERR PFX "no memory to save screen");
+ printk(KERN_ERR PFX "no memory to save screen\n");
return -ENOMEM;
}
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index 0dd86be36afb..a67e4567e656 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -767,7 +767,7 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
diff --git a/drivers/video/fbdev/s3c2410fb.h b/drivers/video/fbdev/s3c2410fb.h
index 47a17bd23011..cdd11e2f8859 100644
--- a/drivers/video/fbdev/s3c2410fb.h
+++ b/drivers/video/fbdev/s3c2410fb.h
@@ -32,7 +32,7 @@ struct s3c2410fb_info {
unsigned long clk_rate;
unsigned int palette_ready;
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
struct notifier_block freq_transition;
#endif
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 6c77ab09b0b2..c30a91c1137c 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -1660,7 +1660,7 @@ static struct fb_ops savagefb_ops = {
/* --------------------------------------------------------------------- */
-static struct fb_var_screeninfo savagefb_var800x600x8 = {
+static const struct fb_var_screeninfo savagefb_var800x600x8 = {
.accel_flags = FB_ACCELF_TEXT,
.xres = 800,
.yres = 600,
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index e9cf19977285..61f799a515dc 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -33,14 +33,14 @@
#include <linux/parser.h>
#include <linux/regulator/consumer.h>
-static struct fb_fix_screeninfo simplefb_fix = {
+static const struct fb_fix_screeninfo simplefb_fix = {
.id = "simple",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo simplefb_var = {
+static const struct fb_var_screeninfo simplefb_var = {
.height = -1,
.width = -1,
.activate = FB_ACTIVATE_NOW,
@@ -74,8 +74,14 @@ static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
return 0;
}
+struct simplefb_par;
+static void simplefb_clocks_destroy(struct simplefb_par *par);
+static void simplefb_regulators_destroy(struct simplefb_par *par);
+
static void simplefb_destroy(struct fb_info *info)
{
+ simplefb_regulators_destroy(info->par);
+ simplefb_clocks_destroy(info->par);
if (info->screen_base)
iounmap(info->screen_base);
}
@@ -487,11 +493,8 @@ error_fb_release:
static int simplefb_remove(struct platform_device *pdev)
{
struct fb_info *info = platform_get_drvdata(pdev);
- struct simplefb_par *par = info->par;
unregister_framebuffer(info);
- simplefb_regulators_destroy(par);
- simplefb_clocks_destroy(par);
framebuffer_release(info);
return 0;
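The simplefb move is about object lifetime: clocks and regulators are now torn down from the fb_ops .fb_destroy callback, which runs when the last reference to the fb_info is dropped, rather than in remove(), which can run while a user still holds the framebuffer. A sketch of the split, assuming the destroy helpers named in the hunk:

static void demo_destroy(struct fb_info *info)	/* fb_ops.fb_destroy */
{
	simplefb_regulators_destroy(info->par);
	simplefb_clocks_destroy(info->par);
	if (info->screen_base)
		iounmap(info->screen_base);
}

static int demo_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);

	unregister_framebuffer(info);
	framebuffer_release(info);	/* .fb_destroy fires on the last put */
	return 0;
}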
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 86ae1d4556fc..73cb4ffff3c5 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -56,7 +56,7 @@ struct smtcfb_info {
void __iomem *smtc_regbaseaddress; /* Memory Map IO starting address */
-static struct fb_var_screeninfo smtcfb_var = {
+static const struct fb_var_screeninfo smtcfb_var = {
.xres = 1024,
.yres = 600,
.xres_virtual = 1024,
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 9279e5f6696e..ec2e7e353685 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -1761,10 +1761,8 @@ error:
static void ufx_usb_disconnect(struct usb_interface *interface)
{
struct ufx_data *dev;
- struct fb_info *info;
dev = usb_get_intfdata(interface);
- info = dev->info;
pr_debug("USB disconnect starting\n");
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index a9c45c89b15e..2925d5ce8d3e 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -64,7 +64,7 @@ struct ssd1307fb_par {
u32 contrast;
u32 dclk_div;
u32 dclk_frq;
- struct ssd1307fb_deviceinfo *device_info;
+ const struct ssd1307fb_deviceinfo *device_info;
struct i2c_client *client;
u32 height;
struct fb_info *info;
@@ -84,7 +84,7 @@ struct ssd1307fb_array {
u8 data[0];
};
-static struct fb_fix_screeninfo ssd1307fb_fix = {
+static const struct fb_fix_screeninfo ssd1307fb_fix = {
.id = "Solomon SSD1307",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_MONO10,
@@ -94,7 +94,7 @@ static struct fb_fix_screeninfo ssd1307fb_fix = {
.accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo ssd1307fb_var = {
+static const struct fb_var_screeninfo ssd1307fb_var = {
.bits_per_pixel = 1,
};
@@ -559,8 +559,7 @@ static int ssd1307fb_probe(struct i2c_client *client,
par->info = info;
par->client = client;
- par->device_info = (struct ssd1307fb_deviceinfo *)of_match_device(
- ssd1307fb_of_match, &client->dev)->data;
+ par->device_info = of_device_get_match_data(&client->dev);
par->reset = of_get_named_gpio(client->dev.of_node,
"reset-gpios", 0);
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 621fa441a6db..d5fa313806fe 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -82,7 +82,7 @@
#define VOODOO3_MAX_PIXCLOCK 300000
#define VOODOO5_MAX_PIXCLOCK 350000
-static struct fb_fix_screeninfo tdfx_fix = {
+static const struct fb_fix_screeninfo tdfx_fix = {
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.ypanstep = 1,
@@ -90,7 +90,7 @@ static struct fb_fix_screeninfo tdfx_fix = {
.accel = FB_ACCEL_3DFX_BANSHEE
};
-static struct fb_var_screeninfo tdfx_var = {
+static const struct fb_var_screeninfo tdfx_var = {
/* "640x480, 8 bpp @ 60 Hz */
.xres = 640,
.yres = 480,
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 178ae93b7ebd..98af9e02959b 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -33,7 +33,7 @@ static struct cb_id uvesafb_cn_id = {
static char v86d_path[PATH_MAX] = "/sbin/v86d";
static char v86d_started; /* has v86d been started by uvesafb? */
-static struct fb_fix_screeninfo uvesafb_fix = {
+static const struct fb_fix_screeninfo uvesafb_fix = {
.id = "VESA VGA",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index b9c2f81fb6b9..da653a080394 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -35,76 +35,23 @@
static void *videomemory;
static u_long videomemorysize = VIDEOMEMSIZE;
module_param(videomemorysize, ulong, 0);
+MODULE_PARM_DESC(videomemorysize, "RAM available to frame buffer (in bytes)");
-/**********************************************************************
- *
- * Memory management
- *
- **********************************************************************/
-static void *rvmalloc(unsigned long size)
-{
- void *mem;
- unsigned long adr;
-
- size = PAGE_ALIGN(size);
- mem = vmalloc_32(size);
- if (!mem)
- return NULL;
-
- /*
- * VFB must clear memory to prevent kernel info
- * leakage into userspace
- * VGA-based drivers MUST NOT clear memory if
- * they want to be able to take over vgacon
- */
-
- memset(mem, 0, size);
- adr = (unsigned long) mem;
- while (size > 0) {
- SetPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- return mem;
-}
-
-static void rvfree(void *mem, unsigned long size)
-{
- unsigned long adr;
-
- if (!mem)
- return;
-
- adr = (unsigned long) mem;
- while ((long) size > 0) {
- ClearPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- vfree(mem);
-}
+static char *mode_option = NULL;
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Preferred video mode (e.g. 640x480-8@60)");
-static struct fb_var_screeninfo vfb_default = {
+static const struct fb_videomode vfb_default = {
.xres = 640,
.yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 8,
- .red = { 0, 8, 0 },
- .green = { 0, 8, 0 },
- .blue = { 0, 8, 0 },
- .activate = FB_ACTIVATE_TEST,
- .height = -1,
- .width = -1,
- .pixclock = 20000,
- .left_margin = 64,
- .right_margin = 64,
- .upper_margin = 32,
- .lower_margin = 32,
- .hsync_len = 64,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED,
+ .pixclock = 20000,
+ .left_margin = 64,
+ .right_margin = 64,
+ .upper_margin = 32,
+ .lower_margin = 32,
+ .hsync_len = 64,
+ .vsync_len = 2,
+ .vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo vfb_fix = {
@@ -119,6 +66,7 @@ static struct fb_fix_screeninfo vfb_fix = {
static bool vfb_enable __initdata = 0; /* disabled by default */
module_param(vfb_enable, bool, 0);
+MODULE_PARM_DESC(vfb_enable, "Enable Virtual FB driver");
static int vfb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
@@ -421,35 +369,7 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
static int vfb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
- unsigned long start = vma->vm_start;
- unsigned long size = vma->vm_end - vma->vm_start;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long page, pos;
-
- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
- return -EINVAL;
- if (size > info->fix.smem_len)
- return -EINVAL;
- if (offset > info->fix.smem_len - size)
- return -EINVAL;
-
- pos = (unsigned long)info->fix.smem_start + offset;
-
- while (size > 0) {
- page = vmalloc_to_pfn((void *)pos);
- if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
- return -EAGAIN;
- }
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
- if (size > PAGE_SIZE)
- size -= PAGE_SIZE;
- else
- size = 0;
- }
-
- return 0;
-
+ return remap_vmalloc_range(vma, (void *)info->fix.smem_start, vma->vm_pgoff);
}
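The rewritten vfb_mmap leans on a matched pair of helpers: the backing store must come from a *_user vmalloc variant (which zeroes the pages and marks the area VM_USERMAP), after which remap_vmalloc_range() validates the vma against the buffer and maps the whole thing in one call. A sketch under those assumptions:

static void *videomem;

static int demo_alloc(void)
{
	videomem = vmalloc_32_user(PAGE_ALIGN(SZ_2M));
	return videomem ? 0 : -ENOMEM;
}

static int demo_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	/* rejects out-of-range offsets and sizes, then maps every page */
	return remap_vmalloc_range(vma, videomem, vma->vm_pgoff);
}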
#ifndef MODULE
@@ -477,6 +397,8 @@ static int __init vfb_setup(char *options)
/* Test disable for backwards compatibility */
if (!strcmp(this_opt, "disable"))
vfb_enable = 0;
+ else
+ mode_option = this_opt;
}
return 1;
}
@@ -489,12 +411,13 @@ static int __init vfb_setup(char *options)
static int vfb_probe(struct platform_device *dev)
{
struct fb_info *info;
+ unsigned int size = PAGE_ALIGN(videomemorysize);
int retval = -ENOMEM;
/*
* For real video cards we use ioremap.
*/
- if (!(videomemory = rvmalloc(videomemorysize)))
+ if (!(videomemory = vmalloc_32_user(size)))
return retval;
info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
@@ -504,11 +427,13 @@ static int vfb_probe(struct platform_device *dev)
info->screen_base = (char __iomem *)videomemory;
info->fbops = &vfb_ops;
- retval = fb_find_mode(&info->var, info, NULL,
- NULL, 0, NULL, 8);
+ if (!fb_find_mode(&info->var, info, mode_option,
+ NULL, 0, &vfb_default, 8)){
+ fb_err(info, "Unable to find usable video mode.\n");
+ retval = -EINVAL;
+ goto err1;
+ }
- if (!retval || (retval == 4))
- info->var = vfb_default;
vfb_fix.smem_start = (unsigned long) videomemory;
vfb_fix.smem_len = videomemorysize;
info->fix = vfb_fix;
@@ -533,7 +458,7 @@ err2:
err1:
framebuffer_release(info);
err:
- rvfree(videomemory, videomemorysize);
+ vfree(videomemory);
return retval;
}
@@ -543,7 +468,7 @@ static int vfb_remove(struct platform_device *dev)
if (info) {
unregister_framebuffer(info);
- rvfree(videomemory, videomemorysize);
+ vfree(videomemory);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 283d335a759f..5f0690c8fc93 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined = {
};
/* name should not depend on EGA/VGA */
-static struct fb_fix_screeninfo vga16fb_fix = {
+static const struct fb_fix_screeninfo vga16fb_fix = {
.id = "VGA16 VGA",
.smem_start = VGA_FB_PHYS,
.smem_len = VGA_FB_PHYS_LEN,
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 60bdad3a689b..150ce2abf6c8 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/* Get the physical addresses of the source buffer */
down_read(&current->mm->mmap_sem);
num_pinned = get_user_pages(param.local_vaddr - lb_offset,
- num_pages, (param.source == -1) ? READ : WRITE,
- 0, pages, NULL);
+ num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
+ pages, NULL);
up_read(&current->mm->mmap_sem);
if (num_pinned != num_pages) {
diff --git a/drivers/virtio/config.c b/drivers/virtio/config.c
deleted file mode 100644
index f70bcd2ff98f..000000000000
--- a/drivers/virtio/config.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Configuration space parsing helpers for virtio.
- *
- * The configuration is [type][len][... len bytes ...] fields.
- *
- * Copyright 2007 Rusty Russell, IBM Corporation.
- * GPL v2 or later.
- */
-#include <linux/err.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/bug.h>
-
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e7003db12c4..181793f07852 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -577,6 +577,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ if (towards_target(vb))
+ virtballoon_changed(vdev);
return 0;
out_del_vqs:
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 8c4e61783441..6d9e5173d5fa 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
return -ENODEV;
}
- rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pci_dev->dev,
- DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ } else {
+ /*
+ * The virtio ring base address is expressed as a 32-bit PFN,
+ * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
+ */
+ dma_set_coherent_mask(&pci_dev->dev,
+ DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
+ }
+
if (rc)
dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ed9c9eeedfe5..489bfc61cf30 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
* making all of the arch DMA ops work on the vring device itself
* is a mess. For now, we use the parent device for DMA ops.
*/
-static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
return vq->vq.vdev->dev.parent;
}
@@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
}
@@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
* entry. Always do both to keep code simple. */
if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
END_USE(vq);
@@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
* more to do. */
/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
- * entry. Always do both to keep code simple. */
+ * entry. Always update the event index to keep code simple. */
if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
/* TODO: tune this threshold */
bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
}
/* Put everything in free lists. */
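
All four guards added above share one rationale; a hedged restatement, on the assumption that vq->event is true exactly when VIRTIO_RING_F_EVENT_IDX was negotiated: the device may then ignore avail->flags and watch only the published used_event index, so the driver keeps its shadow flags current but skips the now-meaningless ring write. Sketch, not a drop-in:

    vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
    if (!vq->event)  /* avail->flags is only honoured without EVENT_IDX */
            vq->vring.avail->flags =
                    cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);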
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 15b64076bc26..bdbadaa47ef3 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -156,12 +156,16 @@ size_t vme_get_size(struct vme_resource *resource)
case VME_MASTER:
retval = vme_master_get(resource, &enabled, &base, &size,
&aspace, &cycle, &dwidth);
+ if (retval)
+ return 0;
return size;
break;
case VME_SLAVE:
retval = vme_slave_get(resource, &enabled, &base, &size,
&buf_base, &aspace, &cycle);
+ if (retval)
+ return 0;
return size;
break;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 50dbaa805658..fdd3228e0678 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1844,4 +1844,53 @@ config USBPCWATCHDOG
Most people will say N.
+comment "Watchdog Pretimeout Governors"
+
+config WATCHDOG_PRETIMEOUT_GOV
+ bool "Enable watchdog pretimeout governors"
+ help
+	  This option allows the selection of watchdog pretimeout governors.
+
+if WATCHDOG_PRETIMEOUT_GOV
+
+choice
+ prompt "Default Watchdog Pretimeout Governor"
+ default WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
+ help
+	  This option selects the default watchdog pretimeout governor.
+	  The governor takes its action if a watchdog is capable
+	  of reporting a pretimeout event.
+
+config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP
+ bool "noop"
+ select WATCHDOG_PRETIMEOUT_GOV_NOOP
+ help
+	  Use the noop watchdog pretimeout governor by default. If the
+	  noop governor is selected, it only writes a short message to
+	  the kernel log buffer and makes no other system changes.
+
+config WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC
+ bool "panic"
+ select WATCHDOG_PRETIMEOUT_GOV_PANIC
+ help
+	  Use the panic watchdog pretimeout governor by default. If a
+	  watchdog pretimeout event happens, assume that the watchdog
+	  feeder is dead and a reboot is unavoidable.
+
+endchoice
+
+config WATCHDOG_PRETIMEOUT_GOV_NOOP
+ tristate "Noop watchdog pretimeout governor"
+ help
+	  Noop watchdog pretimeout governor: it only adds an
+	  informational message to the kernel log buffer.
+
+config WATCHDOG_PRETIMEOUT_GOV_PANIC
+ tristate "Panic watchdog pretimeout governor"
+ help
+	  Panic watchdog pretimeout governor: on a watchdog
+	  pretimeout event it puts the kernel into panic.
+
+endif # WATCHDOG_PRETIMEOUT_GOV
+
endif # WATCHDOG
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index cba00430151b..caa9f4aa492a 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -3,9 +3,15 @@
#
# The WatchDog Timer Driver Core.
-watchdog-objs += watchdog_core.o watchdog_dev.o
obj-$(CONFIG_WATCHDOG_CORE) += watchdog.o
+watchdog-objs += watchdog_core.o watchdog_dev.o
+
+watchdog-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV) += watchdog_pretimeout.o
+
+obj-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP) += pretimeout_noop.o
+obj-$(CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC) += pretimeout_panic.o
+
# Only one watchdog can succeed. We probe the ISA/PCI/USB based
# watchdog-cards first, then the architecture specific watchdog
# drivers and then the architecture independent "softdog" driver.
diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c
index c9686b2fdafd..d0b59ba0f661 100644
--- a/drivers/watchdog/asm9260_wdt.c
+++ b/drivers/watchdog/asm9260_wdt.c
@@ -389,7 +389,6 @@ MODULE_DEVICE_TABLE(of, asm9260_wdt_of_match);
static struct platform_driver asm9260_wdt_driver = {
.driver = {
.name = "asm9260-wdt",
- .owner = THIS_MODULE,
.of_match_table = asm9260_wdt_of_match,
},
.probe = asm9260_wdt_probe,
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 835d310081e1..e2209bf5fa8a 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -35,6 +35,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/uaccess.h>
#define DRIVER_NAME "ath79-wdt"
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index 4245b65d645c..e238df4d75a2 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -107,7 +107,7 @@ static struct watchdog_info bcm7038_wdt_info = {
WDIOF_MAGICCLOSE
};
-static struct watchdog_ops bcm7038_wdt_ops = {
+static const struct watchdog_ops bcm7038_wdt_ops = {
.owner = THIS_MODULE,
.start = bcm7038_wdt_start,
.stop = bcm7038_wdt_stop,
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 4dda9024e229..98acef72334d 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -269,7 +269,7 @@ static struct watchdog_info cdns_wdt_info = {
};
/* Watchdog Core Ops */
-static struct watchdog_ops cdns_wdt_ops = {
+static const struct watchdog_ops cdns_wdt_ops = {
.owner = THIS_MODULE,
.start = cdns_wdt_start,
.stop = cdns_wdt_stop,
@@ -424,8 +424,10 @@ static int __maybe_unused cdns_wdt_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct cdns_wdt *wdt = platform_get_drvdata(pdev);
- cdns_wdt_stop(&wdt->cdns_wdt_device);
- clk_disable_unprepare(wdt->clk);
+ if (watchdog_active(&wdt->cdns_wdt_device)) {
+ cdns_wdt_stop(&wdt->cdns_wdt_device);
+ clk_disable_unprepare(wdt->clk);
+ }
return 0;
}
@@ -442,12 +444,14 @@ static int __maybe_unused cdns_wdt_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct cdns_wdt *wdt = platform_get_drvdata(pdev);
- ret = clk_prepare_enable(wdt->clk);
- if (ret) {
- dev_err(dev, "unable to enable clock\n");
- return ret;
+ if (watchdog_active(&wdt->cdns_wdt_device)) {
+ ret = clk_prepare_enable(wdt->clk);
+ if (ret) {
+ dev_err(dev, "unable to enable clock\n");
+ return ret;
+ }
+ cdns_wdt_start(&wdt->cdns_wdt_device);
}
- cdns_wdt_start(&wdt->cdns_wdt_device);
return 0;
}
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 2acb51cf5504..3c6a3de13a1b 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -54,6 +54,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
struct dw_wdt {
void __iomem *regs;
struct clk *clk;
+ unsigned long rate;
struct notifier_block restart_handler;
struct watchdog_device wdd;
};
@@ -72,7 +73,7 @@ static inline int dw_wdt_top_in_seconds(struct dw_wdt *dw_wdt, unsigned top)
* There are 16 possible timeout values in 0..15 where the number of
* cycles is 2 ^ (16 + i) and the watchdog counts down.
*/
- return (1U << (16 + top)) / clk_get_rate(dw_wdt->clk);
+ return (1U << (16 + top)) / dw_wdt->rate;
}
static int dw_wdt_get_top(struct dw_wdt *dw_wdt)
@@ -163,7 +164,7 @@ static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd)
struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
return readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
- clk_get_rate(dw_wdt->clk);
+ dw_wdt->rate;
}
static const struct watchdog_info dw_wdt_ident = {
@@ -231,6 +232,12 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
if (ret)
return ret;
+ dw_wdt->rate = clk_get_rate(dw_wdt->clk);
+ if (dw_wdt->rate == 0) {
+ ret = -EINVAL;
+ goto out_disable_clk;
+ }
+
wdd = &dw_wdt->wdd;
wdd->info = &dw_wdt_ident;
wdd->ops = &dw_wdt_ops;
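
A standalone sketch of the timeout table that the cached rate now feeds, assuming a 33 MHz input clock (the real value comes from clk_get_rate() at probe and is rejected if zero):

    #include <stdio.h>

    int main(void)
    {
            unsigned long rate = 33000000;  /* assumed clk_get_rate() result, Hz */
            unsigned int top;

            /* same formula as dw_wdt_top_in_seconds() above */
            for (top = 0; top < 16; top++)
                    printf("top=%2u -> %lu s\n", top, (1UL << (16 + top)) / rate);
            return 0;
    }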
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 8f89bd8a826a..70c7194e2810 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -39,7 +39,7 @@
#include <asm/nmi.h>
#include <asm/frame.h>
-#define HPWDT_VERSION "1.3.3"
+#define HPWDT_VERSION "1.4.0"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
@@ -814,7 +814,8 @@ static int hpwdt_init_one(struct pci_dev *dev,
* not run on a legacy ASM box.
* So we only support the G5 ProLiant servers and higher.
*/
- if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) {
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP &&
+ dev->subsystem_vendor != PCI_VENDOR_ID_HP_3PAR) {
dev_warn(&dev->dev,
"This server does not have an iLO2+ ASIC.\n");
return -ENODEV;
@@ -823,7 +824,8 @@ static int hpwdt_init_one(struct pci_dev *dev,
/*
 * Ignore all auxiliary iLO devices with the following PCI ID
*/
- if (dev->subsystem_device == 0x1979)
+ if (dev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+ dev->subsystem_device == 0x1979)
return -ENODEV;
if (pci_enable_device(dev)) {
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 54cab189a763..06fcb6c8c917 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -629,7 +629,7 @@ static int iTCO_wdt_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops iTCO_wdt_pm = {
+static const struct dev_pm_ops iTCO_wdt_pm = {
.suspend_noirq = iTCO_wdt_suspend_noirq,
.resume_noirq = iTCO_wdt_resume_noirq,
};
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 62f346bb4348..4874b0f18650 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -37,18 +38,23 @@
#define IMX2_WDT_WCR 0x00 /* Control Register */
#define IMX2_WDT_WCR_WT (0xFF << 8) /* -> Watchdog Timeout Field */
-#define IMX2_WDT_WCR_WDA (1 << 5) /* -> External Reset WDOG_B */
-#define IMX2_WDT_WCR_SRS (1 << 4) /* -> Software Reset Signal */
-#define IMX2_WDT_WCR_WRE (1 << 3) /* -> WDOG Reset Enable */
-#define IMX2_WDT_WCR_WDE (1 << 2) /* -> Watchdog Enable */
-#define IMX2_WDT_WCR_WDZST (1 << 0) /* -> Watchdog timer Suspend */
+#define IMX2_WDT_WCR_WDA BIT(5) /* -> External Reset WDOG_B */
+#define IMX2_WDT_WCR_SRS BIT(4) /* -> Software Reset Signal */
+#define IMX2_WDT_WCR_WRE BIT(3) /* -> WDOG Reset Enable */
+#define IMX2_WDT_WCR_WDE BIT(2) /* -> Watchdog Enable */
+#define IMX2_WDT_WCR_WDZST BIT(0) /* -> Watchdog timer Suspend */
#define IMX2_WDT_WSR 0x02 /* Service Register */
#define IMX2_WDT_SEQ1 0x5555 /* -> service sequence 1 */
#define IMX2_WDT_SEQ2 0xAAAA /* -> service sequence 2 */
#define IMX2_WDT_WRSR 0x04 /* Reset Status Register */
-#define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */
+#define IMX2_WDT_WRSR_TOUT BIT(1) /* -> Reset due to Timeout */
+
+#define IMX2_WDT_WICR 0x06 /* Interrupt Control Register */
+#define IMX2_WDT_WICR_WIE BIT(15) /* -> Interrupt Enable */
+#define IMX2_WDT_WICR_WTIS BIT(14) /* -> Interrupt Status */
+#define IMX2_WDT_WICR_WICT 0xFF /* -> Interrupt Count Timeout */
#define IMX2_WDT_WMCR 0x08 /* Misc Register */
@@ -80,6 +86,12 @@ static const struct watchdog_info imx2_wdt_info = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
};
+static const struct watchdog_info imx2_wdt_pretimeout_info = {
+ .identity = "imx2+ watchdog",
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
+ WDIOF_PRETIMEOUT,
+};
+
static int imx2_wdt_restart(struct watchdog_device *wdog, unsigned long action,
void *data)
{
@@ -169,6 +181,35 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
return 0;
}
+static int imx2_wdt_set_pretimeout(struct watchdog_device *wdog,
+ unsigned int new_pretimeout)
+{
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+ if (new_pretimeout >= IMX2_WDT_MAX_TIME)
+ return -EINVAL;
+
+ wdog->pretimeout = new_pretimeout;
+
+ regmap_update_bits(wdev->regmap, IMX2_WDT_WICR,
+ IMX2_WDT_WICR_WIE | IMX2_WDT_WICR_WICT,
+ IMX2_WDT_WICR_WIE | (new_pretimeout << 1));
+ return 0;
+}
+
+static irqreturn_t imx2_wdt_isr(int irq, void *wdog_arg)
+{
+ struct watchdog_device *wdog = wdog_arg;
+ struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+
+ regmap_write_bits(wdev->regmap, IMX2_WDT_WICR,
+ IMX2_WDT_WICR_WTIS, IMX2_WDT_WICR_WTIS);
+
+ watchdog_notify_pretimeout(wdog);
+
+ return IRQ_HANDLED;
+}
+
static int imx2_wdt_start(struct watchdog_device *wdog)
{
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -188,6 +229,7 @@ static const struct watchdog_ops imx2_wdt_ops = {
.start = imx2_wdt_start,
.ping = imx2_wdt_ping,
.set_timeout = imx2_wdt_set_timeout,
+ .set_pretimeout = imx2_wdt_set_pretimeout,
.restart = imx2_wdt_restart,
};
@@ -236,6 +278,12 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdog->max_hw_heartbeat_ms = IMX2_WDT_MAX_TIME * 1000;
wdog->parent = &pdev->dev;
+ ret = platform_get_irq(pdev, 0);
+ if (ret > 0)
+ if (!devm_request_irq(&pdev->dev, ret, imx2_wdt_isr, 0,
+ dev_name(&pdev->dev), wdog))
+ wdog->info = &imx2_wdt_pretimeout_info;
+
ret = clk_prepare_enable(wdev->clk);
if (ret)
return ret;
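
A standalone sketch of the WICR encoding that imx2_wdt_set_pretimeout() performs, on the assumption (taken from i.MX reference manuals, not from this patch) that the WICT field counts 0.5-second ticks, which would explain the left shift by one:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pretimeout = 7;             /* seconds */
            unsigned int wict = pretimeout << 1;     /* 0.5 s ticks (assumed unit) */
            unsigned int wicr = (1u << 15) | wict;   /* IMX2_WDT_WICR_WIE | WICT */

            printf("WICR = 0x%04x\n", wicr);         /* prints 0x800e */
            return 0;
    }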
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 5bf931ce1353..8e302d0e346c 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -430,7 +430,7 @@ static struct watchdog_info kempld_wdt_info = {
WDIOF_PRETIMEOUT
};
-static struct watchdog_ops kempld_wdt_ops = {
+static const struct watchdog_ops kempld_wdt_ops = {
.owner = THIS_MODULE,
.start = kempld_wdt_start,
.stop = kempld_wdt_stop,
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 4a2290f900a8..d5735c12067d 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -139,7 +139,6 @@ static int mt7621_wdt_probe(struct platform_device *pdev)
if (!IS_ERR(mt7621_wdt_reset))
reset_control_deassert(mt7621_wdt_reset);
- mt7621_wdt_dev.dev = &pdev->dev;
mt7621_wdt_dev.bootstatus = mt7621_wdt_bootcause();
watchdog_init_timeout(&mt7621_wdt_dev, mt7621_wdt_dev.max_timeout,
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index b2e1b4cbbdc1..fae7fe929ea3 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -10,6 +10,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -45,6 +46,7 @@ struct xwdt_device {
u32 wdt_interval;
spinlock_t spinlock;
struct watchdog_device xilinx_wdt_wdd;
+ struct clk *clk;
};
static int xilinx_wdt_start(struct watchdog_device *wdd)
@@ -195,16 +197,30 @@ static int xwdt_probe(struct platform_device *pdev)
spin_lock_init(&xdev->spinlock);
watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
+ xdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xdev->clk)) {
+ if (PTR_ERR(xdev->clk) == -ENOENT)
+ xdev->clk = NULL;
+ else
+ return PTR_ERR(xdev->clk);
+ }
+
+ rc = clk_prepare_enable(xdev->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to enable clock\n");
+ return rc;
+ }
+
rc = xwdt_selftest(xdev);
if (rc == XWT_TIMER_FAILED) {
dev_err(&pdev->dev, "SelfTest routine error\n");
- return rc;
+ goto err_clk_disable;
}
rc = watchdog_register_device(xilinx_wdt_wdd);
if (rc) {
dev_err(&pdev->dev, "Cannot register watchdog (err=%d)\n", rc);
- return rc;
+ goto err_clk_disable;
}
dev_info(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds\n",
@@ -213,6 +229,10 @@ static int xwdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, xdev);
return 0;
+err_clk_disable:
+ clk_disable_unprepare(xdev->clk);
+
+ return rc;
}
static int xwdt_remove(struct platform_device *pdev)
@@ -220,6 +240,7 @@ static int xwdt_remove(struct platform_device *pdev)
struct xwdt_device *xdev = platform_get_drvdata(pdev);
watchdog_unregister_device(&xdev->xilinx_wdt_wdd);
+ clk_disable_unprepare(xdev->clk);
return 0;
}
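
The clock handling added above follows the optional-clock idiom: a devm_clk_get() return of -ENOENT is treated as "no clock wired up" and the handle is set to NULL, which clk_prepare_enable() and clk_disable_unprepare() accept as a no-op, so the same probe, error, and remove paths serve both clocked and clockless instantiations of the core.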
diff --git a/drivers/watchdog/pretimeout_noop.c b/drivers/watchdog/pretimeout_noop.c
new file mode 100644
index 000000000000..85f5299d197c
--- /dev/null
+++ b/drivers/watchdog/pretimeout_noop.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015-2016 Mentor Graphics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/watchdog.h>
+
+#include "watchdog_pretimeout.h"
+
+/**
+ * pretimeout_noop - No operation on watchdog pretimeout event
+ * @wdd: watchdog_device
+ *
+ * This function prints a short message about the pretimeout event to the
+ * kernel log.
+ */
+static void pretimeout_noop(struct watchdog_device *wdd)
+{
+ pr_alert("watchdog%d: pretimeout event\n", wdd->id);
+}
+
+static struct watchdog_governor watchdog_gov_noop = {
+ .name = "noop",
+ .pretimeout = pretimeout_noop,
+};
+
+static int __init watchdog_gov_noop_register(void)
+{
+ return watchdog_register_governor(&watchdog_gov_noop);
+}
+
+static void __exit watchdog_gov_noop_unregister(void)
+{
+ watchdog_unregister_governor(&watchdog_gov_noop);
+}
+module_init(watchdog_gov_noop_register);
+module_exit(watchdog_gov_noop_unregister);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>");
+MODULE_DESCRIPTION("Panic watchdog pretimeout governor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/pretimeout_panic.c b/drivers/watchdog/pretimeout_panic.c
new file mode 100644
index 000000000000..0c197a1c97f4
--- /dev/null
+++ b/drivers/watchdog/pretimeout_panic.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015-2016 Mentor Graphics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/watchdog.h>
+
+#include "watchdog_pretimeout.h"
+
+/**
+ * pretimeout_panic - Panic on watchdog pretimeout event
+ * @wdd: watchdog_device
+ *
+ * Panic: the watchdog has not been fed up to the pretimeout event.
+ */
+static void pretimeout_panic(struct watchdog_device *wdd)
+{
+ panic("watchdog pretimeout event\n");
+}
+
+static struct watchdog_governor watchdog_gov_panic = {
+ .name = "panic",
+ .pretimeout = pretimeout_panic,
+};
+
+static int __init watchdog_gov_panic_register(void)
+{
+ return watchdog_register_governor(&watchdog_gov_panic);
+}
+
+static void __exit watchdog_gov_panic_unregister(void)
+{
+ watchdog_unregister_governor(&watchdog_gov_panic);
+}
+module_init(watchdog_gov_panic_register);
+module_exit(watchdog_gov_panic_unregister);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir_zapolskiy@mentor.com>");
+MODULE_DESCRIPTION("Panic watchdog pretimeout governor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
index d1c12278cb6a..0805ee2acd7a 100644
--- a/drivers/watchdog/rn5t618_wdt.c
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -136,7 +136,7 @@ static struct watchdog_info rn5t618_wdt_info = {
.identity = DRIVER_NAME,
};
-static struct watchdog_ops rn5t618_wdt_ops = {
+static const struct watchdog_ops rn5t618_wdt_ops = {
.owner = THIS_MODULE,
.start = rn5t618_wdt_start,
.stop = rn5t618_wdt_stop,
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 1967919ae743..14b4fd428fff 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -158,7 +158,6 @@ static int rt288x_wdt_probe(struct platform_device *pdev)
rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE;
- rt288x_wdt_dev.dev = &pdev->dev;
rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
rt288x_wdt_dev.parent = &pdev->dev;
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index b067edf246df..c7bdc986dca1 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -72,10 +72,27 @@ static void softdog_fire(unsigned long data)
static struct timer_list softdog_ticktock =
TIMER_INITIALIZER(softdog_fire, 0, 0);
+static struct watchdog_device softdog_dev;
+
+static void softdog_pretimeout(unsigned long data)
+{
+ watchdog_notify_pretimeout(&softdog_dev);
+}
+
+static struct timer_list softdog_preticktock =
+ TIMER_INITIALIZER(softdog_pretimeout, 0, 0);
+
static int softdog_ping(struct watchdog_device *w)
{
if (!mod_timer(&softdog_ticktock, jiffies + (w->timeout * HZ)))
__module_get(THIS_MODULE);
+
+ if (w->pretimeout)
+ mod_timer(&softdog_preticktock, jiffies +
+ (w->timeout - w->pretimeout) * HZ);
+ else
+ del_timer(&softdog_preticktock);
+
return 0;
}
@@ -84,15 +101,18 @@ static int softdog_stop(struct watchdog_device *w)
if (del_timer(&softdog_ticktock))
module_put(THIS_MODULE);
+ del_timer(&softdog_preticktock);
+
return 0;
}
static struct watchdog_info softdog_info = {
.identity = "Software Watchdog",
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE |
+ WDIOF_PRETIMEOUT,
};
-static struct watchdog_ops softdog_ops = {
+static const struct watchdog_ops softdog_ops = {
.owner = THIS_MODULE,
.start = softdog_ping,
.stop = softdog_stop,
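
Worked example of the arming in softdog_ping() above: with timeout = 60 s and pretimeout = 10 s, softdog_preticktock is set to fire at jiffies + 50 * HZ, so watchdog_notify_pretimeout() runs a full 10 s before softdog_ticktock takes the reboot path; with pretimeout = 0 the extra timer is simply deleted.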
diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c
index 14e9badf2bfa..e6100e447dd8 100644
--- a/drivers/watchdog/st_lpc_wdt.c
+++ b/drivers/watchdog/st_lpc_wdt.c
@@ -52,27 +52,6 @@ struct st_wdog {
bool warm_reset;
};
-static struct st_wdog_syscfg stid127_syscfg = {
- .reset_type_reg = 0x004,
- .reset_type_mask = BIT(2),
- .enable_reg = 0x000,
- .enable_mask = BIT(2),
-};
-
-static struct st_wdog_syscfg stih415_syscfg = {
- .reset_type_reg = 0x0B8,
- .reset_type_mask = BIT(6),
- .enable_reg = 0x0B4,
- .enable_mask = BIT(7),
-};
-
-static struct st_wdog_syscfg stih416_syscfg = {
- .reset_type_reg = 0x88C,
- .reset_type_mask = BIT(6),
- .enable_reg = 0x888,
- .enable_mask = BIT(7),
-};
-
static struct st_wdog_syscfg stih407_syscfg = {
.enable_reg = 0x204,
.enable_mask = BIT(19),
@@ -83,18 +62,6 @@ static const struct of_device_id st_wdog_match[] = {
.compatible = "st,stih407-lpc",
.data = &stih407_syscfg,
},
- {
- .compatible = "st,stih416-lpc",
- .data = &stih416_syscfg,
- },
- {
- .compatible = "st,stih415-lpc",
- .data = &stih415_syscfg,
- },
- {
- .compatible = "st,stid127-lpc",
- .data = &stid127_syscfg,
- },
{},
};
MODULE_DEVICE_TABLE(of, st_wdog_match);
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 9ec57608da82..2d53c3f9394f 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -178,7 +178,7 @@ static const struct watchdog_info tegra_wdt_info = {
.identity = "Tegra Watchdog",
};
-static struct watchdog_ops tegra_wdt_ops = {
+static const struct watchdog_ops tegra_wdt_ops = {
.owner = THIS_MODULE,
.start = tegra_wdt_start,
.stop = tegra_wdt_stop,
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index c2da880292bc..6f7a9deb27d0 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -112,7 +112,7 @@ static int __init txx9wdt_probe(struct platform_device *dev)
txx9_imclk = NULL;
goto exit;
}
- ret = clk_enable(txx9_imclk);
+ ret = clk_prepare_enable(txx9_imclk);
if (ret) {
clk_put(txx9_imclk);
txx9_imclk = NULL;
@@ -144,7 +144,7 @@ static int __init txx9wdt_probe(struct platform_device *dev)
return 0;
exit:
if (txx9_imclk) {
- clk_disable(txx9_imclk);
+ clk_disable_unprepare(txx9_imclk);
clk_put(txx9_imclk);
}
return ret;
@@ -153,7 +153,7 @@ exit:
static int __exit txx9wdt_remove(struct platform_device *dev)
{
watchdog_unregister_device(&txx9wdt);
- clk_disable(txx9_imclk);
+ clk_disable_unprepare(txx9_imclk);
clk_put(txx9_imclk);
return 0;
}
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 09e8003039dc..ef2ecaf53a14 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -302,7 +302,7 @@ static struct watchdog_info wdt_info = {
.identity = "W83627HF Watchdog",
};
-static struct watchdog_ops wdt_ops = {
+static const struct watchdog_ops wdt_ops = {
.owner = THIS_MODULE,
.start = wdt_start,
.stop = wdt_stop,
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 6abb83cd7681..74265b2f806c 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -349,7 +349,7 @@ int devm_watchdog_register_device(struct device *dev,
struct watchdog_device **rcwdd;
int ret;
- rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*wdd),
+ rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*rcwdd),
GFP_KERNEL);
if (!rcwdd)
return -ENOMEM;
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 040bf8382f46..32930a073a12 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -49,6 +49,7 @@
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include "watchdog_core.h"
+#include "watchdog_pretimeout.h"
/*
* struct watchdog_core_data - watchdog core internal data
@@ -335,10 +336,14 @@ static int watchdog_set_timeout(struct watchdog_device *wdd,
if (watchdog_timeout_invalid(wdd, timeout))
return -EINVAL;
- if (wdd->ops->set_timeout)
+ if (wdd->ops->set_timeout) {
err = wdd->ops->set_timeout(wdd, timeout);
- else
+ } else {
wdd->timeout = timeout;
+ /* Disable pretimeout if it doesn't fit the new timeout */
+ if (wdd->pretimeout >= wdd->timeout)
+ wdd->pretimeout = 0;
+ }
watchdog_update_worker(wdd);
@@ -346,6 +351,31 @@ static int watchdog_set_timeout(struct watchdog_device *wdd,
}
/*
+ * watchdog_set_pretimeout: set the watchdog timer pretimeout
+ * @wdd: the watchdog device to set the pretimeout for
+ * @timeout: pretimeout to set in seconds
+ */
+
+static int watchdog_set_pretimeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ int err = 0;
+
+ if (!(wdd->info->options & WDIOF_PRETIMEOUT))
+ return -EOPNOTSUPP;
+
+ if (watchdog_pretimeout_invalid(wdd, timeout))
+ return -EINVAL;
+
+ if (wdd->ops->set_pretimeout)
+ err = wdd->ops->set_pretimeout(wdd, timeout);
+ else
+ wdd->pretimeout = timeout;
+
+ return err;
+}
+
+/*
* watchdog_get_timeleft: wrapper to get the time left before a reboot
* @wdd: the watchdog device to get the remaining time from
* @timeleft: the time that's left
@@ -429,6 +459,15 @@ static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(timeout);
+static ssize_t pretimeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", wdd->pretimeout);
+}
+static DEVICE_ATTR_RO(pretimeout);
+
static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -450,6 +489,36 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(state);
+static ssize_t pretimeout_available_governors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return watchdog_pretimeout_available_governors_get(buf);
+}
+static DEVICE_ATTR_RO(pretimeout_available_governors);
+
+static ssize_t pretimeout_governor_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+
+ return watchdog_pretimeout_governor_get(wdd, buf);
+}
+
+static ssize_t pretimeout_governor_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+ int ret = watchdog_pretimeout_governor_set(wdd, buf);
+
+ if (!ret)
+ ret = count;
+
+ return ret;
+}
+static DEVICE_ATTR_RW(pretimeout_governor);
+
static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
int n)
{
@@ -459,6 +528,14 @@ static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
mode = 0;
+ else if (attr == &dev_attr_pretimeout.attr &&
+ !(wdd->info->options & WDIOF_PRETIMEOUT))
+ mode = 0;
+ else if ((attr == &dev_attr_pretimeout_governor.attr ||
+ attr == &dev_attr_pretimeout_available_governors.attr) &&
+ (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
+ !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
+ mode = 0;
return mode;
}
@@ -466,10 +543,13 @@ static struct attribute *wdt_attrs[] = {
&dev_attr_state.attr,
&dev_attr_identity.attr,
&dev_attr_timeout.attr,
+ &dev_attr_pretimeout.attr,
&dev_attr_timeleft.attr,
&dev_attr_bootstatus.attr,
&dev_attr_status.attr,
&dev_attr_nowayout.attr,
+ &dev_attr_pretimeout_governor.attr,
+ &dev_attr_pretimeout_available_governors.attr,
NULL,
};
@@ -646,6 +726,16 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
break;
err = put_user(val, p);
break;
+ case WDIOC_SETPRETIMEOUT:
+ if (get_user(val, p)) {
+ err = -EFAULT;
+ break;
+ }
+ err = watchdog_set_pretimeout(wdd, val);
+ break;
+ case WDIOC_GETPRETIMEOUT:
+ err = put_user(wdd->pretimeout, p);
+ break;
default:
err = -ENOTTY;
break;
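
A minimal userspace sketch of the two new ioctls, assuming a node at /dev/watchdog0 whose driver advertises WDIOF_PRETIMEOUT and magic close (error handling trimmed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            int fd = open("/dev/watchdog0", O_WRONLY);
            int pre = 5;    /* desired pretimeout, seconds before the hard timeout */

            if (fd < 0)
                    return 1;
            ioctl(fd, WDIOC_SETPRETIMEOUT, &pre);  /* fails with EOPNOTSUPP otherwise */
            ioctl(fd, WDIOC_GETPRETIMEOUT, &pre);
            printf("pretimeout is now %d s\n", pre);
            write(fd, "V", 1);  /* magic close so the watchdog is not left armed */
            close(fd);
            return 0;
    }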
@@ -937,6 +1027,12 @@ int watchdog_dev_register(struct watchdog_device *wdd)
return PTR_ERR(dev);
}
+ ret = watchdog_register_pretimeout(wdd);
+ if (ret) {
+ device_destroy(&watchdog_class, devno);
+ watchdog_cdev_unregister(wdd);
+ }
+
return ret;
}
@@ -950,6 +1046,7 @@ int watchdog_dev_register(struct watchdog_device *wdd)
void watchdog_dev_unregister(struct watchdog_device *wdd)
{
+ watchdog_unregister_pretimeout(wdd);
device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
watchdog_cdev_unregister(wdd);
}
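
A companion sketch for the new sysfs attributes, assuming the device registered as watchdog0 and CONFIG_WATCHDOG_PRETIMEOUT_GOV is enabled; pretimeout_governor is read-write, the other two attributes are read-only:

    #include <stdio.h>

    int main(void)
    {
            char line[64];
            FILE *f;

            f = fopen("/sys/class/watchdog/watchdog0/pretimeout_available_governors", "r");
            if (f) {
                    while (fgets(line, sizeof(line), f))
                            printf("available: %s", line);
                    fclose(f);
            }

            f = fopen("/sys/class/watchdog/watchdog0/pretimeout_governor", "w");
            if (f) {
                    fputs("panic\n", f);  /* sysfs_streq() tolerates the newline */
                    fclose(f);
            }
            return 0;
    }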
diff --git a/drivers/watchdog/watchdog_pretimeout.c b/drivers/watchdog/watchdog_pretimeout.c
new file mode 100644
index 000000000000..9db07bfb4334
--- /dev/null
+++ b/drivers/watchdog/watchdog_pretimeout.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015-2016 Mentor Graphics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/watchdog.h>
+
+#include "watchdog_pretimeout.h"
+
+/* Default watchdog pretimeout governor */
+static struct watchdog_governor *default_gov;
+
+/* The spinlock protects default_gov, wdd->gov and pretimeout_list */
+static DEFINE_SPINLOCK(pretimeout_lock);
+
+/* List of watchdog devices, which can generate a pretimeout event */
+static LIST_HEAD(pretimeout_list);
+
+struct watchdog_pretimeout {
+ struct watchdog_device *wdd;
+ struct list_head entry;
+};
+
+/* The mutex protects governor list and serializes external interfaces */
+static DEFINE_MUTEX(governor_lock);
+
+/* List of the registered watchdog pretimeout governors */
+static LIST_HEAD(governor_list);
+
+struct governor_priv {
+ struct watchdog_governor *gov;
+ struct list_head entry;
+};
+
+static struct governor_priv *find_governor_by_name(const char *gov_name)
+{
+ struct governor_priv *priv;
+
+ list_for_each_entry(priv, &governor_list, entry)
+ if (sysfs_streq(gov_name, priv->gov->name))
+ return priv;
+
+ return NULL;
+}
+
+int watchdog_pretimeout_available_governors_get(char *buf)
+{
+ struct governor_priv *priv;
+ int count = 0;
+
+ mutex_lock(&governor_lock);
+
+ list_for_each_entry(priv, &governor_list, entry)
+ count += sprintf(buf + count, "%s\n", priv->gov->name);
+
+ mutex_unlock(&governor_lock);
+
+ return count;
+}
+
+int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf)
+{
+ int count = 0;
+
+ spin_lock_irq(&pretimeout_lock);
+ if (wdd->gov)
+ count = sprintf(buf, "%s\n", wdd->gov->name);
+ spin_unlock_irq(&pretimeout_lock);
+
+ return count;
+}
+
+int watchdog_pretimeout_governor_set(struct watchdog_device *wdd,
+ const char *buf)
+{
+ struct governor_priv *priv;
+
+ mutex_lock(&governor_lock);
+
+ priv = find_governor_by_name(buf);
+ if (!priv) {
+ mutex_unlock(&governor_lock);
+ return -EINVAL;
+ }
+
+ spin_lock_irq(&pretimeout_lock);
+ wdd->gov = priv->gov;
+ spin_unlock_irq(&pretimeout_lock);
+
+ mutex_unlock(&governor_lock);
+
+ return 0;
+}
+
+void watchdog_notify_pretimeout(struct watchdog_device *wdd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pretimeout_lock, flags);
+ if (!wdd->gov) {
+ spin_unlock_irqrestore(&pretimeout_lock, flags);
+ return;
+ }
+
+ wdd->gov->pretimeout(wdd);
+ spin_unlock_irqrestore(&pretimeout_lock, flags);
+}
+EXPORT_SYMBOL_GPL(watchdog_notify_pretimeout);
+
+int watchdog_register_governor(struct watchdog_governor *gov)
+{
+ struct watchdog_pretimeout *p;
+ struct governor_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_lock(&governor_lock);
+
+ if (find_governor_by_name(gov->name)) {
+ mutex_unlock(&governor_lock);
+ kfree(priv);
+ return -EBUSY;
+ }
+
+ priv->gov = gov;
+ list_add(&priv->entry, &governor_list);
+
+ if (!strncmp(gov->name, WATCHDOG_PRETIMEOUT_DEFAULT_GOV,
+ WATCHDOG_GOV_NAME_MAXLEN)) {
+ spin_lock_irq(&pretimeout_lock);
+ default_gov = gov;
+
+ list_for_each_entry(p, &pretimeout_list, entry)
+ if (!p->wdd->gov)
+ p->wdd->gov = default_gov;
+ spin_unlock_irq(&pretimeout_lock);
+ }
+
+ mutex_unlock(&governor_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(watchdog_register_governor);
+
+void watchdog_unregister_governor(struct watchdog_governor *gov)
+{
+ struct watchdog_pretimeout *p;
+ struct governor_priv *priv, *t;
+
+ mutex_lock(&governor_lock);
+
+ list_for_each_entry_safe(priv, t, &governor_list, entry) {
+ if (priv->gov == gov) {
+ list_del(&priv->entry);
+ kfree(priv);
+ break;
+ }
+ }
+
+ spin_lock_irq(&pretimeout_lock);
+ list_for_each_entry(p, &pretimeout_list, entry)
+ if (p->wdd->gov == gov)
+ p->wdd->gov = default_gov;
+ spin_unlock_irq(&pretimeout_lock);
+
+ mutex_unlock(&governor_lock);
+}
+EXPORT_SYMBOL(watchdog_unregister_governor);
+
+int watchdog_register_pretimeout(struct watchdog_device *wdd)
+{
+ struct watchdog_pretimeout *p;
+
+ if (!(wdd->info->options & WDIOF_PRETIMEOUT))
+ return 0;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ spin_lock_irq(&pretimeout_lock);
+ list_add(&p->entry, &pretimeout_list);
+ p->wdd = wdd;
+ wdd->gov = default_gov;
+ spin_unlock_irq(&pretimeout_lock);
+
+ return 0;
+}
+
+void watchdog_unregister_pretimeout(struct watchdog_device *wdd)
+{
+ struct watchdog_pretimeout *p, *t;
+
+ if (!(wdd->info->options & WDIOF_PRETIMEOUT))
+ return;
+
+ spin_lock_irq(&pretimeout_lock);
+ wdd->gov = NULL;
+
+ list_for_each_entry_safe(p, t, &pretimeout_list, entry) {
+ if (p->wdd == wdd) {
+ list_del(&p->entry);
+ break;
+ }
+ }
+ spin_unlock_irq(&pretimeout_lock);
+
+ kfree(p);
+}
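
A hedged sketch of what a third-party governor built on this interface could look like; the name "demo" and its action are invented, but the registration calls are the ones exported above:

    #include <linux/module.h>
    #include <linux/printk.h>
    #include <linux/watchdog.h>

    #include "watchdog_pretimeout.h"

    /* Invented action: shout to the log instead of panicking */
    static void demo_pretimeout(struct watchdog_device *wdd)
    {
            pr_emerg("watchdog%d: pretimeout, %u s left\n",
                     wdd->id, wdd->pretimeout);
    }

    static struct watchdog_governor demo_gov = {
            .name       = "demo",
            .pretimeout = demo_pretimeout,
    };

    static int __init demo_register(void)
    {
            return watchdog_register_governor(&demo_gov);
    }
    module_init(demo_register);

    static void __exit demo_unregister(void)
    {
            watchdog_unregister_governor(&demo_gov);
    }
    module_exit(demo_unregister);

    MODULE_LICENSE("GPL");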
diff --git a/drivers/watchdog/watchdog_pretimeout.h b/drivers/watchdog/watchdog_pretimeout.h
new file mode 100644
index 000000000000..a5a32b39c56d
--- /dev/null
+++ b/drivers/watchdog/watchdog_pretimeout.h
@@ -0,0 +1,60 @@
+#ifndef __WATCHDOG_PRETIMEOUT_H
+#define __WATCHDOG_PRETIMEOUT_H
+
+#define WATCHDOG_GOV_NAME_MAXLEN 20
+
+struct watchdog_device;
+
+struct watchdog_governor {
+ const char name[WATCHDOG_GOV_NAME_MAXLEN];
+ void (*pretimeout)(struct watchdog_device *wdd);
+};
+
+#if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)
+/* Interfaces to watchdog pretimeout governors */
+int watchdog_register_governor(struct watchdog_governor *gov);
+void watchdog_unregister_governor(struct watchdog_governor *gov);
+
+/* Interfaces to watchdog_dev.c */
+int watchdog_register_pretimeout(struct watchdog_device *wdd);
+void watchdog_unregister_pretimeout(struct watchdog_device *wdd);
+int watchdog_pretimeout_available_governors_get(char *buf);
+int watchdog_pretimeout_governor_get(struct watchdog_device *wdd, char *buf);
+int watchdog_pretimeout_governor_set(struct watchdog_device *wdd,
+ const char *buf);
+
+#if IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP)
+#define WATCHDOG_PRETIMEOUT_DEFAULT_GOV "noop"
+#elif IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC)
+#define WATCHDOG_PRETIMEOUT_DEFAULT_GOV "panic"
+#endif
+
+#else
+static inline int watchdog_register_pretimeout(struct watchdog_device *wdd)
+{
+ return 0;
+}
+
+static inline void watchdog_unregister_pretimeout(struct watchdog_device *wdd)
+{
+}
+
+static inline int watchdog_pretimeout_available_governors_get(char *buf)
+{
+ return -EINVAL;
+}
+
+static inline int watchdog_pretimeout_governor_get(struct watchdog_device *wdd,
+ char *buf)
+{
+ return -EINVAL;
+}
+
+static inline int watchdog_pretimeout_governor_set(struct watchdog_device *wdd,
+ const char *buf)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e473e3b23720..6d1fbda0f461 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -499,6 +499,10 @@ static int wdat_wdt_resume_noirq(struct device *dev)
ret = wdat_wdt_enable_reboot(wdat);
if (ret)
return ret;
+
+ ret = wdat_wdt_ping(&wdat->wdd);
+ if (ret)
+ return ret;
}
return wdat_wdt_start(&wdat->wdd);
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index fa1efef3c96e..b4e0cea5a64e 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -18,7 +18,10 @@
* GNU General Public License for more details.
*/
+#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/ihex.h>
+#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -36,6 +39,8 @@
#define ZIIRAVE_STATE_OFF 0x1
#define ZIIRAVE_STATE_ON 0x2
+#define ZIIRAVE_FW_NAME "ziirave_wdt.fw"
+
static char *ziirave_reasons[] = {"power cycle", "hw watchdog", NULL, NULL,
"host request", NULL, "illegal configuration",
"illegal instruction", "illegal trap",
@@ -50,12 +55,35 @@ static char *ziirave_reasons[] = {"power cycle", "hw watchdog", NULL, NULL,
#define ZIIRAVE_WDT_PING 0x9
#define ZIIRAVE_WDT_RESET_DURATION 0xa
+#define ZIIRAVE_FIRM_PKT_TOTAL_SIZE 20
+#define ZIIRAVE_FIRM_PKT_DATA_SIZE 16
+#define ZIIRAVE_FIRM_FLASH_MEMORY_START 0x1600
+#define ZIIRAVE_FIRM_FLASH_MEMORY_END 0x2bbf
+
+/* Received and ready for next Download packet. */
+#define ZIIRAVE_FIRM_DOWNLOAD_ACK 1
+/* Currently writing to flash. Retry Download status in a moment! */
+#define ZIIRAVE_FIRM_DOWNLOAD_BUSY 2
+
+/* Wait for ACK timeout in ms */
+#define ZIIRAVE_FIRM_WAIT_FOR_ACK_TIMEOUT 50
+
+/* Firmware commands */
+#define ZIIRAVE_CMD_DOWNLOAD_START 0x10
+#define ZIIRAVE_CMD_DOWNLOAD_END 0x11
+#define ZIIRAVE_CMD_DOWNLOAD_SET_READ_ADDR 0x12
+#define ZIIRAVE_CMD_DOWNLOAD_READ_BYTE 0x13
+#define ZIIRAVE_CMD_RESET_PROCESSOR 0x0b
+#define ZIIRAVE_CMD_JUMP_TO_BOOTLOADER 0x0c
+#define ZIIRAVE_CMD_DOWNLOAD_PACKET 0x0e
+
struct ziirave_wdt_rev {
unsigned char major;
unsigned char minor;
};
struct ziirave_wdt_data {
+ struct mutex sysfs_mutex;
struct watchdog_device wdd;
struct ziirave_wdt_rev bootloader_rev;
struct ziirave_wdt_rev firmware_rev;
@@ -146,6 +174,293 @@ static unsigned int ziirave_wdt_get_timeleft(struct watchdog_device *wdd)
return ret;
}
+static int ziirave_firm_wait_for_ack(struct watchdog_device *wdd)
+{
+ struct i2c_client *client = to_i2c_client(wdd->parent);
+ int ret;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(ZIIRAVE_FIRM_WAIT_FOR_ACK_TIMEOUT);
+ do {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ usleep_range(5000, 10000);
+
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read byte\n");
+ return ret;
+ }
+ } while (ret == ZIIRAVE_FIRM_DOWNLOAD_BUSY);
+
+ return ret == ZIIRAVE_FIRM_DOWNLOAD_ACK ? 0 : -EIO;
+}
+
+static int ziirave_firm_set_read_addr(struct watchdog_device *wdd, u16 addr)
+{
+ struct i2c_client *client = to_i2c_client(wdd->parent);
+ u8 address[2];
+
+ address[0] = addr & 0xff;
+ address[1] = (addr >> 8) & 0xff;
+
+ return i2c_smbus_write_block_data(client,
+ ZIIRAVE_CMD_DOWNLOAD_SET_READ_ADDR,
+ ARRAY_SIZE(address), address);
+}
+
+static int ziirave_firm_write_block_data(struct watchdog_device *wdd,
+ u8 command, u8 length, const u8 *data,
+ bool wait_for_ack)
+{
+ struct i2c_client *client = to_i2c_client(wdd->parent);
+ int ret;
+
+ ret = i2c_smbus_write_block_data(client, command, length, data);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to send command 0x%02x: %d\n", command, ret);
+ return ret;
+ }
+
+ if (wait_for_ack)
+ ret = ziirave_firm_wait_for_ack(wdd);
+
+ return ret;
+}
+
+static int ziirave_firm_write_byte(struct watchdog_device *wdd, u8 command,
+ u8 byte, bool wait_for_ack)
+{
+ return ziirave_firm_write_block_data(wdd, command, 1, &byte,
+ wait_for_ack);
+}
+
+/*
+ * ziirave_firm_write_pkt() - Build and write a firmware packet
+ *
+ * A packet to send to the firmware is composed of the following bytes:
+ *   | Length | Addr0 | Addr1 | Data0 .. Data15 | Checksum |
+ * Where,
+ * Length: A data byte containing the length of the data.
+ * Addr0: Low byte of the address.
+ * Addr1: High byte of the address.
+ * Data0 .. Data15: Array of 16 bytes of data.
+ * Checksum: Checksum byte to verify data integrity.
+ */
+static int ziirave_firm_write_pkt(struct watchdog_device *wdd,
+ const struct ihex_binrec *rec)
+{
+ struct i2c_client *client = to_i2c_client(wdd->parent);
+ u8 i, checksum = 0, packet[ZIIRAVE_FIRM_PKT_TOTAL_SIZE];
+ int ret;
+ u16 addr;
+
+ memset(packet, 0, ARRAY_SIZE(packet));
+
+ /* Packet length */
+ packet[0] = (u8)be16_to_cpu(rec->len);
+ /* Packet address */
+ addr = (be32_to_cpu(rec->addr) & 0xffff) >> 1;
+ packet[1] = addr & 0xff;
+ packet[2] = (addr & 0xff00) >> 8;
+
+ /* Packet data */
+ if (be16_to_cpu(rec->len) > ZIIRAVE_FIRM_PKT_DATA_SIZE)
+ return -EMSGSIZE;
+ memcpy(packet + 3, rec->data, be16_to_cpu(rec->len));
+
+ /* Packet checksum */
+ for (i = 0; i < ZIIRAVE_FIRM_PKT_TOTAL_SIZE - 1; i++)
+ checksum += packet[i];
+ packet[ZIIRAVE_FIRM_PKT_TOTAL_SIZE - 1] = checksum;
+
+ ret = ziirave_firm_write_block_data(wdd, ZIIRAVE_CMD_DOWNLOAD_PACKET,
+ ARRAY_SIZE(packet), packet, true);
+ if (ret)
+ dev_err(&client->dev,
+ "Failed to write firmware packet at address 0x%04x: %d\n",
+ addr, ret);
+
+ return ret;
+}
+
+static int ziirave_firm_verify(struct watchdog_device *wdd,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = to_i2c_client(wdd->parent);
+ const struct ihex_binrec *rec;
+ int i, ret;
+ u8 data[ZIIRAVE_FIRM_PKT_DATA_SIZE];
+ u16 addr;
+
+ for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
+ /* Zero length marks end of records */
+ if (!be16_to_cpu(rec->len))
+ break;
+
+ addr = (be32_to_cpu(rec->addr) & 0xffff) >> 1;
+ if (addr < ZIIRAVE_FIRM_FLASH_MEMORY_START ||
+ addr > ZIIRAVE_FIRM_FLASH_MEMORY_END)
+ continue;
+
+ ret = ziirave_firm_set_read_addr(wdd, addr);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to send SET_READ_ADDR command: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ ret = i2c_smbus_read_byte_data(client,
+ ZIIRAVE_CMD_DOWNLOAD_READ_BYTE);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to READ DATA: %d\n", ret);
+ return ret;
+ }
+ data[i] = ret;
+ }
+
+ if (memcmp(data, rec->data, be16_to_cpu(rec->len))) {
+ dev_err(&client->dev,
+ "Firmware mismatch at address 0x%04x\n", addr);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ziirave_firm_upload(struct watchdog_device *wdd,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = to_i2c_client(wdd->parent);
+ int ret, words_till_page_break;
+ const struct ihex_binrec *rec;
+ struct ihex_binrec *rec_new;
+
+ ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_JUMP_TO_BOOTLOADER, 1,
+ false);
+ if (ret)
+ return ret;
+
+ msleep(500);
+
+ ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_DOWNLOAD_START, 1, true);
+ if (ret)
+ return ret;
+
+ msleep(500);
+
+ for (rec = (void *)fw->data; rec; rec = ihex_next_binrec(rec)) {
+ /* Zero length marks end of records */
+ if (!be16_to_cpu(rec->len))
+ break;
+
+ /* Check max data size */
+ if (be16_to_cpu(rec->len) > ZIIRAVE_FIRM_PKT_DATA_SIZE) {
+ dev_err(&client->dev, "Firmware packet too long (%d)\n",
+ be16_to_cpu(rec->len));
+ return -EMSGSIZE;
+ }
+
+ /* Calculate words till page break */
+ words_till_page_break = (64 - ((be32_to_cpu(rec->addr) >> 1) &
+ 0x3f));
+ if ((be16_to_cpu(rec->len) >> 1) > words_till_page_break) {
+ /*
+			 * Data crosses a page boundary, so we need to split it
+			 * into two blocks of data. Create a packet with the first
+ * block of data.
+ */
+ rec_new = kzalloc(sizeof(struct ihex_binrec) +
+ (words_till_page_break << 1),
+ GFP_KERNEL);
+ if (!rec_new)
+ return -ENOMEM;
+
+ rec_new->len = cpu_to_be16(words_till_page_break << 1);
+ rec_new->addr = rec->addr;
+ memcpy(rec_new->data, rec->data,
+ be16_to_cpu(rec_new->len));
+
+ ret = ziirave_firm_write_pkt(wdd, rec_new);
+ kfree(rec_new);
+ if (ret)
+ return ret;
+
+ /* Create a packet with the second block of data */
+ rec_new = kzalloc(sizeof(struct ihex_binrec) +
+ be16_to_cpu(rec->len) -
+ (words_till_page_break << 1),
+ GFP_KERNEL);
+ if (!rec_new)
+ return -ENOMEM;
+
+ /* Remaining bytes */
+ rec_new->len = rec->len -
+ cpu_to_be16(words_till_page_break << 1);
+
+ rec_new->addr = cpu_to_be32(be32_to_cpu(rec->addr) +
+ (words_till_page_break << 1));
+
+ memcpy(rec_new->data,
+ rec->data + (words_till_page_break << 1),
+ be16_to_cpu(rec_new->len));
+
+ ret = ziirave_firm_write_pkt(wdd, rec_new);
+ kfree(rec_new);
+ if (ret)
+ return ret;
+ } else {
+ ret = ziirave_firm_write_pkt(wdd, rec);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* For end of download, the length field will be set to 0 */
+ rec_new = kzalloc(sizeof(struct ihex_binrec) + 1, GFP_KERNEL);
+ if (!rec_new)
+ return -ENOMEM;
+
+ ret = ziirave_firm_write_pkt(wdd, rec_new);
+ kfree(rec_new);
+ if (ret) {
+ dev_err(&client->dev, "Failed to send EMPTY packet: %d\n", ret);
+ return ret;
+ }
+
+ /* This sleep seems to be required */
+ msleep(20);
+
+ /* Start firmware verification */
+ ret = ziirave_firm_verify(wdd, fw);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to verify firmware: %d\n", ret);
+ return ret;
+ }
+
+ /* End download operation */
+ ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_DOWNLOAD_END, 1, false);
+ if (ret)
+ return ret;
+
+ /* Reset the processor */
+ ret = ziirave_firm_write_byte(wdd, ZIIRAVE_CMD_RESET_PROCESSOR, 1,
+ false);
+ if (ret)
+ return ret;
+
+ msleep(500);
+
+ return 0;
+}
+
static const struct watchdog_info ziirave_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.identity = "Zodiac RAVE Watchdog",
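
A standalone sketch of the checksum rule documented in ziirave_firm_write_pkt() above (the length and address bytes are illustrative): the twentieth byte is the 8-bit sum of the nineteen before it.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t pkt[20] = { 16, 0x00, 0x16 };  /* len, addr lo, addr hi; data zeroed */
            uint8_t sum = 0;
            int i;

            for (i = 0; i < 19; i++)  /* ZIIRAVE_FIRM_PKT_TOTAL_SIZE - 1 */
                    sum += pkt[i];
            pkt[19] = sum;
            printf("checksum byte: 0x%02x\n", pkt[19]);  /* 0x26 here */
            return 0;
    }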
@@ -166,9 +481,18 @@ static ssize_t ziirave_wdt_sysfs_show_firm(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev->parent);
struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
+ int ret;
+
+ ret = mutex_lock_interruptible(&w_priv->sysfs_mutex);
+ if (ret)
+ return ret;
+
+ ret = sprintf(buf, "02.%02u.%02u", w_priv->firmware_rev.major,
+ w_priv->firmware_rev.minor);
- return sprintf(buf, "02.%02u.%02u", w_priv->firmware_rev.major,
- w_priv->firmware_rev.minor);
+ mutex_unlock(&w_priv->sysfs_mutex);
+
+ return ret;
}
static DEVICE_ATTR(firmware_version, S_IRUGO, ziirave_wdt_sysfs_show_firm,
@@ -180,9 +504,18 @@ static ssize_t ziirave_wdt_sysfs_show_boot(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev->parent);
struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
+ int ret;
- return sprintf(buf, "01.%02u.%02u", w_priv->bootloader_rev.major,
- w_priv->bootloader_rev.minor);
+ ret = mutex_lock_interruptible(&w_priv->sysfs_mutex);
+ if (ret)
+ return ret;
+
+ ret = sprintf(buf, "01.%02u.%02u", w_priv->bootloader_rev.major,
+ w_priv->bootloader_rev.minor);
+
+ mutex_unlock(&w_priv->sysfs_mutex);
+
+ return ret;
}
static DEVICE_ATTR(bootloader_version, S_IRUGO, ziirave_wdt_sysfs_show_boot,
@@ -194,17 +527,81 @@ static ssize_t ziirave_wdt_sysfs_show_reason(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev->parent);
struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
+ int ret;
+
+ ret = mutex_lock_interruptible(&w_priv->sysfs_mutex);
+ if (ret)
+ return ret;
+
+ ret = sprintf(buf, "%s", ziirave_reasons[w_priv->reset_reason]);
- return sprintf(buf, "%s", ziirave_reasons[w_priv->reset_reason]);
+ mutex_unlock(&w_priv->sysfs_mutex);
+
+ return ret;
}
static DEVICE_ATTR(reset_reason, S_IRUGO, ziirave_wdt_sysfs_show_reason,
NULL);
+static ssize_t ziirave_wdt_sysfs_store_firm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
+ const struct firmware *fw;
+ int err;
+
+ err = request_ihex_firmware(&fw, ZIIRAVE_FW_NAME, dev);
+ if (err) {
+ dev_err(&client->dev, "Failed to request ihex firmware\n");
+ return err;
+ }
+
+ err = mutex_lock_interruptible(&w_priv->sysfs_mutex);
+ if (err)
+ goto release_firmware;
+
+ err = ziirave_firm_upload(&w_priv->wdd, fw);
+ if (err) {
+ dev_err(&client->dev, "The firmware update failed: %d\n", err);
+ goto unlock_mutex;
+ }
+
+ /* Update firmware version */
+ err = ziirave_wdt_revision(client, &w_priv->firmware_rev,
+ ZIIRAVE_WDT_FIRM_VER_MAJOR);
+ if (err) {
+ dev_err(&client->dev, "Failed to read firmware version: %d\n",
+ err);
+ goto unlock_mutex;
+ }
+
+ dev_info(&client->dev, "Firmware updated to version 02.%02u.%02u\n",
+ w_priv->firmware_rev.major, w_priv->firmware_rev.minor);
+
+ /* Restore the watchdog timeout */
+ err = ziirave_wdt_set_timeout(&w_priv->wdd, w_priv->wdd.timeout);
+ if (err)
+ dev_err(&client->dev, "Failed to set timeout: %d\n", err);
+
+unlock_mutex:
+ mutex_unlock(&w_priv->sysfs_mutex);
+
+release_firmware:
+ release_firmware(fw);
+
+ return err ? err : count;
+}
+
+static DEVICE_ATTR(update_firmware, S_IWUSR, NULL,
+ ziirave_wdt_sysfs_store_firm);
+
static struct attribute *ziirave_wdt_attrs[] = {
&dev_attr_firmware_version.attr,
&dev_attr_bootloader_version.attr,
&dev_attr_reset_reason.attr,
+ &dev_attr_update_firmware.attr,
NULL
};
ATTRIBUTE_GROUPS(ziirave_wdt);
@@ -252,6 +649,8 @@ static int ziirave_wdt_probe(struct i2c_client *client,
if (!w_priv)
return -ENOMEM;
+ mutex_init(&w_priv->sysfs_mutex);
+
w_priv->wdd.info = &ziirave_wdt_info;
w_priv->wdd.ops = &ziirave_wdt_ops;
w_priv->wdd.min_timeout = ZIIRAVE_TIMEOUT_MIN;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index e12bd3635f83..26e5e8507f03 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -168,7 +168,9 @@ out:
#endif /* CONFIG_HIBERNATE_CALLBACKS */
struct shutdown_handler {
- const char *command;
+#define SHUTDOWN_CMD_SIZE 11
+ const char command[SHUTDOWN_CMD_SIZE];
+ bool flag;
void (*cb)(void);
};
@@ -206,22 +208,22 @@ static void do_reboot(void)
ctrl_alt_del();
}
+static struct shutdown_handler shutdown_handlers[] = {
+ { "poweroff", true, do_poweroff },
+ { "halt", false, do_poweroff },
+ { "reboot", true, do_reboot },
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ { "suspend", true, do_suspend },
+#endif
+};
+
static void shutdown_handler(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
char *str;
struct xenbus_transaction xbt;
int err;
- static struct shutdown_handler handlers[] = {
- { "poweroff", do_poweroff },
- { "halt", do_poweroff },
- { "reboot", do_reboot },
-#ifdef CONFIG_HIBERNATE_CALLBACKS
- { "suspend", do_suspend },
-#endif
- {NULL, NULL},
- };
- static struct shutdown_handler *handler;
+ int idx;
if (shutting_down != SHUTDOWN_INVALID)
return;
@@ -238,13 +240,13 @@ static void shutdown_handler(struct xenbus_watch *watch,
return;
}
- for (handler = &handlers[0]; handler->command; handler++) {
- if (strcmp(str, handler->command) == 0)
+ for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+ if (strcmp(str, shutdown_handlers[idx].command) == 0)
break;
}
/* Only acknowledge commands which we are prepared to handle. */
- if (handler->cb)
+ if (idx < ARRAY_SIZE(shutdown_handlers))
xenbus_write(xbt, "control", "shutdown", "");
err = xenbus_transaction_end(xbt, 0);
@@ -253,8 +255,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
goto again;
}
- if (handler->cb) {
- handler->cb();
+ if (idx < ARRAY_SIZE(shutdown_handlers)) {
+ shutdown_handlers[idx].cb();
} else {
pr_info("Ignoring shutdown request: %s\n", str);
shutting_down = SHUTDOWN_INVALID;
@@ -310,6 +312,9 @@ static struct notifier_block xen_reboot_nb = {
static int setup_shutdown_watcher(void)
{
int err;
+ int idx;
+#define FEATURE_PATH_SIZE (SHUTDOWN_CMD_SIZE + sizeof("feature-"))
+ char node[FEATURE_PATH_SIZE];
err = register_xenbus_watch(&shutdown_watch);
if (err) {
@@ -326,6 +331,14 @@ static int setup_shutdown_watcher(void)
}
#endif
+ for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+ if (!shutdown_handlers[idx].flag)
+ continue;
+ snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
+ shutdown_handlers[idx].command);
+ xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+ }
+
return 0;
}
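
A standalone sketch of the node names the loop above writes, assuming the handler table shown (only entries with the flag set are advertised):

    #include <stdio.h>

    int main(void)
    {
            const char *cmds[] = { "poweroff", "reboot", "suspend" };  /* flag == true above */
            char node[20];  /* FEATURE_PATH_SIZE: SHUTDOWN_CMD_SIZE + sizeof("feature-") */
            unsigned int i;

            for (i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++) {
                    snprintf(node, sizeof(node), "feature-%s", cmds[i]);
                    printf("xenstore: control/%s = 1\n", node);
            }
            return 0;
    }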
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c1010f018bd8..1e8be12ebb55 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -364,7 +364,7 @@ out:
static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
- struct watch_adapter *watch, *tmp_watch;
+ struct watch_adapter *watch;
char *path, *token;
int err, rc;
LIST_HEAD(staging_q);
@@ -399,7 +399,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
}
list_add(&watch->list, &u->watches);
} else {
- list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+ list_for_each_entry(watch, &u->watches, list) {
if (!strcmp(watch->token, token) &&
!strcmp(watch->watch.node, path)) {
unregister_xenbus_watch(&watch->watch);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 611a23119675..6d40a972ffb2 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -335,7 +335,9 @@ static int backend_state;
static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
const char **v, unsigned int l)
{
- xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state);
+ if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i",
+ &backend_state) != 1)
+ backend_state = XenbusStateUnknown;
printk(KERN_DEBUG "XENBUS: backend %s %s\n",
v[XS_WATCH_PATH], xenbus_strstate(backend_state));
wake_up(&backend_state_wq);